xref: /openbmc/linux/net/bluetooth/hci_conn.c (revision 0d456bad)
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI connection handling. */

#include <linux/export.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/a2mp.h>
#include <net/bluetooth/smp.h>

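/* Kick off an outgoing LE connection by sending the LE Create Connection
 * command as master.
 */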
static void hci_le_create_connection(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_le_create_conn cp;

	conn->state = BT_CONNECT;
	conn->out = true;
	conn->link_mode |= HCI_LM_MASTER;
	conn->sec_level = BT_SECURITY_LOW;

	memset(&cp, 0, sizeof(cp));
	cp.scan_interval = __constant_cpu_to_le16(0x0060);
	cp.scan_window = __constant_cpu_to_le16(0x0030);
	bacpy(&cp.peer_addr, &conn->dst);
	cp.peer_addr_type = conn->dst_type;
	cp.conn_interval_min = __constant_cpu_to_le16(0x0028);
	cp.conn_interval_max = __constant_cpu_to_le16(0x0038);
	cp.supervision_timeout = __constant_cpu_to_le16(0x002a);
	cp.min_ce_len = __constant_cpu_to_le16(0x0000);
	cp.max_ce_len = __constant_cpu_to_le16(0x0000);

	hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
}

static void hci_le_create_connection_cancel(struct hci_conn *conn)
{
	hci_send_cmd(conn->hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
}

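/* Initiate an outgoing BR/EDR ACL connection, seeding the paging parameters
 * from the inquiry cache when a fresh entry for the peer is available.
 */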
static void hci_acl_create_connection(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct inquiry_entry *ie;
	struct hci_cp_create_conn cp;

	BT_DBG("hcon %p", conn);

	conn->state = BT_CONNECT;
	conn->out = true;

	conn->link_mode = HCI_LM_MASTER;

	conn->attempt++;

	conn->link_policy = hdev->link_policy;

	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.bdaddr, &conn->dst);
	cp.pscan_rep_mode = 0x02;

	ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
	if (ie) {
		if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
			cp.pscan_rep_mode = ie->data.pscan_rep_mode;
			cp.pscan_mode     = ie->data.pscan_mode;
			cp.clock_offset   = ie->data.clock_offset |
					    __constant_cpu_to_le16(0x8000);
		}

		memcpy(conn->dev_class, ie->data.dev_class, 3);
		if (ie->data.ssp_mode > 0)
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
	}

	cp.pkt_type = cpu_to_le16(conn->pkt_type);
	if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
		cp.role_switch = 0x01;
	else
		cp.role_switch = 0x00;

	hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
}

static void hci_acl_create_connection_cancel(struct hci_conn *conn)
{
	struct hci_cp_create_conn_cancel cp;

	BT_DBG("hcon %p", conn);

	if (conn->hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	bacpy(&cp.bdaddr, &conn->dst);
	hci_send_cmd(conn->hdev, HCI_OP_CREATE_CONN_CANCEL, sizeof(cp), &cp);
}

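/* Send HCI Disconnect for the connection handle with the given reason */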
void hci_acl_disconn(struct hci_conn *conn, __u8 reason)
{
	struct hci_cp_disconnect cp;

	BT_DBG("hcon %p", conn);

	conn->state = BT_DISCONN;

	cp.handle = cpu_to_le16(conn->handle);
	cp.reason = reason;
	hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
}

static void hci_amp_disconn(struct hci_conn *conn, __u8 reason)
{
	struct hci_cp_disconn_phy_link cp;

	BT_DBG("hcon %p", conn);

	conn->state = BT_DISCONN;

	cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
	cp.reason = reason;
	hci_send_cmd(conn->hdev, HCI_OP_DISCONN_PHY_LINK,
		     sizeof(cp), &cp);
}

static void hci_add_sco(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_add_sco cp;

	BT_DBG("hcon %p", conn);

	conn->state = BT_CONNECT;
	conn->out = true;

	conn->attempt++;

	cp.handle   = cpu_to_le16(handle);
	cp.pkt_type = cpu_to_le16(conn->pkt_type);

	hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
}

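/* Set up a synchronous (eSCO) connection on top of the given ACL handle */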
void hci_setup_sync(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_setup_sync_conn cp;

	BT_DBG("hcon %p", conn);

	conn->state = BT_CONNECT;
	conn->out = true;

	conn->attempt++;

	cp.handle   = cpu_to_le16(handle);
	cp.pkt_type = cpu_to_le16(conn->pkt_type);

	cp.tx_bandwidth   = __constant_cpu_to_le32(0x00001f40);
	cp.rx_bandwidth   = __constant_cpu_to_le32(0x00001f40);
	cp.max_latency    = __constant_cpu_to_le16(0xffff);
	cp.voice_setting  = cpu_to_le16(hdev->voice_setting);
	cp.retrans_effort = 0xff;

	hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp);
}

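/* Request updated LE connection parameters from the controller */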
void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
			u16 latency, u16 to_multiplier)
{
	struct hci_cp_le_conn_update cp;
	struct hci_dev *hdev = conn->hdev;

	memset(&cp, 0, sizeof(cp));

	cp.handle		= cpu_to_le16(conn->handle);
	cp.conn_interval_min	= cpu_to_le16(min);
	cp.conn_interval_max	= cpu_to_le16(max);
	cp.conn_latency		= cpu_to_le16(latency);
	cp.supervision_timeout	= cpu_to_le16(to_multiplier);
	cp.min_ce_len		= __constant_cpu_to_le16(0x0001);
	cp.max_ce_len		= __constant_cpu_to_le16(0x0001);

	hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
}

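/* Start LE link encryption with the given EDIV, Rand and Long Term Key */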
void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
		      __u8 ltk[16])
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_le_start_enc cp;

	BT_DBG("hcon %p", conn);

	memset(&cp, 0, sizeof(cp));

	cp.handle = cpu_to_le16(conn->handle);
	memcpy(cp.ltk, ltk, sizeof(cp.ltk));
	cp.ediv = ediv;
	memcpy(cp.rand, rand, sizeof(cp.rand));

	hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
}

/* Device _must_ be locked */
void hci_sco_setup(struct hci_conn *conn, __u8 status)
{
	struct hci_conn *sco = conn->link;

	if (!sco)
		return;

	BT_DBG("hcon %p", conn);

	if (!status) {
		if (lmp_esco_capable(conn->hdev))
			hci_setup_sync(sco, conn->handle);
		else
			hci_add_sco(sco, conn->handle);
	} else {
		hci_proto_connect_cfm(sco, status);
		hci_conn_del(sco);
	}
}

static void hci_conn_disconnect(struct hci_conn *conn)
{
	__u8 reason = hci_proto_disconn_ind(conn);

	switch (conn->type) {
	case ACL_LINK:
		hci_acl_disconn(conn, reason);
		break;
	case AMP_LINK:
		hci_amp_disconn(conn, reason);
		break;
	}
}

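/* Deferred disconnect work: once nothing holds a reference to the connection
 * anymore, cancel a pending connect attempt or tear down the established link.
 */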
static void hci_conn_timeout(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     disc_work.work);

	BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));

	if (atomic_read(&conn->refcnt))
		return;

	switch (conn->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
		if (conn->out) {
			if (conn->type == ACL_LINK)
				hci_acl_create_connection_cancel(conn);
			else if (conn->type == LE_LINK)
				hci_le_create_connection_cancel(conn);
		}
		break;
	case BT_CONFIG:
	case BT_CONNECTED:
		hci_conn_disconnect(conn);
		break;
	default:
		conn->state = BT_CLOSED;
		break;
	}
}

/* Enter sniff mode */
static void hci_conn_enter_sniff_mode(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("hcon %p mode %d", conn, conn->mode);

	if (test_bit(HCI_RAW, &hdev->flags))
		return;

	if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
		return;

	if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
		return;

	if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
		struct hci_cp_sniff_subrate cp;
		cp.handle             = cpu_to_le16(conn->handle);
		cp.max_latency        = __constant_cpu_to_le16(0);
		cp.min_remote_timeout = __constant_cpu_to_le16(0);
		cp.min_local_timeout  = __constant_cpu_to_le16(0);
		hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
	}

	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
		struct hci_cp_sniff_mode cp;
		cp.handle       = cpu_to_le16(conn->handle);
		cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
		cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
		cp.attempt      = __constant_cpu_to_le16(4);
		cp.timeout      = __constant_cpu_to_le16(1);
		hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
	}
}

static void hci_conn_idle(unsigned long arg)
{
	struct hci_conn *conn = (void *) arg;

	BT_DBG("hcon %p mode %d", conn, conn->mode);

	hci_conn_enter_sniff_mode(conn);
}

static void hci_conn_auto_accept(unsigned long arg)
{
	struct hci_conn *conn = (void *) arg;
	struct hci_dev *hdev = conn->hdev;

	hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
		     &conn->dst);
}

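/* Allocate and initialize a new hci_conn object, add it to the device's
 * connection hash and register it with sysfs.
 */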
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
{
	struct hci_conn *conn;

	BT_DBG("%s dst %pMR", hdev->name, dst);

	conn = kzalloc(sizeof(struct hci_conn), GFP_KERNEL);
	if (!conn)
		return NULL;

	bacpy(&conn->dst, dst);
	conn->hdev  = hdev;
	conn->type  = type;
	conn->mode  = HCI_CM_ACTIVE;
	conn->state = BT_OPEN;
	conn->auth_type = HCI_AT_GENERAL_BONDING;
	conn->io_capability = hdev->io_capability;
	conn->remote_auth = 0xff;
	conn->key_type = 0xff;

	set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;

	switch (type) {
	case ACL_LINK:
		conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
		break;
	case SCO_LINK:
		if (lmp_esco_capable(hdev))
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
		else
			conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
		break;
	case ESCO_LINK:
		conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
		break;
	}

	skb_queue_head_init(&conn->data_q);

	INIT_LIST_HEAD(&conn->chan_list);

	INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
	setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn);
	setup_timer(&conn->auto_accept_timer, hci_conn_auto_accept,
		    (unsigned long) conn);

	atomic_set(&conn->refcnt, 0);

	hci_dev_hold(hdev);

	hci_conn_hash_add(hdev, conn);
	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);

	atomic_set(&conn->devref, 0);

	hci_conn_init_sysfs(conn);

	return conn;
}

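/* Tear down a connection: stop its timers, detach any linked SCO/ACL
 * connection, return unacked packet credits to the controller counters and
 * release the object.
 */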
int hci_conn_del(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);

	del_timer(&conn->idle_timer);

	cancel_delayed_work_sync(&conn->disc_work);

	del_timer(&conn->auto_accept_timer);

	if (conn->type == ACL_LINK) {
		struct hci_conn *sco = conn->link;
		if (sco)
			sco->link = NULL;

		/* Unacked frames */
		hdev->acl_cnt += conn->sent;
	} else if (conn->type == LE_LINK) {
		if (hdev->le_pkts)
			hdev->le_cnt += conn->sent;
		else
			hdev->acl_cnt += conn->sent;
	} else {
		struct hci_conn *acl = conn->link;
		if (acl) {
			acl->link = NULL;
			hci_conn_put(acl);
		}
	}

	hci_chan_list_flush(conn);

	if (conn->amp_mgr)
		amp_mgr_put(conn->amp_mgr);

	hci_conn_hash_del(hdev, conn);
	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);

	skb_queue_purge(&conn->data_q);

	hci_conn_put_device(conn);

	hci_dev_put(hdev);

	if (conn->handle == 0)
		kfree(conn);

	return 0;
}

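/* Pick the local BR/EDR device to use for a connection from src to dst */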
struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
{
	int use_src = bacmp(src, BDADDR_ANY);
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%pMR -> %pMR", src, dst);

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(d, &hci_dev_list, list) {
		if (!test_bit(HCI_UP, &d->flags) ||
		    test_bit(HCI_RAW, &d->flags) ||
		    d->dev_type != HCI_BREDR)
			continue;

		/* Simple routing:
		 *   No source address - find interface with bdaddr != dst
		 *   Source address    - find interface with bdaddr == src
		 */

		if (use_src) {
			if (!bacmp(&d->bdaddr, src)) {
				hdev = d; break;
			}
		} else {
			if (bacmp(&d->bdaddr, dst)) {
				hdev = d; break;
			}
		}
	}

	if (hdev)
		hdev = hci_dev_hold(hdev);

	read_unlock(&hci_dev_list_lock);
	return hdev;
}
EXPORT_SYMBOL(hci_get_route);

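/* Look up or create an LE connection to dst and start the connect procedure
 * if no link exists yet.
 */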
static struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
				    u8 dst_type, u8 sec_level, u8 auth_type)
{
	struct hci_conn *le;

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->flags))
		return ERR_PTR(-ENOTSUPP);

	le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
	if (!le) {
		le = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
		if (le)
			return ERR_PTR(-EBUSY);

		le = hci_conn_add(hdev, LE_LINK, dst);
		if (!le)
			return ERR_PTR(-ENOMEM);

		le->dst_type = bdaddr_to_le(dst_type);
		hci_le_create_connection(le);
	}

	le->pending_sec_level = sec_level;
	le->auth_type = auth_type;

	hci_conn_hold(le);

	return le;
}

static struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
						u8 sec_level, u8 auth_type)
{
	struct hci_conn *acl;

	acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
	if (!acl) {
		acl = hci_conn_add(hdev, ACL_LINK, dst);
		if (!acl)
			return ERR_PTR(-ENOMEM);
	}

	hci_conn_hold(acl);

	if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
		acl->sec_level = BT_SECURITY_LOW;
		acl->pending_sec_level = sec_level;
		acl->auth_type = auth_type;
		hci_acl_create_connection(acl);
	}

	return acl;
}

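/* Establish a SCO/eSCO connection: bring up the underlying ACL link first and
 * defer the synchronous setup while a mode change is still pending.
 */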
static struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type,
				bdaddr_t *dst, u8 sec_level, u8 auth_type)
{
	struct hci_conn *acl;
	struct hci_conn *sco;

	acl = hci_connect_acl(hdev, dst, sec_level, auth_type);
	if (IS_ERR(acl))
		return acl;

	sco = hci_conn_hash_lookup_ba(hdev, type, dst);
	if (!sco) {
		sco = hci_conn_add(hdev, type, dst);
		if (!sco) {
			hci_conn_put(acl);
			return ERR_PTR(-ENOMEM);
		}
	}

	acl->link = sco;
	sco->link = acl;

	hci_conn_hold(sco);

	if (acl->state == BT_CONNECTED &&
	    (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
		set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
		hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);

		if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
			/* defer SCO setup until mode change completed */
			set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
			return sco;
		}

		hci_sco_setup(acl, 0x00);
	}

	return sco;
}

/* Create SCO, ACL or LE connection. */
struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
			     __u8 dst_type, __u8 sec_level, __u8 auth_type)
{
	BT_DBG("%s dst %pMR type 0x%x", hdev->name, dst, type);

	switch (type) {
	case LE_LINK:
		return hci_connect_le(hdev, dst, dst_type, sec_level, auth_type);
	case ACL_LINK:
		return hci_connect_acl(hdev, dst, sec_level, auth_type);
	case SCO_LINK:
	case ESCO_LINK:
		return hci_connect_sco(hdev, type, dst, sec_level, auth_type);
	}

	return ERR_PTR(-EINVAL);
}

/* Check link security requirement */
int hci_conn_check_link_mode(struct hci_conn *conn)
{
	BT_DBG("hcon %p", conn);

	if (hci_conn_ssp_enabled(conn) && !(conn->link_mode & HCI_LM_ENCRYPT))
		return 0;

	return 1;
}

/* Authenticate remote device */
static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
	BT_DBG("hcon %p", conn);

	if (conn->pending_sec_level > sec_level)
		sec_level = conn->pending_sec_level;

	if (sec_level > conn->sec_level)
		conn->pending_sec_level = sec_level;
	else if (conn->link_mode & HCI_LM_AUTH)
		return 1;

	/* Make sure we preserve an existing MITM requirement */
	auth_type |= (conn->auth_type & 0x01);

	conn->auth_type = auth_type;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		/* encrypt must be pending if auth is also pending */
		set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(cp), &cp);
		if (conn->key_type != 0xff)
			set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
	}

	return 0;
}

/* Encrypt the link */
static void hci_conn_encrypt(struct hci_conn *conn)
{
	BT_DBG("hcon %p", conn);

	if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		struct hci_cp_set_conn_encrypt cp;
		cp.handle  = cpu_to_le16(conn->handle);
		cp.encrypt = 0x01;
		hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
			     &cp);
	}
}

/* Enable security */
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
	BT_DBG("hcon %p", conn);

	if (conn->type == LE_LINK)
		return smp_conn_security(conn, sec_level);

	/* For sdp we don't need the link key. */
	if (sec_level == BT_SECURITY_SDP)
		return 1;

	/* For non 2.1 devices and low security level we don't need the link
	   key. */
	if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
		return 1;

	/* For other security levels we need the link key. */
	if (!(conn->link_mode & HCI_LM_AUTH))
		goto auth;

	/* An authenticated combination key has sufficient security for any
	   security level. */
	if (conn->key_type == HCI_LK_AUTH_COMBINATION)
		goto encrypt;

	/* An unauthenticated combination key has sufficient security for
	   security level 1 and 2. */
	if (conn->key_type == HCI_LK_UNAUTH_COMBINATION &&
	    (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW))
		goto encrypt;

	/* A combination key has always sufficient security for the security
	   levels 1 or 2. High security level requires the combination key
	   is generated using maximum PIN code length (16).
	   For pre 2.1 units. */
	if (conn->key_type == HCI_LK_COMBINATION &&
	    (sec_level != BT_SECURITY_HIGH || conn->pin_length == 16))
		goto encrypt;

auth:
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
		return 0;

	if (!hci_conn_auth(conn, sec_level, auth_type))
		return 0;

encrypt:
	if (conn->link_mode & HCI_LM_ENCRYPT)
		return 1;

	hci_conn_encrypt(conn);
	return 0;
}
EXPORT_SYMBOL(hci_conn_security);

/* Check secure link requirement */
int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
{
	BT_DBG("hcon %p", conn);

	if (sec_level != BT_SECURITY_HIGH)
		return 1; /* Accept if non-secure is required */

	if (conn->sec_level == BT_SECURITY_HIGH)
		return 1;

	return 0; /* Reject not secure link */
}
EXPORT_SYMBOL(hci_conn_check_secure);

/* Change link key */
int hci_conn_change_link_key(struct hci_conn *conn)
{
	BT_DBG("hcon %p", conn);

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_change_conn_link_key cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY,
			     sizeof(cp), &cp);
	}

	return 0;
}

/* Switch role */
int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
{
	BT_DBG("hcon %p", conn);

	if (!role && conn->link_mode & HCI_LM_MASTER)
		return 1;

	if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
		struct hci_cp_switch_role cp;
		bacpy(&cp.bdaddr, &conn->dst);
		cp.role = role;
		hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
	}

	return 0;
}
EXPORT_SYMBOL(hci_conn_switch_role);

/* Enter active mode */
void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("hcon %p mode %d", conn, conn->mode);

	if (test_bit(HCI_RAW, &hdev->flags))
		return;

	if (conn->mode != HCI_CM_SNIFF)
		goto timer;

	if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
		goto timer;

	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
		struct hci_cp_exit_sniff_mode cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
	}

timer:
	if (hdev->idle_timeout > 0)
		mod_timer(&conn->idle_timer,
			  jiffies + msecs_to_jiffies(hdev->idle_timeout));
}

/* Drop all connections on the device */
void hci_conn_hash_flush(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c, *n;

	BT_DBG("hdev %s", hdev->name);

	list_for_each_entry_safe(c, n, &h->list, list) {
		c->state = BT_CLOSED;

		hci_proto_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
		hci_conn_del(c);
	}
}

/* Check pending connect attempts */
void hci_conn_check_pending(struct hci_dev *hdev)
{
	struct hci_conn *conn;

	BT_DBG("hdev %s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
	if (conn)
		hci_acl_create_connection(conn);

	hci_dev_unlock(hdev);
}

void hci_conn_hold_device(struct hci_conn *conn)
{
	atomic_inc(&conn->devref);
}
EXPORT_SYMBOL(hci_conn_hold_device);

void hci_conn_put_device(struct hci_conn *conn)
{
	if (atomic_dec_and_test(&conn->devref))
		hci_conn_del_sysfs(conn);
}
EXPORT_SYMBOL(hci_conn_put_device);

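/* Copy information about a device's active connections back to user space */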
int hci_get_conn_list(void __user *arg)
{
	struct hci_conn *c;
	struct hci_conn_list_req req, *cl;
	struct hci_conn_info *ci;
	struct hci_dev *hdev;
	int n = 0, size, err;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
		return -EINVAL;

	size = sizeof(req) + req.conn_num * sizeof(*ci);

	cl = kmalloc(size, GFP_KERNEL);
	if (!cl)
		return -ENOMEM;

	hdev = hci_dev_get(req.dev_id);
	if (!hdev) {
		kfree(cl);
		return -ENODEV;
	}

	ci = cl->conn_info;

	hci_dev_lock(hdev);
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		bacpy(&(ci + n)->bdaddr, &c->dst);
		(ci + n)->handle = c->handle;
		(ci + n)->type  = c->type;
		(ci + n)->out   = c->out;
		(ci + n)->state = c->state;
		(ci + n)->link_mode = c->link_mode;
		if (++n >= req.conn_num)
			break;
	}
	hci_dev_unlock(hdev);

	cl->dev_id = hdev->id;
	cl->conn_num = n;
	size = sizeof(req) + n * sizeof(*ci);

	hci_dev_put(hdev);

	err = copy_to_user(arg, cl, size);
	kfree(cl);

	return err ? -EFAULT : 0;
}

int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
{
	struct hci_conn_info_req req;
	struct hci_conn_info ci;
	struct hci_conn *conn;
	char __user *ptr = arg + sizeof(req);

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
	if (conn) {
		bacpy(&ci.bdaddr, &conn->dst);
		ci.handle = conn->handle;
		ci.type  = conn->type;
		ci.out   = conn->out;
		ci.state = conn->state;
		ci.link_mode = conn->link_mode;
	}
	hci_dev_unlock(hdev);

	if (!conn)
		return -ENOENT;

	return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
}

int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
{
	struct hci_auth_info_req req;
	struct hci_conn *conn;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
	if (conn)
		req.type = conn->auth_type;
	hci_dev_unlock(hdev);

	if (!conn)
		return -ENOENT;

	return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
}

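/* Allocate a new hci_chan and add it to the connection's channel list */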
struct hci_chan *hci_chan_create(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_chan *chan;

	BT_DBG("%s hcon %p", hdev->name, conn);

	chan = kzalloc(sizeof(struct hci_chan), GFP_KERNEL);
	if (!chan)
		return NULL;

	chan->conn = conn;
	skb_queue_head_init(&chan->data_q);
	chan->state = BT_CONNECTED;

	list_add_rcu(&chan->list, &conn->chan_list);

	return chan;
}

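/* Unlink a channel from its connection, purge its queued data and free it */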
void hci_chan_del(struct hci_chan *chan)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);

	list_del_rcu(&chan->list);

	synchronize_rcu();

	hci_conn_put(conn);

	skb_queue_purge(&chan->data_q);
	kfree(chan);
}

void hci_chan_list_flush(struct hci_conn *conn)
{
	struct hci_chan *chan, *n;

	BT_DBG("hcon %p", conn);

	list_for_each_entry_safe(chan, n, &conn->chan_list, list)
		hci_chan_del(chan);
}

static struct hci_chan *__hci_chan_lookup_handle(struct hci_conn *hcon,
						 __u16 handle)
{
	struct hci_chan *hchan;

	list_for_each_entry(hchan, &hcon->chan_list, list) {
		if (hchan->handle == handle)
			return hchan;
	}

	return NULL;
}

struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *hcon;
	struct hci_chan *hchan = NULL;

	rcu_read_lock();

	list_for_each_entry_rcu(hcon, &h->list, list) {
		hchan = __hci_chan_lookup_handle(hcon, handle);
		if (hchan)
			break;
	}

	rcu_read_unlock();

	return hchan;
}