xref: /openbmc/linux/net/bluetooth/hci_conn.c (revision a86ddbffa6ed05bc2465a545a96627b6e776c019)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4 
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI connection handling. */
26 
27 #include <linux/export.h>
28 #include <linux/debugfs.h>
29 
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/l2cap.h>
33 
34 #include "hci_request.h"
35 #include "smp.h"
36 #include "a2mp.h"
37 
/* One candidate parameter set for the HCI (Enhanced) Setup Synchronous
 * Connection commands; tables of these are walked via conn->attempt.
 */
struct sco_param {
	u16 pkt_type;		/* allowed (e)SCO packet types (ESCO_* bits) */
	u16 max_latency;	/* max latency in ms; 0xffff = don't care */
	u8  retrans_effort;	/* retransmission effort; 0xff = don't care */
};
43 
/* Parameter tables, tried in order (indexed by conn->attempt - 1) until
 * the controller accepts one.  The S/T/D labels name the standardised
 * audio parameter sets each entry corresponds to.
 */
static const struct sco_param esco_param_cvsd[] = {
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a,	0x01 }, /* S3 */
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007,	0x01 }, /* S2 */
	{ EDR_ESCO_MASK | ESCO_EV3,   0x0007,	0x01 }, /* S1 */
	{ EDR_ESCO_MASK | ESCO_HV3,   0xffff,	0x01 }, /* D1 */
	{ EDR_ESCO_MASK | ESCO_HV1,   0xffff,	0x01 }, /* D0 */
};

/* CVSD fallback for controllers without eSCO support */
static const struct sco_param sco_param_cvsd[] = {
	{ EDR_ESCO_MASK | ESCO_HV3,   0xffff,	0xff }, /* D1 */
	{ EDR_ESCO_MASK | ESCO_HV1,   0xffff,	0xff }, /* D0 */
};

/* Transparent air mode / mSBC (wideband speech) eSCO settings */
static const struct sco_param esco_param_msbc[] = {
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d,	0x02 }, /* T2 */
	{ EDR_ESCO_MASK | ESCO_EV3,   0x0008,	0x02 }, /* T1 */
};
61 
/* Undo the pend_le_conns bookkeeping of an explicit LE connect attempt
 * that was still in the scanning phase, re-queueing or deleting the
 * stored connection parameters as appropriate.
 *
 * This function requires the caller holds hdev->lock.
 */
static void hci_connect_le_scan_cleanup(struct hci_conn *conn)
{
	struct hci_conn_params *params;
	struct hci_dev *hdev = conn->hdev;
	struct smp_irk *irk;
	bdaddr_t *bdaddr;
	u8 bdaddr_type;

	bdaddr = &conn->dst;
	bdaddr_type = conn->dst_type;

	/* Check if we need to convert to identity address */
	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		bdaddr_type = irk->addr_type;
	}

	/* Only connections waiting for an explicit connect need any
	 * cleanup here; anything else is left untouched.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, bdaddr,
					   bdaddr_type);
	if (!params || !params->explicit_connect)
		return;

	/* The connection attempt was doing scan for new RPA, and is
	 * in scan phase. If params are not associated with any other
	 * autoconnect action, remove them completely. If they are, just unmark
	 * them as waiting for connection, by clearing explicit_connect field.
	 */
	params->explicit_connect = false;

	/* Take the params off their current action list before deciding
	 * which list (if any) they belong on now.
	 */
	list_del_init(&params->action);

	switch (params->auto_connect) {
	case HCI_AUTO_CONN_EXPLICIT:
		hci_conn_params_del(hdev, bdaddr, bdaddr_type);
		/* return instead of break to avoid duplicate scan update */
		return;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		list_add(&params->action, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		list_add(&params->action, &hdev->pend_le_reports);
		break;
	default:
		break;
	}

	/* The action lists changed above, so refresh passive scanning */
	hci_update_passive_scan(hdev);
}
113 
/* Remove @conn from the connection hash and release everything tied to
 * it: deferred conn params, link key, channels, sysfs/debugfs entries
 * and the hdev/conn references taken in hci_conn_add().
 *
 * Called with hdev->lock held (see callers hci_conn_del() and
 * le_scan_cleanup()).
 */
static void hci_conn_cleanup(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	/* Parameter removal was deferred until the connection went down */
	if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags))
		hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type);

	if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
		hci_remove_link_key(hdev, &conn->dst);

	hci_chan_list_flush(conn);

	hci_conn_hash_del(hdev, conn);

	/* Tell the driver the link went away.  For (e)SCO links only the
	 * CVSD/transparent air modes are reported.
	 */
	if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
		switch (conn->setting & SCO_AIRMODE_MASK) {
		case SCO_AIRMODE_CVSD:
		case SCO_AIRMODE_TRANSP:
			if (hdev->notify)
				hdev->notify(hdev, HCI_NOTIFY_DISABLE_SCO);
			break;
		}
	} else {
		if (hdev->notify)
			hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
	}

	hci_conn_del_sysfs(conn);

	debugfs_remove_recursive(conn->debugfs);

	/* Drop the references taken in hci_conn_add() */
	hci_dev_put(hdev);

	hci_conn_put(conn);
}
149 
/* Work callback scheduled by hci_connect_le_scan_remove(): performs the
 * deferred cleanup of an LE connection that timed out while still in
 * the scanning phase, then releases the temporary references taken when
 * the work was queued.
 */
static void le_scan_cleanup(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     le_scan_cleanup);
	struct hci_dev *hdev = conn->hdev;
	struct hci_conn *c = NULL;

	BT_DBG("%s hcon %p", hdev->name, conn);

	hci_dev_lock(hdev);

	/* Check that the hci_conn is still around */
	rcu_read_lock();
	list_for_each_entry_rcu(c, &hdev->conn_hash.list, list) {
		if (c == conn)
			break;
	}
	rcu_read_unlock();

	/* Only clean up if the connection was still in the hash;
	 * otherwise somebody else already deleted it meanwhile.
	 */
	if (c == conn) {
		hci_connect_le_scan_cleanup(conn);
		hci_conn_cleanup(conn);
	}

	hci_dev_unlock(hdev);
	/* Match the hci_dev_hold()/hci_conn_get() from the scheduler */
	hci_dev_put(hdev);
	hci_conn_put(conn);
}
178 
/* Schedule deferred removal of an LE connection that is still in the
 * scanning phase; the actual work happens in le_scan_cleanup().
 */
static void hci_connect_le_scan_remove(struct hci_conn *conn)
{
	BT_DBG("%s hcon %p", conn->hdev->name, conn);

	/* We can't call hci_conn_del/hci_conn_cleanup here since that
	 * could deadlock with another hci_conn_del() call that's holding
	 * hci_dev_lock and doing cancel_delayed_work_sync(&conn->disc_work).
	 * Instead, grab temporary extra references to the hci_dev and
	 * hci_conn and perform the necessary cleanup in a separate work
	 * callback.
	 */

	/* Both references are dropped again by le_scan_cleanup() */
	hci_dev_hold(conn->hdev);
	hci_conn_get(conn);

	/* Even though we hold a reference to the hdev, many other
	 * things might get cleaned up meanwhile, including the hdev's
	 * own workqueue, so we can't use that for scheduling.
	 */
	schedule_work(&conn->le_scan_cleanup);
}
200 
/* Issue HCI Create Connection for an outgoing ACL link, seeding the
 * page-scan parameters from the inquiry cache when a fresh entry for
 * the peer exists.
 */
static void hci_acl_create_connection(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct inquiry_entry *ie;
	struct hci_cp_create_conn cp;

	BT_DBG("hcon %p", conn);

	/* Many controllers disallow HCI Create Connection while it is doing
	 * HCI Inquiry. So we cancel the Inquiry first before issuing HCI Create
	 * Connection. This may cause the MGMT discovering state to become false
	 * without user space's request but it is okay since the MGMT Discovery
	 * APIs do not promise that discovery should be done forever. Instead,
	 * the user space monitors the status of MGMT discovering and it may
	 * request for discovery again when this flag becomes false.
	 */
	if (test_bit(HCI_INQUIRY, &hdev->flags)) {
		/* Put this connection to "pending" state so that it will be
		 * executed after the inquiry cancel command complete event.
		 */
		conn->state = BT_CONNECT2;
		hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
		return;
	}

	conn->state = BT_CONNECT;
	conn->out = true;
	conn->role = HCI_ROLE_MASTER;

	conn->attempt++;

	conn->link_policy = hdev->link_policy;

	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.bdaddr, &conn->dst);
	/* Default page scan repetition mode when nothing better is known */
	cp.pscan_rep_mode = 0x02;

	ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
	if (ie) {
		if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
			cp.pscan_rep_mode = ie->data.pscan_rep_mode;
			cp.pscan_mode     = ie->data.pscan_mode;
			/* 0x8000 marks the clock offset as valid */
			cp.clock_offset   = ie->data.clock_offset |
					    cpu_to_le16(0x8000);
		}

		memcpy(conn->dev_class, ie->data.dev_class, 3);
	}

	cp.pkt_type = cpu_to_le16(conn->pkt_type);
	/* Allow the peer to become central if we don't insist on the role */
	if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
		cp.role_switch = 0x01;
	else
		cp.role_switch = 0x00;

	hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
}
258 
259 int hci_disconnect(struct hci_conn *conn, __u8 reason)
260 {
261 	BT_DBG("hcon %p", conn);
262 
263 	/* When we are central of an established connection and it enters
264 	 * the disconnect timeout, then go ahead and try to read the
265 	 * current clock offset.  Processing of the result is done
266 	 * within the event handling and hci_clock_offset_evt function.
267 	 */
268 	if (conn->type == ACL_LINK && conn->role == HCI_ROLE_MASTER &&
269 	    (conn->state == BT_CONNECTED || conn->state == BT_CONFIG)) {
270 		struct hci_dev *hdev = conn->hdev;
271 		struct hci_cp_read_clock_offset clkoff_cp;
272 
273 		clkoff_cp.handle = cpu_to_le16(conn->handle);
274 		hci_send_cmd(hdev, HCI_OP_READ_CLOCK_OFFSET, sizeof(clkoff_cp),
275 			     &clkoff_cp);
276 	}
277 
278 	return hci_abort_conn(conn, reason);
279 }
280 
281 static void hci_add_sco(struct hci_conn *conn, __u16 handle)
282 {
283 	struct hci_dev *hdev = conn->hdev;
284 	struct hci_cp_add_sco cp;
285 
286 	BT_DBG("hcon %p", conn);
287 
288 	conn->state = BT_CONNECT;
289 	conn->out = true;
290 
291 	conn->attempt++;
292 
293 	cp.handle   = cpu_to_le16(handle);
294 	cp.pkt_type = cpu_to_le16(conn->pkt_type);
295 
296 	hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
297 }
298 
299 static bool find_next_esco_param(struct hci_conn *conn,
300 				 const struct sco_param *esco_param, int size)
301 {
302 	for (; conn->attempt <= size; conn->attempt++) {
303 		if (lmp_esco_2m_capable(conn->link) ||
304 		    (esco_param[conn->attempt - 1].pkt_type & ESCO_2EV3))
305 			break;
306 		BT_DBG("hcon %p skipped attempt %d, eSCO 2M not supported",
307 		       conn, conn->attempt);
308 	}
309 
310 	return conn->attempt <= size;
311 }
312 
/* Issue HCI Enhanced Setup Synchronous Connection for @conn on top of
 * the ACL identified by @handle, filling the codec-specific parameters
 * from conn->codec and the sco_param fallback tables.
 * Returns false when no usable parameter set is left for this attempt
 * or the codec is unknown, true once the command was queued.
 */
static bool hci_enhanced_setup_sync_conn(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_enhanced_setup_sync_conn cp;
	const struct sco_param *param;

	bt_dev_dbg(hdev, "hcon %p", conn);

	/* for offload use case, codec needs to configured before opening SCO */
	if (conn->codec.data_path)
		hci_req_configure_datapath(hdev, &conn->codec);

	conn->state = BT_CONNECT;
	conn->out = true;

	/* conn->attempt selects the parameter-table entry; it is bumped on
	 * every (re)try so a rejected setting is not used again.
	 */
	conn->attempt++;

	memset(&cp, 0x00, sizeof(cp));

	cp.handle   = cpu_to_le16(handle);

	cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
	cp.rx_bandwidth   = cpu_to_le32(0x00001f40);

	switch (conn->codec.id) {
	case BT_CODEC_MSBC:
		if (!find_next_esco_param(conn, esco_param_msbc,
					  ARRAY_SIZE(esco_param_msbc)))
			return false;

		/* coding format 0x05 = mSBC over the air, 0x04 = linear PCM
		 * on the host/controller transport (per HCI assigned numbers)
		 */
		param = &esco_param_msbc[conn->attempt - 1];
		cp.tx_coding_format.id = 0x05;
		cp.rx_coding_format.id = 0x05;
		cp.tx_codec_frame_size = __cpu_to_le16(60);
		cp.rx_codec_frame_size = __cpu_to_le16(60);
		cp.in_bandwidth = __cpu_to_le32(32000);
		cp.out_bandwidth = __cpu_to_le32(32000);
		cp.in_coding_format.id = 0x04;
		cp.out_coding_format.id = 0x04;
		cp.in_coded_data_size = __cpu_to_le16(16);
		cp.out_coded_data_size = __cpu_to_le16(16);
		cp.in_pcm_data_format = 2;
		cp.out_pcm_data_format = 2;
		cp.in_pcm_sample_payload_msb_pos = 0;
		cp.out_pcm_sample_payload_msb_pos = 0;
		cp.in_data_path = conn->codec.data_path;
		cp.out_data_path = conn->codec.data_path;
		cp.in_transport_unit_size = 1;
		cp.out_transport_unit_size = 1;
		break;

	case BT_CODEC_TRANSPARENT:
		/* Transparent data reuses the mSBC air parameters (T1/T2) */
		if (!find_next_esco_param(conn, esco_param_msbc,
					  ARRAY_SIZE(esco_param_msbc)))
			return false;
		param = &esco_param_msbc[conn->attempt - 1];
		cp.tx_coding_format.id = 0x03;
		cp.rx_coding_format.id = 0x03;
		cp.tx_codec_frame_size = __cpu_to_le16(60);
		cp.rx_codec_frame_size = __cpu_to_le16(60);
		cp.in_bandwidth = __cpu_to_le32(0x1f40);
		cp.out_bandwidth = __cpu_to_le32(0x1f40);
		cp.in_coding_format.id = 0x03;
		cp.out_coding_format.id = 0x03;
		cp.in_coded_data_size = __cpu_to_le16(16);
		cp.out_coded_data_size = __cpu_to_le16(16);
		cp.in_pcm_data_format = 2;
		cp.out_pcm_data_format = 2;
		cp.in_pcm_sample_payload_msb_pos = 0;
		cp.out_pcm_sample_payload_msb_pos = 0;
		cp.in_data_path = conn->codec.data_path;
		cp.out_data_path = conn->codec.data_path;
		cp.in_transport_unit_size = 1;
		cp.out_transport_unit_size = 1;
		break;

	case BT_CODEC_CVSD:
		/* NOTE(review): unlike the other cases this dereferences
		 * conn->link before find_next_esco_param() gets a chance to
		 * validate it — relies on callers always linking the ACL.
		 */
		if (lmp_esco_capable(conn->link)) {
			if (!find_next_esco_param(conn, esco_param_cvsd,
						  ARRAY_SIZE(esco_param_cvsd)))
				return false;
			param = &esco_param_cvsd[conn->attempt - 1];
		} else {
			/* Legacy SCO fallback table */
			if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
				return false;
			param = &sco_param_cvsd[conn->attempt - 1];
		}
		cp.tx_coding_format.id = 2;
		cp.rx_coding_format.id = 2;
		cp.tx_codec_frame_size = __cpu_to_le16(60);
		cp.rx_codec_frame_size = __cpu_to_le16(60);
		cp.in_bandwidth = __cpu_to_le32(16000);
		cp.out_bandwidth = __cpu_to_le32(16000);
		cp.in_coding_format.id = 4;
		cp.out_coding_format.id = 4;
		cp.in_coded_data_size = __cpu_to_le16(16);
		cp.out_coded_data_size = __cpu_to_le16(16);
		cp.in_pcm_data_format = 2;
		cp.out_pcm_data_format = 2;
		cp.in_pcm_sample_payload_msb_pos = 0;
		cp.out_pcm_sample_payload_msb_pos = 0;
		cp.in_data_path = conn->codec.data_path;
		cp.out_data_path = conn->codec.data_path;
		cp.in_transport_unit_size = 16;
		cp.out_transport_unit_size = 16;
		break;
	default:
		return false;
	}

	cp.retrans_effort = param->retrans_effort;
	cp.pkt_type = __cpu_to_le16(param->pkt_type);
	cp.max_latency = __cpu_to_le16(param->max_latency);

	if (hci_send_cmd(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
		return false;

	return true;
}
432 
/* Issue the legacy HCI Setup Synchronous Connection command for @conn
 * on top of the ACL identified by @handle, picking parameters from the
 * fallback tables according to the requested air mode.
 * Returns false when no usable parameter set remains for this attempt,
 * true once the command was queued.
 */
static bool hci_setup_sync_conn(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_setup_sync_conn cp;
	const struct sco_param *param;

	bt_dev_dbg(hdev, "hcon %p", conn);

	conn->state = BT_CONNECT;
	conn->out = true;

	/* Selects the parameter-table entry; bumped on every (re)try */
	conn->attempt++;

	cp.handle   = cpu_to_le16(handle);

	cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
	cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
	cp.voice_setting  = cpu_to_le16(conn->setting);

	switch (conn->setting & SCO_AIRMODE_MASK) {
	case SCO_AIRMODE_TRANSP:
		if (!find_next_esco_param(conn, esco_param_msbc,
					  ARRAY_SIZE(esco_param_msbc)))
			return false;
		param = &esco_param_msbc[conn->attempt - 1];
		break;
	case SCO_AIRMODE_CVSD:
		/* NOTE(review): dereferences conn->link directly; relies on
		 * callers always having linked the parent ACL.
		 */
		if (lmp_esco_capable(conn->link)) {
			if (!find_next_esco_param(conn, esco_param_cvsd,
						  ARRAY_SIZE(esco_param_cvsd)))
				return false;
			param = &esco_param_cvsd[conn->attempt - 1];
		} else {
			/* Legacy SCO fallback table */
			if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
				return false;
			param = &sco_param_cvsd[conn->attempt - 1];
		}
		break;
	default:
		return false;
	}

	cp.retrans_effort = param->retrans_effort;
	cp.pkt_type = __cpu_to_le16(param->pkt_type);
	cp.max_latency = __cpu_to_le16(param->max_latency);

	if (hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
		return false;

	return true;
}
484 
485 bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
486 {
487 	if (enhanced_sync_conn_capable(conn->hdev))
488 		return hci_enhanced_setup_sync_conn(conn, handle);
489 
490 	return hci_setup_sync_conn(conn, handle);
491 }
492 
493 u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
494 		      u16 to_multiplier)
495 {
496 	struct hci_dev *hdev = conn->hdev;
497 	struct hci_conn_params *params;
498 	struct hci_cp_le_conn_update cp;
499 
500 	hci_dev_lock(hdev);
501 
502 	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
503 	if (params) {
504 		params->conn_min_interval = min;
505 		params->conn_max_interval = max;
506 		params->conn_latency = latency;
507 		params->supervision_timeout = to_multiplier;
508 	}
509 
510 	hci_dev_unlock(hdev);
511 
512 	memset(&cp, 0, sizeof(cp));
513 	cp.handle		= cpu_to_le16(conn->handle);
514 	cp.conn_interval_min	= cpu_to_le16(min);
515 	cp.conn_interval_max	= cpu_to_le16(max);
516 	cp.conn_latency		= cpu_to_le16(latency);
517 	cp.supervision_timeout	= cpu_to_le16(to_multiplier);
518 	cp.min_ce_len		= cpu_to_le16(0x0000);
519 	cp.max_ce_len		= cpu_to_le16(0x0000);
520 
521 	hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
522 
523 	if (params)
524 		return 0x01;
525 
526 	return 0x00;
527 }
528 
529 void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
530 		      __u8 ltk[16], __u8 key_size)
531 {
532 	struct hci_dev *hdev = conn->hdev;
533 	struct hci_cp_le_start_enc cp;
534 
535 	BT_DBG("hcon %p", conn);
536 
537 	memset(&cp, 0, sizeof(cp));
538 
539 	cp.handle = cpu_to_le16(conn->handle);
540 	cp.rand = rand;
541 	cp.ediv = ediv;
542 	memcpy(cp.ltk, ltk, key_size);
543 
544 	hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
545 }
546 
547 /* Device _must_ be locked */
548 void hci_sco_setup(struct hci_conn *conn, __u8 status)
549 {
550 	struct hci_conn *sco = conn->link;
551 
552 	if (!sco)
553 		return;
554 
555 	BT_DBG("hcon %p", conn);
556 
557 	if (!status) {
558 		if (lmp_esco_capable(conn->hdev))
559 			hci_setup_sync(sco, conn->handle);
560 		else
561 			hci_add_sco(sco, conn->handle);
562 	} else {
563 		hci_connect_cfm(sco, status);
564 		hci_conn_del(sco);
565 	}
566 }
567 
/* Disconnect timer (conn->disc_work): fires once the last user of the
 * connection dropped it; terminates the link unless it was re-acquired
 * in the meantime.
 */
static void hci_conn_timeout(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     disc_work.work);
	int refcnt = atomic_read(&conn->refcnt);

	BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));

	WARN_ON(refcnt < 0);

	/* FIXME: It was observed that in pairing failed scenario, refcnt
	 * drops below 0. Probably this is because l2cap_conn_del calls
	 * l2cap_chan_del for each channel, and inside l2cap_chan_del conn is
	 * dropped. After that loop hci_chan_del is called which also drops
	 * conn. For now make sure that ACL is alive if refcnt is higher then 0,
	 * otherwise drop it.
	 */
	if (refcnt > 0)
		return;

	/* LE connections in scanning state need special handling */
	if (conn->state == BT_CONNECT && conn->type == LE_LINK &&
	    test_bit(HCI_CONN_SCANNING, &conn->flags)) {
		hci_connect_le_scan_remove(conn);
		return;
	}

	hci_abort_conn(conn, hci_proto_disconn_ind(conn));
}
597 
/* Enter sniff mode: idle timer (conn->idle_work) callback that puts an
 * inactive ACL link into sniff mode, optionally configuring sniff
 * subrating first when both sides support it.
 */
static void hci_conn_idle(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     idle_work.work);
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("hcon %p mode %d", conn, conn->mode);

	if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
		return;

	/* Nothing to do if not active or sniff is disallowed by policy */
	if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
		return;

	/* Configure subrating before requesting sniff mode itself */
	if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
		struct hci_cp_sniff_subrate cp;
		cp.handle             = cpu_to_le16(conn->handle);
		cp.max_latency        = cpu_to_le16(0);
		cp.min_remote_timeout = cpu_to_le16(0);
		cp.min_local_timeout  = cpu_to_le16(0);
		hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
	}

	/* Skip if a mode change is already in flight */
	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
		struct hci_cp_sniff_mode cp;
		cp.handle       = cpu_to_le16(conn->handle);
		cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
		cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
		cp.attempt      = cpu_to_le16(4);
		cp.timeout      = cpu_to_le16(1);
		hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
	}
}
632 
633 static void hci_conn_auto_accept(struct work_struct *work)
634 {
635 	struct hci_conn *conn = container_of(work, struct hci_conn,
636 					     auto_accept_work.work);
637 
638 	hci_send_cmd(conn->hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
639 		     &conn->dst);
640 }
641 
642 static void le_disable_advertising(struct hci_dev *hdev)
643 {
644 	if (ext_adv_capable(hdev)) {
645 		struct hci_cp_le_set_ext_adv_enable cp;
646 
647 		cp.enable = 0x00;
648 		cp.num_of_sets = 0x00;
649 
650 		hci_send_cmd(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp),
651 			     &cp);
652 	} else {
653 		u8 enable = 0x00;
654 		hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
655 			     &enable);
656 	}
657 }
658 
659 static void le_conn_timeout(struct work_struct *work)
660 {
661 	struct hci_conn *conn = container_of(work, struct hci_conn,
662 					     le_conn_timeout.work);
663 	struct hci_dev *hdev = conn->hdev;
664 
665 	BT_DBG("");
666 
667 	/* We could end up here due to having done directed advertising,
668 	 * so clean up the state if necessary. This should however only
669 	 * happen with broken hardware or if low duty cycle was used
670 	 * (which doesn't have a timeout of its own).
671 	 */
672 	if (conn->role == HCI_ROLE_SLAVE) {
673 		/* Disable LE Advertising */
674 		le_disable_advertising(hdev);
675 		hci_dev_lock(hdev);
676 		hci_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
677 		hci_dev_unlock(hdev);
678 		return;
679 	}
680 
681 	hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
682 }
683 
/* Allocate and initialise a new hci_conn of @type to peer @dst with the
 * given @role, add it to the connection hash of @hdev and register its
 * sysfs entry.  Returns the new connection (refcnt starts at 0) or NULL
 * on allocation failure.
 *
 * Takes a reference on @hdev that is dropped again in
 * hci_conn_cleanup().
 */
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
			      u8 role)
{
	struct hci_conn *conn;

	BT_DBG("%s dst %pMR", hdev->name, dst);

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn)
		return NULL;

	bacpy(&conn->dst, dst);
	bacpy(&conn->src, &hdev->bdaddr);
	/* The real handle is assigned once the controller reports it */
	conn->handle = HCI_CONN_HANDLE_UNSET;
	conn->hdev  = hdev;
	conn->type  = type;
	conn->role  = role;
	conn->mode  = HCI_CM_ACTIVE;
	conn->state = BT_OPEN;
	conn->auth_type = HCI_AT_GENERAL_BONDING;
	conn->io_capability = hdev->io_capability;
	conn->remote_auth = 0xff;
	conn->key_type = 0xff;
	conn->rssi = HCI_RSSI_INVALID;
	conn->tx_power = HCI_TX_POWER_INVALID;
	conn->max_tx_power = HCI_TX_POWER_INVALID;

	set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;

	/* Set Default Authenticated payload timeout to 30s */
	conn->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;

	if (conn->role == HCI_ROLE_MASTER)
		conn->out = true;

	/* Pick the allowed packet types / source address per link type */
	switch (type) {
	case ACL_LINK:
		conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
		break;
	case LE_LINK:
		/* conn->src should reflect the local identity address */
		hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
		break;
	case SCO_LINK:
		if (lmp_esco_capable(hdev))
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
		else
			conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
		break;
	case ESCO_LINK:
		conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
		break;
	}

	skb_queue_head_init(&conn->data_q);

	INIT_LIST_HEAD(&conn->chan_list);

	INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
	INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
	INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
	INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);
	INIT_WORK(&conn->le_scan_cleanup, le_scan_cleanup);

	atomic_set(&conn->refcnt, 0);

	/* Dropped again in hci_conn_cleanup() */
	hci_dev_hold(hdev);

	hci_conn_hash_add(hdev, conn);

	/* The SCO and eSCO connections will only be notified when their
	 * setup has been completed. This is different to ACL links which
	 * can be notified right away.
	 */
	if (conn->type != SCO_LINK && conn->type != ESCO_LINK) {
		if (hdev->notify)
			hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);
	}

	hci_conn_init_sysfs(conn);

	return conn;
}
769 
/* Fully tear down @conn: stop its delayed work, return unacked packet
 * credits to the device, detach any linked SCO/ACL partner and finally
 * remove it from the hash via hci_conn_cleanup().  Always returns 0.
 */
int hci_conn_del(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);

	cancel_delayed_work_sync(&conn->disc_work);
	cancel_delayed_work_sync(&conn->auto_accept_work);
	cancel_delayed_work_sync(&conn->idle_work);

	if (conn->type == ACL_LINK) {
		/* Detach the dependent SCO link, if any */
		struct hci_conn *sco = conn->link;
		if (sco)
			sco->link = NULL;

		/* Unacked frames */
		hdev->acl_cnt += conn->sent;
	} else if (conn->type == LE_LINK) {
		cancel_delayed_work(&conn->le_conn_timeout);

		/* Return credits to the LE pool if the controller has one,
		 * otherwise to the shared ACL pool.
		 */
		if (hdev->le_pkts)
			hdev->le_cnt += conn->sent;
		else
			hdev->acl_cnt += conn->sent;
	} else {
		/* (e)SCO: detach from and release the parent ACL */
		struct hci_conn *acl = conn->link;
		if (acl) {
			acl->link = NULL;
			hci_conn_drop(acl);
		}
	}

	if (conn->amp_mgr)
		amp_mgr_put(conn->amp_mgr);

	skb_queue_purge(&conn->data_q);

	/* Remove the connection from the list and cleanup its remaining
	 * state. This is a separate function since for some cases like
	 * BT_CONNECT_SCAN we *only* want the cleanup part without the
	 * rest of hci_conn_del.
	 */
	hci_conn_cleanup(conn);

	return 0;
}
816 
817 struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, uint8_t src_type)
818 {
819 	int use_src = bacmp(src, BDADDR_ANY);
820 	struct hci_dev *hdev = NULL, *d;
821 
822 	BT_DBG("%pMR -> %pMR", src, dst);
823 
824 	read_lock(&hci_dev_list_lock);
825 
826 	list_for_each_entry(d, &hci_dev_list, list) {
827 		if (!test_bit(HCI_UP, &d->flags) ||
828 		    hci_dev_test_flag(d, HCI_USER_CHANNEL) ||
829 		    d->dev_type != HCI_PRIMARY)
830 			continue;
831 
832 		/* Simple routing:
833 		 *   No source address - find interface with bdaddr != dst
834 		 *   Source address    - find interface with bdaddr == src
835 		 */
836 
837 		if (use_src) {
838 			bdaddr_t id_addr;
839 			u8 id_addr_type;
840 
841 			if (src_type == BDADDR_BREDR) {
842 				if (!lmp_bredr_capable(d))
843 					continue;
844 				bacpy(&id_addr, &d->bdaddr);
845 				id_addr_type = BDADDR_BREDR;
846 			} else {
847 				if (!lmp_le_capable(d))
848 					continue;
849 
850 				hci_copy_identity_address(d, &id_addr,
851 							  &id_addr_type);
852 
853 				/* Convert from HCI to three-value type */
854 				if (id_addr_type == ADDR_LE_DEV_PUBLIC)
855 					id_addr_type = BDADDR_LE_PUBLIC;
856 				else
857 					id_addr_type = BDADDR_LE_RANDOM;
858 			}
859 
860 			if (!bacmp(&id_addr, src) && id_addr_type == src_type) {
861 				hdev = d; break;
862 			}
863 		} else {
864 			if (bacmp(&d->bdaddr, dst)) {
865 				hdev = d; break;
866 			}
867 		}
868 	}
869 
870 	if (hdev)
871 		hdev = hci_dev_hold(hdev);
872 
873 	read_unlock(&hci_dev_list_lock);
874 	return hdev;
875 }
876 EXPORT_SYMBOL(hci_get_route);
877 
/* Handle a failed LE connection attempt: release the params->conn
 * reference, notify userspace where appropriate, and restore passive
 * scanning / advertising.
 *
 * This function requires the caller holds hdev->lock.
 */
static void hci_le_conn_failed(struct hci_conn *conn, u8 status)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_conn_params *params;

	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
					   conn->dst_type);
	if (params && params->conn) {
		hci_conn_drop(params->conn);
		hci_conn_put(params->conn);
		params->conn = NULL;
	}

	/* If the status indicates successful cancellation of
	 * the attempt (i.e. Unknown Connection Id) there's no point of
	 * notifying failure since we'll go back to keep trying to
	 * connect. The only exception is explicit connect requests
	 * where a timeout + cancel does indicate an actual failure.
	 */
	if (status != HCI_ERROR_UNKNOWN_CONN_ID ||
	    (params && params->explicit_connect))
		mgmt_connect_failed(hdev, &conn->dst, conn->type,
				    conn->dst_type, status);

	/* Since we may have temporarily stopped the background scanning in
	 * favor of connection establishment, we should restart it.
	 */
	hci_update_passive_scan(hdev);

	/* Enable advertising in case this was a failed connection
	 * attempt as a peripheral.
	 */
	hci_enable_advertising(hdev);
}
913 
914 /* This function requires the caller holds hdev->lock */
915 void hci_conn_failed(struct hci_conn *conn, u8 status)
916 {
917 	struct hci_dev *hdev = conn->hdev;
918 
919 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
920 
921 	switch (conn->type) {
922 	case LE_LINK:
923 		hci_le_conn_failed(conn, status);
924 		break;
925 	case ACL_LINK:
926 		mgmt_connect_failed(hdev, &conn->dst, conn->type,
927 				    conn->dst_type, status);
928 		break;
929 	}
930 
931 	conn->state = BT_CLOSED;
932 	hci_connect_cfm(conn, status);
933 	hci_conn_del(conn);
934 }
935 
/* Completion callback for the queued hci_connect_le_sync() request */
static void create_le_conn_complete(struct hci_dev *hdev, void *data, int err)
{
	struct hci_conn *conn = data;

	hci_dev_lock(hdev);

	if (!err) {
		/* The request went out — clear the scan bookkeeping */
		hci_connect_le_scan_cleanup(conn);
	} else {
		bt_dev_err(hdev, "request failed to create LE connection: err %d",
			   err);

		/* Only tear the connection down if it is still pending */
		if (conn == hci_lookup_le_connect(hdev))
			hci_conn_failed(conn, bt_status(err));
	}

	hci_dev_unlock(hdev);
}
958 
/* hci_cmd_sync_queue() callback that issues the actual LE Create
 * Connection sequence for the queued hci_conn.
 */
static int hci_connect_le_sync(struct hci_dev *hdev, void *data)
{
	struct hci_conn *conn = data;
	int ret;

	bt_dev_dbg(hdev, "conn %p", conn);

	ret = hci_le_create_conn_sync(hdev, conn);

	return ret;
}
967 
/* Initiate an outgoing LE connection to @dst (of @dst_type).
 * @dst_resolved indicates the controller already resolved the address;
 * @sec_level becomes the pending security level for a new connection
 * and @conn_timeout bounds the attempt.  Returns the hci_conn (held for
 * new connections) or an ERR_PTR on failure.
 *
 * Called with hdev->lock held (uses hci_lookup_le_connect() and
 * modifies the connection hash).
 */
struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
				u8 dst_type, bool dst_resolved, u8 sec_level,
				u16 conn_timeout, u8 role)
{
	struct hci_conn *conn;
	struct smp_irk *irk;
	int err;

	/* Let's make sure that le is enabled.*/
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (lmp_le_capable(hdev))
			return ERR_PTR(-ECONNREFUSED);

		return ERR_PTR(-EOPNOTSUPP);
	}

	/* Since the controller supports only one LE connection attempt at a
	 * time, we return -EBUSY if there is any connection attempt running.
	 */
	if (hci_lookup_le_connect(hdev))
		return ERR_PTR(-EBUSY);

	/* If there's already a connection object but it's not in
	 * scanning state it means it must already be established, in
	 * which case we can't do anything else except report a failure
	 * to connect.
	 */
	conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
	if (conn && !test_bit(HCI_CONN_SCANNING, &conn->flags)) {
		return ERR_PTR(-EBUSY);
	}

	/* Check if the destination address has been resolved by the controller
	 * since if it did then the identity address shall be used.
	 */
	if (!dst_resolved) {
		/* When given an identity address with existing identity
		 * resolving key, the connection needs to be established
		 * to a resolvable random address.
		 *
		 * Storing the resolvable random address is required here
		 * to handle connection failures. The address will later
		 * be resolved back into the original identity address
		 * from the connect request.
		 */
		irk = hci_find_irk_by_addr(hdev, dst, dst_type);
		if (irk && bacmp(&irk->rpa, BDADDR_ANY)) {
			dst = &irk->rpa;
			dst_type = ADDR_LE_DEV_RANDOM;
		}
	}

	if (conn) {
		/* Reuse the existing scanning-state connection, updating
		 * its destination to the possibly-translated RPA.
		 */
		bacpy(&conn->dst, dst);
	} else {
		conn = hci_conn_add(hdev, LE_LINK, dst, role);
		if (!conn)
			return ERR_PTR(-ENOMEM);
		hci_conn_hold(conn);
		conn->pending_sec_level = sec_level;
	}

	conn->dst_type = dst_type;
	conn->sec_level = BT_SECURITY_LOW;
	conn->conn_timeout = conn_timeout;

	conn->state = BT_CONNECT;
	clear_bit(HCI_CONN_SCANNING, &conn->flags);

	err = hci_cmd_sync_queue(hdev, hci_connect_le_sync, conn,
				 create_le_conn_complete);
	if (err) {
		hci_conn_del(conn);
		return ERR_PTR(err);
	}

	return conn;
}
1046 
1047 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
1048 {
1049 	struct hci_conn *conn;
1050 
1051 	conn = hci_conn_hash_lookup_le(hdev, addr, type);
1052 	if (!conn)
1053 		return false;
1054 
1055 	if (conn->state != BT_CONNECTED)
1056 		return false;
1057 
1058 	return true;
1059 }
1060 
1061 /* This function requires the caller holds hdev->lock */
1062 static int hci_explicit_conn_params_set(struct hci_dev *hdev,
1063 					bdaddr_t *addr, u8 addr_type)
1064 {
1065 	struct hci_conn_params *params;
1066 
1067 	if (is_connected(hdev, addr, addr_type))
1068 		return -EISCONN;
1069 
1070 	params = hci_conn_params_lookup(hdev, addr, addr_type);
1071 	if (!params) {
1072 		params = hci_conn_params_add(hdev, addr, addr_type);
1073 		if (!params)
1074 			return -ENOMEM;
1075 
1076 		/* If we created new params, mark them to be deleted in
1077 		 * hci_connect_le_scan_cleanup. It's different case than
1078 		 * existing disabled params, those will stay after cleanup.
1079 		 */
1080 		params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
1081 	}
1082 
1083 	/* We're trying to connect, so make sure params are at pend_le_conns */
1084 	if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
1085 	    params->auto_connect == HCI_AUTO_CONN_REPORT ||
1086 	    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
1087 		list_del_init(&params->action);
1088 		list_add(&params->action, &hdev->pend_le_conns);
1089 	}
1090 
1091 	params->explicit_connect = true;
1092 
1093 	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
1094 	       params->auto_connect);
1095 
1096 	return 0;
1097 }
1098 
/* This function requires the caller holds hdev->lock.
 *
 * Connect to @dst via passive scanning: rather than issuing LE Create
 * Connection directly (as hci_connect_le() does), the device is added to
 * pend_le_conns so the connection is established once the device is seen
 * advertising.  Returns a held hci_conn or an ERR_PTR().
 */
struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
				     u8 dst_type, u8 sec_level,
				     u16 conn_timeout,
				     enum conn_reasons conn_reason)
{
	struct hci_conn *conn;

	/* Let's make sure that le is enabled.*/
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (lmp_le_capable(hdev))
			return ERR_PTR(-ECONNREFUSED);

		return ERR_PTR(-EOPNOTSUPP);
	}

	/* Some devices send ATT messages as soon as the physical link is
	 * established. To be able to handle these ATT messages, the user-
	 * space first establishes the connection and then starts the pairing
	 * process.
	 *
	 * So if a hci_conn object already exists for the following connection
	 * attempt, we simply update pending_sec_level and auth_type fields
	 * and return the object found.
	 */
	conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
	if (conn) {
		if (conn->pending_sec_level < sec_level)
			conn->pending_sec_level = sec_level;
		goto done;
	}

	BT_DBG("requesting refresh of dst_addr");

	conn = hci_conn_add(hdev, LE_LINK, dst, HCI_ROLE_MASTER);
	if (!conn)
		return ERR_PTR(-ENOMEM);

	if (hci_explicit_conn_params_set(hdev, dst, dst_type) < 0) {
		hci_conn_del(conn);
		return ERR_PTR(-EBUSY);
	}

	/* SCANNING marks this conn as waiting for the device to be seen
	 * advertising; cleared when the actual connect is started.
	 */
	conn->state = BT_CONNECT;
	set_bit(HCI_CONN_SCANNING, &conn->flags);
	conn->dst_type = dst_type;
	conn->sec_level = BT_SECURITY_LOW;
	conn->pending_sec_level = sec_level;
	conn->conn_timeout = conn_timeout;
	conn->conn_reason = conn_reason;

	/* Re-evaluate scan parameters now that pend_le_conns changed */
	hci_update_passive_scan(hdev);

done:
	hci_conn_hold(conn);
	return conn;
}
1156 
/* Look up or create an ACL connection to @dst and, when the link is
 * idle, start HCI connection establishment.  The returned hci_conn is
 * held; callers release it with hci_conn_drop().
 */
struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
				 u8 sec_level, u8 auth_type,
				 enum conn_reasons conn_reason)
{
	struct hci_conn *acl;

	/* BR/EDR must be enabled on the controller */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (lmp_bredr_capable(hdev))
			return ERR_PTR(-ECONNREFUSED);

		return ERR_PTR(-EOPNOTSUPP);
	}

	acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
	if (!acl) {
		acl = hci_conn_add(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
		if (!acl)
			return ERR_PTR(-ENOMEM);
	}

	hci_conn_hold(acl);

	acl->conn_reason = conn_reason;
	/* Only (re)start establishment if the link is idle (freshly added
	 * or previously closed); otherwise it is already connecting or
	 * connected and the security fields must not be reset.
	 */
	if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
		acl->sec_level = BT_SECURITY_LOW;
		acl->pending_sec_level = sec_level;
		acl->auth_type = auth_type;
		hci_acl_create_connection(acl);
	}

	return acl;
}
1189 
/* Set up a SCO/eSCO link (@type) to @dst on top of an ACL link,
 * creating the ACL first if needed.  The returned sco connection is
 * held.  If the ACL is not yet connected, or a mode change is pending,
 * the actual SCO setup is deferred to the corresponding event handler.
 */
struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
				 __u16 setting, struct bt_codec *codec)
{
	struct hci_conn *acl;
	struct hci_conn *sco;

	acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING,
			      CONN_REASON_SCO_CONNECT);
	if (IS_ERR(acl))
		return acl;

	sco = hci_conn_hash_lookup_ba(hdev, type, dst);
	if (!sco) {
		sco = hci_conn_add(hdev, type, dst, HCI_ROLE_MASTER);
		if (!sco) {
			hci_conn_drop(acl);
			return ERR_PTR(-ENOMEM);
		}
	}

	/* Cross-link the ACL and SCO connection objects */
	acl->link = sco;
	sco->link = acl;

	hci_conn_hold(sco);

	sco->setting = setting;
	sco->codec = *codec;

	if (acl->state == BT_CONNECTED &&
	    (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
		/* Force the ACL out of sniff mode before SCO setup */
		set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
		hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);

		if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
			/* defer SCO setup until mode change completed */
			set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
			return sco;
		}

		hci_sco_setup(acl, 0x00);
	}

	return sco;
}
1234 
/* Check link security requirement.
 *
 * Returns 1 when the link's current authentication/encryption state
 * satisfies the security mode in effect, 0 when the connection must be
 * rejected.
 */
int hci_conn_check_link_mode(struct hci_conn *conn)
{
	BT_DBG("hcon %p", conn);

	/* In Secure Connections Only mode, it is required that Secure
	 * Connections is used and the link is encrypted with AES-CCM
	 * using a P-256 authenticated combination key.
	 */
	if (hci_dev_test_flag(conn->hdev, HCI_SC_ONLY)) {
		if (!hci_conn_sc_enabled(conn) ||
		    !test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
		    conn->key_type != HCI_LK_AUTH_COMBINATION_P256)
			return 0;
	}

	 /* AES encryption is required for Level 4:
	  *
	  * BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 3, Part C
	  * page 1319:
	  *
	  * 128-bit equivalent strength for link and encryption keys
	  * required using FIPS approved algorithms (E0 not allowed,
	  * SAFER+ not allowed, and P-192 not allowed; encryption key
	  * not shortened)
	  */
	if (conn->sec_level == BT_SECURITY_FIPS &&
	    !test_bit(HCI_CONN_AES_CCM, &conn->flags)) {
		bt_dev_err(conn->hdev,
			   "Invalid security: Missing AES-CCM usage");
		return 0;
	}

	/* With Secure Simple Pairing the link must also be encrypted */
	if (hci_conn_ssp_enabled(conn) &&
	    !test_bit(HCI_CONN_ENCRYPT, &conn->flags))
		return 0;

	return 1;
}
1274 
/* Authenticate remote device.
 *
 * Returns 1 if the link already satisfies the requested security level,
 * 0 if an HCI Authentication Requested command was issued (or one is
 * already pending) and the caller must wait for its completion.
 */
static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
	BT_DBG("hcon %p", conn);

	/* A higher already-pending request wins over the new one */
	if (conn->pending_sec_level > sec_level)
		sec_level = conn->pending_sec_level;

	if (sec_level > conn->sec_level)
		conn->pending_sec_level = sec_level;
	else if (test_bit(HCI_CONN_AUTH, &conn->flags))
		return 1;

	/* Make sure we preserve an existing MITM requirement*/
	auth_type |= (conn->auth_type & 0x01);

	conn->auth_type = auth_type;

	/* Only one authentication request may be outstanding at a time */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(cp), &cp);

		/* If we're already encrypted set the REAUTH_PEND flag,
		 * otherwise set the ENCRYPT_PEND.
		 */
		if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
			set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
		else
			set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
	}

	return 0;
}
1311 
1312 /* Encrypt the link */
1313 static void hci_conn_encrypt(struct hci_conn *conn)
1314 {
1315 	BT_DBG("hcon %p", conn);
1316 
1317 	if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
1318 		struct hci_cp_set_conn_encrypt cp;
1319 		cp.handle  = cpu_to_le16(conn->handle);
1320 		cp.encrypt = 0x01;
1321 		hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1322 			     &cp);
1323 	}
1324 }
1325 
/* Enable security: make sure the link meets @sec_level, authenticating
 * and/or encrypting it as needed.
 *
 * Returns 1 when all requirements are already met, 0 when an HCI
 * exchange (authentication and/or encryption) was started and the
 * caller must wait for its completion.
 */
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
		      bool initiator)
{
	BT_DBG("hcon %p", conn);

	/* LE links use SMP instead of the BR/EDR link-key machinery */
	if (conn->type == LE_LINK)
		return smp_conn_security(conn, sec_level);

	/* For sdp we don't need the link key. */
	if (sec_level == BT_SECURITY_SDP)
		return 1;

	/* For non 2.1 devices and low security level we don't need the link
	   key. */
	if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
		return 1;

	/* For other security levels we need the link key. */
	if (!test_bit(HCI_CONN_AUTH, &conn->flags))
		goto auth;

	/* An authenticated FIPS approved combination key has sufficient
	 * security for security level 4. */
	if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256 &&
	    sec_level == BT_SECURITY_FIPS)
		goto encrypt;

	/* An authenticated combination key has sufficient security for
	   security level 3. */
	if ((conn->key_type == HCI_LK_AUTH_COMBINATION_P192 ||
	     conn->key_type == HCI_LK_AUTH_COMBINATION_P256) &&
	    sec_level == BT_SECURITY_HIGH)
		goto encrypt;

	/* An unauthenticated combination key has sufficient security for
	   security level 1 and 2. */
	if ((conn->key_type == HCI_LK_UNAUTH_COMBINATION_P192 ||
	     conn->key_type == HCI_LK_UNAUTH_COMBINATION_P256) &&
	    (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW))
		goto encrypt;

	/* A combination key has always sufficient security for the security
	   levels 1 or 2. High security level requires the combination key
	   is generated using maximum PIN code length (16).
	   For pre 2.1 units. */
	if (conn->key_type == HCI_LK_COMBINATION &&
	    (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW ||
	     conn->pin_length == 16))
		goto encrypt;

auth:
	/* Authentication is required; don't start it while an encryption
	 * change is still in flight.
	 */
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
		return 0;

	if (initiator)
		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

	if (!hci_conn_auth(conn, sec_level, auth_type))
		return 0;

encrypt:
	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags)) {
		/* Ensure that the encryption key size has been read,
		 * otherwise stall the upper layer responses.
		 */
		if (!conn->enc_key_size)
			return 0;

		/* Nothing else needed, all requirements are met */
		return 1;
	}

	hci_conn_encrypt(conn);
	return 0;
}
EXPORT_SYMBOL(hci_conn_security);
1403 
1404 /* Check secure link requirement */
1405 int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
1406 {
1407 	BT_DBG("hcon %p", conn);
1408 
1409 	/* Accept if non-secure or higher security level is required */
1410 	if (sec_level != BT_SECURITY_HIGH && sec_level != BT_SECURITY_FIPS)
1411 		return 1;
1412 
1413 	/* Accept if secure or higher security level is already present */
1414 	if (conn->sec_level == BT_SECURITY_HIGH ||
1415 	    conn->sec_level == BT_SECURITY_FIPS)
1416 		return 1;
1417 
1418 	/* Reject not secure link */
1419 	return 0;
1420 }
1421 EXPORT_SYMBOL(hci_conn_check_secure);
1422 
1423 /* Switch role */
1424 int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
1425 {
1426 	BT_DBG("hcon %p", conn);
1427 
1428 	if (role == conn->role)
1429 		return 1;
1430 
1431 	if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
1432 		struct hci_cp_switch_role cp;
1433 		bacpy(&cp.bdaddr, &conn->dst);
1434 		cp.role = role;
1435 		hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
1436 	}
1437 
1438 	return 0;
1439 }
1440 EXPORT_SYMBOL(hci_conn_switch_role);
1441 
/* Enter active mode: bring the link out of sniff mode (when power save
 * is disabled or @force_active is set) and (re)arm the idle timer.
 */
void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("hcon %p mode %d", conn, conn->mode);

	if (conn->mode != HCI_CM_SNIFF)
		goto timer;

	if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
		goto timer;

	/* Only one exit-sniff request may be outstanding at a time */
	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
		struct hci_cp_exit_sniff_mode cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
	}

timer:
	/* Restart the idle timeout regardless of the mode change above */
	if (hdev->idle_timeout > 0)
		queue_delayed_work(hdev->workqueue, &conn->idle_work,
				   msecs_to_jiffies(hdev->idle_timeout));
}
1466 
/* Drop all connection on the device */
void hci_conn_hash_flush(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c, *n;

	BT_DBG("hdev %s", hdev->name);

	/* _safe iteration is required: hci_conn_del() unlinks and frees @c */
	list_for_each_entry_safe(c, n, &h->list, list) {
		c->state = BT_CLOSED;

		/* Notify upper layers of local-host-initiated termination */
		hci_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
		hci_conn_del(c);
	}
}
1482 
/* Check pending connect attempts: if an outgoing ACL connection is
 * queued in BT_CONNECT2 (waiting for the controller to become free),
 * start its establishment now.
 */
void hci_conn_check_pending(struct hci_dev *hdev)
{
	struct hci_conn *conn;

	BT_DBG("hdev %s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
	if (conn)
		hci_acl_create_connection(conn);

	hci_dev_unlock(hdev);
}
1498 
1499 static u32 get_link_mode(struct hci_conn *conn)
1500 {
1501 	u32 link_mode = 0;
1502 
1503 	if (conn->role == HCI_ROLE_MASTER)
1504 		link_mode |= HCI_LM_MASTER;
1505 
1506 	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
1507 		link_mode |= HCI_LM_ENCRYPT;
1508 
1509 	if (test_bit(HCI_CONN_AUTH, &conn->flags))
1510 		link_mode |= HCI_LM_AUTH;
1511 
1512 	if (test_bit(HCI_CONN_SECURE, &conn->flags))
1513 		link_mode |= HCI_LM_SECURE;
1514 
1515 	if (test_bit(HCI_CONN_FIPS, &conn->flags))
1516 		link_mode |= HCI_LM_FIPS;
1517 
1518 	return link_mode;
1519 }
1520 
1521 int hci_get_conn_list(void __user *arg)
1522 {
1523 	struct hci_conn *c;
1524 	struct hci_conn_list_req req, *cl;
1525 	struct hci_conn_info *ci;
1526 	struct hci_dev *hdev;
1527 	int n = 0, size, err;
1528 
1529 	if (copy_from_user(&req, arg, sizeof(req)))
1530 		return -EFAULT;
1531 
1532 	if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
1533 		return -EINVAL;
1534 
1535 	size = sizeof(req) + req.conn_num * sizeof(*ci);
1536 
1537 	cl = kmalloc(size, GFP_KERNEL);
1538 	if (!cl)
1539 		return -ENOMEM;
1540 
1541 	hdev = hci_dev_get(req.dev_id);
1542 	if (!hdev) {
1543 		kfree(cl);
1544 		return -ENODEV;
1545 	}
1546 
1547 	ci = cl->conn_info;
1548 
1549 	hci_dev_lock(hdev);
1550 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
1551 		bacpy(&(ci + n)->bdaddr, &c->dst);
1552 		(ci + n)->handle = c->handle;
1553 		(ci + n)->type  = c->type;
1554 		(ci + n)->out   = c->out;
1555 		(ci + n)->state = c->state;
1556 		(ci + n)->link_mode = get_link_mode(c);
1557 		if (++n >= req.conn_num)
1558 			break;
1559 	}
1560 	hci_dev_unlock(hdev);
1561 
1562 	cl->dev_id = hdev->id;
1563 	cl->conn_num = n;
1564 	size = sizeof(req) + n * sizeof(*ci);
1565 
1566 	hci_dev_put(hdev);
1567 
1568 	err = copy_to_user(arg, cl, size);
1569 	kfree(cl);
1570 
1571 	return err ? -EFAULT : 0;
1572 }
1573 
/* Handler for the HCIGETCONNINFO ioctl: report the state of a single
 * connection identified by link type and bdaddr.  Returns 0 on success,
 * -ENOENT if no such connection, -EFAULT on copy errors.
 */
int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
{
	struct hci_conn_info_req req;
	struct hci_conn_info ci;
	struct hci_conn *conn;
	/* The result is written just past the request header in user memory */
	char __user *ptr = arg + sizeof(req);

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	/* Snapshot the connection fields under the dev lock */
	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
	if (conn) {
		bacpy(&ci.bdaddr, &conn->dst);
		ci.handle = conn->handle;
		ci.type  = conn->type;
		ci.out   = conn->out;
		ci.state = conn->state;
		ci.link_mode = get_link_mode(conn);
	}
	hci_dev_unlock(hdev);

	if (!conn)
		return -ENOENT;

	return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
}
1601 
1602 int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
1603 {
1604 	struct hci_auth_info_req req;
1605 	struct hci_conn *conn;
1606 
1607 	if (copy_from_user(&req, arg, sizeof(req)))
1608 		return -EFAULT;
1609 
1610 	hci_dev_lock(hdev);
1611 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
1612 	if (conn)
1613 		req.type = conn->auth_type;
1614 	hci_dev_unlock(hdev);
1615 
1616 	if (!conn)
1617 		return -ENOENT;
1618 
1619 	return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
1620 }
1621 
/* Allocate a new hci_chan on @conn and link it into the connection's
 * channel list.  Takes a reference on the connection, released again in
 * hci_chan_del().  Returns NULL on allocation failure or when the
 * connection is marked for drop.
 */
struct hci_chan *hci_chan_create(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_chan *chan;

	BT_DBG("%s hcon %p", hdev->name, conn);

	/* A connection being torn down must not gain new channels */
	if (test_bit(HCI_CONN_DROP, &conn->flags)) {
		BT_DBG("Refusing to create new hci_chan");
		return NULL;
	}

	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return NULL;

	chan->conn = hci_conn_get(conn);
	skb_queue_head_init(&chan->data_q);
	chan->state = BT_CONNECTED;

	/* chan_list is traversed by lockless readers under RCU */
	list_add_rcu(&chan->list, &conn->chan_list);

	return chan;
}
1646 
/* Unlink @chan from its connection and free it, dropping the connection
 * reference taken in hci_chan_create().
 */
void hci_chan_del(struct hci_chan *chan)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);

	list_del_rcu(&chan->list);

	/* Wait for lockless readers traversing chan_list before the
	 * channel is torn down and freed below.
	 */
	synchronize_rcu();

	/* Prevent new hci_chan's to be created for this hci_conn */
	set_bit(HCI_CONN_DROP, &conn->flags);

	/* Release the reference taken in hci_chan_create() */
	hci_conn_put(conn);

	skb_queue_purge(&chan->data_q);
	kfree(chan);
}
1666 
1667 void hci_chan_list_flush(struct hci_conn *conn)
1668 {
1669 	struct hci_chan *chan, *n;
1670 
1671 	BT_DBG("hcon %p", conn);
1672 
1673 	list_for_each_entry_safe(chan, n, &conn->chan_list, list)
1674 		hci_chan_del(chan);
1675 }
1676 
1677 static struct hci_chan *__hci_chan_lookup_handle(struct hci_conn *hcon,
1678 						 __u16 handle)
1679 {
1680 	struct hci_chan *hchan;
1681 
1682 	list_for_each_entry(hchan, &hcon->chan_list, list) {
1683 		if (hchan->handle == handle)
1684 			return hchan;
1685 	}
1686 
1687 	return NULL;
1688 }
1689 
/* Find the hci_chan with the given handle across all connections of
 * @hdev.  The connection hash is walked under the RCU read lock; the
 * first matching channel (or NULL) is returned.
 */
struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *hcon;
	struct hci_chan *hchan = NULL;

	rcu_read_lock();

	list_for_each_entry_rcu(hcon, &h->list, list) {
		hchan = __hci_chan_lookup_handle(hcon, handle);
		if (hchan)
			break;
	}

	rcu_read_unlock();

	return hchan;
}
1708 
/* Translate the packet types usable on @conn into a BT_PHY_* bitmask,
 * as reported by the BT_PHY socket option.
 */
u32 hci_conn_get_phy(struct hci_conn *conn)
{
	u32 phys = 0;

	/* BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 2, Part B page 471:
	 * Table 6.2: Packets defined for synchronous, asynchronous, and
	 * CPB logical transport types.
	 */
	switch (conn->type) {
	case SCO_LINK:
		/* SCO logical transport (1 Mb/s):
		 * HV1, HV2, HV3 and DV.
		 */
		phys |= BT_PHY_BR_1M_1SLOT;

		break;

	case ACL_LINK:
		/* ACL logical transport (1 Mb/s) ptt=0:
		 * DH1, DM3, DH3, DM5 and DH5.
		 */
		phys |= BT_PHY_BR_1M_1SLOT;

		if (conn->pkt_type & (HCI_DM3 | HCI_DH3))
			phys |= BT_PHY_BR_1M_3SLOT;

		if (conn->pkt_type & (HCI_DM5 | HCI_DH5))
			phys |= BT_PHY_BR_1M_5SLOT;

		/* ACL logical transport (2 Mb/s) ptt=1:
		 * 2-DH1, 2-DH3 and 2-DH5.
		 *
		 * The HCI_2DHx/HCI_3DHx bits are "shall NOT be used"
		 * flags in the Create Connection packet type, hence the
		 * negated tests below.
		 */
		if (!(conn->pkt_type & HCI_2DH1))
			phys |= BT_PHY_EDR_2M_1SLOT;

		if (!(conn->pkt_type & HCI_2DH3))
			phys |= BT_PHY_EDR_2M_3SLOT;

		if (!(conn->pkt_type & HCI_2DH5))
			phys |= BT_PHY_EDR_2M_5SLOT;

		/* ACL logical transport (3 Mb/s) ptt=1:
		 * 3-DH1, 3-DH3 and 3-DH5.
		 */
		if (!(conn->pkt_type & HCI_3DH1))
			phys |= BT_PHY_EDR_3M_1SLOT;

		if (!(conn->pkt_type & HCI_3DH3))
			phys |= BT_PHY_EDR_3M_3SLOT;

		if (!(conn->pkt_type & HCI_3DH5))
			phys |= BT_PHY_EDR_3M_5SLOT;

		break;

	case ESCO_LINK:
		/* eSCO logical transport (1 Mb/s): EV3, EV4 and EV5 */
		phys |= BT_PHY_BR_1M_1SLOT;

		/* NOTE(review): per the HCI spec ESCO_EV4/EV5 are
		 * positive-logic "may be used" bits, yet this test is
		 * negated like the inverted-logic EDR bits below —
		 * presumably pkt_type is stored pre-inverted for eSCO
		 * links; confirm against the SCO setup path before
		 * changing.
		 */
		if (!(conn->pkt_type & (ESCO_EV4 | ESCO_EV5)))
			phys |= BT_PHY_BR_1M_3SLOT;

		/* eSCO logical transport (2 Mb/s): 2-EV3, 2-EV5 */
		if (!(conn->pkt_type & ESCO_2EV3))
			phys |= BT_PHY_EDR_2M_1SLOT;

		if (!(conn->pkt_type & ESCO_2EV5))
			phys |= BT_PHY_EDR_2M_3SLOT;

		/* eSCO logical transport (3 Mb/s): 3-EV3, 3-EV5 */
		if (!(conn->pkt_type & ESCO_3EV3))
			phys |= BT_PHY_EDR_3M_1SLOT;

		if (!(conn->pkt_type & ESCO_3EV5))
			phys |= BT_PHY_EDR_3M_3SLOT;

		break;

	case LE_LINK:
		/* Report the PHYs last negotiated for this LE link */
		if (conn->le_tx_phy & HCI_LE_SET_PHY_1M)
			phys |= BT_PHY_LE_1M_TX;

		if (conn->le_rx_phy & HCI_LE_SET_PHY_1M)
			phys |= BT_PHY_LE_1M_RX;

		if (conn->le_tx_phy & HCI_LE_SET_PHY_2M)
			phys |= BT_PHY_LE_2M_TX;

		if (conn->le_rx_phy & HCI_LE_SET_PHY_2M)
			phys |= BT_PHY_LE_2M_RX;

		if (conn->le_tx_phy & HCI_LE_SET_PHY_CODED)
			phys |= BT_PHY_LE_CODED_TX;

		if (conn->le_rx_phy & HCI_LE_SET_PHY_CODED)
			phys |= BT_PHY_LE_CODED_RX;

		break;
	}

	return phys;
}
1811