xref: /openbmc/linux/net/bluetooth/hci_conn.c (revision ea8d90a5)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4    Copyright 2023 NXP
5 
6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License version 2 as
10    published by the Free Software Foundation;
11 
12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 
21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23    SOFTWARE IS DISCLAIMED.
24 */
25 
26 /* Bluetooth HCI connection handling. */
27 
28 #include <linux/export.h>
29 #include <linux/debugfs.h>
30 
31 #include <net/bluetooth/bluetooth.h>
32 #include <net/bluetooth/hci_core.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/iso.h>
35 #include <net/bluetooth/mgmt.h>
36 
37 #include "hci_request.h"
38 #include "smp.h"
39 #include "eir.h"
40 
/* One (e)SCO negotiation attempt's parameter set, as used by the HCI
 * Setup Synchronous Connection commands.
 */
struct sco_param {
	u16 pkt_type;		/* (e)SCO packet type bitmask (ESCO_ / EDR_ bits) */
	u16 max_latency;	/* maximum latency; 0xffff = don't care */
	u8  retrans_effort;	/* retransmission effort; 0xff = don't care */
};
46 
/* Carries a connection and its ACL handle through hci_cmd_sync_queue();
 * allocated in hci_setup_sync() and freed by hci_enhanced_setup_sync().
 */
struct conn_handle_t {
	struct hci_conn *conn;	/* connection the queued command acts on */
	__u16 handle;		/* ACL handle the sync connection hangs off */
};
51 
/* eSCO parameter sets for CVSD, ordered most to least preferred; one
 * entry is tried per connection attempt (see find_next_esco_param()).
 */
static const struct sco_param esco_param_cvsd[] = {
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a,	0x01 }, /* S3 */
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007,	0x01 }, /* S2 */
	{ EDR_ESCO_MASK | ESCO_EV3,   0x0007,	0x01 }, /* S1 */
	{ EDR_ESCO_MASK | ESCO_HV3,   0xffff,	0x01 }, /* D1 */
	{ EDR_ESCO_MASK | ESCO_HV1,   0xffff,	0x01 }, /* D0 */
};
59 
/* Legacy SCO fallback parameter sets for CVSD, used when the peer is
 * not eSCO capable.
 */
static const struct sco_param sco_param_cvsd[] = {
	{ EDR_ESCO_MASK | ESCO_HV3,   0xffff,	0xff }, /* D1 */
	{ EDR_ESCO_MASK | ESCO_HV1,   0xffff,	0xff }, /* D0 */
};
64 
/* eSCO parameter sets for mSBC (transparent data / wideband speech):
 * T2 preferred, T1 as fallback.
 */
static const struct sco_param esco_param_msbc[] = {
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d,	0x02 }, /* T2 */
	{ EDR_ESCO_MASK | ESCO_EV3,   0x0008,	0x02 }, /* T1 */
};
69 
/* Clean up after a scan-then-connect LE attempt finished (with @status)
 * or was cancelled: drop the reference kept in the pending params and
 * rearm or remove the autoconnect entry as appropriate.
 *
 * This function requires the caller holds hdev->lock
 */
void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status)
{
	struct hci_conn_params *params;
	struct hci_dev *hdev = conn->hdev;
	struct smp_irk *irk;
	bdaddr_t *bdaddr;
	u8 bdaddr_type;

	bdaddr = &conn->dst;
	bdaddr_type = conn->dst_type;

	/* Check if we need to convert to identity address */
	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		bdaddr_type = irk->addr_type;
	}

	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, bdaddr,
					   bdaddr_type);
	if (!params)
		return;

	/* Release the hci_conn reference the pending params held */
	if (params->conn) {
		hci_conn_drop(params->conn);
		hci_conn_put(params->conn);
		params->conn = NULL;
	}

	if (!params->explicit_connect)
		return;

	/* If the status indicates successful cancellation of
	 * the attempt (i.e. Unknown Connection Id) there's no point of
	 * notifying failure since we'll go back to keep trying to
	 * connect. The only exception is explicit connect requests
	 * where a timeout + cancel does indicate an actual failure.
	 */
	if (status && status != HCI_ERROR_UNKNOWN_CONN_ID)
		mgmt_connect_failed(hdev, conn, status);

	/* The connection attempt was doing scan for new RPA, and is
	 * in scan phase. If params are not associated with any other
	 * autoconnect action, remove them completely. If they are, just unmark
	 * them as waiting for connection, by clearing explicit_connect field.
	 */
	params->explicit_connect = false;

	hci_pend_le_list_del_init(params);

	switch (params->auto_connect) {
	case HCI_AUTO_CONN_EXPLICIT:
		hci_conn_params_del(hdev, bdaddr, bdaddr_type);
		/* return instead of break to avoid duplicate scan update */
		return;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		hci_pend_le_list_add(params, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		hci_pend_le_list_add(params, &hdev->pend_le_reports);
		break;
	default:
		break;
	}

	hci_update_passive_scan(hdev);
}
139 
/* Final teardown of a connection object: unhash it, run the
 * type-specific cleanup hook, notify the driver, and release sysfs,
 * debugfs and the hdev reference taken when the conn was created.
 */
static void hci_conn_cleanup(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	/* Remove conn params that were only kept for this connection */
	if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags))
		hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type);

	/* Forget the link key if it was marked for flushing */
	if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
		hci_remove_link_key(hdev, &conn->dst);

	hci_chan_list_flush(conn);

	hci_conn_hash_del(hdev, conn);

	/* Give a placeholder handle back to the IDA it came from */
	if (HCI_CONN_HANDLE_UNSET(conn->handle))
		ida_free(&hdev->unset_handle_ida, conn->handle);

	/* Type-specific cleanup (e.g. bis_cleanup/cis_cleanup for ISO) */
	if (conn->cleanup)
		conn->cleanup(conn);

	if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
		switch (conn->setting & SCO_AIRMODE_MASK) {
		case SCO_AIRMODE_CVSD:
		case SCO_AIRMODE_TRANSP:
			if (hdev->notify)
				hdev->notify(hdev, HCI_NOTIFY_DISABLE_SCO);
			break;
		}
	} else {
		if (hdev->notify)
			hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
	}

	debugfs_remove_recursive(conn->debugfs);

	hci_conn_del_sysfs(conn);

	/* Balances the hci_dev_hold() done when the conn was added */
	hci_dev_put(hdev);
}
179 
/* Initiate disconnection of @conn with HCI @reason; as a side effect,
 * snapshot the clock offset when we are central of an established ACL
 * so a later reconnect can use it.  Returns the result of
 * hci_abort_conn().
 */
int hci_disconnect(struct hci_conn *conn, __u8 reason)
{
	BT_DBG("hcon %p", conn);

	/* When we are central of an established connection and it enters
	 * the disconnect timeout, then go ahead and try to read the
	 * current clock offset.  Processing of the result is done
	 * within the event handling and hci_clock_offset_evt function.
	 */
	if (conn->type == ACL_LINK && conn->role == HCI_ROLE_MASTER &&
	    (conn->state == BT_CONNECTED || conn->state == BT_CONFIG)) {
		struct hci_dev *hdev = conn->hdev;
		struct hci_cp_read_clock_offset clkoff_cp;

		clkoff_cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_READ_CLOCK_OFFSET, sizeof(clkoff_cp),
			     &clkoff_cp);
	}

	return hci_abort_conn(conn, reason);
}
201 
/* Start a legacy SCO connection on top of the ACL identified by
 * @handle, using the HCI Add SCO Connection command.
 */
static void hci_add_sco(struct hci_conn *conn, __u16 handle)
{
	struct hci_cp_add_sco cp;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("hcon %p", conn);

	conn->out = true;
	conn->state = BT_CONNECT;
	conn->attempt++;

	cp.pkt_type = cpu_to_le16(conn->pkt_type);
	cp.handle = cpu_to_le16(handle);

	hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
}
219 
find_next_esco_param(struct hci_conn * conn,const struct sco_param * esco_param,int size)220 static bool find_next_esco_param(struct hci_conn *conn,
221 				 const struct sco_param *esco_param, int size)
222 {
223 	if (!conn->parent)
224 		return false;
225 
226 	for (; conn->attempt <= size; conn->attempt++) {
227 		if (lmp_esco_2m_capable(conn->parent) ||
228 		    (esco_param[conn->attempt - 1].pkt_type & ESCO_2EV3))
229 			break;
230 		BT_DBG("hcon %p skipped attempt %d, eSCO 2M not supported",
231 		       conn, conn->attempt);
232 	}
233 
234 	return conn->attempt <= size;
235 }
236 
configure_datapath_sync(struct hci_dev * hdev,struct bt_codec * codec)237 static int configure_datapath_sync(struct hci_dev *hdev, struct bt_codec *codec)
238 {
239 	int err;
240 	__u8 vnd_len, *vnd_data = NULL;
241 	struct hci_op_configure_data_path *cmd = NULL;
242 
243 	if (!codec->data_path || !hdev->get_codec_config_data)
244 		return 0;
245 
246 	/* Do not take me as error */
247 	if (!hdev->get_codec_config_data)
248 		return 0;
249 
250 	err = hdev->get_codec_config_data(hdev, ESCO_LINK, codec, &vnd_len,
251 					  &vnd_data);
252 	if (err < 0)
253 		goto error;
254 
255 	cmd = kzalloc(sizeof(*cmd) + vnd_len, GFP_KERNEL);
256 	if (!cmd) {
257 		err = -ENOMEM;
258 		goto error;
259 	}
260 
261 	err = hdev->get_data_path_id(hdev, &cmd->data_path_id);
262 	if (err < 0)
263 		goto error;
264 
265 	cmd->vnd_len = vnd_len;
266 	memcpy(cmd->vnd_data, vnd_data, vnd_len);
267 
268 	cmd->direction = 0x00;
269 	__hci_cmd_sync_status(hdev, HCI_CONFIGURE_DATA_PATH,
270 			      sizeof(*cmd) + vnd_len, cmd, HCI_CMD_TIMEOUT);
271 
272 	cmd->direction = 0x01;
273 	err = __hci_cmd_sync_status(hdev, HCI_CONFIGURE_DATA_PATH,
274 				    sizeof(*cmd) + vnd_len, cmd,
275 				    HCI_CMD_TIMEOUT);
276 error:
277 
278 	kfree(cmd);
279 	kfree(vnd_data);
280 	return err;
281 }
282 
hci_enhanced_setup_sync(struct hci_dev * hdev,void * data)283 static int hci_enhanced_setup_sync(struct hci_dev *hdev, void *data)
284 {
285 	struct conn_handle_t *conn_handle = data;
286 	struct hci_conn *conn = conn_handle->conn;
287 	__u16 handle = conn_handle->handle;
288 	struct hci_cp_enhanced_setup_sync_conn cp;
289 	const struct sco_param *param;
290 
291 	kfree(conn_handle);
292 
293 	bt_dev_dbg(hdev, "hcon %p", conn);
294 
295 	configure_datapath_sync(hdev, &conn->codec);
296 
297 	conn->state = BT_CONNECT;
298 	conn->out = true;
299 
300 	conn->attempt++;
301 
302 	memset(&cp, 0x00, sizeof(cp));
303 
304 	cp.handle   = cpu_to_le16(handle);
305 
306 	cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
307 	cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
308 
309 	switch (conn->codec.id) {
310 	case BT_CODEC_MSBC:
311 		if (!find_next_esco_param(conn, esco_param_msbc,
312 					  ARRAY_SIZE(esco_param_msbc)))
313 			return -EINVAL;
314 
315 		param = &esco_param_msbc[conn->attempt - 1];
316 		cp.tx_coding_format.id = 0x05;
317 		cp.rx_coding_format.id = 0x05;
318 		cp.tx_codec_frame_size = __cpu_to_le16(60);
319 		cp.rx_codec_frame_size = __cpu_to_le16(60);
320 		cp.in_bandwidth = __cpu_to_le32(32000);
321 		cp.out_bandwidth = __cpu_to_le32(32000);
322 		cp.in_coding_format.id = 0x04;
323 		cp.out_coding_format.id = 0x04;
324 		cp.in_coded_data_size = __cpu_to_le16(16);
325 		cp.out_coded_data_size = __cpu_to_le16(16);
326 		cp.in_pcm_data_format = 2;
327 		cp.out_pcm_data_format = 2;
328 		cp.in_pcm_sample_payload_msb_pos = 0;
329 		cp.out_pcm_sample_payload_msb_pos = 0;
330 		cp.in_data_path = conn->codec.data_path;
331 		cp.out_data_path = conn->codec.data_path;
332 		cp.in_transport_unit_size = 1;
333 		cp.out_transport_unit_size = 1;
334 		break;
335 
336 	case BT_CODEC_TRANSPARENT:
337 		if (!find_next_esco_param(conn, esco_param_msbc,
338 					  ARRAY_SIZE(esco_param_msbc)))
339 			return false;
340 		param = &esco_param_msbc[conn->attempt - 1];
341 		cp.tx_coding_format.id = 0x03;
342 		cp.rx_coding_format.id = 0x03;
343 		cp.tx_codec_frame_size = __cpu_to_le16(60);
344 		cp.rx_codec_frame_size = __cpu_to_le16(60);
345 		cp.in_bandwidth = __cpu_to_le32(0x1f40);
346 		cp.out_bandwidth = __cpu_to_le32(0x1f40);
347 		cp.in_coding_format.id = 0x03;
348 		cp.out_coding_format.id = 0x03;
349 		cp.in_coded_data_size = __cpu_to_le16(16);
350 		cp.out_coded_data_size = __cpu_to_le16(16);
351 		cp.in_pcm_data_format = 2;
352 		cp.out_pcm_data_format = 2;
353 		cp.in_pcm_sample_payload_msb_pos = 0;
354 		cp.out_pcm_sample_payload_msb_pos = 0;
355 		cp.in_data_path = conn->codec.data_path;
356 		cp.out_data_path = conn->codec.data_path;
357 		cp.in_transport_unit_size = 1;
358 		cp.out_transport_unit_size = 1;
359 		break;
360 
361 	case BT_CODEC_CVSD:
362 		if (conn->parent && lmp_esco_capable(conn->parent)) {
363 			if (!find_next_esco_param(conn, esco_param_cvsd,
364 						  ARRAY_SIZE(esco_param_cvsd)))
365 				return -EINVAL;
366 			param = &esco_param_cvsd[conn->attempt - 1];
367 		} else {
368 			if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
369 				return -EINVAL;
370 			param = &sco_param_cvsd[conn->attempt - 1];
371 		}
372 		cp.tx_coding_format.id = 2;
373 		cp.rx_coding_format.id = 2;
374 		cp.tx_codec_frame_size = __cpu_to_le16(60);
375 		cp.rx_codec_frame_size = __cpu_to_le16(60);
376 		cp.in_bandwidth = __cpu_to_le32(16000);
377 		cp.out_bandwidth = __cpu_to_le32(16000);
378 		cp.in_coding_format.id = 4;
379 		cp.out_coding_format.id = 4;
380 		cp.in_coded_data_size = __cpu_to_le16(16);
381 		cp.out_coded_data_size = __cpu_to_le16(16);
382 		cp.in_pcm_data_format = 2;
383 		cp.out_pcm_data_format = 2;
384 		cp.in_pcm_sample_payload_msb_pos = 0;
385 		cp.out_pcm_sample_payload_msb_pos = 0;
386 		cp.in_data_path = conn->codec.data_path;
387 		cp.out_data_path = conn->codec.data_path;
388 		cp.in_transport_unit_size = 16;
389 		cp.out_transport_unit_size = 16;
390 		break;
391 	default:
392 		return -EINVAL;
393 	}
394 
395 	cp.retrans_effort = param->retrans_effort;
396 	cp.pkt_type = __cpu_to_le16(param->pkt_type);
397 	cp.max_latency = __cpu_to_le16(param->max_latency);
398 
399 	if (hci_send_cmd(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
400 		return -EIO;
401 
402 	return 0;
403 }
404 
/* Send the legacy HCI Setup Synchronous Connection command for @conn on
 * top of the ACL identified by @handle, picking the parameter set for
 * the current attempt based on the connection's air mode.
 *
 * Returns true if the command was sent, false when no usable parameter
 * set remains or sending failed.
 */
static bool hci_setup_sync_conn(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_setup_sync_conn cp;
	const struct sco_param *param;

	bt_dev_dbg(hdev, "hcon %p", conn);

	conn->state = BT_CONNECT;
	conn->out = true;

	conn->attempt++;

	cp.handle   = cpu_to_le16(handle);

	cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
	cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
	cp.voice_setting  = cpu_to_le16(conn->setting);

	switch (conn->setting & SCO_AIRMODE_MASK) {
	case SCO_AIRMODE_TRANSP:
		if (!find_next_esco_param(conn, esco_param_msbc,
					  ARRAY_SIZE(esco_param_msbc)))
			return false;
		param = &esco_param_msbc[conn->attempt - 1];
		break;
	case SCO_AIRMODE_CVSD:
		/* Fall back to plain SCO parameters when the peer is not
		 * eSCO capable.
		 */
		if (conn->parent && lmp_esco_capable(conn->parent)) {
			if (!find_next_esco_param(conn, esco_param_cvsd,
						  ARRAY_SIZE(esco_param_cvsd)))
				return false;
			param = &esco_param_cvsd[conn->attempt - 1];
		} else {
			if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
				return false;
			param = &sco_param_cvsd[conn->attempt - 1];
		}
		break;
	default:
		return false;
	}

	cp.retrans_effort = param->retrans_effort;
	cp.pkt_type = __cpu_to_le16(param->pkt_type);
	cp.max_latency = __cpu_to_le16(param->max_latency);

	if (hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
		return false;

	return true;
}
456 
hci_setup_sync(struct hci_conn * conn,__u16 handle)457 bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
458 {
459 	int result;
460 	struct conn_handle_t *conn_handle;
461 
462 	if (enhanced_sync_conn_capable(conn->hdev)) {
463 		conn_handle = kzalloc(sizeof(*conn_handle), GFP_KERNEL);
464 
465 		if (!conn_handle)
466 			return false;
467 
468 		conn_handle->conn = conn;
469 		conn_handle->handle = handle;
470 		result = hci_cmd_sync_queue(conn->hdev, hci_enhanced_setup_sync,
471 					    conn_handle, NULL);
472 		if (result < 0)
473 			kfree(conn_handle);
474 
475 		return result == 0;
476 	}
477 
478 	return hci_setup_sync_conn(conn, handle);
479 }
480 
/* Send an LE Connection Update for @conn with the given interval
 * (@min/@max), @latency and supervision timeout (@to_multiplier),
 * mirroring the values into the stored connection parameters if any
 * exist for the destination.
 *
 * Returns 0x01 when stored parameters were updated, 0x00 otherwise.
 */
u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
		      u16 to_multiplier)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_conn_params *params;
	struct hci_cp_le_conn_update cp;

	hci_dev_lock(hdev);

	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		params->conn_min_interval = min;
		params->conn_max_interval = max;
		params->conn_latency = latency;
		params->supervision_timeout = to_multiplier;
	}

	hci_dev_unlock(hdev);

	memset(&cp, 0, sizeof(cp));
	cp.handle		= cpu_to_le16(conn->handle);
	cp.conn_interval_min	= cpu_to_le16(min);
	cp.conn_interval_max	= cpu_to_le16(max);
	cp.conn_latency		= cpu_to_le16(latency);
	cp.supervision_timeout	= cpu_to_le16(to_multiplier);
	cp.min_ce_len		= cpu_to_le16(0x0000);
	cp.max_ce_len		= cpu_to_le16(0x0000);

	hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);

	if (params)
		return 0x01;

	return 0x00;
}
516 
/* Start LE link-layer encryption on @conn with the given EDIV, Rand and
 * long term key (only the first @key_size bytes of @ltk are used; the
 * remainder of the key field stays zeroed).
 */
void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
		      __u8 ltk[16], __u8 key_size)
{
	struct hci_cp_le_start_enc cp = {
		.handle = cpu_to_le16(conn->handle),
		.rand   = rand,
		.ediv   = ediv,
	};

	BT_DBG("hcon %p", conn);

	memcpy(cp.ltk, ltk, key_size);

	hci_send_cmd(conn->hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
}
534 
/* Continue or abort a pending SCO setup once its parent ACL finished
 * connecting: on success start the (e)SCO link, on failure propagate
 * @status to the child connection and delete it.
 *
 * Device _must_ be locked */
void hci_sco_setup(struct hci_conn *conn, __u8 status)
{
	struct hci_link *link;

	link = list_first_entry_or_null(&conn->link_list, struct hci_link, list);
	if (!link || !link->conn)
		return;

	BT_DBG("hcon %p", conn);

	if (!status) {
		if (lmp_esco_capable(conn->hdev))
			hci_setup_sync(link->conn, conn->handle);
		else
			hci_add_sco(link->conn, conn->handle);
	} else {
		hci_connect_cfm(link->conn, status);
		hci_conn_del(link->conn);
	}
}
556 
/* Delayed work run when the disconnect timeout of an otherwise unused
 * connection expires; aborts the connection unless references were
 * taken again in the meantime.
 */
static void hci_conn_timeout(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     disc_work.work);
	int refcnt = atomic_read(&conn->refcnt);

	BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));

	WARN_ON(refcnt < 0);

	/* FIXME: It was observed that in pairing failed scenario, refcnt
	 * drops below 0. Probably this is because l2cap_conn_del calls
	 * l2cap_chan_del for each channel, and inside l2cap_chan_del conn is
	 * dropped. After that loop hci_chan_del is called which also drops
	 * conn. For now make sure that ACL is alive if refcnt is higher then 0,
	 * otherwise drop it.
	 */
	if (refcnt > 0)
		return;

	hci_abort_conn(conn, hci_proto_disconn_ind(conn));
}
579 
/* Enter sniff mode.
 *
 * Delayed work run when a connection has been idle long enough; puts an
 * ACL link into sniff mode (optionally setting up sniff subrating
 * first) when both the controller and the peer support it and the link
 * policy allows it.
 */
static void hci_conn_idle(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     idle_work.work);
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("hcon %p mode %d", conn, conn->mode);

	if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
		return;

	if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
		return;

	if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
		struct hci_cp_sniff_subrate cp;
		cp.handle             = cpu_to_le16(conn->handle);
		cp.max_latency        = cpu_to_le16(0);
		cp.min_remote_timeout = cpu_to_le16(0);
		cp.min_local_timeout  = cpu_to_le16(0);
		hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
	}

	/* Only one mode change may be pending at a time */
	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
		struct hci_cp_sniff_mode cp;
		cp.handle       = cpu_to_le16(conn->handle);
		cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
		cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
		cp.attempt      = cpu_to_le16(4);
		cp.timeout      = cpu_to_le16(1);
		hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
	}
}
614 
hci_conn_auto_accept(struct work_struct * work)615 static void hci_conn_auto_accept(struct work_struct *work)
616 {
617 	struct hci_conn *conn = container_of(work, struct hci_conn,
618 					     auto_accept_work.work);
619 
620 	hci_send_cmd(conn->hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
621 		     &conn->dst);
622 }
623 
le_disable_advertising(struct hci_dev * hdev)624 static void le_disable_advertising(struct hci_dev *hdev)
625 {
626 	if (ext_adv_capable(hdev)) {
627 		struct hci_cp_le_set_ext_adv_enable cp;
628 
629 		cp.enable = 0x00;
630 		cp.num_of_sets = 0x00;
631 
632 		hci_send_cmd(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp),
633 			     &cp);
634 	} else {
635 		u8 enable = 0x00;
636 		hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
637 			     &enable);
638 	}
639 }
640 
/* Delayed work run when an LE connection attempt times out: abort the
 * attempt, or — for the peripheral role — stop directed advertising and
 * fail the connection.
 */
static void le_conn_timeout(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     le_conn_timeout.work);
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("");

	/* We could end up here due to having done directed advertising,
	 * so clean up the state if necessary. This should however only
	 * happen with broken hardware or if low duty cycle was used
	 * (which doesn't have a timeout of its own).
	 */
	if (conn->role == HCI_ROLE_SLAVE) {
		/* Disable LE Advertising */
		le_disable_advertising(hdev);
		hci_dev_lock(hdev);
		hci_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
		hci_dev_unlock(hdev);
		return;
	}

	hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
}
665 
/* Buffer for the LE Set CIG Parameters command: fixed header followed
 * by up to 0x1f CIS configuration entries.
 */
struct iso_cig_params {
	struct hci_cp_le_set_cig_params cp;
	struct hci_cis_params cis[0x1f];
};
670 
/* Scratch data passed through hci_conn_hash_list_state() iterators and
 * cmd_sync callbacks when matching/terminating ISO connections.
 */
struct iso_list_data {
	union {
		u8  cig;	/* unicast: CIG id to match */
		u8  big;	/* broadcast: BIG handle to match */
	};
	union {
		u8  cis;	/* unicast: CIS id to match */
		u8  bis;	/* broadcast: BIS id to match */
		u16 sync_handle; /* periodic advertising sync handle */
	};
	int count;		/* number of matching connections found */
	bool big_term;		/* BIG was created and must be terminated */
	bool pa_sync_term;	/* PA sync must be terminated */
	bool big_sync_term;	/* BIG sync must be terminated */
};
686 
bis_list(struct hci_conn * conn,void * data)687 static void bis_list(struct hci_conn *conn, void *data)
688 {
689 	struct iso_list_data *d = data;
690 
691 	/* Skip if not broadcast/ANY address */
692 	if (bacmp(&conn->dst, BDADDR_ANY))
693 		return;
694 
695 	if (d->big != conn->iso_qos.bcast.big || d->bis == BT_ISO_QOS_BIS_UNSET ||
696 	    d->bis != conn->iso_qos.bcast.bis)
697 		return;
698 
699 	d->count++;
700 }
701 
/* cmd_sync callback: tear down the advertising set backing a BIS and,
 * if the BIG was actually created, terminate it on the controller.
 */
static int terminate_big_sync(struct hci_dev *hdev, void *data)
{
	struct iso_list_data *d = data;

	bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", d->big, d->bis);

	hci_disable_per_advertising_sync(hdev, d->bis);
	hci_remove_ext_adv_instance_sync(hdev, d->bis, NULL);

	/* Only terminate BIG if it has been created */
	if (!d->big_term)
		return 0;

	return hci_le_terminate_big_sync(hdev, d->big,
					 HCI_ERROR_LOCAL_HOST_TERM);
}
718 
/* cmd_sync destroy callback: free the iso_list_data allocated by
 * hci_le_terminate_big()/hci_le_big_terminate().
 */
static void terminate_big_destroy(struct hci_dev *hdev, void *data, int err)
{
	kfree(data);
}
723 
/* Queue asynchronous termination of the BIG/BIS associated with @conn
 * (broadcaster side).  Returns 0 on successful queueing or a negative
 * errno; the queued data is freed by terminate_big_destroy().
 */
static int hci_le_terminate_big(struct hci_dev *hdev, struct hci_conn *conn)
{
	struct iso_list_data *d;
	int ret;

	bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", conn->iso_qos.bcast.big,
		   conn->iso_qos.bcast.bis);

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->big = conn->iso_qos.bcast.big;
	d->bis = conn->iso_qos.bcast.bis;
	/* Snapshot (and clear) whether the BIG was actually created */
	d->big_term = test_and_clear_bit(HCI_CONN_BIG_CREATED, &conn->flags);

	ret = hci_cmd_sync_queue(hdev, terminate_big_sync, d,
				 terminate_big_destroy);
	if (ret)
		kfree(d);

	return ret;
}
747 
/* cmd_sync callback (broadcast receiver side): terminate BIG sync
 * and/or PA sync as flagged in the queued iso_list_data.
 */
static int big_terminate_sync(struct hci_dev *hdev, void *data)
{
	struct iso_list_data *d = data;

	bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", d->big,
		   d->sync_handle);

	if (d->big_sync_term)
		hci_le_big_terminate_sync(hdev, d->big);

	if (d->pa_sync_term)
		return hci_le_pa_terminate_sync(hdev, d->sync_handle);

	return 0;
}
763 
/* Queue asynchronous termination of BIG sync / PA sync for a broadcast
 * receiver connection.  Returns 0 on successful queueing or a negative
 * errno; the queued data is freed by terminate_big_destroy().
 */
static int hci_le_big_terminate(struct hci_dev *hdev, u8 big, struct hci_conn *conn)
{
	struct iso_list_data *d;
	int ret;

	bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", big, conn->sync_handle);

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->big = big;
	d->sync_handle = conn->sync_handle;
	/* Snapshot (and clear) which syncs this connection still holds */
	d->pa_sync_term = test_and_clear_bit(HCI_CONN_PA_SYNC, &conn->flags);
	d->big_sync_term = test_and_clear_bit(HCI_CONN_BIG_SYNC, &conn->flags);

	ret = hci_cmd_sync_queue(hdev, big_terminate_sync, d,
				 terminate_big_destroy);
	if (ret)
		kfree(d);

	return ret;
}
787 
/* Cleanup BIS connection
 *
 * Detects if there any BIS left connected in a BIG
 * broadcaster: Remove advertising instance and terminate BIG.
 * broadcaster receiver: Teminate BIG sync and terminate PA sync.
 */
static void bis_cleanup(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_conn *bis;

	bt_dev_dbg(hdev, "conn %p", conn);

	if (conn->role == HCI_ROLE_MASTER) {
		if (!test_and_clear_bit(HCI_CONN_PER_ADV, &conn->flags))
			return;

		/* Check if ISO connection is a BIS and terminate advertising
		 * set and BIG if there are no other connections using it.
		 */
		bis = hci_conn_hash_lookup_big(hdev, conn->iso_qos.bcast.big);
		if (bis)
			return;

		hci_le_terminate_big(hdev, conn);
	} else {
		/* Receiver side: only tear down once no other connection
		 * in the same BIG remains.
		 */
		bis = hci_conn_hash_lookup_big_any_dst(hdev,
						       conn->iso_qos.bcast.big);

		if (bis)
			return;

		hci_le_big_terminate(hdev, conn->iso_qos.bcast.big,
				     conn);
	}
}
824 
/* cmd_sync callback: remove the CIG whose handle was packed into @data
 * with UINT_PTR() by hci_le_remove_cig().
 */
static int remove_cig_sync(struct hci_dev *hdev, void *data)
{
	return hci_le_remove_cig_sync(hdev, PTR_UINT(data));
}
831 
/* Queue asynchronous removal of the CIG identified by @handle; the
 * handle travels through the queue packed into the data pointer.
 */
static int hci_le_remove_cig(struct hci_dev *hdev, u8 handle)
{
	bt_dev_dbg(hdev, "handle 0x%2.2x", handle);

	return hci_cmd_sync_queue(hdev, remove_cig_sync, UINT_PTR(handle),
				  NULL);
}
839 
find_cis(struct hci_conn * conn,void * data)840 static void find_cis(struct hci_conn *conn, void *data)
841 {
842 	struct iso_list_data *d = data;
843 
844 	/* Ignore broadcast or if CIG don't match */
845 	if (!bacmp(&conn->dst, BDADDR_ANY) || d->cig != conn->iso_qos.ucast.cig)
846 		return;
847 
848 	d->count++;
849 }
850 
/* Cleanup CIS connection:
 *
 * Detects if there any CIS left connected in a CIG and remove it.
 */
static void cis_cleanup(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct iso_list_data d;

	if (conn->iso_qos.ucast.cig == BT_ISO_QOS_CIG_UNSET)
		return;

	memset(&d, 0, sizeof(d));
	d.cig = conn->iso_qos.ucast.cig;

	/* Check if ISO connection is a CIS and remove CIG if there are
	 * no other connections using it.
	 */
	hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_BOUND, &d);
	hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_CONNECT, &d);
	hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_CONNECTED, &d);
	if (d.count)
		return;

	hci_le_remove_cig(hdev, conn->iso_qos.ucast.cig);
}
877 
/* Allocate a placeholder connection handle outside the valid HCI range
 * (HCI_CONN_HANDLE_MAX + 1 .. U16_MAX) for connections whose real
 * handle the controller has not assigned yet; returned to the IDA in
 * hci_conn_cleanup().
 */
static int hci_conn_hash_alloc_unset(struct hci_dev *hdev)
{
	return ida_alloc_range(&hdev->unset_handle_ida, HCI_CONN_HANDLE_MAX + 1,
			       U16_MAX, GFP_ATOMIC);
}
883 
/* Allocate and initialize a new hci_conn of the given @type/@role for
 * destination @dst with connection @handle, add it to the connection
 * hash and register its sysfs state.  Takes a reference on @hdev.
 *
 * Returns the new connection or an ERR_PTR: -ECONNREFUSED when the
 * controller lacks the buffers/support needed for this link type,
 * -ENOMEM on allocation failure.
 */
static struct hci_conn *__hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
				       u8 role, u16 handle)
{
	struct hci_conn *conn;

	/* Refuse link types the controller cannot actually carry */
	switch (type) {
	case ACL_LINK:
		if (!hdev->acl_mtu)
			return ERR_PTR(-ECONNREFUSED);
		break;
	case ISO_LINK:
		if (hdev->iso_mtu)
			/* Dedicated ISO Buffer exists */
			break;
		fallthrough;
	case LE_LINK:
		if (hdev->le_mtu && hdev->le_mtu < HCI_MIN_LE_MTU)
			return ERR_PTR(-ECONNREFUSED);
		if (!hdev->le_mtu && hdev->acl_mtu < HCI_MIN_LE_MTU)
			return ERR_PTR(-ECONNREFUSED);
		break;
	case SCO_LINK:
	case ESCO_LINK:
		if (!hdev->sco_pkts)
			/* Controller does not support SCO or eSCO over HCI */
			return ERR_PTR(-ECONNREFUSED);
		break;
	default:
		return ERR_PTR(-ECONNREFUSED);
	}

	bt_dev_dbg(hdev, "dst %pMR handle 0x%4.4x", dst, handle);

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn)
		return ERR_PTR(-ENOMEM);

	/* Generic defaults shared by all link types */
	bacpy(&conn->dst, dst);
	bacpy(&conn->src, &hdev->bdaddr);
	conn->handle = handle;
	conn->hdev  = hdev;
	conn->type  = type;
	conn->role  = role;
	conn->mode  = HCI_CM_ACTIVE;
	conn->state = BT_OPEN;
	conn->auth_type = HCI_AT_GENERAL_BONDING;
	conn->io_capability = hdev->io_capability;
	conn->remote_auth = 0xff;
	conn->key_type = 0xff;
	conn->rssi = HCI_RSSI_INVALID;
	conn->tx_power = HCI_TX_POWER_INVALID;
	conn->max_tx_power = HCI_TX_POWER_INVALID;
	conn->sync_handle = HCI_SYNC_HANDLE_INVALID;

	set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;

	/* Set Default Authenticated payload timeout to 30s */
	conn->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;

	if (conn->role == HCI_ROLE_MASTER)
		conn->out = true;

	/* Per-type packet types, MTU and cleanup hooks */
	switch (type) {
	case ACL_LINK:
		conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
		conn->mtu = hdev->acl_mtu;
		break;
	case LE_LINK:
		/* conn->src should reflect the local identity address */
		hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
		conn->mtu = hdev->le_mtu ? hdev->le_mtu : hdev->acl_mtu;
		break;
	case ISO_LINK:
		/* conn->src should reflect the local identity address */
		hci_copy_identity_address(hdev, &conn->src, &conn->src_type);

		/* set proper cleanup function */
		if (!bacmp(dst, BDADDR_ANY))
			conn->cleanup = bis_cleanup;
		else if (conn->role == HCI_ROLE_MASTER)
			conn->cleanup = cis_cleanup;

		conn->mtu = hdev->iso_mtu ? hdev->iso_mtu :
			    hdev->le_mtu ? hdev->le_mtu : hdev->acl_mtu;
		break;
	case SCO_LINK:
		if (lmp_esco_capable(hdev))
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
		else
			conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;

		conn->mtu = hdev->sco_mtu;
		break;
	case ESCO_LINK:
		conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
		conn->mtu = hdev->sco_mtu;
		break;
	}

	skb_queue_head_init(&conn->data_q);

	INIT_LIST_HEAD(&conn->chan_list);
	INIT_LIST_HEAD(&conn->link_list);

	INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
	INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
	INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
	INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);

	atomic_set(&conn->refcnt, 0);

	/* Dropped again in hci_conn_cleanup() */
	hci_dev_hold(hdev);

	hci_conn_hash_add(hdev, conn);

	/* The SCO and eSCO connections will only be notified when their
	 * setup has been completed. This is different to ACL links which
	 * can be notified right away.
	 */
	if (conn->type != SCO_LINK && conn->type != ESCO_LINK) {
		if (hdev->notify)
			hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);
	}

	hci_conn_init_sysfs(conn);

	return conn;
}
1014 
/* Allocate a new connection object before the controller has assigned a
 * real handle. A placeholder handle is reserved from the unset-handle
 * IDA; the actual handle is installed later via hci_conn_set_handle().
 *
 * Returns the new hci_conn or ERR_PTR(-ECONNREFUSED) when no
 * placeholder handle could be reserved.
 */
struct hci_conn *hci_conn_add_unset(struct hci_dev *hdev, int type,
				    bdaddr_t *dst, u8 role)
{
	int unset_handle;

	bt_dev_dbg(hdev, "dst %pMR", dst);

	unset_handle = hci_conn_hash_alloc_unset(hdev);
	if (unset_handle < 0)
		return ERR_PTR(-ECONNREFUSED);

	return __hci_conn_add(hdev, type, dst, role, unset_handle);
}
1028 
/* Allocate a new connection object with an already-known controller
 * handle. Returns ERR_PTR(-EINVAL) if @handle is outside the valid
 * HCI connection handle range.
 */
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
			      u8 role, u16 handle)
{
	return handle > HCI_CONN_HANDLE_MAX ? ERR_PTR(-EINVAL) :
	       __hci_conn_add(hdev, type, dst, role, handle);
}
1037 
/* Fail a child (SCO/eSCO/ISO) connection whose setup will never finish
 * because its parent is going away. A zero @reason defaults to remote
 * user termination.
 */
static void hci_conn_cleanup_child(struct hci_conn *conn, u8 reason)
{
	if (!reason)
		reason = HCI_ERROR_REMOTE_USER_TERM;

	/* Due to race, SCO/ISO conn might be not established yet at this
	 * point, and nothing else will clean it up. In other cases it is
	 * done via HCI events.
	 */
	if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
		if (HCI_CONN_HANDLE_UNSET(conn->handle))
			hci_conn_failed(conn, reason);
	} else if (conn->type == ISO_LINK) {
		if (conn->state != BT_CONNECTED &&
		    !test_bit(HCI_CONN_CREATE_CIS, &conn->flags))
			hci_conn_failed(conn, reason);
	}
}
1060 
/* Detach @conn from the parent/child link topology.
 *
 * For a parent connection (conn->parent == NULL) this recursively
 * unlinks every child and, while the device is still up, fails children
 * whose setup never completed. For a child connection it removes the
 * link from the parent's list and drops the references taken when the
 * link was created.
 */
static void hci_conn_unlink(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	bt_dev_dbg(hdev, "hcon %p", conn);

	if (!conn->parent) {
		struct hci_link *link, *t;

		/* _safe variant: the recursive unlink below frees the
		 * child's link entry while we iterate.
		 */
		list_for_each_entry_safe(link, t, &conn->link_list, list) {
			struct hci_conn *child = link->conn;

			hci_conn_unlink(child);

			/* If hdev is down it means
			 * hci_dev_close_sync/hci_conn_hash_flush is in progress
			 * and links don't need to be cleanup as all connections
			 * would be cleanup.
			 */
			if (!test_bit(HCI_UP, &hdev->flags))
				continue;

			hci_conn_cleanup_child(child, conn->abort_reason);
		}

		return;
	}

	/* Child that was never linked (or already unlinked): nothing to do */
	if (!conn->link)
		return;

	/* Remove from the parent's list and wait out concurrent RCU
	 * readers before the link memory is freed below.
	 */
	list_del_rcu(&conn->link->list);
	synchronize_rcu();

	/* Drop both the hold and the get taken in hci_conn_link() */
	hci_conn_drop(conn->parent);
	hci_conn_put(conn->parent);
	conn->parent = NULL;

	kfree(conn->link);
	conn->link = NULL;
}
1102 
/* Tear down @conn completely: unlink it from any parent/children,
 * cancel its delayed work, return unacknowledged packet credits to the
 * controller flow-control counters, purge queued data and remove it
 * from the connection hash via hci_conn_cleanup().
 */
void hci_conn_del(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);

	hci_conn_unlink(conn);

	cancel_delayed_work_sync(&conn->disc_work);
	cancel_delayed_work_sync(&conn->auto_accept_work);
	cancel_delayed_work_sync(&conn->idle_work);

	if (conn->type == ACL_LINK) {
		/* Unacked frames */
		hdev->acl_cnt += conn->sent;
	} else if (conn->type == LE_LINK) {
		cancel_delayed_work(&conn->le_conn_timeout);

		/* Credit the pool LE traffic actually drew from:
		 * dedicated LE buffers if the controller has them,
		 * otherwise the shared ACL buffers.
		 */
		if (hdev->le_pkts)
			hdev->le_cnt += conn->sent;
		else
			hdev->acl_cnt += conn->sent;
	} else {
		/* Unacked ISO frames */
		if (conn->type == ISO_LINK) {
			if (hdev->iso_pkts)
				hdev->iso_cnt += conn->sent;
			else if (hdev->le_pkts)
				hdev->le_cnt += conn->sent;
			else
				hdev->acl_cnt += conn->sent;
		}
	}

	skb_queue_purge(&conn->data_q);

	/* Remove the connection from the list and cleanup its remaining
	 * state. This is a separate function since for some cases like
	 * BT_CONNECT_SCAN we *only* want the cleanup part without the
	 * rest of hci_conn_del.
	 */
	hci_conn_cleanup(conn);

	/* Dequeue callbacks using connection pointer as data */
	hci_cmd_sync_dequeue(hdev, NULL, conn, NULL);
}
1149 
/* Select the hci_dev to use for a connection from @src to @dst.
 *
 * With a wildcard source (BDADDR_ANY) the first powered-up device whose
 * own address differs from @dst is chosen; otherwise the device whose
 * (identity) address and type match @src/@src_type. Devices bound to a
 * user channel are skipped.
 *
 * Returns the chosen device with a reference held (caller releases with
 * hci_dev_put()) or NULL if no suitable device exists.
 */
struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, uint8_t src_type)
{
	int use_src = bacmp(src, BDADDR_ANY);
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%pMR -> %pMR", src, dst);

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(d, &hci_dev_list, list) {
		if (!test_bit(HCI_UP, &d->flags) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Simple routing:
		 *   No source address - find interface with bdaddr != dst
		 *   Source address    - find interface with bdaddr == src
		 */

		if (use_src) {
			bdaddr_t id_addr;
			u8 id_addr_type;

			if (src_type == BDADDR_BREDR) {
				if (!lmp_bredr_capable(d))
					continue;
				bacpy(&id_addr, &d->bdaddr);
				id_addr_type = BDADDR_BREDR;
			} else {
				if (!lmp_le_capable(d))
					continue;

				hci_copy_identity_address(d, &id_addr,
							  &id_addr_type);

				/* Convert from HCI to three-value type */
				if (id_addr_type == ADDR_LE_DEV_PUBLIC)
					id_addr_type = BDADDR_LE_PUBLIC;
				else
					id_addr_type = BDADDR_LE_RANDOM;
			}

			if (!bacmp(&id_addr, src) && id_addr_type == src_type) {
				hdev = d; break;
			}
		} else {
			if (bacmp(&d->bdaddr, dst)) {
				hdev = d; break;
			}
		}
	}

	/* Take the reference while still holding the list lock so the
	 * device cannot go away between lookup and hold.
	 */
	if (hdev)
		hdev = hci_dev_hold(hdev);

	read_unlock(&hci_dev_list_lock);
	return hdev;
}
EXPORT_SYMBOL(hci_get_route);
1209 
/* This function requires the caller holds hdev->lock */
static void hci_le_conn_failed(struct hci_conn *conn, u8 status)
{
	struct hci_dev *hdev = conn->hdev;

	/* Undo connect-by-scanning state (explicit params, pending
	 * lists) for this failed attempt.
	 */
	hci_connect_le_scan_cleanup(conn, status);

	/* Enable advertising in case this was a failed connection
	 * attempt as a peripheral.
	 */
	hci_enable_advertising(hdev);
}
1222 
/* This function requires the caller holds hdev->lock */
void hci_conn_failed(struct hci_conn *conn, u8 status)
{
	struct hci_dev *hdev = conn->hdev;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	/* Per-type failure handling before the common teardown below */
	switch (conn->type) {
	case LE_LINK:
		hci_le_conn_failed(conn, status);
		break;
	case ACL_LINK:
		mgmt_connect_failed(hdev, conn, status);
		break;
	}

	/* In case of BIG/PA sync failed, clear conn flags so that
	 * the conns will be correctly cleaned up by ISO layer
	 */
	test_and_clear_bit(HCI_CONN_BIG_SYNC_FAILED, &conn->flags);
	test_and_clear_bit(HCI_CONN_PA_SYNC_FAILED, &conn->flags);

	/* Notify upper layers of the failure, then destroy the
	 * connection object.
	 */
	conn->state = BT_CLOSED;
	hci_connect_cfm(conn, status);
	hci_conn_del(conn);
}
1249 
/* This function requires the caller holds hdev->lock */
/* Install the controller-assigned @handle on @conn, releasing any
 * placeholder handle reserved by hci_conn_add_unset().
 *
 * Returns 0 on success (or no-op), HCI_ERROR_INVALID_PARAMETERS for an
 * out-of-range handle, or the pending abort reason if the connection is
 * being aborted.
 */
u8 hci_conn_set_handle(struct hci_conn *conn, u16 handle)
{
	struct hci_dev *hdev = conn->hdev;

	bt_dev_dbg(hdev, "hcon %p handle 0x%4.4x", conn, handle);

	if (conn->handle == handle)
		return 0;

	if (handle > HCI_CONN_HANDLE_MAX) {
		bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x",
			   handle, HCI_CONN_HANDLE_MAX);
		return HCI_ERROR_INVALID_PARAMETERS;
	}

	/* If abort_reason has been sent it means the connection is being
	 * aborted and the handle shall not be changed.
	 */
	if (conn->abort_reason)
		return conn->abort_reason;

	/* Return the placeholder handle to the IDA before overwriting */
	if (HCI_CONN_HANDLE_UNSET(conn->handle))
		ida_free(&hdev->unset_handle_ida, conn->handle);

	conn->handle = handle;

	return 0;
}
1279 
/* Initiate a direct LE connection to @dst.
 *
 * Returns the (held) hci_conn on success or an ERR_PTR:
 * -EOPNOTSUPP when LE is not supported at all, -ECONNREFUSED when LE is
 * supported but disabled, -EBUSY when another LE connection attempt is
 * already running or the target is already connected/being connected.
 */
struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
				u8 dst_type, bool dst_resolved, u8 sec_level,
				u16 conn_timeout, u8 role)
{
	struct hci_conn *conn;
	struct smp_irk *irk;
	int err;

	/* Let's make sure that le is enabled.*/
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (lmp_le_capable(hdev))
			return ERR_PTR(-ECONNREFUSED);

		return ERR_PTR(-EOPNOTSUPP);
	}

	/* Since the controller supports only one LE connection attempt at a
	 * time, we return -EBUSY if there is any connection attempt running.
	 */
	if (hci_lookup_le_connect(hdev))
		return ERR_PTR(-EBUSY);

	/* If there's already a connection object but it's not in
	 * scanning state it means it must already be established, in
	 * which case we can't do anything else except report a failure
	 * to connect.
	 */
	conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
	if (conn && !test_bit(HCI_CONN_SCANNING, &conn->flags)) {
		return ERR_PTR(-EBUSY);
	}

	/* Check if the destination address has been resolved by the controller
	 * since if it did then the identity address shall be used.
	 */
	if (!dst_resolved) {
		/* When given an identity address with existing identity
		 * resolving key, the connection needs to be established
		 * to a resolvable random address.
		 *
		 * Storing the resolvable random address is required here
		 * to handle connection failures. The address will later
		 * be resolved back into the original identity address
		 * from the connect request.
		 */
		irk = hci_find_irk_by_addr(hdev, dst, dst_type);
		if (irk && bacmp(&irk->rpa, BDADDR_ANY)) {
			dst = &irk->rpa;
			dst_type = ADDR_LE_DEV_RANDOM;
		}
	}

	/* Reuse an object left in scanning state, otherwise allocate a
	 * fresh one and take the caller's reference on it.
	 */
	if (conn) {
		bacpy(&conn->dst, dst);
	} else {
		conn = hci_conn_add_unset(hdev, LE_LINK, dst, role);
		if (IS_ERR(conn))
			return conn;
		hci_conn_hold(conn);
		conn->pending_sec_level = sec_level;
	}

	conn->dst_type = dst_type;
	conn->sec_level = BT_SECURITY_LOW;
	conn->conn_timeout = conn_timeout;

	err = hci_connect_le_sync(hdev, conn);
	if (err) {
		hci_conn_del(conn);
		return ERR_PTR(err);
	}

	return conn;
}
1354 
is_connected(struct hci_dev * hdev,bdaddr_t * addr,u8 type)1355 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
1356 {
1357 	struct hci_conn *conn;
1358 
1359 	conn = hci_conn_hash_lookup_le(hdev, addr, type);
1360 	if (!conn)
1361 		return false;
1362 
1363 	if (conn->state != BT_CONNECTED)
1364 		return false;
1365 
1366 	return true;
1367 }
1368 
/* This function requires the caller holds hdev->lock */
/* Ensure connection parameters for @addr exist and are queued on
 * pend_le_conns so passive scanning can trigger the connection.
 *
 * Returns 0 on success, -EISCONN if already connected, -ENOMEM if new
 * params could not be allocated.
 */
static int hci_explicit_conn_params_set(struct hci_dev *hdev,
					bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	if (is_connected(hdev, addr, addr_type))
		return -EISCONN;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (!params) {
		params = hci_conn_params_add(hdev, addr, addr_type);
		if (!params)
			return -ENOMEM;

		/* If we created new params, mark them to be deleted in
		 * hci_connect_le_scan_cleanup. It's different case than
		 * existing disabled params, those will stay after cleanup.
		 */
		params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
	}

	/* We're trying to connect, so make sure params are at pend_le_conns */
	if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
	    params->auto_connect == HCI_AUTO_CONN_REPORT ||
	    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
		hci_pend_le_list_del_init(params);
		hci_pend_le_list_add(params, &hdev->pend_le_conns);
	}

	params->explicit_connect = true;

	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
	       params->auto_connect);

	return 0;
}
1406 
qos_set_big(struct hci_dev * hdev,struct bt_iso_qos * qos)1407 static int qos_set_big(struct hci_dev *hdev, struct bt_iso_qos *qos)
1408 {
1409 	struct hci_conn *conn;
1410 	u8  big;
1411 
1412 	/* Allocate a BIG if not set */
1413 	if (qos->bcast.big == BT_ISO_QOS_BIG_UNSET) {
1414 		for (big = 0x00; big < 0xef; big++) {
1415 
1416 			conn = hci_conn_hash_lookup_big(hdev, big);
1417 			if (!conn)
1418 				break;
1419 		}
1420 
1421 		if (big == 0xef)
1422 			return -EADDRNOTAVAIL;
1423 
1424 		/* Update BIG */
1425 		qos->bcast.big = big;
1426 	}
1427 
1428 	return 0;
1429 }
1430 
qos_set_bis(struct hci_dev * hdev,struct bt_iso_qos * qos)1431 static int qos_set_bis(struct hci_dev *hdev, struct bt_iso_qos *qos)
1432 {
1433 	struct hci_conn *conn;
1434 	u8  bis;
1435 
1436 	/* Allocate BIS if not set */
1437 	if (qos->bcast.bis == BT_ISO_QOS_BIS_UNSET) {
1438 		/* Find an unused adv set to advertise BIS, skip instance 0x00
1439 		 * since it is reserved as general purpose set.
1440 		 */
1441 		for (bis = 0x01; bis < hdev->le_num_of_adv_sets;
1442 		     bis++) {
1443 
1444 			conn = hci_conn_hash_lookup_bis(hdev, BDADDR_ANY, bis);
1445 			if (!conn)
1446 				break;
1447 		}
1448 
1449 		if (bis == hdev->le_num_of_adv_sets)
1450 			return -EADDRNOTAVAIL;
1451 
1452 		/* Update BIS */
1453 		qos->bcast.bis = bis;
1454 	}
1455 
1456 	return 0;
1457 }
1458 
/* This function requires the caller holds hdev->lock */
/* Create a broadcast (BIS) connection object bound to @dst with the
 * given QoS and base (periodic advertising data). Validates that the
 * settings are consistent with any other BIS already bound to the same
 * BIG. Returns the held hci_conn in BT_CONNECT state or an ERR_PTR.
 */
static struct hci_conn *hci_add_bis(struct hci_dev *hdev, bdaddr_t *dst,
				    struct bt_iso_qos *qos, __u8 base_len,
				    __u8 *base)
{
	struct hci_conn *conn;
	int err;

	/* Let's make sure that le is enabled.*/
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (lmp_le_capable(hdev))
			return ERR_PTR(-ECONNREFUSED);
		return ERR_PTR(-EOPNOTSUPP);
	}

	/* Allocate BIG/BIS identifiers if the caller left them unset */
	err = qos_set_big(hdev, qos);
	if (err)
		return ERR_PTR(err);

	err = qos_set_bis(hdev, qos);
	if (err)
		return ERR_PTR(err);

	/* Check if the LE Create BIG command has already been sent */
	conn = hci_conn_hash_lookup_per_adv_bis(hdev, dst, qos->bcast.big,
						qos->bcast.big);
	if (conn)
		return ERR_PTR(-EADDRINUSE);

	/* Check BIS settings against other bound BISes, since all
	 * BISes in a BIG must have the same value for all parameters
	 */
	conn = hci_conn_hash_lookup_big(hdev, qos->bcast.big);

	if (conn && (memcmp(qos, &conn->iso_qos, sizeof(*qos)) ||
		     base_len != conn->le_per_adv_data_len ||
		     memcmp(conn->le_per_adv_data, base, base_len)))
		return ERR_PTR(-EADDRINUSE);

	conn = hci_conn_add_unset(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
	if (IS_ERR(conn))
		return conn;

	conn->state = BT_CONNECT;

	hci_conn_hold(conn);
	return conn;
}
1507 
/* This function requires the caller holds hdev->lock */
/* Connect to an LE device indirectly: queue it for auto-connection via
 * passive scanning instead of issuing LE Create Connection directly.
 * Returns the held hci_conn (new or existing) or an ERR_PTR.
 */
struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
				     u8 dst_type, u8 sec_level,
				     u16 conn_timeout,
				     enum conn_reasons conn_reason)
{
	struct hci_conn *conn;

	/* Let's make sure that le is enabled.*/
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (lmp_le_capable(hdev))
			return ERR_PTR(-ECONNREFUSED);

		return ERR_PTR(-EOPNOTSUPP);
	}

	/* Some devices send ATT messages as soon as the physical link is
	 * established. To be able to handle these ATT messages, the user-
	 * space first establishes the connection and then starts the pairing
	 * process.
	 *
	 * So if a hci_conn object already exists for the following connection
	 * attempt, we simply update pending_sec_level and auth_type fields
	 * and return the object found.
	 */
	conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
	if (conn) {
		if (conn->pending_sec_level < sec_level)
			conn->pending_sec_level = sec_level;
		goto done;
	}

	BT_DBG("requesting refresh of dst_addr");

	conn = hci_conn_add_unset(hdev, LE_LINK, dst, HCI_ROLE_MASTER);
	if (IS_ERR(conn))
		return conn;

	if (hci_explicit_conn_params_set(hdev, dst, dst_type) < 0) {
		hci_conn_del(conn);
		return ERR_PTR(-EBUSY);
	}

	/* Mark the object as waiting for the device to be discovered by
	 * scanning; the actual connect happens from the scan path.
	 */
	conn->state = BT_CONNECT;
	set_bit(HCI_CONN_SCANNING, &conn->flags);
	conn->dst_type = dst_type;
	conn->sec_level = BT_SECURITY_LOW;
	conn->pending_sec_level = sec_level;
	conn->conn_timeout = conn_timeout;
	conn->conn_reason = conn_reason;

	hci_update_passive_scan(hdev);

done:
	hci_conn_hold(conn);
	return conn;
}
1565 
/* Establish (or reuse) an outgoing BR/EDR ACL connection to @dst.
 * Returns the held hci_conn or an ERR_PTR on failure.
 */
struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
				 u8 sec_level, u8 auth_type,
				 enum conn_reasons conn_reason)
{
	struct hci_conn *acl;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (lmp_bredr_capable(hdev))
			return ERR_PTR(-ECONNREFUSED);

		return ERR_PTR(-EOPNOTSUPP);
	}

	/* Reject outgoing connection to device with same BD ADDR against
	 * CVE-2020-26555
	 */
	if (!bacmp(&hdev->bdaddr, dst)) {
		bt_dev_dbg(hdev, "Reject connection with same BD_ADDR %pMR\n",
			   dst);
		return ERR_PTR(-ECONNREFUSED);
	}

	acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
	if (!acl) {
		acl = hci_conn_add_unset(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
		if (IS_ERR(acl))
			return acl;
	}

	hci_conn_hold(acl);

	acl->conn_reason = conn_reason;
	/* Only kick off the HCI connect when the link is not already
	 * being set up or established.
	 */
	if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
		int err;

		acl->sec_level = BT_SECURITY_LOW;
		acl->pending_sec_level = sec_level;
		acl->auth_type = auth_type;

		err = hci_connect_acl_sync(hdev, acl);
		if (err) {
			hci_conn_del(acl);
			return ERR_PTR(err);
		}
	}

	return acl;
}
1614 
/* Link child @conn under @parent (e.g. SCO under ACL, CIS under LE
 * ACL). Takes a hold on the child and a get on the parent; both are
 * released again by hci_conn_unlink().
 *
 * Returns the existing or newly allocated link, or NULL if @conn is
 * already linked to a different parent or allocation failed.
 */
static struct hci_link *hci_conn_link(struct hci_conn *parent,
				      struct hci_conn *conn)
{
	struct hci_dev *hdev = parent->hdev;
	struct hci_link *link;

	bt_dev_dbg(hdev, "parent %p hcon %p", parent, conn);

	if (conn->link)
		return conn->link;

	if (conn->parent)
		return NULL;

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return NULL;

	link->conn = hci_conn_hold(conn);
	conn->link = link;
	conn->parent = hci_conn_get(parent);

	/* Use list_add_tail_rcu append to the list */
	list_add_tail_rcu(&link->list, &parent->link_list);

	return link;
}
1642 
/* Establish a SCO/eSCO connection to @dst, first bringing up (or
 * reusing) the underlying ACL link and linking the SCO under it.
 * Returns the SCO hci_conn or an ERR_PTR on failure.
 */
struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
				 __u16 setting, struct bt_codec *codec)
{
	struct hci_conn *acl;
	struct hci_conn *sco;
	struct hci_link *link;

	acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING,
			      CONN_REASON_SCO_CONNECT);
	if (IS_ERR(acl))
		return acl;

	sco = hci_conn_hash_lookup_ba(hdev, type, dst);
	if (!sco) {
		sco = hci_conn_add_unset(hdev, type, dst, HCI_ROLE_MASTER);
		if (IS_ERR(sco)) {
			hci_conn_drop(acl);
			return sco;
		}
	}

	link = hci_conn_link(acl, sco);
	if (!link) {
		hci_conn_drop(acl);
		hci_conn_drop(sco);
		return ERR_PTR(-ENOLINK);
	}

	sco->setting = setting;
	sco->codec = *codec;

	/* SCO setup can only proceed once the ACL is fully connected */
	if (acl->state == BT_CONNECTED &&
	    (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
		set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
		hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);

		if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
			/* defer SCO setup until mode change completed */
			set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
			return sco;
		}

		hci_sco_setup(acl, 0x00);
	}

	return sco;
}
1690 
/* Send HCI LE Create BIG for @conn's BIG, counting all bound BIS
 * connections belonging to it. Returns the hci_send_cmd() result.
 */
static int hci_le_create_big(struct hci_conn *conn, struct bt_iso_qos *qos)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_le_create_big cp;
	struct iso_list_data data;

	memset(&cp, 0, sizeof(cp));

	data.big = qos->bcast.big;
	data.bis = qos->bcast.bis;
	data.count = 0;

	/* Create a BIS for each bound connection */
	hci_conn_hash_list_state(hdev, bis_list, ISO_LINK,
				 BT_BOUND, &data);

	cp.handle = qos->bcast.big;
	cp.adv_handle = qos->bcast.bis;
	cp.num_bis  = data.count;
	/* Broadcast QoS parameters (interval in us, 24-bit LE) */
	hci_cpu_to_le24(qos->bcast.out.interval, cp.bis.sdu_interval);
	cp.bis.sdu = cpu_to_le16(qos->bcast.out.sdu);
	cp.bis.latency =  cpu_to_le16(qos->bcast.out.latency);
	cp.bis.rtn  = qos->bcast.out.rtn;
	cp.bis.phy  = qos->bcast.out.phy;
	cp.bis.packing = qos->bcast.packing;
	cp.bis.framing = qos->bcast.framing;
	cp.bis.encryption = qos->bcast.encryption;
	memcpy(cp.bis.bcode, qos->bcast.bcode, sizeof(cp.bis.bcode));

	return hci_send_cmd(hdev, HCI_OP_LE_CREATE_BIG, sizeof(cp), &cp);
}
1722 
/* hci_cmd_sync callback: (re)program all CIS belonging to the CIG whose
 * id is packed into @data, by issuing LE Set CIG Parameters with one
 * entry per existing CIS. Returns 0 when the CIG no longer has any
 * connections, otherwise the command status.
 */
static int set_cig_params_sync(struct hci_dev *hdev, void *data)
{
	u8 cig_id = PTR_UINT(data);
	struct hci_conn *conn;
	struct bt_iso_qos *qos;
	struct iso_cig_params pdu;
	u8 cis_id;

	conn = hci_conn_hash_lookup_cig(hdev, cig_id);
	if (!conn)
		return 0;

	memset(&pdu, 0, sizeof(pdu));

	/* CIG-level parameters come from any one member connection */
	qos = &conn->iso_qos;
	pdu.cp.cig_id = cig_id;
	hci_cpu_to_le24(qos->ucast.out.interval, pdu.cp.c_interval);
	hci_cpu_to_le24(qos->ucast.in.interval, pdu.cp.p_interval);
	pdu.cp.sca = qos->ucast.sca;
	pdu.cp.packing = qos->ucast.packing;
	pdu.cp.framing = qos->ucast.framing;
	pdu.cp.c_latency = cpu_to_le16(qos->ucast.out.latency);
	pdu.cp.p_latency = cpu_to_le16(qos->ucast.in.latency);

	/* Reprogram all CIS(s) with the same CIG, valid range are:
	 * num_cis: 0x00 to 0x1F
	 * cis_id: 0x00 to 0xEF
	 */
	for (cis_id = 0x00; cis_id < 0xf0 &&
	     pdu.cp.num_cis < ARRAY_SIZE(pdu.cis); cis_id++) {
		struct hci_cis_params *cis;

		conn = hci_conn_hash_lookup_cis(hdev, NULL, 0, cig_id, cis_id);
		if (!conn)
			continue;

		qos = &conn->iso_qos;

		cis = &pdu.cis[pdu.cp.num_cis++];
		cis->cis_id = cis_id;
		cis->c_sdu  = cpu_to_le16(conn->iso_qos.ucast.out.sdu);
		cis->p_sdu  = cpu_to_le16(conn->iso_qos.ucast.in.sdu);
		/* Fall back to the opposite direction's PHY when one
		 * direction is not configured.
		 */
		cis->c_phy  = qos->ucast.out.phy ? qos->ucast.out.phy :
			      qos->ucast.in.phy;
		cis->p_phy  = qos->ucast.in.phy ? qos->ucast.in.phy :
			      qos->ucast.out.phy;
		cis->c_rtn  = qos->ucast.out.rtn;
		cis->p_rtn  = qos->ucast.in.rtn;
	}

	if (!pdu.cp.num_cis)
		return 0;

	/* Only the populated CIS entries are sent on the wire */
	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_CIG_PARAMS,
				     sizeof(pdu.cp) +
				     pdu.cp.num_cis * sizeof(pdu.cis[0]), &pdu,
				     HCI_CMD_TIMEOUT);
}
1781 
/* Pick (or validate) CIG and CIS identifiers for @conn and queue the
 * LE Set CIG Parameters command. Returns true on success, false when
 * no identifier is available, the requested CIS is taken, or queueing
 * the command failed.
 */
static bool hci_le_set_cig_params(struct hci_conn *conn, struct bt_iso_qos *qos)
{
	struct hci_dev *hdev = conn->hdev;
	struct iso_list_data data;

	memset(&data, 0, sizeof(data));

	/* Allocate first still reconfigurable CIG if not set */
	if (qos->ucast.cig == BT_ISO_QOS_CIG_UNSET) {
		for (data.cig = 0x00; data.cig < 0xf0; data.cig++) {
			data.count = 0;

			/* A CIG with a CIS in BT_CONNECT or BT_CONNECTED
			 * can no longer be reconfigured; skip it.
			 */
			hci_conn_hash_list_state(hdev, find_cis, ISO_LINK,
						 BT_CONNECT, &data);
			if (data.count)
				continue;

			hci_conn_hash_list_state(hdev, find_cis, ISO_LINK,
						 BT_CONNECTED, &data);
			if (!data.count)
				break;
		}

		if (data.cig == 0xf0)
			return false;

		/* Update CIG */
		qos->ucast.cig = data.cig;
	}

	if (qos->ucast.cis != BT_ISO_QOS_CIS_UNSET) {
		/* Requested CIS id must not already be in use */
		if (hci_conn_hash_lookup_cis(hdev, NULL, 0, qos->ucast.cig,
					     qos->ucast.cis))
			return false;
		goto done;
	}

	/* Allocate first available CIS if not set */
	for (data.cig = qos->ucast.cig, data.cis = 0x00; data.cis < 0xf0;
	     data.cis++) {
		if (!hci_conn_hash_lookup_cis(hdev, NULL, 0, data.cig,
					      data.cis)) {
			/* Update CIS */
			qos->ucast.cis = data.cis;
			break;
		}
	}

	if (qos->ucast.cis == BT_ISO_QOS_CIS_UNSET)
		return false;

done:
	if (hci_cmd_sync_queue(hdev, set_cig_params_sync,
			       UINT_PTR(qos->ucast.cig), NULL) < 0)
		return false;

	return true;
}
1840 
/* Bind (or rebind) a CIS connection to @dst with the given unicast QoS.
 * Missing per-direction interval/latency values are mirrored from the
 * opposite direction since they must be non-zero. Returns the held
 * hci_conn in BT_BOUND (or BT_CONNECTED if already up), or an ERR_PTR.
 */
struct hci_conn *hci_bind_cis(struct hci_dev *hdev, bdaddr_t *dst,
			      __u8 dst_type, struct bt_iso_qos *qos)
{
	struct hci_conn *cis;

	cis = hci_conn_hash_lookup_cis(hdev, dst, dst_type, qos->ucast.cig,
				       qos->ucast.cis);
	if (!cis) {
		cis = hci_conn_add_unset(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
		if (IS_ERR(cis))
			return cis;
		cis->cleanup = cis_cleanup;
		cis->dst_type = dst_type;
		cis->iso_qos.ucast.cig = BT_ISO_QOS_CIG_UNSET;
		cis->iso_qos.ucast.cis = BT_ISO_QOS_CIS_UNSET;
	}

	if (cis->state == BT_CONNECTED)
		return cis;

	/* Check if CIS has been set and the settings matches */
	if (cis->state == BT_BOUND &&
	    !memcmp(&cis->iso_qos, qos, sizeof(*qos)))
		return cis;

	/* Update LINK PHYs according to QoS preference */
	cis->le_tx_phy = qos->ucast.out.phy;
	cis->le_rx_phy = qos->ucast.in.phy;

	/* If output interval is not set use the input interval as it cannot be
	 * 0x000000.
	 */
	if (!qos->ucast.out.interval)
		qos->ucast.out.interval = qos->ucast.in.interval;

	/* If input interval is not set use the output interval as it cannot be
	 * 0x000000.
	 */
	if (!qos->ucast.in.interval)
		qos->ucast.in.interval = qos->ucast.out.interval;

	/* If output latency is not set use the input latency as it cannot be
	 * 0x0000.
	 */
	if (!qos->ucast.out.latency)
		qos->ucast.out.latency = qos->ucast.in.latency;

	/* If input latency is not set use the output latency as it cannot be
	 * 0x0000.
	 */
	if (!qos->ucast.in.latency)
		qos->ucast.in.latency = qos->ucast.out.latency;

	if (!hci_le_set_cig_params(cis, qos)) {
		hci_conn_drop(cis);
		return ERR_PTR(-EINVAL);
	}

	hci_conn_hold(cis);

	cis->iso_qos = *qos;
	cis->state = BT_BOUND;

	return cis;
}
1906 
/* Configure the ISO data path for @conn, one HCI LE Setup ISO Data Path
 * command per direction that has a non-zero SDU size. Returns false if
 * sending either command failed, true otherwise.
 */
bool hci_iso_setup_path(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_le_setup_iso_path cmd;

	memset(&cmd, 0, sizeof(cmd));

	if (conn->iso_qos.ucast.out.sdu) {
		cmd.handle = cpu_to_le16(conn->handle);
		cmd.direction = 0x00; /* Input (Host to Controller) */
		cmd.path = 0x00; /* HCI path if enabled */
		cmd.codec = 0x03; /* Transparent Data */

		if (hci_send_cmd(hdev, HCI_OP_LE_SETUP_ISO_PATH, sizeof(cmd),
				 &cmd) < 0)
			return false;
	}

	if (conn->iso_qos.ucast.in.sdu) {
		cmd.handle = cpu_to_le16(conn->handle);
		cmd.direction = 0x01; /* Output (Controller to Host) */
		cmd.path = 0x00; /* HCI path if enabled */
		cmd.codec = 0x03; /* Transparent Data */

		if (hci_send_cmd(hdev, HCI_OP_LE_SETUP_ISO_PATH, sizeof(cmd),
				 &cmd) < 0)
			return false;
	}

	return true;
}
1938 
hci_conn_check_create_cis(struct hci_conn * conn)1939 int hci_conn_check_create_cis(struct hci_conn *conn)
1940 {
1941 	if (conn->type != ISO_LINK || !bacmp(&conn->dst, BDADDR_ANY))
1942 		return -EINVAL;
1943 
1944 	if (!conn->parent || conn->parent->state != BT_CONNECTED ||
1945 	    conn->state != BT_CONNECT || HCI_CONN_HANDLE_UNSET(conn->handle))
1946 		return 1;
1947 
1948 	return 0;
1949 }
1950 
/* hci_cmd_sync callback: issue LE Create CIS for all pending CIS; the
 * unused @data matches the hci_cmd_sync_queue() callback signature.
 */
static int hci_create_cis_sync(struct hci_dev *hdev, void *data)
{
	return hci_le_create_cis_sync(hdev);
}
1955 
/* Queue an LE Create CIS command if any CIS connection is ready for
 * creation. Returns -EBUSY while a previous Create CIS is still in
 * flight, 0 when nothing is pending, otherwise the queueing result.
 */
int hci_le_create_cis_pending(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	bool pending = false;

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
		/* A connection already flagged means Create CIS is in
		 * progress; don't queue another one.
		 */
		if (test_bit(HCI_CONN_CREATE_CIS, &conn->flags)) {
			rcu_read_unlock();
			return -EBUSY;
		}

		if (!hci_conn_check_create_cis(conn))
			pending = true;
	}

	rcu_read_unlock();

	if (!pending)
		return 0;

	/* Queue Create CIS */
	return hci_cmd_sync_queue(hdev, hci_create_cis_sync, NULL, NULL);
}
1981 
/* Fill in unset fields of one direction's ISO QoS from the LE ACL
 * connection @conn and the ACL PHY @phy.
 */
static void hci_iso_qos_setup(struct hci_dev *hdev, struct hci_conn *conn,
			      struct bt_iso_io_qos *qos, __u8 phy)
{
	/* Default the SDU size to the connection MTU, but only when this
	 * direction's PHY is actually enabled.
	 */
	if (qos->phy && !qos->sdu)
		qos->sdu = conn->mtu;

	/* BT_ISO_PHY_ANY means "whatever the ACL uses" */
	if (qos->phy == BT_ISO_PHY_ANY)
		qos->phy = phy;

	/* Fall back to the LE ACL connection interval, converting from
	 * 1.25 ms units to microseconds.
	 */
	if (qos->interval == 0)
		qos->interval = conn->le_conn_interval * 1250;

	/* Likewise inherit the LE ACL peripheral latency */
	if (qos->latency == 0)
		qos->latency = conn->le_conn_latency;
}
2002 
/* hci_cmd_sync callback: start periodic advertising carrying the BASE
 * for @data (a BIS hci_conn), then issue LE Create BIG. Returns the
 * first error encountered or the Create BIG submission result.
 */
static int create_big_sync(struct hci_dev *hdev, void *data)
{
	struct hci_conn *conn = data;
	struct bt_iso_qos *qos = &conn->iso_qos;
	u16 interval, sync_interval = 0;
	u32 flags = 0;
	int err;

	/* PHY value 0x02 selects LE 2M for the advertising */
	if (qos->bcast.out.phy == 0x02)
		flags |= MGMT_ADV_FLAG_SEC_2M;

	/* Align intervals */
	interval = (qos->bcast.out.interval / 1250) * qos->bcast.sync_factor;

	if (qos->bcast.bis)
		sync_interval = interval * 4;

	err = hci_start_per_adv_sync(hdev, qos->bcast.bis, conn->le_per_adv_data_len,
				     conn->le_per_adv_data, flags, interval,
				     interval, sync_interval);
	if (err)
		return err;

	return hci_le_create_big(conn, &conn->iso_qos);
}
2028 
/* Completion callback for create_pa_sync(): report failures and always
 * release the command parameters allocated by hci_pa_create_sync().
 */
static void create_pa_complete(struct hci_dev *hdev, void *data, int err)
{
	bt_dev_dbg(hdev, "");

	if (err)
		bt_dev_err(hdev, "Unable to create PA: %d", err);

	kfree(data);
}
2040 
create_pa_sync(struct hci_dev * hdev,void * data)2041 static int create_pa_sync(struct hci_dev *hdev, void *data)
2042 {
2043 	struct hci_cp_le_pa_create_sync *cp = data;
2044 	int err;
2045 
2046 	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_CREATE_SYNC,
2047 				    sizeof(*cp), cp, HCI_CMD_TIMEOUT);
2048 	if (err) {
2049 		hci_dev_clear_flag(hdev, HCI_PA_SYNC);
2050 		return err;
2051 	}
2052 
2053 	return hci_update_passive_scan_sync(hdev);
2054 }
2055 
/* Queue an LE Periodic Advertising Create Sync towards dst.
 *
 * Returns -EBUSY when a PA sync is already in progress, -ENOMEM on
 * allocation failure, otherwise the result of queueing the command.
 * The allocated parameters are freed by create_pa_complete().
 */
int hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst, __u8 dst_type,
		       __u8 sid, struct bt_iso_qos *qos)
{
	struct hci_cp_le_pa_create_sync *cp;

	/* Only one PA Create Sync may be outstanding at a time */
	if (hci_dev_test_and_set_flag(hdev, HCI_PA_SYNC))
		return -EBUSY;

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp) {
		hci_dev_clear_flag(hdev, HCI_PA_SYNC);
		return -ENOMEM;
	}

	bacpy(&cp->addr, dst);
	cp->addr_type = dst_type;
	cp->sid = sid;
	cp->options = qos->bcast.options;
	cp->skip = cpu_to_le16(qos->bcast.skip);
	cp->sync_timeout = cpu_to_le16(qos->bcast.sync_timeout);
	cp->sync_cte_type = qos->bcast.sync_cte_type;

	/* Queue start pa_create_sync and scan */
	return hci_cmd_sync_queue(hdev, create_pa_sync, cp, create_pa_complete);
}
2081 
/* Issue LE BIG Create Sync to synchronize to the BISes listed in bis[].
 *
 * hcon may be NULL; when given, its QoS is updated with the BIG handle
 * allocated by qos_set_big().  num_bis must not exceed the PDU's bis[]
 * array (0x11 entries).
 */
int hci_le_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon,
			   struct bt_iso_qos *qos,
			   __u16 sync_handle, __u8 num_bis, __u8 bis[])
{
	struct _packed {
		struct hci_cp_le_big_create_sync cp;
		__u8  bis[0x11];
	} pdu;
	int err;

	/* Reject lists that would overflow the PDU's BIS array */
	if (num_bis > sizeof(pdu.bis))
		return -EINVAL;

	/* Allocate/validate the BIG handle */
	err = qos_set_big(hdev, qos);
	if (err)
		return err;

	if (hcon)
		hcon->iso_qos.bcast.big = qos->bcast.big;

	memset(&pdu, 0, sizeof(pdu));
	pdu.cp.handle = qos->bcast.big;
	pdu.cp.sync_handle = cpu_to_le16(sync_handle);
	pdu.cp.encryption = qos->bcast.encryption;
	memcpy(pdu.cp.bcode, qos->bcast.bcode, sizeof(pdu.cp.bcode));
	pdu.cp.mse = qos->bcast.mse;
	pdu.cp.timeout = cpu_to_le16(qos->bcast.timeout);
	pdu.cp.num_bis = num_bis;
	memcpy(pdu.bis, bis, num_bis);

	/* Only send the header plus the BIS entries actually used */
	return hci_send_cmd(hdev, HCI_OP_LE_BIG_CREATE_SYNC,
			    sizeof(pdu.cp) + num_bis, &pdu);
}
2115 
/* Completion callback for create_big_sync(): on failure, notify upper
 * layers and tear the broadcast connection down.
 */
static void create_big_complete(struct hci_dev *hdev, void *data, int err)
{
	struct hci_conn *conn = data;

	bt_dev_dbg(hdev, "conn %p", conn);

	if (!err)
		return;

	bt_dev_err(hdev, "Unable to create BIG: %d", err);
	hci_connect_cfm(conn, err);
	hci_conn_del(conn);
}
2128 
/* Bind a broadcast (BIS) hci_conn and prepare its periodic advertising
 * data.  Returns the bound connection in BT_BOUND state, or an ERR_PTR
 * on failure.
 */
struct hci_conn *hci_bind_bis(struct hci_dev *hdev, bdaddr_t *dst,
			      struct bt_iso_qos *qos,
			      __u8 base_len, __u8 *base)
{
	struct hci_conn *conn;
	__u8 eir[HCI_MAX_PER_AD_LENGTH];

	/* Wrap the BASE into a service data element for the Basic Audio
	 * Announcement service (UUID 0x1851).
	 */
	if (base_len && base)
		base_len = eir_append_service_data(eir, 0, 0x1851,
						   base, base_len);

	/* We need hci_conn object using the BDADDR_ANY as dst */
	conn = hci_add_bis(hdev, dst, qos, base_len, eir);
	if (IS_ERR(conn))
		return conn;

	/* Update LINK PHYs according to QoS preference */
	conn->le_tx_phy = qos->bcast.out.phy;

	/* Add Basic Announcement into Periodic Adv Data if BASE is set */
	if (base_len && base) {
		/* NOTE(review): the full eir buffer is copied even when
		 * base_len < sizeof(eir); only le_per_adv_data_len bytes
		 * are meaningful to consumers.
		 */
		memcpy(conn->le_per_adv_data,  eir, sizeof(eir));
		conn->le_per_adv_data_len = base_len;
	}

	hci_iso_qos_setup(hdev, conn, &qos->bcast.out,
			  conn->le_tx_phy ? conn->le_tx_phy :
			  hdev->le_tx_def_phys);

	conn->iso_qos = *qos;
	conn->state = BT_BOUND;

	return conn;
}
2164 
bis_mark_per_adv(struct hci_conn * conn,void * data)2165 static void bis_mark_per_adv(struct hci_conn *conn, void *data)
2166 {
2167 	struct iso_list_data *d = data;
2168 
2169 	/* Skip if not broadcast/ANY address */
2170 	if (bacmp(&conn->dst, BDADDR_ANY))
2171 		return;
2172 
2173 	if (d->big != conn->iso_qos.bcast.big ||
2174 	    d->bis == BT_ISO_QOS_BIS_UNSET ||
2175 	    d->bis != conn->iso_qos.bcast.bis)
2176 		return;
2177 
2178 	set_bit(HCI_CONN_PER_ADV, &conn->flags);
2179 }
2180 
/* Create a broadcast (BIS) connection: bind it, mark matching bound
 * connections as having periodic advertising queued, then queue the
 * periodic advertising start and LE Create BIG.
 */
struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst,
				 __u8 dst_type, struct bt_iso_qos *qos,
				 __u8 base_len, __u8 *base)
{
	struct iso_list_data data;
	struct hci_conn *conn;
	int err;

	conn = hci_bind_bis(hdev, dst, qos, base_len, base);
	if (IS_ERR(conn))
		return conn;

	data.big = qos->bcast.big;
	data.bis = qos->bcast.bis;

	/* Set HCI_CONN_PER_ADV for all bound connections, to mark that
	 * the start periodic advertising and create BIG commands have
	 * been queued
	 */
	hci_conn_hash_list_state(hdev, bis_mark_per_adv, ISO_LINK,
				 BT_BOUND, &data);

	/* Queue start periodic advertising and create BIG */
	err = hci_cmd_sync_queue(hdev, create_big_sync, conn,
				 create_big_complete);
	if (err < 0) {
		hci_conn_drop(conn);
		conn = ERR_PTR(err);
	}

	return conn;
}
2213 
/* Connect an ISO CIS: first obtain an LE ACL to the peer (by waiting
 * for the peer to connect when HCI_ADVERTISING is set, otherwise by
 * connecting/scanning), then bind a CIS on top of it and queue the
 * Create CIS.  Returns the CIS in BT_CONNECT state or an ERR_PTR.
 */
struct hci_conn *hci_connect_cis(struct hci_dev *hdev, bdaddr_t *dst,
				 __u8 dst_type, struct bt_iso_qos *qos)
{
	struct hci_conn *le;
	struct hci_conn *cis;
	struct hci_link *link;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		le = hci_connect_le(hdev, dst, dst_type, false,
				    BT_SECURITY_LOW,
				    HCI_LE_CONN_TIMEOUT,
				    HCI_ROLE_SLAVE);
	else
		le = hci_connect_le_scan(hdev, dst, dst_type,
					 BT_SECURITY_LOW,
					 HCI_LE_CONN_TIMEOUT,
					 CONN_REASON_ISO_CONNECT);
	if (IS_ERR(le))
		return le;

	/* Fill in any unset QoS fields from the ACL parameters */
	hci_iso_qos_setup(hdev, le, &qos->ucast.out,
			  le->le_tx_phy ? le->le_tx_phy : hdev->le_tx_def_phys);
	hci_iso_qos_setup(hdev, le, &qos->ucast.in,
			  le->le_rx_phy ? le->le_rx_phy : hdev->le_rx_def_phys);

	cis = hci_bind_cis(hdev, dst, dst_type, qos);
	if (IS_ERR(cis)) {
		hci_conn_drop(le);
		return cis;
	}

	link = hci_conn_link(le, cis);
	if (!link) {
		hci_conn_drop(le);
		hci_conn_drop(cis);
		return ERR_PTR(-ENOLINK);
	}

	/* Link takes the refcount */
	hci_conn_drop(cis);

	cis->state = BT_CONNECT;

	hci_le_create_cis_pending(hdev);

	return cis;
}
2261 
2262 /* Check link security requirement */
hci_conn_check_link_mode(struct hci_conn * conn)2263 int hci_conn_check_link_mode(struct hci_conn *conn)
2264 {
2265 	BT_DBG("hcon %p", conn);
2266 
2267 	/* In Secure Connections Only mode, it is required that Secure
2268 	 * Connections is used and the link is encrypted with AES-CCM
2269 	 * using a P-256 authenticated combination key.
2270 	 */
2271 	if (hci_dev_test_flag(conn->hdev, HCI_SC_ONLY)) {
2272 		if (!hci_conn_sc_enabled(conn) ||
2273 		    !test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
2274 		    conn->key_type != HCI_LK_AUTH_COMBINATION_P256)
2275 			return 0;
2276 	}
2277 
2278 	 /* AES encryption is required for Level 4:
2279 	  *
2280 	  * BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 3, Part C
2281 	  * page 1319:
2282 	  *
2283 	  * 128-bit equivalent strength for link and encryption keys
2284 	  * required using FIPS approved algorithms (E0 not allowed,
2285 	  * SAFER+ not allowed, and P-192 not allowed; encryption key
2286 	  * not shortened)
2287 	  */
2288 	if (conn->sec_level == BT_SECURITY_FIPS &&
2289 	    !test_bit(HCI_CONN_AES_CCM, &conn->flags)) {
2290 		bt_dev_err(conn->hdev,
2291 			   "Invalid security: Missing AES-CCM usage");
2292 		return 0;
2293 	}
2294 
2295 	if (hci_conn_ssp_enabled(conn) &&
2296 	    !test_bit(HCI_CONN_ENCRYPT, &conn->flags))
2297 		return 0;
2298 
2299 	return 1;
2300 }
2301 
/* Authenticate remote device.
 *
 * Returns 1 when the connection is already authenticated at a level
 * covering the request, 0 when authentication was (or already is)
 * pending.
 */
static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
	BT_DBG("hcon %p", conn);

	/* An in-progress request for a higher level takes precedence */
	if (conn->pending_sec_level > sec_level)
		sec_level = conn->pending_sec_level;

	if (sec_level > conn->sec_level)
		conn->pending_sec_level = sec_level;
	else if (test_bit(HCI_CONN_AUTH, &conn->flags))
		/* Already authenticated at a sufficient level */
		return 1;

	/* Make sure we preserve an existing MITM requirement*/
	auth_type |= (conn->auth_type & 0x01);

	conn->auth_type = auth_type;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(cp), &cp);

		/* Set the ENCRYPT_PEND to trigger encryption after
		 * authentication.
		 */
		if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags))
			set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
	}

	return 0;
}
2336 
2337 /* Encrypt the link */
hci_conn_encrypt(struct hci_conn * conn)2338 static void hci_conn_encrypt(struct hci_conn *conn)
2339 {
2340 	BT_DBG("hcon %p", conn);
2341 
2342 	if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
2343 		struct hci_cp_set_conn_encrypt cp;
2344 		cp.handle  = cpu_to_le16(conn->handle);
2345 		cp.encrypt = 0x01;
2346 		hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2347 			     &cp);
2348 	}
2349 }
2350 
/* Enable security.
 *
 * Returns 1 when the link already meets the requested level, 0 when an
 * authentication/encryption procedure was started (or is pending) and
 * callers must wait for its completion.
 */
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
		      bool initiator)
{
	BT_DBG("hcon %p", conn);

	/* LE links are secured via SMP, not BR/EDR procedures */
	if (conn->type == LE_LINK)
		return smp_conn_security(conn, sec_level);

	/* For sdp we don't need the link key. */
	if (sec_level == BT_SECURITY_SDP)
		return 1;

	/* For non 2.1 devices and low security level we don't need the link
	   key. */
	if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
		return 1;

	/* For other security levels we need the link key. */
	if (!test_bit(HCI_CONN_AUTH, &conn->flags))
		goto auth;

	/* Decide whether the existing key type is strong enough for the
	 * requested level; otherwise fall through to re-authentication.
	 */
	switch (conn->key_type) {
	case HCI_LK_AUTH_COMBINATION_P256:
		/* An authenticated FIPS approved combination key has
		 * sufficient security for security level 4 or lower.
		 */
		if (sec_level <= BT_SECURITY_FIPS)
			goto encrypt;
		break;
	case HCI_LK_AUTH_COMBINATION_P192:
		/* An authenticated combination key has sufficient security for
		 * security level 3 or lower.
		 */
		if (sec_level <= BT_SECURITY_HIGH)
			goto encrypt;
		break;
	case HCI_LK_UNAUTH_COMBINATION_P192:
	case HCI_LK_UNAUTH_COMBINATION_P256:
		/* An unauthenticated combination key has sufficient security
		 * for security level 2 or lower.
		 */
		if (sec_level <= BT_SECURITY_MEDIUM)
			goto encrypt;
		break;
	case HCI_LK_COMBINATION:
		/* A combination key has always sufficient security for the
		 * security levels 2 or lower. High security level requires the
		 * combination key is generated using maximum PIN code length
		 * (16). For pre 2.1 units.
		 */
		if (sec_level <= BT_SECURITY_MEDIUM || conn->pin_length == 16)
			goto encrypt;
		break;
	default:
		break;
	}

auth:
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
		return 0;

	if (initiator)
		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

	if (!hci_conn_auth(conn, sec_level, auth_type))
		return 0;

encrypt:
	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags)) {
		/* Ensure that the encryption key size has been read,
		 * otherwise stall the upper layer responses.
		 */
		if (!conn->enc_key_size)
			return 0;

		/* Nothing else needed, all requirements are met */
		return 1;
	}

	hci_conn_encrypt(conn);
	return 0;
}
EXPORT_SYMBOL(hci_conn_security);
2435 
2436 /* Check secure link requirement */
hci_conn_check_secure(struct hci_conn * conn,__u8 sec_level)2437 int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
2438 {
2439 	BT_DBG("hcon %p", conn);
2440 
2441 	/* Accept if non-secure or higher security level is required */
2442 	if (sec_level != BT_SECURITY_HIGH && sec_level != BT_SECURITY_FIPS)
2443 		return 1;
2444 
2445 	/* Accept if secure or higher security level is already present */
2446 	if (conn->sec_level == BT_SECURITY_HIGH ||
2447 	    conn->sec_level == BT_SECURITY_FIPS)
2448 		return 1;
2449 
2450 	/* Reject not secure link */
2451 	return 0;
2452 }
2453 EXPORT_SYMBOL(hci_conn_check_secure);
2454 
2455 /* Switch role */
hci_conn_switch_role(struct hci_conn * conn,__u8 role)2456 int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
2457 {
2458 	BT_DBG("hcon %p", conn);
2459 
2460 	if (role == conn->role)
2461 		return 1;
2462 
2463 	if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
2464 		struct hci_cp_switch_role cp;
2465 		bacpy(&cp.bdaddr, &conn->dst);
2466 		cp.role = role;
2467 		hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
2468 	}
2469 
2470 	return 0;
2471 }
2472 EXPORT_SYMBOL(hci_conn_switch_role);
2473 
2474 /* Enter active mode */
hci_conn_enter_active_mode(struct hci_conn * conn,__u8 force_active)2475 void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
2476 {
2477 	struct hci_dev *hdev = conn->hdev;
2478 
2479 	BT_DBG("hcon %p mode %d", conn, conn->mode);
2480 
2481 	if (conn->mode != HCI_CM_SNIFF)
2482 		goto timer;
2483 
2484 	if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
2485 		goto timer;
2486 
2487 	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
2488 		struct hci_cp_exit_sniff_mode cp;
2489 		cp.handle = cpu_to_le16(conn->handle);
2490 		hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
2491 	}
2492 
2493 timer:
2494 	if (hdev->idle_timeout > 0)
2495 		queue_delayed_work(hdev->workqueue, &conn->idle_work,
2496 				   msecs_to_jiffies(hdev->idle_timeout));
2497 }
2498 
/* Drop all connection on the device */
void hci_conn_hash_flush(struct hci_dev *hdev)
{
	struct list_head *head = &hdev->conn_hash.list;
	struct hci_conn *conn;

	BT_DBG("hdev %s", hdev->name);

	/* We should not traverse the list here, because hci_conn_del
	 * can remove extra links, which may cause the list traversal
	 * to hit items that have already been released.
	 */
	while ((conn = list_first_entry_or_null(head,
						struct hci_conn,
						list)) != NULL) {
		conn->state = BT_CLOSED;
		/* Notify upper layers before deleting the connection */
		hci_disconn_cfm(conn, HCI_ERROR_LOCAL_HOST_TERM);
		hci_conn_del(conn);
	}
}
2519 
get_link_mode(struct hci_conn * conn)2520 static u32 get_link_mode(struct hci_conn *conn)
2521 {
2522 	u32 link_mode = 0;
2523 
2524 	if (conn->role == HCI_ROLE_MASTER)
2525 		link_mode |= HCI_LM_MASTER;
2526 
2527 	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
2528 		link_mode |= HCI_LM_ENCRYPT;
2529 
2530 	if (test_bit(HCI_CONN_AUTH, &conn->flags))
2531 		link_mode |= HCI_LM_AUTH;
2532 
2533 	if (test_bit(HCI_CONN_SECURE, &conn->flags))
2534 		link_mode |= HCI_LM_SECURE;
2535 
2536 	if (test_bit(HCI_CONN_FIPS, &conn->flags))
2537 		link_mode |= HCI_LM_FIPS;
2538 
2539 	return link_mode;
2540 }
2541 
/* ioctl helper: copy the list of connections of the requested device
 * to userspace.  Returns 0, or a negative errno.
 */
int hci_get_conn_list(void __user *arg)
{
	struct hci_conn_list_req req, *cl;
	struct hci_conn_info *ci;
	struct hci_dev *hdev;
	struct hci_conn *c;
	int n = 0, size, err;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	/* Bound the requested count to keep the allocation small */
	if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
		return -EINVAL;

	size = sizeof(req) + req.conn_num * sizeof(*ci);

	cl = kmalloc(size, GFP_KERNEL);
	if (!cl)
		return -ENOMEM;

	hdev = hci_dev_get(req.dev_id);
	if (!hdev) {
		kfree(cl);
		return -ENODEV;
	}

	ci = cl->conn_info;

	/* Snapshot the connection state under the dev lock */
	hci_dev_lock(hdev);
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		bacpy(&ci[n].bdaddr, &c->dst);
		ci[n].handle = c->handle;
		ci[n].type  = c->type;
		ci[n].out   = c->out;
		ci[n].state = c->state;
		ci[n].link_mode = get_link_mode(c);
		if (++n >= req.conn_num)
			break;
	}
	hci_dev_unlock(hdev);

	cl->dev_id = hdev->id;
	cl->conn_num = n;
	/* Only copy back the entries actually filled in */
	size = sizeof(req) + n * sizeof(*ci);

	hci_dev_put(hdev);

	err = copy_to_user(arg, cl, size);
	kfree(cl);

	return err ? -EFAULT : 0;
}
2594 
/* ioctl helper: copy the state of the connection matching
 * req.type/req.bdaddr to userspace.  Returns 0, -EFAULT or -ENOENT.
 */
int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
{
	struct hci_conn_info_req req;
	struct hci_conn_info ci;
	struct hci_conn *conn;
	/* Reply is written right after the request header in the buffer */
	char __user *ptr = arg + sizeof(req);

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	/* Snapshot the connection state under the dev lock */
	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
	if (conn) {
		bacpy(&ci.bdaddr, &conn->dst);
		ci.handle = conn->handle;
		ci.type  = conn->type;
		ci.out   = conn->out;
		ci.state = conn->state;
		ci.link_mode = get_link_mode(conn);
	}
	hci_dev_unlock(hdev);

	if (!conn)
		return -ENOENT;

	return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
}
2622 
/* ioctl helper: report the auth type of the ACL connection matching
 * req.bdaddr.  Returns 0, -EFAULT or -ENOENT.
 */
int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
{
	struct hci_auth_info_req req;
	struct hci_conn *hcon;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	/* Snapshot the auth type under the dev lock */
	hci_dev_lock(hdev);
	hcon = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
	if (hcon)
		req.type = hcon->auth_type;
	hci_dev_unlock(hdev);

	if (!hcon)
		return -ENOENT;

	return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
}
2642 
hci_chan_create(struct hci_conn * conn)2643 struct hci_chan *hci_chan_create(struct hci_conn *conn)
2644 {
2645 	struct hci_dev *hdev = conn->hdev;
2646 	struct hci_chan *chan;
2647 
2648 	BT_DBG("%s hcon %p", hdev->name, conn);
2649 
2650 	if (test_bit(HCI_CONN_DROP, &conn->flags)) {
2651 		BT_DBG("Refusing to create new hci_chan");
2652 		return NULL;
2653 	}
2654 
2655 	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
2656 	if (!chan)
2657 		return NULL;
2658 
2659 	chan->conn = hci_conn_get(conn);
2660 	skb_queue_head_init(&chan->data_q);
2661 	chan->state = BT_CONNECTED;
2662 
2663 	list_add_rcu(&chan->list, &conn->chan_list);
2664 
2665 	return chan;
2666 }
2667 
/* Unlink and free a hci_chan, dropping the connection reference it
 * holds.
 */
void hci_chan_del(struct hci_chan *chan)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);

	list_del_rcu(&chan->list);

	/* Wait for concurrent RCU readers of chan_list to finish before
	 * the channel is freed below.
	 */
	synchronize_rcu();

	/* Prevent new hci_chan's to be created for this hci_conn */
	set_bit(HCI_CONN_DROP, &conn->flags);

	/* Drop the reference taken by hci_chan_create() */
	hci_conn_put(conn);

	skb_queue_purge(&chan->data_q);
	kfree(chan);
}
2687 
hci_chan_list_flush(struct hci_conn * conn)2688 void hci_chan_list_flush(struct hci_conn *conn)
2689 {
2690 	struct hci_chan *chan, *n;
2691 
2692 	BT_DBG("hcon %p", conn);
2693 
2694 	list_for_each_entry_safe(chan, n, &conn->chan_list, list)
2695 		hci_chan_del(chan);
2696 }
2697 
/* Find the channel with the given handle on one connection, or NULL. */
static struct hci_chan *__hci_chan_lookup_handle(struct hci_conn *hcon,
						 __u16 handle)
{
	struct hci_chan *chan;

	list_for_each_entry(chan, &hcon->chan_list, list)
		if (chan->handle == handle)
			return chan;

	return NULL;
}
2710 
/* Search every connection of the device for a channel with the given
 * handle.  Returns the channel or NULL.
 */
struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle)
{
	struct hci_chan *hchan = NULL;
	struct hci_conn *hcon;

	rcu_read_lock();

	list_for_each_entry_rcu(hcon, &hdev->conn_hash.list, list) {
		hchan = __hci_chan_lookup_handle(hcon, handle);
		if (hchan)
			break;
	}

	rcu_read_unlock();

	return hchan;
}
2729 
/* Map the connection's packet types (BR/EDR) or negotiated PHYs (LE)
 * to a BT_PHY_* bitmask.  For BR/EDR EDR bits, a set bit in pkt_type
 * means "do not use", hence the inverted tests below.
 */
u32 hci_conn_get_phy(struct hci_conn *conn)
{
	u32 phys = 0;

	/* BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 2, Part B page 471:
	 * Table 6.2: Packets defined for synchronous, asynchronous, and
	 * CPB logical transport types.
	 */
	switch (conn->type) {
	case SCO_LINK:
		/* SCO logical transport (1 Mb/s):
		 * HV1, HV2, HV3 and DV.
		 */
		phys |= BT_PHY_BR_1M_1SLOT;

		break;

	case ACL_LINK:
		/* ACL logical transport (1 Mb/s) ptt=0:
		 * DH1, DM3, DH3, DM5 and DH5.
		 */
		phys |= BT_PHY_BR_1M_1SLOT;

		if (conn->pkt_type & (HCI_DM3 | HCI_DH3))
			phys |= BT_PHY_BR_1M_3SLOT;

		if (conn->pkt_type & (HCI_DM5 | HCI_DH5))
			phys |= BT_PHY_BR_1M_5SLOT;

		/* ACL logical transport (2 Mb/s) ptt=1:
		 * 2-DH1, 2-DH3 and 2-DH5.
		 */
		if (!(conn->pkt_type & HCI_2DH1))
			phys |= BT_PHY_EDR_2M_1SLOT;

		if (!(conn->pkt_type & HCI_2DH3))
			phys |= BT_PHY_EDR_2M_3SLOT;

		if (!(conn->pkt_type & HCI_2DH5))
			phys |= BT_PHY_EDR_2M_5SLOT;

		/* ACL logical transport (3 Mb/s) ptt=1:
		 * 3-DH1, 3-DH3 and 3-DH5.
		 */
		if (!(conn->pkt_type & HCI_3DH1))
			phys |= BT_PHY_EDR_3M_1SLOT;

		if (!(conn->pkt_type & HCI_3DH3))
			phys |= BT_PHY_EDR_3M_3SLOT;

		if (!(conn->pkt_type & HCI_3DH5))
			phys |= BT_PHY_EDR_3M_5SLOT;

		break;

	case ESCO_LINK:
		/* eSCO logical transport (1 Mb/s): EV3, EV4 and EV5 */
		phys |= BT_PHY_BR_1M_1SLOT;

		if (!(conn->pkt_type & (ESCO_EV4 | ESCO_EV5)))
			phys |= BT_PHY_BR_1M_3SLOT;

		/* eSCO logical transport (2 Mb/s): 2-EV3, 2-EV5 */
		if (!(conn->pkt_type & ESCO_2EV3))
			phys |= BT_PHY_EDR_2M_1SLOT;

		if (!(conn->pkt_type & ESCO_2EV5))
			phys |= BT_PHY_EDR_2M_3SLOT;

		/* eSCO logical transport (3 Mb/s): 3-EV3, 3-EV5 */
		if (!(conn->pkt_type & ESCO_3EV3))
			phys |= BT_PHY_EDR_3M_1SLOT;

		if (!(conn->pkt_type & ESCO_3EV5))
			phys |= BT_PHY_EDR_3M_3SLOT;

		break;

	case LE_LINK:
		if (conn->le_tx_phy & HCI_LE_SET_PHY_1M)
			phys |= BT_PHY_LE_1M_TX;

		if (conn->le_rx_phy & HCI_LE_SET_PHY_1M)
			phys |= BT_PHY_LE_1M_RX;

		if (conn->le_tx_phy & HCI_LE_SET_PHY_2M)
			phys |= BT_PHY_LE_2M_TX;

		if (conn->le_rx_phy & HCI_LE_SET_PHY_2M)
			phys |= BT_PHY_LE_2M_RX;

		if (conn->le_tx_phy & HCI_LE_SET_PHY_CODED)
			phys |= BT_PHY_LE_CODED_TX;

		if (conn->le_rx_phy & HCI_LE_SET_PHY_CODED)
			phys |= BT_PHY_LE_CODED_RX;

		break;
	}

	return phys;
}
2832 
/* hci_cmd_sync callback for hci_abort_conn() */
static int abort_conn_sync(struct hci_dev *hdev, void *data)
{
	struct hci_conn *conn = data;

	/* The connection may have been cleaned up by the time this runs;
	 * bail out instead of touching a stale pointer.
	 */
	if (!hci_conn_valid(hdev, conn))
		return -ECANCELED;

	return hci_abort_conn_sync(hdev, conn, conn->abort_reason);
}
2842 
/* Abort the connection with the given reason, cancelling any pending
 * connect command or queued connect attempt first.  Safe to call more
 * than once: only the first reason is recorded.
 */
int hci_abort_conn(struct hci_conn *conn, u8 reason)
{
	struct hci_dev *hdev = conn->hdev;

	/* If abort_reason has already been set it means the connection is
	 * already being aborted so don't attempt to overwrite it.
	 */
	if (conn->abort_reason)
		return 0;

	bt_dev_dbg(hdev, "handle 0x%2.2x reason 0x%2.2x", conn->handle, reason);

	conn->abort_reason = reason;

	/* If the connection is pending check the command opcode since that
	 * might be blocking on hci_cmd_sync_work while waiting its respective
	 * event so we need to hci_cmd_sync_cancel to cancel it.
	 *
	 * hci_connect_le serializes the connection attempts so only one
	 * connection can be in BT_CONNECT at time.
	 */
	if (conn->state == BT_CONNECT && hdev->req_status == HCI_REQ_PEND) {
		switch (hci_skb_event(hdev->sent_cmd)) {
		case HCI_EV_CONN_COMPLETE:
		case HCI_EV_LE_CONN_COMPLETE:
		case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
		case HCI_EVT_LE_CIS_ESTABLISHED:
			hci_cmd_sync_cancel(hdev, ECANCELED);
			break;
		}
	/* Cancel connect attempt if still queued/pending */
	} else if (!hci_cancel_connect_sync(hdev, conn)) {
		return 0;
	}

	/* Run immediately if on cmd_sync_work since this may be called
	 * as a result to MGMT_OP_DISCONNECT/MGMT_OP_UNPAIR which does
	 * already queue its callback on cmd_sync_work.
	 */
	return hci_cmd_sync_run_once(hdev, abort_conn_sync, conn, NULL);
}
2884