/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
   Copyright 2023 NXP

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI connection handling. */

#include <linux/export.h>
#include <linux/debugfs.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/iso.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "smp.h"
#include "eir.h"

struct sco_param {
	u16 pkt_type;
	u16 max_latency;
	u8  retrans_effort;
};

struct conn_handle_t {
	struct hci_conn *conn;
	__u16 handle;
};

static const struct sco_param esco_param_cvsd[] = {
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a,	0x01 }, /* S3 */
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007,	0x01 }, /* S2 */
	{ EDR_ESCO_MASK | ESCO_EV3,   0x0007,	0x01 }, /* S1 */
	{ EDR_ESCO_MASK | ESCO_HV3,   0xffff,	0x01 }, /* D1 */
	{ EDR_ESCO_MASK | ESCO_HV1,   0xffff,	0x01 }, /* D0 */
};

static const struct sco_param sco_param_cvsd[] = {
	{ EDR_ESCO_MASK | ESCO_HV3,   0xffff,	0xff }, /* D1 */
	{ EDR_ESCO_MASK | ESCO_HV1,   0xffff,	0xff }, /* D0 */
};

static const struct sco_param esco_param_msbc[] = {
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d,	0x02 }, /* T2 */
	{ EDR_ESCO_MASK | ESCO_EV3,   0x0008,	0x02 }, /* T1 */
};

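/* The tables above are ordered from most to least preferred parameter
 * set; conn->attempt is used as a 1-based index into them, so every
 * retry falls back to the next, more conservative entry. The S/T/D
 * names refer to the HFP audio parameter sets. Per the HCI spec,
 * retrans_effort 0x01 optimizes for power, 0x02 for link quality, and
 * 0xff means "don't care" (used for legacy SCO).
 */
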
/* This function requires the caller holds hdev->lock */
void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status)
{
	struct hci_conn_params *params;
	struct hci_dev *hdev = conn->hdev;
	struct smp_irk *irk;
	bdaddr_t *bdaddr;
	u8 bdaddr_type;

	bdaddr = &conn->dst;
	bdaddr_type = conn->dst_type;

	/* Check if we need to convert to identity address */
	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		bdaddr_type = irk->addr_type;
	}

	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, bdaddr,
					   bdaddr_type);
	if (!params)
		return;

	if (params->conn) {
		hci_conn_drop(params->conn);
		hci_conn_put(params->conn);
		params->conn = NULL;
	}

	if (!params->explicit_connect)
		return;

	/* If the status indicates successful cancellation of
	 * the attempt (i.e. Unknown Connection Id) there's no point in
	 * notifying failure since we'll go back to keep trying to
	 * connect. The only exception is explicit connect requests
	 * where a timeout + cancel does indicate an actual failure.
	 */
	if (status && status != HCI_ERROR_UNKNOWN_CONN_ID)
		mgmt_connect_failed(hdev, conn, status);

	/* The connection attempt was doing a scan for a new RPA, and is
	 * in the scan phase. If the params are not associated with any other
	 * autoconnect action, remove them completely. If they are, just
	 * unmark them as waiting for connection, by clearing the
	 * explicit_connect field.
	 */
	params->explicit_connect = false;

	hci_pend_le_list_del_init(params);

	switch (params->auto_connect) {
	case HCI_AUTO_CONN_EXPLICIT:
		hci_conn_params_del(hdev, bdaddr, bdaddr_type);
		/* return instead of break to avoid duplicate scan update */
		return;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		hci_pend_le_list_add(params, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		hci_pend_le_list_add(params, &hdev->pend_le_reports);
		break;
	default:
		break;
	}

	hci_update_passive_scan(hdev);
}

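/* Remove the connection from the hash table and release everything it
 * still owns (channels, link key, unset handle, debugfs and sysfs
 * entries). Split out from hci_conn_del() because some callers, e.g.
 * the BT_CONNECT_SCAN case, only need this cleanup part.
 */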
static void hci_conn_cleanup(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags))
		hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type);

	if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
		hci_remove_link_key(hdev, &conn->dst);

	hci_chan_list_flush(conn);

	hci_conn_hash_del(hdev, conn);

	if (HCI_CONN_HANDLE_UNSET(conn->handle))
		ida_free(&hdev->unset_handle_ida, conn->handle);

	if (conn->cleanup)
		conn->cleanup(conn);

	if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
		switch (conn->setting & SCO_AIRMODE_MASK) {
		case SCO_AIRMODE_CVSD:
		case SCO_AIRMODE_TRANSP:
			if (hdev->notify)
				hdev->notify(hdev, HCI_NOTIFY_DISABLE_SCO);
			break;
		}
	} else {
		if (hdev->notify)
			hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
	}

	debugfs_remove_recursive(conn->debugfs);

	hci_conn_del_sysfs(conn);

	hci_dev_put(hdev);
}

int hci_disconnect(struct hci_conn *conn, __u8 reason)
{
	BT_DBG("hcon %p", conn);

	/* When we are the central of an established connection and it enters
	 * the disconnect timeout, then go ahead and try to read the
	 * current clock offset.  Processing of the result is done
	 * within the event handling and hci_clock_offset_evt function.
	 */
	if (conn->type == ACL_LINK && conn->role == HCI_ROLE_MASTER &&
	    (conn->state == BT_CONNECTED || conn->state == BT_CONFIG)) {
		struct hci_dev *hdev = conn->hdev;
		struct hci_cp_read_clock_offset clkoff_cp;

		clkoff_cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_READ_CLOCK_OFFSET, sizeof(clkoff_cp),
			     &clkoff_cp);
	}

	return hci_abort_conn(conn, reason);
}

static void hci_add_sco(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_add_sco cp;

	BT_DBG("hcon %p", conn);

	conn->state = BT_CONNECT;
	conn->out = true;

	conn->attempt++;

	cp.handle   = cpu_to_le16(handle);
	cp.pkt_type = cpu_to_le16(conn->pkt_type);

	hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
}

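/* Skip table entries that would allow 2-EV3 packets when the remote
 * device on the parent ACL is not eSCO 2M capable (the EDR bits in
 * pkt_type are exclusion bits, so a set ESCO_2EV3 bit means 2-EV3 is
 * NOT used). Returns true while conn->attempt still points at a
 * usable entry.
 */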
static bool find_next_esco_param(struct hci_conn *conn,
				 const struct sco_param *esco_param, int size)
{
	if (!conn->parent)
		return false;

	for (; conn->attempt <= size; conn->attempt++) {
		if (lmp_esco_2m_capable(conn->parent) ||
		    (esco_param[conn->attempt - 1].pkt_type & ESCO_2EV3))
			break;
		BT_DBG("hcon %p skipped attempt %d, eSCO 2M not supported",
		       conn, conn->attempt);
	}

	return conn->attempt <= size;
}

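/* Configure the vendor-specific offload data path for an upcoming eSCO
 * connection: query the driver for codec configuration data and send
 * HCI_CONFIGURE_DATA_PATH once per direction (0x00, then 0x01).
 */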
static int configure_datapath_sync(struct hci_dev *hdev, struct bt_codec *codec)
{
	int err;
	__u8 vnd_len, *vnd_data = NULL;
	struct hci_op_configure_data_path *cmd = NULL;

	/* Bail out early without error if there is no offload data path
	 * or the driver cannot provide codec configuration data.
	 */
	if (!codec->data_path || !hdev->get_codec_config_data)
		return 0;

	err = hdev->get_codec_config_data(hdev, ESCO_LINK, codec, &vnd_len,
					  &vnd_data);
	if (err < 0)
		goto error;

	cmd = kzalloc(sizeof(*cmd) + vnd_len, GFP_KERNEL);
	if (!cmd) {
		err = -ENOMEM;
		goto error;
	}

	err = hdev->get_data_path_id(hdev, &cmd->data_path_id);
	if (err < 0)
		goto error;

	cmd->vnd_len = vnd_len;
	memcpy(cmd->vnd_data, vnd_data, vnd_len);

	cmd->direction = 0x00;
	__hci_cmd_sync_status(hdev, HCI_CONFIGURE_DATA_PATH,
			      sizeof(*cmd) + vnd_len, cmd, HCI_CMD_TIMEOUT);

	cmd->direction = 0x01;
	err = __hci_cmd_sync_status(hdev, HCI_CONFIGURE_DATA_PATH,
				    sizeof(*cmd) + vnd_len, cmd,
				    HCI_CMD_TIMEOUT);
error:
	kfree(cmd);
	kfree(vnd_data);
	return err;
}

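/* Runs from the hci_cmd_sync queue. The conn_handle wrapper allocated
 * by hci_setup_sync() is consumed and freed here; conn may already have
 * been freed while the work was queued, hence the hci_conn_valid()
 * check before it is dereferenced.
 */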
static int hci_enhanced_setup_sync(struct hci_dev *hdev, void *data)
{
	struct conn_handle_t *conn_handle = data;
	struct hci_conn *conn = conn_handle->conn;
	__u16 handle = conn_handle->handle;
	struct hci_cp_enhanced_setup_sync_conn cp;
	const struct sco_param *param;

	kfree(conn_handle);

	if (!hci_conn_valid(hdev, conn))
		return -ECANCELED;

	bt_dev_dbg(hdev, "hcon %p", conn);

	configure_datapath_sync(hdev, &conn->codec);

	conn->state = BT_CONNECT;
	conn->out = true;

	conn->attempt++;

	memset(&cp, 0x00, sizeof(cp));

	cp.handle   = cpu_to_le16(handle);

	cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
	cp.rx_bandwidth   = cpu_to_le32(0x00001f40);

	switch (conn->codec.id) {
	case BT_CODEC_MSBC:
		if (!find_next_esco_param(conn, esco_param_msbc,
					  ARRAY_SIZE(esco_param_msbc)))
			return -EINVAL;

		param = &esco_param_msbc[conn->attempt - 1];
		cp.tx_coding_format.id = 0x05;
		cp.rx_coding_format.id = 0x05;
		cp.tx_codec_frame_size = __cpu_to_le16(60);
		cp.rx_codec_frame_size = __cpu_to_le16(60);
		cp.in_bandwidth = __cpu_to_le32(32000);
		cp.out_bandwidth = __cpu_to_le32(32000);
		cp.in_coding_format.id = 0x04;
		cp.out_coding_format.id = 0x04;
		cp.in_coded_data_size = __cpu_to_le16(16);
		cp.out_coded_data_size = __cpu_to_le16(16);
		cp.in_pcm_data_format = 2;
		cp.out_pcm_data_format = 2;
		cp.in_pcm_sample_payload_msb_pos = 0;
		cp.out_pcm_sample_payload_msb_pos = 0;
		cp.in_data_path = conn->codec.data_path;
		cp.out_data_path = conn->codec.data_path;
		cp.in_transport_unit_size = 1;
		cp.out_transport_unit_size = 1;
		break;

	case BT_CODEC_TRANSPARENT:
		if (!find_next_esco_param(conn, esco_param_msbc,
					  ARRAY_SIZE(esco_param_msbc)))
			return -EINVAL;
		param = &esco_param_msbc[conn->attempt - 1];
		cp.tx_coding_format.id = 0x03;
		cp.rx_coding_format.id = 0x03;
		cp.tx_codec_frame_size = __cpu_to_le16(60);
		cp.rx_codec_frame_size = __cpu_to_le16(60);
		cp.in_bandwidth = __cpu_to_le32(0x1f40);
		cp.out_bandwidth = __cpu_to_le32(0x1f40);
		cp.in_coding_format.id = 0x03;
		cp.out_coding_format.id = 0x03;
		cp.in_coded_data_size = __cpu_to_le16(16);
		cp.out_coded_data_size = __cpu_to_le16(16);
		cp.in_pcm_data_format = 2;
		cp.out_pcm_data_format = 2;
		cp.in_pcm_sample_payload_msb_pos = 0;
		cp.out_pcm_sample_payload_msb_pos = 0;
		cp.in_data_path = conn->codec.data_path;
		cp.out_data_path = conn->codec.data_path;
		cp.in_transport_unit_size = 1;
		cp.out_transport_unit_size = 1;
		break;

	case BT_CODEC_CVSD:
		if (conn->parent && lmp_esco_capable(conn->parent)) {
			if (!find_next_esco_param(conn, esco_param_cvsd,
						  ARRAY_SIZE(esco_param_cvsd)))
				return -EINVAL;
			param = &esco_param_cvsd[conn->attempt - 1];
		} else {
			if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
				return -EINVAL;
			param = &sco_param_cvsd[conn->attempt - 1];
		}
		cp.tx_coding_format.id = 2;
		cp.rx_coding_format.id = 2;
		cp.tx_codec_frame_size = __cpu_to_le16(60);
		cp.rx_codec_frame_size = __cpu_to_le16(60);
		cp.in_bandwidth = __cpu_to_le32(16000);
		cp.out_bandwidth = __cpu_to_le32(16000);
		cp.in_coding_format.id = 4;
		cp.out_coding_format.id = 4;
		cp.in_coded_data_size = __cpu_to_le16(16);
		cp.out_coded_data_size = __cpu_to_le16(16);
		cp.in_pcm_data_format = 2;
		cp.out_pcm_data_format = 2;
		cp.in_pcm_sample_payload_msb_pos = 0;
		cp.out_pcm_sample_payload_msb_pos = 0;
		cp.in_data_path = conn->codec.data_path;
		cp.out_data_path = conn->codec.data_path;
		cp.in_transport_unit_size = 16;
		cp.out_transport_unit_size = 16;
		break;
	default:
		return -EINVAL;
	}

	cp.retrans_effort = param->retrans_effort;
	cp.pkt_type = __cpu_to_le16(param->pkt_type);
	cp.max_latency = __cpu_to_le16(param->max_latency);

	if (hci_send_cmd(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
		return -EIO;

	return 0;
}

static bool hci_setup_sync_conn(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_setup_sync_conn cp;
	const struct sco_param *param;

	bt_dev_dbg(hdev, "hcon %p", conn);

	conn->state = BT_CONNECT;
	conn->out = true;

	conn->attempt++;

	cp.handle   = cpu_to_le16(handle);

	cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
	cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
	cp.voice_setting  = cpu_to_le16(conn->setting);

	switch (conn->setting & SCO_AIRMODE_MASK) {
	case SCO_AIRMODE_TRANSP:
		if (!find_next_esco_param(conn, esco_param_msbc,
					  ARRAY_SIZE(esco_param_msbc)))
			return false;
		param = &esco_param_msbc[conn->attempt - 1];
		break;
	case SCO_AIRMODE_CVSD:
		if (conn->parent && lmp_esco_capable(conn->parent)) {
			if (!find_next_esco_param(conn, esco_param_cvsd,
						  ARRAY_SIZE(esco_param_cvsd)))
				return false;
			param = &esco_param_cvsd[conn->attempt - 1];
		} else {
			if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
				return false;
			param = &sco_param_cvsd[conn->attempt - 1];
		}
		break;
	default:
		return false;
	}

	cp.retrans_effort = param->retrans_effort;
	cp.pkt_type = __cpu_to_le16(param->pkt_type);
	cp.max_latency = __cpu_to_le16(param->max_latency);

	if (hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
		return false;

	return true;
}

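/* Queue an Enhanced Setup Synchronous Connection when the controller
 * supports it, falling back to the legacy Setup Synchronous Connection
 * command otherwise. Returns true if the request was queued or sent.
 */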
bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
{
	int result;
	struct conn_handle_t *conn_handle;

	if (enhanced_sync_conn_capable(conn->hdev)) {
		conn_handle = kzalloc(sizeof(*conn_handle), GFP_KERNEL);

		if (!conn_handle)
			return false;

		conn_handle->conn = conn;
		conn_handle->handle = handle;
		result = hci_cmd_sync_queue(conn->hdev, hci_enhanced_setup_sync,
					    conn_handle, NULL);
		if (result < 0)
			kfree(conn_handle);

		return result == 0;
	}

	return hci_setup_sync_conn(conn, handle);
}

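/* Send LE Connection Update with the given parameters, also caching
 * them in the stored connection parameters for the destination when
 * such an entry exists. Returns 0x01 if an entry was updated and 0x00
 * otherwise.
 */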
u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
		      u16 to_multiplier)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_conn_params *params;
	struct hci_cp_le_conn_update cp;

	hci_dev_lock(hdev);

	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		params->conn_min_interval = min;
		params->conn_max_interval = max;
		params->conn_latency = latency;
		params->supervision_timeout = to_multiplier;
	}

	hci_dev_unlock(hdev);

	memset(&cp, 0, sizeof(cp));
	cp.handle		= cpu_to_le16(conn->handle);
	cp.conn_interval_min	= cpu_to_le16(min);
	cp.conn_interval_max	= cpu_to_le16(max);
	cp.conn_latency		= cpu_to_le16(latency);
	cp.supervision_timeout	= cpu_to_le16(to_multiplier);
	cp.min_ce_len		= cpu_to_le16(0x0000);
	cp.max_ce_len		= cpu_to_le16(0x0000);

	hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);

	if (params)
		return 0x01;

	return 0x00;
}

void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
		      __u8 ltk[16], __u8 key_size)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_le_start_enc cp;

	BT_DBG("hcon %p", conn);

	memset(&cp, 0, sizeof(cp));

	cp.handle = cpu_to_le16(conn->handle);
	cp.rand = rand;
	cp.ediv = ediv;
	memcpy(cp.ltk, ltk, key_size);

	hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
}

/* Device _must_ be locked */
void hci_sco_setup(struct hci_conn *conn, __u8 status)
{
	struct hci_link *link;

	link = list_first_entry_or_null(&conn->link_list, struct hci_link, list);
	if (!link || !link->conn)
		return;

	BT_DBG("hcon %p", conn);

	if (!status) {
		if (lmp_esco_capable(conn->hdev))
			hci_setup_sync(link->conn, conn->handle);
		else
			hci_add_sco(link->conn, conn->handle);
	} else {
		hci_connect_cfm(link->conn, status);
		hci_conn_del(link->conn);
	}
}

static void hci_conn_timeout(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     disc_work.work);
	int refcnt = atomic_read(&conn->refcnt);

	BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));

	WARN_ON(refcnt < 0);

	/* FIXME: It was observed that in pairing failed scenario, refcnt
	 * drops below 0. Probably this is because l2cap_conn_del calls
	 * l2cap_chan_del for each channel, and inside l2cap_chan_del conn is
	 * dropped. After that loop hci_chan_del is called which also drops
	 * conn. For now make sure that ACL is alive if refcnt is higher than 0,
	 * otherwise drop it.
	 */
	if (refcnt > 0)
		return;

	hci_abort_conn(conn, hci_proto_disconn_ind(conn));
}

/* Enter sniff mode */
static void hci_conn_idle(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     idle_work.work);
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("hcon %p mode %d", conn, conn->mode);

	if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
		return;

	if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
		return;

	if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
		struct hci_cp_sniff_subrate cp;

		cp.handle             = cpu_to_le16(conn->handle);
		cp.max_latency        = cpu_to_le16(0);
		cp.min_remote_timeout = cpu_to_le16(0);
		cp.min_local_timeout  = cpu_to_le16(0);
		hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
	}

	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
		struct hci_cp_sniff_mode cp;

		cp.handle       = cpu_to_le16(conn->handle);
		cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
		cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
		cp.attempt      = cpu_to_le16(4);
		cp.timeout      = cpu_to_le16(1);
		hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
	}
}

static void hci_conn_auto_accept(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     auto_accept_work.work);

	hci_send_cmd(conn->hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
		     &conn->dst);
}

static void le_disable_advertising(struct hci_dev *hdev)
{
	if (ext_adv_capable(hdev)) {
		struct hci_cp_le_set_ext_adv_enable cp;

		cp.enable = 0x00;
		cp.num_of_sets = 0x00;

		hci_send_cmd(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp),
			     &cp);
	} else {
		u8 enable = 0x00;

		hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
			     &enable);
	}
}

static void le_conn_timeout(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     le_conn_timeout.work);
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("");

	/* We could end up here due to having done directed advertising,
	 * so clean up the state if necessary. This should however only
	 * happen with broken hardware or if low duty cycle was used
	 * (which doesn't have a timeout of its own).
	 */
	if (conn->role == HCI_ROLE_SLAVE) {
		/* Disable LE Advertising */
		le_disable_advertising(hdev);
		hci_dev_lock(hdev);
		hci_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
		hci_dev_unlock(hdev);
		return;
	}

	hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
}

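/* Scratch buffer for LE Set CIG Parameters: the command header plus up
 * to 0x1f CIS entries, the maximum num_cis allowed by the spec.
 */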
struct iso_cig_params {
	struct hci_cp_le_set_cig_params cp;
	struct hci_cis_params cis[0x1f];
};

struct iso_list_data {
	union {
		u8  cig;
		u8  big;
	};
	union {
		u8  cis;
		u8  bis;
		u16 sync_handle;
	};
	int count;
	bool big_term;
	bool pa_sync_term;
	bool big_sync_term;
};

static void bis_list(struct hci_conn *conn, void *data)
{
	struct iso_list_data *d = data;

	/* Skip if not broadcast/ANY address */
	if (bacmp(&conn->dst, BDADDR_ANY))
		return;

	if (d->big != conn->iso_qos.bcast.big || d->bis == BT_ISO_QOS_BIS_UNSET ||
	    d->bis != conn->iso_qos.bcast.bis)
		return;

	d->count++;
}

static int terminate_big_sync(struct hci_dev *hdev, void *data)
{
	struct iso_list_data *d = data;

	bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", d->big, d->bis);

	hci_disable_per_advertising_sync(hdev, d->bis);
	hci_remove_ext_adv_instance_sync(hdev, d->bis, NULL);

	/* Only terminate BIG if it has been created */
	if (!d->big_term)
		return 0;

	return hci_le_terminate_big_sync(hdev, d->big,
					 HCI_ERROR_LOCAL_HOST_TERM);
}

static void terminate_big_destroy(struct hci_dev *hdev, void *data, int err)
{
	kfree(data);
}

static int hci_le_terminate_big(struct hci_dev *hdev, struct hci_conn *conn)
{
	struct iso_list_data *d;
	int ret;

	bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", conn->iso_qos.bcast.big,
		   conn->iso_qos.bcast.bis);

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->big = conn->iso_qos.bcast.big;
	d->bis = conn->iso_qos.bcast.bis;
	d->big_term = test_and_clear_bit(HCI_CONN_BIG_CREATED, &conn->flags);

	ret = hci_cmd_sync_queue(hdev, terminate_big_sync, d,
				 terminate_big_destroy);
	if (ret)
		kfree(d);

	return ret;
}

static int big_terminate_sync(struct hci_dev *hdev, void *data)
{
	struct iso_list_data *d = data;

	bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", d->big,
		   d->sync_handle);

	if (d->big_sync_term)
		hci_le_big_terminate_sync(hdev, d->big);

	if (d->pa_sync_term)
		return hci_le_pa_terminate_sync(hdev, d->sync_handle);

	return 0;
}

static int hci_le_big_terminate(struct hci_dev *hdev, u8 big, struct hci_conn *conn)
{
	struct iso_list_data *d;
	int ret;

	bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", big, conn->sync_handle);

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->big = big;
	d->sync_handle = conn->sync_handle;
	d->pa_sync_term = test_and_clear_bit(HCI_CONN_PA_SYNC, &conn->flags);
	d->big_sync_term = test_and_clear_bit(HCI_CONN_BIG_SYNC, &conn->flags);

	ret = hci_cmd_sync_queue(hdev, big_terminate_sync, d,
				 terminate_big_destroy);
	if (ret)
		kfree(d);

	return ret;
}

/* Cleanup BIS connection
 *
 * Detects if there are any BIS left connected in a BIG
 * broadcaster: Remove advertising instance and terminate BIG.
 * broadcast receiver: Terminate BIG sync and terminate PA sync.
 */
static void bis_cleanup(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_conn *bis;

	bt_dev_dbg(hdev, "conn %p", conn);

	if (conn->role == HCI_ROLE_MASTER) {
		if (!test_and_clear_bit(HCI_CONN_PER_ADV, &conn->flags))
			return;

		/* Check if ISO connection is a BIS and terminate advertising
		 * set and BIG if there are no other connections using it.
		 */
		bis = hci_conn_hash_lookup_big(hdev, conn->iso_qos.bcast.big);
		if (bis)
			return;

		hci_le_terminate_big(hdev, conn);
	} else {
		bis = hci_conn_hash_lookup_big_any_dst(hdev,
						       conn->iso_qos.bcast.big);

		if (bis)
			return;

		hci_le_big_terminate(hdev, conn->iso_qos.bcast.big,
				     conn);
	}
}

static int remove_cig_sync(struct hci_dev *hdev, void *data)
{
	u8 handle = PTR_UINT(data);

	return hci_le_remove_cig_sync(hdev, handle);
}

static int hci_le_remove_cig(struct hci_dev *hdev, u8 handle)
{
	bt_dev_dbg(hdev, "handle 0x%2.2x", handle);

	return hci_cmd_sync_queue(hdev, remove_cig_sync, UINT_PTR(handle),
				  NULL);
}

static void find_cis(struct hci_conn *conn, void *data)
{
	struct iso_list_data *d = data;

	/* Ignore broadcast or if the CIG doesn't match */
	if (!bacmp(&conn->dst, BDADDR_ANY) || d->cig != conn->iso_qos.ucast.cig)
		return;

	d->count++;
}

/* Cleanup CIS connection:
 *
 * Detects if there are any CIS left connected in a CIG and removes the
 * CIG when there are none.
 */
static void cis_cleanup(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct iso_list_data d;

	if (conn->iso_qos.ucast.cig == BT_ISO_QOS_CIG_UNSET)
		return;

	memset(&d, 0, sizeof(d));
	d.cig = conn->iso_qos.ucast.cig;

	/* Check if ISO connection is a CIS and remove CIG if there are
	 * no other connections using it.
	 */
	hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_BOUND, &d);
	hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_CONNECT, &d);
	hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_CONNECTED, &d);
	if (d.count)
		return;

	hci_le_remove_cig(hdev, conn->iso_qos.ucast.cig);
}

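/* Allocate a placeholder handle above HCI_CONN_HANDLE_MAX from the
 * unset-handle IDA so it can never collide with a handle assigned by
 * the controller; it is released again in hci_conn_cleanup() or
 * hci_conn_set_handle().
 */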
static int hci_conn_hash_alloc_unset(struct hci_dev *hdev)
{
	return ida_alloc_range(&hdev->unset_handle_ida, HCI_CONN_HANDLE_MAX + 1,
			       U16_MAX, GFP_ATOMIC);
}

static struct hci_conn *__hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
				       u8 role, u16 handle)
{
	struct hci_conn *conn;

	switch (type) {
	case ACL_LINK:
		if (!hdev->acl_mtu)
			return ERR_PTR(-ECONNREFUSED);
		break;
	case ISO_LINK:
		if (hdev->iso_mtu)
			/* Dedicated ISO Buffer exists */
			break;
		fallthrough;
	case LE_LINK:
		if (hdev->le_mtu && hdev->le_mtu < HCI_MIN_LE_MTU)
			return ERR_PTR(-ECONNREFUSED);
		if (!hdev->le_mtu && hdev->acl_mtu < HCI_MIN_LE_MTU)
			return ERR_PTR(-ECONNREFUSED);
		break;
	case SCO_LINK:
	case ESCO_LINK:
		if (!hdev->sco_pkts)
			/* Controller does not support SCO or eSCO over HCI */
			return ERR_PTR(-ECONNREFUSED);
		break;
	default:
		return ERR_PTR(-ECONNREFUSED);
	}

	bt_dev_dbg(hdev, "dst %pMR handle 0x%4.4x", dst, handle);

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn)
		return ERR_PTR(-ENOMEM);

	bacpy(&conn->dst, dst);
	bacpy(&conn->src, &hdev->bdaddr);
	conn->handle = handle;
	conn->hdev  = hdev;
	conn->type  = type;
	conn->role  = role;
	conn->mode  = HCI_CM_ACTIVE;
	conn->state = BT_OPEN;
	conn->auth_type = HCI_AT_GENERAL_BONDING;
	conn->io_capability = hdev->io_capability;
	conn->remote_auth = 0xff;
	conn->key_type = 0xff;
	conn->rssi = HCI_RSSI_INVALID;
	conn->tx_power = HCI_TX_POWER_INVALID;
	conn->max_tx_power = HCI_TX_POWER_INVALID;
	conn->sync_handle = HCI_SYNC_HANDLE_INVALID;

	set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;

	/* Set Default Authenticated payload timeout to 30s */
	conn->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;

	if (conn->role == HCI_ROLE_MASTER)
		conn->out = true;

	switch (type) {
	case ACL_LINK:
		conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
		conn->mtu = hdev->acl_mtu;
		break;
	case LE_LINK:
		/* conn->src should reflect the local identity address */
		hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
		conn->mtu = hdev->le_mtu ? hdev->le_mtu : hdev->acl_mtu;
		break;
	case ISO_LINK:
		/* conn->src should reflect the local identity address */
		hci_copy_identity_address(hdev, &conn->src, &conn->src_type);

		/* set proper cleanup function */
		if (!bacmp(dst, BDADDR_ANY))
			conn->cleanup = bis_cleanup;
		else if (conn->role == HCI_ROLE_MASTER)
			conn->cleanup = cis_cleanup;

		conn->mtu = hdev->iso_mtu ? hdev->iso_mtu :
			    hdev->le_mtu ? hdev->le_mtu : hdev->acl_mtu;
		break;
	case SCO_LINK:
		if (lmp_esco_capable(hdev))
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
		else
			conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;

		conn->mtu = hdev->sco_mtu;
		break;
	case ESCO_LINK:
		conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
		conn->mtu = hdev->sco_mtu;
		break;
	}

	skb_queue_head_init(&conn->data_q);

	INIT_LIST_HEAD(&conn->chan_list);
	INIT_LIST_HEAD(&conn->link_list);

	INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
	INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
	INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
	INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);

	atomic_set(&conn->refcnt, 0);

	hci_dev_hold(hdev);

	hci_conn_hash_add(hdev, conn);

	/* The SCO and eSCO connections will only be notified when their
	 * setup has been completed. This is different to ACL links which
	 * can be notified right away.
	 */
	if (conn->type != SCO_LINK && conn->type != ESCO_LINK) {
		if (hdev->notify)
			hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);
	}

	hci_conn_init_sysfs(conn);

	return conn;
}

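/* Typical flow (sketch): callers that do not yet know the controller
 * assigned handle create the connection with hci_conn_add_unset() and
 * later move it to the real handle via hci_conn_set_handle() once the
 * controller reports it; hci_conn_add() covers the case where the
 * handle is already known.
 */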
struct hci_conn *hci_conn_add_unset(struct hci_dev *hdev, int type,
				    bdaddr_t *dst, u8 role)
{
	int handle;

	bt_dev_dbg(hdev, "dst %pMR", dst);

	handle = hci_conn_hash_alloc_unset(hdev);
	if (unlikely(handle < 0))
		return ERR_PTR(-ECONNREFUSED);

	return __hci_conn_add(hdev, type, dst, role, handle);
}

struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
			      u8 role, u16 handle)
{
	if (handle > HCI_CONN_HANDLE_MAX)
		return ERR_PTR(-EINVAL);

	return __hci_conn_add(hdev, type, dst, role, handle);
}

static void hci_conn_cleanup_child(struct hci_conn *conn, u8 reason)
{
	if (!reason)
		reason = HCI_ERROR_REMOTE_USER_TERM;

	/* Due to a race, the SCO/ISO conn might not be established yet at
	 * this point, and nothing else will clean it up. In other cases it
	 * is done via HCI events.
	 */
	switch (conn->type) {
	case SCO_LINK:
	case ESCO_LINK:
		if (HCI_CONN_HANDLE_UNSET(conn->handle))
			hci_conn_failed(conn, reason);
		break;
	case ISO_LINK:
		if (conn->state != BT_CONNECTED &&
		    !test_bit(HCI_CONN_CREATE_CIS, &conn->flags))
			hci_conn_failed(conn, reason);
		break;
	}
}

static void hci_conn_unlink(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	bt_dev_dbg(hdev, "hcon %p", conn);

	if (!conn->parent) {
		struct hci_link *link, *t;

		list_for_each_entry_safe(link, t, &conn->link_list, list) {
			struct hci_conn *child = link->conn;

			hci_conn_unlink(child);

			/* If hdev is down it means
			 * hci_dev_close_sync/hci_conn_hash_flush is in
			 * progress and the links don't need to be cleaned up
			 * here since all connections will be cleaned up there.
			 */
			if (!test_bit(HCI_UP, &hdev->flags))
				continue;

			hci_conn_cleanup_child(child, conn->abort_reason);
		}

		return;
	}

	if (!conn->link)
		return;

	list_del_rcu(&conn->link->list);
	synchronize_rcu();

	hci_conn_drop(conn->parent);
	hci_conn_put(conn->parent);
	conn->parent = NULL;

	kfree(conn->link);
	conn->link = NULL;
}

void hci_conn_del(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);

	hci_conn_unlink(conn);

	cancel_delayed_work_sync(&conn->disc_work);
	cancel_delayed_work_sync(&conn->auto_accept_work);
	cancel_delayed_work_sync(&conn->idle_work);

	if (conn->type == ACL_LINK) {
		/* Unacked frames */
		hdev->acl_cnt += conn->sent;
	} else if (conn->type == LE_LINK) {
		cancel_delayed_work(&conn->le_conn_timeout);

		if (hdev->le_pkts)
			hdev->le_cnt += conn->sent;
		else
			hdev->acl_cnt += conn->sent;
	} else {
		/* Unacked ISO frames */
		if (conn->type == ISO_LINK) {
			if (hdev->iso_pkts)
				hdev->iso_cnt += conn->sent;
			else if (hdev->le_pkts)
				hdev->le_cnt += conn->sent;
			else
				hdev->acl_cnt += conn->sent;
		}
	}

	skb_queue_purge(&conn->data_q);

	/* Remove the connection from the list and cleanup its remaining
	 * state. This is a separate function since for some cases like
	 * BT_CONNECT_SCAN we *only* want the cleanup part without the
	 * rest of hci_conn_del.
	 */
	hci_conn_cleanup(conn);

	/* Dequeue callbacks using connection pointer as data */
	hci_cmd_sync_dequeue(hdev, NULL, conn, NULL);
}

struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, uint8_t src_type)
{
	int use_src = bacmp(src, BDADDR_ANY);
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%pMR -> %pMR", src, dst);

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(d, &hci_dev_list, list) {
		if (!test_bit(HCI_UP, &d->flags) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Simple routing:
		 *   No source address - find interface with bdaddr != dst
		 *   Source address    - find interface with bdaddr == src
		 */

		if (use_src) {
			bdaddr_t id_addr;
			u8 id_addr_type;

			if (src_type == BDADDR_BREDR) {
				if (!lmp_bredr_capable(d))
					continue;
				bacpy(&id_addr, &d->bdaddr);
				id_addr_type = BDADDR_BREDR;
			} else {
				if (!lmp_le_capable(d))
					continue;

				hci_copy_identity_address(d, &id_addr,
							  &id_addr_type);

				/* Convert from HCI to three-value type */
				if (id_addr_type == ADDR_LE_DEV_PUBLIC)
					id_addr_type = BDADDR_LE_PUBLIC;
				else
					id_addr_type = BDADDR_LE_RANDOM;
			}

			if (!bacmp(&id_addr, src) && id_addr_type == src_type) {
				hdev = d; break;
			}
		} else {
			if (bacmp(&d->bdaddr, dst)) {
				hdev = d; break;
			}
		}
	}

	if (hdev)
		hdev = hci_dev_hold(hdev);

	read_unlock(&hci_dev_list_lock);
	return hdev;
}
EXPORT_SYMBOL(hci_get_route);

/* This function requires the caller holds hdev->lock */
static void hci_le_conn_failed(struct hci_conn *conn, u8 status)
{
	struct hci_dev *hdev = conn->hdev;

	hci_connect_le_scan_cleanup(conn, status);

	/* Enable advertising in case this was a failed connection
	 * attempt as a peripheral.
	 */
	hci_enable_advertising(hdev);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_failed(struct hci_conn *conn, u8 status)
{
	struct hci_dev *hdev = conn->hdev;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	switch (conn->type) {
	case LE_LINK:
		hci_le_conn_failed(conn, status);
		break;
	case ACL_LINK:
		mgmt_connect_failed(hdev, conn, status);
		break;
	}

	/* In case BIG/PA sync failed, clear the conn flags so that
	 * the conns will be correctly cleaned up by the ISO layer.
	 */
	test_and_clear_bit(HCI_CONN_BIG_SYNC_FAILED, &conn->flags);
	test_and_clear_bit(HCI_CONN_PA_SYNC_FAILED, &conn->flags);

	conn->state = BT_CLOSED;
	hci_connect_cfm(conn, status);
	hci_conn_del(conn);
}

/* This function requires the caller holds hdev->lock */
u8 hci_conn_set_handle(struct hci_conn *conn, u16 handle)
{
	struct hci_dev *hdev = conn->hdev;

	bt_dev_dbg(hdev, "hcon %p handle 0x%4.4x", conn, handle);

	if (conn->handle == handle)
		return 0;

	if (handle > HCI_CONN_HANDLE_MAX) {
		bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x",
			   handle, HCI_CONN_HANDLE_MAX);
		return HCI_ERROR_INVALID_PARAMETERS;
	}

	/* If abort_reason has been set it means the connection is being
	 * aborted and the handle shall not be changed.
	 */
	if (conn->abort_reason)
		return conn->abort_reason;

	if (HCI_CONN_HANDLE_UNSET(conn->handle))
		ida_free(&hdev->unset_handle_ida, conn->handle);

	conn->handle = handle;

	return 0;
}

struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
				u8 dst_type, bool dst_resolved, u8 sec_level,
				u16 conn_timeout, u8 role)
{
	struct hci_conn *conn;
	struct smp_irk *irk;
	int err;

	/* Let's make sure that LE is enabled. */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (lmp_le_capable(hdev))
			return ERR_PTR(-ECONNREFUSED);

		return ERR_PTR(-EOPNOTSUPP);
	}

	/* Since the controller supports only one LE connection attempt at a
	 * time, we return -EBUSY if there is any connection attempt running.
	 */
	if (hci_lookup_le_connect(hdev))
		return ERR_PTR(-EBUSY);

	/* If there's already a connection object but it's not in
	 * scanning state it means it must already be established, in
	 * which case we can't do anything else except report a failure
	 * to connect.
	 */
	conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
	if (conn && !test_bit(HCI_CONN_SCANNING, &conn->flags))
		return ERR_PTR(-EBUSY);

	/* Check if the destination address has been resolved by the controller
	 * since if it did then the identity address shall be used.
	 */
	if (!dst_resolved) {
		/* When given an identity address with existing identity
		 * resolving key, the connection needs to be established
		 * to a resolvable random address.
		 *
		 * Storing the resolvable random address is required here
		 * to handle connection failures. The address will later
		 * be resolved back into the original identity address
		 * from the connect request.
		 */
		irk = hci_find_irk_by_addr(hdev, dst, dst_type);
		if (irk && bacmp(&irk->rpa, BDADDR_ANY)) {
			dst = &irk->rpa;
			dst_type = ADDR_LE_DEV_RANDOM;
		}
	}

	if (conn) {
		bacpy(&conn->dst, dst);
	} else {
		conn = hci_conn_add_unset(hdev, LE_LINK, dst, role);
		if (IS_ERR(conn))
			return conn;
		hci_conn_hold(conn);
		conn->pending_sec_level = sec_level;
	}

	conn->dst_type = dst_type;
	conn->sec_level = BT_SECURITY_LOW;
	conn->conn_timeout = conn_timeout;

	err = hci_connect_le_sync(hdev, conn);
	if (err) {
		hci_conn_del(conn);
		return ERR_PTR(err);
	}

	return conn;
}

static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
{
	struct hci_conn *conn;

	conn = hci_conn_hash_lookup_le(hdev, addr, type);
	if (!conn)
		return false;

	if (conn->state != BT_CONNECTED)
		return false;

	return true;
}

/* This function requires the caller holds hdev->lock */
static int hci_explicit_conn_params_set(struct hci_dev *hdev,
					bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	if (is_connected(hdev, addr, addr_type))
		return -EISCONN;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (!params) {
		params = hci_conn_params_add(hdev, addr, addr_type);
		if (!params)
			return -ENOMEM;

		/* If we created new params, mark them to be deleted in
		 * hci_connect_le_scan_cleanup. It's a different case from
		 * existing disabled params, which will stay after cleanup.
		 */
		params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
	}

	/* We're trying to connect, so make sure params are at pend_le_conns */
	if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
	    params->auto_connect == HCI_AUTO_CONN_REPORT ||
	    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
		hci_pend_le_list_del_init(params);
		hci_pend_le_list_add(params, &hdev->pend_le_conns);
	}

	params->explicit_connect = true;

	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
	       params->auto_connect);

	return 0;
}

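/* Allocate the first BIG handle (scanning 0x00-0xee) that has no
 * connection in the hash yet, when the QoS left it unset.
 */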
static int qos_set_big(struct hci_dev *hdev, struct bt_iso_qos *qos)
{
	struct hci_conn *conn;
	u8  big;

	/* Allocate a BIG if not set */
	if (qos->bcast.big == BT_ISO_QOS_BIG_UNSET) {
		for (big = 0x00; big < 0xef; big++) {
			conn = hci_conn_hash_lookup_big(hdev, big);
			if (!conn)
				break;
		}

		if (big == 0xef)
			return -EADDRNOTAVAIL;

		/* Update BIG */
		qos->bcast.big = big;
	}

	return 0;
}

static int qos_set_bis(struct hci_dev *hdev, struct bt_iso_qos *qos)
{
	struct hci_conn *conn;
	u8  bis;

	/* Allocate BIS if not set */
	if (qos->bcast.bis == BT_ISO_QOS_BIS_UNSET) {
		/* Find an unused adv set to advertise the BIS, skip instance
		 * 0x00 since it is reserved as the general purpose set.
		 */
		for (bis = 0x01; bis < hdev->le_num_of_adv_sets;
		     bis++) {
			conn = hci_conn_hash_lookup_bis(hdev, BDADDR_ANY, bis);
			if (!conn)
				break;
		}

		if (bis == hdev->le_num_of_adv_sets)
			return -EADDRNOTAVAIL;

		/* Update BIS */
		qos->bcast.bis = bis;
	}

	return 0;
}

/* This function requires the caller holds hdev->lock */
static struct hci_conn *hci_add_bis(struct hci_dev *hdev, bdaddr_t *dst,
				    struct bt_iso_qos *qos, __u8 base_len,
				    __u8 *base)
{
	struct hci_conn *conn;
	int err;

	/* Let's make sure that LE is enabled. */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (lmp_le_capable(hdev))
			return ERR_PTR(-ECONNREFUSED);
		return ERR_PTR(-EOPNOTSUPP);
	}

	err = qos_set_big(hdev, qos);
	if (err)
		return ERR_PTR(err);

	err = qos_set_bis(hdev, qos);
	if (err)
		return ERR_PTR(err);

	/* Check if the LE Create BIG command has already been sent */
	conn = hci_conn_hash_lookup_per_adv_bis(hdev, dst, qos->bcast.big,
						qos->bcast.bis);
	if (conn)
		return ERR_PTR(-EADDRINUSE);

	/* Check BIS settings against other bound BISes, since all
	 * BISes in a BIG must have the same value for all parameters
	 */
	conn = hci_conn_hash_lookup_big(hdev, qos->bcast.big);

	if (conn && (memcmp(qos, &conn->iso_qos, sizeof(*qos)) ||
		     base_len != conn->le_per_adv_data_len ||
		     memcmp(conn->le_per_adv_data, base, base_len)))
		return ERR_PTR(-EADDRINUSE);

	conn = hci_conn_add_unset(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
	if (IS_ERR(conn))
		return conn;

	conn->state = BT_CONNECT;

	hci_conn_hold(conn);
	return conn;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
				     u8 dst_type, u8 sec_level,
				     u16 conn_timeout,
				     enum conn_reasons conn_reason)
{
	struct hci_conn *conn;

	/* Let's make sure that LE is enabled. */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (lmp_le_capable(hdev))
			return ERR_PTR(-ECONNREFUSED);

		return ERR_PTR(-EOPNOTSUPP);
	}

	/* Some devices send ATT messages as soon as the physical link is
	 * established. To be able to handle these ATT messages, the user-
	 * space first establishes the connection and then starts the pairing
	 * process.
	 *
	 * So if a hci_conn object already exists for the following connection
	 * attempt, we simply update pending_sec_level and auth_type fields
	 * and return the object found.
	 */
	conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
	if (conn) {
		if (conn->pending_sec_level < sec_level)
			conn->pending_sec_level = sec_level;
		goto done;
	}

	BT_DBG("requesting refresh of dst_addr");

	conn = hci_conn_add_unset(hdev, LE_LINK, dst, HCI_ROLE_MASTER);
	if (IS_ERR(conn))
		return conn;

	if (hci_explicit_conn_params_set(hdev, dst, dst_type) < 0) {
		hci_conn_del(conn);
		return ERR_PTR(-EBUSY);
	}

	conn->state = BT_CONNECT;
	set_bit(HCI_CONN_SCANNING, &conn->flags);
	conn->dst_type = dst_type;
	conn->sec_level = BT_SECURITY_LOW;
	conn->pending_sec_level = sec_level;
	conn->conn_timeout = conn_timeout;
	conn->conn_reason = conn_reason;

	hci_update_passive_scan(hdev);

done:
	hci_conn_hold(conn);
	return conn;
}

struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
				 u8 sec_level, u8 auth_type,
				 enum conn_reasons conn_reason)
{
	struct hci_conn *acl;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (lmp_bredr_capable(hdev))
			return ERR_PTR(-ECONNREFUSED);

		return ERR_PTR(-EOPNOTSUPP);
	}

	/* Reject outgoing connections to a device with the same BD_ADDR,
	 * guarding against CVE-2020-26555.
	 */
	if (!bacmp(&hdev->bdaddr, dst)) {
		bt_dev_dbg(hdev, "Reject connection with same BD_ADDR %pMR\n",
			   dst);
		return ERR_PTR(-ECONNREFUSED);
	}

	acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
	if (!acl) {
		acl = hci_conn_add_unset(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
		if (IS_ERR(acl))
			return acl;
	}

	hci_conn_hold(acl);

	acl->conn_reason = conn_reason;
	if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
		int err;

		acl->sec_level = BT_SECURITY_LOW;
		acl->pending_sec_level = sec_level;
		acl->auth_type = auth_type;

		err = hci_connect_acl_sync(hdev, acl);
		if (err) {
			hci_conn_del(acl);
			return ERR_PTR(err);
		}
	}

	return acl;
}

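/* Link conn (e.g. an SCO or CIS) to its parent ACL/LE connection,
 * taking a reference on both ends; undone by hci_conn_unlink(). Returns
 * the existing link if one is already in place, or NULL on failure.
 */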
static struct hci_link *hci_conn_link(struct hci_conn *parent,
				      struct hci_conn *conn)
{
	struct hci_dev *hdev = parent->hdev;
	struct hci_link *link;

	bt_dev_dbg(hdev, "parent %p hcon %p", parent, conn);

	if (conn->link)
		return conn->link;

	if (conn->parent)
		return NULL;

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return NULL;

	link->conn = hci_conn_hold(conn);
	conn->link = link;
	conn->parent = hci_conn_get(parent);

	/* Use list_add_tail_rcu to append to the list */
	list_add_tail_rcu(&link->list, &parent->link_list);

	return link;
}

struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
				 __u16 setting, struct bt_codec *codec)
{
	struct hci_conn *acl;
	struct hci_conn *sco;
	struct hci_link *link;

	acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING,
			      CONN_REASON_SCO_CONNECT);
	if (IS_ERR(acl))
		return acl;

	sco = hci_conn_hash_lookup_ba(hdev, type, dst);
	if (!sco) {
		sco = hci_conn_add_unset(hdev, type, dst, HCI_ROLE_MASTER);
		if (IS_ERR(sco)) {
			hci_conn_drop(acl);
			return sco;
		}
	}

	link = hci_conn_link(acl, sco);
	if (!link) {
		hci_conn_drop(acl);
		hci_conn_drop(sco);
		return ERR_PTR(-ENOLINK);
	}

	sco->setting = setting;
	sco->codec = *codec;

	if (acl->state == BT_CONNECTED &&
	    (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
		set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
		hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);

		if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
			/* defer SCO setup until mode change completed */
			set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
			return sco;
		}

		hci_sco_setup(acl, 0x00);
	}

	return sco;
}

static int hci_le_create_big(struct hci_conn *conn, struct bt_iso_qos *qos)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_le_create_big cp;
	struct iso_list_data data;

	memset(&cp, 0, sizeof(cp));

	data.big = qos->bcast.big;
	data.bis = qos->bcast.bis;
	data.count = 0;

	/* Create a BIS for each bound connection */
	hci_conn_hash_list_state(hdev, bis_list, ISO_LINK,
				 BT_BOUND, &data);

	cp.handle = qos->bcast.big;
	cp.adv_handle = qos->bcast.bis;
	cp.num_bis  = data.count;
	hci_cpu_to_le24(qos->bcast.out.interval, cp.bis.sdu_interval);
	cp.bis.sdu = cpu_to_le16(qos->bcast.out.sdu);
	cp.bis.latency = cpu_to_le16(qos->bcast.out.latency);
	cp.bis.rtn  = qos->bcast.out.rtn;
	cp.bis.phy  = qos->bcast.out.phy;
	cp.bis.packing = qos->bcast.packing;
	cp.bis.framing = qos->bcast.framing;
	cp.bis.encryption = qos->bcast.encryption;
	memcpy(cp.bis.bcode, qos->bcast.bcode, sizeof(cp.bis.bcode));

	return hci_send_cmd(hdev, HCI_OP_LE_CREATE_BIG, sizeof(cp), &cp);
}

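/* Runs from the hci_cmd_sync queue: rebuild the complete parameter set
 * for the given CIG by walking every CIS that shares its cig_id, then
 * issue LE Set CIG Parameters with all of them at once.
 */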
set_cig_params_sync(struct hci_dev * hdev,void * data)1726 static int set_cig_params_sync(struct hci_dev *hdev, void *data)
1727 {
1728 	u8 cig_id = PTR_UINT(data);
1729 	struct hci_conn *conn;
1730 	struct bt_iso_qos *qos;
1731 	struct iso_cig_params pdu;
1732 	u8 cis_id;
1733 
1734 	conn = hci_conn_hash_lookup_cig(hdev, cig_id);
1735 	if (!conn)
1736 		return 0;
1737 
1738 	memset(&pdu, 0, sizeof(pdu));
1739 
1740 	qos = &conn->iso_qos;
1741 	pdu.cp.cig_id = cig_id;
1742 	hci_cpu_to_le24(qos->ucast.out.interval, pdu.cp.c_interval);
1743 	hci_cpu_to_le24(qos->ucast.in.interval, pdu.cp.p_interval);
1744 	pdu.cp.sca = qos->ucast.sca;
1745 	pdu.cp.packing = qos->ucast.packing;
1746 	pdu.cp.framing = qos->ucast.framing;
1747 	pdu.cp.c_latency = cpu_to_le16(qos->ucast.out.latency);
1748 	pdu.cp.p_latency = cpu_to_le16(qos->ucast.in.latency);
1749 
1750 	/* Reprogram all CIS(s) with the same CIG, valid range are:
1751 	 * num_cis: 0x00 to 0x1F
1752 	 * cis_id: 0x00 to 0xEF
1753 	 */
1754 	for (cis_id = 0x00; cis_id < 0xf0 &&
1755 	     pdu.cp.num_cis < ARRAY_SIZE(pdu.cis); cis_id++) {
1756 		struct hci_cis_params *cis;
1757 
1758 		conn = hci_conn_hash_lookup_cis(hdev, NULL, 0, cig_id, cis_id);
1759 		if (!conn)
1760 			continue;
1761 
1762 		qos = &conn->iso_qos;
1763 
1764 		cis = &pdu.cis[pdu.cp.num_cis++];
1765 		cis->cis_id = cis_id;
1766 		cis->c_sdu  = cpu_to_le16(conn->iso_qos.ucast.out.sdu);
1767 		cis->p_sdu  = cpu_to_le16(conn->iso_qos.ucast.in.sdu);
1768 		cis->c_phy  = qos->ucast.out.phy ? qos->ucast.out.phy :
1769 			      qos->ucast.in.phy;
1770 		cis->p_phy  = qos->ucast.in.phy ? qos->ucast.in.phy :
1771 			      qos->ucast.out.phy;
1772 		cis->c_rtn  = qos->ucast.out.rtn;
1773 		cis->p_rtn  = qos->ucast.in.rtn;
1774 	}
1775 
1776 	if (!pdu.cp.num_cis)
1777 		return 0;
1778 
1779 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_CIG_PARAMS,
1780 				     sizeof(pdu.cp) +
1781 				     pdu.cp.num_cis * sizeof(pdu.cis[0]), &pdu,
1782 				     HCI_CMD_TIMEOUT);
1783 }
1784 
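/* Editor's note (worked example, not original code): the Set CIG
 * Parameters command above is variable length, so with e.g. two CISes
 * programmed the size handed to __hci_cmd_sync_status() works out to
 * sizeof(pdu.cp) + 2 * sizeof(pdu.cis[0]).
 */
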
1785 static bool hci_le_set_cig_params(struct hci_conn *conn, struct bt_iso_qos *qos)
1786 {
1787 	struct hci_dev *hdev = conn->hdev;
1788 	struct iso_list_data data;
1789 
1790 	memset(&data, 0, sizeof(data));
1791 
1792 	/* Allocate the first still-reconfigurable CIG if not set */
1793 	if (qos->ucast.cig == BT_ISO_QOS_CIG_UNSET) {
1794 		for (data.cig = 0x00; data.cig < 0xf0; data.cig++) {
1795 			data.count = 0;
1796 
1797 			hci_conn_hash_list_state(hdev, find_cis, ISO_LINK,
1798 						 BT_CONNECT, &data);
1799 			if (data.count)
1800 				continue;
1801 
1802 			hci_conn_hash_list_state(hdev, find_cis, ISO_LINK,
1803 						 BT_CONNECTED, &data);
1804 			if (!data.count)
1805 				break;
1806 		}
1807 
1808 		if (data.cig == 0xf0)
1809 			return false;
1810 
1811 		/* Update CIG */
1812 		qos->ucast.cig = data.cig;
1813 	}
1814 
1815 	if (qos->ucast.cis != BT_ISO_QOS_CIS_UNSET) {
1816 		if (hci_conn_hash_lookup_cis(hdev, NULL, 0, qos->ucast.cig,
1817 					     qos->ucast.cis))
1818 			return false;
1819 		goto done;
1820 	}
1821 
1822 	/* Allocate the first available CIS if not set */
1823 	for (data.cig = qos->ucast.cig, data.cis = 0x00; data.cis < 0xf0;
1824 	     data.cis++) {
1825 		if (!hci_conn_hash_lookup_cis(hdev, NULL, 0, data.cig,
1826 					      data.cis)) {
1827 			/* Update CIS */
1828 			qos->ucast.cis = data.cis;
1829 			break;
1830 		}
1831 	}
1832 
1833 	if (qos->ucast.cis == BT_ISO_QOS_CIS_UNSET)
1834 		return false;
1835 
1836 done:
1837 	if (hci_cmd_sync_queue(hdev, set_cig_params_sync,
1838 			       UINT_PTR(qos->ucast.cig), NULL) < 0)
1839 		return false;
1840 
1841 	return true;
1842 }
1843 
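/* Editor's sketch, not part of the original source: a caller can leave
 * both identifiers unset so the allocation scans above pick the first
 * free CIG/CIS; hdev and dst below are assumed caller context.
 */
#if 0	/* illustrative only, never compiled */
	struct bt_iso_qos qos = {};
	struct hci_conn *cis;

	qos.ucast.cig = BT_ISO_QOS_CIG_UNSET;	/* pick first free CIG */
	qos.ucast.cis = BT_ISO_QOS_CIS_UNSET;	/* pick first free CIS */

	cis = hci_bind_cis(hdev, &dst, ADDR_LE_DEV_PUBLIC, &qos);
#endif
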
1844 struct hci_conn *hci_bind_cis(struct hci_dev *hdev, bdaddr_t *dst,
1845 			      __u8 dst_type, struct bt_iso_qos *qos)
1846 {
1847 	struct hci_conn *cis;
1848 
1849 	cis = hci_conn_hash_lookup_cis(hdev, dst, dst_type, qos->ucast.cig,
1850 				       qos->ucast.cis);
1851 	if (!cis) {
1852 		cis = hci_conn_add_unset(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
1853 		if (IS_ERR(cis))
1854 			return cis;
1855 		cis->cleanup = cis_cleanup;
1856 		cis->dst_type = dst_type;
1857 		cis->iso_qos.ucast.cig = BT_ISO_QOS_CIG_UNSET;
1858 		cis->iso_qos.ucast.cis = BT_ISO_QOS_CIS_UNSET;
1859 	}
1860 
1861 	if (cis->state == BT_CONNECTED)
1862 		return cis;
1863 
1864 	/* Check if the CIS has been set up and the settings match */
1865 	if (cis->state == BT_BOUND &&
1866 	    !memcmp(&cis->iso_qos, qos, sizeof(*qos)))
1867 		return cis;
1868 
1869 	/* Update LINK PHYs according to QoS preference */
1870 	cis->le_tx_phy = qos->ucast.out.phy;
1871 	cis->le_rx_phy = qos->ucast.in.phy;
1872 
1873 	/* If output interval is not set use the input interval as it cannot be
1874 	 * 0x000000.
1875 	 */
1876 	if (!qos->ucast.out.interval)
1877 		qos->ucast.out.interval = qos->ucast.in.interval;
1878 
1879 	/* If input interval is not set use the output interval as it cannot be
1880 	 * 0x000000.
1881 	 */
1882 	if (!qos->ucast.in.interval)
1883 		qos->ucast.in.interval = qos->ucast.out.interval;
1884 
1885 	/* If output latency is not set use the input latency as it cannot be
1886 	 * 0x0000.
1887 	 */
1888 	if (!qos->ucast.out.latency)
1889 		qos->ucast.out.latency = qos->ucast.in.latency;
1890 
1891 	/* If input latency is not set use the output latency as it cannot be
1892 	 * 0x0000.
1893 	 */
1894 	if (!qos->ucast.in.latency)
1895 		qos->ucast.in.latency = qos->ucast.out.latency;
1896 
1897 	if (!hci_le_set_cig_params(cis, qos)) {
1898 		hci_conn_drop(cis);
1899 		return ERR_PTR(-EINVAL);
1900 	}
1901 
1902 	hci_conn_hold(cis);
1903 
1904 	cis->iso_qos = *qos;
1905 	cis->state = BT_BOUND;
1906 
1907 	return cis;
1908 }
1909 
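/* Editor's worked example (not original code) for the fallbacks above:
 * if only the output direction is configured, say out.interval = 10000 us
 * and out.latency = 20 ms while both input values are 0, the input
 * direction inherits in.interval = 10000 and in.latency = 20, so the CIG
 * is never programmed with a zero interval or latency.
 */
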
1910 bool hci_iso_setup_path(struct hci_conn *conn)
1911 {
1912 	struct hci_dev *hdev = conn->hdev;
1913 	struct hci_cp_le_setup_iso_path cmd;
1914 
1915 	memset(&cmd, 0, sizeof(cmd));
1916 
1917 	if (conn->iso_qos.ucast.out.sdu) {
1918 		cmd.handle = cpu_to_le16(conn->handle);
1919 		cmd.direction = 0x00; /* Input (Host to Controller) */
1920 		cmd.path = 0x00; /* HCI path if enabled */
1921 		cmd.codec = 0x03; /* Transparent Data */
1922 
1923 		if (hci_send_cmd(hdev, HCI_OP_LE_SETUP_ISO_PATH, sizeof(cmd),
1924 				 &cmd) < 0)
1925 			return false;
1926 	}
1927 
1928 	if (conn->iso_qos.ucast.in.sdu) {
1929 		cmd.handle = cpu_to_le16(conn->handle);
1930 		cmd.direction = 0x01; /* Output (Controller to Host) */
1931 		cmd.path = 0x00; /* HCI path if enabled */
1932 		cmd.codec = 0x03; /* Transparent Data */
1933 
1934 		if (hci_send_cmd(hdev, HCI_OP_LE_SETUP_ISO_PATH, sizeof(cmd),
1935 				 &cmd) < 0)
1936 			return false;
1937 	}
1938 
1939 	return true;
1940 }
1941 
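/* Editor's note (clarifying example): hci_iso_setup_path() issues up to
 * two LE Setup ISO Data Path commands, one per direction with a non-zero
 * SDU size: a bidirectional CIS produces both the 0x00 (input) and 0x01
 * (output) variants, while a unidirectional CIS produces a single command.
 */
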
1942 int hci_conn_check_create_cis(struct hci_conn *conn)
1943 {
1944 	if (conn->type != ISO_LINK || !bacmp(&conn->dst, BDADDR_ANY))
1945 		return -EINVAL;
1946 
1947 	if (!conn->parent || conn->parent->state != BT_CONNECTED ||
1948 	    conn->state != BT_CONNECT || HCI_CONN_HANDLE_UNSET(conn->handle))
1949 		return 1;
1950 
1951 	return 0;
1952 }
1953 
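/* Editor's sketch (illustrative only): hci_conn_check_create_cis() is
 * tri-state: a negative value means "not a CIS", 1 means "not ready
 * yet", 0 means "ready". A scan over the connection hash, as done by
 * hci_le_create_cis_pending() below, looks roughly like this (under
 * rcu_read_lock()):
 */
#if 0	/* example only, never compiled */
	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
		if (hci_conn_check_create_cis(conn))
			continue;	/* skip non-CIS and not-yet-ready */
		/* conn is ready to be included in LE Create CIS */
	}
#endif
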
1954 static int hci_create_cis_sync(struct hci_dev *hdev, void *data)
1955 {
1956 	return hci_le_create_cis_sync(hdev);
1957 }
1958 
1959 int hci_le_create_cis_pending(struct hci_dev *hdev)
1960 {
1961 	struct hci_conn *conn;
1962 	bool pending = false;
1963 
1964 	rcu_read_lock();
1965 
1966 	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
1967 		if (test_bit(HCI_CONN_CREATE_CIS, &conn->flags)) {
1968 			rcu_read_unlock();
1969 			return -EBUSY;
1970 		}
1971 
1972 		if (!hci_conn_check_create_cis(conn))
1973 			pending = true;
1974 	}
1975 
1976 	rcu_read_unlock();
1977 
1978 	if (!pending)
1979 		return 0;
1980 
1981 	/* Queue Create CIS */
1982 	return hci_cmd_sync_queue(hdev, hci_create_cis_sync, NULL, NULL);
1983 }
1984 
1985 static void hci_iso_qos_setup(struct hci_dev *hdev, struct hci_conn *conn,
1986 			      struct bt_iso_io_qos *qos, __u8 phy)
1987 {
1988 	/* Only set MTU if PHY is enabled */
1989 	if (!qos->sdu && qos->phy)
1990 		qos->sdu = conn->mtu;
1991 
1992 	/* Use the same PHY as ACL if set to any */
1993 	if (qos->phy == BT_ISO_PHY_ANY)
1994 		qos->phy = phy;
1995 
1996 	/* Use LE ACL connection interval if not set */
1997 	if (!qos->interval)
1998 		/* Convert ACL interval (1.25 ms units) to us */
1999 		qos->interval = conn->le_conn_interval * 1250;
2000 
2001 	/* Use LE ACL connection latency if not set */
2002 	if (!qos->latency)
2003 		qos->latency = conn->le_conn_latency;
2004 }
2005 
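/* Editor's worked example (not original code): with an LE ACL connection
 * interval of 24 units (24 * 1.25 ms = 30 ms), an unset SDU interval
 * above defaults to 24 * 1250 = 30000 us.
 */
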
2006 static int create_big_sync(struct hci_dev *hdev, void *data)
2007 {
2008 	struct hci_conn *conn = data;
2009 	struct bt_iso_qos *qos = &conn->iso_qos;
2010 	u16 interval, sync_interval = 0;
2011 	u32 flags = 0;
2012 	int err;
2013 
2014 	if (qos->bcast.out.phy == 0x02)
2015 		flags |= MGMT_ADV_FLAG_SEC_2M;
2016 
2017 	/* Align intervals */
2018 	interval = (qos->bcast.out.interval / 1250) * qos->bcast.sync_factor;
2019 
2020 	if (qos->bcast.bis)
2021 		sync_interval = interval * 4;
2022 
2023 	err = hci_start_per_adv_sync(hdev, qos->bcast.bis, conn->le_per_adv_data_len,
2024 				     conn->le_per_adv_data, flags, interval,
2025 				     interval, sync_interval);
2026 	if (err)
2027 		return err;
2028 
2029 	return hci_le_create_big(conn, &conn->iso_qos);
2030 }
2031 
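/* Editor's worked example (not original code): with out.interval =
 * 10000 us and sync_factor = 24, the aligned interval above is
 * (10000 / 1250) * 24 = 192, and sync_interval becomes 192 * 4 = 768
 * when a BIS handle is set.
 */
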
2032 static void create_pa_complete(struct hci_dev *hdev, void *data, int err)
2033 {
2034 	struct hci_cp_le_pa_create_sync *cp = data;
2035 
2036 	bt_dev_dbg(hdev, "");
2037 
2038 	if (err)
2039 		bt_dev_err(hdev, "Unable to create PA: %d", err);
2040 
2041 	kfree(cp);
2042 }
2043 
2044 static int create_pa_sync(struct hci_dev *hdev, void *data)
2045 {
2046 	struct hci_cp_le_pa_create_sync *cp = data;
2047 	int err;
2048 
2049 	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_CREATE_SYNC,
2050 				    sizeof(*cp), cp, HCI_CMD_TIMEOUT);
2051 	if (err) {
2052 		hci_dev_clear_flag(hdev, HCI_PA_SYNC);
2053 		return err;
2054 	}
2055 
2056 	return hci_update_passive_scan_sync(hdev);
2057 }
2058 
2059 int hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst, __u8 dst_type,
2060 		       __u8 sid, struct bt_iso_qos *qos)
2061 {
2062 	struct hci_cp_le_pa_create_sync *cp;
2063 
2064 	if (hci_dev_test_and_set_flag(hdev, HCI_PA_SYNC))
2065 		return -EBUSY;
2066 
2067 	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
2068 	if (!cp) {
2069 		hci_dev_clear_flag(hdev, HCI_PA_SYNC);
2070 		return -ENOMEM;
2071 	}
2072 
2073 	cp->options = qos->bcast.options;
2074 	cp->sid = sid;
2075 	cp->addr_type = dst_type;
2076 	bacpy(&cp->addr, dst);
2077 	cp->skip = cpu_to_le16(qos->bcast.skip);
2078 	cp->sync_timeout = cpu_to_le16(qos->bcast.sync_timeout);
2079 	cp->sync_cte_type = qos->bcast.sync_cte_type;
2080 
2081 	/* Queue pa_create_sync followed by the passive scan update */
2082 	return hci_cmd_sync_queue(hdev, create_pa_sync, cp, create_pa_complete);
2083 }
2084 
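/* Editor's sketch, not part of the original file: hypothetical caller
 * values for synchronizing to a broadcaster's periodic advertising
 * train; hdev and dst are assumed caller context.
 */
#if 0	/* illustrative only, never compiled */
	struct bt_iso_qos qos = {};
	int err;

	qos.bcast.options = 0x00;		/* use the given SID/address */
	qos.bcast.skip = 0x0000;		/* do not skip any events */
	qos.bcast.sync_timeout = 0x07d0;	/* 2000 * 10 ms = 20 s */
	qos.bcast.sync_cte_type = 0x00;

	err = hci_pa_create_sync(hdev, &dst, ADDR_LE_DEV_PUBLIC, 0x00, &qos);
#endif
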
2085 int hci_le_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon,
2086 			   struct bt_iso_qos *qos,
2087 			   __u16 sync_handle, __u8 num_bis, __u8 bis[])
2088 {
2089 	struct {
2090 		struct hci_cp_le_big_create_sync cp;
2091 		__u8  bis[0x11];
2092 	} __packed pdu;
2093 	int err;
2094 
2095 	if (num_bis > sizeof(pdu.bis))
2096 		return -EINVAL;
2097 
2098 	err = qos_set_big(hdev, qos);
2099 	if (err)
2100 		return err;
2101 
2102 	if (hcon)
2103 		hcon->iso_qos.bcast.big = qos->bcast.big;
2104 
2105 	memset(&pdu, 0, sizeof(pdu));
2106 	pdu.cp.handle = qos->bcast.big;
2107 	pdu.cp.sync_handle = cpu_to_le16(sync_handle);
2108 	pdu.cp.encryption = qos->bcast.encryption;
2109 	memcpy(pdu.cp.bcode, qos->bcast.bcode, sizeof(pdu.cp.bcode));
2110 	pdu.cp.mse = qos->bcast.mse;
2111 	pdu.cp.timeout = cpu_to_le16(qos->bcast.timeout);
2112 	pdu.cp.num_bis = num_bis;
2113 	memcpy(pdu.bis, bis, num_bis);
2114 
2115 	return hci_send_cmd(hdev, HCI_OP_LE_BIG_CREATE_SYNC,
2116 			    sizeof(pdu.cp) + num_bis, &pdu);
2117 }
2118 
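/* Editor's note (illustrative, not original code): BIS indices in the
 * list are 1-based per the core spec, so syncing to the first two BISes
 * of a previously PA-synced BIG might look like:
 */
#if 0	/* example only, never compiled */
	__u8 bis[] = { 0x01, 0x02 };

	err = hci_le_big_create_sync(hdev, hcon, &qos, sync_handle,
				     ARRAY_SIZE(bis), bis);
#endif
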
2119 static void create_big_complete(struct hci_dev *hdev, void *data, int err)
2120 {
2121 	struct hci_conn *conn = data;
2122 
2123 	bt_dev_dbg(hdev, "conn %p", conn);
2124 
2125 	if (err) {
2126 		bt_dev_err(hdev, "Unable to create BIG: %d", err);
2127 		hci_connect_cfm(conn, err);
2128 		hci_conn_del(conn);
2129 	}
2130 }
2131 
2132 struct hci_conn *hci_bind_bis(struct hci_dev *hdev, bdaddr_t *dst,
2133 			      struct bt_iso_qos *qos,
2134 			      __u8 base_len, __u8 *base)
2135 {
2136 	struct hci_conn *conn;
2137 	__u8 eir[HCI_MAX_PER_AD_LENGTH];
2138 
2139 	if (base_len && base)
2140 		base_len = eir_append_service_data(eir, 0, 0x1851,
2141 						   base, base_len);
2142 
2143 	/* We need a hci_conn object with BDADDR_ANY as dst */
2144 	conn = hci_add_bis(hdev, dst, qos, base_len, eir);
2145 	if (IS_ERR(conn))
2146 		return conn;
2147 
2148 	/* Update LINK PHYs according to QoS preference */
2149 	conn->le_tx_phy = qos->bcast.out.phy;
2150 	conn->le_rx_phy = qos->bcast.in.phy;
2151 
2152 	/* Add Basic Announcement into Periodic Adv Data if BASE is set */
2153 	if (base_len && base) {
2154 		memcpy(conn->le_per_adv_data, eir, sizeof(eir));
2155 		conn->le_per_adv_data_len = base_len;
2156 	}
2157 
2158 	hci_iso_qos_setup(hdev, conn, &qos->bcast.out,
2159 			  conn->le_tx_phy ? conn->le_tx_phy :
2160 			  hdev->le_tx_def_phys);
2161 
2162 	conn->iso_qos = *qos;
2163 	conn->state = BT_BOUND;
2164 
2165 	return conn;
2166 }
2167 
2168 static void bis_mark_per_adv(struct hci_conn *conn, void *data)
2169 {
2170 	struct iso_list_data *d = data;
2171 
2172 	/* Skip if not broadcast/ANY address */
2173 	if (bacmp(&conn->dst, BDADDR_ANY))
2174 		return;
2175 
2176 	if (d->big != conn->iso_qos.bcast.big ||
2177 	    d->bis == BT_ISO_QOS_BIS_UNSET ||
2178 	    d->bis != conn->iso_qos.bcast.bis)
2179 		return;
2180 
2181 	set_bit(HCI_CONN_PER_ADV, &conn->flags);
2182 }
2183 
2184 struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst,
2185 				 __u8 dst_type, struct bt_iso_qos *qos,
2186 				 __u8 base_len, __u8 *base)
2187 {
2188 	struct hci_conn *conn;
2189 	int err;
2190 	struct iso_list_data data;
2191 
2192 	conn = hci_bind_bis(hdev, dst, qos, base_len, base);
2193 	if (IS_ERR(conn))
2194 		return conn;
2195 
2196 	data.big = qos->bcast.big;
2197 	data.bis = qos->bcast.bis;
2198 
2199 	/* Set HCI_CONN_PER_ADV for all bound connections, to mark that
2200 	 * the start periodic advertising and create BIG commands have
2201 	 * been queued
2202 	 */
2203 	hci_conn_hash_list_state(hdev, bis_mark_per_adv, ISO_LINK,
2204 				 BT_BOUND, &data);
2205 
2206 	/* Queue start periodic advertising and create BIG */
2207 	err = hci_cmd_sync_queue(hdev, create_big_sync, conn,
2208 				 create_big_complete);
2209 	if (err < 0) {
2210 		hci_conn_drop(conn);
2211 		return ERR_PTR(err);
2212 	}
2213 
2214 	return conn;
2215 }
2216 
2217 struct hci_conn *hci_connect_cis(struct hci_dev *hdev, bdaddr_t *dst,
2218 				 __u8 dst_type, struct bt_iso_qos *qos)
2219 {
2220 	struct hci_conn *le;
2221 	struct hci_conn *cis;
2222 	struct hci_link *link;
2223 
2224 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
2225 		le = hci_connect_le(hdev, dst, dst_type, false,
2226 				    BT_SECURITY_LOW,
2227 				    HCI_LE_CONN_TIMEOUT,
2228 				    HCI_ROLE_SLAVE);
2229 	else
2230 		le = hci_connect_le_scan(hdev, dst, dst_type,
2231 					 BT_SECURITY_LOW,
2232 					 HCI_LE_CONN_TIMEOUT,
2233 					 CONN_REASON_ISO_CONNECT);
2234 	if (IS_ERR(le))
2235 		return le;
2236 
2237 	hci_iso_qos_setup(hdev, le, &qos->ucast.out,
2238 			  le->le_tx_phy ? le->le_tx_phy : hdev->le_tx_def_phys);
2239 	hci_iso_qos_setup(hdev, le, &qos->ucast.in,
2240 			  le->le_rx_phy ? le->le_rx_phy : hdev->le_rx_def_phys);
2241 
2242 	cis = hci_bind_cis(hdev, dst, dst_type, qos);
2243 	if (IS_ERR(cis)) {
2244 		hci_conn_drop(le);
2245 		return cis;
2246 	}
2247 
2248 	link = hci_conn_link(le, cis);
2249 	if (!link) {
2250 		hci_conn_drop(le);
2251 		hci_conn_drop(cis);
2252 		return ERR_PTR(-ENOLINK);
2253 	}
2254 
2255 	/* Link takes the refcount */
2256 	hci_conn_drop(cis);
2257 
2258 	cis->state = BT_CONNECT;
2259 
2260 	hci_le_create_cis_pending(hdev);
2261 
2262 	return cis;
2263 }
2264 
2265 /* Check link security requirement */
2266 int hci_conn_check_link_mode(struct hci_conn *conn)
2267 {
2268 	BT_DBG("hcon %p", conn);
2269 
2270 	/* In Secure Connections Only mode, it is required that Secure
2271 	 * Connections is used and the link is encrypted with AES-CCM
2272 	 * using a P-256 authenticated combination key.
2273 	 */
2274 	if (hci_dev_test_flag(conn->hdev, HCI_SC_ONLY)) {
2275 		if (!hci_conn_sc_enabled(conn) ||
2276 		    !test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
2277 		    conn->key_type != HCI_LK_AUTH_COMBINATION_P256)
2278 			return 0;
2279 	}
2280 
2281 	 /* AES encryption is required for Level 4:
2282 	  *
2283 	  * BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 3, Part C
2284 	  * page 1319:
2285 	  *
2286 	  * 128-bit equivalent strength for link and encryption keys
2287 	  * required using FIPS approved algorithms (E0 not allowed,
2288 	  * SAFER+ not allowed, and P-192 not allowed; encryption key
2289 	  * not shortened)
2290 	  */
2291 	if (conn->sec_level == BT_SECURITY_FIPS &&
2292 	    !test_bit(HCI_CONN_AES_CCM, &conn->flags)) {
2293 		bt_dev_err(conn->hdev,
2294 			   "Invalid security: Missing AES-CCM usage");
2295 		return 0;
2296 	}
2297 
2298 	if (hci_conn_ssp_enabled(conn) &&
2299 	    !test_bit(HCI_CONN_ENCRYPT, &conn->flags))
2300 		return 0;
2301 
2302 	return 1;
2303 }
2304 
2305 /* Authenticate remote device */
2306 static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
2307 {
2308 	BT_DBG("hcon %p", conn);
2309 
2310 	if (conn->pending_sec_level > sec_level)
2311 		sec_level = conn->pending_sec_level;
2312 
2313 	if (sec_level > conn->sec_level)
2314 		conn->pending_sec_level = sec_level;
2315 	else if (test_bit(HCI_CONN_AUTH, &conn->flags))
2316 		return 1;
2317 
2318 	/* Make sure we preserve an existing MITM requirement */
2319 	auth_type |= (conn->auth_type & 0x01);
2320 
2321 	conn->auth_type = auth_type;
2322 
2323 	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2324 		struct hci_cp_auth_requested cp;
2325 
2326 		cp.handle = cpu_to_le16(conn->handle);
2327 		hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
2328 			     sizeof(cp), &cp);
2329 
2330 		/* Set the ENCRYPT_PEND to trigger encryption after
2331 		 * authentication.
2332 		 */
2333 		if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags))
2334 			set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2335 	}
2336 
2337 	return 0;
2338 }
2339 
2340 /* Encrypt the link */
2341 static void hci_conn_encrypt(struct hci_conn *conn)
2342 {
2343 	BT_DBG("hcon %p", conn);
2344 
2345 	if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
2346 		struct hci_cp_set_conn_encrypt cp;
2347 		cp.handle  = cpu_to_le16(conn->handle);
2348 		cp.encrypt = 0x01;
2349 		hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2350 			     &cp);
2351 	}
2352 }
2353 
2354 /* Enable security */
2355 int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
2356 		      bool initiator)
2357 {
2358 	BT_DBG("hcon %p", conn);
2359 
2360 	if (conn->type == LE_LINK)
2361 		return smp_conn_security(conn, sec_level);
2362 
2363 	/* For SDP we don't need the link key. */
2364 	if (sec_level == BT_SECURITY_SDP)
2365 		return 1;
2366 
2367 	/* For non-2.1 devices and a low security level we don't need the link
2368 	   key. */
2369 	if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
2370 		return 1;
2371 
2372 	/* For other security levels we need the link key. */
2373 	if (!test_bit(HCI_CONN_AUTH, &conn->flags))
2374 		goto auth;
2375 
2376 	switch (conn->key_type) {
2377 	case HCI_LK_AUTH_COMBINATION_P256:
2378 		/* An authenticated FIPS approved combination key has
2379 		 * sufficient security for security level 4 or lower.
2380 		 */
2381 		if (sec_level <= BT_SECURITY_FIPS)
2382 			goto encrypt;
2383 		break;
2384 	case HCI_LK_AUTH_COMBINATION_P192:
2385 		/* An authenticated combination key has sufficient security for
2386 		 * security level 3 or lower.
2387 		 */
2388 		if (sec_level <= BT_SECURITY_HIGH)
2389 			goto encrypt;
2390 		break;
2391 	case HCI_LK_UNAUTH_COMBINATION_P192:
2392 	case HCI_LK_UNAUTH_COMBINATION_P256:
2393 		/* An unauthenticated combination key has sufficient security
2394 		 * for security level 2 or lower.
2395 		 */
2396 		if (sec_level <= BT_SECURITY_MEDIUM)
2397 			goto encrypt;
2398 		break;
2399 	case HCI_LK_COMBINATION:
2400 		/* A combination key always has sufficient security for
2401 		 * security level 2 or lower. A high security level requires
2402 		 * that the combination key was generated using the maximum
2403 		 * PIN code length (16). For pre-2.1 units.
2404 		 */
2405 		if (sec_level <= BT_SECURITY_MEDIUM || conn->pin_length == 16)
2406 			goto encrypt;
2407 		break;
2408 	default:
2409 		break;
2410 	}
2411 
2412 auth:
2413 	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
2414 		return 0;
2415 
2416 	if (initiator)
2417 		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2418 
2419 	if (!hci_conn_auth(conn, sec_level, auth_type))
2420 		return 0;
2421 
2422 encrypt:
2423 	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags)) {
2424 		/* Ensure that the encryption key size has been read,
2425 		 * otherwise stall the upper layer responses.
2426 		 */
2427 		if (!conn->enc_key_size)
2428 			return 0;
2429 
2430 		/* Nothing else needed, all requirements are met */
2431 		return 1;
2432 	}
2433 
2434 	hci_conn_encrypt(conn);
2435 	return 0;
2436 }
2437 EXPORT_SYMBOL(hci_conn_security);
2438 
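/* Editor's summary (illustrative, derived from the switch above):
 *
 *	link key type				highest level satisfied
 *	HCI_LK_AUTH_COMBINATION_P256		BT_SECURITY_FIPS
 *	HCI_LK_AUTH_COMBINATION_P192		BT_SECURITY_HIGH
 *	HCI_LK_UNAUTH_COMBINATION_P192/P256	BT_SECURITY_MEDIUM
 *	HCI_LK_COMBINATION			BT_SECURITY_MEDIUM (any
 *						level with a 16-digit PIN)
 */
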
2439 /* Check secure link requirement */
2440 int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
2441 {
2442 	BT_DBG("hcon %p", conn);
2443 
2444 	/* Accept if non-secure or higher security level is required */
2445 	if (sec_level != BT_SECURITY_HIGH && sec_level != BT_SECURITY_FIPS)
2446 		return 1;
2447 
2448 	/* Accept if secure or higher security level is already present */
2449 	if (conn->sec_level == BT_SECURITY_HIGH ||
2450 	    conn->sec_level == BT_SECURITY_FIPS)
2451 		return 1;
2452 
2453 	/* Reject not secure link */
2454 	return 0;
2455 }
2456 EXPORT_SYMBOL(hci_conn_check_secure);
2457 
2458 /* Switch role */
2459 int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
2460 {
2461 	BT_DBG("hcon %p", conn);
2462 
2463 	if (role == conn->role)
2464 		return 1;
2465 
2466 	if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
2467 		struct hci_cp_switch_role cp;
2468 		bacpy(&cp.bdaddr, &conn->dst);
2469 		cp.role = role;
2470 		hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
2471 	}
2472 
2473 	return 0;
2474 }
2475 EXPORT_SYMBOL(hci_conn_switch_role);
2476 
2477 /* Enter active mode */
2478 void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
2479 {
2480 	struct hci_dev *hdev = conn->hdev;
2481 
2482 	BT_DBG("hcon %p mode %d", conn, conn->mode);
2483 
2484 	if (conn->mode != HCI_CM_SNIFF)
2485 		goto timer;
2486 
2487 	if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
2488 		goto timer;
2489 
2490 	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
2491 		struct hci_cp_exit_sniff_mode cp;
2492 		cp.handle = cpu_to_le16(conn->handle);
2493 		hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
2494 	}
2495 
2496 timer:
2497 	if (hdev->idle_timeout > 0)
2498 		queue_delayed_work(hdev->workqueue, &conn->idle_work,
2499 				   msecs_to_jiffies(hdev->idle_timeout));
2500 }
2501 
2502 /* Drop all connections on the device */
2503 void hci_conn_hash_flush(struct hci_dev *hdev)
2504 {
2505 	struct list_head *head = &hdev->conn_hash.list;
2506 	struct hci_conn *conn;
2507 
2508 	BT_DBG("hdev %s", hdev->name);
2509 
2510 	/* We should not traverse the list here, because hci_conn_del
2511 	 * can remove extra links, which may cause the list traversal
2512 	 * to hit items that have already been released.
2513 	 */
2514 	while ((conn = list_first_entry_or_null(head,
2515 						struct hci_conn,
2516 						list)) != NULL) {
2517 		conn->state = BT_CLOSED;
2518 		hci_disconn_cfm(conn, HCI_ERROR_LOCAL_HOST_TERM);
2519 		hci_conn_del(conn);
2520 	}
2521 }
2522 
2523 static u32 get_link_mode(struct hci_conn *conn)
2524 {
2525 	u32 link_mode = 0;
2526 
2527 	if (conn->role == HCI_ROLE_MASTER)
2528 		link_mode |= HCI_LM_MASTER;
2529 
2530 	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
2531 		link_mode |= HCI_LM_ENCRYPT;
2532 
2533 	if (test_bit(HCI_CONN_AUTH, &conn->flags))
2534 		link_mode |= HCI_LM_AUTH;
2535 
2536 	if (test_bit(HCI_CONN_SECURE, &conn->flags))
2537 		link_mode |= HCI_LM_SECURE;
2538 
2539 	if (test_bit(HCI_CONN_FIPS, &conn->flags))
2540 		link_mode |= HCI_LM_FIPS;
2541 
2542 	return link_mode;
2543 }
2544 
2545 int hci_get_conn_list(void __user *arg)
2546 {
2547 	struct hci_conn *c;
2548 	struct hci_conn_list_req req, *cl;
2549 	struct hci_conn_info *ci;
2550 	struct hci_dev *hdev;
2551 	int n = 0, size, err;
2552 
2553 	if (copy_from_user(&req, arg, sizeof(req)))
2554 		return -EFAULT;
2555 
2556 	if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
2557 		return -EINVAL;
2558 
2559 	size = sizeof(req) + req.conn_num * sizeof(*ci);
2560 
2561 	cl = kmalloc(size, GFP_KERNEL);
2562 	if (!cl)
2563 		return -ENOMEM;
2564 
2565 	hdev = hci_dev_get(req.dev_id);
2566 	if (!hdev) {
2567 		kfree(cl);
2568 		return -ENODEV;
2569 	}
2570 
2571 	ci = cl->conn_info;
2572 
2573 	hci_dev_lock(hdev);
2574 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2575 		bacpy(&(ci + n)->bdaddr, &c->dst);
2576 		(ci + n)->handle = c->handle;
2577 		(ci + n)->type  = c->type;
2578 		(ci + n)->out   = c->out;
2579 		(ci + n)->state = c->state;
2580 		(ci + n)->link_mode = get_link_mode(c);
2581 		if (++n >= req.conn_num)
2582 			break;
2583 	}
2584 	hci_dev_unlock(hdev);
2585 
2586 	cl->dev_id = hdev->id;
2587 	cl->conn_num = n;
2588 	size = sizeof(req) + n * sizeof(*ci);
2589 
2590 	hci_dev_put(hdev);
2591 
2592 	err = copy_to_user(arg, cl, size);
2593 	kfree(cl);
2594 
2595 	return err ? -EFAULT : 0;
2596 }
2597 
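/* Editor's sketch (hypothetical userspace caller of the HCIGETCONNLIST
 * ioctl served above; sk is an open HCI socket and the structures come
 * from the BlueZ library headers; error handling omitted):
 */
#if 0	/* userspace example, never compiled here */
	struct hci_conn_list_req *cl;
	struct hci_conn_info *ci;
	int i;

	cl = malloc(sizeof(*cl) + 10 * sizeof(*ci));
	cl->dev_id = 0;		/* hci0 */
	cl->conn_num = 10;	/* room for up to 10 entries */
	ci = cl->conn_info;

	if (!ioctl(sk, HCIGETCONNLIST, cl))
		for (i = 0; i < cl->conn_num; i++)
			printf("handle 0x%4.4x state %u\n",
			       ci[i].handle, ci[i].state);
#endif
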
2598 int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
2599 {
2600 	struct hci_conn_info_req req;
2601 	struct hci_conn_info ci;
2602 	struct hci_conn *conn;
2603 	char __user *ptr = arg + sizeof(req);
2604 
2605 	if (copy_from_user(&req, arg, sizeof(req)))
2606 		return -EFAULT;
2607 
2608 	hci_dev_lock(hdev);
2609 	conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
2610 	if (conn) {
2611 		bacpy(&ci.bdaddr, &conn->dst);
2612 		ci.handle = conn->handle;
2613 		ci.type  = conn->type;
2614 		ci.out   = conn->out;
2615 		ci.state = conn->state;
2616 		ci.link_mode = get_link_mode(conn);
2617 	}
2618 	hci_dev_unlock(hdev);
2619 
2620 	if (!conn)
2621 		return -ENOENT;
2622 
2623 	return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
2624 }
2625 
2626 int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
2627 {
2628 	struct hci_auth_info_req req;
2629 	struct hci_conn *conn;
2630 
2631 	if (copy_from_user(&req, arg, sizeof(req)))
2632 		return -EFAULT;
2633 
2634 	hci_dev_lock(hdev);
2635 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
2636 	if (conn)
2637 		req.type = conn->auth_type;
2638 	hci_dev_unlock(hdev);
2639 
2640 	if (!conn)
2641 		return -ENOENT;
2642 
2643 	return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
2644 }
2645 
2646 struct hci_chan *hci_chan_create(struct hci_conn *conn)
2647 {
2648 	struct hci_dev *hdev = conn->hdev;
2649 	struct hci_chan *chan;
2650 
2651 	BT_DBG("%s hcon %p", hdev->name, conn);
2652 
2653 	if (test_bit(HCI_CONN_DROP, &conn->flags)) {
2654 		BT_DBG("Refusing to create new hci_chan");
2655 		return NULL;
2656 	}
2657 
2658 	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
2659 	if (!chan)
2660 		return NULL;
2661 
2662 	chan->conn = hci_conn_get(conn);
2663 	skb_queue_head_init(&chan->data_q);
2664 	chan->state = BT_CONNECTED;
2665 
2666 	list_add_rcu(&chan->list, &conn->chan_list);
2667 
2668 	return chan;
2669 }
2670 
2671 void hci_chan_del(struct hci_chan *chan)
2672 {
2673 	struct hci_conn *conn = chan->conn;
2674 	struct hci_dev *hdev = conn->hdev;
2675 
2676 	BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);
2677 
2678 	list_del_rcu(&chan->list);
2679 
2680 	synchronize_rcu();
2681 
2682 	/* Prevent new hci_chan's from being created for this hci_conn */
2683 	set_bit(HCI_CONN_DROP, &conn->flags);
2684 
2685 	hci_conn_put(conn);
2686 
2687 	skb_queue_purge(&chan->data_q);
2688 	kfree(chan);
2689 }
2690 
2691 void hci_chan_list_flush(struct hci_conn *conn)
2692 {
2693 	struct hci_chan *chan, *n;
2694 
2695 	BT_DBG("hcon %p", conn);
2696 
2697 	list_for_each_entry_safe(chan, n, &conn->chan_list, list)
2698 		hci_chan_del(chan);
2699 }
2700 
2701 static struct hci_chan *__hci_chan_lookup_handle(struct hci_conn *hcon,
2702 						 __u16 handle)
2703 {
2704 	struct hci_chan *hchan;
2705 
2706 	list_for_each_entry(hchan, &hcon->chan_list, list) {
2707 		if (hchan->handle == handle)
2708 			return hchan;
2709 	}
2710 
2711 	return NULL;
2712 }
2713 
2714 struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle)
2715 {
2716 	struct hci_conn_hash *h = &hdev->conn_hash;
2717 	struct hci_conn *hcon;
2718 	struct hci_chan *hchan = NULL;
2719 
2720 	rcu_read_lock();
2721 
2722 	list_for_each_entry_rcu(hcon, &h->list, list) {
2723 		hchan = __hci_chan_lookup_handle(hcon, handle);
2724 		if (hchan)
2725 			break;
2726 	}
2727 
2728 	rcu_read_unlock();
2729 
2730 	return hchan;
2731 }
2732 
2733 u32 hci_conn_get_phy(struct hci_conn *conn)
2734 {
2735 	u32 phys = 0;
2736 
2737 	/* BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 2, Part B page 471:
2738 	 * Table 6.2: Packets defined for synchronous, asynchronous, and
2739 	 * CPB logical transport types.
2740 	 */
2741 	switch (conn->type) {
2742 	case SCO_LINK:
2743 		/* SCO logical transport (1 Mb/s):
2744 		 * HV1, HV2, HV3 and DV.
2745 		 */
2746 		phys |= BT_PHY_BR_1M_1SLOT;
2747 
2748 		break;
2749 
2750 	case ACL_LINK:
2751 		/* ACL logical transport (1 Mb/s) ptt=0:
2752 		 * DH1, DM3, DH3, DM5 and DH5.
2753 		 */
2754 		phys |= BT_PHY_BR_1M_1SLOT;
2755 
2756 		if (conn->pkt_type & (HCI_DM3 | HCI_DH3))
2757 			phys |= BT_PHY_BR_1M_3SLOT;
2758 
2759 		if (conn->pkt_type & (HCI_DM5 | HCI_DH5))
2760 			phys |= BT_PHY_BR_1M_5SLOT;
2761 
2762 		/* ACL logical transport (2 Mb/s) ptt=1:
2763 		 * 2-DH1, 2-DH3 and 2-DH5.
2764 		 */
2765 		if (!(conn->pkt_type & HCI_2DH1))
2766 			phys |= BT_PHY_EDR_2M_1SLOT;
2767 
2768 		if (!(conn->pkt_type & HCI_2DH3))
2769 			phys |= BT_PHY_EDR_2M_3SLOT;
2770 
2771 		if (!(conn->pkt_type & HCI_2DH5))
2772 			phys |= BT_PHY_EDR_2M_5SLOT;
2773 
2774 		/* ACL logical transport (3 Mb/s) ptt=1:
2775 		 * 3-DH1, 3-DH3 and 3-DH5.
2776 		 */
2777 		if (!(conn->pkt_type & HCI_3DH1))
2778 			phys |= BT_PHY_EDR_3M_1SLOT;
2779 
2780 		if (!(conn->pkt_type & HCI_3DH3))
2781 			phys |= BT_PHY_EDR_3M_3SLOT;
2782 
2783 		if (!(conn->pkt_type & HCI_3DH5))
2784 			phys |= BT_PHY_EDR_3M_5SLOT;
2785 
2786 		break;
2787 
2788 	case ESCO_LINK:
2789 		/* eSCO logical transport (1 Mb/s): EV3, EV4 and EV5 */
2790 		phys |= BT_PHY_BR_1M_1SLOT;
2791 
2792 		if (!(conn->pkt_type & (ESCO_EV4 | ESCO_EV5)))
2793 			phys |= BT_PHY_BR_1M_3SLOT;
2794 
2795 		/* eSCO logical transport (2 Mb/s): 2-EV3, 2-EV5 */
2796 		if (!(conn->pkt_type & ESCO_2EV3))
2797 			phys |= BT_PHY_EDR_2M_1SLOT;
2798 
2799 		if (!(conn->pkt_type & ESCO_2EV5))
2800 			phys |= BT_PHY_EDR_2M_3SLOT;
2801 
2802 		/* eSCO logical transport (3 Mb/s): 3-EV3, 3-EV5 */
2803 		if (!(conn->pkt_type & ESCO_3EV3))
2804 			phys |= BT_PHY_EDR_3M_1SLOT;
2805 
2806 		if (!(conn->pkt_type & ESCO_3EV5))
2807 			phys |= BT_PHY_EDR_3M_3SLOT;
2808 
2809 		break;
2810 
2811 	case LE_LINK:
2812 		if (conn->le_tx_phy & HCI_LE_SET_PHY_1M)
2813 			phys |= BT_PHY_LE_1M_TX;
2814 
2815 		if (conn->le_rx_phy & HCI_LE_SET_PHY_1M)
2816 			phys |= BT_PHY_LE_1M_RX;
2817 
2818 		if (conn->le_tx_phy & HCI_LE_SET_PHY_2M)
2819 			phys |= BT_PHY_LE_2M_TX;
2820 
2821 		if (conn->le_rx_phy & HCI_LE_SET_PHY_2M)
2822 			phys |= BT_PHY_LE_2M_RX;
2823 
2824 		if (conn->le_tx_phy & HCI_LE_SET_PHY_CODED)
2825 			phys |= BT_PHY_LE_CODED_TX;
2826 
2827 		if (conn->le_rx_phy & HCI_LE_SET_PHY_CODED)
2828 			phys |= BT_PHY_LE_CODED_RX;
2829 
2830 		break;
2831 	}
2832 
2833 	return phys;
2834 }
2835 
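/* Editor's note (clarifying example, not original code): for the EDR
 * entries above the pkt_type bits are "shall not be used" flags, hence
 * the negated tests: a pkt_type with HCI_2DH1 cleared means 2-DH1
 * packets are allowed, so BT_PHY_EDR_2M_1SLOT is reported.
 */
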
2836 static int abort_conn_sync(struct hci_dev *hdev, void *data)
2837 {
2838 	struct hci_conn *conn = data;
2839 
2840 	if (!hci_conn_valid(hdev, conn))
2841 		return -ECANCELED;
2842 
2843 	return hci_abort_conn_sync(hdev, conn, conn->abort_reason);
2844 }
2845 
2846 int hci_abort_conn(struct hci_conn *conn, u8 reason)
2847 {
2848 	struct hci_dev *hdev = conn->hdev;
2849 
2850 	/* If abort_reason has already been set it means the connection is
2851 	 * already being aborted so don't attempt to overwrite it.
2852 	 */
2853 	if (conn->abort_reason)
2854 		return 0;
2855 
2856 	bt_dev_dbg(hdev, "handle 0x%2.2x reason 0x%2.2x", conn->handle, reason);
2857 
2858 	conn->abort_reason = reason;
2859 
2860 	/* If the connection is pending, check the command opcode since it
2861 	 * might be blocking on hci_cmd_sync_work while waiting for its
2862 	 * respective event, in which case hci_cmd_sync_cancel is needed.
2863 	 *
2864 	 * hci_connect_le serializes the connection attempts so only one
2865 	 * connection can be in BT_CONNECT at time.
2866 	 */
2867 	if (conn->state == BT_CONNECT && hdev->req_status == HCI_REQ_PEND) {
2868 		switch (hci_skb_event(hdev->sent_cmd)) {
2869 		case HCI_EV_CONN_COMPLETE:
2870 		case HCI_EV_LE_CONN_COMPLETE:
2871 		case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
2872 		case HCI_EVT_LE_CIS_ESTABLISHED:
2873 			hci_cmd_sync_cancel(hdev, ECANCELED);
2874 			break;
2875 		}
2876 	/* Cancel connect attempt if still queued/pending */
2877 	} else if (!hci_cancel_connect_sync(hdev, conn)) {
2878 		return 0;
2879 	}
2880 
2881 	/* Run immediately if on cmd_sync_work since this may be called
2882 	 * as a result of MGMT_OP_DISCONNECT/MGMT_OP_UNPAIR, which already
2883 	 * queues its callback on cmd_sync_work.
2884 	 */
2885 	return hci_cmd_sync_run_once(hdev, abort_conn_sync, conn, NULL);
2886 }
2887