xref: /openbmc/linux/net/bluetooth/hci_conn.c (revision 3cde81f8)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4    Copyright 2023 NXP
5 
6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License version 2 as
10    published by the Free Software Foundation;
11 
12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 
21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23    SOFTWARE IS DISCLAIMED.
24 */
25 
26 /* Bluetooth HCI connection handling. */
27 
28 #include <linux/export.h>
29 #include <linux/debugfs.h>
30 
31 #include <net/bluetooth/bluetooth.h>
32 #include <net/bluetooth/hci_core.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/iso.h>
35 #include <net/bluetooth/mgmt.h>
36 
37 #include "hci_request.h"
38 #include "smp.h"
39 #include "eir.h"
40 
/* One (e)SCO negotiation alternative; the tables below list rows in
 * preference order and are walked via conn->attempt on each retry.
 */
struct sco_param {
	u16 pkt_type;		/* (e)SCO packet type bits (ESCO_* masks) */
	u16 max_latency;	/* max latency; 0xffff = don't care */
	u8  retrans_effort;	/* retransmission effort; 0xff = don't care */
};
46 
/* Carries a connection and its ACL handle into work queued through
 * hci_cmd_sync_queue(); allocated by hci_setup_sync() and freed by the
 * work function (hci_enhanced_setup_sync).
 */
struct conn_handle_t {
	struct hci_conn *conn;
	__u16 handle;
};
51 
/* Parameter tables tried in order (indexed by conn->attempt - 1):
 * CVSD over eSCO-capable links, CVSD over plain SCO, and mSBC.
 * The trailing comments name the settings from the Bluetooth SCO/eSCO
 * parameter recommendation tables (S/D/T sets).
 */
static const struct sco_param esco_param_cvsd[] = {
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a,	0x01 }, /* S3 */
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007,	0x01 }, /* S2 */
	{ EDR_ESCO_MASK | ESCO_EV3,   0x0007,	0x01 }, /* S1 */
	{ EDR_ESCO_MASK | ESCO_HV3,   0xffff,	0x01 }, /* D1 */
	{ EDR_ESCO_MASK | ESCO_HV1,   0xffff,	0x01 }, /* D0 */
};

static const struct sco_param sco_param_cvsd[] = {
	{ EDR_ESCO_MASK | ESCO_HV3,   0xffff,	0xff }, /* D1 */
	{ EDR_ESCO_MASK | ESCO_HV1,   0xffff,	0xff }, /* D0 */
};

static const struct sco_param esco_param_msbc[] = {
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d,	0x02 }, /* T2 */
	{ EDR_ESCO_MASK | ESCO_EV3,   0x0008,	0x02 }, /* T1 */
};
69 
/* Clean up pending-connection state after an LE scan-based connect
 * attempt ends with @status: drop the reference the pend_le_conns entry
 * held on the connection, notify userspace about real failures, and
 * re-file or delete the conn params depending on their autoconnect mode.
 *
 * This function requires the caller holds hdev->lock.
 */
static void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status)
{
	struct hci_conn_params *params;
	struct hci_dev *hdev = conn->hdev;
	struct smp_irk *irk;
	bdaddr_t *bdaddr;
	u8 bdaddr_type;

	bdaddr = &conn->dst;
	bdaddr_type = conn->dst_type;

	/* Check if we need to convert to identity address */
	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		bdaddr_type = irk->addr_type;
	}

	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, bdaddr,
					   bdaddr_type);
	if (!params)
		return;

	/* Release the reference the pending entry held on the connection. */
	if (params->conn) {
		hci_conn_drop(params->conn);
		hci_conn_put(params->conn);
		params->conn = NULL;
	}

	if (!params->explicit_connect)
		return;

	/* If the status indicates successful cancellation of
	 * the attempt (i.e. Unknown Connection Id) there's no point of
	 * notifying failure since we'll go back to keep trying to
	 * connect. The only exception is explicit connect requests
	 * where a timeout + cancel does indicate an actual failure.
	 */
	if (status && status != HCI_ERROR_UNKNOWN_CONN_ID)
		mgmt_connect_failed(hdev, &conn->dst, conn->type,
				    conn->dst_type, status);

	/* The connection attempt was doing scan for new RPA, and is
	 * in scan phase. If params are not associated with any other
	 * autoconnect action, remove them completely. If they are, just unmark
	 * them as waiting for connection, by clearing explicit_connect field.
	 */
	params->explicit_connect = false;

	hci_pend_le_list_del_init(params);

	switch (params->auto_connect) {
	case HCI_AUTO_CONN_EXPLICIT:
		hci_conn_params_del(hdev, bdaddr, bdaddr_type);
		/* return instead of break to avoid duplicate scan update */
		return;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		hci_pend_le_list_add(params, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		hci_pend_le_list_add(params, &hdev->pend_le_reports);
		break;
	default:
		break;
	}

	hci_update_passive_scan(hdev);
}
140 
/* Final teardown of a connection: flush channels, unhash, run the
 * type-specific ->cleanup hook, notify the driver, remove sysfs/debugfs
 * entries and drop the reference held on hdev.  The ordering below is
 * deliberate (unhash before cleanup/notify).
 */
static void hci_conn_cleanup(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	/* Remove conn params that were flagged for deletion once the
	 * connection goes away.
	 */
	if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags))
		hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type);

	if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
		hci_remove_link_key(hdev, &conn->dst);

	hci_chan_list_flush(conn);

	hci_conn_hash_del(hdev, conn);

	/* A handle that was never assigned by the controller came from the
	 * unset-handle IDA; return it there.
	 */
	if (HCI_CONN_HANDLE_UNSET(conn->handle))
		ida_free(&hdev->unset_handle_ida, conn->handle);

	if (conn->cleanup)
		conn->cleanup(conn);

	if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
		switch (conn->setting & SCO_AIRMODE_MASK) {
		case SCO_AIRMODE_CVSD:
		case SCO_AIRMODE_TRANSP:
			if (hdev->notify)
				hdev->notify(hdev, HCI_NOTIFY_DISABLE_SCO);
			break;
		}
	} else {
		if (hdev->notify)
			hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
	}

	debugfs_remove_recursive(conn->debugfs);

	hci_conn_del_sysfs(conn);

	hci_dev_put(hdev);
}
180 
/* Send HCI Create Connection for an outgoing ACL link.  If an inquiry
 * is running, the connection is parked in BT_CONNECT2 and the attempt is
 * resumed after the Inquiry Cancel command completes.
 */
static void hci_acl_create_connection(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct inquiry_entry *ie;
	struct hci_cp_create_conn cp;

	BT_DBG("hcon %p", conn);

	/* Many controllers disallow HCI Create Connection while it is doing
	 * HCI Inquiry. So we cancel the Inquiry first before issuing HCI Create
	 * Connection. This may cause the MGMT discovering state to become false
	 * without user space's request but it is okay since the MGMT Discovery
	 * APIs do not promise that discovery should be done forever. Instead,
	 * the user space monitors the status of MGMT discovering and it may
	 * request for discovery again when this flag becomes false.
	 */
	if (test_bit(HCI_INQUIRY, &hdev->flags)) {
		/* Put this connection to "pending" state so that it will be
		 * executed after the inquiry cancel command complete event.
		 */
		conn->state = BT_CONNECT2;
		hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
		return;
	}

	conn->state = BT_CONNECT;
	conn->out = true;
	conn->role = HCI_ROLE_MASTER;

	conn->attempt++;

	conn->link_policy = hdev->link_policy;

	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.bdaddr, &conn->dst);
	cp.pscan_rep_mode = 0x02;

	/* Reuse fresh inquiry-cache data (page scan mode, clock offset) to
	 * speed up connection establishment.
	 */
	ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
	if (ie) {
		if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
			cp.pscan_rep_mode = ie->data.pscan_rep_mode;
			cp.pscan_mode     = ie->data.pscan_mode;
			cp.clock_offset   = ie->data.clock_offset |
					    cpu_to_le16(0x8000);
		}

		memcpy(conn->dev_class, ie->data.dev_class, 3);
	}

	cp.pkt_type = cpu_to_le16(conn->pkt_type);
	if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
		cp.role_switch = 0x01;
	else
		cp.role_switch = 0x00;

	hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
}
238 
/* Initiate disconnection of @conn with HCI @reason.
 *
 * As central of an established ACL link, first snapshot the current
 * clock offset; the result is consumed by the event handling in
 * hci_clock_offset_evt.  Returns the result of hci_abort_conn().
 */
int hci_disconnect(struct hci_conn *conn, __u8 reason)
{
	bool established;

	BT_DBG("hcon %p", conn);

	established = conn->state == BT_CONNECTED || conn->state == BT_CONFIG;

	if (conn->type == ACL_LINK && conn->role == HCI_ROLE_MASTER &&
	    established) {
		struct hci_cp_read_clock_offset cp;

		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_READ_CLOCK_OFFSET,
			     sizeof(cp), &cp);
	}

	return hci_abort_conn(conn, reason);
}
260 
/* Send the legacy HCI Add SCO Connection command for a SCO link riding
 * on ACL @handle, moving the connection into BT_CONNECT and bumping the
 * retry attempt counter.
 */
static void hci_add_sco(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_add_sco cmd;

	BT_DBG("hcon %p", conn);

	conn->out = true;
	conn->state = BT_CONNECT;
	conn->attempt++;

	cmd.pkt_type = cpu_to_le16(conn->pkt_type);
	cmd.handle   = cpu_to_le16(handle);

	hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cmd), &cmd);
}
278 
find_next_esco_param(struct hci_conn * conn,const struct sco_param * esco_param,int size)279 static bool find_next_esco_param(struct hci_conn *conn,
280 				 const struct sco_param *esco_param, int size)
281 {
282 	if (!conn->parent)
283 		return false;
284 
285 	for (; conn->attempt <= size; conn->attempt++) {
286 		if (lmp_esco_2m_capable(conn->parent) ||
287 		    (esco_param[conn->attempt - 1].pkt_type & ESCO_2EV3))
288 			break;
289 		BT_DBG("hcon %p skipped attempt %d, eSCO 2M not supported",
290 		       conn, conn->attempt);
291 	}
292 
293 	return conn->attempt <= size;
294 }
295 
configure_datapath_sync(struct hci_dev * hdev,struct bt_codec * codec)296 static int configure_datapath_sync(struct hci_dev *hdev, struct bt_codec *codec)
297 {
298 	int err;
299 	__u8 vnd_len, *vnd_data = NULL;
300 	struct hci_op_configure_data_path *cmd = NULL;
301 
302 	if (!codec->data_path || !hdev->get_codec_config_data)
303 		return 0;
304 
305 	/* Do not take me as error */
306 	if (!hdev->get_codec_config_data)
307 		return 0;
308 
309 	err = hdev->get_codec_config_data(hdev, ESCO_LINK, codec, &vnd_len,
310 					  &vnd_data);
311 	if (err < 0)
312 		goto error;
313 
314 	cmd = kzalloc(sizeof(*cmd) + vnd_len, GFP_KERNEL);
315 	if (!cmd) {
316 		err = -ENOMEM;
317 		goto error;
318 	}
319 
320 	err = hdev->get_data_path_id(hdev, &cmd->data_path_id);
321 	if (err < 0)
322 		goto error;
323 
324 	cmd->vnd_len = vnd_len;
325 	memcpy(cmd->vnd_data, vnd_data, vnd_len);
326 
327 	cmd->direction = 0x00;
328 	__hci_cmd_sync_status(hdev, HCI_CONFIGURE_DATA_PATH,
329 			      sizeof(*cmd) + vnd_len, cmd, HCI_CMD_TIMEOUT);
330 
331 	cmd->direction = 0x01;
332 	err = __hci_cmd_sync_status(hdev, HCI_CONFIGURE_DATA_PATH,
333 				    sizeof(*cmd) + vnd_len, cmd,
334 				    HCI_CMD_TIMEOUT);
335 error:
336 
337 	kfree(cmd);
338 	kfree(vnd_data);
339 	return err;
340 }
341 
hci_enhanced_setup_sync(struct hci_dev * hdev,void * data)342 static int hci_enhanced_setup_sync(struct hci_dev *hdev, void *data)
343 {
344 	struct conn_handle_t *conn_handle = data;
345 	struct hci_conn *conn = conn_handle->conn;
346 	__u16 handle = conn_handle->handle;
347 	struct hci_cp_enhanced_setup_sync_conn cp;
348 	const struct sco_param *param;
349 
350 	kfree(conn_handle);
351 
352 	bt_dev_dbg(hdev, "hcon %p", conn);
353 
354 	configure_datapath_sync(hdev, &conn->codec);
355 
356 	conn->state = BT_CONNECT;
357 	conn->out = true;
358 
359 	conn->attempt++;
360 
361 	memset(&cp, 0x00, sizeof(cp));
362 
363 	cp.handle   = cpu_to_le16(handle);
364 
365 	cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
366 	cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
367 
368 	switch (conn->codec.id) {
369 	case BT_CODEC_MSBC:
370 		if (!find_next_esco_param(conn, esco_param_msbc,
371 					  ARRAY_SIZE(esco_param_msbc)))
372 			return -EINVAL;
373 
374 		param = &esco_param_msbc[conn->attempt - 1];
375 		cp.tx_coding_format.id = 0x05;
376 		cp.rx_coding_format.id = 0x05;
377 		cp.tx_codec_frame_size = __cpu_to_le16(60);
378 		cp.rx_codec_frame_size = __cpu_to_le16(60);
379 		cp.in_bandwidth = __cpu_to_le32(32000);
380 		cp.out_bandwidth = __cpu_to_le32(32000);
381 		cp.in_coding_format.id = 0x04;
382 		cp.out_coding_format.id = 0x04;
383 		cp.in_coded_data_size = __cpu_to_le16(16);
384 		cp.out_coded_data_size = __cpu_to_le16(16);
385 		cp.in_pcm_data_format = 2;
386 		cp.out_pcm_data_format = 2;
387 		cp.in_pcm_sample_payload_msb_pos = 0;
388 		cp.out_pcm_sample_payload_msb_pos = 0;
389 		cp.in_data_path = conn->codec.data_path;
390 		cp.out_data_path = conn->codec.data_path;
391 		cp.in_transport_unit_size = 1;
392 		cp.out_transport_unit_size = 1;
393 		break;
394 
395 	case BT_CODEC_TRANSPARENT:
396 		if (!find_next_esco_param(conn, esco_param_msbc,
397 					  ARRAY_SIZE(esco_param_msbc)))
398 			return false;
399 		param = &esco_param_msbc[conn->attempt - 1];
400 		cp.tx_coding_format.id = 0x03;
401 		cp.rx_coding_format.id = 0x03;
402 		cp.tx_codec_frame_size = __cpu_to_le16(60);
403 		cp.rx_codec_frame_size = __cpu_to_le16(60);
404 		cp.in_bandwidth = __cpu_to_le32(0x1f40);
405 		cp.out_bandwidth = __cpu_to_le32(0x1f40);
406 		cp.in_coding_format.id = 0x03;
407 		cp.out_coding_format.id = 0x03;
408 		cp.in_coded_data_size = __cpu_to_le16(16);
409 		cp.out_coded_data_size = __cpu_to_le16(16);
410 		cp.in_pcm_data_format = 2;
411 		cp.out_pcm_data_format = 2;
412 		cp.in_pcm_sample_payload_msb_pos = 0;
413 		cp.out_pcm_sample_payload_msb_pos = 0;
414 		cp.in_data_path = conn->codec.data_path;
415 		cp.out_data_path = conn->codec.data_path;
416 		cp.in_transport_unit_size = 1;
417 		cp.out_transport_unit_size = 1;
418 		break;
419 
420 	case BT_CODEC_CVSD:
421 		if (conn->parent && lmp_esco_capable(conn->parent)) {
422 			if (!find_next_esco_param(conn, esco_param_cvsd,
423 						  ARRAY_SIZE(esco_param_cvsd)))
424 				return -EINVAL;
425 			param = &esco_param_cvsd[conn->attempt - 1];
426 		} else {
427 			if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
428 				return -EINVAL;
429 			param = &sco_param_cvsd[conn->attempt - 1];
430 		}
431 		cp.tx_coding_format.id = 2;
432 		cp.rx_coding_format.id = 2;
433 		cp.tx_codec_frame_size = __cpu_to_le16(60);
434 		cp.rx_codec_frame_size = __cpu_to_le16(60);
435 		cp.in_bandwidth = __cpu_to_le32(16000);
436 		cp.out_bandwidth = __cpu_to_le32(16000);
437 		cp.in_coding_format.id = 4;
438 		cp.out_coding_format.id = 4;
439 		cp.in_coded_data_size = __cpu_to_le16(16);
440 		cp.out_coded_data_size = __cpu_to_le16(16);
441 		cp.in_pcm_data_format = 2;
442 		cp.out_pcm_data_format = 2;
443 		cp.in_pcm_sample_payload_msb_pos = 0;
444 		cp.out_pcm_sample_payload_msb_pos = 0;
445 		cp.in_data_path = conn->codec.data_path;
446 		cp.out_data_path = conn->codec.data_path;
447 		cp.in_transport_unit_size = 16;
448 		cp.out_transport_unit_size = 16;
449 		break;
450 	default:
451 		return -EINVAL;
452 	}
453 
454 	cp.retrans_effort = param->retrans_effort;
455 	cp.pkt_type = __cpu_to_le16(param->pkt_type);
456 	cp.max_latency = __cpu_to_le16(param->max_latency);
457 
458 	if (hci_send_cmd(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
459 		return -EIO;
460 
461 	return 0;
462 }
463 
/* Issue the legacy HCI Setup Synchronous Connection command for an
 * (e)SCO link on ACL @handle, picking the parameter set that matches
 * the negotiated air mode and the current retry attempt.
 *
 * Returns true when the command was sent, false when no parameter set
 * remains or sending failed.
 */
static bool hci_setup_sync_conn(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_setup_sync_conn cp;
	const struct sco_param *param;

	bt_dev_dbg(hdev, "hcon %p", conn);

	conn->state = BT_CONNECT;
	conn->out = true;

	conn->attempt++;

	cp.handle   = cpu_to_le16(handle);

	cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
	cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
	cp.voice_setting  = cpu_to_le16(conn->setting);

	switch (conn->setting & SCO_AIRMODE_MASK) {
	case SCO_AIRMODE_TRANSP:
		if (!find_next_esco_param(conn, esco_param_msbc,
					  ARRAY_SIZE(esco_param_msbc)))
			return false;
		param = &esco_param_msbc[conn->attempt - 1];
		break;
	case SCO_AIRMODE_CVSD:
		/* Fall back to plain SCO parameters when the remote side
		 * is not eSCO capable.
		 */
		if (conn->parent && lmp_esco_capable(conn->parent)) {
			if (!find_next_esco_param(conn, esco_param_cvsd,
						  ARRAY_SIZE(esco_param_cvsd)))
				return false;
			param = &esco_param_cvsd[conn->attempt - 1];
		} else {
			if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
				return false;
			param = &sco_param_cvsd[conn->attempt - 1];
		}
		break;
	default:
		return false;
	}

	cp.retrans_effort = param->retrans_effort;
	cp.pkt_type = __cpu_to_le16(param->pkt_type);
	cp.max_latency = __cpu_to_le16(param->max_latency);

	if (hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
		return false;

	return true;
}
515 
hci_setup_sync(struct hci_conn * conn,__u16 handle)516 bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
517 {
518 	int result;
519 	struct conn_handle_t *conn_handle;
520 
521 	if (enhanced_sync_conn_capable(conn->hdev)) {
522 		conn_handle = kzalloc(sizeof(*conn_handle), GFP_KERNEL);
523 
524 		if (!conn_handle)
525 			return false;
526 
527 		conn_handle->conn = conn;
528 		conn_handle->handle = handle;
529 		result = hci_cmd_sync_queue(conn->hdev, hci_enhanced_setup_sync,
530 					    conn_handle, NULL);
531 		if (result < 0)
532 			kfree(conn_handle);
533 
534 		return result == 0;
535 	}
536 
537 	return hci_setup_sync_conn(conn, handle);
538 }
539 
/* Send HCI LE Connection Update with the given interval/latency/timeout
 * and, when stored connection parameters exist for the peer, remember
 * the new values there as well.
 *
 * Returns 0x01 when stored parameters were updated, 0x00 otherwise.
 */
u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
		      u16 to_multiplier)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_conn_params *params;
	struct hci_cp_le_conn_update cp;

	/* The params list is only touched under hdev->lock. */
	hci_dev_lock(hdev);

	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		params->conn_min_interval = min;
		params->conn_max_interval = max;
		params->conn_latency = latency;
		params->supervision_timeout = to_multiplier;
	}

	hci_dev_unlock(hdev);

	memset(&cp, 0, sizeof(cp));
	cp.handle		= cpu_to_le16(conn->handle);
	cp.conn_interval_min	= cpu_to_le16(min);
	cp.conn_interval_max	= cpu_to_le16(max);
	cp.conn_latency		= cpu_to_le16(latency);
	cp.supervision_timeout	= cpu_to_le16(to_multiplier);
	cp.min_ce_len		= cpu_to_le16(0x0000);
	cp.max_ce_len		= cpu_to_le16(0x0000);

	hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);

	if (params)
		return 0x01;

	return 0x00;
}
575 
/* Kick off LE link-layer encryption on @conn with the given EDIV/Rand
 * pair and long-term key (@key_size bytes of @ltk are used).
 */
void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
		      __u8 ltk[16], __u8 key_size)
{
	struct hci_cp_le_start_enc cp;

	BT_DBG("hcon %p", conn);

	memset(&cp, 0, sizeof(cp));

	cp.ediv = ediv;
	cp.rand = rand;
	cp.handle = cpu_to_le16(conn->handle);
	memcpy(cp.ltk, ltk, key_size);

	hci_send_cmd(conn->hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
}
593 
594 /* Device _must_ be locked */
hci_sco_setup(struct hci_conn * conn,__u8 status)595 void hci_sco_setup(struct hci_conn *conn, __u8 status)
596 {
597 	struct hci_link *link;
598 
599 	link = list_first_entry_or_null(&conn->link_list, struct hci_link, list);
600 	if (!link || !link->conn)
601 		return;
602 
603 	BT_DBG("hcon %p", conn);
604 
605 	if (!status) {
606 		if (lmp_esco_capable(conn->hdev))
607 			hci_setup_sync(link->conn, conn->handle);
608 		else
609 			hci_add_sco(link->conn, conn->handle);
610 	} else {
611 		hci_connect_cfm(link->conn, status);
612 		hci_conn_del(link->conn);
613 	}
614 }
615 
/* Delayed work (disc_work): abort the connection once its disconnect
 * timeout expires, but only if no references remain.
 */
static void hci_conn_timeout(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     disc_work.work);
	int refcnt = atomic_read(&conn->refcnt);

	BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));

	WARN_ON(refcnt < 0);

	/* FIXME: It was observed that in pairing failed scenario, refcnt
	 * drops below 0. Probably this is because l2cap_conn_del calls
	 * l2cap_chan_del for each channel, and inside l2cap_chan_del conn is
	 * dropped. After that loop hci_chan_del is called which also drops
	 * conn. For now make sure that ACL is alive if refcnt is higher then 0,
	 * otherwise drop it.
	 */
	if (refcnt > 0)
		return;

	hci_abort_conn(conn, hci_proto_disconn_ind(conn));
}
638 
/* Enter sniff mode
 *
 * Delayed work (idle_work): put an idle ACL link into sniff mode
 * (optionally requesting sniff subrating first) when both sides support
 * it and the link policy allows it.
 */
static void hci_conn_idle(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     idle_work.work);
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("hcon %p mode %d", conn, conn->mode);

	if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
		return;

	if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
		return;

	if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
		struct hci_cp_sniff_subrate cp;
		cp.handle             = cpu_to_le16(conn->handle);
		cp.max_latency        = cpu_to_le16(0);
		cp.min_remote_timeout = cpu_to_le16(0);
		cp.min_local_timeout  = cpu_to_le16(0);
		hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
	}

	/* Only one mode-change request may be outstanding at a time. */
	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
		struct hci_cp_sniff_mode cp;
		cp.handle       = cpu_to_le16(conn->handle);
		cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
		cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
		cp.attempt      = cpu_to_le16(4);
		cp.timeout      = cpu_to_le16(1);
		hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
	}
}
673 
hci_conn_auto_accept(struct work_struct * work)674 static void hci_conn_auto_accept(struct work_struct *work)
675 {
676 	struct hci_conn *conn = container_of(work, struct hci_conn,
677 					     auto_accept_work.work);
678 
679 	hci_send_cmd(conn->hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
680 		     &conn->dst);
681 }
682 
le_disable_advertising(struct hci_dev * hdev)683 static void le_disable_advertising(struct hci_dev *hdev)
684 {
685 	if (ext_adv_capable(hdev)) {
686 		struct hci_cp_le_set_ext_adv_enable cp;
687 
688 		cp.enable = 0x00;
689 		cp.num_of_sets = 0x00;
690 
691 		hci_send_cmd(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp),
692 			     &cp);
693 	} else {
694 		u8 enable = 0x00;
695 		hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
696 			     &enable);
697 	}
698 }
699 
/* Delayed work (le_conn_timeout): an LE connection attempt timed out.
 * For the peripheral role the attempt was directed advertising, so stop
 * advertising and fail the connection; for the central role abort the
 * pending connection.
 */
static void le_conn_timeout(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     le_conn_timeout.work);
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("");

	/* We could end up here due to having done directed advertising,
	 * so clean up the state if necessary. This should however only
	 * happen with broken hardware or if low duty cycle was used
	 * (which doesn't have a timeout of its own).
	 */
	if (conn->role == HCI_ROLE_SLAVE) {
		/* Disable LE Advertising */
		le_disable_advertising(hdev);
		hci_dev_lock(hdev);
		hci_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
		hci_dev_unlock(hdev);
		return;
	}

	hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
}
724 
/* LE Set CIG Parameters command buffer with room for the maximum number
 * of CIS entries (0x1f) that can follow the fixed header.
 */
struct iso_cig_params {
	struct hci_cp_le_set_cig_params cp;
	struct hci_cis_params cis[0x1f];
};
729 
/* Scratch state shared by the ISO connection-hash iterators and the
 * BIG/CIG termination work queued via hci_cmd_sync_queue().
 */
struct iso_list_data {
	union {
		u8  cig;	/* CIG id to match (unicast) */
		u8  big;	/* BIG handle to match (broadcast) */
	};
	union {
		u8  cis;	/* CIS id to match */
		u8  bis;	/* BIS id / adv instance */
		u16 sync_handle;	/* PA sync handle (broadcast receiver) */
	};
	int count;		/* matching connections found by iterators */
	bool big_term;		/* BIG was created, needs termination */
	bool pa_sync_term;	/* terminate the PA sync */
	bool big_sync_term;	/* terminate the BIG sync */
};
745 
/* Connection-hash iterator callback: count broadcast (BDADDR_ANY) ISO
 * connections that match the BIG and BIS recorded in @data.
 */
static void bis_list(struct hci_conn *conn, void *data)
{
	struct iso_list_data *d = data;

	/* Skip if not broadcast/ANY address */
	if (bacmp(&conn->dst, BDADDR_ANY))
		return;

	/* No match when the requested BIS is unset or BIG/BIS differ. */
	if (d->big != conn->iso_qos.bcast.big || d->bis == BT_ISO_QOS_BIS_UNSET ||
	    d->bis != conn->iso_qos.bcast.bis)
		return;

	d->count++;
}
terminate_big_sync(struct hci_dev * hdev,void * data)761 static int terminate_big_sync(struct hci_dev *hdev, void *data)
762 {
763 	struct iso_list_data *d = data;
764 
765 	bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", d->big, d->bis);
766 
767 	hci_disable_per_advertising_sync(hdev, d->bis);
768 	hci_remove_ext_adv_instance_sync(hdev, d->bis, NULL);
769 
770 	/* Only terminate BIG if it has been created */
771 	if (!d->big_term)
772 		return 0;
773 
774 	return hci_le_terminate_big_sync(hdev, d->big,
775 					 HCI_ERROR_LOCAL_HOST_TERM);
776 }
777 
/* hci_cmd_sync destroy callback: frees the iso_list_data allocated by
 * hci_le_terminate_big() / hci_le_big_terminate().
 */
static void terminate_big_destroy(struct hci_dev *hdev, void *data, int err)
{
	kfree(data);
}
782 
/* Queue termination of the BIG (and its advertising set) that @conn
 * belongs to.  Ownership of the allocated iso_list_data passes to the
 * queued work and it is freed by terminate_big_destroy(); on queueing
 * failure it is freed here.  Returns 0 or a negative errno.
 */
static int hci_le_terminate_big(struct hci_dev *hdev, struct hci_conn *conn)
{
	struct iso_list_data *d;
	int ret;

	bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", conn->iso_qos.bcast.big,
		   conn->iso_qos.bcast.bis);

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->big = conn->iso_qos.bcast.big;
	d->bis = conn->iso_qos.bcast.bis;
	d->big_term = test_and_clear_bit(HCI_CONN_BIG_CREATED, &conn->flags);

	ret = hci_cmd_sync_queue(hdev, terminate_big_sync, d,
				 terminate_big_destroy);
	if (ret)
		kfree(d);

	return ret;
}
806 
/* hci_cmd_sync work (broadcast receiver side): terminate the BIG sync
 * and/or the periodic advertising sync, depending on which flags were
 * recorded in @data.
 */
static int big_terminate_sync(struct hci_dev *hdev, void *data)
{
	struct iso_list_data *d = data;

	bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", d->big,
		   d->sync_handle);

	if (d->big_sync_term)
		hci_le_big_terminate_sync(hdev, d->big);

	if (d->pa_sync_term)
		return hci_le_pa_terminate_sync(hdev, d->sync_handle);

	return 0;
}
822 
/* Queue termination of BIG sync and/or PA sync for broadcast receiver
 * @conn, clearing the corresponding connection flags.  The allocated
 * iso_list_data is owned by the queued work (freed by
 * terminate_big_destroy()); on queueing failure it is freed here.
 */
static int hci_le_big_terminate(struct hci_dev *hdev, u8 big, struct hci_conn *conn)
{
	struct iso_list_data *d;
	int ret;

	bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", big, conn->sync_handle);

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->big = big;
	d->sync_handle = conn->sync_handle;
	d->pa_sync_term = test_and_clear_bit(HCI_CONN_PA_SYNC, &conn->flags);
	d->big_sync_term = test_and_clear_bit(HCI_CONN_BIG_SYNC, &conn->flags);

	ret = hci_cmd_sync_queue(hdev, big_terminate_sync, d,
				 terminate_big_destroy);
	if (ret)
		kfree(d);

	return ret;
}
846 
/* Cleanup BIS connection
 *
 * Detects if there are any BIS left connected in a BIG.
 * broadcaster: Remove advertising instance and terminate BIG.
 * broadcast receiver: Terminate BIG sync and terminate PA sync.
 */
static void bis_cleanup(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_conn *bis;

	bt_dev_dbg(hdev, "conn %p", conn);

	if (conn->role == HCI_ROLE_MASTER) {
		if (!test_and_clear_bit(HCI_CONN_PER_ADV, &conn->flags))
			return;

		/* Check if ISO connection is a BIS and terminate advertising
		 * set and BIG if there are no other connections using it.
		 */
		bis = hci_conn_hash_lookup_big(hdev, conn->iso_qos.bcast.big);
		if (bis)
			return;

		hci_le_terminate_big(hdev, conn);
	} else {
		/* Receiver side: only terminate once the last broadcast
		 * connection of this BIG is gone.
		 */
		bis = hci_conn_hash_lookup_big_any_dst(hdev,
						       conn->iso_qos.bcast.big);

		if (bis)
			return;

		hci_le_big_terminate(hdev, conn->iso_qos.bcast.big,
				     conn);
	}
}
883 
remove_cig_sync(struct hci_dev * hdev,void * data)884 static int remove_cig_sync(struct hci_dev *hdev, void *data)
885 {
886 	u8 handle = PTR_UINT(data);
887 
888 	return hci_le_remove_cig_sync(hdev, handle);
889 }
890 
/* Queue removal of CIG @handle; the id travels packed into the work's
 * data pointer, so no allocation is needed.
 */
static int hci_le_remove_cig(struct hci_dev *hdev, u8 handle)
{
	bt_dev_dbg(hdev, "handle 0x%2.2x", handle);

	return hci_cmd_sync_queue(hdev, remove_cig_sync, UINT_PTR(handle),
				  NULL);
}
898 
/* Connection-hash iterator callback: count unicast ISO connections that
 * belong to the CIG recorded in @data.
 */
static void find_cis(struct hci_conn *conn, void *data)
{
	struct iso_list_data *d = data;

	/* Ignore broadcast or if CIG don't match */
	if (!bacmp(&conn->dst, BDADDR_ANY) || d->cig != conn->iso_qos.ucast.cig)
		return;

	d->count++;
}
909 
/* Cleanup CIS connection:
 *
 * Detects if there are any CIS left connected in a CIG and removes the
 * CIG once the last one is gone.
 */
static void cis_cleanup(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct iso_list_data d;

	if (conn->iso_qos.ucast.cig == BT_ISO_QOS_CIG_UNSET)
		return;

	memset(&d, 0, sizeof(d));
	d.cig = conn->iso_qos.ucast.cig;

	/* Check if ISO connection is a CIS and remove CIG if there are
	 * no other connections using it.
	 */
	hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_BOUND, &d);
	hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_CONNECT, &d);
	hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_CONNECTED, &d);
	if (d.count)
		return;

	hci_le_remove_cig(hdev, conn->iso_qos.ucast.cig);
}
936 
/* Allocate a temporary "unset" connection handle above the valid HCI
 * handle range (> HCI_CONN_HANDLE_MAX) for use until the controller
 * assigns a real handle; returned to the IDA in hci_conn_cleanup().
 */
static int hci_conn_hash_alloc_unset(struct hci_dev *hdev)
{
	return ida_alloc_range(&hdev->unset_handle_ida, HCI_CONN_HANDLE_MAX + 1,
			       U16_MAX, GFP_ATOMIC);
}
942 
/* Allocate and initialize a new hci_conn object of the given link @type and
 * insert it into @hdev's connection hash.
 *
 * @handle is either a real controller-assigned handle or a placeholder above
 * HCI_CONN_HANDLE_MAX (see hci_conn_add_unset()).  Returns the new connection
 * or an ERR_PTR: -ECONNREFUSED when the controller lacks the buffers/packet
 * types needed for this link type, -ENOMEM on allocation failure.
 */
static struct hci_conn *__hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
				       u8 role, u16 handle)
{
	struct hci_conn *conn;

	/* Refuse link types the controller cannot actually carry over HCI */
	switch (type) {
	case ACL_LINK:
		if (!hdev->acl_mtu)
			return ERR_PTR(-ECONNREFUSED);
		break;
	case ISO_LINK:
		if (hdev->iso_mtu)
			/* Dedicated ISO Buffer exists */
			break;
		fallthrough;
	case LE_LINK:
		/* LE (and ISO without dedicated buffers) falls back to the
		 * ACL buffers when no LE buffers are advertised; either way
		 * the usable MTU must be at least HCI_MIN_LE_MTU.
		 */
		if (hdev->le_mtu && hdev->le_mtu < HCI_MIN_LE_MTU)
			return ERR_PTR(-ECONNREFUSED);
		if (!hdev->le_mtu && hdev->acl_mtu < HCI_MIN_LE_MTU)
			return ERR_PTR(-ECONNREFUSED);
		break;
	case SCO_LINK:
	case ESCO_LINK:
		if (!hdev->sco_pkts)
			/* Controller does not support SCO or eSCO over HCI */
			return ERR_PTR(-ECONNREFUSED);
		break;
	default:
		return ERR_PTR(-ECONNREFUSED);
	}

	bt_dev_dbg(hdev, "dst %pMR handle 0x%4.4x", dst, handle);

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn)
		return ERR_PTR(-ENOMEM);

	/* Generic defaults; the "invalid"/0xff sentinels mean "not yet
	 * reported by the controller or remote".
	 */
	bacpy(&conn->dst, dst);
	bacpy(&conn->src, &hdev->bdaddr);
	conn->handle = handle;
	conn->hdev  = hdev;
	conn->type  = type;
	conn->role  = role;
	conn->mode  = HCI_CM_ACTIVE;
	conn->state = BT_OPEN;
	conn->auth_type = HCI_AT_GENERAL_BONDING;
	conn->io_capability = hdev->io_capability;
	conn->remote_auth = 0xff;
	conn->key_type = 0xff;
	conn->rssi = HCI_RSSI_INVALID;
	conn->tx_power = HCI_TX_POWER_INVALID;
	conn->max_tx_power = HCI_TX_POWER_INVALID;
	conn->sync_handle = HCI_SYNC_HANDLE_INVALID;

	set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;

	/* Set Default Authenticated payload timeout to 30s */
	conn->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;

	if (conn->role == HCI_ROLE_MASTER)
		conn->out = true;

	/* Per-link-type setup: packet types, data MTU and cleanup hooks */
	switch (type) {
	case ACL_LINK:
		conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
		conn->mtu = hdev->acl_mtu;
		break;
	case LE_LINK:
		/* conn->src should reflect the local identity address */
		hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
		conn->mtu = hdev->le_mtu ? hdev->le_mtu : hdev->acl_mtu;
		break;
	case ISO_LINK:
		/* conn->src should reflect the local identity address */
		hci_copy_identity_address(hdev, &conn->src, &conn->src_type);

		/* set proper cleanup function */
		if (!bacmp(dst, BDADDR_ANY))
			conn->cleanup = bis_cleanup;
		else if (conn->role == HCI_ROLE_MASTER)
			conn->cleanup = cis_cleanup;

		conn->mtu = hdev->iso_mtu ? hdev->iso_mtu :
			    hdev->le_mtu ? hdev->le_mtu : hdev->acl_mtu;
		break;
	case SCO_LINK:
		if (lmp_esco_capable(hdev))
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
		else
			conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;

		conn->mtu = hdev->sco_mtu;
		break;
	case ESCO_LINK:
		conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
		conn->mtu = hdev->sco_mtu;
		break;
	}

	skb_queue_head_init(&conn->data_q);

	INIT_LIST_HEAD(&conn->chan_list);
	INIT_LIST_HEAD(&conn->link_list);

	INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
	INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
	INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
	INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);

	atomic_set(&conn->refcnt, 0);

	/* The device reference is dropped in hci_conn_cleanup() */
	hci_dev_hold(hdev);

	hci_conn_hash_add(hdev, conn);

	/* The SCO and eSCO connections will only be notified when their
	 * setup has been completed. This is different to ACL links which
	 * can be notified right away.
	 */
	if (conn->type != SCO_LINK && conn->type != ESCO_LINK) {
		if (hdev->notify)
			hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);
	}

	hci_conn_init_sysfs(conn);

	return conn;
}
1073 
/* Add a connection for which the controller has not assigned a handle yet.
 * A placeholder handle above HCI_CONN_HANDLE_MAX is reserved instead and
 * later replaced via hci_conn_set_handle().
 */
struct hci_conn *hci_conn_add_unset(struct hci_dev *hdev, int type,
				    bdaddr_t *dst, u8 role)
{
	int handle;

	bt_dev_dbg(hdev, "dst %pMR", dst);

	handle = hci_conn_hash_alloc_unset(hdev);
	if (unlikely(handle < 0))
		return ERR_PTR(-ECONNREFUSED);

	return __hci_conn_add(hdev, type, dst, role, handle);
}
1087 
/* Add a connection with a controller-assigned handle; the handle must lie
 * within the valid HCI range (0x0000..HCI_CONN_HANDLE_MAX).
 */
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
			      u8 role, u16 handle)
{
	return handle > HCI_CONN_HANDLE_MAX ? ERR_PTR(-EINVAL) :
	       __hci_conn_add(hdev, type, dst, role, handle);
}
1096 
/* Fail a child (SCO/eSCO/CIS) connection whose setup never completed, so it
 * does not linger after its parent link goes away.
 */
static void hci_conn_cleanup_child(struct hci_conn *conn, u8 reason)
{
	if (!reason)
		reason = HCI_ERROR_REMOTE_USER_TERM;

	/* Due to race, SCO/ISO conn might be not established yet at this
	 * point, and nothing else will clean it up. In other cases it is
	 * done via HCI events.
	 */
	if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
		if (HCI_CONN_HANDLE_UNSET(conn->handle))
			hci_conn_failed(conn, reason);
	} else if (conn->type == ISO_LINK) {
		if (conn->state != BT_CONNECTED &&
		    !test_bit(HCI_CONN_CREATE_CIS, &conn->flags))
			hci_conn_failed(conn, reason);
	}
}
1119 
/* Detach @conn from its parent/child link relationships.
 *
 * For a parent connection (no conn->parent) this recursively unlinks every
 * child and, while the device is still up, fails children whose setup never
 * completed.  The recursion is at most one level deep since children always
 * have conn->parent set.  For a child connection the link is removed from
 * the parent's RCU list and the parent references taken in hci_conn_link()
 * are released.
 */
static void hci_conn_unlink(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	bt_dev_dbg(hdev, "hcon %p", conn);

	if (!conn->parent) {
		struct hci_link *link, *t;

		list_for_each_entry_safe(link, t, &conn->link_list, list) {
			struct hci_conn *child = link->conn;

			hci_conn_unlink(child);

			/* If hdev is down it means
			 * hci_dev_close_sync/hci_conn_hash_flush is in progress
			 * and links don't need to be cleanup as all connections
			 * would be cleanup.
			 */
			if (!test_bit(HCI_UP, &hdev->flags))
				continue;

			hci_conn_cleanup_child(child, conn->abort_reason);
		}

		return;
	}

	if (!conn->link)
		return;

	/* Wait for readers before freeing the link below */
	list_del_rcu(&conn->link->list);
	synchronize_rcu();

	/* Drop the hold + get taken on the parent in hci_conn_link() */
	hci_conn_drop(conn->parent);
	hci_conn_put(conn->parent);
	conn->parent = NULL;

	kfree(conn->link);
	conn->link = NULL;
}
1161 
hci_conn_del(struct hci_conn * conn)1162 void hci_conn_del(struct hci_conn *conn)
1163 {
1164 	struct hci_dev *hdev = conn->hdev;
1165 
1166 	BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);
1167 
1168 	hci_conn_unlink(conn);
1169 
1170 	cancel_delayed_work_sync(&conn->disc_work);
1171 	cancel_delayed_work_sync(&conn->auto_accept_work);
1172 	cancel_delayed_work_sync(&conn->idle_work);
1173 
1174 	if (conn->type == ACL_LINK) {
1175 		/* Unacked frames */
1176 		hdev->acl_cnt += conn->sent;
1177 	} else if (conn->type == LE_LINK) {
1178 		cancel_delayed_work(&conn->le_conn_timeout);
1179 
1180 		if (hdev->le_pkts)
1181 			hdev->le_cnt += conn->sent;
1182 		else
1183 			hdev->acl_cnt += conn->sent;
1184 	} else {
1185 		/* Unacked ISO frames */
1186 		if (conn->type == ISO_LINK) {
1187 			if (hdev->iso_pkts)
1188 				hdev->iso_cnt += conn->sent;
1189 			else if (hdev->le_pkts)
1190 				hdev->le_cnt += conn->sent;
1191 			else
1192 				hdev->acl_cnt += conn->sent;
1193 		}
1194 	}
1195 
1196 	skb_queue_purge(&conn->data_q);
1197 
1198 	/* Remove the connection from the list and cleanup its remaining
1199 	 * state. This is a separate function since for some cases like
1200 	 * BT_CONNECT_SCAN we *only* want the cleanup part without the
1201 	 * rest of hci_conn_del.
1202 	 */
1203 	hci_conn_cleanup(conn);
1204 }
1205 
/* Pick the hci_dev to use for a connection to @dst.
 *
 * With a concrete @src, the device whose (identity) address matches @src and
 * @src_type is chosen; with BDADDR_ANY, the first up device whose own address
 * differs from @dst wins.  A reference is taken on the returned device
 * (caller releases with hci_dev_put()); NULL when no device qualifies.
 */
struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, uint8_t src_type)
{
	struct hci_dev *hdev = NULL, *d;
	int use_src = bacmp(src, BDADDR_ANY);

	BT_DBG("%pMR -> %pMR", src, dst);

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(d, &hci_dev_list, list) {
		bdaddr_t id_addr;
		u8 id_addr_type;

		if (!test_bit(HCI_UP, &d->flags) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Simple routing:
		 *   No source address - find interface with bdaddr != dst
		 *   Source address    - find interface with bdaddr == src
		 */
		if (!use_src) {
			if (bacmp(&d->bdaddr, dst)) {
				hdev = d;
				break;
			}
			continue;
		}

		if (src_type == BDADDR_BREDR) {
			if (!lmp_bredr_capable(d))
				continue;
			bacpy(&id_addr, &d->bdaddr);
			id_addr_type = BDADDR_BREDR;
		} else {
			if (!lmp_le_capable(d))
				continue;

			hci_copy_identity_address(d, &id_addr,
						  &id_addr_type);

			/* Convert from HCI to three-value type */
			if (id_addr_type == ADDR_LE_DEV_PUBLIC)
				id_addr_type = BDADDR_LE_PUBLIC;
			else
				id_addr_type = BDADDR_LE_RANDOM;
		}

		if (!bacmp(&id_addr, src) && id_addr_type == src_type) {
			hdev = d;
			break;
		}
	}

	if (hdev)
		hdev = hci_dev_hold(hdev);

	read_unlock(&hci_dev_list_lock);
	return hdev;
}
EXPORT_SYMBOL(hci_get_route);
1265 
/* This function requires the caller holds hdev->lock */
static void hci_le_conn_failed(struct hci_conn *conn, u8 status)
{
	struct hci_dev *hdev = conn->hdev;

	/* Undo the passive-scan/params state set up for this attempt and
	 * notify mgmt of the failure (@status is the HCI error code).
	 */
	hci_connect_le_scan_cleanup(conn, status);

	/* Enable advertising in case this was a failed connection
	 * attempt as a peripheral.
	 */
	hci_enable_advertising(hdev);
}
1278 
1279 /* This function requires the caller holds hdev->lock */
hci_conn_failed(struct hci_conn * conn,u8 status)1280 void hci_conn_failed(struct hci_conn *conn, u8 status)
1281 {
1282 	struct hci_dev *hdev = conn->hdev;
1283 
1284 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
1285 
1286 	switch (conn->type) {
1287 	case LE_LINK:
1288 		hci_le_conn_failed(conn, status);
1289 		break;
1290 	case ACL_LINK:
1291 		mgmt_connect_failed(hdev, &conn->dst, conn->type,
1292 				    conn->dst_type, status);
1293 		break;
1294 	}
1295 
1296 	/* In case of BIG/PA sync failed, clear conn flags so that
1297 	 * the conns will be correctly cleaned up by ISO layer
1298 	 */
1299 	test_and_clear_bit(HCI_CONN_BIG_SYNC_FAILED, &conn->flags);
1300 	test_and_clear_bit(HCI_CONN_PA_SYNC_FAILED, &conn->flags);
1301 
1302 	conn->state = BT_CLOSED;
1303 	hci_connect_cfm(conn, status);
1304 	hci_conn_del(conn);
1305 }
1306 
/* This function requires the caller holds hdev->lock */
/* Install the controller-assigned @handle on @conn, releasing any
 * placeholder handle reserved by hci_conn_add_unset().
 *
 * Returns 0 on success (or when the handle is already set),
 * HCI_ERROR_INVALID_PARAMETERS for out-of-range handles, or the pending
 * abort reason when the connection is being aborted.
 */
u8 hci_conn_set_handle(struct hci_conn *conn, u16 handle)
{
	struct hci_dev *hdev = conn->hdev;

	bt_dev_dbg(hdev, "hcon %p handle 0x%4.4x", conn, handle);

	if (conn->handle == handle)
		return 0;

	if (handle > HCI_CONN_HANDLE_MAX) {
		bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x",
			   handle, HCI_CONN_HANDLE_MAX);
		return HCI_ERROR_INVALID_PARAMETERS;
	}

	/* If abort_reason has been sent it means the connection is being
	 * aborted and the handle shall not be changed.
	 */
	if (conn->abort_reason)
		return conn->abort_reason;

	/* Return the placeholder handle to the IDA before overwriting it */
	if (HCI_CONN_HANDLE_UNSET(conn->handle))
		ida_free(&hdev->unset_handle_ida, conn->handle);

	conn->handle = handle;

	return 0;
}
1336 
create_le_conn_complete(struct hci_dev * hdev,void * data,int err)1337 static void create_le_conn_complete(struct hci_dev *hdev, void *data, int err)
1338 {
1339 	struct hci_conn *conn;
1340 	u16 handle = PTR_UINT(data);
1341 
1342 	conn = hci_conn_hash_lookup_handle(hdev, handle);
1343 	if (!conn)
1344 		return;
1345 
1346 	bt_dev_dbg(hdev, "err %d", err);
1347 
1348 	hci_dev_lock(hdev);
1349 
1350 	if (!err) {
1351 		hci_connect_le_scan_cleanup(conn, 0x00);
1352 		goto done;
1353 	}
1354 
1355 	/* Check if connection is still pending */
1356 	if (conn != hci_lookup_le_connect(hdev))
1357 		goto done;
1358 
1359 	/* Flush to make sure we send create conn cancel command if needed */
1360 	flush_delayed_work(&conn->le_conn_timeout);
1361 	hci_conn_failed(conn, bt_status(err));
1362 
1363 done:
1364 	hci_dev_unlock(hdev);
1365 }
1366 
hci_connect_le_sync(struct hci_dev * hdev,void * data)1367 static int hci_connect_le_sync(struct hci_dev *hdev, void *data)
1368 {
1369 	struct hci_conn *conn;
1370 	u16 handle = PTR_UINT(data);
1371 
1372 	conn = hci_conn_hash_lookup_handle(hdev, handle);
1373 	if (!conn)
1374 		return 0;
1375 
1376 	bt_dev_dbg(hdev, "conn %p", conn);
1377 
1378 	clear_bit(HCI_CONN_SCANNING, &conn->flags);
1379 	conn->state = BT_CONNECT;
1380 
1381 	return hci_le_create_conn_sync(hdev, conn);
1382 }
1383 
/* Initiate a direct LE connection to @dst.
 *
 * @dst_resolved: true when the controller already resolved @dst to an
 * identity address; otherwise the connection may be redirected to the
 * stored RPA for a known IRK.  Returns the held connection on success or
 * an ERR_PTR (-EOPNOTSUPP/-ECONNREFUSED when LE is unavailable/disabled,
 * -EBUSY when another attempt or an established connection exists).
 */
struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
				u8 dst_type, bool dst_resolved, u8 sec_level,
				u16 conn_timeout, u8 role)
{
	struct hci_conn *conn;
	struct smp_irk *irk;
	int err;

	/* Let's make sure that le is enabled.*/
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (lmp_le_capable(hdev))
			return ERR_PTR(-ECONNREFUSED);

		return ERR_PTR(-EOPNOTSUPP);
	}

	/* Since the controller supports only one LE connection attempt at a
	 * time, we return -EBUSY if there is any connection attempt running.
	 */
	if (hci_lookup_le_connect(hdev))
		return ERR_PTR(-EBUSY);

	/* If there's already a connection object but it's not in
	 * scanning state it means it must already be established, in
	 * which case we can't do anything else except report a failure
	 * to connect.
	 */
	conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
	if (conn && !test_bit(HCI_CONN_SCANNING, &conn->flags)) {
		return ERR_PTR(-EBUSY);
	}

	/* Check if the destination address has been resolved by the controller
	 * since if it did then the identity address shall be used.
	 */
	if (!dst_resolved) {
		/* When given an identity address with existing identity
		 * resolving key, the connection needs to be established
		 * to a resolvable random address.
		 *
		 * Storing the resolvable random address is required here
		 * to handle connection failures. The address will later
		 * be resolved back into the original identity address
		 * from the connect request.
		 */
		irk = hci_find_irk_by_addr(hdev, dst, dst_type);
		if (irk && bacmp(&irk->rpa, BDADDR_ANY)) {
			dst = &irk->rpa;
			dst_type = ADDR_LE_DEV_RANDOM;
		}
	}

	/* Reuse a scanning connection object if one exists, otherwise
	 * allocate a fresh one (held on behalf of the caller).
	 */
	if (conn) {
		bacpy(&conn->dst, dst);
	} else {
		conn = hci_conn_add_unset(hdev, LE_LINK, dst, role);
		if (IS_ERR(conn))
			return conn;
		hci_conn_hold(conn);
		conn->pending_sec_level = sec_level;
	}

	conn->dst_type = dst_type;
	conn->sec_level = BT_SECURITY_LOW;
	conn->conn_timeout = conn_timeout;

	/* The actual create-connection command is issued asynchronously;
	 * create_le_conn_complete() handles failures.
	 */
	err = hci_cmd_sync_queue(hdev, hci_connect_le_sync,
				 UINT_PTR(conn->handle),
				 create_le_conn_complete);
	if (err) {
		hci_conn_del(conn);
		return ERR_PTR(err);
	}

	return conn;
}
1460 
is_connected(struct hci_dev * hdev,bdaddr_t * addr,u8 type)1461 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
1462 {
1463 	struct hci_conn *conn;
1464 
1465 	conn = hci_conn_hash_lookup_le(hdev, addr, type);
1466 	if (!conn)
1467 		return false;
1468 
1469 	if (conn->state != BT_CONNECTED)
1470 		return false;
1471 
1472 	return true;
1473 }
1474 
/* This function requires the caller holds hdev->lock */
/* Mark @addr for an explicit (user-requested) connection attempt, creating
 * connection parameters if none exist and queueing them on pend_le_conns so
 * passive scanning will connect.  Returns 0, -EISCONN when already
 * connected, or -ENOMEM.
 */
static int hci_explicit_conn_params_set(struct hci_dev *hdev,
					bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	if (is_connected(hdev, addr, addr_type))
		return -EISCONN;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (!params) {
		params = hci_conn_params_add(hdev, addr, addr_type);
		if (!params)
			return -ENOMEM;

		/* If we created new params, mark them to be deleted in
		 * hci_connect_le_scan_cleanup. It's different case than
		 * existing disabled params, those will stay after cleanup.
		 */
		params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
	}

	/* We're trying to connect, so make sure params are at pend_le_conns */
	if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
	    params->auto_connect == HCI_AUTO_CONN_REPORT ||
	    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
		hci_pend_le_list_del_init(params);
		hci_pend_le_list_add(params, &hdev->pend_le_conns);
	}

	params->explicit_connect = true;

	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
	       params->auto_connect);

	return 0;
}
1512 
qos_set_big(struct hci_dev * hdev,struct bt_iso_qos * qos)1513 static int qos_set_big(struct hci_dev *hdev, struct bt_iso_qos *qos)
1514 {
1515 	struct hci_conn *conn;
1516 	u8  big;
1517 
1518 	/* Allocate a BIG if not set */
1519 	if (qos->bcast.big == BT_ISO_QOS_BIG_UNSET) {
1520 		for (big = 0x00; big < 0xef; big++) {
1521 
1522 			conn = hci_conn_hash_lookup_big(hdev, big);
1523 			if (!conn)
1524 				break;
1525 		}
1526 
1527 		if (big == 0xef)
1528 			return -EADDRNOTAVAIL;
1529 
1530 		/* Update BIG */
1531 		qos->bcast.big = big;
1532 	}
1533 
1534 	return 0;
1535 }
1536 
qos_set_bis(struct hci_dev * hdev,struct bt_iso_qos * qos)1537 static int qos_set_bis(struct hci_dev *hdev, struct bt_iso_qos *qos)
1538 {
1539 	struct hci_conn *conn;
1540 	u8  bis;
1541 
1542 	/* Allocate BIS if not set */
1543 	if (qos->bcast.bis == BT_ISO_QOS_BIS_UNSET) {
1544 		/* Find an unused adv set to advertise BIS, skip instance 0x00
1545 		 * since it is reserved as general purpose set.
1546 		 */
1547 		for (bis = 0x01; bis < hdev->le_num_of_adv_sets;
1548 		     bis++) {
1549 
1550 			conn = hci_conn_hash_lookup_bis(hdev, BDADDR_ANY, bis);
1551 			if (!conn)
1552 				break;
1553 		}
1554 
1555 		if (bis == hdev->le_num_of_adv_sets)
1556 			return -EADDRNOTAVAIL;
1557 
1558 		/* Update BIS */
1559 		qos->bcast.bis = bis;
1560 	}
1561 
1562 	return 0;
1563 }
1564 
1565 /* This function requires the caller holds hdev->lock */
hci_add_bis(struct hci_dev * hdev,bdaddr_t * dst,struct bt_iso_qos * qos,__u8 base_len,__u8 * base)1566 static struct hci_conn *hci_add_bis(struct hci_dev *hdev, bdaddr_t *dst,
1567 				    struct bt_iso_qos *qos, __u8 base_len,
1568 				    __u8 *base)
1569 {
1570 	struct hci_conn *conn;
1571 	int err;
1572 
1573 	/* Let's make sure that le is enabled.*/
1574 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1575 		if (lmp_le_capable(hdev))
1576 			return ERR_PTR(-ECONNREFUSED);
1577 		return ERR_PTR(-EOPNOTSUPP);
1578 	}
1579 
1580 	err = qos_set_big(hdev, qos);
1581 	if (err)
1582 		return ERR_PTR(err);
1583 
1584 	err = qos_set_bis(hdev, qos);
1585 	if (err)
1586 		return ERR_PTR(err);
1587 
1588 	/* Check if the LE Create BIG command has already been sent */
1589 	conn = hci_conn_hash_lookup_per_adv_bis(hdev, dst, qos->bcast.big,
1590 						qos->bcast.big);
1591 	if (conn)
1592 		return ERR_PTR(-EADDRINUSE);
1593 
1594 	/* Check BIS settings against other bound BISes, since all
1595 	 * BISes in a BIG must have the same value for all parameters
1596 	 */
1597 	conn = hci_conn_hash_lookup_big(hdev, qos->bcast.big);
1598 
1599 	if (conn && (memcmp(qos, &conn->iso_qos, sizeof(*qos)) ||
1600 		     base_len != conn->le_per_adv_data_len ||
1601 		     memcmp(conn->le_per_adv_data, base, base_len)))
1602 		return ERR_PTR(-EADDRINUSE);
1603 
1604 	conn = hci_conn_add_unset(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
1605 	if (IS_ERR(conn))
1606 		return conn;
1607 
1608 	conn->state = BT_CONNECT;
1609 
1610 	hci_conn_hold(conn);
1611 	return conn;
1612 }
1613 
/* This function requires the caller holds hdev->lock */
/* Connect to an LE device indirectly: instead of issuing a create-connection
 * command, queue the address on pend_le_conns and let passive scanning
 * trigger the connection when the device is seen advertising.  Returns a
 * held connection object or an ERR_PTR.
 */
struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
				     u8 dst_type, u8 sec_level,
				     u16 conn_timeout,
				     enum conn_reasons conn_reason)
{
	struct hci_conn *conn;

	/* Let's make sure that le is enabled.*/
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (lmp_le_capable(hdev))
			return ERR_PTR(-ECONNREFUSED);

		return ERR_PTR(-EOPNOTSUPP);
	}

	/* Some devices send ATT messages as soon as the physical link is
	 * established. To be able to handle these ATT messages, the user-
	 * space first establishes the connection and then starts the pairing
	 * process.
	 *
	 * So if a hci_conn object already exists for the following connection
	 * attempt, we simply update pending_sec_level and auth_type fields
	 * and return the object found.
	 */
	conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
	if (conn) {
		if (conn->pending_sec_level < sec_level)
			conn->pending_sec_level = sec_level;
		goto done;
	}

	BT_DBG("requesting refresh of dst_addr");

	conn = hci_conn_add_unset(hdev, LE_LINK, dst, HCI_ROLE_MASTER);
	if (IS_ERR(conn))
		return conn;

	if (hci_explicit_conn_params_set(hdev, dst, dst_type) < 0) {
		hci_conn_del(conn);
		return ERR_PTR(-EBUSY);
	}

	conn->state = BT_CONNECT;
	set_bit(HCI_CONN_SCANNING, &conn->flags);
	conn->dst_type = dst_type;
	conn->sec_level = BT_SECURITY_LOW;
	conn->pending_sec_level = sec_level;
	conn->conn_timeout = conn_timeout;
	conn->conn_reason = conn_reason;

	/* Kick passive scanning so the new pend_le_conns entry is acted on */
	hci_update_passive_scan(hdev);

done:
	hci_conn_hold(conn);
	return conn;
}
1671 
/* Create (or reuse) an ACL connection to @dst, initiating the baseband
 * connection when the link is not already being set up.  Returns a held
 * connection or an ERR_PTR.
 */
struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
				 u8 sec_level, u8 auth_type,
				 enum conn_reasons conn_reason)
{
	struct hci_conn *acl;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return lmp_bredr_capable(hdev) ? ERR_PTR(-ECONNREFUSED) :
						 ERR_PTR(-EOPNOTSUPP);

	/* Reject outgoing connection to device with same BD ADDR against
	 * CVE-2020-26555
	 */
	if (!bacmp(&hdev->bdaddr, dst)) {
		bt_dev_dbg(hdev, "Reject connection with same BD_ADDR %pMR\n",
			   dst);
		return ERR_PTR(-ECONNREFUSED);
	}

	acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
	if (!acl) {
		acl = hci_conn_add_unset(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
		if (IS_ERR(acl))
			return acl;
	}

	hci_conn_hold(acl);

	acl->conn_reason = conn_reason;

	/* Only kick off the baseband connection when one is not already in
	 * progress or established.
	 */
	if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
		acl->sec_level = BT_SECURITY_LOW;
		acl->pending_sec_level = sec_level;
		acl->auth_type = auth_type;
		hci_acl_create_connection(acl);
	}

	return acl;
}
1713 
/* Link child @conn (SCO/eSCO/CIS) to @parent (ACL/LE).
 *
 * Takes a hold on the child and a get on the parent; both are released in
 * hci_conn_unlink().  Returns the existing link when already linked, NULL
 * when @conn belongs to another parent or on allocation failure.
 */
static struct hci_link *hci_conn_link(struct hci_conn *parent,
				      struct hci_conn *conn)
{
	struct hci_dev *hdev = parent->hdev;
	struct hci_link *link;

	bt_dev_dbg(hdev, "parent %p hcon %p", parent, conn);

	if (conn->link)
		return conn->link;

	/* Already linked to a different parent */
	if (conn->parent)
		return NULL;

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return NULL;

	link->conn = hci_conn_hold(conn);
	conn->link = link;
	conn->parent = hci_conn_get(parent);

	/* Use list_add_tail_rcu append to the list */
	list_add_tail_rcu(&link->list, &parent->link_list);

	return link;
}
1741 
/* Establish a SCO/eSCO connection to @dst, creating (or reusing) the
 * underlying ACL link first and linking the SCO connection to it.
 *
 * The actual SCO setup is started only once the ACL is connected; it may
 * also be deferred until a pending mode change completes.  Returns the SCO
 * connection or an ERR_PTR.
 */
struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
				 __u16 setting, struct bt_codec *codec)
{
	struct hci_conn *acl;
	struct hci_conn *sco;
	struct hci_link *link;

	acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING,
			      CONN_REASON_SCO_CONNECT);
	if (IS_ERR(acl))
		return acl;

	sco = hci_conn_hash_lookup_ba(hdev, type, dst);
	if (!sco) {
		sco = hci_conn_add_unset(hdev, type, dst, HCI_ROLE_MASTER);
		if (IS_ERR(sco)) {
			/* Drop the hold taken by hci_connect_acl() */
			hci_conn_drop(acl);
			return sco;
		}
	}

	link = hci_conn_link(acl, sco);
	if (!link) {
		hci_conn_drop(acl);
		hci_conn_drop(sco);
		return ERR_PTR(-ENOLINK);
	}

	sco->setting = setting;
	sco->codec = *codec;

	if (acl->state == BT_CONNECTED &&
	    (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
		set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
		hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);

		if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
			/* defer SCO setup until mode change completed */
			set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
			return sco;
		}

		hci_sco_setup(acl, 0x00);
	}

	return sco;
}
1789 
hci_le_create_big(struct hci_conn * conn,struct bt_iso_qos * qos)1790 static int hci_le_create_big(struct hci_conn *conn, struct bt_iso_qos *qos)
1791 {
1792 	struct hci_dev *hdev = conn->hdev;
1793 	struct hci_cp_le_create_big cp;
1794 	struct iso_list_data data;
1795 
1796 	memset(&cp, 0, sizeof(cp));
1797 
1798 	data.big = qos->bcast.big;
1799 	data.bis = qos->bcast.bis;
1800 	data.count = 0;
1801 
1802 	/* Create a BIS for each bound connection */
1803 	hci_conn_hash_list_state(hdev, bis_list, ISO_LINK,
1804 				 BT_BOUND, &data);
1805 
1806 	cp.handle = qos->bcast.big;
1807 	cp.adv_handle = qos->bcast.bis;
1808 	cp.num_bis  = data.count;
1809 	hci_cpu_to_le24(qos->bcast.out.interval, cp.bis.sdu_interval);
1810 	cp.bis.sdu = cpu_to_le16(qos->bcast.out.sdu);
1811 	cp.bis.latency =  cpu_to_le16(qos->bcast.out.latency);
1812 	cp.bis.rtn  = qos->bcast.out.rtn;
1813 	cp.bis.phy  = qos->bcast.out.phy;
1814 	cp.bis.packing = qos->bcast.packing;
1815 	cp.bis.framing = qos->bcast.framing;
1816 	cp.bis.encryption = qos->bcast.encryption;
1817 	memcpy(cp.bis.bcode, qos->bcast.bcode, sizeof(cp.bis.bcode));
1818 
1819 	return hci_send_cmd(hdev, HCI_OP_LE_CREATE_BIG, sizeof(cp), &cp);
1820 }
1821 
/* hci_cmd_sync work: (re)program all CIS(s) of the CIG identified by @data
 * with an HCI LE Set CIG Parameters command built from each connection's
 * QoS.  Returns 0 when the CIG no longer has any connections.
 */
static int set_cig_params_sync(struct hci_dev *hdev, void *data)
{
	u8 cig_id = PTR_UINT(data);
	struct hci_conn *conn;
	struct bt_iso_qos *qos;
	struct iso_cig_params pdu;
	u8 cis_id;

	conn = hci_conn_hash_lookup_cig(hdev, cig_id);
	if (!conn)
		return 0;

	memset(&pdu, 0, sizeof(pdu));

	/* CIG-level parameters come from the first connection found; all
	 * CIS in a CIG share them.
	 */
	qos = &conn->iso_qos;
	pdu.cp.cig_id = cig_id;
	hci_cpu_to_le24(qos->ucast.out.interval, pdu.cp.c_interval);
	hci_cpu_to_le24(qos->ucast.in.interval, pdu.cp.p_interval);
	pdu.cp.sca = qos->ucast.sca;
	pdu.cp.packing = qos->ucast.packing;
	pdu.cp.framing = qos->ucast.framing;
	pdu.cp.c_latency = cpu_to_le16(qos->ucast.out.latency);
	pdu.cp.p_latency = cpu_to_le16(qos->ucast.in.latency);

	/* Reprogram all CIS(s) with the same CIG, valid range are:
	 * num_cis: 0x00 to 0x1F
	 * cis_id: 0x00 to 0xEF
	 */
	for (cis_id = 0x00; cis_id < 0xf0 &&
	     pdu.cp.num_cis < ARRAY_SIZE(pdu.cis); cis_id++) {
		struct hci_cis_params *cis;

		conn = hci_conn_hash_lookup_cis(hdev, NULL, 0, cig_id, cis_id);
		if (!conn)
			continue;

		qos = &conn->iso_qos;

		cis = &pdu.cis[pdu.cp.num_cis++];
		cis->cis_id = cis_id;
		cis->c_sdu  = cpu_to_le16(conn->iso_qos.ucast.out.sdu);
		cis->p_sdu  = cpu_to_le16(conn->iso_qos.ucast.in.sdu);
		/* Fall back to the opposite direction's PHY when one
		 * direction is unset, since 0 is not a valid PHY.
		 */
		cis->c_phy  = qos->ucast.out.phy ? qos->ucast.out.phy :
			      qos->ucast.in.phy;
		cis->p_phy  = qos->ucast.in.phy ? qos->ucast.in.phy :
			      qos->ucast.out.phy;
		cis->c_rtn  = qos->ucast.out.rtn;
		cis->p_rtn  = qos->ucast.in.rtn;
	}

	if (!pdu.cp.num_cis)
		return 0;

	/* Only the populated CIS entries are sent on the wire */
	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_CIG_PARAMS,
				     sizeof(pdu.cp) +
				     pdu.cp.num_cis * sizeof(pdu.cis[0]), &pdu,
				     HCI_CMD_TIMEOUT);
}
1880 
/* Resolve CIG and CIS identifiers for @conn's QoS (allocating free ones when
 * unset) and queue the Set CIG Parameters command.  Returns false when no
 * identifier could be allocated, a chosen CIS is already taken, or the
 * command could not be queued.
 */
static bool hci_le_set_cig_params(struct hci_conn *conn, struct bt_iso_qos *qos)
{
	struct hci_dev *hdev = conn->hdev;
	struct iso_list_data data;

	memset(&data, 0, sizeof(data));

	/* Allocate first still reconfigurable CIG if not set */
	if (qos->ucast.cig == BT_ISO_QOS_CIG_UNSET) {
		for (data.cig = 0x00; data.cig < 0xf0; data.cig++) {
			data.count = 0;

			/* A CIG with a CIS in BT_CONNECT or BT_CONNECTED
			 * state can no longer be reconfigured, skip it.
			 */
			hci_conn_hash_list_state(hdev, find_cis, ISO_LINK,
						 BT_CONNECT, &data);
			if (data.count)
				continue;

			hci_conn_hash_list_state(hdev, find_cis, ISO_LINK,
						 BT_CONNECTED, &data);
			if (!data.count)
				break;
		}

		if (data.cig == 0xf0)
			return false;

		/* Update CIG */
		qos->ucast.cig = data.cig;
	}

	if (qos->ucast.cis != BT_ISO_QOS_CIS_UNSET) {
		/* Caller-chosen CIS must not already exist in this CIG */
		if (hci_conn_hash_lookup_cis(hdev, NULL, 0, qos->ucast.cig,
					     qos->ucast.cis))
			return false;
		goto done;
	}

	/* Allocate first available CIS if not set */
	for (data.cig = qos->ucast.cig, data.cis = 0x00; data.cis < 0xf0;
	     data.cis++) {
		if (!hci_conn_hash_lookup_cis(hdev, NULL, 0, data.cig,
					      data.cis)) {
			/* Update CIS */
			qos->ucast.cis = data.cis;
			break;
		}
	}

	if (qos->ucast.cis == BT_ISO_QOS_CIS_UNSET)
		return false;

done:
	if (hci_cmd_sync_queue(hdev, set_cig_params_sync,
			       UINT_PTR(qos->ucast.cig), NULL) < 0)
		return false;

	return true;
}
1939 
/* Create (or reuse) a CIS connection to @dst bound to the CIG/CIS ids in
 * @qos, filling unset QoS directions from their counterparts and programming
 * the CIG parameters.  Returns a held connection in BT_BOUND state, an
 * already-connected/bound matching CIS, or an ERR_PTR.
 */
struct hci_conn *hci_bind_cis(struct hci_dev *hdev, bdaddr_t *dst,
			      __u8 dst_type, struct bt_iso_qos *qos)
{
	struct hci_conn *cis;

	cis = hci_conn_hash_lookup_cis(hdev, dst, dst_type, qos->ucast.cig,
				       qos->ucast.cis);
	if (!cis) {
		cis = hci_conn_add_unset(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
		if (IS_ERR(cis))
			return cis;
		cis->cleanup = cis_cleanup;
		cis->dst_type = dst_type;
		cis->iso_qos.ucast.cig = BT_ISO_QOS_CIG_UNSET;
		cis->iso_qos.ucast.cis = BT_ISO_QOS_CIS_UNSET;
	}

	if (cis->state == BT_CONNECTED)
		return cis;

	/* Check if CIS has been set and the settings matches */
	if (cis->state == BT_BOUND &&
	    !memcmp(&cis->iso_qos, qos, sizeof(*qos)))
		return cis;

	/* Update LINK PHYs according to QoS preference */
	cis->le_tx_phy = qos->ucast.out.phy;
	cis->le_rx_phy = qos->ucast.in.phy;

	/* If output interval is not set use the input interval as it cannot be
	 * 0x000000.
	 */
	if (!qos->ucast.out.interval)
		qos->ucast.out.interval = qos->ucast.in.interval;

	/* If input interval is not set use the output interval as it cannot be
	 * 0x000000.
	 */
	if (!qos->ucast.in.interval)
		qos->ucast.in.interval = qos->ucast.out.interval;

	/* If output latency is not set use the input latency as it cannot be
	 * 0x0000.
	 */
	if (!qos->ucast.out.latency)
		qos->ucast.out.latency = qos->ucast.in.latency;

	/* If input latency is not set use the output latency as it cannot be
	 * 0x0000.
	 */
	if (!qos->ucast.in.latency)
		qos->ucast.in.latency = qos->ucast.out.latency;

	if (!hci_le_set_cig_params(cis, qos)) {
		hci_conn_drop(cis);
		return ERR_PTR(-EINVAL);
	}

	hci_conn_hold(cis);

	/* Remember the fully-resolved QoS for the settings check above */
	cis->iso_qos = *qos;
	cis->state = BT_BOUND;

	return cis;
}
2005 
hci_iso_setup_path(struct hci_conn * conn)2006 bool hci_iso_setup_path(struct hci_conn *conn)
2007 {
2008 	struct hci_dev *hdev = conn->hdev;
2009 	struct hci_cp_le_setup_iso_path cmd;
2010 
2011 	memset(&cmd, 0, sizeof(cmd));
2012 
2013 	if (conn->iso_qos.ucast.out.sdu) {
2014 		cmd.handle = cpu_to_le16(conn->handle);
2015 		cmd.direction = 0x00; /* Input (Host to Controller) */
2016 		cmd.path = 0x00; /* HCI path if enabled */
2017 		cmd.codec = 0x03; /* Transparent Data */
2018 
2019 		if (hci_send_cmd(hdev, HCI_OP_LE_SETUP_ISO_PATH, sizeof(cmd),
2020 				 &cmd) < 0)
2021 			return false;
2022 	}
2023 
2024 	if (conn->iso_qos.ucast.in.sdu) {
2025 		cmd.handle = cpu_to_le16(conn->handle);
2026 		cmd.direction = 0x01; /* Output (Controller to Host) */
2027 		cmd.path = 0x00; /* HCI path if enabled */
2028 		cmd.codec = 0x03; /* Transparent Data */
2029 
2030 		if (hci_send_cmd(hdev, HCI_OP_LE_SETUP_ISO_PATH, sizeof(cmd),
2031 				 &cmd) < 0)
2032 			return false;
2033 	}
2034 
2035 	return true;
2036 }
2037 
hci_conn_check_create_cis(struct hci_conn * conn)2038 int hci_conn_check_create_cis(struct hci_conn *conn)
2039 {
2040 	if (conn->type != ISO_LINK || !bacmp(&conn->dst, BDADDR_ANY))
2041 		return -EINVAL;
2042 
2043 	if (!conn->parent || conn->parent->state != BT_CONNECTED ||
2044 	    conn->state != BT_CONNECT || HCI_CONN_HANDLE_UNSET(conn->handle))
2045 		return 1;
2046 
2047 	return 0;
2048 }
2049 
/* hci_cmd_sync work callback: run the LE Create CIS procedure.
 * @data is unused (queued with NULL by hci_le_create_cis_pending()).
 */
static int hci_create_cis_sync(struct hci_dev *hdev, void *data)
{
	return hci_le_create_cis_sync(hdev);
}
2054 
/* Queue the LE Create CIS procedure if any connection is waiting for it.
 *
 * Returns -EBUSY when a Create CIS is already in flight (a connection has
 * HCI_CONN_CREATE_CIS set), 0 when nothing is pending or once the
 * procedure has been queued on the cmd_sync workqueue.
 */
int hci_le_create_cis_pending(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	bool pending = false;

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
		/* A marked connection means the procedure is already in
		 * progress; do not queue it a second time.
		 */
		if (test_bit(HCI_CONN_CREATE_CIS, &conn->flags)) {
			rcu_read_unlock();
			return -EBUSY;
		}

		/* 0 from hci_conn_check_create_cis() means ready to create */
		if (!hci_conn_check_create_cis(conn))
			pending = true;
	}

	rcu_read_unlock();

	if (!pending)
		return 0;

	/* Queue Create CIS */
	return hci_cmd_sync_queue(hdev, hci_create_cis_sync, NULL, NULL);
}
2080 
/* Fill in unset fields of one ISO I/O QoS direction with sensible
 * defaults derived from the connection: MTU for the SDU size, @phy for
 * a BT_ISO_PHY_ANY preference, and the LE ACL interval/latency.
 */
static void hci_iso_qos_setup(struct hci_dev *hdev, struct hci_conn *conn,
			      struct bt_iso_io_qos *qos, __u8 phy)
{
	/* Only set MTU if PHY is enabled */
	if (qos->phy && !qos->sdu)
		qos->sdu = conn->mtu;

	/* Use the same PHY as ACL if set to any */
	if (qos->phy == BT_ISO_PHY_ANY)
		qos->phy = phy;

	/* Use LE ACL connection interval if not set;
	 * ACL interval unit is 1.25 ms, QoS interval is in us.
	 */
	if (!qos->interval)
		qos->interval = conn->le_conn_interval * 1250;

	/* Use LE ACL connection latency if not set */
	if (!qos->latency)
		qos->latency = conn->le_conn_latency;
}
2101 
create_big_sync(struct hci_dev * hdev,void * data)2102 static int create_big_sync(struct hci_dev *hdev, void *data)
2103 {
2104 	struct hci_conn *conn = data;
2105 	struct bt_iso_qos *qos = &conn->iso_qos;
2106 	u16 interval, sync_interval = 0;
2107 	u32 flags = 0;
2108 	int err;
2109 
2110 	if (qos->bcast.out.phy == 0x02)
2111 		flags |= MGMT_ADV_FLAG_SEC_2M;
2112 
2113 	/* Align intervals */
2114 	interval = (qos->bcast.out.interval / 1250) * qos->bcast.sync_factor;
2115 
2116 	if (qos->bcast.bis)
2117 		sync_interval = interval * 4;
2118 
2119 	err = hci_start_per_adv_sync(hdev, qos->bcast.bis, conn->le_per_adv_data_len,
2120 				     conn->le_per_adv_data, flags, interval,
2121 				     interval, sync_interval);
2122 	if (err)
2123 		return err;
2124 
2125 	return hci_le_create_big(conn, &conn->iso_qos);
2126 }
2127 
/* Completion callback for create_pa_sync(): log failures and release the
 * command parameters allocated by hci_pa_create_sync().
 */
static void create_pa_complete(struct hci_dev *hdev, void *data, int err)
{
	bt_dev_dbg(hdev, "");

	if (err)
		bt_dev_err(hdev, "Unable to create PA: %d", err);

	/* data is the hci_cp_le_pa_create_sync allocated by the caller */
	kfree(data);
}
2139 
create_pa_sync(struct hci_dev * hdev,void * data)2140 static int create_pa_sync(struct hci_dev *hdev, void *data)
2141 {
2142 	struct hci_cp_le_pa_create_sync *cp = data;
2143 	int err;
2144 
2145 	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_CREATE_SYNC,
2146 				    sizeof(*cp), cp, HCI_CMD_TIMEOUT);
2147 	if (err) {
2148 		hci_dev_clear_flag(hdev, HCI_PA_SYNC);
2149 		return err;
2150 	}
2151 
2152 	return hci_update_passive_scan_sync(hdev);
2153 }
2154 
/* Queue a Periodic Advertising Create Sync towards @dst.
 *
 * Returns -EBUSY when a PA sync is already in progress, -ENOMEM on
 * allocation failure, otherwise the hci_cmd_sync_queue() result.  The
 * allocated parameters are freed by create_pa_complete().
 */
int hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst, __u8 dst_type,
		       __u8 sid, struct bt_iso_qos *qos)
{
	struct hci_cp_le_pa_create_sync *cp;

	/* Only one PA sync procedure may run at a time */
	if (hci_dev_test_and_set_flag(hdev, HCI_PA_SYNC))
		return -EBUSY;

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp) {
		hci_dev_clear_flag(hdev, HCI_PA_SYNC);
		return -ENOMEM;
	}

	bacpy(&cp->addr, dst);
	cp->addr_type = dst_type;
	cp->sid = sid;
	cp->options = qos->bcast.options;
	cp->skip = cpu_to_le16(qos->bcast.skip);
	cp->sync_timeout = cpu_to_le16(qos->bcast.sync_timeout);
	cp->sync_cte_type = qos->bcast.sync_cte_type;

	/* Queue start pa_create_sync and scan */
	return hci_cmd_sync_queue(hdev, create_pa_sync, cp, create_pa_complete);
}
2180 
/* Send LE BIG Create Sync to synchronize to up to @num_bis BISes of the
 * broadcaster identified by @sync_handle.
 *
 * When @hcon is given, its QoS is updated with the BIG handle allocated
 * by qos_set_big().  Returns 0 on success or a negative errno.
 */
int hci_le_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon,
			   struct bt_iso_qos *qos,
			   __u16 sync_handle, __u8 num_bis, __u8 bis[])
{
	struct _packed {
		struct hci_cp_le_big_create_sync cp;
		__u8  bis[0x11];
	} pdu;
	int err;

	/* The command can carry at most 0x11 BIS indices */
	if (num_bis > sizeof(pdu.bis))
		return -EINVAL;

	err = qos_set_big(hdev, qos);
	if (err)
		return err;

	if (hcon)
		hcon->iso_qos.bcast.big = qos->bcast.big;

	memset(&pdu, 0, sizeof(pdu));
	pdu.cp.handle = qos->bcast.big;
	pdu.cp.sync_handle = cpu_to_le16(sync_handle);
	pdu.cp.encryption = qos->bcast.encryption;
	memcpy(pdu.cp.bcode, qos->bcast.bcode, sizeof(pdu.cp.bcode));
	pdu.cp.mse = qos->bcast.mse;
	pdu.cp.timeout = cpu_to_le16(qos->bcast.timeout);
	pdu.cp.num_bis = num_bis;
	memcpy(pdu.bis, bis, num_bis);

	/* Only send the fixed part plus the BIS entries actually used */
	return hci_send_cmd(hdev, HCI_OP_LE_BIG_CREATE_SYNC,
			    sizeof(pdu.cp) + num_bis, &pdu);
}
2214 
/* Completion callback for create_big_sync(): on failure notify the upper
 * layer and tear the broadcast connection down.
 */
static void create_big_complete(struct hci_dev *hdev, void *data, int err)
{
	struct hci_conn *conn = data;

	bt_dev_dbg(hdev, "conn %p", conn);

	if (!err)
		return;

	bt_dev_err(hdev, "Unable to create BIG: %d", err);
	hci_connect_cfm(conn, err);
	hci_conn_del(conn);
}
2227 
/* Bind a broadcast ISO (BIS) connection and stage its QoS and BASE data
 * for the subsequent periodic advertising / Create BIG procedures.
 *
 * Returns the bound hci_conn (state BT_BOUND) or an ERR_PTR on failure.
 *
 * Fix: the original assigned conn->le_tx_phy twice from the same value;
 * the duplicate dead store is removed.
 */
struct hci_conn *hci_bind_bis(struct hci_dev *hdev, bdaddr_t *dst,
			      struct bt_iso_qos *qos,
			      __u8 base_len, __u8 *base)
{
	struct hci_conn *conn;
	__u8 eir[HCI_MAX_PER_AD_LENGTH];

	/* Wrap the BASE in a Broadcast Audio Announcement (UUID 0x1851) */
	if (base_len && base)
		base_len = eir_append_service_data(eir, 0,  0x1851,
						   base, base_len);

	/* We need hci_conn object using the BDADDR_ANY as dst */
	conn = hci_add_bis(hdev, dst, qos, base_len, eir);
	if (IS_ERR(conn))
		return conn;

	/* Update LINK PHYs according to QoS preference */
	conn->le_tx_phy = qos->bcast.out.phy;

	/* Add Basic Announcement into Periodic Adv Data if BASE is set */
	if (base_len && base) {
		/* NOTE(review): this copies the whole eir buffer even though
		 * only base_len bytes were written above; the trailing bytes
		 * are uninitialized stack data but le_per_adv_data_len limits
		 * what is used.  Consider copying only base_len bytes.
		 */
		memcpy(conn->le_per_adv_data,  eir, sizeof(eir));
		conn->le_per_adv_data_len = base_len;
	}

	hci_iso_qos_setup(hdev, conn, &qos->bcast.out,
			  conn->le_tx_phy ? conn->le_tx_phy :
			  hdev->le_tx_def_phys);

	conn->iso_qos = *qos;
	conn->state = BT_BOUND;

	return conn;
}
2263 
bis_mark_per_adv(struct hci_conn * conn,void * data)2264 static void bis_mark_per_adv(struct hci_conn *conn, void *data)
2265 {
2266 	struct iso_list_data *d = data;
2267 
2268 	/* Skip if not broadcast/ANY address */
2269 	if (bacmp(&conn->dst, BDADDR_ANY))
2270 		return;
2271 
2272 	if (d->big != conn->iso_qos.bcast.big ||
2273 	    d->bis == BT_ISO_QOS_BIS_UNSET ||
2274 	    d->bis != conn->iso_qos.bcast.bis)
2275 		return;
2276 
2277 	set_bit(HCI_CONN_PER_ADV, &conn->flags);
2278 }
2279 
/* Create and start a broadcast source: bind the BIS connection, then
 * queue the start-periodic-advertising + Create BIG work.
 *
 * Returns the connection or an ERR_PTR; on queueing failure the bound
 * connection reference is dropped again.
 *
 * NOTE(review): dst_type is currently unused here — verify against
 * callers whether it should be forwarded.
 */
struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst,
				 __u8 dst_type, struct bt_iso_qos *qos,
				 __u8 base_len, __u8 *base)
{
	struct hci_conn *conn;
	int err;
	struct iso_list_data data;

	conn = hci_bind_bis(hdev, dst, qos, base_len, base);
	if (IS_ERR(conn))
		return conn;

	data.big = qos->bcast.big;
	data.bis = qos->bcast.bis;

	/* Set HCI_CONN_PER_ADV for all bound connections, to mark that
	 * the start periodic advertising and create BIG commands have
	 * been queued
	 */
	hci_conn_hash_list_state(hdev, bis_mark_per_adv, ISO_LINK,
				 BT_BOUND, &data);

	/* Queue start periodic advertising and create BIG */
	err = hci_cmd_sync_queue(hdev, create_big_sync, conn,
				 create_big_complete);
	if (err < 0) {
		hci_conn_drop(conn);
		return ERR_PTR(err);
	}

	return conn;
}
2312 
/* Connect a unicast ISO (CIS) stream to @dst: establish (or reuse) the
 * LE ACL first, bind the CIS to it and queue the Create CIS procedure.
 *
 * Returns the CIS connection or an ERR_PTR on failure.
 */
struct hci_conn *hci_connect_cis(struct hci_dev *hdev, bdaddr_t *dst,
				 __u8 dst_type, struct bt_iso_qos *qos)
{
	struct hci_conn *le;
	struct hci_conn *cis;
	struct hci_link *link;

	/* While advertising we act as peripheral and wait for the remote
	 * central to connect; otherwise we initiate via connect/scan.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		le = hci_connect_le(hdev, dst, dst_type, false,
				    BT_SECURITY_LOW,
				    HCI_LE_CONN_TIMEOUT,
				    HCI_ROLE_SLAVE);
	else
		le = hci_connect_le_scan(hdev, dst, dst_type,
					 BT_SECURITY_LOW,
					 HCI_LE_CONN_TIMEOUT,
					 CONN_REASON_ISO_CONNECT);
	if (IS_ERR(le))
		return le;

	/* Fill in QoS defaults from the ACL parameters and default PHYs */
	hci_iso_qos_setup(hdev, le, &qos->ucast.out,
			  le->le_tx_phy ? le->le_tx_phy : hdev->le_tx_def_phys);
	hci_iso_qos_setup(hdev, le, &qos->ucast.in,
			  le->le_rx_phy ? le->le_rx_phy : hdev->le_rx_def_phys);

	cis = hci_bind_cis(hdev, dst, dst_type, qos);
	if (IS_ERR(cis)) {
		hci_conn_drop(le);
		return cis;
	}

	/* Link the CIS to its parent ACL */
	link = hci_conn_link(le, cis);
	if (!link) {
		hci_conn_drop(le);
		hci_conn_drop(cis);
		return ERR_PTR(-ENOLINK);
	}

	/* Link takes the refcount */
	hci_conn_drop(cis);

	cis->state = BT_CONNECT;

	hci_le_create_cis_pending(hdev);

	return cis;
}
2360 
2361 /* Check link security requirement */
hci_conn_check_link_mode(struct hci_conn * conn)2362 int hci_conn_check_link_mode(struct hci_conn *conn)
2363 {
2364 	BT_DBG("hcon %p", conn);
2365 
2366 	/* In Secure Connections Only mode, it is required that Secure
2367 	 * Connections is used and the link is encrypted with AES-CCM
2368 	 * using a P-256 authenticated combination key.
2369 	 */
2370 	if (hci_dev_test_flag(conn->hdev, HCI_SC_ONLY)) {
2371 		if (!hci_conn_sc_enabled(conn) ||
2372 		    !test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
2373 		    conn->key_type != HCI_LK_AUTH_COMBINATION_P256)
2374 			return 0;
2375 	}
2376 
2377 	 /* AES encryption is required for Level 4:
2378 	  *
2379 	  * BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 3, Part C
2380 	  * page 1319:
2381 	  *
2382 	  * 128-bit equivalent strength for link and encryption keys
2383 	  * required using FIPS approved algorithms (E0 not allowed,
2384 	  * SAFER+ not allowed, and P-192 not allowed; encryption key
2385 	  * not shortened)
2386 	  */
2387 	if (conn->sec_level == BT_SECURITY_FIPS &&
2388 	    !test_bit(HCI_CONN_AES_CCM, &conn->flags)) {
2389 		bt_dev_err(conn->hdev,
2390 			   "Invalid security: Missing AES-CCM usage");
2391 		return 0;
2392 	}
2393 
2394 	if (hci_conn_ssp_enabled(conn) &&
2395 	    !test_bit(HCI_CONN_ENCRYPT, &conn->flags))
2396 		return 0;
2397 
2398 	return 1;
2399 }
2400 
/* Authenticate remote device.
 *
 * Returns 1 when the connection is already authenticated at a sufficient
 * level, 0 when an authentication request has been sent (or is pending)
 * and the caller must wait for the completion event.
 */
static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
	BT_DBG("hcon %p", conn);

	/* Never lower an already pending security level request */
	if (conn->pending_sec_level > sec_level)
		sec_level = conn->pending_sec_level;

	if (sec_level > conn->sec_level)
		conn->pending_sec_level = sec_level;
	else if (test_bit(HCI_CONN_AUTH, &conn->flags))
		/* Already authenticated at the requested level */
		return 1;

	/* Make sure we preserve an existing MITM requirement*/
	auth_type |= (conn->auth_type & 0x01);

	conn->auth_type = auth_type;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(cp), &cp);

		/* Set the ENCRYPT_PEND to trigger encryption after
		 * authentication.
		 */
		if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags))
			set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
	}

	return 0;
}
2435 
2436 /* Encrypt the link */
hci_conn_encrypt(struct hci_conn * conn)2437 static void hci_conn_encrypt(struct hci_conn *conn)
2438 {
2439 	BT_DBG("hcon %p", conn);
2440 
2441 	if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
2442 		struct hci_cp_set_conn_encrypt cp;
2443 		cp.handle  = cpu_to_le16(conn->handle);
2444 		cp.encrypt = 0x01;
2445 		hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2446 			     &cp);
2447 	}
2448 }
2449 
/* Enable security: raise the connection's security to at least @sec_level.
 *
 * Returns 1 when the requirements are already met, 0 when authentication
 * and/or encryption has been started and the caller must wait for the
 * corresponding security event.
 */
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
		      bool initiator)
{
	BT_DBG("hcon %p", conn);

	/* LE security is handled entirely by SMP */
	if (conn->type == LE_LINK)
		return smp_conn_security(conn, sec_level);

	/* For sdp we don't need the link key. */
	if (sec_level == BT_SECURITY_SDP)
		return 1;

	/* For non 2.1 devices and low security level we don't need the link
	   key. */
	if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
		return 1;

	/* For other security levels we need the link key. */
	if (!test_bit(HCI_CONN_AUTH, &conn->flags))
		goto auth;

	/* Judge whether the existing key type is strong enough for the
	 * requested level; if so skip re-authentication and go straight
	 * to encryption.
	 */
	switch (conn->key_type) {
	case HCI_LK_AUTH_COMBINATION_P256:
		/* An authenticated FIPS approved combination key has
		 * sufficient security for security level 4 or lower.
		 */
		if (sec_level <= BT_SECURITY_FIPS)
			goto encrypt;
		break;
	case HCI_LK_AUTH_COMBINATION_P192:
		/* An authenticated combination key has sufficient security for
		 * security level 3 or lower.
		 */
		if (sec_level <= BT_SECURITY_HIGH)
			goto encrypt;
		break;
	case HCI_LK_UNAUTH_COMBINATION_P192:
	case HCI_LK_UNAUTH_COMBINATION_P256:
		/* An unauthenticated combination key has sufficient security
		 * for security level 2 or lower.
		 */
		if (sec_level <= BT_SECURITY_MEDIUM)
			goto encrypt;
		break;
	case HCI_LK_COMBINATION:
		/* A combination key has always sufficient security for the
		 * security levels 2 or lower. High security level requires the
		 * combination key is generated using maximum PIN code length
		 * (16). For pre 2.1 units.
		 */
		if (sec_level <= BT_SECURITY_MEDIUM || conn->pin_length == 16)
			goto encrypt;
		break;
	default:
		break;
	}

auth:
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
		return 0;

	if (initiator)
		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

	if (!hci_conn_auth(conn, sec_level, auth_type))
		return 0;

encrypt:
	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags)) {
		/* Ensure that the encryption key size has been read,
		 * otherwise stall the upper layer responses.
		 */
		if (!conn->enc_key_size)
			return 0;

		/* Nothing else needed, all requirements are met */
		return 1;
	}

	hci_conn_encrypt(conn);
	return 0;
}
EXPORT_SYMBOL(hci_conn_security);
2534 
2535 /* Check secure link requirement */
hci_conn_check_secure(struct hci_conn * conn,__u8 sec_level)2536 int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
2537 {
2538 	BT_DBG("hcon %p", conn);
2539 
2540 	/* Accept if non-secure or higher security level is required */
2541 	if (sec_level != BT_SECURITY_HIGH && sec_level != BT_SECURITY_FIPS)
2542 		return 1;
2543 
2544 	/* Accept if secure or higher security level is already present */
2545 	if (conn->sec_level == BT_SECURITY_HIGH ||
2546 	    conn->sec_level == BT_SECURITY_FIPS)
2547 		return 1;
2548 
2549 	/* Reject not secure link */
2550 	return 0;
2551 }
2552 EXPORT_SYMBOL(hci_conn_check_secure);
2553 
2554 /* Switch role */
hci_conn_switch_role(struct hci_conn * conn,__u8 role)2555 int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
2556 {
2557 	BT_DBG("hcon %p", conn);
2558 
2559 	if (role == conn->role)
2560 		return 1;
2561 
2562 	if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
2563 		struct hci_cp_switch_role cp;
2564 		bacpy(&cp.bdaddr, &conn->dst);
2565 		cp.role = role;
2566 		hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
2567 	}
2568 
2569 	return 0;
2570 }
2571 EXPORT_SYMBOL(hci_conn_switch_role);
2572 
2573 /* Enter active mode */
hci_conn_enter_active_mode(struct hci_conn * conn,__u8 force_active)2574 void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
2575 {
2576 	struct hci_dev *hdev = conn->hdev;
2577 
2578 	BT_DBG("hcon %p mode %d", conn, conn->mode);
2579 
2580 	if (conn->mode != HCI_CM_SNIFF)
2581 		goto timer;
2582 
2583 	if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
2584 		goto timer;
2585 
2586 	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
2587 		struct hci_cp_exit_sniff_mode cp;
2588 		cp.handle = cpu_to_le16(conn->handle);
2589 		hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
2590 	}
2591 
2592 timer:
2593 	if (hdev->idle_timeout > 0)
2594 		queue_delayed_work(hdev->workqueue, &conn->idle_work,
2595 				   msecs_to_jiffies(hdev->idle_timeout));
2596 }
2597 
/* Drop all connection on the device */
void hci_conn_hash_flush(struct hci_dev *hdev)
{
	struct list_head *head = &hdev->conn_hash.list;
	struct hci_conn *conn;

	BT_DBG("hdev %s", hdev->name);

	/* We should not traverse the list here, because hci_conn_del
	 * can remove extra links, which may cause the list traversal
	 * to hit items that have already been released.
	 */
	while ((conn = list_first_entry_or_null(head,
						struct hci_conn,
						list)) != NULL) {
		conn->state = BT_CLOSED;
		/* Notify upper layers before deleting the connection */
		hci_disconn_cfm(conn, HCI_ERROR_LOCAL_HOST_TERM);
		hci_conn_del(conn);
	}
}
2618 
2619 /* Check pending connect attempts */
hci_conn_check_pending(struct hci_dev * hdev)2620 void hci_conn_check_pending(struct hci_dev *hdev)
2621 {
2622 	struct hci_conn *conn;
2623 
2624 	BT_DBG("hdev %s", hdev->name);
2625 
2626 	hci_dev_lock(hdev);
2627 
2628 	conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
2629 	if (conn)
2630 		hci_acl_create_connection(conn);
2631 
2632 	hci_dev_unlock(hdev);
2633 }
2634 
get_link_mode(struct hci_conn * conn)2635 static u32 get_link_mode(struct hci_conn *conn)
2636 {
2637 	u32 link_mode = 0;
2638 
2639 	if (conn->role == HCI_ROLE_MASTER)
2640 		link_mode |= HCI_LM_MASTER;
2641 
2642 	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
2643 		link_mode |= HCI_LM_ENCRYPT;
2644 
2645 	if (test_bit(HCI_CONN_AUTH, &conn->flags))
2646 		link_mode |= HCI_LM_AUTH;
2647 
2648 	if (test_bit(HCI_CONN_SECURE, &conn->flags))
2649 		link_mode |= HCI_LM_SECURE;
2650 
2651 	if (test_bit(HCI_CONN_FIPS, &conn->flags))
2652 		link_mode |= HCI_LM_FIPS;
2653 
2654 	return link_mode;
2655 }
2656 
/* HCIGETCONNLIST ioctl helper: copy up to req.conn_num hci_conn_info
 * entries for the requested device back to userspace.
 *
 * Returns 0 on success or a negative errno.
 */
int hci_get_conn_list(void __user *arg)
{
	struct hci_conn *c;
	struct hci_conn_list_req req, *cl;
	struct hci_conn_info *ci;
	struct hci_dev *hdev;
	int n = 0, size, err;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	/* Bound the allocation size driven by userspace */
	if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
		return -EINVAL;

	size = sizeof(req) + req.conn_num * sizeof(*ci);

	cl = kmalloc(size, GFP_KERNEL);
	if (!cl)
		return -ENOMEM;

	hdev = hci_dev_get(req.dev_id);
	if (!hdev) {
		kfree(cl);
		return -ENODEV;
	}

	ci = cl->conn_info;

	hci_dev_lock(hdev);
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		bacpy(&(ci + n)->bdaddr, &c->dst);
		(ci + n)->handle = c->handle;
		(ci + n)->type  = c->type;
		(ci + n)->out   = c->out;
		(ci + n)->state = c->state;
		(ci + n)->link_mode = get_link_mode(c);
		if (++n >= req.conn_num)
			break;
	}
	hci_dev_unlock(hdev);

	cl->dev_id = hdev->id;
	cl->conn_num = n;
	/* Only copy back the header plus the entries actually filled in */
	size = sizeof(req) + n * sizeof(*ci);

	hci_dev_put(hdev);

	err = copy_to_user(arg, cl, size);
	kfree(cl);

	return err ? -EFAULT : 0;
}
2709 
/* HCIGETCONNINFO ioctl helper: look up the connection matching the
 * request and copy its hci_conn_info to userspace (just past the request
 * header).  Returns 0 on success, -ENOENT when not found.
 */
int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
{
	struct hci_conn_info_req req;
	struct hci_conn_info ci;
	struct hci_conn *conn;
	char __user *ptr = arg + sizeof(req);
	int err = -ENOENT;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
	if (conn) {
		/* Snapshot the fields while holding the device lock */
		bacpy(&ci.bdaddr, &conn->dst);
		ci.handle = conn->handle;
		ci.type  = conn->type;
		ci.out   = conn->out;
		ci.state = conn->state;
		ci.link_mode = get_link_mode(conn);
		err = 0;
	}
	hci_dev_unlock(hdev);

	if (err)
		return err;

	return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
}
2737 
/* HCIGETAUTHINFO ioctl helper: report the auth_type of the ACL link to
 * the requested address.  Returns 0 on success, -ENOENT when no such
 * connection exists.
 */
int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
{
	struct hci_auth_info_req req;
	struct hci_conn *conn;
	bool found = false;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
	if (conn) {
		req.type = conn->auth_type;
		found = true;
	}
	hci_dev_unlock(hdev);

	if (!found)
		return -ENOENT;

	return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
}
2757 
hci_chan_create(struct hci_conn * conn)2758 struct hci_chan *hci_chan_create(struct hci_conn *conn)
2759 {
2760 	struct hci_dev *hdev = conn->hdev;
2761 	struct hci_chan *chan;
2762 
2763 	BT_DBG("%s hcon %p", hdev->name, conn);
2764 
2765 	if (test_bit(HCI_CONN_DROP, &conn->flags)) {
2766 		BT_DBG("Refusing to create new hci_chan");
2767 		return NULL;
2768 	}
2769 
2770 	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
2771 	if (!chan)
2772 		return NULL;
2773 
2774 	chan->conn = hci_conn_get(conn);
2775 	skb_queue_head_init(&chan->data_q);
2776 	chan->state = BT_CONNECTED;
2777 
2778 	list_add_rcu(&chan->list, &conn->chan_list);
2779 
2780 	return chan;
2781 }
2782 
/* Unlink and free a single hci_chan.
 *
 * Blocks in synchronize_rcu() to make sure no RCU reader still sees the
 * channel before it is freed, so this must not be called from RCU
 * read-side or atomic context.
 */
void hci_chan_del(struct hci_chan *chan)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);

	list_del_rcu(&chan->list);

	/* Wait for concurrent RCU readers before freeing the channel */
	synchronize_rcu();

	/* Prevent new hci_chan's to be created for this hci_conn */
	set_bit(HCI_CONN_DROP, &conn->flags);

	/* Drop the reference taken by hci_chan_create() */
	hci_conn_put(conn);

	skb_queue_purge(&chan->data_q);
	kfree(chan);
}
2802 
hci_chan_list_flush(struct hci_conn * conn)2803 void hci_chan_list_flush(struct hci_conn *conn)
2804 {
2805 	struct hci_chan *chan, *n;
2806 
2807 	BT_DBG("hcon %p", conn);
2808 
2809 	list_for_each_entry_safe(chan, n, &conn->chan_list, list)
2810 		hci_chan_del(chan);
2811 }
2812 
/* Linear scan of one connection's channel list for @handle.
 * Returns the matching hci_chan or NULL.
 */
static struct hci_chan *__hci_chan_lookup_handle(struct hci_conn *hcon,
						 __u16 handle)
{
	struct hci_chan *chan;

	list_for_each_entry(chan, &hcon->chan_list, list)
		if (chan->handle == handle)
			return chan;

	return NULL;
}
2825 
/* Find the hci_chan with @handle across all connections of @hdev.
 * Returns the channel or NULL when no connection owns it.
 */
struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle)
{
	struct hci_conn *conn;
	struct hci_chan *chan = NULL;

	rcu_read_lock();

	/* Search each connection until the handle is found */
	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
		chan = __hci_chan_lookup_handle(conn, handle);
		if (chan)
			break;
	}

	rcu_read_unlock();

	return chan;
}
2844 
/* Report the usable PHYs of a connection as a BT_PHY_* bitmask, derived
 * from the link type and the (negotiated) packet types.
 *
 * NOTE: for ACL EDR and eSCO EDR packet types the bits are checked
 * negated because a set bit means the packet type "shall not be used"
 * per the HCI packet-type encoding.
 */
u32 hci_conn_get_phy(struct hci_conn *conn)
{
	u32 phys = 0;

	/* BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 2, Part B page 471:
	 * Table 6.2: Packets defined for synchronous, asynchronous, and
	 * CPB logical transport types.
	 */
	switch (conn->type) {
	case SCO_LINK:
		/* SCO logical transport (1 Mb/s):
		 * HV1, HV2, HV3 and DV.
		 */
		phys |= BT_PHY_BR_1M_1SLOT;

		break;

	case ACL_LINK:
		/* ACL logical transport (1 Mb/s) ptt=0:
		 * DH1, DM3, DH3, DM5 and DH5.
		 */
		phys |= BT_PHY_BR_1M_1SLOT;

		if (conn->pkt_type & (HCI_DM3 | HCI_DH3))
			phys |= BT_PHY_BR_1M_3SLOT;

		if (conn->pkt_type & (HCI_DM5 | HCI_DH5))
			phys |= BT_PHY_BR_1M_5SLOT;

		/* ACL logical transport (2 Mb/s) ptt=1:
		 * 2-DH1, 2-DH3 and 2-DH5.
		 */
		if (!(conn->pkt_type & HCI_2DH1))
			phys |= BT_PHY_EDR_2M_1SLOT;

		if (!(conn->pkt_type & HCI_2DH3))
			phys |= BT_PHY_EDR_2M_3SLOT;

		if (!(conn->pkt_type & HCI_2DH5))
			phys |= BT_PHY_EDR_2M_5SLOT;

		/* ACL logical transport (3 Mb/s) ptt=1:
		 * 3-DH1, 3-DH3 and 3-DH5.
		 */
		if (!(conn->pkt_type & HCI_3DH1))
			phys |= BT_PHY_EDR_3M_1SLOT;

		if (!(conn->pkt_type & HCI_3DH3))
			phys |= BT_PHY_EDR_3M_3SLOT;

		if (!(conn->pkt_type & HCI_3DH5))
			phys |= BT_PHY_EDR_3M_5SLOT;

		break;

	case ESCO_LINK:
		/* eSCO logical transport (1 Mb/s): EV3, EV4 and EV5 */
		phys |= BT_PHY_BR_1M_1SLOT;

		if (!(conn->pkt_type & (ESCO_EV4 | ESCO_EV5)))
			phys |= BT_PHY_BR_1M_3SLOT;

		/* eSCO logical transport (2 Mb/s): 2-EV3, 2-EV5 */
		if (!(conn->pkt_type & ESCO_2EV3))
			phys |= BT_PHY_EDR_2M_1SLOT;

		if (!(conn->pkt_type & ESCO_2EV5))
			phys |= BT_PHY_EDR_2M_3SLOT;

		/* eSCO logical transport (3 Mb/s): 3-EV3, 3-EV5 */
		if (!(conn->pkt_type & ESCO_3EV3))
			phys |= BT_PHY_EDR_3M_1SLOT;

		if (!(conn->pkt_type & ESCO_3EV5))
			phys |= BT_PHY_EDR_3M_3SLOT;

		break;

	case LE_LINK:
		if (conn->le_tx_phy & HCI_LE_SET_PHY_1M)
			phys |= BT_PHY_LE_1M_TX;

		if (conn->le_rx_phy & HCI_LE_SET_PHY_1M)
			phys |= BT_PHY_LE_1M_RX;

		if (conn->le_tx_phy & HCI_LE_SET_PHY_2M)
			phys |= BT_PHY_LE_2M_TX;

		if (conn->le_rx_phy & HCI_LE_SET_PHY_2M)
			phys |= BT_PHY_LE_2M_RX;

		if (conn->le_tx_phy & HCI_LE_SET_PHY_CODED)
			phys |= BT_PHY_LE_CODED_TX;

		if (conn->le_rx_phy & HCI_LE_SET_PHY_CODED)
			phys |= BT_PHY_LE_CODED_RX;

		break;
	}

	return phys;
}
2947 
abort_conn_sync(struct hci_dev * hdev,void * data)2948 static int abort_conn_sync(struct hci_dev *hdev, void *data)
2949 {
2950 	struct hci_conn *conn;
2951 	u16 handle = PTR_UINT(data);
2952 
2953 	conn = hci_conn_hash_lookup_handle(hdev, handle);
2954 	if (!conn)
2955 		return 0;
2956 
2957 	return hci_abort_conn_sync(hdev, conn, conn->abort_reason);
2958 }
2959 
/* Request asynchronous abortion of @conn with HCI error @reason.
 *
 * The actual disconnect runs from the cmd_sync workqueue; the connection
 * is identified there by handle since it may be freed in the meantime.
 * Returns 0 when an abort is already in progress or the queueing result.
 */
int hci_abort_conn(struct hci_conn *conn, u8 reason)
{
	struct hci_dev *hdev = conn->hdev;

	/* If abort_reason has already been set it means the connection is
	 * already being aborted so don't attempt to overwrite it.
	 */
	if (conn->abort_reason)
		return 0;

	bt_dev_dbg(hdev, "handle 0x%2.2x reason 0x%2.2x", conn->handle, reason);

	conn->abort_reason = reason;

	/* If the connection is pending check the command opcode since that
	 * might be blocking on hci_cmd_sync_work while waiting its respective
	 * event so we need to hci_cmd_sync_cancel to cancel it.
	 *
	 * hci_connect_le serializes the connection attempts so only one
	 * connection can be in BT_CONNECT at time.
	 */
	if (conn->state == BT_CONNECT && hdev->req_status == HCI_REQ_PEND) {
		switch (hci_skb_event(hdev->sent_cmd)) {
		case HCI_EV_LE_CONN_COMPLETE:
		case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
		case HCI_EVT_LE_CIS_ESTABLISHED:
			/* Cancel the blocked connect attempt */
			hci_cmd_sync_cancel(hdev, ECANCELED);
			break;
		}
	}

	/* Pass the handle, not the pointer: the conn may be freed before
	 * the queued work runs.
	 */
	return hci_cmd_sync_queue(hdev, abort_conn_sync, UINT_PTR(conn->handle),
				  NULL);
}
2994