xref: /openbmc/linux/net/bluetooth/hci_conn.c (revision fe4549b1)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4    Copyright 2023 NXP
5 
6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License version 2 as
10    published by the Free Software Foundation;
11 
12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 
21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23    SOFTWARE IS DISCLAIMED.
24 */
25 
26 /* Bluetooth HCI connection handling. */
27 
28 #include <linux/export.h>
29 #include <linux/debugfs.h>
30 
31 #include <net/bluetooth/bluetooth.h>
32 #include <net/bluetooth/hci_core.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/iso.h>
35 #include <net/bluetooth/mgmt.h>
36 
37 #include "hci_request.h"
38 #include "smp.h"
39 #include "eir.h"
40 
/* One (e)SCO parameter set tried during synchronous connection setup;
 * successive connection attempts walk the tables below.
 */
struct sco_param {
	u16 pkt_type;		/* allowed (e)SCO packet type bitmask */
	u16 max_latency;	/* max latency in ms (0xffff = don't care) */
	u8  retrans_effort;	/* retransmission effort (0xff = don't care) */
};
46 
/* Context passed from hci_setup_sync() to the deferred
 * hci_enhanced_setup_sync() callback; freed by the callback.
 */
struct conn_handle_t {
	struct hci_conn *conn;
	__u16 handle;
};
51 
/* CVSD over eSCO: parameter sets tried in order on successive attempts,
 * falling back from S3 down to the D0 settings.
 */
static const struct sco_param esco_param_cvsd[] = {
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a,	0x01 }, /* S3 */
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007,	0x01 }, /* S2 */
	{ EDR_ESCO_MASK | ESCO_EV3,   0x0007,	0x01 }, /* S1 */
	{ EDR_ESCO_MASK | ESCO_HV3,   0xffff,	0x01 }, /* D1 */
	{ EDR_ESCO_MASK | ESCO_HV1,   0xffff,	0x01 }, /* D0 */
};
59 
/* CVSD over legacy SCO (controller without eSCO support): D1 then D0. */
static const struct sco_param sco_param_cvsd[] = {
	{ EDR_ESCO_MASK | ESCO_HV3,   0xffff,	0xff }, /* D1 */
	{ EDR_ESCO_MASK | ESCO_HV1,   0xffff,	0xff }, /* D0 */
};
64 
/* mSBC (and transparent) eSCO parameter sets: T2 first, then T1. */
static const struct sco_param esco_param_msbc[] = {
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d,	0x02 }, /* T2 */
	{ EDR_ESCO_MASK | ESCO_EV3,   0x0008,	0x02 }, /* T1 */
};
69 
/* Clean up hci_conn_params state after an LE connect-by-scanning attempt
 * ended with @status: drop the pending-connect reference, optionally notify
 * MGMT of the failure, and re-file the params on the appropriate pending
 * list according to their auto_connect policy.
 *
 * This function requires the caller holds hdev->lock
 */
static void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status)
{
	struct hci_conn_params *params;
	struct hci_dev *hdev = conn->hdev;
	struct smp_irk *irk;
	bdaddr_t *bdaddr;
	u8 bdaddr_type;

	bdaddr = &conn->dst;
	bdaddr_type = conn->dst_type;

	/* Check if we need to convert to identity address */
	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		bdaddr_type = irk->addr_type;
	}

	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, bdaddr,
					   bdaddr_type);
	if (!params)
		return;

	/* Release the reference the pending entry held on the connection */
	if (params->conn) {
		hci_conn_drop(params->conn);
		hci_conn_put(params->conn);
		params->conn = NULL;
	}

	if (!params->explicit_connect)
		return;

	/* If the status indicates successful cancellation of
	 * the attempt (i.e. Unknown Connection Id) there's no point of
	 * notifying failure since we'll go back to keep trying to
	 * connect. The only exception is explicit connect requests
	 * where a timeout + cancel does indicate an actual failure.
	 */
	if (status && status != HCI_ERROR_UNKNOWN_CONN_ID)
		mgmt_connect_failed(hdev, &conn->dst, conn->type,
				    conn->dst_type, status);

	/* The connection attempt was doing scan for new RPA, and is
	 * in scan phase. If params are not associated with any other
	 * autoconnect action, remove them completely. If they are, just unmark
	 * them as waiting for connection, by clearing explicit_connect field.
	 */
	params->explicit_connect = false;

	hci_pend_le_list_del_init(params);

	switch (params->auto_connect) {
	case HCI_AUTO_CONN_EXPLICIT:
		hci_conn_params_del(hdev, bdaddr, bdaddr_type);
		/* return instead of break to avoid duplicate scan update */
		return;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		hci_pend_le_list_add(params, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		hci_pend_le_list_add(params, &hdev->pend_le_reports);
		break;
	default:
		break;
	}

	hci_update_passive_scan(hdev);
}
140 
/* Tear down all per-connection state when a hci_conn is deleted: pending
 * params, stored link key, L2CAP channels, hash entry, provisional handle,
 * type-specific cleanup hook, driver notification, debugfs/sysfs entries,
 * and finally the hdev reference taken at creation time.
 */
static void hci_conn_cleanup(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags))
		hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type);

	if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
		hci_remove_link_key(hdev, &conn->dst);

	hci_chan_list_flush(conn);

	hci_conn_hash_del(hdev, conn);

	/* Handles never assigned by the controller were allocated from
	 * unset_handle_ida and must be returned to it.
	 */
	if (HCI_CONN_HANDLE_UNSET(conn->handle))
		ida_free(&hdev->unset_handle_ida, conn->handle);

	/* Type-specific cleanup (bis_cleanup/cis_cleanup for ISO links) */
	if (conn->cleanup)
		conn->cleanup(conn);

	if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
		switch (conn->setting & SCO_AIRMODE_MASK) {
		case SCO_AIRMODE_CVSD:
		case SCO_AIRMODE_TRANSP:
			if (hdev->notify)
				hdev->notify(hdev, HCI_NOTIFY_DISABLE_SCO);
			break;
		}
	} else {
		if (hdev->notify)
			hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
	}

	debugfs_remove_recursive(conn->debugfs);

	hci_conn_del_sysfs(conn);

	hci_dev_put(hdev);
}
180 
/* Issue HCI Create Connection for an outgoing ACL link, seeding page-scan
 * parameters from the inquiry cache when a fresh entry is available. If an
 * inquiry is in progress it is cancelled first and the connection parked in
 * BT_CONNECT2 until the cancel completes.
 */
static void hci_acl_create_connection(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct inquiry_entry *ie;
	struct hci_cp_create_conn cp;

	BT_DBG("hcon %p", conn);

	/* Many controllers disallow HCI Create Connection while it is doing
	 * HCI Inquiry. So we cancel the Inquiry first before issuing HCI Create
	 * Connection. This may cause the MGMT discovering state to become false
	 * without user space's request but it is okay since the MGMT Discovery
	 * APIs do not promise that discovery should be done forever. Instead,
	 * the user space monitors the status of MGMT discovering and it may
	 * request for discovery again when this flag becomes false.
	 */
	if (test_bit(HCI_INQUIRY, &hdev->flags)) {
		/* Put this connection to "pending" state so that it will be
		 * executed after the inquiry cancel command complete event.
		 */
		conn->state = BT_CONNECT2;
		hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
		return;
	}

	conn->state = BT_CONNECT;
	conn->out = true;
	conn->role = HCI_ROLE_MASTER;

	conn->attempt++;

	conn->link_policy = hdev->link_policy;

	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.bdaddr, &conn->dst);
	cp.pscan_rep_mode = 0x02;

	/* Prefer cached page-scan parameters when the inquiry data is
	 * still fresh enough; 0x8000 marks the clock offset as valid.
	 */
	ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
	if (ie) {
		if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
			cp.pscan_rep_mode = ie->data.pscan_rep_mode;
			cp.pscan_mode     = ie->data.pscan_mode;
			cp.clock_offset   = ie->data.clock_offset |
					    cpu_to_le16(0x8000);
		}

		memcpy(conn->dev_class, ie->data.dev_class, 3);
	}

	cp.pkt_type = cpu_to_le16(conn->pkt_type);
	if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
		cp.role_switch = 0x01;
	else
		cp.role_switch = 0x00;

	hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
}
238 
/* Start disconnecting @conn with the given HCI @reason; returns the result
 * of hci_abort_conn(). As a side effect, an established ACL link where we
 * are central also gets a Read Clock Offset request so the result can be
 * cached for faster reconnection.
 */
int hci_disconnect(struct hci_conn *conn, __u8 reason)
{
	BT_DBG("hcon %p", conn);

	/* When we are central of an established connection and it enters
	 * the disconnect timeout, then go ahead and try to read the
	 * current clock offset.  Processing of the result is done
	 * within the event handling and hci_clock_offset_evt function.
	 */
	if (conn->type == ACL_LINK && conn->role == HCI_ROLE_MASTER &&
	    (conn->state == BT_CONNECTED || conn->state == BT_CONFIG)) {
		struct hci_dev *hdev = conn->hdev;
		struct hci_cp_read_clock_offset clkoff_cp;

		clkoff_cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_READ_CLOCK_OFFSET, sizeof(clkoff_cp),
			     &clkoff_cp);
	}

	return hci_abort_conn(conn, reason);
}
260 
261 static void hci_add_sco(struct hci_conn *conn, __u16 handle)
262 {
263 	struct hci_dev *hdev = conn->hdev;
264 	struct hci_cp_add_sco cp;
265 
266 	BT_DBG("hcon %p", conn);
267 
268 	conn->state = BT_CONNECT;
269 	conn->out = true;
270 
271 	conn->attempt++;
272 
273 	cp.handle   = cpu_to_le16(handle);
274 	cp.pkt_type = cpu_to_le16(conn->pkt_type);
275 
276 	hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
277 }
278 
279 static bool find_next_esco_param(struct hci_conn *conn,
280 				 const struct sco_param *esco_param, int size)
281 {
282 	if (!conn->parent)
283 		return false;
284 
285 	for (; conn->attempt <= size; conn->attempt++) {
286 		if (lmp_esco_2m_capable(conn->parent) ||
287 		    (esco_param[conn->attempt - 1].pkt_type & ESCO_2EV3))
288 			break;
289 		BT_DBG("hcon %p skipped attempt %d, eSCO 2M not supported",
290 		       conn, conn->attempt);
291 	}
292 
293 	return conn->attempt <= size;
294 }
295 
/* Configure the controller's offload data path for @codec in both
 * directions (0x00 = host->controller, 0x01 = controller->host), using the
 * driver callbacks to obtain the vendor-specific configuration blob and the
 * data path id. Returns 0 on success or a negative errno. Note that only
 * the second (input direction) command's status is propagated; the first
 * command's result is intentionally not checked here.
 */
static int configure_datapath_sync(struct hci_dev *hdev, struct bt_codec *codec)
{
	int err;
	__u8 vnd_len, *vnd_data = NULL;
	struct hci_op_configure_data_path *cmd = NULL;

	/* vnd_data is allocated by the driver callback and freed below */
	err = hdev->get_codec_config_data(hdev, ESCO_LINK, codec, &vnd_len,
					  &vnd_data);
	if (err < 0)
		goto error;

	cmd = kzalloc(sizeof(*cmd) + vnd_len, GFP_KERNEL);
	if (!cmd) {
		err = -ENOMEM;
		goto error;
	}

	err = hdev->get_data_path_id(hdev, &cmd->data_path_id);
	if (err < 0)
		goto error;

	cmd->vnd_len = vnd_len;
	memcpy(cmd->vnd_data, vnd_data, vnd_len);

	cmd->direction = 0x00;
	__hci_cmd_sync_status(hdev, HCI_CONFIGURE_DATA_PATH,
			      sizeof(*cmd) + vnd_len, cmd, HCI_CMD_TIMEOUT);

	cmd->direction = 0x01;
	err = __hci_cmd_sync_status(hdev, HCI_CONFIGURE_DATA_PATH,
				    sizeof(*cmd) + vnd_len, cmd,
				    HCI_CMD_TIMEOUT);
error:

	kfree(cmd);
	kfree(vnd_data);
	return err;
}
334 
335 static int hci_enhanced_setup_sync(struct hci_dev *hdev, void *data)
336 {
337 	struct conn_handle_t *conn_handle = data;
338 	struct hci_conn *conn = conn_handle->conn;
339 	__u16 handle = conn_handle->handle;
340 	struct hci_cp_enhanced_setup_sync_conn cp;
341 	const struct sco_param *param;
342 
343 	kfree(conn_handle);
344 
345 	bt_dev_dbg(hdev, "hcon %p", conn);
346 
347 	/* for offload use case, codec needs to configured before opening SCO */
348 	if (conn->codec.data_path)
349 		configure_datapath_sync(hdev, &conn->codec);
350 
351 	conn->state = BT_CONNECT;
352 	conn->out = true;
353 
354 	conn->attempt++;
355 
356 	memset(&cp, 0x00, sizeof(cp));
357 
358 	cp.handle   = cpu_to_le16(handle);
359 
360 	cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
361 	cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
362 
363 	switch (conn->codec.id) {
364 	case BT_CODEC_MSBC:
365 		if (!find_next_esco_param(conn, esco_param_msbc,
366 					  ARRAY_SIZE(esco_param_msbc)))
367 			return -EINVAL;
368 
369 		param = &esco_param_msbc[conn->attempt - 1];
370 		cp.tx_coding_format.id = 0x05;
371 		cp.rx_coding_format.id = 0x05;
372 		cp.tx_codec_frame_size = __cpu_to_le16(60);
373 		cp.rx_codec_frame_size = __cpu_to_le16(60);
374 		cp.in_bandwidth = __cpu_to_le32(32000);
375 		cp.out_bandwidth = __cpu_to_le32(32000);
376 		cp.in_coding_format.id = 0x04;
377 		cp.out_coding_format.id = 0x04;
378 		cp.in_coded_data_size = __cpu_to_le16(16);
379 		cp.out_coded_data_size = __cpu_to_le16(16);
380 		cp.in_pcm_data_format = 2;
381 		cp.out_pcm_data_format = 2;
382 		cp.in_pcm_sample_payload_msb_pos = 0;
383 		cp.out_pcm_sample_payload_msb_pos = 0;
384 		cp.in_data_path = conn->codec.data_path;
385 		cp.out_data_path = conn->codec.data_path;
386 		cp.in_transport_unit_size = 1;
387 		cp.out_transport_unit_size = 1;
388 		break;
389 
390 	case BT_CODEC_TRANSPARENT:
391 		if (!find_next_esco_param(conn, esco_param_msbc,
392 					  ARRAY_SIZE(esco_param_msbc)))
393 			return false;
394 		param = &esco_param_msbc[conn->attempt - 1];
395 		cp.tx_coding_format.id = 0x03;
396 		cp.rx_coding_format.id = 0x03;
397 		cp.tx_codec_frame_size = __cpu_to_le16(60);
398 		cp.rx_codec_frame_size = __cpu_to_le16(60);
399 		cp.in_bandwidth = __cpu_to_le32(0x1f40);
400 		cp.out_bandwidth = __cpu_to_le32(0x1f40);
401 		cp.in_coding_format.id = 0x03;
402 		cp.out_coding_format.id = 0x03;
403 		cp.in_coded_data_size = __cpu_to_le16(16);
404 		cp.out_coded_data_size = __cpu_to_le16(16);
405 		cp.in_pcm_data_format = 2;
406 		cp.out_pcm_data_format = 2;
407 		cp.in_pcm_sample_payload_msb_pos = 0;
408 		cp.out_pcm_sample_payload_msb_pos = 0;
409 		cp.in_data_path = conn->codec.data_path;
410 		cp.out_data_path = conn->codec.data_path;
411 		cp.in_transport_unit_size = 1;
412 		cp.out_transport_unit_size = 1;
413 		break;
414 
415 	case BT_CODEC_CVSD:
416 		if (conn->parent && lmp_esco_capable(conn->parent)) {
417 			if (!find_next_esco_param(conn, esco_param_cvsd,
418 						  ARRAY_SIZE(esco_param_cvsd)))
419 				return -EINVAL;
420 			param = &esco_param_cvsd[conn->attempt - 1];
421 		} else {
422 			if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
423 				return -EINVAL;
424 			param = &sco_param_cvsd[conn->attempt - 1];
425 		}
426 		cp.tx_coding_format.id = 2;
427 		cp.rx_coding_format.id = 2;
428 		cp.tx_codec_frame_size = __cpu_to_le16(60);
429 		cp.rx_codec_frame_size = __cpu_to_le16(60);
430 		cp.in_bandwidth = __cpu_to_le32(16000);
431 		cp.out_bandwidth = __cpu_to_le32(16000);
432 		cp.in_coding_format.id = 4;
433 		cp.out_coding_format.id = 4;
434 		cp.in_coded_data_size = __cpu_to_le16(16);
435 		cp.out_coded_data_size = __cpu_to_le16(16);
436 		cp.in_pcm_data_format = 2;
437 		cp.out_pcm_data_format = 2;
438 		cp.in_pcm_sample_payload_msb_pos = 0;
439 		cp.out_pcm_sample_payload_msb_pos = 0;
440 		cp.in_data_path = conn->codec.data_path;
441 		cp.out_data_path = conn->codec.data_path;
442 		cp.in_transport_unit_size = 16;
443 		cp.out_transport_unit_size = 16;
444 		break;
445 	default:
446 		return -EINVAL;
447 	}
448 
449 	cp.retrans_effort = param->retrans_effort;
450 	cp.pkt_type = __cpu_to_le16(param->pkt_type);
451 	cp.max_latency = __cpu_to_le16(param->max_latency);
452 
453 	if (hci_send_cmd(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
454 		return -EIO;
455 
456 	return 0;
457 }
458 
/* Issue the (non-enhanced) HCI Setup Synchronous Connection command on top
 * of the ACL identified by @handle, choosing (e)SCO parameters from the air
 * mode and the current retry attempt. Returns true if the command was sent.
 */
static bool hci_setup_sync_conn(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_setup_sync_conn cp;
	const struct sco_param *param;

	bt_dev_dbg(hdev, "hcon %p", conn);

	conn->state = BT_CONNECT;
	conn->out = true;

	conn->attempt++;

	cp.handle   = cpu_to_le16(handle);

	cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
	cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
	cp.voice_setting  = cpu_to_le16(conn->setting);

	switch (conn->setting & SCO_AIRMODE_MASK) {
	case SCO_AIRMODE_TRANSP:
		if (!find_next_esco_param(conn, esco_param_msbc,
					  ARRAY_SIZE(esco_param_msbc)))
			return false;
		param = &esco_param_msbc[conn->attempt - 1];
		break;
	case SCO_AIRMODE_CVSD:
		/* eSCO-capable peers use the eSCO table, others fall back
		 * to plain SCO parameter sets.
		 */
		if (conn->parent && lmp_esco_capable(conn->parent)) {
			if (!find_next_esco_param(conn, esco_param_cvsd,
						  ARRAY_SIZE(esco_param_cvsd)))
				return false;
			param = &esco_param_cvsd[conn->attempt - 1];
		} else {
			if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
				return false;
			param = &sco_param_cvsd[conn->attempt - 1];
		}
		break;
	default:
		return false;
	}

	cp.retrans_effort = param->retrans_effort;
	cp.pkt_type = __cpu_to_le16(param->pkt_type);
	cp.max_latency = __cpu_to_le16(param->max_latency);

	if (hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
		return false;

	return true;
}
510 
511 bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
512 {
513 	int result;
514 	struct conn_handle_t *conn_handle;
515 
516 	if (enhanced_sync_conn_capable(conn->hdev)) {
517 		conn_handle = kzalloc(sizeof(*conn_handle), GFP_KERNEL);
518 
519 		if (!conn_handle)
520 			return false;
521 
522 		conn_handle->conn = conn;
523 		conn_handle->handle = handle;
524 		result = hci_cmd_sync_queue(conn->hdev, hci_enhanced_setup_sync,
525 					    conn_handle, NULL);
526 		if (result < 0)
527 			kfree(conn_handle);
528 
529 		return result == 0;
530 	}
531 
532 	return hci_setup_sync_conn(conn, handle);
533 }
534 
/* Send HCI LE Connection Update with the given interval/latency/timeout,
 * also recording the values in any stored hci_conn_params for this peer.
 * Returns 0x01 when stored params were found and updated, 0x00 otherwise.
 */
u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
		      u16 to_multiplier)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_conn_params *params;
	struct hci_cp_le_conn_update cp;

	hci_dev_lock(hdev);

	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		params->conn_min_interval = min;
		params->conn_max_interval = max;
		params->conn_latency = latency;
		params->supervision_timeout = to_multiplier;
	}

	hci_dev_unlock(hdev);

	memset(&cp, 0, sizeof(cp));
	cp.handle		= cpu_to_le16(conn->handle);
	cp.conn_interval_min	= cpu_to_le16(min);
	cp.conn_interval_max	= cpu_to_le16(max);
	cp.conn_latency		= cpu_to_le16(latency);
	cp.supervision_timeout	= cpu_to_le16(to_multiplier);
	cp.min_ce_len		= cpu_to_le16(0x0000);
	cp.max_ce_len		= cpu_to_le16(0x0000);

	hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);

	if (params)
		return 0x01;

	return 0x00;
}
570 
571 void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
572 		      __u8 ltk[16], __u8 key_size)
573 {
574 	struct hci_dev *hdev = conn->hdev;
575 	struct hci_cp_le_start_enc cp;
576 
577 	BT_DBG("hcon %p", conn);
578 
579 	memset(&cp, 0, sizeof(cp));
580 
581 	cp.handle = cpu_to_le16(conn->handle);
582 	cp.rand = rand;
583 	cp.ediv = ediv;
584 	memcpy(cp.ltk, ltk, key_size);
585 
586 	hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
587 }
588 
/* Continue SCO/eSCO setup on the first link queued behind the ACL @conn
 * once the ACL setup finished with @status: issue the synchronous
 * connection on success, or fail and delete the pending link otherwise.
 *
 * Device _must_ be locked
 */
void hci_sco_setup(struct hci_conn *conn, __u8 status)
{
	struct hci_link *link;

	link = list_first_entry_or_null(&conn->link_list, struct hci_link, list);
	if (!link || !link->conn)
		return;

	BT_DBG("hcon %p", conn);

	if (!status) {
		if (lmp_esco_capable(conn->hdev))
			hci_setup_sync(link->conn, conn->handle);
		else
			hci_add_sco(link->conn, conn->handle);
	} else {
		hci_connect_cfm(link->conn, status);
		hci_conn_del(link->conn);
	}
}
610 
/* Delayed work fired when a connection's disconnect timeout expires;
 * aborts the connection unless it has been re-referenced in the meantime.
 */
static void hci_conn_timeout(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     disc_work.work);
	int refcnt = atomic_read(&conn->refcnt);

	BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));

	WARN_ON(refcnt < 0);

	/* FIXME: It was observed that in pairing failed scenario, refcnt
	 * drops below 0. Probably this is because l2cap_conn_del calls
	 * l2cap_chan_del for each channel, and inside l2cap_chan_del conn is
	 * dropped. After that loop hci_chan_del is called which also drops
	 * conn. For now make sure that ACL is alive if refcnt is higher then 0,
	 * otherwise drop it.
	 */
	if (refcnt > 0)
		return;

	hci_abort_conn(conn, hci_proto_disconn_ind(conn));
}
633 
/* Enter sniff mode
 *
 * Delayed work fired when an ACL link has been idle long enough; requests
 * sniff subrating first (when both sides support it) and then sniff mode,
 * guarded by HCI_CONN_MODE_CHANGE_PEND to avoid duplicate requests.
 */
static void hci_conn_idle(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     idle_work.work);
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("hcon %p mode %d", conn, conn->mode);

	if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
		return;

	if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
		return;

	if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
		struct hci_cp_sniff_subrate cp;
		cp.handle             = cpu_to_le16(conn->handle);
		cp.max_latency        = cpu_to_le16(0);
		cp.min_remote_timeout = cpu_to_le16(0);
		cp.min_local_timeout  = cpu_to_le16(0);
		hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
	}

	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
		struct hci_cp_sniff_mode cp;
		cp.handle       = cpu_to_le16(conn->handle);
		cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
		cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
		cp.attempt      = cpu_to_le16(4);
		cp.timeout      = cpu_to_le16(1);
		hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
	}
}
668 
669 static void hci_conn_auto_accept(struct work_struct *work)
670 {
671 	struct hci_conn *conn = container_of(work, struct hci_conn,
672 					     auto_accept_work.work);
673 
674 	hci_send_cmd(conn->hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
675 		     &conn->dst);
676 }
677 
678 static void le_disable_advertising(struct hci_dev *hdev)
679 {
680 	if (ext_adv_capable(hdev)) {
681 		struct hci_cp_le_set_ext_adv_enable cp;
682 
683 		cp.enable = 0x00;
684 		cp.num_of_sets = 0x00;
685 
686 		hci_send_cmd(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp),
687 			     &cp);
688 	} else {
689 		u8 enable = 0x00;
690 		hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
691 			     &enable);
692 	}
693 }
694 
/* Delayed work fired when an LE connection attempt times out; for the
 * peripheral role this means directed advertising expired, otherwise the
 * outgoing connection attempt is aborted.
 */
static void le_conn_timeout(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     le_conn_timeout.work);
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("");

	/* We could end up here due to having done directed advertising,
	 * so clean up the state if necessary. This should however only
	 * happen with broken hardware or if low duty cycle was used
	 * (which doesn't have a timeout of its own).
	 */
	if (conn->role == HCI_ROLE_SLAVE) {
		/* Disable LE Advertising */
		le_disable_advertising(hdev);
		hci_dev_lock(hdev);
		hci_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
		hci_dev_unlock(hdev);
		return;
	}

	hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
}
719 
/* Buffer for HCI_OP_LE_SET_CIG_PARAMS: fixed command header followed by
 * up to 0x1f CIS entries (the maximum CIS count per CIG).
 */
struct iso_cig_params {
	struct hci_cp_le_set_cig_params cp;
	struct hci_cis_params cis[0x1f];
};
724 
/* Scratch context for iterating/terminating ISO connections; which union
 * members are meaningful depends on whether the user deals with unicast
 * (cig/cis) or broadcast (big/bis/sync_handle) links.
 */
struct iso_list_data {
	union {
		u8  cig;	/* unicast: CIG id to match */
		u8  big;	/* broadcast: BIG handle */
	};
	union {
		u8  cis;	/* unicast: CIS id */
		u8  bis;	/* broadcast: BIS id */
		u16 sync_handle;	/* broadcast receiver: PA sync handle */
	};
	int count;		/* matches found by the iterator callbacks */
	bool big_term;		/* BIG was created and must be terminated */
	bool pa_sync_term;	/* PA sync must be terminated */
	bool big_sync_term;	/* BIG sync must be terminated */
};
740 
741 static void bis_list(struct hci_conn *conn, void *data)
742 {
743 	struct iso_list_data *d = data;
744 
745 	/* Skip if not broadcast/ANY address */
746 	if (bacmp(&conn->dst, BDADDR_ANY))
747 		return;
748 
749 	if (d->big != conn->iso_qos.bcast.big || d->bis == BT_ISO_QOS_BIS_UNSET ||
750 	    d->bis != conn->iso_qos.bcast.bis)
751 		return;
752 
753 	d->count++;
754 }
755 
756 static int terminate_big_sync(struct hci_dev *hdev, void *data)
757 {
758 	struct iso_list_data *d = data;
759 
760 	bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", d->big, d->bis);
761 
762 	hci_remove_ext_adv_instance_sync(hdev, d->bis, NULL);
763 
764 	/* Only terminate BIG if it has been created */
765 	if (!d->big_term)
766 		return 0;
767 
768 	return hci_le_terminate_big_sync(hdev, d->big,
769 					 HCI_ERROR_LOCAL_HOST_TERM);
770 }
771 
/* hci_cmd_sync destroy callback: free the iso_list_data allocated by
 * hci_le_terminate_big()/hci_le_big_terminate().
 */
static void terminate_big_destroy(struct hci_dev *hdev, void *data, int err)
{
	kfree(data);
}
776 
/* Queue asynchronous teardown of the BIG/advertising instance associated
 * with a broadcast source connection. Returns 0 or a negative errno; the
 * queued context is freed by terminate_big_destroy().
 */
static int hci_le_terminate_big(struct hci_dev *hdev, struct hci_conn *conn)
{
	struct iso_list_data *d;
	int ret;

	bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", conn->iso_qos.bcast.big,
		   conn->iso_qos.bcast.bis);

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->big = conn->iso_qos.bcast.big;
	d->bis = conn->iso_qos.bcast.bis;
	/* Clearing the flag here ensures the BIG is terminated only once */
	d->big_term = test_and_clear_bit(HCI_CONN_BIG_CREATED, &conn->flags);

	ret = hci_cmd_sync_queue(hdev, terminate_big_sync, d,
				 terminate_big_destroy);
	if (ret)
		kfree(d);

	return ret;
}
800 
801 static int big_terminate_sync(struct hci_dev *hdev, void *data)
802 {
803 	struct iso_list_data *d = data;
804 
805 	bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", d->big,
806 		   d->sync_handle);
807 
808 	if (d->big_sync_term)
809 		hci_le_big_terminate_sync(hdev, d->big);
810 
811 	if (d->pa_sync_term)
812 		return hci_le_pa_terminate_sync(hdev, d->sync_handle);
813 
814 	return 0;
815 }
816 
/* Queue asynchronous teardown of a broadcast receiver's BIG sync and PA
 * sync. Returns 0 or a negative errno; the queued context is freed by
 * terminate_big_destroy().
 */
static int hci_le_big_terminate(struct hci_dev *hdev, u8 big, struct hci_conn *conn)
{
	struct iso_list_data *d;
	int ret;

	bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", big, conn->sync_handle);

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->big = big;
	d->sync_handle = conn->sync_handle;
	/* Clearing the flags here ensures each sync is terminated only once */
	d->pa_sync_term = test_and_clear_bit(HCI_CONN_PA_SYNC, &conn->flags);
	d->big_sync_term = test_and_clear_bit(HCI_CONN_BIG_SYNC, &conn->flags);

	ret = hci_cmd_sync_queue(hdev, big_terminate_sync, d,
				 terminate_big_destroy);
	if (ret)
		kfree(d);

	return ret;
}
840 
/* Cleanup BIS connection
 *
 * Detects if there are any BIS left connected in a BIG.
 * broadcaster: Remove advertising instance and terminate BIG.
 * broadcaster receiver: Terminate BIG sync and terminate PA sync.
 */
static void bis_cleanup(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_conn *bis;

	bt_dev_dbg(hdev, "conn %p", conn);

	if (conn->role == HCI_ROLE_MASTER) {
		if (!test_and_clear_bit(HCI_CONN_PER_ADV, &conn->flags))
			return;

		/* Check if ISO connection is a BIS and terminate advertising
		 * set and BIG if there are no other connections using it.
		 */
		bis = hci_conn_hash_lookup_big(hdev, conn->iso_qos.bcast.big);
		if (bis)
			return;

		hci_le_terminate_big(hdev, conn);
	} else {
		/* Receiver: only tear down once no other connection still
		 * references this BIG.
		 */
		bis = hci_conn_hash_lookup_big_any_dst(hdev,
						       conn->iso_qos.bcast.big);

		if (bis)
			return;

		hci_le_big_terminate(hdev, conn->iso_qos.bcast.big,
				     conn);
	}
}
877 
/* hci_cmd_sync callback: remove the CIG whose handle was packed into the
 * data pointer by hci_le_remove_cig().
 */
static int remove_cig_sync(struct hci_dev *hdev, void *data)
{
	return hci_le_remove_cig_sync(hdev, PTR_UINT(data));
}
884 
/* Queue asynchronous removal of the CIG with the given handle; the handle
 * is packed into the callback data pointer to avoid an allocation.
 */
static int hci_le_remove_cig(struct hci_dev *hdev, u8 handle)
{
	bt_dev_dbg(hdev, "handle 0x%2.2x", handle);

	return hci_cmd_sync_queue(hdev, remove_cig_sync, UINT_PTR(handle),
				  NULL);
}
892 
893 static void find_cis(struct hci_conn *conn, void *data)
894 {
895 	struct iso_list_data *d = data;
896 
897 	/* Ignore broadcast or if CIG don't match */
898 	if (!bacmp(&conn->dst, BDADDR_ANY) || d->cig != conn->iso_qos.ucast.cig)
899 		return;
900 
901 	d->count++;
902 }
903 
904 /* Cleanup CIS connection:
905  *
906  * Detects if there any CIS left connected in a CIG and remove it.
907  */
908 static void cis_cleanup(struct hci_conn *conn)
909 {
910 	struct hci_dev *hdev = conn->hdev;
911 	struct iso_list_data d;
912 
913 	if (conn->iso_qos.ucast.cig == BT_ISO_QOS_CIG_UNSET)
914 		return;
915 
916 	memset(&d, 0, sizeof(d));
917 	d.cig = conn->iso_qos.ucast.cig;
918 
919 	/* Check if ISO connection is a CIS and remove CIG if there are
920 	 * no other connections using it.
921 	 */
922 	hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_BOUND, &d);
923 	hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_CONNECT, &d);
924 	hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_CONNECTED, &d);
925 	if (d.count)
926 		return;
927 
928 	hci_le_remove_cig(hdev, conn->iso_qos.ucast.cig);
929 }
930 
/* Allocate a provisional connection handle outside the valid HCI range
 * (HCI_CONN_HANDLE_MAX + 1 .. U16_MAX); it is released in
 * hci_conn_cleanup() once the controller assigns the real handle.
 */
static int hci_conn_hash_alloc_unset(struct hci_dev *hdev)
{
	return ida_alloc_range(&hdev->unset_handle_ida, HCI_CONN_HANDLE_MAX + 1,
			       U16_MAX, GFP_ATOMIC);
}
936 
/* Allocate and initialize a new hci_conn of the given link @type with the
 * given @handle, add it to the connection hash and sysfs, and notify the
 * driver. Returns the connection or an ERR_PTR; -ECONNREFUSED is returned
 * when the controller lacks buffers/support for the requested link type.
 */
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
			      u8 role, u16 handle)
{
	struct hci_conn *conn;

	/* Reject link types the controller cannot actually carry */
	switch (type) {
	case ACL_LINK:
		if (!hdev->acl_mtu)
			return ERR_PTR(-ECONNREFUSED);
		break;
	case ISO_LINK:
		if (hdev->iso_mtu)
			/* Dedicated ISO Buffer exists */
			break;
		fallthrough;
	case LE_LINK:
		if (hdev->le_mtu && hdev->le_mtu < HCI_MIN_LE_MTU)
			return ERR_PTR(-ECONNREFUSED);
		if (!hdev->le_mtu && hdev->acl_mtu < HCI_MIN_LE_MTU)
			return ERR_PTR(-ECONNREFUSED);
		break;
	case SCO_LINK:
	case ESCO_LINK:
		if (!hdev->sco_pkts)
			/* Controller does not support SCO or eSCO over HCI */
			return ERR_PTR(-ECONNREFUSED);
		break;
	default:
		return ERR_PTR(-ECONNREFUSED);
	}

	bt_dev_dbg(hdev, "dst %pMR handle 0x%4.4x", dst, handle);

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn)
		return ERR_PTR(-ENOMEM);

	bacpy(&conn->dst, dst);
	bacpy(&conn->src, &hdev->bdaddr);
	conn->handle = handle;
	conn->hdev  = hdev;
	conn->type  = type;
	conn->role  = role;
	conn->mode  = HCI_CM_ACTIVE;
	conn->state = BT_OPEN;
	conn->auth_type = HCI_AT_GENERAL_BONDING;
	conn->io_capability = hdev->io_capability;
	conn->remote_auth = 0xff;
	conn->key_type = 0xff;
	conn->rssi = HCI_RSSI_INVALID;
	conn->tx_power = HCI_TX_POWER_INVALID;
	conn->max_tx_power = HCI_TX_POWER_INVALID;
	conn->sync_handle = HCI_SYNC_HANDLE_INVALID;

	set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;

	/* Set Default Authenticated payload timeout to 30s */
	conn->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;

	if (conn->role == HCI_ROLE_MASTER)
		conn->out = true;

	/* Per-type packet type and MTU selection */
	switch (type) {
	case ACL_LINK:
		conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
		conn->mtu = hdev->acl_mtu;
		break;
	case LE_LINK:
		/* conn->src should reflect the local identity address */
		hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
		conn->mtu = hdev->le_mtu ? hdev->le_mtu : hdev->acl_mtu;
		break;
	case ISO_LINK:
		/* conn->src should reflect the local identity address */
		hci_copy_identity_address(hdev, &conn->src, &conn->src_type);

		/* set proper cleanup function */
		if (!bacmp(dst, BDADDR_ANY))
			conn->cleanup = bis_cleanup;
		else if (conn->role == HCI_ROLE_MASTER)
			conn->cleanup = cis_cleanup;

		conn->mtu = hdev->iso_mtu ? hdev->iso_mtu :
			    hdev->le_mtu ? hdev->le_mtu : hdev->acl_mtu;
		break;
	case SCO_LINK:
		if (lmp_esco_capable(hdev))
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
		else
			conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;

		conn->mtu = hdev->sco_mtu;
		break;
	case ESCO_LINK:
		conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
		conn->mtu = hdev->sco_mtu;
		break;
	}

	skb_queue_head_init(&conn->data_q);

	INIT_LIST_HEAD(&conn->chan_list);
	INIT_LIST_HEAD(&conn->link_list);

	INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
	INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
	INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
	INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);

	atomic_set(&conn->refcnt, 0);

	/* Reference released in hci_conn_cleanup() */
	hci_dev_hold(hdev);

	hci_conn_hash_add(hdev, conn);

	/* The SCO and eSCO connections will only be notified when their
	 * setup has been completed. This is different to ACL links which
	 * can be notified right away.
	 */
	if (conn->type != SCO_LINK && conn->type != ESCO_LINK) {
		if (hdev->notify)
			hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);
	}

	hci_conn_init_sysfs(conn);

	return conn;
}
1067 
1068 struct hci_conn *hci_conn_add_unset(struct hci_dev *hdev, int type,
1069 				    bdaddr_t *dst, u8 role)
1070 {
1071 	int handle;
1072 
1073 	bt_dev_dbg(hdev, "dst %pMR", dst);
1074 
1075 	handle = hci_conn_hash_alloc_unset(hdev);
1076 	if (unlikely(handle < 0))
1077 		return ERR_PTR(-ECONNREFUSED);
1078 
1079 	return hci_conn_add(hdev, type, dst, role, handle);
1080 }
1081 
1082 static void hci_conn_cleanup_child(struct hci_conn *conn, u8 reason)
1083 {
1084 	if (!reason)
1085 		reason = HCI_ERROR_REMOTE_USER_TERM;
1086 
1087 	/* Due to race, SCO/ISO conn might be not established yet at this point,
1088 	 * and nothing else will clean it up. In other cases it is done via HCI
1089 	 * events.
1090 	 */
1091 	switch (conn->type) {
1092 	case SCO_LINK:
1093 	case ESCO_LINK:
1094 		if (HCI_CONN_HANDLE_UNSET(conn->handle))
1095 			hci_conn_failed(conn, reason);
1096 		break;
1097 	case ISO_LINK:
1098 		if (conn->state != BT_CONNECTED &&
1099 		    !test_bit(HCI_CONN_CREATE_CIS, &conn->flags))
1100 			hci_conn_failed(conn, reason);
1101 		break;
1102 	}
1103 }
1104 
/* Detach @conn from its parent/child link topology.
 *
 * Called with a parent connection: recursively unlinks every child and,
 * while the device is still up, fails any child whose setup never
 * completed (see hci_conn_cleanup_child).
 *
 * Called with a child connection: removes its link from the parent's
 * list, drops the references taken when the link was created in
 * hci_conn_link(), and frees the link object.
 */
static void hci_conn_unlink(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	bt_dev_dbg(hdev, "hcon %p", conn);

	if (!conn->parent) {
		struct hci_link *link, *t;

		list_for_each_entry_safe(link, t, &conn->link_list, list) {
			struct hci_conn *child = link->conn;

			hci_conn_unlink(child);

			/* If hdev is down it means
			 * hci_dev_close_sync/hci_conn_hash_flush is in progress
			 * and links don't need to be cleanup as all connections
			 * would be cleanup.
			 */
			if (!test_bit(HCI_UP, &hdev->flags))
				continue;

			hci_conn_cleanup_child(child, conn->abort_reason);
		}

		return;
	}

	if (!conn->link)
		return;

	/* Wait for concurrent RCU readers of the link list before the
	 * link is freed below.
	 */
	list_del_rcu(&conn->link->list);
	synchronize_rcu();

	/* Release the hold + ref taken on the parent in hci_conn_link() */
	hci_conn_drop(conn->parent);
	hci_conn_put(conn->parent);
	conn->parent = NULL;

	kfree(conn->link);
	conn->link = NULL;
}
1146 
1147 void hci_conn_del(struct hci_conn *conn)
1148 {
1149 	struct hci_dev *hdev = conn->hdev;
1150 
1151 	BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);
1152 
1153 	hci_conn_unlink(conn);
1154 
1155 	cancel_delayed_work_sync(&conn->disc_work);
1156 	cancel_delayed_work_sync(&conn->auto_accept_work);
1157 	cancel_delayed_work_sync(&conn->idle_work);
1158 
1159 	if (conn->type == ACL_LINK) {
1160 		/* Unacked frames */
1161 		hdev->acl_cnt += conn->sent;
1162 	} else if (conn->type == LE_LINK) {
1163 		cancel_delayed_work(&conn->le_conn_timeout);
1164 
1165 		if (hdev->le_pkts)
1166 			hdev->le_cnt += conn->sent;
1167 		else
1168 			hdev->acl_cnt += conn->sent;
1169 	} else {
1170 		/* Unacked ISO frames */
1171 		if (conn->type == ISO_LINK) {
1172 			if (hdev->iso_pkts)
1173 				hdev->iso_cnt += conn->sent;
1174 			else if (hdev->le_pkts)
1175 				hdev->le_cnt += conn->sent;
1176 			else
1177 				hdev->acl_cnt += conn->sent;
1178 		}
1179 	}
1180 
1181 	skb_queue_purge(&conn->data_q);
1182 
1183 	/* Remove the connection from the list and cleanup its remaining
1184 	 * state. This is a separate function since for some cases like
1185 	 * BT_CONNECT_SCAN we *only* want the cleanup part without the
1186 	 * rest of hci_conn_del.
1187 	 */
1188 	hci_conn_cleanup(conn);
1189 }
1190 
/* Pick the local controller to use for a connection to @dst.
 *
 * When @src is a real address the controller whose (identity) address
 * matches @src and @src_type is chosen; when @src is BDADDR_ANY the
 * first usable controller whose own address differs from @dst is
 * chosen. Only powered-up primary controllers not bound to a user
 * channel are considered.
 *
 * Returns the device with a reference held (caller must hci_dev_put),
 * or NULL if no suitable controller exists.
 */
struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, uint8_t src_type)
{
	int use_src = bacmp(src, BDADDR_ANY);
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%pMR -> %pMR", src, dst);

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(d, &hci_dev_list, list) {
		if (!test_bit(HCI_UP, &d->flags) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL) ||
		    d->dev_type != HCI_PRIMARY)
			continue;

		/* Simple routing:
		 *   No source address - find interface with bdaddr != dst
		 *   Source address    - find interface with bdaddr == src
		 */

		if (use_src) {
			bdaddr_t id_addr;
			u8 id_addr_type;

			if (src_type == BDADDR_BREDR) {
				if (!lmp_bredr_capable(d))
					continue;
				bacpy(&id_addr, &d->bdaddr);
				id_addr_type = BDADDR_BREDR;
			} else {
				if (!lmp_le_capable(d))
					continue;

				hci_copy_identity_address(d, &id_addr,
							  &id_addr_type);

				/* Convert from HCI to three-value type */
				if (id_addr_type == ADDR_LE_DEV_PUBLIC)
					id_addr_type = BDADDR_LE_PUBLIC;
				else
					id_addr_type = BDADDR_LE_RANDOM;
			}

			if (!bacmp(&id_addr, src) && id_addr_type == src_type) {
				hdev = d; break;
			}
		} else {
			if (bacmp(&d->bdaddr, dst)) {
				hdev = d; break;
			}
		}
	}

	/* Take the reference while still holding the list lock so the
	 * device cannot go away in between.
	 */
	if (hdev)
		hdev = hci_dev_hold(hdev);

	read_unlock(&hci_dev_list_lock);
	return hdev;
}
EXPORT_SYMBOL(hci_get_route);
1251 
1252 /* This function requires the caller holds hdev->lock */
1253 static void hci_le_conn_failed(struct hci_conn *conn, u8 status)
1254 {
1255 	struct hci_dev *hdev = conn->hdev;
1256 
1257 	hci_connect_le_scan_cleanup(conn, status);
1258 
1259 	/* Enable advertising in case this was a failed connection
1260 	 * attempt as a peripheral.
1261 	 */
1262 	hci_enable_advertising(hdev);
1263 }
1264 
1265 /* This function requires the caller holds hdev->lock */
1266 void hci_conn_failed(struct hci_conn *conn, u8 status)
1267 {
1268 	struct hci_dev *hdev = conn->hdev;
1269 
1270 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
1271 
1272 	switch (conn->type) {
1273 	case LE_LINK:
1274 		hci_le_conn_failed(conn, status);
1275 		break;
1276 	case ACL_LINK:
1277 		mgmt_connect_failed(hdev, &conn->dst, conn->type,
1278 				    conn->dst_type, status);
1279 		break;
1280 	}
1281 
1282 	conn->state = BT_CLOSED;
1283 	hci_connect_cfm(conn, status);
1284 	hci_conn_del(conn);
1285 }
1286 
/* Assign the controller-provided @handle to @conn, replacing any
 * placeholder handle allocated by hci_conn_add_unset().
 *
 * Returns 0 on success (or if the handle is already set), an HCI error
 * code otherwise. This function requires the caller holds hdev->lock.
 */
u8 hci_conn_set_handle(struct hci_conn *conn, u16 handle)
{
	struct hci_dev *hdev = conn->hdev;

	bt_dev_dbg(hdev, "hcon %p handle 0x%4.4x", conn, handle);

	if (conn->handle == handle)
		return 0;

	if (handle > HCI_CONN_HANDLE_MAX) {
		bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x",
			   handle, HCI_CONN_HANDLE_MAX);
		return HCI_ERROR_INVALID_PARAMETERS;
	}

	/* If abort_reason has been sent it means the connection is being
	 * aborted and the handle shall not be changed.
	 */
	if (conn->abort_reason)
		return conn->abort_reason;

	/* Release the placeholder handle back to the IDA before taking
	 * over the real one.
	 */
	if (HCI_CONN_HANDLE_UNSET(conn->handle))
		ida_free(&hdev->unset_handle_ida, conn->handle);

	conn->handle = handle;

	return 0;
}
1316 
/* hci_cmd_sync completion callback for hci_connect_le_sync().
 *
 * @data carries the connection handle (see hci_connect_le()); the
 * connection may already be gone, in which case nothing is done. On
 * success only the scanning state is cleaned up; on error the pending
 * connection is failed.
 */
static void create_le_conn_complete(struct hci_dev *hdev, void *data, int err)
{
	struct hci_conn *conn;
	u16 handle = PTR_UINT(data);

	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn)
		return;

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	if (!err) {
		hci_connect_le_scan_cleanup(conn, 0x00);
		goto done;
	}

	/* Check if connection is still pending */
	if (conn != hci_lookup_le_connect(hdev))
		goto done;

	/* Flush to make sure we send create conn cancel command if needed */
	flush_delayed_work(&conn->le_conn_timeout);
	hci_conn_failed(conn, bt_status(err));

done:
	hci_dev_unlock(hdev);
}
1346 
1347 static int hci_connect_le_sync(struct hci_dev *hdev, void *data)
1348 {
1349 	struct hci_conn *conn;
1350 	u16 handle = PTR_UINT(data);
1351 
1352 	conn = hci_conn_hash_lookup_handle(hdev, handle);
1353 	if (!conn)
1354 		return 0;
1355 
1356 	bt_dev_dbg(hdev, "conn %p", conn);
1357 
1358 	clear_bit(HCI_CONN_SCANNING, &conn->flags);
1359 	conn->state = BT_CONNECT;
1360 
1361 	return hci_le_create_conn_sync(hdev, conn);
1362 }
1363 
1364 struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
1365 				u8 dst_type, bool dst_resolved, u8 sec_level,
1366 				u16 conn_timeout, u8 role)
1367 {
1368 	struct hci_conn *conn;
1369 	struct smp_irk *irk;
1370 	int err;
1371 
1372 	/* Let's make sure that le is enabled.*/
1373 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1374 		if (lmp_le_capable(hdev))
1375 			return ERR_PTR(-ECONNREFUSED);
1376 
1377 		return ERR_PTR(-EOPNOTSUPP);
1378 	}
1379 
1380 	/* Since the controller supports only one LE connection attempt at a
1381 	 * time, we return -EBUSY if there is any connection attempt running.
1382 	 */
1383 	if (hci_lookup_le_connect(hdev))
1384 		return ERR_PTR(-EBUSY);
1385 
1386 	/* If there's already a connection object but it's not in
1387 	 * scanning state it means it must already be established, in
1388 	 * which case we can't do anything else except report a failure
1389 	 * to connect.
1390 	 */
1391 	conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
1392 	if (conn && !test_bit(HCI_CONN_SCANNING, &conn->flags)) {
1393 		return ERR_PTR(-EBUSY);
1394 	}
1395 
1396 	/* Check if the destination address has been resolved by the controller
1397 	 * since if it did then the identity address shall be used.
1398 	 */
1399 	if (!dst_resolved) {
1400 		/* When given an identity address with existing identity
1401 		 * resolving key, the connection needs to be established
1402 		 * to a resolvable random address.
1403 		 *
1404 		 * Storing the resolvable random address is required here
1405 		 * to handle connection failures. The address will later
1406 		 * be resolved back into the original identity address
1407 		 * from the connect request.
1408 		 */
1409 		irk = hci_find_irk_by_addr(hdev, dst, dst_type);
1410 		if (irk && bacmp(&irk->rpa, BDADDR_ANY)) {
1411 			dst = &irk->rpa;
1412 			dst_type = ADDR_LE_DEV_RANDOM;
1413 		}
1414 	}
1415 
1416 	if (conn) {
1417 		bacpy(&conn->dst, dst);
1418 	} else {
1419 		conn = hci_conn_add_unset(hdev, LE_LINK, dst, role);
1420 		if (IS_ERR(conn))
1421 			return conn;
1422 		hci_conn_hold(conn);
1423 		conn->pending_sec_level = sec_level;
1424 	}
1425 
1426 	conn->dst_type = dst_type;
1427 	conn->sec_level = BT_SECURITY_LOW;
1428 	conn->conn_timeout = conn_timeout;
1429 
1430 	err = hci_cmd_sync_queue(hdev, hci_connect_le_sync,
1431 				 UINT_PTR(conn->handle),
1432 				 create_le_conn_complete);
1433 	if (err) {
1434 		hci_conn_del(conn);
1435 		return ERR_PTR(err);
1436 	}
1437 
1438 	return conn;
1439 }
1440 
1441 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
1442 {
1443 	struct hci_conn *conn;
1444 
1445 	conn = hci_conn_hash_lookup_le(hdev, addr, type);
1446 	if (!conn)
1447 		return false;
1448 
1449 	if (conn->state != BT_CONNECTED)
1450 		return false;
1451 
1452 	return true;
1453 }
1454 
/* Set up connection parameters for an explicit (user-initiated)
 * connect-by-scan to @addr, creating them if necessary and making sure
 * they sit on the pend_le_conns list so passive scan will trigger the
 * connection.
 *
 * Returns 0 on success, -EISCONN when already connected, -ENOMEM on
 * allocation failure. This function requires the caller holds
 * hdev->lock.
 */
static int hci_explicit_conn_params_set(struct hci_dev *hdev,
					bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	if (is_connected(hdev, addr, addr_type))
		return -EISCONN;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (!params) {
		params = hci_conn_params_add(hdev, addr, addr_type);
		if (!params)
			return -ENOMEM;

		/* If we created new params, mark them to be deleted in
		 * hci_connect_le_scan_cleanup. It's different case than
		 * existing disabled params, those will stay after cleanup.
		 */
		params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
	}

	/* We're trying to connect, so make sure params are at pend_le_conns */
	if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
	    params->auto_connect == HCI_AUTO_CONN_REPORT ||
	    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
		hci_pend_le_list_del_init(params);
		hci_pend_le_list_add(params, &hdev->pend_le_conns);
	}

	params->explicit_connect = true;

	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
	       params->auto_connect);

	return 0;
}
1492 
1493 static int qos_set_big(struct hci_dev *hdev, struct bt_iso_qos *qos)
1494 {
1495 	struct hci_conn *conn;
1496 	u8  big;
1497 
1498 	/* Allocate a BIG if not set */
1499 	if (qos->bcast.big == BT_ISO_QOS_BIG_UNSET) {
1500 		for (big = 0x00; big < 0xef; big++) {
1501 
1502 			conn = hci_conn_hash_lookup_big(hdev, big);
1503 			if (!conn)
1504 				break;
1505 		}
1506 
1507 		if (big == 0xef)
1508 			return -EADDRNOTAVAIL;
1509 
1510 		/* Update BIG */
1511 		qos->bcast.big = big;
1512 	}
1513 
1514 	return 0;
1515 }
1516 
1517 static int qos_set_bis(struct hci_dev *hdev, struct bt_iso_qos *qos)
1518 {
1519 	struct hci_conn *conn;
1520 	u8  bis;
1521 
1522 	/* Allocate BIS if not set */
1523 	if (qos->bcast.bis == BT_ISO_QOS_BIS_UNSET) {
1524 		/* Find an unused adv set to advertise BIS, skip instance 0x00
1525 		 * since it is reserved as general purpose set.
1526 		 */
1527 		for (bis = 0x01; bis < hdev->le_num_of_adv_sets;
1528 		     bis++) {
1529 
1530 			conn = hci_conn_hash_lookup_bis(hdev, BDADDR_ANY, bis);
1531 			if (!conn)
1532 				break;
1533 		}
1534 
1535 		if (bis == hdev->le_num_of_adv_sets)
1536 			return -EADDRNOTAVAIL;
1537 
1538 		/* Update BIS */
1539 		qos->bcast.bis = bis;
1540 	}
1541 
1542 	return 0;
1543 }
1544 
1545 /* This function requires the caller holds hdev->lock */
1546 static struct hci_conn *hci_add_bis(struct hci_dev *hdev, bdaddr_t *dst,
1547 				    struct bt_iso_qos *qos, __u8 base_len,
1548 				    __u8 *base)
1549 {
1550 	struct hci_conn *conn;
1551 	int err;
1552 
1553 	/* Let's make sure that le is enabled.*/
1554 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1555 		if (lmp_le_capable(hdev))
1556 			return ERR_PTR(-ECONNREFUSED);
1557 		return ERR_PTR(-EOPNOTSUPP);
1558 	}
1559 
1560 	err = qos_set_big(hdev, qos);
1561 	if (err)
1562 		return ERR_PTR(err);
1563 
1564 	err = qos_set_bis(hdev, qos);
1565 	if (err)
1566 		return ERR_PTR(err);
1567 
1568 	/* Check if the LE Create BIG command has already been sent */
1569 	conn = hci_conn_hash_lookup_per_adv_bis(hdev, dst, qos->bcast.big,
1570 						qos->bcast.big);
1571 	if (conn)
1572 		return ERR_PTR(-EADDRINUSE);
1573 
1574 	/* Check BIS settings against other bound BISes, since all
1575 	 * BISes in a BIG must have the same value for all parameters
1576 	 */
1577 	conn = hci_conn_hash_lookup_big(hdev, qos->bcast.big);
1578 
1579 	if (conn && (memcmp(qos, &conn->iso_qos, sizeof(*qos)) ||
1580 		     base_len != conn->le_per_adv_data_len ||
1581 		     memcmp(conn->le_per_adv_data, base, base_len)))
1582 		return ERR_PTR(-EADDRINUSE);
1583 
1584 	conn = hci_conn_add_unset(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
1585 	if (IS_ERR(conn))
1586 		return conn;
1587 
1588 	conn->state = BT_CONNECT;
1589 
1590 	hci_conn_hold(conn);
1591 	return conn;
1592 }
1593 
/* Initiate an LE connection to @dst via passive scanning (connect when
 * the device is next seen advertising).
 *
 * If a connection object already exists it is reused with an upgraded
 * pending security level; otherwise a new one is created in
 * HCI_CONN_SCANNING state and explicit connect params are installed so
 * passive scan will trigger the connection. Returns the connection
 * with a reference held, or an ERR_PTR. This function requires the
 * caller holds hdev->lock.
 */
struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
				     u8 dst_type, u8 sec_level,
				     u16 conn_timeout,
				     enum conn_reasons conn_reason)
{
	struct hci_conn *conn;

	/* Let's make sure that le is enabled.*/
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (lmp_le_capable(hdev))
			return ERR_PTR(-ECONNREFUSED);

		return ERR_PTR(-EOPNOTSUPP);
	}

	/* Some devices send ATT messages as soon as the physical link is
	 * established. To be able to handle these ATT messages, the user-
	 * space first establishes the connection and then starts the pairing
	 * process.
	 *
	 * So if a hci_conn object already exists for the following connection
	 * attempt, we simply update pending_sec_level and auth_type fields
	 * and return the object found.
	 */
	conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
	if (conn) {
		if (conn->pending_sec_level < sec_level)
			conn->pending_sec_level = sec_level;
		goto done;
	}

	BT_DBG("requesting refresh of dst_addr");

	conn = hci_conn_add_unset(hdev, LE_LINK, dst, HCI_ROLE_MASTER);
	if (IS_ERR(conn))
		return conn;

	if (hci_explicit_conn_params_set(hdev, dst, dst_type) < 0) {
		hci_conn_del(conn);
		return ERR_PTR(-EBUSY);
	}

	conn->state = BT_CONNECT;
	set_bit(HCI_CONN_SCANNING, &conn->flags);
	conn->dst_type = dst_type;
	conn->sec_level = BT_SECURITY_LOW;
	conn->pending_sec_level = sec_level;
	conn->conn_timeout = conn_timeout;
	conn->conn_reason = conn_reason;

	/* Re-evaluate passive scan so the new pend_le_conns entry takes
	 * effect.
	 */
	hci_update_passive_scan(hdev);

done:
	hci_conn_hold(conn);
	return conn;
}
1651 
/* Initiate (or reuse) a BR/EDR ACL connection to @dst.
 *
 * Reuses an existing ACL connection when present; only issues the HCI
 * create-connection command when the connection is in BT_OPEN or
 * BT_CLOSED state. Returns the connection with a reference held, or an
 * ERR_PTR on failure.
 */
struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
				 u8 sec_level, u8 auth_type,
				 enum conn_reasons conn_reason)
{
	struct hci_conn *acl;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (lmp_bredr_capable(hdev))
			return ERR_PTR(-ECONNREFUSED);

		return ERR_PTR(-EOPNOTSUPP);
	}

	/* Reject outgoing connection to device with same BD ADDR against
	 * CVE-2020-26555
	 */
	if (!bacmp(&hdev->bdaddr, dst)) {
		bt_dev_dbg(hdev, "Reject connection with same BD_ADDR %pMR\n",
			   dst);
		return ERR_PTR(-ECONNREFUSED);
	}

	acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
	if (!acl) {
		acl = hci_conn_add_unset(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
		if (IS_ERR(acl))
			return acl;
	}

	hci_conn_hold(acl);

	acl->conn_reason = conn_reason;
	if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
		acl->sec_level = BT_SECURITY_LOW;
		acl->pending_sec_level = sec_level;
		acl->auth_type = auth_type;
		hci_acl_create_connection(acl);
	}

	return acl;
}
1693 
/* Link child connection @conn (SCO/eSCO/CIS) to @parent (ACL/LE).
 *
 * Takes a hold on the child and a reference on the parent; both are
 * released again in hci_conn_unlink(). Returns the existing link if
 * already linked, NULL if @conn is already linked elsewhere or on
 * allocation failure.
 */
static struct hci_link *hci_conn_link(struct hci_conn *parent,
				      struct hci_conn *conn)
{
	struct hci_dev *hdev = parent->hdev;
	struct hci_link *link;

	bt_dev_dbg(hdev, "parent %p hcon %p", parent, conn);

	if (conn->link)
		return conn->link;

	if (conn->parent)
		return NULL;

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return NULL;

	link->conn = hci_conn_hold(conn);
	conn->link = link;
	conn->parent = hci_conn_get(parent);

	/* Use list_add_tail_rcu append to the list */
	list_add_tail_rcu(&link->list, &parent->link_list);

	return link;
}
1721 
/* Establish a SCO/eSCO connection to @dst on top of an ACL link.
 *
 * Creates or reuses the underlying ACL connection, links the SCO
 * connection to it as a child, and either starts SCO setup immediately
 * (ACL connected) or defers it until the pending mode change completes.
 * Returns the SCO connection, or an ERR_PTR on failure.
 */
struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
				 __u16 setting, struct bt_codec *codec)
{
	struct hci_conn *acl;
	struct hci_conn *sco;
	struct hci_link *link;

	acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING,
			      CONN_REASON_SCO_CONNECT);
	if (IS_ERR(acl))
		return acl;

	sco = hci_conn_hash_lookup_ba(hdev, type, dst);
	if (!sco) {
		sco = hci_conn_add_unset(hdev, type, dst, HCI_ROLE_MASTER);
		if (IS_ERR(sco)) {
			hci_conn_drop(acl);
			return sco;
		}
	}

	link = hci_conn_link(acl, sco);
	if (!link) {
		hci_conn_drop(acl);
		hci_conn_drop(sco);
		return ERR_PTR(-ENOLINK);
	}

	sco->setting = setting;
	sco->codec = *codec;

	if (acl->state == BT_CONNECTED &&
	    (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
		set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
		hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);

		if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
			/* defer SCO setup until mode change completed */
			set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
			return sco;
		}

		hci_sco_setup(acl, 0x00);
	}

	return sco;
}
1769 
/* Send the HCI LE Create BIG command for @conn's BIG, covering all
 * currently bound BIS connections in that BIG.
 */
static int hci_le_create_big(struct hci_conn *conn, struct bt_iso_qos *qos)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_le_create_big cp;
	struct iso_list_data data;

	memset(&cp, 0, sizeof(cp));

	data.big = qos->bcast.big;
	data.bis = qos->bcast.bis;
	data.count = 0;

	/* Create a BIS for each bound connection */
	hci_conn_hash_list_state(hdev, bis_list, ISO_LINK,
				 BT_BOUND, &data);

	/* Fill the command from the broadcast QoS; the BIS/adv handle
	 * doubles as the advertising set carrying the BASE.
	 */
	cp.handle = qos->bcast.big;
	cp.adv_handle = qos->bcast.bis;
	cp.num_bis  = data.count;
	hci_cpu_to_le24(qos->bcast.out.interval, cp.bis.sdu_interval);
	cp.bis.sdu = cpu_to_le16(qos->bcast.out.sdu);
	cp.bis.latency =  cpu_to_le16(qos->bcast.out.latency);
	cp.bis.rtn  = qos->bcast.out.rtn;
	cp.bis.phy  = qos->bcast.out.phy;
	cp.bis.packing = qos->bcast.packing;
	cp.bis.framing = qos->bcast.framing;
	cp.bis.encryption = qos->bcast.encryption;
	memcpy(cp.bis.bcode, qos->bcast.bcode, sizeof(cp.bis.bcode));

	return hci_send_cmd(hdev, HCI_OP_LE_CREATE_BIG, sizeof(cp), &cp);
}
1801 
/* hci_cmd_sync work function: (re)program all CIS configurations of the
 * CIG whose id is carried in @data via HCI LE Set CIG Parameters.
 *
 * CIG-level parameters are taken from the first connection found in the
 * CIG; per-CIS entries are collected by scanning all possible cis_id
 * values. Returns 0 when the CIG no longer exists or has no CIS.
 */
static int set_cig_params_sync(struct hci_dev *hdev, void *data)
{
	u8 cig_id = PTR_UINT(data);
	struct hci_conn *conn;
	struct bt_iso_qos *qos;
	struct iso_cig_params pdu;
	u8 cis_id;

	conn = hci_conn_hash_lookup_cig(hdev, cig_id);
	if (!conn)
		return 0;

	memset(&pdu, 0, sizeof(pdu));

	qos = &conn->iso_qos;
	pdu.cp.cig_id = cig_id;
	hci_cpu_to_le24(qos->ucast.out.interval, pdu.cp.c_interval);
	hci_cpu_to_le24(qos->ucast.in.interval, pdu.cp.p_interval);
	pdu.cp.sca = qos->ucast.sca;
	pdu.cp.packing = qos->ucast.packing;
	pdu.cp.framing = qos->ucast.framing;
	pdu.cp.c_latency = cpu_to_le16(qos->ucast.out.latency);
	pdu.cp.p_latency = cpu_to_le16(qos->ucast.in.latency);

	/* Reprogram all CIS(s) with the same CIG, valid range are:
	 * num_cis: 0x00 to 0x1F
	 * cis_id: 0x00 to 0xEF
	 */
	for (cis_id = 0x00; cis_id < 0xf0 &&
	     pdu.cp.num_cis < ARRAY_SIZE(pdu.cis); cis_id++) {
		struct hci_cis_params *cis;

		conn = hci_conn_hash_lookup_cis(hdev, NULL, 0, cig_id, cis_id);
		if (!conn)
			continue;

		qos = &conn->iso_qos;

		cis = &pdu.cis[pdu.cp.num_cis++];
		cis->cis_id = cis_id;
		cis->c_sdu  = cpu_to_le16(conn->iso_qos.ucast.out.sdu);
		cis->p_sdu  = cpu_to_le16(conn->iso_qos.ucast.in.sdu);
		/* Fall back to the opposite direction's PHY when one
		 * direction is unset.
		 */
		cis->c_phy  = qos->ucast.out.phy ? qos->ucast.out.phy :
			      qos->ucast.in.phy;
		cis->p_phy  = qos->ucast.in.phy ? qos->ucast.in.phy :
			      qos->ucast.out.phy;
		cis->c_rtn  = qos->ucast.out.rtn;
		cis->p_rtn  = qos->ucast.in.rtn;
	}

	if (!pdu.cp.num_cis)
		return 0;

	/* Only send as many CIS entries as were actually filled in */
	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_CIG_PARAMS,
				     sizeof(pdu.cp) +
				     pdu.cp.num_cis * sizeof(pdu.cis[0]), &pdu,
				     HCI_CMD_TIMEOUT);
}
1860 
/* Allocate CIG/CIS ids for @conn's QoS as needed and queue the Set CIG
 * Parameters command. Returns false when no id is available, the
 * requested CIS is already taken, or queueing fails.
 */
static bool hci_le_set_cig_params(struct hci_conn *conn, struct bt_iso_qos *qos)
{
	struct hci_dev *hdev = conn->hdev;
	struct iso_list_data data;

	memset(&data, 0, sizeof(data));

	/* Allocate first still reconfigurable CIG if not set */
	if (qos->ucast.cig == BT_ISO_QOS_CIG_UNSET) {
		for (data.cig = 0x00; data.cig < 0xf0; data.cig++) {
			data.count = 0;

			/* A CIG with any CIS in BT_CONNECT or
			 * BT_CONNECTED state can no longer be
			 * reconfigured - skip it.
			 */
			hci_conn_hash_list_state(hdev, find_cis, ISO_LINK,
						 BT_CONNECT, &data);
			if (data.count)
				continue;

			hci_conn_hash_list_state(hdev, find_cis, ISO_LINK,
						 BT_CONNECTED, &data);
			if (!data.count)
				break;
		}

		if (data.cig == 0xf0)
			return false;

		/* Update CIG */
		qos->ucast.cig = data.cig;
	}

	if (qos->ucast.cis != BT_ISO_QOS_CIS_UNSET) {
		/* Caller-chosen CIS must not already exist in this CIG */
		if (hci_conn_hash_lookup_cis(hdev, NULL, 0, qos->ucast.cig,
					     qos->ucast.cis))
			return false;
		goto done;
	}

	/* Allocate first available CIS if not set */
	for (data.cig = qos->ucast.cig, data.cis = 0x00; data.cis < 0xf0;
	     data.cis++) {
		if (!hci_conn_hash_lookup_cis(hdev, NULL, 0, data.cig,
					      data.cis)) {
			/* Update CIS */
			qos->ucast.cis = data.cis;
			break;
		}
	}

	if (qos->ucast.cis == BT_ISO_QOS_CIS_UNSET)
		return false;

done:
	if (hci_cmd_sync_queue(hdev, set_cig_params_sync,
			       UINT_PTR(qos->ucast.cig), NULL) < 0)
		return false;

	return true;
}
1919 
/* Bind a CIS connection object towards @dst with the given unicast QoS,
 * creating the connection if necessary and programming the CIG.
 *
 * Missing interval/latency values in one direction default to the
 * other direction's value, since they cannot be zero. Returns the
 * connection with a reference held (in BT_BOUND state) or an ERR_PTR.
 */
struct hci_conn *hci_bind_cis(struct hci_dev *hdev, bdaddr_t *dst,
			      __u8 dst_type, struct bt_iso_qos *qos)
{
	struct hci_conn *cis;

	cis = hci_conn_hash_lookup_cis(hdev, dst, dst_type, qos->ucast.cig,
				       qos->ucast.cis);
	if (!cis) {
		cis = hci_conn_add_unset(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
		if (IS_ERR(cis))
			return cis;
		cis->cleanup = cis_cleanup;
		cis->dst_type = dst_type;
		cis->iso_qos.ucast.cig = BT_ISO_QOS_CIG_UNSET;
		cis->iso_qos.ucast.cis = BT_ISO_QOS_CIS_UNSET;
	}

	if (cis->state == BT_CONNECTED)
		return cis;

	/* Check if CIS has been set and the settings matches */
	if (cis->state == BT_BOUND &&
	    !memcmp(&cis->iso_qos, qos, sizeof(*qos)))
		return cis;

	/* Update LINK PHYs according to QoS preference */
	cis->le_tx_phy = qos->ucast.out.phy;
	cis->le_rx_phy = qos->ucast.in.phy;

	/* If output interval is not set use the input interval as it cannot be
	 * 0x000000.
	 */
	if (!qos->ucast.out.interval)
		qos->ucast.out.interval = qos->ucast.in.interval;

	/* If input interval is not set use the output interval as it cannot be
	 * 0x000000.
	 */
	if (!qos->ucast.in.interval)
		qos->ucast.in.interval = qos->ucast.out.interval;

	/* If output latency is not set use the input latency as it cannot be
	 * 0x0000.
	 */
	if (!qos->ucast.out.latency)
		qos->ucast.out.latency = qos->ucast.in.latency;

	/* If input latency is not set use the output latency as it cannot be
	 * 0x0000.
	 */
	if (!qos->ucast.in.latency)
		qos->ucast.in.latency = qos->ucast.out.latency;

	if (!hci_le_set_cig_params(cis, qos)) {
		hci_conn_drop(cis);
		return ERR_PTR(-EINVAL);
	}

	hci_conn_hold(cis);

	cis->iso_qos = *qos;
	cis->state = BT_BOUND;

	return cis;
}
1985 
1986 bool hci_iso_setup_path(struct hci_conn *conn)
1987 {
1988 	struct hci_dev *hdev = conn->hdev;
1989 	struct hci_cp_le_setup_iso_path cmd;
1990 
1991 	memset(&cmd, 0, sizeof(cmd));
1992 
1993 	if (conn->iso_qos.ucast.out.sdu) {
1994 		cmd.handle = cpu_to_le16(conn->handle);
1995 		cmd.direction = 0x00; /* Input (Host to Controller) */
1996 		cmd.path = 0x00; /* HCI path if enabled */
1997 		cmd.codec = 0x03; /* Transparent Data */
1998 
1999 		if (hci_send_cmd(hdev, HCI_OP_LE_SETUP_ISO_PATH, sizeof(cmd),
2000 				 &cmd) < 0)
2001 			return false;
2002 	}
2003 
2004 	if (conn->iso_qos.ucast.in.sdu) {
2005 		cmd.handle = cpu_to_le16(conn->handle);
2006 		cmd.direction = 0x01; /* Output (Controller to Host) */
2007 		cmd.path = 0x00; /* HCI path if enabled */
2008 		cmd.codec = 0x03; /* Transparent Data */
2009 
2010 		if (hci_send_cmd(hdev, HCI_OP_LE_SETUP_ISO_PATH, sizeof(cmd),
2011 				 &cmd) < 0)
2012 			return false;
2013 	}
2014 
2015 	return true;
2016 }
2017 
2018 int hci_conn_check_create_cis(struct hci_conn *conn)
2019 {
2020 	if (conn->type != ISO_LINK || !bacmp(&conn->dst, BDADDR_ANY))
2021 		return -EINVAL;
2022 
2023 	if (!conn->parent || conn->parent->state != BT_CONNECTED ||
2024 	    conn->state != BT_CONNECT || HCI_CONN_HANDLE_UNSET(conn->handle))
2025 		return 1;
2026 
2027 	return 0;
2028 }
2029 
/* hci_cmd_sync work function wrapper: @data is unused, it only adapts
 * hci_le_create_cis_sync() to the hci_cmd_sync_queue() callback
 * signature.
 */
static int hci_create_cis_sync(struct hci_dev *hdev, void *data)
{
	return hci_le_create_cis_sync(hdev);
}
2034 
/* Queue LE Create CIS for any CIS connections that are ready.
 *
 * Returns -EBUSY when a Create CIS is already in flight
 * (HCI_CONN_CREATE_CIS set on some connection), 0 when nothing is
 * pending or after queueing the command.
 */
int hci_le_create_cis_pending(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	bool pending = false;

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_CREATE_CIS, &conn->flags)) {
			rcu_read_unlock();
			return -EBUSY;
		}

		/* hci_conn_check_create_cis() returns 0 when ready */
		if (!hci_conn_check_create_cis(conn))
			pending = true;
	}

	rcu_read_unlock();

	if (!pending)
		return 0;

	/* Queue Create CIS */
	return hci_cmd_sync_queue(hdev, hci_create_cis_sync, NULL, NULL);
}
2060 
/* Fill in defaults for unset fields of one direction of an ISO QoS,
 * deriving them from the underlying LE ACL connection.
 *
 * Note: the SDU default is decided before the ANY-PHY substitution
 * below, so it applies whenever the caller-provided PHY is non-zero
 * (including BT_ISO_PHY_ANY).
 */
static void hci_iso_qos_setup(struct hci_dev *hdev, struct hci_conn *conn,
			      struct bt_iso_io_qos *qos, __u8 phy)
{
	/* Only set MTU if PHY is enabled */
	if (!qos->sdu && qos->phy)
		qos->sdu = conn->mtu;

	/* Use the same PHY as ACL if set to any */
	if (qos->phy == BT_ISO_PHY_ANY)
		qos->phy = phy;

	/* Use LE ACL connection interval if not set */
	if (!qos->interval)
		/* ACL interval unit in 1.25 ms to us */
		qos->interval = conn->le_conn_interval * 1250;

	/* Use LE ACL connection latency if not set */
	if (!qos->latency)
		qos->latency = conn->le_conn_latency;
}
2081 
/* hci_cmd_sync work function: start periodic advertising carrying the
 * BASE for @data (a BIS hci_conn), then issue LE Create BIG.
 */
static int create_big_sync(struct hci_dev *hdev, void *data)
{
	struct hci_conn *conn = data;
	struct bt_iso_qos *qos = &conn->iso_qos;
	u16 interval, sync_interval = 0;
	u32 flags = 0;
	int err;

	if (qos->bcast.out.phy == 0x02)
		flags |= MGMT_ADV_FLAG_SEC_2M;

	/* Align intervals */
	interval = (qos->bcast.out.interval / 1250) * qos->bcast.sync_factor;

	/* Only enable periodic advertising when a BIS/adv handle is set */
	if (qos->bcast.bis)
		sync_interval = interval * 4;

	err = hci_start_per_adv_sync(hdev, qos->bcast.bis, conn->le_per_adv_data_len,
				     conn->le_per_adv_data, flags, interval,
				     interval, sync_interval);
	if (err)
		return err;

	return hci_le_create_big(conn, &conn->iso_qos);
}
2107 
/* Completion callback for create_pa_sync(): log failures and free the
 * command parameters allocated in hci_pa_create_sync().
 */
static void create_pa_complete(struct hci_dev *hdev, void *data, int err)
{
	bt_dev_dbg(hdev, "");

	if (err)
		bt_dev_err(hdev, "Unable to create PA: %d", err);

	kfree(data);
}
2119 
/* hci_cmd_sync callback: issue LE Periodic Advertising Create Sync.
 * @data is the parameter block allocated by hci_pa_create_sync(); it is
 * freed by create_pa_complete(), not here.
 */
static int create_pa_sync(struct hci_dev *hdev, void *data)
{
	struct hci_cp_le_pa_create_sync *cp = data;
	int err;

	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_CREATE_SYNC,
				    sizeof(*cp), cp, HCI_CMD_TIMEOUT);
	if (err) {
		/* Allow a new PA create sync attempt after failure */
		hci_dev_clear_flag(hdev, HCI_PA_SYNC);
		return err;
	}

	/* PA sync establishment relies on passive scanning being active */
	return hci_update_passive_scan_sync(hdev);
}
2134 
/* Queue an LE Periodic Advertising Create Sync for @dst with the given
 * advertising SID and broadcast QoS parameters.
 *
 * Only one PA create sync may be in flight (guarded by HCI_PA_SYNC);
 * returns -EBUSY if one is already pending, -ENOMEM on allocation
 * failure, otherwise the hci_cmd_sync_queue() result. The allocated
 * parameter block is freed by create_pa_complete().
 */
int hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst, __u8 dst_type,
		       __u8 sid, struct bt_iso_qos *qos)
{
	struct hci_cp_le_pa_create_sync *cp;

	if (hci_dev_test_and_set_flag(hdev, HCI_PA_SYNC))
		return -EBUSY;

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp) {
		/* Undo the flag so a later attempt can proceed */
		hci_dev_clear_flag(hdev, HCI_PA_SYNC);
		return -ENOMEM;
	}

	cp->options = qos->bcast.options;
	cp->sid = sid;
	cp->addr_type = dst_type;
	bacpy(&cp->addr, dst);
	cp->skip = cpu_to_le16(qos->bcast.skip);
	cp->sync_timeout = cpu_to_le16(qos->bcast.sync_timeout);
	cp->sync_cte_type = qos->bcast.sync_cte_type;

	/* Queue start pa_create_sync and scan */
	return hci_cmd_sync_queue(hdev, create_pa_sync, cp, create_pa_complete);
}
2160 
2161 int hci_le_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon,
2162 			   struct bt_iso_qos *qos,
2163 			   __u16 sync_handle, __u8 num_bis, __u8 bis[])
2164 {
2165 	struct _packed {
2166 		struct hci_cp_le_big_create_sync cp;
2167 		__u8  bis[0x11];
2168 	} pdu;
2169 	int err;
2170 
2171 	if (num_bis > sizeof(pdu.bis))
2172 		return -EINVAL;
2173 
2174 	err = qos_set_big(hdev, qos);
2175 	if (err)
2176 		return err;
2177 
2178 	if (hcon)
2179 		hcon->iso_qos.bcast.big = qos->bcast.big;
2180 
2181 	memset(&pdu, 0, sizeof(pdu));
2182 	pdu.cp.handle = qos->bcast.big;
2183 	pdu.cp.sync_handle = cpu_to_le16(sync_handle);
2184 	pdu.cp.encryption = qos->bcast.encryption;
2185 	memcpy(pdu.cp.bcode, qos->bcast.bcode, sizeof(pdu.cp.bcode));
2186 	pdu.cp.mse = qos->bcast.mse;
2187 	pdu.cp.timeout = cpu_to_le16(qos->bcast.timeout);
2188 	pdu.cp.num_bis = num_bis;
2189 	memcpy(pdu.bis, bis, num_bis);
2190 
2191 	return hci_send_cmd(hdev, HCI_OP_LE_BIG_CREATE_SYNC,
2192 			    sizeof(pdu.cp) + num_bis, &pdu);
2193 }
2194 
/* Completion callback for create_big_sync(): on failure, notify the
 * upper layer and destroy the broadcast connection.
 */
static void create_big_complete(struct hci_dev *hdev, void *data, int err)
{
	struct hci_conn *conn = data;

	bt_dev_dbg(hdev, "conn %p", conn);

	if (!err)
		return;

	bt_dev_err(hdev, "Unable to create BIG: %d", err);
	hci_connect_cfm(conn, err);
	hci_conn_del(conn);
}
2207 
2208 struct hci_conn *hci_bind_bis(struct hci_dev *hdev, bdaddr_t *dst,
2209 			      struct bt_iso_qos *qos,
2210 			      __u8 base_len, __u8 *base)
2211 {
2212 	struct hci_conn *conn;
2213 	__u8 eir[HCI_MAX_PER_AD_LENGTH];
2214 
2215 	if (base_len && base)
2216 		base_len = eir_append_service_data(eir, 0,  0x1851,
2217 						   base, base_len);
2218 
2219 	/* We need hci_conn object using the BDADDR_ANY as dst */
2220 	conn = hci_add_bis(hdev, dst, qos, base_len, eir);
2221 	if (IS_ERR(conn))
2222 		return conn;
2223 
2224 	/* Update LINK PHYs according to QoS preference */
2225 	conn->le_tx_phy = qos->bcast.out.phy;
2226 	conn->le_tx_phy = qos->bcast.out.phy;
2227 
2228 	/* Add Basic Announcement into Peridic Adv Data if BASE is set */
2229 	if (base_len && base) {
2230 		memcpy(conn->le_per_adv_data,  eir, sizeof(eir));
2231 		conn->le_per_adv_data_len = base_len;
2232 	}
2233 
2234 	hci_iso_qos_setup(hdev, conn, &qos->bcast.out,
2235 			  conn->le_tx_phy ? conn->le_tx_phy :
2236 			  hdev->le_tx_def_phys);
2237 
2238 	conn->iso_qos = *qos;
2239 	conn->state = BT_BOUND;
2240 
2241 	return conn;
2242 }
2243 
2244 static void bis_mark_per_adv(struct hci_conn *conn, void *data)
2245 {
2246 	struct iso_list_data *d = data;
2247 
2248 	/* Skip if not broadcast/ANY address */
2249 	if (bacmp(&conn->dst, BDADDR_ANY))
2250 		return;
2251 
2252 	if (d->big != conn->iso_qos.bcast.big ||
2253 	    d->bis == BT_ISO_QOS_BIS_UNSET ||
2254 	    d->bis != conn->iso_qos.bcast.bis)
2255 		return;
2256 
2257 	set_bit(HCI_CONN_PER_ADV, &conn->flags);
2258 }
2259 
/* Create and start a broadcast ISO (BIS) connection: bind the BIS,
 * mark all bound broadcast connections of the BIG, then queue the
 * periodic advertising start and LE Create BIG commands.
 * Returns the connection or an ERR_PTR.
 */
struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst,
				 __u8 dst_type, struct bt_iso_qos *qos,
				 __u8 base_len, __u8 *base)
{
	struct hci_conn *conn;
	int err;
	struct iso_list_data data;

	conn = hci_bind_bis(hdev, dst, qos, base_len, base);
	if (IS_ERR(conn))
		return conn;

	data.big = qos->bcast.big;
	data.bis = qos->bcast.bis;

	/* Set HCI_CONN_PER_ADV for all bound connections, to mark that
	 * the start periodic advertising and create BIG commands have
	 * been queued
	 */
	hci_conn_hash_list_state(hdev, bis_mark_per_adv, ISO_LINK,
				 BT_BOUND, &data);

	/* Queue start periodic advertising and create BIG */
	err = hci_cmd_sync_queue(hdev, create_big_sync, conn,
				 create_big_complete);
	if (err < 0) {
		/* Queueing failed: release the reference taken on bind */
		hci_conn_drop(conn);
		return ERR_PTR(err);
	}

	return conn;
}
2292 
/* Create a connected ISO (CIS) stream to @dst: establish (or reuse via
 * the connect helpers) the underlying LE ACL link, bind a CIS on top of
 * it, link the two, and queue Create CIS.
 * Returns the CIS connection or an ERR_PTR.
 */
struct hci_conn *hci_connect_cis(struct hci_dev *hdev, bdaddr_t *dst,
				 __u8 dst_type, struct bt_iso_qos *qos)
{
	struct hci_conn *le;
	struct hci_conn *cis;
	struct hci_link *link;

	/* While advertising, connect as peripheral; otherwise initiate a
	 * connection via background scanning.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		le = hci_connect_le(hdev, dst, dst_type, false,
				    BT_SECURITY_LOW,
				    HCI_LE_CONN_TIMEOUT,
				    HCI_ROLE_SLAVE);
	else
		le = hci_connect_le_scan(hdev, dst, dst_type,
					 BT_SECURITY_LOW,
					 HCI_LE_CONN_TIMEOUT,
					 CONN_REASON_ISO_CONNECT);
	if (IS_ERR(le))
		return le;

	/* Derive missing QoS defaults from the ACL link for both
	 * directions.
	 */
	hci_iso_qos_setup(hdev, le, &qos->ucast.out,
			  le->le_tx_phy ? le->le_tx_phy : hdev->le_tx_def_phys);
	hci_iso_qos_setup(hdev, le, &qos->ucast.in,
			  le->le_rx_phy ? le->le_rx_phy : hdev->le_rx_def_phys);

	cis = hci_bind_cis(hdev, dst, dst_type, qos);
	if (IS_ERR(cis)) {
		hci_conn_drop(le);
		return cis;
	}

	link = hci_conn_link(le, cis);
	if (!link) {
		hci_conn_drop(le);
		hci_conn_drop(cis);
		return ERR_PTR(-ENOLINK);
	}

	/* Link takes the refcount */
	hci_conn_drop(cis);

	cis->state = BT_CONNECT;

	hci_le_create_cis_pending(hdev);

	return cis;
}
2340 
2341 /* Check link security requirement */
2342 int hci_conn_check_link_mode(struct hci_conn *conn)
2343 {
2344 	BT_DBG("hcon %p", conn);
2345 
2346 	/* In Secure Connections Only mode, it is required that Secure
2347 	 * Connections is used and the link is encrypted with AES-CCM
2348 	 * using a P-256 authenticated combination key.
2349 	 */
2350 	if (hci_dev_test_flag(conn->hdev, HCI_SC_ONLY)) {
2351 		if (!hci_conn_sc_enabled(conn) ||
2352 		    !test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
2353 		    conn->key_type != HCI_LK_AUTH_COMBINATION_P256)
2354 			return 0;
2355 	}
2356 
2357 	 /* AES encryption is required for Level 4:
2358 	  *
2359 	  * BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 3, Part C
2360 	  * page 1319:
2361 	  *
2362 	  * 128-bit equivalent strength for link and encryption keys
2363 	  * required using FIPS approved algorithms (E0 not allowed,
2364 	  * SAFER+ not allowed, and P-192 not allowed; encryption key
2365 	  * not shortened)
2366 	  */
2367 	if (conn->sec_level == BT_SECURITY_FIPS &&
2368 	    !test_bit(HCI_CONN_AES_CCM, &conn->flags)) {
2369 		bt_dev_err(conn->hdev,
2370 			   "Invalid security: Missing AES-CCM usage");
2371 		return 0;
2372 	}
2373 
2374 	if (hci_conn_ssp_enabled(conn) &&
2375 	    !test_bit(HCI_CONN_ENCRYPT, &conn->flags))
2376 		return 0;
2377 
2378 	return 1;
2379 }
2380 
2381 /* Authenticate remote device */
/* Authenticate remote device.
 * Returns 1 when the link is already authenticated at a sufficient
 * level, 0 when an authentication request has been issued (or is
 * already pending) and the caller must wait for the HCI event.
 */
static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
	BT_DBG("hcon %p", conn);

	/* Never lower a security level that is already being raised */
	if (conn->pending_sec_level > sec_level)
		sec_level = conn->pending_sec_level;

	if (sec_level > conn->sec_level)
		conn->pending_sec_level = sec_level;
	else if (test_bit(HCI_CONN_AUTH, &conn->flags))
		return 1;

	/* Make sure we preserve an existing MITM requirement*/
	auth_type |= (conn->auth_type & 0x01);

	conn->auth_type = auth_type;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(cp), &cp);

		/* Set the ENCRYPT_PEND to trigger encryption after
		 * authentication.
		 */
		if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags))
			set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
	}

	return 0;
}
2415 
2416 /* Encrypt the link */
2417 static void hci_conn_encrypt(struct hci_conn *conn)
2418 {
2419 	BT_DBG("hcon %p", conn);
2420 
2421 	if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
2422 		struct hci_cp_set_conn_encrypt cp;
2423 		cp.handle  = cpu_to_le16(conn->handle);
2424 		cp.encrypt = 0x01;
2425 		hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2426 			     &cp);
2427 	}
2428 }
2429 
2430 /* Enable security */
/* Enable security on the connection (LE links are delegated to SMP).
 *
 * Returns 1 when the requirements for @sec_level are already met,
 * 0 when authentication/encryption has been initiated (the caller
 * must wait for the corresponding HCI events).
 */
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
		      bool initiator)
{
	BT_DBG("hcon %p", conn);

	if (conn->type == LE_LINK)
		return smp_conn_security(conn, sec_level);

	/* For sdp we don't need the link key. */
	if (sec_level == BT_SECURITY_SDP)
		return 1;

	/* For non 2.1 devices and low security level we don't need the link
	   key. */
	if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
		return 1;

	/* For other security levels we need the link key. */
	if (!test_bit(HCI_CONN_AUTH, &conn->flags))
		goto auth;

	/* Check if the existing key type is strong enough for the
	 * requested level; if so skip straight to encryption.
	 */
	switch (conn->key_type) {
	case HCI_LK_AUTH_COMBINATION_P256:
		/* An authenticated FIPS approved combination key has
		 * sufficient security for security level 4 or lower.
		 */
		if (sec_level <= BT_SECURITY_FIPS)
			goto encrypt;
		break;
	case HCI_LK_AUTH_COMBINATION_P192:
		/* An authenticated combination key has sufficient security for
		 * security level 3 or lower.
		 */
		if (sec_level <= BT_SECURITY_HIGH)
			goto encrypt;
		break;
	case HCI_LK_UNAUTH_COMBINATION_P192:
	case HCI_LK_UNAUTH_COMBINATION_P256:
		/* An unauthenticated combination key has sufficient security
		 * for security level 2 or lower.
		 */
		if (sec_level <= BT_SECURITY_MEDIUM)
			goto encrypt;
		break;
	case HCI_LK_COMBINATION:
		/* A combination key has always sufficient security for the
		 * security levels 2 or lower. High security level requires the
		 * combination key is generated using maximum PIN code length
		 * (16). For pre 2.1 units.
		 */
		if (sec_level <= BT_SECURITY_MEDIUM || conn->pin_length == 16)
			goto encrypt;
		break;
	default:
		break;
	}

auth:
	/* Don't start authentication while encryption is changing */
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
		return 0;

	if (initiator)
		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

	if (!hci_conn_auth(conn, sec_level, auth_type))
		return 0;

encrypt:
	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags)) {
		/* Ensure that the encryption key size has been read,
		 * otherwise stall the upper layer responses.
		 */
		if (!conn->enc_key_size)
			return 0;

		/* Nothing else needed, all requirements are met */
		return 1;
	}

	hci_conn_encrypt(conn);
	return 0;
}
EXPORT_SYMBOL(hci_conn_security);
2513 EXPORT_SYMBOL(hci_conn_security);
2514 
2515 /* Check secure link requirement */
2516 int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
2517 {
2518 	BT_DBG("hcon %p", conn);
2519 
2520 	/* Accept if non-secure or higher security level is required */
2521 	if (sec_level != BT_SECURITY_HIGH && sec_level != BT_SECURITY_FIPS)
2522 		return 1;
2523 
2524 	/* Accept if secure or higher security level is already present */
2525 	if (conn->sec_level == BT_SECURITY_HIGH ||
2526 	    conn->sec_level == BT_SECURITY_FIPS)
2527 		return 1;
2528 
2529 	/* Reject not secure link */
2530 	return 0;
2531 }
2532 EXPORT_SYMBOL(hci_conn_check_secure);
2533 
2534 /* Switch role */
2535 int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
2536 {
2537 	BT_DBG("hcon %p", conn);
2538 
2539 	if (role == conn->role)
2540 		return 1;
2541 
2542 	if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
2543 		struct hci_cp_switch_role cp;
2544 		bacpy(&cp.bdaddr, &conn->dst);
2545 		cp.role = role;
2546 		hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
2547 	}
2548 
2549 	return 0;
2550 }
2551 EXPORT_SYMBOL(hci_conn_switch_role);
2552 
2553 /* Enter active mode */
2554 void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
2555 {
2556 	struct hci_dev *hdev = conn->hdev;
2557 
2558 	BT_DBG("hcon %p mode %d", conn, conn->mode);
2559 
2560 	if (conn->mode != HCI_CM_SNIFF)
2561 		goto timer;
2562 
2563 	if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
2564 		goto timer;
2565 
2566 	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
2567 		struct hci_cp_exit_sniff_mode cp;
2568 		cp.handle = cpu_to_le16(conn->handle);
2569 		hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
2570 	}
2571 
2572 timer:
2573 	if (hdev->idle_timeout > 0)
2574 		queue_delayed_work(hdev->workqueue, &conn->idle_work,
2575 				   msecs_to_jiffies(hdev->idle_timeout));
2576 }
2577 
2578 /* Drop all connection on the device */
/* Drop all connection on the device, signalling local-host termination
 * to the upper layers for each one.
 */
void hci_conn_hash_flush(struct hci_dev *hdev)
{
	struct list_head *head = &hdev->conn_hash.list;
	struct hci_conn *conn;

	BT_DBG("hdev %s", hdev->name);

	/* We should not traverse the list here, because hci_conn_del
	 * can remove extra links, which may cause the list traversal
	 * to hit items that have already been released.
	 */
	while ((conn = list_first_entry_or_null(head,
						struct hci_conn,
						list)) != NULL) {
		conn->state = BT_CLOSED;
		hci_disconn_cfm(conn, HCI_ERROR_LOCAL_HOST_TERM);
		/* Removes conn (and possibly linked conns) from the list */
		hci_conn_del(conn);
	}
}
2598 
2599 /* Check pending connect attempts */
/* Check pending connect attempts: if an ACL connection is waiting in
 * BT_CONNECT2, kick off its connection request.
 */
void hci_conn_check_pending(struct hci_dev *hdev)
{
	struct hci_conn *conn;

	BT_DBG("hdev %s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
	if (conn)
		hci_acl_create_connection(conn);

	hci_dev_unlock(hdev);
}
2614 
2615 static u32 get_link_mode(struct hci_conn *conn)
2616 {
2617 	u32 link_mode = 0;
2618 
2619 	if (conn->role == HCI_ROLE_MASTER)
2620 		link_mode |= HCI_LM_MASTER;
2621 
2622 	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
2623 		link_mode |= HCI_LM_ENCRYPT;
2624 
2625 	if (test_bit(HCI_CONN_AUTH, &conn->flags))
2626 		link_mode |= HCI_LM_AUTH;
2627 
2628 	if (test_bit(HCI_CONN_SECURE, &conn->flags))
2629 		link_mode |= HCI_LM_SECURE;
2630 
2631 	if (test_bit(HCI_CONN_FIPS, &conn->flags))
2632 		link_mode |= HCI_LM_FIPS;
2633 
2634 	return link_mode;
2635 }
2636 
/* HCIGETCONNLIST ioctl helper: copy a snapshot of up to req.conn_num
 * connection info entries for the requested device back to user space.
 * Returns 0 on success or a negative errno.
 */
int hci_get_conn_list(void __user *arg)
{
	struct hci_conn *c;
	struct hci_conn_list_req req, *cl;
	struct hci_conn_info *ci;
	struct hci_dev *hdev;
	int n = 0, size, err;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	/* Cap the user-supplied count to bound the kernel allocation */
	if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
		return -EINVAL;

	size = sizeof(req) + req.conn_num * sizeof(*ci);

	cl = kmalloc(size, GFP_KERNEL);
	if (!cl)
		return -ENOMEM;

	hdev = hci_dev_get(req.dev_id);
	if (!hdev) {
		kfree(cl);
		return -ENODEV;
	}

	ci = cl->conn_info;

	/* Snapshot the connection list under the device lock */
	hci_dev_lock(hdev);
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		bacpy(&(ci + n)->bdaddr, &c->dst);
		(ci + n)->handle = c->handle;
		(ci + n)->type  = c->type;
		(ci + n)->out   = c->out;
		(ci + n)->state = c->state;
		(ci + n)->link_mode = get_link_mode(c);
		if (++n >= req.conn_num)
			break;
	}
	hci_dev_unlock(hdev);

	cl->dev_id = hdev->id;
	cl->conn_num = n;
	/* Only the entries actually filled in are copied out */
	size = sizeof(req) + n * sizeof(*ci);

	hci_dev_put(hdev);

	err = copy_to_user(arg, cl, size);
	kfree(cl);

	return err ? -EFAULT : 0;
}
2689 
2690 int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
2691 {
2692 	struct hci_conn_info_req req;
2693 	struct hci_conn_info ci;
2694 	struct hci_conn *conn;
2695 	char __user *ptr = arg + sizeof(req);
2696 
2697 	if (copy_from_user(&req, arg, sizeof(req)))
2698 		return -EFAULT;
2699 
2700 	hci_dev_lock(hdev);
2701 	conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
2702 	if (conn) {
2703 		bacpy(&ci.bdaddr, &conn->dst);
2704 		ci.handle = conn->handle;
2705 		ci.type  = conn->type;
2706 		ci.out   = conn->out;
2707 		ci.state = conn->state;
2708 		ci.link_mode = get_link_mode(conn);
2709 	}
2710 	hci_dev_unlock(hdev);
2711 
2712 	if (!conn)
2713 		return -ENOENT;
2714 
2715 	return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
2716 }
2717 
2718 int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
2719 {
2720 	struct hci_auth_info_req req;
2721 	struct hci_conn *conn;
2722 
2723 	if (copy_from_user(&req, arg, sizeof(req)))
2724 		return -EFAULT;
2725 
2726 	hci_dev_lock(hdev);
2727 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
2728 	if (conn)
2729 		req.type = conn->auth_type;
2730 	hci_dev_unlock(hdev);
2731 
2732 	if (!conn)
2733 		return -ENOENT;
2734 
2735 	return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
2736 }
2737 
2738 struct hci_chan *hci_chan_create(struct hci_conn *conn)
2739 {
2740 	struct hci_dev *hdev = conn->hdev;
2741 	struct hci_chan *chan;
2742 
2743 	BT_DBG("%s hcon %p", hdev->name, conn);
2744 
2745 	if (test_bit(HCI_CONN_DROP, &conn->flags)) {
2746 		BT_DBG("Refusing to create new hci_chan");
2747 		return NULL;
2748 	}
2749 
2750 	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
2751 	if (!chan)
2752 		return NULL;
2753 
2754 	chan->conn = hci_conn_get(conn);
2755 	skb_queue_head_init(&chan->data_q);
2756 	chan->state = BT_CONNECTED;
2757 
2758 	list_add_rcu(&chan->list, &conn->chan_list);
2759 
2760 	return chan;
2761 }
2762 
/* Remove @chan from its connection, wait for concurrent RCU readers,
 * drop the connection reference and free the channel.
 */
void hci_chan_del(struct hci_chan *chan)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);

	list_del_rcu(&chan->list);

	/* Make sure no RCU reader still sees the channel before it is
	 * freed below.
	 */
	synchronize_rcu();

	/* Prevent new hci_chan's to be created for this hci_conn */
	set_bit(HCI_CONN_DROP, &conn->flags);

	hci_conn_put(conn);

	skb_queue_purge(&chan->data_q);
	kfree(chan);
}
2782 
2783 void hci_chan_list_flush(struct hci_conn *conn)
2784 {
2785 	struct hci_chan *chan, *n;
2786 
2787 	BT_DBG("hcon %p", conn);
2788 
2789 	list_for_each_entry_safe(chan, n, &conn->chan_list, list)
2790 		hci_chan_del(chan);
2791 }
2792 
2793 static struct hci_chan *__hci_chan_lookup_handle(struct hci_conn *hcon,
2794 						 __u16 handle)
2795 {
2796 	struct hci_chan *hchan;
2797 
2798 	list_for_each_entry(hchan, &hcon->chan_list, list) {
2799 		if (hchan->handle == handle)
2800 			return hchan;
2801 	}
2802 
2803 	return NULL;
2804 }
2805 
2806 struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle)
2807 {
2808 	struct hci_conn_hash *h = &hdev->conn_hash;
2809 	struct hci_conn *hcon;
2810 	struct hci_chan *hchan = NULL;
2811 
2812 	rcu_read_lock();
2813 
2814 	list_for_each_entry_rcu(hcon, &h->list, list) {
2815 		hchan = __hci_chan_lookup_handle(hcon, handle);
2816 		if (hchan)
2817 			break;
2818 	}
2819 
2820 	rcu_read_unlock();
2821 
2822 	return hchan;
2823 }
2824 
/* Report the set of BT_PHY_* bits usable on @conn, derived from the
 * link type and its negotiated packet types / LE PHYs.
 */
u32 hci_conn_get_phy(struct hci_conn *conn)
{
	u32 phys = 0;

	/* BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 2, Part B page 471:
	 * Table 6.2: Packets defined for synchronous, asynchronous, and
	 * CPB logical transport types.
	 */
	switch (conn->type) {
	case SCO_LINK:
		/* SCO logical transport (1 Mb/s):
		 * HV1, HV2, HV3 and DV.
		 */
		phys |= BT_PHY_BR_1M_1SLOT;

		break;

	case ACL_LINK:
		/* ACL logical transport (1 Mb/s) ptt=0:
		 * DH1, DM3, DH3, DM5 and DH5.
		 */
		phys |= BT_PHY_BR_1M_1SLOT;

		if (conn->pkt_type & (HCI_DM3 | HCI_DH3))
			phys |= BT_PHY_BR_1M_3SLOT;

		if (conn->pkt_type & (HCI_DM5 | HCI_DH5))
			phys |= BT_PHY_BR_1M_5SLOT;

		/* ACL logical transport (2 Mb/s) ptt=1:
		 * 2-DH1, 2-DH3 and 2-DH5.
		 *
		 * Note the EDR packet-type bits are "shall not use"
		 * flags, hence the inverted tests below.
		 */
		if (!(conn->pkt_type & HCI_2DH1))
			phys |= BT_PHY_EDR_2M_1SLOT;

		if (!(conn->pkt_type & HCI_2DH3))
			phys |= BT_PHY_EDR_2M_3SLOT;

		if (!(conn->pkt_type & HCI_2DH5))
			phys |= BT_PHY_EDR_2M_5SLOT;

		/* ACL logical transport (3 Mb/s) ptt=1:
		 * 3-DH1, 3-DH3 and 3-DH5.
		 */
		if (!(conn->pkt_type & HCI_3DH1))
			phys |= BT_PHY_EDR_3M_1SLOT;

		if (!(conn->pkt_type & HCI_3DH3))
			phys |= BT_PHY_EDR_3M_3SLOT;

		if (!(conn->pkt_type & HCI_3DH5))
			phys |= BT_PHY_EDR_3M_5SLOT;

		break;

	case ESCO_LINK:
		/* eSCO logical transport (1 Mb/s): EV3, EV4 and EV5 */
		phys |= BT_PHY_BR_1M_1SLOT;

		if (!(conn->pkt_type & (ESCO_EV4 | ESCO_EV5)))
			phys |= BT_PHY_BR_1M_3SLOT;

		/* eSCO logical transport (2 Mb/s): 2-EV3, 2-EV5 */
		if (!(conn->pkt_type & ESCO_2EV3))
			phys |= BT_PHY_EDR_2M_1SLOT;

		if (!(conn->pkt_type & ESCO_2EV5))
			phys |= BT_PHY_EDR_2M_3SLOT;

		/* eSCO logical transport (3 Mb/s): 3-EV3, 3-EV5 */
		if (!(conn->pkt_type & ESCO_3EV3))
			phys |= BT_PHY_EDR_3M_1SLOT;

		if (!(conn->pkt_type & ESCO_3EV5))
			phys |= BT_PHY_EDR_3M_3SLOT;

		break;

	case LE_LINK:
		if (conn->le_tx_phy & HCI_LE_SET_PHY_1M)
			phys |= BT_PHY_LE_1M_TX;

		if (conn->le_rx_phy & HCI_LE_SET_PHY_1M)
			phys |= BT_PHY_LE_1M_RX;

		if (conn->le_tx_phy & HCI_LE_SET_PHY_2M)
			phys |= BT_PHY_LE_2M_TX;

		if (conn->le_rx_phy & HCI_LE_SET_PHY_2M)
			phys |= BT_PHY_LE_2M_RX;

		if (conn->le_tx_phy & HCI_LE_SET_PHY_CODED)
			phys |= BT_PHY_LE_CODED_TX;

		if (conn->le_rx_phy & HCI_LE_SET_PHY_CODED)
			phys |= BT_PHY_LE_CODED_RX;

		break;
	}

	return phys;
}
2927 
2928 static int abort_conn_sync(struct hci_dev *hdev, void *data)
2929 {
2930 	struct hci_conn *conn;
2931 	u16 handle = PTR_UINT(data);
2932 
2933 	conn = hci_conn_hash_lookup_handle(hdev, handle);
2934 	if (!conn)
2935 		return 0;
2936 
2937 	return hci_abort_conn_sync(hdev, conn, conn->abort_reason);
2938 }
2939 
/* Request that @conn be aborted with @reason.
 *
 * The disconnect itself runs asynchronously via the cmd_sync queue
 * (abort_conn_sync). Returns 0 immediately if an abort is already in
 * progress, otherwise the result of queueing the abort.
 */
int hci_abort_conn(struct hci_conn *conn, u8 reason)
{
	struct hci_dev *hdev = conn->hdev;

	/* If abort_reason has already been set it means the connection is
	 * already being aborted so don't attempt to overwrite it.
	 */
	if (conn->abort_reason)
		return 0;

	bt_dev_dbg(hdev, "handle 0x%2.2x reason 0x%2.2x", conn->handle, reason);

	conn->abort_reason = reason;

	/* If the connection is pending check the command opcode since that
	 * might be blocking on hci_cmd_sync_work while waiting its respective
	 * event so we need to hci_cmd_sync_cancel to cancel it.
	 *
	 * hci_connect_le serializes the connection attempts so only one
	 * connection can be in BT_CONNECT at time.
	 */
	if (conn->state == BT_CONNECT && hdev->req_status == HCI_REQ_PEND) {
		switch (hci_skb_event(hdev->sent_cmd)) {
		case HCI_EV_LE_CONN_COMPLETE:
		case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
		case HCI_EVT_LE_CIS_ESTABLISHED:
			hci_cmd_sync_cancel(hdev, ECANCELED);
			break;
		}
	}

	/* Pass the handle (not the pointer) so a concurrently freed
	 * connection is re-looked-up safely in abort_conn_sync().
	 */
	return hci_cmd_sync_queue(hdev, abort_conn_sync, UINT_PTR(conn->handle),
				  NULL);
}
2974