xref: /openbmc/linux/net/bluetooth/hci_conn.c (revision 4970e48f)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4    Copyright 2023 NXP
5 
6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License version 2 as
10    published by the Free Software Foundation;
11 
12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 
21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23    SOFTWARE IS DISCLAIMED.
24 */
25 
26 /* Bluetooth HCI connection handling. */
27 
28 #include <linux/export.h>
29 #include <linux/debugfs.h>
30 
31 #include <net/bluetooth/bluetooth.h>
32 #include <net/bluetooth/hci_core.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/iso.h>
35 #include <net/bluetooth/mgmt.h>
36 
37 #include "hci_request.h"
38 #include "smp.h"
39 #include "eir.h"
40 
41 struct sco_param {
42 	u16 pkt_type;
43 	u16 max_latency;
44 	u8  retrans_effort;
45 };
46 
47 struct conn_handle_t {
48 	struct hci_conn *conn;
49 	__u16 handle;
50 };
51 
52 static const struct sco_param esco_param_cvsd[] = {
53 	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a,	0x01 }, /* S3 */
54 	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007,	0x01 }, /* S2 */
55 	{ EDR_ESCO_MASK | ESCO_EV3,   0x0007,	0x01 }, /* S1 */
56 	{ EDR_ESCO_MASK | ESCO_HV3,   0xffff,	0x01 }, /* D1 */
57 	{ EDR_ESCO_MASK | ESCO_HV1,   0xffff,	0x01 }, /* D0 */
58 };
59 
60 static const struct sco_param sco_param_cvsd[] = {
61 	{ EDR_ESCO_MASK | ESCO_HV3,   0xffff,	0xff }, /* D1 */
62 	{ EDR_ESCO_MASK | ESCO_HV1,   0xffff,	0xff }, /* D0 */
63 };
64 
65 static const struct sco_param esco_param_msbc[] = {
66 	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d,	0x02 }, /* T2 */
67 	{ EDR_ESCO_MASK | ESCO_EV3,   0x0008,	0x02 }, /* T1 */
68 };
69 
70 /* This function requires the caller holds hdev->lock */
71 static void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status)
72 {
73 	struct hci_conn_params *params;
74 	struct hci_dev *hdev = conn->hdev;
75 	struct smp_irk *irk;
76 	bdaddr_t *bdaddr;
77 	u8 bdaddr_type;
78 
79 	bdaddr = &conn->dst;
80 	bdaddr_type = conn->dst_type;
81 
82 	/* Check if we need to convert to identity address */
83 	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
84 	if (irk) {
85 		bdaddr = &irk->bdaddr;
86 		bdaddr_type = irk->addr_type;
87 	}
88 
89 	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, bdaddr,
90 					   bdaddr_type);
91 	if (!params)
92 		return;
93 
94 	if (params->conn) {
95 		hci_conn_drop(params->conn);
96 		hci_conn_put(params->conn);
97 		params->conn = NULL;
98 	}
99 
100 	if (!params->explicit_connect)
101 		return;
102 
103 	/* If the status indicates successful cancellation of
104 	 * the attempt (i.e. Unknown Connection Id) there's no point of
105 	 * notifying failure since we'll go back to keep trying to
106 	 * connect. The only exception is explicit connect requests
107 	 * where a timeout + cancel does indicate an actual failure.
108 	 */
109 	if (status && status != HCI_ERROR_UNKNOWN_CONN_ID)
110 		mgmt_connect_failed(hdev, &conn->dst, conn->type,
111 				    conn->dst_type, status);
112 
113 	/* The connection attempt was doing scan for new RPA, and is
114 	 * in scan phase. If params are not associated with any other
115 	 * autoconnect action, remove them completely. If they are, just unmark
116 	 * them as waiting for connection, by clearing explicit_connect field.
117 	 */
118 	params->explicit_connect = false;
119 
120 	hci_pend_le_list_del_init(params);
121 
122 	switch (params->auto_connect) {
123 	case HCI_AUTO_CONN_EXPLICIT:
124 		hci_conn_params_del(hdev, bdaddr, bdaddr_type);
125 		/* return instead of break to avoid duplicate scan update */
126 		return;
127 	case HCI_AUTO_CONN_DIRECT:
128 	case HCI_AUTO_CONN_ALWAYS:
129 		hci_pend_le_list_add(params, &hdev->pend_le_conns);
130 		break;
131 	case HCI_AUTO_CONN_REPORT:
132 		hci_pend_le_list_add(params, &hdev->pend_le_reports);
133 		break;
134 	default:
135 		break;
136 	}
137 
138 	hci_update_passive_scan(hdev);
139 }
140 
141 static void hci_conn_cleanup(struct hci_conn *conn)
142 {
143 	struct hci_dev *hdev = conn->hdev;
144 
145 	if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags))
146 		hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type);
147 
148 	if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
149 		hci_remove_link_key(hdev, &conn->dst);
150 
151 	hci_chan_list_flush(conn);
152 
153 	hci_conn_hash_del(hdev, conn);
154 
155 	if (HCI_CONN_HANDLE_UNSET(conn->handle))
156 		ida_free(&hdev->unset_handle_ida, conn->handle);
157 
158 	if (conn->cleanup)
159 		conn->cleanup(conn);
160 
161 	if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
162 		switch (conn->setting & SCO_AIRMODE_MASK) {
163 		case SCO_AIRMODE_CVSD:
164 		case SCO_AIRMODE_TRANSP:
165 			if (hdev->notify)
166 				hdev->notify(hdev, HCI_NOTIFY_DISABLE_SCO);
167 			break;
168 		}
169 	} else {
170 		if (hdev->notify)
171 			hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
172 	}
173 
174 	debugfs_remove_recursive(conn->debugfs);
175 
176 	hci_conn_del_sysfs(conn);
177 
178 	hci_dev_put(hdev);
179 }
180 
181 static void hci_acl_create_connection(struct hci_conn *conn)
182 {
183 	struct hci_dev *hdev = conn->hdev;
184 	struct inquiry_entry *ie;
185 	struct hci_cp_create_conn cp;
186 
187 	BT_DBG("hcon %p", conn);
188 
189 	/* Many controllers disallow HCI Create Connection while it is doing
190 	 * HCI Inquiry. So we cancel the Inquiry first before issuing HCI Create
191 	 * Connection. This may cause the MGMT discovering state to become false
192 	 * without user space's request but it is okay since the MGMT Discovery
193 	 * APIs do not promise that discovery should be done forever. Instead,
194 	 * the user space monitors the status of MGMT discovering and it may
195 	 * request for discovery again when this flag becomes false.
196 	 */
197 	if (test_bit(HCI_INQUIRY, &hdev->flags)) {
198 		/* Put this connection to "pending" state so that it will be
199 		 * executed after the inquiry cancel command complete event.
200 		 */
201 		conn->state = BT_CONNECT2;
202 		hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
203 		return;
204 	}
205 
206 	conn->state = BT_CONNECT;
207 	conn->out = true;
208 	conn->role = HCI_ROLE_MASTER;
209 
210 	conn->attempt++;
211 
212 	conn->link_policy = hdev->link_policy;
213 
214 	memset(&cp, 0, sizeof(cp));
215 	bacpy(&cp.bdaddr, &conn->dst);
216 	cp.pscan_rep_mode = 0x02;
217 
218 	ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
219 	if (ie) {
220 		if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
221 			cp.pscan_rep_mode = ie->data.pscan_rep_mode;
222 			cp.pscan_mode     = ie->data.pscan_mode;
223 			cp.clock_offset   = ie->data.clock_offset |
224 					    cpu_to_le16(0x8000);
225 		}
226 
227 		memcpy(conn->dev_class, ie->data.dev_class, 3);
228 	}
229 
230 	cp.pkt_type = cpu_to_le16(conn->pkt_type);
231 	if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
232 		cp.role_switch = 0x01;
233 	else
234 		cp.role_switch = 0x00;
235 
236 	hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
237 }
238 
239 int hci_disconnect(struct hci_conn *conn, __u8 reason)
240 {
241 	BT_DBG("hcon %p", conn);
242 
243 	/* When we are central of an established connection and it enters
244 	 * the disconnect timeout, then go ahead and try to read the
245 	 * current clock offset.  Processing of the result is done
246 	 * within the event handling and hci_clock_offset_evt function.
247 	 */
248 	if (conn->type == ACL_LINK && conn->role == HCI_ROLE_MASTER &&
249 	    (conn->state == BT_CONNECTED || conn->state == BT_CONFIG)) {
250 		struct hci_dev *hdev = conn->hdev;
251 		struct hci_cp_read_clock_offset clkoff_cp;
252 
253 		clkoff_cp.handle = cpu_to_le16(conn->handle);
254 		hci_send_cmd(hdev, HCI_OP_READ_CLOCK_OFFSET, sizeof(clkoff_cp),
255 			     &clkoff_cp);
256 	}
257 
258 	return hci_abort_conn(conn, reason);
259 }
260 
261 static void hci_add_sco(struct hci_conn *conn, __u16 handle)
262 {
263 	struct hci_dev *hdev = conn->hdev;
264 	struct hci_cp_add_sco cp;
265 
266 	BT_DBG("hcon %p", conn);
267 
268 	conn->state = BT_CONNECT;
269 	conn->out = true;
270 
271 	conn->attempt++;
272 
273 	cp.handle   = cpu_to_le16(handle);
274 	cp.pkt_type = cpu_to_le16(conn->pkt_type);
275 
276 	hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
277 }
278 
279 static bool find_next_esco_param(struct hci_conn *conn,
280 				 const struct sco_param *esco_param, int size)
281 {
282 	if (!conn->parent)
283 		return false;
284 
285 	for (; conn->attempt <= size; conn->attempt++) {
286 		if (lmp_esco_2m_capable(conn->parent) ||
287 		    (esco_param[conn->attempt - 1].pkt_type & ESCO_2EV3))
288 			break;
289 		BT_DBG("hcon %p skipped attempt %d, eSCO 2M not supported",
290 		       conn, conn->attempt);
291 	}
292 
293 	return conn->attempt <= size;
294 }
295 
296 static int configure_datapath_sync(struct hci_dev *hdev, struct bt_codec *codec)
297 {
298 	int err;
299 	__u8 vnd_len, *vnd_data = NULL;
300 	struct hci_op_configure_data_path *cmd = NULL;
301 
302 	err = hdev->get_codec_config_data(hdev, ESCO_LINK, codec, &vnd_len,
303 					  &vnd_data);
304 	if (err < 0)
305 		goto error;
306 
307 	cmd = kzalloc(sizeof(*cmd) + vnd_len, GFP_KERNEL);
308 	if (!cmd) {
309 		err = -ENOMEM;
310 		goto error;
311 	}
312 
313 	err = hdev->get_data_path_id(hdev, &cmd->data_path_id);
314 	if (err < 0)
315 		goto error;
316 
317 	cmd->vnd_len = vnd_len;
318 	memcpy(cmd->vnd_data, vnd_data, vnd_len);
319 
320 	cmd->direction = 0x00;
321 	__hci_cmd_sync_status(hdev, HCI_CONFIGURE_DATA_PATH,
322 			      sizeof(*cmd) + vnd_len, cmd, HCI_CMD_TIMEOUT);
323 
324 	cmd->direction = 0x01;
325 	err = __hci_cmd_sync_status(hdev, HCI_CONFIGURE_DATA_PATH,
326 				    sizeof(*cmd) + vnd_len, cmd,
327 				    HCI_CMD_TIMEOUT);
328 error:
329 
330 	kfree(cmd);
331 	kfree(vnd_data);
332 	return err;
333 }
334 
335 static int hci_enhanced_setup_sync(struct hci_dev *hdev, void *data)
336 {
337 	struct conn_handle_t *conn_handle = data;
338 	struct hci_conn *conn = conn_handle->conn;
339 	__u16 handle = conn_handle->handle;
340 	struct hci_cp_enhanced_setup_sync_conn cp;
341 	const struct sco_param *param;
342 
343 	kfree(conn_handle);
344 
345 	bt_dev_dbg(hdev, "hcon %p", conn);
346 
347 	/* for offload use case, codec needs to configured before opening SCO */
348 	if (conn->codec.data_path)
349 		configure_datapath_sync(hdev, &conn->codec);
350 
351 	conn->state = BT_CONNECT;
352 	conn->out = true;
353 
354 	conn->attempt++;
355 
356 	memset(&cp, 0x00, sizeof(cp));
357 
358 	cp.handle   = cpu_to_le16(handle);
359 
360 	cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
361 	cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
362 
363 	switch (conn->codec.id) {
364 	case BT_CODEC_MSBC:
365 		if (!find_next_esco_param(conn, esco_param_msbc,
366 					  ARRAY_SIZE(esco_param_msbc)))
367 			return -EINVAL;
368 
369 		param = &esco_param_msbc[conn->attempt - 1];
370 		cp.tx_coding_format.id = 0x05;
371 		cp.rx_coding_format.id = 0x05;
372 		cp.tx_codec_frame_size = __cpu_to_le16(60);
373 		cp.rx_codec_frame_size = __cpu_to_le16(60);
374 		cp.in_bandwidth = __cpu_to_le32(32000);
375 		cp.out_bandwidth = __cpu_to_le32(32000);
376 		cp.in_coding_format.id = 0x04;
377 		cp.out_coding_format.id = 0x04;
378 		cp.in_coded_data_size = __cpu_to_le16(16);
379 		cp.out_coded_data_size = __cpu_to_le16(16);
380 		cp.in_pcm_data_format = 2;
381 		cp.out_pcm_data_format = 2;
382 		cp.in_pcm_sample_payload_msb_pos = 0;
383 		cp.out_pcm_sample_payload_msb_pos = 0;
384 		cp.in_data_path = conn->codec.data_path;
385 		cp.out_data_path = conn->codec.data_path;
386 		cp.in_transport_unit_size = 1;
387 		cp.out_transport_unit_size = 1;
388 		break;
389 
390 	case BT_CODEC_TRANSPARENT:
391 		if (!find_next_esco_param(conn, esco_param_msbc,
392 					  ARRAY_SIZE(esco_param_msbc)))
393 			return false;
394 		param = &esco_param_msbc[conn->attempt - 1];
395 		cp.tx_coding_format.id = 0x03;
396 		cp.rx_coding_format.id = 0x03;
397 		cp.tx_codec_frame_size = __cpu_to_le16(60);
398 		cp.rx_codec_frame_size = __cpu_to_le16(60);
399 		cp.in_bandwidth = __cpu_to_le32(0x1f40);
400 		cp.out_bandwidth = __cpu_to_le32(0x1f40);
401 		cp.in_coding_format.id = 0x03;
402 		cp.out_coding_format.id = 0x03;
403 		cp.in_coded_data_size = __cpu_to_le16(16);
404 		cp.out_coded_data_size = __cpu_to_le16(16);
405 		cp.in_pcm_data_format = 2;
406 		cp.out_pcm_data_format = 2;
407 		cp.in_pcm_sample_payload_msb_pos = 0;
408 		cp.out_pcm_sample_payload_msb_pos = 0;
409 		cp.in_data_path = conn->codec.data_path;
410 		cp.out_data_path = conn->codec.data_path;
411 		cp.in_transport_unit_size = 1;
412 		cp.out_transport_unit_size = 1;
413 		break;
414 
415 	case BT_CODEC_CVSD:
416 		if (conn->parent && lmp_esco_capable(conn->parent)) {
417 			if (!find_next_esco_param(conn, esco_param_cvsd,
418 						  ARRAY_SIZE(esco_param_cvsd)))
419 				return -EINVAL;
420 			param = &esco_param_cvsd[conn->attempt - 1];
421 		} else {
422 			if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
423 				return -EINVAL;
424 			param = &sco_param_cvsd[conn->attempt - 1];
425 		}
426 		cp.tx_coding_format.id = 2;
427 		cp.rx_coding_format.id = 2;
428 		cp.tx_codec_frame_size = __cpu_to_le16(60);
429 		cp.rx_codec_frame_size = __cpu_to_le16(60);
430 		cp.in_bandwidth = __cpu_to_le32(16000);
431 		cp.out_bandwidth = __cpu_to_le32(16000);
432 		cp.in_coding_format.id = 4;
433 		cp.out_coding_format.id = 4;
434 		cp.in_coded_data_size = __cpu_to_le16(16);
435 		cp.out_coded_data_size = __cpu_to_le16(16);
436 		cp.in_pcm_data_format = 2;
437 		cp.out_pcm_data_format = 2;
438 		cp.in_pcm_sample_payload_msb_pos = 0;
439 		cp.out_pcm_sample_payload_msb_pos = 0;
440 		cp.in_data_path = conn->codec.data_path;
441 		cp.out_data_path = conn->codec.data_path;
442 		cp.in_transport_unit_size = 16;
443 		cp.out_transport_unit_size = 16;
444 		break;
445 	default:
446 		return -EINVAL;
447 	}
448 
449 	cp.retrans_effort = param->retrans_effort;
450 	cp.pkt_type = __cpu_to_le16(param->pkt_type);
451 	cp.max_latency = __cpu_to_le16(param->max_latency);
452 
453 	if (hci_send_cmd(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
454 		return -EIO;
455 
456 	return 0;
457 }
458 
459 static bool hci_setup_sync_conn(struct hci_conn *conn, __u16 handle)
460 {
461 	struct hci_dev *hdev = conn->hdev;
462 	struct hci_cp_setup_sync_conn cp;
463 	const struct sco_param *param;
464 
465 	bt_dev_dbg(hdev, "hcon %p", conn);
466 
467 	conn->state = BT_CONNECT;
468 	conn->out = true;
469 
470 	conn->attempt++;
471 
472 	cp.handle   = cpu_to_le16(handle);
473 
474 	cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
475 	cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
476 	cp.voice_setting  = cpu_to_le16(conn->setting);
477 
478 	switch (conn->setting & SCO_AIRMODE_MASK) {
479 	case SCO_AIRMODE_TRANSP:
480 		if (!find_next_esco_param(conn, esco_param_msbc,
481 					  ARRAY_SIZE(esco_param_msbc)))
482 			return false;
483 		param = &esco_param_msbc[conn->attempt - 1];
484 		break;
485 	case SCO_AIRMODE_CVSD:
486 		if (conn->parent && lmp_esco_capable(conn->parent)) {
487 			if (!find_next_esco_param(conn, esco_param_cvsd,
488 						  ARRAY_SIZE(esco_param_cvsd)))
489 				return false;
490 			param = &esco_param_cvsd[conn->attempt - 1];
491 		} else {
492 			if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
493 				return false;
494 			param = &sco_param_cvsd[conn->attempt - 1];
495 		}
496 		break;
497 	default:
498 		return false;
499 	}
500 
501 	cp.retrans_effort = param->retrans_effort;
502 	cp.pkt_type = __cpu_to_le16(param->pkt_type);
503 	cp.max_latency = __cpu_to_le16(param->max_latency);
504 
505 	if (hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
506 		return false;
507 
508 	return true;
509 }
510 
511 bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
512 {
513 	int result;
514 	struct conn_handle_t *conn_handle;
515 
516 	if (enhanced_sync_conn_capable(conn->hdev)) {
517 		conn_handle = kzalloc(sizeof(*conn_handle), GFP_KERNEL);
518 
519 		if (!conn_handle)
520 			return false;
521 
522 		conn_handle->conn = conn;
523 		conn_handle->handle = handle;
524 		result = hci_cmd_sync_queue(conn->hdev, hci_enhanced_setup_sync,
525 					    conn_handle, NULL);
526 		if (result < 0)
527 			kfree(conn_handle);
528 
529 		return result == 0;
530 	}
531 
532 	return hci_setup_sync_conn(conn, handle);
533 }
534 
535 u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
536 		      u16 to_multiplier)
537 {
538 	struct hci_dev *hdev = conn->hdev;
539 	struct hci_conn_params *params;
540 	struct hci_cp_le_conn_update cp;
541 
542 	hci_dev_lock(hdev);
543 
544 	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
545 	if (params) {
546 		params->conn_min_interval = min;
547 		params->conn_max_interval = max;
548 		params->conn_latency = latency;
549 		params->supervision_timeout = to_multiplier;
550 	}
551 
552 	hci_dev_unlock(hdev);
553 
554 	memset(&cp, 0, sizeof(cp));
555 	cp.handle		= cpu_to_le16(conn->handle);
556 	cp.conn_interval_min	= cpu_to_le16(min);
557 	cp.conn_interval_max	= cpu_to_le16(max);
558 	cp.conn_latency		= cpu_to_le16(latency);
559 	cp.supervision_timeout	= cpu_to_le16(to_multiplier);
560 	cp.min_ce_len		= cpu_to_le16(0x0000);
561 	cp.max_ce_len		= cpu_to_le16(0x0000);
562 
563 	hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
564 
565 	if (params)
566 		return 0x01;
567 
568 	return 0x00;
569 }
570 
571 void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
572 		      __u8 ltk[16], __u8 key_size)
573 {
574 	struct hci_dev *hdev = conn->hdev;
575 	struct hci_cp_le_start_enc cp;
576 
577 	BT_DBG("hcon %p", conn);
578 
579 	memset(&cp, 0, sizeof(cp));
580 
581 	cp.handle = cpu_to_le16(conn->handle);
582 	cp.rand = rand;
583 	cp.ediv = ediv;
584 	memcpy(cp.ltk, ltk, key_size);
585 
586 	hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
587 }
588 
589 /* Device _must_ be locked */
590 void hci_sco_setup(struct hci_conn *conn, __u8 status)
591 {
592 	struct hci_link *link;
593 
594 	link = list_first_entry_or_null(&conn->link_list, struct hci_link, list);
595 	if (!link || !link->conn)
596 		return;
597 
598 	BT_DBG("hcon %p", conn);
599 
600 	if (!status) {
601 		if (lmp_esco_capable(conn->hdev))
602 			hci_setup_sync(link->conn, conn->handle);
603 		else
604 			hci_add_sco(link->conn, conn->handle);
605 	} else {
606 		hci_connect_cfm(link->conn, status);
607 		hci_conn_del(link->conn);
608 	}
609 }
610 
611 static void hci_conn_timeout(struct work_struct *work)
612 {
613 	struct hci_conn *conn = container_of(work, struct hci_conn,
614 					     disc_work.work);
615 	int refcnt = atomic_read(&conn->refcnt);
616 
617 	BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));
618 
619 	WARN_ON(refcnt < 0);
620 
621 	/* FIXME: It was observed that in pairing failed scenario, refcnt
622 	 * drops below 0. Probably this is because l2cap_conn_del calls
623 	 * l2cap_chan_del for each channel, and inside l2cap_chan_del conn is
624 	 * dropped. After that loop hci_chan_del is called which also drops
625 	 * conn. For now make sure that ACL is alive if refcnt is higher then 0,
626 	 * otherwise drop it.
627 	 */
628 	if (refcnt > 0)
629 		return;
630 
631 	hci_abort_conn(conn, hci_proto_disconn_ind(conn));
632 }
633 
634 /* Enter sniff mode */
635 static void hci_conn_idle(struct work_struct *work)
636 {
637 	struct hci_conn *conn = container_of(work, struct hci_conn,
638 					     idle_work.work);
639 	struct hci_dev *hdev = conn->hdev;
640 
641 	BT_DBG("hcon %p mode %d", conn, conn->mode);
642 
643 	if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
644 		return;
645 
646 	if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
647 		return;
648 
649 	if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
650 		struct hci_cp_sniff_subrate cp;
651 		cp.handle             = cpu_to_le16(conn->handle);
652 		cp.max_latency        = cpu_to_le16(0);
653 		cp.min_remote_timeout = cpu_to_le16(0);
654 		cp.min_local_timeout  = cpu_to_le16(0);
655 		hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
656 	}
657 
658 	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
659 		struct hci_cp_sniff_mode cp;
660 		cp.handle       = cpu_to_le16(conn->handle);
661 		cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
662 		cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
663 		cp.attempt      = cpu_to_le16(4);
664 		cp.timeout      = cpu_to_le16(1);
665 		hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
666 	}
667 }
668 
669 static void hci_conn_auto_accept(struct work_struct *work)
670 {
671 	struct hci_conn *conn = container_of(work, struct hci_conn,
672 					     auto_accept_work.work);
673 
674 	hci_send_cmd(conn->hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
675 		     &conn->dst);
676 }
677 
678 static void le_disable_advertising(struct hci_dev *hdev)
679 {
680 	if (ext_adv_capable(hdev)) {
681 		struct hci_cp_le_set_ext_adv_enable cp;
682 
683 		cp.enable = 0x00;
684 		cp.num_of_sets = 0x00;
685 
686 		hci_send_cmd(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp),
687 			     &cp);
688 	} else {
689 		u8 enable = 0x00;
690 		hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
691 			     &enable);
692 	}
693 }
694 
695 static void le_conn_timeout(struct work_struct *work)
696 {
697 	struct hci_conn *conn = container_of(work, struct hci_conn,
698 					     le_conn_timeout.work);
699 	struct hci_dev *hdev = conn->hdev;
700 
701 	BT_DBG("");
702 
703 	/* We could end up here due to having done directed advertising,
704 	 * so clean up the state if necessary. This should however only
705 	 * happen with broken hardware or if low duty cycle was used
706 	 * (which doesn't have a timeout of its own).
707 	 */
708 	if (conn->role == HCI_ROLE_SLAVE) {
709 		/* Disable LE Advertising */
710 		le_disable_advertising(hdev);
711 		hci_dev_lock(hdev);
712 		hci_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
713 		hci_dev_unlock(hdev);
714 		return;
715 	}
716 
717 	hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
718 }
719 
720 struct iso_cig_params {
721 	struct hci_cp_le_set_cig_params cp;
722 	struct hci_cis_params cis[0x1f];
723 };
724 
725 struct iso_list_data {
726 	union {
727 		u8  cig;
728 		u8  big;
729 	};
730 	union {
731 		u8  cis;
732 		u8  bis;
733 		u16 sync_handle;
734 	};
735 	int count;
736 	bool big_term;
737 	bool pa_sync_term;
738 	bool big_sync_term;
739 };
740 
741 static void bis_list(struct hci_conn *conn, void *data)
742 {
743 	struct iso_list_data *d = data;
744 
745 	/* Skip if not broadcast/ANY address */
746 	if (bacmp(&conn->dst, BDADDR_ANY))
747 		return;
748 
749 	if (d->big != conn->iso_qos.bcast.big || d->bis == BT_ISO_QOS_BIS_UNSET ||
750 	    d->bis != conn->iso_qos.bcast.bis)
751 		return;
752 
753 	d->count++;
754 }
755 
756 static int terminate_big_sync(struct hci_dev *hdev, void *data)
757 {
758 	struct iso_list_data *d = data;
759 
760 	bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", d->big, d->bis);
761 
762 	hci_disable_per_advertising_sync(hdev, d->bis);
763 	hci_remove_ext_adv_instance_sync(hdev, d->bis, NULL);
764 
765 	/* Only terminate BIG if it has been created */
766 	if (!d->big_term)
767 		return 0;
768 
769 	return hci_le_terminate_big_sync(hdev, d->big,
770 					 HCI_ERROR_LOCAL_HOST_TERM);
771 }
772 
773 static void terminate_big_destroy(struct hci_dev *hdev, void *data, int err)
774 {
775 	kfree(data);
776 }
777 
778 static int hci_le_terminate_big(struct hci_dev *hdev, struct hci_conn *conn)
779 {
780 	struct iso_list_data *d;
781 	int ret;
782 
783 	bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", conn->iso_qos.bcast.big,
784 		   conn->iso_qos.bcast.bis);
785 
786 	d = kzalloc(sizeof(*d), GFP_KERNEL);
787 	if (!d)
788 		return -ENOMEM;
789 
790 	d->big = conn->iso_qos.bcast.big;
791 	d->bis = conn->iso_qos.bcast.bis;
792 	d->big_term = test_and_clear_bit(HCI_CONN_BIG_CREATED, &conn->flags);
793 
794 	ret = hci_cmd_sync_queue(hdev, terminate_big_sync, d,
795 				 terminate_big_destroy);
796 	if (ret)
797 		kfree(d);
798 
799 	return ret;
800 }
801 
802 static int big_terminate_sync(struct hci_dev *hdev, void *data)
803 {
804 	struct iso_list_data *d = data;
805 
806 	bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", d->big,
807 		   d->sync_handle);
808 
809 	if (d->big_sync_term)
810 		hci_le_big_terminate_sync(hdev, d->big);
811 
812 	if (d->pa_sync_term)
813 		return hci_le_pa_terminate_sync(hdev, d->sync_handle);
814 
815 	return 0;
816 }
817 
818 static int hci_le_big_terminate(struct hci_dev *hdev, u8 big, struct hci_conn *conn)
819 {
820 	struct iso_list_data *d;
821 	int ret;
822 
823 	bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", big, conn->sync_handle);
824 
825 	d = kzalloc(sizeof(*d), GFP_KERNEL);
826 	if (!d)
827 		return -ENOMEM;
828 
829 	d->big = big;
830 	d->sync_handle = conn->sync_handle;
831 	d->pa_sync_term = test_and_clear_bit(HCI_CONN_PA_SYNC, &conn->flags);
832 	d->big_sync_term = test_and_clear_bit(HCI_CONN_BIG_SYNC, &conn->flags);
833 
834 	ret = hci_cmd_sync_queue(hdev, big_terminate_sync, d,
835 				 terminate_big_destroy);
836 	if (ret)
837 		kfree(d);
838 
839 	return ret;
840 }
841 
842 /* Cleanup BIS connection
843  *
844  * Detects if there any BIS left connected in a BIG
845  * broadcaster: Remove advertising instance and terminate BIG.
846  * broadcaster receiver: Teminate BIG sync and terminate PA sync.
847  */
848 static void bis_cleanup(struct hci_conn *conn)
849 {
850 	struct hci_dev *hdev = conn->hdev;
851 	struct hci_conn *bis;
852 
853 	bt_dev_dbg(hdev, "conn %p", conn);
854 
855 	if (conn->role == HCI_ROLE_MASTER) {
856 		if (!test_and_clear_bit(HCI_CONN_PER_ADV, &conn->flags))
857 			return;
858 
859 		/* Check if ISO connection is a BIS and terminate advertising
860 		 * set and BIG if there are no other connections using it.
861 		 */
862 		bis = hci_conn_hash_lookup_big(hdev, conn->iso_qos.bcast.big);
863 		if (bis)
864 			return;
865 
866 		hci_le_terminate_big(hdev, conn);
867 	} else {
868 		bis = hci_conn_hash_lookup_big_any_dst(hdev,
869 						       conn->iso_qos.bcast.big);
870 
871 		if (bis)
872 			return;
873 
874 		hci_le_big_terminate(hdev, conn->iso_qos.bcast.big,
875 				     conn);
876 	}
877 }
878 
879 static int remove_cig_sync(struct hci_dev *hdev, void *data)
880 {
881 	u8 handle = PTR_UINT(data);
882 
883 	return hci_le_remove_cig_sync(hdev, handle);
884 }
885 
886 static int hci_le_remove_cig(struct hci_dev *hdev, u8 handle)
887 {
888 	bt_dev_dbg(hdev, "handle 0x%2.2x", handle);
889 
890 	return hci_cmd_sync_queue(hdev, remove_cig_sync, UINT_PTR(handle),
891 				  NULL);
892 }
893 
894 static void find_cis(struct hci_conn *conn, void *data)
895 {
896 	struct iso_list_data *d = data;
897 
898 	/* Ignore broadcast or if CIG don't match */
899 	if (!bacmp(&conn->dst, BDADDR_ANY) || d->cig != conn->iso_qos.ucast.cig)
900 		return;
901 
902 	d->count++;
903 }
904 
905 /* Cleanup CIS connection:
906  *
907  * Detects if there any CIS left connected in a CIG and remove it.
908  */
909 static void cis_cleanup(struct hci_conn *conn)
910 {
911 	struct hci_dev *hdev = conn->hdev;
912 	struct iso_list_data d;
913 
914 	if (conn->iso_qos.ucast.cig == BT_ISO_QOS_CIG_UNSET)
915 		return;
916 
917 	memset(&d, 0, sizeof(d));
918 	d.cig = conn->iso_qos.ucast.cig;
919 
920 	/* Check if ISO connection is a CIS and remove CIG if there are
921 	 * no other connections using it.
922 	 */
923 	hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_BOUND, &d);
924 	hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_CONNECT, &d);
925 	hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_CONNECTED, &d);
926 	if (d.count)
927 		return;
928 
929 	hci_le_remove_cig(hdev, conn->iso_qos.ucast.cig);
930 }
931 
932 static int hci_conn_hash_alloc_unset(struct hci_dev *hdev)
933 {
934 	return ida_alloc_range(&hdev->unset_handle_ida, HCI_CONN_HANDLE_MAX + 1,
935 			       U16_MAX, GFP_ATOMIC);
936 }
937 
938 static struct hci_conn *__hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
939 				       u8 role, u16 handle)
940 {
941 	struct hci_conn *conn;
942 
943 	switch (type) {
944 	case ACL_LINK:
945 		if (!hdev->acl_mtu)
946 			return ERR_PTR(-ECONNREFUSED);
947 		break;
948 	case ISO_LINK:
949 		if (hdev->iso_mtu)
950 			/* Dedicated ISO Buffer exists */
951 			break;
952 		fallthrough;
953 	case LE_LINK:
954 		if (hdev->le_mtu && hdev->le_mtu < HCI_MIN_LE_MTU)
955 			return ERR_PTR(-ECONNREFUSED);
956 		if (!hdev->le_mtu && hdev->acl_mtu < HCI_MIN_LE_MTU)
957 			return ERR_PTR(-ECONNREFUSED);
958 		break;
959 	case SCO_LINK:
960 	case ESCO_LINK:
961 		if (!hdev->sco_pkts)
962 			/* Controller does not support SCO or eSCO over HCI */
963 			return ERR_PTR(-ECONNREFUSED);
964 		break;
965 	default:
966 		return ERR_PTR(-ECONNREFUSED);
967 	}
968 
969 	bt_dev_dbg(hdev, "dst %pMR handle 0x%4.4x", dst, handle);
970 
971 	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
972 	if (!conn)
973 		return ERR_PTR(-ENOMEM);
974 
975 	bacpy(&conn->dst, dst);
976 	bacpy(&conn->src, &hdev->bdaddr);
977 	conn->handle = handle;
978 	conn->hdev  = hdev;
979 	conn->type  = type;
980 	conn->role  = role;
981 	conn->mode  = HCI_CM_ACTIVE;
982 	conn->state = BT_OPEN;
983 	conn->auth_type = HCI_AT_GENERAL_BONDING;
984 	conn->io_capability = hdev->io_capability;
985 	conn->remote_auth = 0xff;
986 	conn->key_type = 0xff;
987 	conn->rssi = HCI_RSSI_INVALID;
988 	conn->tx_power = HCI_TX_POWER_INVALID;
989 	conn->max_tx_power = HCI_TX_POWER_INVALID;
990 	conn->sync_handle = HCI_SYNC_HANDLE_INVALID;
991 
992 	set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
993 	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
994 
995 	/* Set Default Authenticated payload timeout to 30s */
996 	conn->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
997 
998 	if (conn->role == HCI_ROLE_MASTER)
999 		conn->out = true;
1000 
1001 	switch (type) {
1002 	case ACL_LINK:
1003 		conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
1004 		conn->mtu = hdev->acl_mtu;
1005 		break;
1006 	case LE_LINK:
1007 		/* conn->src should reflect the local identity address */
1008 		hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
1009 		conn->mtu = hdev->le_mtu ? hdev->le_mtu : hdev->acl_mtu;
1010 		break;
1011 	case ISO_LINK:
1012 		/* conn->src should reflect the local identity address */
1013 		hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
1014 
1015 		/* set proper cleanup function */
1016 		if (!bacmp(dst, BDADDR_ANY))
1017 			conn->cleanup = bis_cleanup;
1018 		else if (conn->role == HCI_ROLE_MASTER)
1019 			conn->cleanup = cis_cleanup;
1020 
1021 		conn->mtu = hdev->iso_mtu ? hdev->iso_mtu :
1022 			    hdev->le_mtu ? hdev->le_mtu : hdev->acl_mtu;
1023 		break;
1024 	case SCO_LINK:
1025 		if (lmp_esco_capable(hdev))
1026 			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
1027 					(hdev->esco_type & EDR_ESCO_MASK);
1028 		else
1029 			conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
1030 
1031 		conn->mtu = hdev->sco_mtu;
1032 		break;
1033 	case ESCO_LINK:
1034 		conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
1035 		conn->mtu = hdev->sco_mtu;
1036 		break;
1037 	}
1038 
1039 	skb_queue_head_init(&conn->data_q);
1040 
1041 	INIT_LIST_HEAD(&conn->chan_list);
1042 	INIT_LIST_HEAD(&conn->link_list);
1043 
1044 	INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
1045 	INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
1046 	INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
1047 	INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);
1048 
1049 	atomic_set(&conn->refcnt, 0);
1050 
1051 	hci_dev_hold(hdev);
1052 
1053 	hci_conn_hash_add(hdev, conn);
1054 
1055 	/* The SCO and eSCO connections will only be notified when their
1056 	 * setup has been completed. This is different to ACL links which
1057 	 * can be notified right away.
1058 	 */
1059 	if (conn->type != SCO_LINK && conn->type != ESCO_LINK) {
1060 		if (hdev->notify)
1061 			hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);
1062 	}
1063 
1064 	hci_conn_init_sysfs(conn);
1065 
1066 	return conn;
1067 }
1068 
1069 struct hci_conn *hci_conn_add_unset(struct hci_dev *hdev, int type,
1070 				    bdaddr_t *dst, u8 role)
1071 {
1072 	int handle;
1073 
1074 	bt_dev_dbg(hdev, "dst %pMR", dst);
1075 
1076 	handle = hci_conn_hash_alloc_unset(hdev);
1077 	if (unlikely(handle < 0))
1078 		return ERR_PTR(-ECONNREFUSED);
1079 
1080 	return __hci_conn_add(hdev, type, dst, role, handle);
1081 }
1082 
1083 struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
1084 			      u8 role, u16 handle)
1085 {
1086 	if (handle > HCI_CONN_HANDLE_MAX)
1087 		return ERR_PTR(-EINVAL);
1088 
1089 	return __hci_conn_add(hdev, type, dst, role, handle);
1090 }
1091 
1092 static void hci_conn_cleanup_child(struct hci_conn *conn, u8 reason)
1093 {
1094 	if (!reason)
1095 		reason = HCI_ERROR_REMOTE_USER_TERM;
1096 
1097 	/* Due to race, SCO/ISO conn might be not established yet at this point,
1098 	 * and nothing else will clean it up. In other cases it is done via HCI
1099 	 * events.
1100 	 */
1101 	switch (conn->type) {
1102 	case SCO_LINK:
1103 	case ESCO_LINK:
1104 		if (HCI_CONN_HANDLE_UNSET(conn->handle))
1105 			hci_conn_failed(conn, reason);
1106 		break;
1107 	case ISO_LINK:
1108 		if (conn->state != BT_CONNECTED &&
1109 		    !test_bit(HCI_CONN_CREATE_CIS, &conn->flags))
1110 			hci_conn_failed(conn, reason);
1111 		break;
1112 	}
1113 }
1114 
1115 static void hci_conn_unlink(struct hci_conn *conn)
1116 {
1117 	struct hci_dev *hdev = conn->hdev;
1118 
1119 	bt_dev_dbg(hdev, "hcon %p", conn);
1120 
1121 	if (!conn->parent) {
1122 		struct hci_link *link, *t;
1123 
1124 		list_for_each_entry_safe(link, t, &conn->link_list, list) {
1125 			struct hci_conn *child = link->conn;
1126 
1127 			hci_conn_unlink(child);
1128 
1129 			/* If hdev is down it means
1130 			 * hci_dev_close_sync/hci_conn_hash_flush is in progress
1131 			 * and links don't need to be cleanup as all connections
1132 			 * would be cleanup.
1133 			 */
1134 			if (!test_bit(HCI_UP, &hdev->flags))
1135 				continue;
1136 
1137 			hci_conn_cleanup_child(child, conn->abort_reason);
1138 		}
1139 
1140 		return;
1141 	}
1142 
1143 	if (!conn->link)
1144 		return;
1145 
1146 	list_del_rcu(&conn->link->list);
1147 	synchronize_rcu();
1148 
1149 	hci_conn_drop(conn->parent);
1150 	hci_conn_put(conn->parent);
1151 	conn->parent = NULL;
1152 
1153 	kfree(conn->link);
1154 	conn->link = NULL;
1155 }
1156 
1157 void hci_conn_del(struct hci_conn *conn)
1158 {
1159 	struct hci_dev *hdev = conn->hdev;
1160 
1161 	BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);
1162 
1163 	hci_conn_unlink(conn);
1164 
1165 	cancel_delayed_work_sync(&conn->disc_work);
1166 	cancel_delayed_work_sync(&conn->auto_accept_work);
1167 	cancel_delayed_work_sync(&conn->idle_work);
1168 
1169 	if (conn->type == ACL_LINK) {
1170 		/* Unacked frames */
1171 		hdev->acl_cnt += conn->sent;
1172 	} else if (conn->type == LE_LINK) {
1173 		cancel_delayed_work(&conn->le_conn_timeout);
1174 
1175 		if (hdev->le_pkts)
1176 			hdev->le_cnt += conn->sent;
1177 		else
1178 			hdev->acl_cnt += conn->sent;
1179 	} else {
1180 		/* Unacked ISO frames */
1181 		if (conn->type == ISO_LINK) {
1182 			if (hdev->iso_pkts)
1183 				hdev->iso_cnt += conn->sent;
1184 			else if (hdev->le_pkts)
1185 				hdev->le_cnt += conn->sent;
1186 			else
1187 				hdev->acl_cnt += conn->sent;
1188 		}
1189 	}
1190 
1191 	skb_queue_purge(&conn->data_q);
1192 
1193 	/* Remove the connection from the list and cleanup its remaining
1194 	 * state. This is a separate function since for some cases like
1195 	 * BT_CONNECT_SCAN we *only* want the cleanup part without the
1196 	 * rest of hci_conn_del.
1197 	 */
1198 	hci_conn_cleanup(conn);
1199 }
1200 
1201 struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, uint8_t src_type)
1202 {
1203 	int use_src = bacmp(src, BDADDR_ANY);
1204 	struct hci_dev *hdev = NULL, *d;
1205 
1206 	BT_DBG("%pMR -> %pMR", src, dst);
1207 
1208 	read_lock(&hci_dev_list_lock);
1209 
1210 	list_for_each_entry(d, &hci_dev_list, list) {
1211 		if (!test_bit(HCI_UP, &d->flags) ||
1212 		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
1213 			continue;
1214 
1215 		/* Simple routing:
1216 		 *   No source address - find interface with bdaddr != dst
1217 		 *   Source address    - find interface with bdaddr == src
1218 		 */
1219 
1220 		if (use_src) {
1221 			bdaddr_t id_addr;
1222 			u8 id_addr_type;
1223 
1224 			if (src_type == BDADDR_BREDR) {
1225 				if (!lmp_bredr_capable(d))
1226 					continue;
1227 				bacpy(&id_addr, &d->bdaddr);
1228 				id_addr_type = BDADDR_BREDR;
1229 			} else {
1230 				if (!lmp_le_capable(d))
1231 					continue;
1232 
1233 				hci_copy_identity_address(d, &id_addr,
1234 							  &id_addr_type);
1235 
1236 				/* Convert from HCI to three-value type */
1237 				if (id_addr_type == ADDR_LE_DEV_PUBLIC)
1238 					id_addr_type = BDADDR_LE_PUBLIC;
1239 				else
1240 					id_addr_type = BDADDR_LE_RANDOM;
1241 			}
1242 
1243 			if (!bacmp(&id_addr, src) && id_addr_type == src_type) {
1244 				hdev = d; break;
1245 			}
1246 		} else {
1247 			if (bacmp(&d->bdaddr, dst)) {
1248 				hdev = d; break;
1249 			}
1250 		}
1251 	}
1252 
1253 	if (hdev)
1254 		hdev = hci_dev_hold(hdev);
1255 
1256 	read_unlock(&hci_dev_list_lock);
1257 	return hdev;
1258 }
1259 EXPORT_SYMBOL(hci_get_route);
1260 
1261 /* This function requires the caller holds hdev->lock */
1262 static void hci_le_conn_failed(struct hci_conn *conn, u8 status)
1263 {
1264 	struct hci_dev *hdev = conn->hdev;
1265 
1266 	hci_connect_le_scan_cleanup(conn, status);
1267 
1268 	/* Enable advertising in case this was a failed connection
1269 	 * attempt as a peripheral.
1270 	 */
1271 	hci_enable_advertising(hdev);
1272 }
1273 
1274 /* This function requires the caller holds hdev->lock */
1275 void hci_conn_failed(struct hci_conn *conn, u8 status)
1276 {
1277 	struct hci_dev *hdev = conn->hdev;
1278 
1279 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
1280 
1281 	switch (conn->type) {
1282 	case LE_LINK:
1283 		hci_le_conn_failed(conn, status);
1284 		break;
1285 	case ACL_LINK:
1286 		mgmt_connect_failed(hdev, &conn->dst, conn->type,
1287 				    conn->dst_type, status);
1288 		break;
1289 	}
1290 
1291 	/* In case of BIG/PA sync failed, clear conn flags so that
1292 	 * the conns will be correctly cleaned up by ISO layer
1293 	 */
1294 	test_and_clear_bit(HCI_CONN_BIG_SYNC_FAILED, &conn->flags);
1295 	test_and_clear_bit(HCI_CONN_PA_SYNC_FAILED, &conn->flags);
1296 
1297 	conn->state = BT_CLOSED;
1298 	hci_connect_cfm(conn, status);
1299 	hci_conn_del(conn);
1300 }
1301 
1302 /* This function requires the caller holds hdev->lock */
1303 u8 hci_conn_set_handle(struct hci_conn *conn, u16 handle)
1304 {
1305 	struct hci_dev *hdev = conn->hdev;
1306 
1307 	bt_dev_dbg(hdev, "hcon %p handle 0x%4.4x", conn, handle);
1308 
1309 	if (conn->handle == handle)
1310 		return 0;
1311 
1312 	if (handle > HCI_CONN_HANDLE_MAX) {
1313 		bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x",
1314 			   handle, HCI_CONN_HANDLE_MAX);
1315 		return HCI_ERROR_INVALID_PARAMETERS;
1316 	}
1317 
1318 	/* If abort_reason has been sent it means the connection is being
1319 	 * aborted and the handle shall not be changed.
1320 	 */
1321 	if (conn->abort_reason)
1322 		return conn->abort_reason;
1323 
1324 	if (HCI_CONN_HANDLE_UNSET(conn->handle))
1325 		ida_free(&hdev->unset_handle_ida, conn->handle);
1326 
1327 	conn->handle = handle;
1328 
1329 	return 0;
1330 }
1331 
1332 static void create_le_conn_complete(struct hci_dev *hdev, void *data, int err)
1333 {
1334 	struct hci_conn *conn;
1335 	u16 handle = PTR_UINT(data);
1336 
1337 	conn = hci_conn_hash_lookup_handle(hdev, handle);
1338 	if (!conn)
1339 		return;
1340 
1341 	bt_dev_dbg(hdev, "err %d", err);
1342 
1343 	hci_dev_lock(hdev);
1344 
1345 	if (!err) {
1346 		hci_connect_le_scan_cleanup(conn, 0x00);
1347 		goto done;
1348 	}
1349 
1350 	/* Check if connection is still pending */
1351 	if (conn != hci_lookup_le_connect(hdev))
1352 		goto done;
1353 
1354 	/* Flush to make sure we send create conn cancel command if needed */
1355 	flush_delayed_work(&conn->le_conn_timeout);
1356 	hci_conn_failed(conn, bt_status(err));
1357 
1358 done:
1359 	hci_dev_unlock(hdev);
1360 }
1361 
1362 static int hci_connect_le_sync(struct hci_dev *hdev, void *data)
1363 {
1364 	struct hci_conn *conn;
1365 	u16 handle = PTR_UINT(data);
1366 
1367 	conn = hci_conn_hash_lookup_handle(hdev, handle);
1368 	if (!conn)
1369 		return 0;
1370 
1371 	bt_dev_dbg(hdev, "conn %p", conn);
1372 
1373 	clear_bit(HCI_CONN_SCANNING, &conn->flags);
1374 	conn->state = BT_CONNECT;
1375 
1376 	return hci_le_create_conn_sync(hdev, conn);
1377 }
1378 
1379 struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
1380 				u8 dst_type, bool dst_resolved, u8 sec_level,
1381 				u16 conn_timeout, u8 role)
1382 {
1383 	struct hci_conn *conn;
1384 	struct smp_irk *irk;
1385 	int err;
1386 
1387 	/* Let's make sure that le is enabled.*/
1388 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1389 		if (lmp_le_capable(hdev))
1390 			return ERR_PTR(-ECONNREFUSED);
1391 
1392 		return ERR_PTR(-EOPNOTSUPP);
1393 	}
1394 
1395 	/* Since the controller supports only one LE connection attempt at a
1396 	 * time, we return -EBUSY if there is any connection attempt running.
1397 	 */
1398 	if (hci_lookup_le_connect(hdev))
1399 		return ERR_PTR(-EBUSY);
1400 
1401 	/* If there's already a connection object but it's not in
1402 	 * scanning state it means it must already be established, in
1403 	 * which case we can't do anything else except report a failure
1404 	 * to connect.
1405 	 */
1406 	conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
1407 	if (conn && !test_bit(HCI_CONN_SCANNING, &conn->flags)) {
1408 		return ERR_PTR(-EBUSY);
1409 	}
1410 
1411 	/* Check if the destination address has been resolved by the controller
1412 	 * since if it did then the identity address shall be used.
1413 	 */
1414 	if (!dst_resolved) {
1415 		/* When given an identity address with existing identity
1416 		 * resolving key, the connection needs to be established
1417 		 * to a resolvable random address.
1418 		 *
1419 		 * Storing the resolvable random address is required here
1420 		 * to handle connection failures. The address will later
1421 		 * be resolved back into the original identity address
1422 		 * from the connect request.
1423 		 */
1424 		irk = hci_find_irk_by_addr(hdev, dst, dst_type);
1425 		if (irk && bacmp(&irk->rpa, BDADDR_ANY)) {
1426 			dst = &irk->rpa;
1427 			dst_type = ADDR_LE_DEV_RANDOM;
1428 		}
1429 	}
1430 
1431 	if (conn) {
1432 		bacpy(&conn->dst, dst);
1433 	} else {
1434 		conn = hci_conn_add_unset(hdev, LE_LINK, dst, role);
1435 		if (IS_ERR(conn))
1436 			return conn;
1437 		hci_conn_hold(conn);
1438 		conn->pending_sec_level = sec_level;
1439 	}
1440 
1441 	conn->dst_type = dst_type;
1442 	conn->sec_level = BT_SECURITY_LOW;
1443 	conn->conn_timeout = conn_timeout;
1444 
1445 	err = hci_cmd_sync_queue(hdev, hci_connect_le_sync,
1446 				 UINT_PTR(conn->handle),
1447 				 create_le_conn_complete);
1448 	if (err) {
1449 		hci_conn_del(conn);
1450 		return ERR_PTR(err);
1451 	}
1452 
1453 	return conn;
1454 }
1455 
1456 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
1457 {
1458 	struct hci_conn *conn;
1459 
1460 	conn = hci_conn_hash_lookup_le(hdev, addr, type);
1461 	if (!conn)
1462 		return false;
1463 
1464 	if (conn->state != BT_CONNECTED)
1465 		return false;
1466 
1467 	return true;
1468 }
1469 
1470 /* This function requires the caller holds hdev->lock */
1471 static int hci_explicit_conn_params_set(struct hci_dev *hdev,
1472 					bdaddr_t *addr, u8 addr_type)
1473 {
1474 	struct hci_conn_params *params;
1475 
1476 	if (is_connected(hdev, addr, addr_type))
1477 		return -EISCONN;
1478 
1479 	params = hci_conn_params_lookup(hdev, addr, addr_type);
1480 	if (!params) {
1481 		params = hci_conn_params_add(hdev, addr, addr_type);
1482 		if (!params)
1483 			return -ENOMEM;
1484 
1485 		/* If we created new params, mark them to be deleted in
1486 		 * hci_connect_le_scan_cleanup. It's different case than
1487 		 * existing disabled params, those will stay after cleanup.
1488 		 */
1489 		params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
1490 	}
1491 
1492 	/* We're trying to connect, so make sure params are at pend_le_conns */
1493 	if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
1494 	    params->auto_connect == HCI_AUTO_CONN_REPORT ||
1495 	    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
1496 		hci_pend_le_list_del_init(params);
1497 		hci_pend_le_list_add(params, &hdev->pend_le_conns);
1498 	}
1499 
1500 	params->explicit_connect = true;
1501 
1502 	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
1503 	       params->auto_connect);
1504 
1505 	return 0;
1506 }
1507 
1508 static int qos_set_big(struct hci_dev *hdev, struct bt_iso_qos *qos)
1509 {
1510 	struct hci_conn *conn;
1511 	u8  big;
1512 
1513 	/* Allocate a BIG if not set */
1514 	if (qos->bcast.big == BT_ISO_QOS_BIG_UNSET) {
1515 		for (big = 0x00; big < 0xef; big++) {
1516 
1517 			conn = hci_conn_hash_lookup_big(hdev, big);
1518 			if (!conn)
1519 				break;
1520 		}
1521 
1522 		if (big == 0xef)
1523 			return -EADDRNOTAVAIL;
1524 
1525 		/* Update BIG */
1526 		qos->bcast.big = big;
1527 	}
1528 
1529 	return 0;
1530 }
1531 
1532 static int qos_set_bis(struct hci_dev *hdev, struct bt_iso_qos *qos)
1533 {
1534 	struct hci_conn *conn;
1535 	u8  bis;
1536 
1537 	/* Allocate BIS if not set */
1538 	if (qos->bcast.bis == BT_ISO_QOS_BIS_UNSET) {
1539 		/* Find an unused adv set to advertise BIS, skip instance 0x00
1540 		 * since it is reserved as general purpose set.
1541 		 */
1542 		for (bis = 0x01; bis < hdev->le_num_of_adv_sets;
1543 		     bis++) {
1544 
1545 			conn = hci_conn_hash_lookup_bis(hdev, BDADDR_ANY, bis);
1546 			if (!conn)
1547 				break;
1548 		}
1549 
1550 		if (bis == hdev->le_num_of_adv_sets)
1551 			return -EADDRNOTAVAIL;
1552 
1553 		/* Update BIS */
1554 		qos->bcast.bis = bis;
1555 	}
1556 
1557 	return 0;
1558 }
1559 
1560 /* This function requires the caller holds hdev->lock */
1561 static struct hci_conn *hci_add_bis(struct hci_dev *hdev, bdaddr_t *dst,
1562 				    struct bt_iso_qos *qos, __u8 base_len,
1563 				    __u8 *base)
1564 {
1565 	struct hci_conn *conn;
1566 	int err;
1567 
1568 	/* Let's make sure that le is enabled.*/
1569 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1570 		if (lmp_le_capable(hdev))
1571 			return ERR_PTR(-ECONNREFUSED);
1572 		return ERR_PTR(-EOPNOTSUPP);
1573 	}
1574 
1575 	err = qos_set_big(hdev, qos);
1576 	if (err)
1577 		return ERR_PTR(err);
1578 
1579 	err = qos_set_bis(hdev, qos);
1580 	if (err)
1581 		return ERR_PTR(err);
1582 
1583 	/* Check if the LE Create BIG command has already been sent */
1584 	conn = hci_conn_hash_lookup_per_adv_bis(hdev, dst, qos->bcast.big,
1585 						qos->bcast.big);
1586 	if (conn)
1587 		return ERR_PTR(-EADDRINUSE);
1588 
1589 	/* Check BIS settings against other bound BISes, since all
1590 	 * BISes in a BIG must have the same value for all parameters
1591 	 */
1592 	conn = hci_conn_hash_lookup_big(hdev, qos->bcast.big);
1593 
1594 	if (conn && (memcmp(qos, &conn->iso_qos, sizeof(*qos)) ||
1595 		     base_len != conn->le_per_adv_data_len ||
1596 		     memcmp(conn->le_per_adv_data, base, base_len)))
1597 		return ERR_PTR(-EADDRINUSE);
1598 
1599 	conn = hci_conn_add_unset(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
1600 	if (IS_ERR(conn))
1601 		return conn;
1602 
1603 	conn->state = BT_CONNECT;
1604 
1605 	hci_conn_hold(conn);
1606 	return conn;
1607 }
1608 
1609 /* This function requires the caller holds hdev->lock */
1610 struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
1611 				     u8 dst_type, u8 sec_level,
1612 				     u16 conn_timeout,
1613 				     enum conn_reasons conn_reason)
1614 {
1615 	struct hci_conn *conn;
1616 
1617 	/* Let's make sure that le is enabled.*/
1618 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1619 		if (lmp_le_capable(hdev))
1620 			return ERR_PTR(-ECONNREFUSED);
1621 
1622 		return ERR_PTR(-EOPNOTSUPP);
1623 	}
1624 
1625 	/* Some devices send ATT messages as soon as the physical link is
1626 	 * established. To be able to handle these ATT messages, the user-
1627 	 * space first establishes the connection and then starts the pairing
1628 	 * process.
1629 	 *
1630 	 * So if a hci_conn object already exists for the following connection
1631 	 * attempt, we simply update pending_sec_level and auth_type fields
1632 	 * and return the object found.
1633 	 */
1634 	conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
1635 	if (conn) {
1636 		if (conn->pending_sec_level < sec_level)
1637 			conn->pending_sec_level = sec_level;
1638 		goto done;
1639 	}
1640 
1641 	BT_DBG("requesting refresh of dst_addr");
1642 
1643 	conn = hci_conn_add_unset(hdev, LE_LINK, dst, HCI_ROLE_MASTER);
1644 	if (IS_ERR(conn))
1645 		return conn;
1646 
1647 	if (hci_explicit_conn_params_set(hdev, dst, dst_type) < 0) {
1648 		hci_conn_del(conn);
1649 		return ERR_PTR(-EBUSY);
1650 	}
1651 
1652 	conn->state = BT_CONNECT;
1653 	set_bit(HCI_CONN_SCANNING, &conn->flags);
1654 	conn->dst_type = dst_type;
1655 	conn->sec_level = BT_SECURITY_LOW;
1656 	conn->pending_sec_level = sec_level;
1657 	conn->conn_timeout = conn_timeout;
1658 	conn->conn_reason = conn_reason;
1659 
1660 	hci_update_passive_scan(hdev);
1661 
1662 done:
1663 	hci_conn_hold(conn);
1664 	return conn;
1665 }
1666 
1667 struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
1668 				 u8 sec_level, u8 auth_type,
1669 				 enum conn_reasons conn_reason)
1670 {
1671 	struct hci_conn *acl;
1672 
1673 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1674 		if (lmp_bredr_capable(hdev))
1675 			return ERR_PTR(-ECONNREFUSED);
1676 
1677 		return ERR_PTR(-EOPNOTSUPP);
1678 	}
1679 
1680 	/* Reject outgoing connection to device with same BD ADDR against
1681 	 * CVE-2020-26555
1682 	 */
1683 	if (!bacmp(&hdev->bdaddr, dst)) {
1684 		bt_dev_dbg(hdev, "Reject connection with same BD_ADDR %pMR\n",
1685 			   dst);
1686 		return ERR_PTR(-ECONNREFUSED);
1687 	}
1688 
1689 	acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
1690 	if (!acl) {
1691 		acl = hci_conn_add_unset(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
1692 		if (IS_ERR(acl))
1693 			return acl;
1694 	}
1695 
1696 	hci_conn_hold(acl);
1697 
1698 	acl->conn_reason = conn_reason;
1699 	if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
1700 		acl->sec_level = BT_SECURITY_LOW;
1701 		acl->pending_sec_level = sec_level;
1702 		acl->auth_type = auth_type;
1703 		hci_acl_create_connection(acl);
1704 	}
1705 
1706 	return acl;
1707 }
1708 
1709 static struct hci_link *hci_conn_link(struct hci_conn *parent,
1710 				      struct hci_conn *conn)
1711 {
1712 	struct hci_dev *hdev = parent->hdev;
1713 	struct hci_link *link;
1714 
1715 	bt_dev_dbg(hdev, "parent %p hcon %p", parent, conn);
1716 
1717 	if (conn->link)
1718 		return conn->link;
1719 
1720 	if (conn->parent)
1721 		return NULL;
1722 
1723 	link = kzalloc(sizeof(*link), GFP_KERNEL);
1724 	if (!link)
1725 		return NULL;
1726 
1727 	link->conn = hci_conn_hold(conn);
1728 	conn->link = link;
1729 	conn->parent = hci_conn_get(parent);
1730 
1731 	/* Use list_add_tail_rcu append to the list */
1732 	list_add_tail_rcu(&link->list, &parent->link_list);
1733 
1734 	return link;
1735 }
1736 
1737 struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
1738 				 __u16 setting, struct bt_codec *codec)
1739 {
1740 	struct hci_conn *acl;
1741 	struct hci_conn *sco;
1742 	struct hci_link *link;
1743 
1744 	acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING,
1745 			      CONN_REASON_SCO_CONNECT);
1746 	if (IS_ERR(acl))
1747 		return acl;
1748 
1749 	sco = hci_conn_hash_lookup_ba(hdev, type, dst);
1750 	if (!sco) {
1751 		sco = hci_conn_add_unset(hdev, type, dst, HCI_ROLE_MASTER);
1752 		if (IS_ERR(sco)) {
1753 			hci_conn_drop(acl);
1754 			return sco;
1755 		}
1756 	}
1757 
1758 	link = hci_conn_link(acl, sco);
1759 	if (!link) {
1760 		hci_conn_drop(acl);
1761 		hci_conn_drop(sco);
1762 		return ERR_PTR(-ENOLINK);
1763 	}
1764 
1765 	sco->setting = setting;
1766 	sco->codec = *codec;
1767 
1768 	if (acl->state == BT_CONNECTED &&
1769 	    (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
1770 		set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
1771 		hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);
1772 
1773 		if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
1774 			/* defer SCO setup until mode change completed */
1775 			set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
1776 			return sco;
1777 		}
1778 
1779 		hci_sco_setup(acl, 0x00);
1780 	}
1781 
1782 	return sco;
1783 }
1784 
1785 static int hci_le_create_big(struct hci_conn *conn, struct bt_iso_qos *qos)
1786 {
1787 	struct hci_dev *hdev = conn->hdev;
1788 	struct hci_cp_le_create_big cp;
1789 	struct iso_list_data data;
1790 
1791 	memset(&cp, 0, sizeof(cp));
1792 
1793 	data.big = qos->bcast.big;
1794 	data.bis = qos->bcast.bis;
1795 	data.count = 0;
1796 
1797 	/* Create a BIS for each bound connection */
1798 	hci_conn_hash_list_state(hdev, bis_list, ISO_LINK,
1799 				 BT_BOUND, &data);
1800 
1801 	cp.handle = qos->bcast.big;
1802 	cp.adv_handle = qos->bcast.bis;
1803 	cp.num_bis  = data.count;
1804 	hci_cpu_to_le24(qos->bcast.out.interval, cp.bis.sdu_interval);
1805 	cp.bis.sdu = cpu_to_le16(qos->bcast.out.sdu);
1806 	cp.bis.latency =  cpu_to_le16(qos->bcast.out.latency);
1807 	cp.bis.rtn  = qos->bcast.out.rtn;
1808 	cp.bis.phy  = qos->bcast.out.phy;
1809 	cp.bis.packing = qos->bcast.packing;
1810 	cp.bis.framing = qos->bcast.framing;
1811 	cp.bis.encryption = qos->bcast.encryption;
1812 	memcpy(cp.bis.bcode, qos->bcast.bcode, sizeof(cp.bis.bcode));
1813 
1814 	return hci_send_cmd(hdev, HCI_OP_LE_CREATE_BIG, sizeof(cp), &cp);
1815 }
1816 
1817 static int set_cig_params_sync(struct hci_dev *hdev, void *data)
1818 {
1819 	u8 cig_id = PTR_UINT(data);
1820 	struct hci_conn *conn;
1821 	struct bt_iso_qos *qos;
1822 	struct iso_cig_params pdu;
1823 	u8 cis_id;
1824 
1825 	conn = hci_conn_hash_lookup_cig(hdev, cig_id);
1826 	if (!conn)
1827 		return 0;
1828 
1829 	memset(&pdu, 0, sizeof(pdu));
1830 
1831 	qos = &conn->iso_qos;
1832 	pdu.cp.cig_id = cig_id;
1833 	hci_cpu_to_le24(qos->ucast.out.interval, pdu.cp.c_interval);
1834 	hci_cpu_to_le24(qos->ucast.in.interval, pdu.cp.p_interval);
1835 	pdu.cp.sca = qos->ucast.sca;
1836 	pdu.cp.packing = qos->ucast.packing;
1837 	pdu.cp.framing = qos->ucast.framing;
1838 	pdu.cp.c_latency = cpu_to_le16(qos->ucast.out.latency);
1839 	pdu.cp.p_latency = cpu_to_le16(qos->ucast.in.latency);
1840 
1841 	/* Reprogram all CIS(s) with the same CIG, valid range are:
1842 	 * num_cis: 0x00 to 0x1F
1843 	 * cis_id: 0x00 to 0xEF
1844 	 */
1845 	for (cis_id = 0x00; cis_id < 0xf0 &&
1846 	     pdu.cp.num_cis < ARRAY_SIZE(pdu.cis); cis_id++) {
1847 		struct hci_cis_params *cis;
1848 
1849 		conn = hci_conn_hash_lookup_cis(hdev, NULL, 0, cig_id, cis_id);
1850 		if (!conn)
1851 			continue;
1852 
1853 		qos = &conn->iso_qos;
1854 
1855 		cis = &pdu.cis[pdu.cp.num_cis++];
1856 		cis->cis_id = cis_id;
1857 		cis->c_sdu  = cpu_to_le16(conn->iso_qos.ucast.out.sdu);
1858 		cis->p_sdu  = cpu_to_le16(conn->iso_qos.ucast.in.sdu);
1859 		cis->c_phy  = qos->ucast.out.phy ? qos->ucast.out.phy :
1860 			      qos->ucast.in.phy;
1861 		cis->p_phy  = qos->ucast.in.phy ? qos->ucast.in.phy :
1862 			      qos->ucast.out.phy;
1863 		cis->c_rtn  = qos->ucast.out.rtn;
1864 		cis->p_rtn  = qos->ucast.in.rtn;
1865 	}
1866 
1867 	if (!pdu.cp.num_cis)
1868 		return 0;
1869 
1870 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_CIG_PARAMS,
1871 				     sizeof(pdu.cp) +
1872 				     pdu.cp.num_cis * sizeof(pdu.cis[0]), &pdu,
1873 				     HCI_CMD_TIMEOUT);
1874 }
1875 
1876 static bool hci_le_set_cig_params(struct hci_conn *conn, struct bt_iso_qos *qos)
1877 {
1878 	struct hci_dev *hdev = conn->hdev;
1879 	struct iso_list_data data;
1880 
1881 	memset(&data, 0, sizeof(data));
1882 
1883 	/* Allocate first still reconfigurable CIG if not set */
1884 	if (qos->ucast.cig == BT_ISO_QOS_CIG_UNSET) {
1885 		for (data.cig = 0x00; data.cig < 0xf0; data.cig++) {
1886 			data.count = 0;
1887 
1888 			hci_conn_hash_list_state(hdev, find_cis, ISO_LINK,
1889 						 BT_CONNECT, &data);
1890 			if (data.count)
1891 				continue;
1892 
1893 			hci_conn_hash_list_state(hdev, find_cis, ISO_LINK,
1894 						 BT_CONNECTED, &data);
1895 			if (!data.count)
1896 				break;
1897 		}
1898 
1899 		if (data.cig == 0xf0)
1900 			return false;
1901 
1902 		/* Update CIG */
1903 		qos->ucast.cig = data.cig;
1904 	}
1905 
1906 	if (qos->ucast.cis != BT_ISO_QOS_CIS_UNSET) {
1907 		if (hci_conn_hash_lookup_cis(hdev, NULL, 0, qos->ucast.cig,
1908 					     qos->ucast.cis))
1909 			return false;
1910 		goto done;
1911 	}
1912 
1913 	/* Allocate first available CIS if not set */
1914 	for (data.cig = qos->ucast.cig, data.cis = 0x00; data.cis < 0xf0;
1915 	     data.cis++) {
1916 		if (!hci_conn_hash_lookup_cis(hdev, NULL, 0, data.cig,
1917 					      data.cis)) {
1918 			/* Update CIS */
1919 			qos->ucast.cis = data.cis;
1920 			break;
1921 		}
1922 	}
1923 
1924 	if (qos->ucast.cis == BT_ISO_QOS_CIS_UNSET)
1925 		return false;
1926 
1927 done:
1928 	if (hci_cmd_sync_queue(hdev, set_cig_params_sync,
1929 			       UINT_PTR(qos->ucast.cig), NULL) < 0)
1930 		return false;
1931 
1932 	return true;
1933 }
1934 
1935 struct hci_conn *hci_bind_cis(struct hci_dev *hdev, bdaddr_t *dst,
1936 			      __u8 dst_type, struct bt_iso_qos *qos)
1937 {
1938 	struct hci_conn *cis;
1939 
1940 	cis = hci_conn_hash_lookup_cis(hdev, dst, dst_type, qos->ucast.cig,
1941 				       qos->ucast.cis);
1942 	if (!cis) {
1943 		cis = hci_conn_add_unset(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
1944 		if (IS_ERR(cis))
1945 			return cis;
1946 		cis->cleanup = cis_cleanup;
1947 		cis->dst_type = dst_type;
1948 		cis->iso_qos.ucast.cig = BT_ISO_QOS_CIG_UNSET;
1949 		cis->iso_qos.ucast.cis = BT_ISO_QOS_CIS_UNSET;
1950 	}
1951 
1952 	if (cis->state == BT_CONNECTED)
1953 		return cis;
1954 
1955 	/* Check if CIS has been set and the settings matches */
1956 	if (cis->state == BT_BOUND &&
1957 	    !memcmp(&cis->iso_qos, qos, sizeof(*qos)))
1958 		return cis;
1959 
1960 	/* Update LINK PHYs according to QoS preference */
1961 	cis->le_tx_phy = qos->ucast.out.phy;
1962 	cis->le_rx_phy = qos->ucast.in.phy;
1963 
1964 	/* If output interval is not set use the input interval as it cannot be
1965 	 * 0x000000.
1966 	 */
1967 	if (!qos->ucast.out.interval)
1968 		qos->ucast.out.interval = qos->ucast.in.interval;
1969 
1970 	/* If input interval is not set use the output interval as it cannot be
1971 	 * 0x000000.
1972 	 */
1973 	if (!qos->ucast.in.interval)
1974 		qos->ucast.in.interval = qos->ucast.out.interval;
1975 
1976 	/* If output latency is not set use the input latency as it cannot be
1977 	 * 0x0000.
1978 	 */
1979 	if (!qos->ucast.out.latency)
1980 		qos->ucast.out.latency = qos->ucast.in.latency;
1981 
1982 	/* If input latency is not set use the output latency as it cannot be
1983 	 * 0x0000.
1984 	 */
1985 	if (!qos->ucast.in.latency)
1986 		qos->ucast.in.latency = qos->ucast.out.latency;
1987 
1988 	if (!hci_le_set_cig_params(cis, qos)) {
1989 		hci_conn_drop(cis);
1990 		return ERR_PTR(-EINVAL);
1991 	}
1992 
1993 	hci_conn_hold(cis);
1994 
1995 	cis->iso_qos = *qos;
1996 	cis->state = BT_BOUND;
1997 
1998 	return cis;
1999 }
2000 
2001 bool hci_iso_setup_path(struct hci_conn *conn)
2002 {
2003 	struct hci_dev *hdev = conn->hdev;
2004 	struct hci_cp_le_setup_iso_path cmd;
2005 
2006 	memset(&cmd, 0, sizeof(cmd));
2007 
2008 	if (conn->iso_qos.ucast.out.sdu) {
2009 		cmd.handle = cpu_to_le16(conn->handle);
2010 		cmd.direction = 0x00; /* Input (Host to Controller) */
2011 		cmd.path = 0x00; /* HCI path if enabled */
2012 		cmd.codec = 0x03; /* Transparent Data */
2013 
2014 		if (hci_send_cmd(hdev, HCI_OP_LE_SETUP_ISO_PATH, sizeof(cmd),
2015 				 &cmd) < 0)
2016 			return false;
2017 	}
2018 
2019 	if (conn->iso_qos.ucast.in.sdu) {
2020 		cmd.handle = cpu_to_le16(conn->handle);
2021 		cmd.direction = 0x01; /* Output (Controller to Host) */
2022 		cmd.path = 0x00; /* HCI path if enabled */
2023 		cmd.codec = 0x03; /* Transparent Data */
2024 
2025 		if (hci_send_cmd(hdev, HCI_OP_LE_SETUP_ISO_PATH, sizeof(cmd),
2026 				 &cmd) < 0)
2027 			return false;
2028 	}
2029 
2030 	return true;
2031 }
2032 
2033 int hci_conn_check_create_cis(struct hci_conn *conn)
2034 {
2035 	if (conn->type != ISO_LINK || !bacmp(&conn->dst, BDADDR_ANY))
2036 		return -EINVAL;
2037 
2038 	if (!conn->parent || conn->parent->state != BT_CONNECTED ||
2039 	    conn->state != BT_CONNECT || HCI_CONN_HANDLE_UNSET(conn->handle))
2040 		return 1;
2041 
2042 	return 0;
2043 }
2044 
2045 static int hci_create_cis_sync(struct hci_dev *hdev, void *data)
2046 {
2047 	return hci_le_create_cis_sync(hdev);
2048 }
2049 
2050 int hci_le_create_cis_pending(struct hci_dev *hdev)
2051 {
2052 	struct hci_conn *conn;
2053 	bool pending = false;
2054 
2055 	rcu_read_lock();
2056 
2057 	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
2058 		if (test_bit(HCI_CONN_CREATE_CIS, &conn->flags)) {
2059 			rcu_read_unlock();
2060 			return -EBUSY;
2061 		}
2062 
2063 		if (!hci_conn_check_create_cis(conn))
2064 			pending = true;
2065 	}
2066 
2067 	rcu_read_unlock();
2068 
2069 	if (!pending)
2070 		return 0;
2071 
2072 	/* Queue Create CIS */
2073 	return hci_cmd_sync_queue(hdev, hci_create_cis_sync, NULL, NULL);
2074 }
2075 
2076 static void hci_iso_qos_setup(struct hci_dev *hdev, struct hci_conn *conn,
2077 			      struct bt_iso_io_qos *qos, __u8 phy)
2078 {
2079 	/* Only set MTU if PHY is enabled */
2080 	if (!qos->sdu && qos->phy)
2081 		qos->sdu = conn->mtu;
2082 
2083 	/* Use the same PHY as ACL if set to any */
2084 	if (qos->phy == BT_ISO_PHY_ANY)
2085 		qos->phy = phy;
2086 
2087 	/* Use LE ACL connection interval if not set */
2088 	if (!qos->interval)
2089 		/* ACL interval unit in 1.25 ms to us */
2090 		qos->interval = conn->le_conn_interval * 1250;
2091 
2092 	/* Use LE ACL connection latency if not set */
2093 	if (!qos->latency)
2094 		qos->latency = conn->le_conn_latency;
2095 }
2096 
2097 static int create_big_sync(struct hci_dev *hdev, void *data)
2098 {
2099 	struct hci_conn *conn = data;
2100 	struct bt_iso_qos *qos = &conn->iso_qos;
2101 	u16 interval, sync_interval = 0;
2102 	u32 flags = 0;
2103 	int err;
2104 
2105 	if (qos->bcast.out.phy == 0x02)
2106 		flags |= MGMT_ADV_FLAG_SEC_2M;
2107 
2108 	/* Align intervals */
2109 	interval = (qos->bcast.out.interval / 1250) * qos->bcast.sync_factor;
2110 
2111 	if (qos->bcast.bis)
2112 		sync_interval = interval * 4;
2113 
2114 	err = hci_start_per_adv_sync(hdev, qos->bcast.bis, conn->le_per_adv_data_len,
2115 				     conn->le_per_adv_data, flags, interval,
2116 				     interval, sync_interval);
2117 	if (err)
2118 		return err;
2119 
2120 	return hci_le_create_big(conn, &conn->iso_qos);
2121 }
2122 
2123 static void create_pa_complete(struct hci_dev *hdev, void *data, int err)
2124 {
2125 	struct hci_cp_le_pa_create_sync *cp = data;
2126 
2127 	bt_dev_dbg(hdev, "");
2128 
2129 	if (err)
2130 		bt_dev_err(hdev, "Unable to create PA: %d", err);
2131 
2132 	kfree(cp);
2133 }
2134 
2135 static int create_pa_sync(struct hci_dev *hdev, void *data)
2136 {
2137 	struct hci_cp_le_pa_create_sync *cp = data;
2138 	int err;
2139 
2140 	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_CREATE_SYNC,
2141 				    sizeof(*cp), cp, HCI_CMD_TIMEOUT);
2142 	if (err) {
2143 		hci_dev_clear_flag(hdev, HCI_PA_SYNC);
2144 		return err;
2145 	}
2146 
2147 	return hci_update_passive_scan_sync(hdev);
2148 }
2149 
2150 int hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst, __u8 dst_type,
2151 		       __u8 sid, struct bt_iso_qos *qos)
2152 {
2153 	struct hci_cp_le_pa_create_sync *cp;
2154 
2155 	if (hci_dev_test_and_set_flag(hdev, HCI_PA_SYNC))
2156 		return -EBUSY;
2157 
2158 	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
2159 	if (!cp) {
2160 		hci_dev_clear_flag(hdev, HCI_PA_SYNC);
2161 		return -ENOMEM;
2162 	}
2163 
2164 	cp->options = qos->bcast.options;
2165 	cp->sid = sid;
2166 	cp->addr_type = dst_type;
2167 	bacpy(&cp->addr, dst);
2168 	cp->skip = cpu_to_le16(qos->bcast.skip);
2169 	cp->sync_timeout = cpu_to_le16(qos->bcast.sync_timeout);
2170 	cp->sync_cte_type = qos->bcast.sync_cte_type;
2171 
2172 	/* Queue start pa_create_sync and scan */
2173 	return hci_cmd_sync_queue(hdev, create_pa_sync, cp, create_pa_complete);
2174 }
2175 
2176 int hci_le_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon,
2177 			   struct bt_iso_qos *qos,
2178 			   __u16 sync_handle, __u8 num_bis, __u8 bis[])
2179 {
2180 	struct _packed {
2181 		struct hci_cp_le_big_create_sync cp;
2182 		__u8  bis[0x11];
2183 	} pdu;
2184 	int err;
2185 
2186 	if (num_bis > sizeof(pdu.bis))
2187 		return -EINVAL;
2188 
2189 	err = qos_set_big(hdev, qos);
2190 	if (err)
2191 		return err;
2192 
2193 	if (hcon)
2194 		hcon->iso_qos.bcast.big = qos->bcast.big;
2195 
2196 	memset(&pdu, 0, sizeof(pdu));
2197 	pdu.cp.handle = qos->bcast.big;
2198 	pdu.cp.sync_handle = cpu_to_le16(sync_handle);
2199 	pdu.cp.encryption = qos->bcast.encryption;
2200 	memcpy(pdu.cp.bcode, qos->bcast.bcode, sizeof(pdu.cp.bcode));
2201 	pdu.cp.mse = qos->bcast.mse;
2202 	pdu.cp.timeout = cpu_to_le16(qos->bcast.timeout);
2203 	pdu.cp.num_bis = num_bis;
2204 	memcpy(pdu.bis, bis, num_bis);
2205 
2206 	return hci_send_cmd(hdev, HCI_OP_LE_BIG_CREATE_SYNC,
2207 			    sizeof(pdu.cp) + num_bis, &pdu);
2208 }
2209 
2210 static void create_big_complete(struct hci_dev *hdev, void *data, int err)
2211 {
2212 	struct hci_conn *conn = data;
2213 
2214 	bt_dev_dbg(hdev, "conn %p", conn);
2215 
2216 	if (err) {
2217 		bt_dev_err(hdev, "Unable to create BIG: %d", err);
2218 		hci_connect_cfm(conn, err);
2219 		hci_conn_del(conn);
2220 	}
2221 }
2222 
2223 struct hci_conn *hci_bind_bis(struct hci_dev *hdev, bdaddr_t *dst,
2224 			      struct bt_iso_qos *qos,
2225 			      __u8 base_len, __u8 *base)
2226 {
2227 	struct hci_conn *conn;
2228 	__u8 eir[HCI_MAX_PER_AD_LENGTH];
2229 
2230 	if (base_len && base)
2231 		base_len = eir_append_service_data(eir, 0,  0x1851,
2232 						   base, base_len);
2233 
2234 	/* We need hci_conn object using the BDADDR_ANY as dst */
2235 	conn = hci_add_bis(hdev, dst, qos, base_len, eir);
2236 	if (IS_ERR(conn))
2237 		return conn;
2238 
2239 	/* Update LINK PHYs according to QoS preference */
2240 	conn->le_tx_phy = qos->bcast.out.phy;
2241 	conn->le_tx_phy = qos->bcast.out.phy;
2242 
2243 	/* Add Basic Announcement into Peridic Adv Data if BASE is set */
2244 	if (base_len && base) {
2245 		memcpy(conn->le_per_adv_data,  eir, sizeof(eir));
2246 		conn->le_per_adv_data_len = base_len;
2247 	}
2248 
2249 	hci_iso_qos_setup(hdev, conn, &qos->bcast.out,
2250 			  conn->le_tx_phy ? conn->le_tx_phy :
2251 			  hdev->le_tx_def_phys);
2252 
2253 	conn->iso_qos = *qos;
2254 	conn->state = BT_BOUND;
2255 
2256 	return conn;
2257 }
2258 
2259 static void bis_mark_per_adv(struct hci_conn *conn, void *data)
2260 {
2261 	struct iso_list_data *d = data;
2262 
2263 	/* Skip if not broadcast/ANY address */
2264 	if (bacmp(&conn->dst, BDADDR_ANY))
2265 		return;
2266 
2267 	if (d->big != conn->iso_qos.bcast.big ||
2268 	    d->bis == BT_ISO_QOS_BIS_UNSET ||
2269 	    d->bis != conn->iso_qos.bcast.bis)
2270 		return;
2271 
2272 	set_bit(HCI_CONN_PER_ADV, &conn->flags);
2273 }
2274 
2275 struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst,
2276 				 __u8 dst_type, struct bt_iso_qos *qos,
2277 				 __u8 base_len, __u8 *base)
2278 {
2279 	struct hci_conn *conn;
2280 	int err;
2281 	struct iso_list_data data;
2282 
2283 	conn = hci_bind_bis(hdev, dst, qos, base_len, base);
2284 	if (IS_ERR(conn))
2285 		return conn;
2286 
2287 	data.big = qos->bcast.big;
2288 	data.bis = qos->bcast.bis;
2289 
2290 	/* Set HCI_CONN_PER_ADV for all bound connections, to mark that
2291 	 * the start periodic advertising and create BIG commands have
2292 	 * been queued
2293 	 */
2294 	hci_conn_hash_list_state(hdev, bis_mark_per_adv, ISO_LINK,
2295 				 BT_BOUND, &data);
2296 
2297 	/* Queue start periodic advertising and create BIG */
2298 	err = hci_cmd_sync_queue(hdev, create_big_sync, conn,
2299 				 create_big_complete);
2300 	if (err < 0) {
2301 		hci_conn_drop(conn);
2302 		return ERR_PTR(err);
2303 	}
2304 
2305 	return conn;
2306 }
2307 
2308 struct hci_conn *hci_connect_cis(struct hci_dev *hdev, bdaddr_t *dst,
2309 				 __u8 dst_type, struct bt_iso_qos *qos)
2310 {
2311 	struct hci_conn *le;
2312 	struct hci_conn *cis;
2313 	struct hci_link *link;
2314 
2315 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
2316 		le = hci_connect_le(hdev, dst, dst_type, false,
2317 				    BT_SECURITY_LOW,
2318 				    HCI_LE_CONN_TIMEOUT,
2319 				    HCI_ROLE_SLAVE);
2320 	else
2321 		le = hci_connect_le_scan(hdev, dst, dst_type,
2322 					 BT_SECURITY_LOW,
2323 					 HCI_LE_CONN_TIMEOUT,
2324 					 CONN_REASON_ISO_CONNECT);
2325 	if (IS_ERR(le))
2326 		return le;
2327 
2328 	hci_iso_qos_setup(hdev, le, &qos->ucast.out,
2329 			  le->le_tx_phy ? le->le_tx_phy : hdev->le_tx_def_phys);
2330 	hci_iso_qos_setup(hdev, le, &qos->ucast.in,
2331 			  le->le_rx_phy ? le->le_rx_phy : hdev->le_rx_def_phys);
2332 
2333 	cis = hci_bind_cis(hdev, dst, dst_type, qos);
2334 	if (IS_ERR(cis)) {
2335 		hci_conn_drop(le);
2336 		return cis;
2337 	}
2338 
2339 	link = hci_conn_link(le, cis);
2340 	if (!link) {
2341 		hci_conn_drop(le);
2342 		hci_conn_drop(cis);
2343 		return ERR_PTR(-ENOLINK);
2344 	}
2345 
2346 	/* Link takes the refcount */
2347 	hci_conn_drop(cis);
2348 
2349 	cis->state = BT_CONNECT;
2350 
2351 	hci_le_create_cis_pending(hdev);
2352 
2353 	return cis;
2354 }
2355 
2356 /* Check link security requirement */
2357 int hci_conn_check_link_mode(struct hci_conn *conn)
2358 {
2359 	BT_DBG("hcon %p", conn);
2360 
2361 	/* In Secure Connections Only mode, it is required that Secure
2362 	 * Connections is used and the link is encrypted with AES-CCM
2363 	 * using a P-256 authenticated combination key.
2364 	 */
2365 	if (hci_dev_test_flag(conn->hdev, HCI_SC_ONLY)) {
2366 		if (!hci_conn_sc_enabled(conn) ||
2367 		    !test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
2368 		    conn->key_type != HCI_LK_AUTH_COMBINATION_P256)
2369 			return 0;
2370 	}
2371 
2372 	 /* AES encryption is required for Level 4:
2373 	  *
2374 	  * BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 3, Part C
2375 	  * page 1319:
2376 	  *
2377 	  * 128-bit equivalent strength for link and encryption keys
2378 	  * required using FIPS approved algorithms (E0 not allowed,
2379 	  * SAFER+ not allowed, and P-192 not allowed; encryption key
2380 	  * not shortened)
2381 	  */
2382 	if (conn->sec_level == BT_SECURITY_FIPS &&
2383 	    !test_bit(HCI_CONN_AES_CCM, &conn->flags)) {
2384 		bt_dev_err(conn->hdev,
2385 			   "Invalid security: Missing AES-CCM usage");
2386 		return 0;
2387 	}
2388 
2389 	if (hci_conn_ssp_enabled(conn) &&
2390 	    !test_bit(HCI_CONN_ENCRYPT, &conn->flags))
2391 		return 0;
2392 
2393 	return 1;
2394 }
2395 
2396 /* Authenticate remote device */
2397 static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
2398 {
2399 	BT_DBG("hcon %p", conn);
2400 
2401 	if (conn->pending_sec_level > sec_level)
2402 		sec_level = conn->pending_sec_level;
2403 
2404 	if (sec_level > conn->sec_level)
2405 		conn->pending_sec_level = sec_level;
2406 	else if (test_bit(HCI_CONN_AUTH, &conn->flags))
2407 		return 1;
2408 
2409 	/* Make sure we preserve an existing MITM requirement*/
2410 	auth_type |= (conn->auth_type & 0x01);
2411 
2412 	conn->auth_type = auth_type;
2413 
2414 	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2415 		struct hci_cp_auth_requested cp;
2416 
2417 		cp.handle = cpu_to_le16(conn->handle);
2418 		hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
2419 			     sizeof(cp), &cp);
2420 
2421 		/* Set the ENCRYPT_PEND to trigger encryption after
2422 		 * authentication.
2423 		 */
2424 		if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags))
2425 			set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2426 	}
2427 
2428 	return 0;
2429 }
2430 
2431 /* Encrypt the link */
2432 static void hci_conn_encrypt(struct hci_conn *conn)
2433 {
2434 	BT_DBG("hcon %p", conn);
2435 
2436 	if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
2437 		struct hci_cp_set_conn_encrypt cp;
2438 		cp.handle  = cpu_to_le16(conn->handle);
2439 		cp.encrypt = 0x01;
2440 		hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2441 			     &cp);
2442 	}
2443 }
2444 
2445 /* Enable security */
2446 int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
2447 		      bool initiator)
2448 {
2449 	BT_DBG("hcon %p", conn);
2450 
2451 	if (conn->type == LE_LINK)
2452 		return smp_conn_security(conn, sec_level);
2453 
2454 	/* For sdp we don't need the link key. */
2455 	if (sec_level == BT_SECURITY_SDP)
2456 		return 1;
2457 
2458 	/* For non 2.1 devices and low security level we don't need the link
2459 	   key. */
2460 	if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
2461 		return 1;
2462 
2463 	/* For other security levels we need the link key. */
2464 	if (!test_bit(HCI_CONN_AUTH, &conn->flags))
2465 		goto auth;
2466 
2467 	switch (conn->key_type) {
2468 	case HCI_LK_AUTH_COMBINATION_P256:
2469 		/* An authenticated FIPS approved combination key has
2470 		 * sufficient security for security level 4 or lower.
2471 		 */
2472 		if (sec_level <= BT_SECURITY_FIPS)
2473 			goto encrypt;
2474 		break;
2475 	case HCI_LK_AUTH_COMBINATION_P192:
2476 		/* An authenticated combination key has sufficient security for
2477 		 * security level 3 or lower.
2478 		 */
2479 		if (sec_level <= BT_SECURITY_HIGH)
2480 			goto encrypt;
2481 		break;
2482 	case HCI_LK_UNAUTH_COMBINATION_P192:
2483 	case HCI_LK_UNAUTH_COMBINATION_P256:
2484 		/* An unauthenticated combination key has sufficient security
2485 		 * for security level 2 or lower.
2486 		 */
2487 		if (sec_level <= BT_SECURITY_MEDIUM)
2488 			goto encrypt;
2489 		break;
2490 	case HCI_LK_COMBINATION:
2491 		/* A combination key has always sufficient security for the
2492 		 * security levels 2 or lower. High security level requires the
2493 		 * combination key is generated using maximum PIN code length
2494 		 * (16). For pre 2.1 units.
2495 		 */
2496 		if (sec_level <= BT_SECURITY_MEDIUM || conn->pin_length == 16)
2497 			goto encrypt;
2498 		break;
2499 	default:
2500 		break;
2501 	}
2502 
2503 auth:
2504 	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
2505 		return 0;
2506 
2507 	if (initiator)
2508 		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2509 
2510 	if (!hci_conn_auth(conn, sec_level, auth_type))
2511 		return 0;
2512 
2513 encrypt:
2514 	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags)) {
2515 		/* Ensure that the encryption key size has been read,
2516 		 * otherwise stall the upper layer responses.
2517 		 */
2518 		if (!conn->enc_key_size)
2519 			return 0;
2520 
2521 		/* Nothing else needed, all requirements are met */
2522 		return 1;
2523 	}
2524 
2525 	hci_conn_encrypt(conn);
2526 	return 0;
2527 }
2528 EXPORT_SYMBOL(hci_conn_security);
2529 
2530 /* Check secure link requirement */
2531 int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
2532 {
2533 	BT_DBG("hcon %p", conn);
2534 
2535 	/* Accept if non-secure or higher security level is required */
2536 	if (sec_level != BT_SECURITY_HIGH && sec_level != BT_SECURITY_FIPS)
2537 		return 1;
2538 
2539 	/* Accept if secure or higher security level is already present */
2540 	if (conn->sec_level == BT_SECURITY_HIGH ||
2541 	    conn->sec_level == BT_SECURITY_FIPS)
2542 		return 1;
2543 
2544 	/* Reject not secure link */
2545 	return 0;
2546 }
2547 EXPORT_SYMBOL(hci_conn_check_secure);
2548 
2549 /* Switch role */
2550 int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
2551 {
2552 	BT_DBG("hcon %p", conn);
2553 
2554 	if (role == conn->role)
2555 		return 1;
2556 
2557 	if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
2558 		struct hci_cp_switch_role cp;
2559 		bacpy(&cp.bdaddr, &conn->dst);
2560 		cp.role = role;
2561 		hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
2562 	}
2563 
2564 	return 0;
2565 }
2566 EXPORT_SYMBOL(hci_conn_switch_role);
2567 
2568 /* Enter active mode */
2569 void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
2570 {
2571 	struct hci_dev *hdev = conn->hdev;
2572 
2573 	BT_DBG("hcon %p mode %d", conn, conn->mode);
2574 
2575 	if (conn->mode != HCI_CM_SNIFF)
2576 		goto timer;
2577 
2578 	if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
2579 		goto timer;
2580 
2581 	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
2582 		struct hci_cp_exit_sniff_mode cp;
2583 		cp.handle = cpu_to_le16(conn->handle);
2584 		hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
2585 	}
2586 
2587 timer:
2588 	if (hdev->idle_timeout > 0)
2589 		queue_delayed_work(hdev->workqueue, &conn->idle_work,
2590 				   msecs_to_jiffies(hdev->idle_timeout));
2591 }
2592 
2593 /* Drop all connection on the device */
2594 void hci_conn_hash_flush(struct hci_dev *hdev)
2595 {
2596 	struct list_head *head = &hdev->conn_hash.list;
2597 	struct hci_conn *conn;
2598 
2599 	BT_DBG("hdev %s", hdev->name);
2600 
2601 	/* We should not traverse the list here, because hci_conn_del
2602 	 * can remove extra links, which may cause the list traversal
2603 	 * to hit items that have already been released.
2604 	 */
2605 	while ((conn = list_first_entry_or_null(head,
2606 						struct hci_conn,
2607 						list)) != NULL) {
2608 		conn->state = BT_CLOSED;
2609 		hci_disconn_cfm(conn, HCI_ERROR_LOCAL_HOST_TERM);
2610 		hci_conn_del(conn);
2611 	}
2612 }
2613 
2614 /* Check pending connect attempts */
2615 void hci_conn_check_pending(struct hci_dev *hdev)
2616 {
2617 	struct hci_conn *conn;
2618 
2619 	BT_DBG("hdev %s", hdev->name);
2620 
2621 	hci_dev_lock(hdev);
2622 
2623 	conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
2624 	if (conn)
2625 		hci_acl_create_connection(conn);
2626 
2627 	hci_dev_unlock(hdev);
2628 }
2629 
2630 static u32 get_link_mode(struct hci_conn *conn)
2631 {
2632 	u32 link_mode = 0;
2633 
2634 	if (conn->role == HCI_ROLE_MASTER)
2635 		link_mode |= HCI_LM_MASTER;
2636 
2637 	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
2638 		link_mode |= HCI_LM_ENCRYPT;
2639 
2640 	if (test_bit(HCI_CONN_AUTH, &conn->flags))
2641 		link_mode |= HCI_LM_AUTH;
2642 
2643 	if (test_bit(HCI_CONN_SECURE, &conn->flags))
2644 		link_mode |= HCI_LM_SECURE;
2645 
2646 	if (test_bit(HCI_CONN_FIPS, &conn->flags))
2647 		link_mode |= HCI_LM_FIPS;
2648 
2649 	return link_mode;
2650 }
2651 
2652 int hci_get_conn_list(void __user *arg)
2653 {
2654 	struct hci_conn *c;
2655 	struct hci_conn_list_req req, *cl;
2656 	struct hci_conn_info *ci;
2657 	struct hci_dev *hdev;
2658 	int n = 0, size, err;
2659 
2660 	if (copy_from_user(&req, arg, sizeof(req)))
2661 		return -EFAULT;
2662 
2663 	if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
2664 		return -EINVAL;
2665 
2666 	size = sizeof(req) + req.conn_num * sizeof(*ci);
2667 
2668 	cl = kmalloc(size, GFP_KERNEL);
2669 	if (!cl)
2670 		return -ENOMEM;
2671 
2672 	hdev = hci_dev_get(req.dev_id);
2673 	if (!hdev) {
2674 		kfree(cl);
2675 		return -ENODEV;
2676 	}
2677 
2678 	ci = cl->conn_info;
2679 
2680 	hci_dev_lock(hdev);
2681 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2682 		bacpy(&(ci + n)->bdaddr, &c->dst);
2683 		(ci + n)->handle = c->handle;
2684 		(ci + n)->type  = c->type;
2685 		(ci + n)->out   = c->out;
2686 		(ci + n)->state = c->state;
2687 		(ci + n)->link_mode = get_link_mode(c);
2688 		if (++n >= req.conn_num)
2689 			break;
2690 	}
2691 	hci_dev_unlock(hdev);
2692 
2693 	cl->dev_id = hdev->id;
2694 	cl->conn_num = n;
2695 	size = sizeof(req) + n * sizeof(*ci);
2696 
2697 	hci_dev_put(hdev);
2698 
2699 	err = copy_to_user(arg, cl, size);
2700 	kfree(cl);
2701 
2702 	return err ? -EFAULT : 0;
2703 }
2704 
2705 int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
2706 {
2707 	struct hci_conn_info_req req;
2708 	struct hci_conn_info ci;
2709 	struct hci_conn *conn;
2710 	char __user *ptr = arg + sizeof(req);
2711 
2712 	if (copy_from_user(&req, arg, sizeof(req)))
2713 		return -EFAULT;
2714 
2715 	hci_dev_lock(hdev);
2716 	conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
2717 	if (conn) {
2718 		bacpy(&ci.bdaddr, &conn->dst);
2719 		ci.handle = conn->handle;
2720 		ci.type  = conn->type;
2721 		ci.out   = conn->out;
2722 		ci.state = conn->state;
2723 		ci.link_mode = get_link_mode(conn);
2724 	}
2725 	hci_dev_unlock(hdev);
2726 
2727 	if (!conn)
2728 		return -ENOENT;
2729 
2730 	return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
2731 }
2732 
2733 int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
2734 {
2735 	struct hci_auth_info_req req;
2736 	struct hci_conn *conn;
2737 
2738 	if (copy_from_user(&req, arg, sizeof(req)))
2739 		return -EFAULT;
2740 
2741 	hci_dev_lock(hdev);
2742 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
2743 	if (conn)
2744 		req.type = conn->auth_type;
2745 	hci_dev_unlock(hdev);
2746 
2747 	if (!conn)
2748 		return -ENOENT;
2749 
2750 	return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
2751 }
2752 
2753 struct hci_chan *hci_chan_create(struct hci_conn *conn)
2754 {
2755 	struct hci_dev *hdev = conn->hdev;
2756 	struct hci_chan *chan;
2757 
2758 	BT_DBG("%s hcon %p", hdev->name, conn);
2759 
2760 	if (test_bit(HCI_CONN_DROP, &conn->flags)) {
2761 		BT_DBG("Refusing to create new hci_chan");
2762 		return NULL;
2763 	}
2764 
2765 	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
2766 	if (!chan)
2767 		return NULL;
2768 
2769 	chan->conn = hci_conn_get(conn);
2770 	skb_queue_head_init(&chan->data_q);
2771 	chan->state = BT_CONNECTED;
2772 
2773 	list_add_rcu(&chan->list, &conn->chan_list);
2774 
2775 	return chan;
2776 }
2777 
2778 void hci_chan_del(struct hci_chan *chan)
2779 {
2780 	struct hci_conn *conn = chan->conn;
2781 	struct hci_dev *hdev = conn->hdev;
2782 
2783 	BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);
2784 
2785 	list_del_rcu(&chan->list);
2786 
2787 	synchronize_rcu();
2788 
2789 	/* Prevent new hci_chan's to be created for this hci_conn */
2790 	set_bit(HCI_CONN_DROP, &conn->flags);
2791 
2792 	hci_conn_put(conn);
2793 
2794 	skb_queue_purge(&chan->data_q);
2795 	kfree(chan);
2796 }
2797 
2798 void hci_chan_list_flush(struct hci_conn *conn)
2799 {
2800 	struct hci_chan *chan, *n;
2801 
2802 	BT_DBG("hcon %p", conn);
2803 
2804 	list_for_each_entry_safe(chan, n, &conn->chan_list, list)
2805 		hci_chan_del(chan);
2806 }
2807 
2808 static struct hci_chan *__hci_chan_lookup_handle(struct hci_conn *hcon,
2809 						 __u16 handle)
2810 {
2811 	struct hci_chan *hchan;
2812 
2813 	list_for_each_entry(hchan, &hcon->chan_list, list) {
2814 		if (hchan->handle == handle)
2815 			return hchan;
2816 	}
2817 
2818 	return NULL;
2819 }
2820 
2821 struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle)
2822 {
2823 	struct hci_conn_hash *h = &hdev->conn_hash;
2824 	struct hci_conn *hcon;
2825 	struct hci_chan *hchan = NULL;
2826 
2827 	rcu_read_lock();
2828 
2829 	list_for_each_entry_rcu(hcon, &h->list, list) {
2830 		hchan = __hci_chan_lookup_handle(hcon, handle);
2831 		if (hchan)
2832 			break;
2833 	}
2834 
2835 	rcu_read_unlock();
2836 
2837 	return hchan;
2838 }
2839 
2840 u32 hci_conn_get_phy(struct hci_conn *conn)
2841 {
2842 	u32 phys = 0;
2843 
2844 	/* BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 2, Part B page 471:
2845 	 * Table 6.2: Packets defined for synchronous, asynchronous, and
2846 	 * CPB logical transport types.
2847 	 */
2848 	switch (conn->type) {
2849 	case SCO_LINK:
2850 		/* SCO logical transport (1 Mb/s):
2851 		 * HV1, HV2, HV3 and DV.
2852 		 */
2853 		phys |= BT_PHY_BR_1M_1SLOT;
2854 
2855 		break;
2856 
2857 	case ACL_LINK:
2858 		/* ACL logical transport (1 Mb/s) ptt=0:
2859 		 * DH1, DM3, DH3, DM5 and DH5.
2860 		 */
2861 		phys |= BT_PHY_BR_1M_1SLOT;
2862 
2863 		if (conn->pkt_type & (HCI_DM3 | HCI_DH3))
2864 			phys |= BT_PHY_BR_1M_3SLOT;
2865 
2866 		if (conn->pkt_type & (HCI_DM5 | HCI_DH5))
2867 			phys |= BT_PHY_BR_1M_5SLOT;
2868 
2869 		/* ACL logical transport (2 Mb/s) ptt=1:
2870 		 * 2-DH1, 2-DH3 and 2-DH5.
2871 		 */
2872 		if (!(conn->pkt_type & HCI_2DH1))
2873 			phys |= BT_PHY_EDR_2M_1SLOT;
2874 
2875 		if (!(conn->pkt_type & HCI_2DH3))
2876 			phys |= BT_PHY_EDR_2M_3SLOT;
2877 
2878 		if (!(conn->pkt_type & HCI_2DH5))
2879 			phys |= BT_PHY_EDR_2M_5SLOT;
2880 
2881 		/* ACL logical transport (3 Mb/s) ptt=1:
2882 		 * 3-DH1, 3-DH3 and 3-DH5.
2883 		 */
2884 		if (!(conn->pkt_type & HCI_3DH1))
2885 			phys |= BT_PHY_EDR_3M_1SLOT;
2886 
2887 		if (!(conn->pkt_type & HCI_3DH3))
2888 			phys |= BT_PHY_EDR_3M_3SLOT;
2889 
2890 		if (!(conn->pkt_type & HCI_3DH5))
2891 			phys |= BT_PHY_EDR_3M_5SLOT;
2892 
2893 		break;
2894 
2895 	case ESCO_LINK:
2896 		/* eSCO logical transport (1 Mb/s): EV3, EV4 and EV5 */
2897 		phys |= BT_PHY_BR_1M_1SLOT;
2898 
2899 		if (!(conn->pkt_type & (ESCO_EV4 | ESCO_EV5)))
2900 			phys |= BT_PHY_BR_1M_3SLOT;
2901 
2902 		/* eSCO logical transport (2 Mb/s): 2-EV3, 2-EV5 */
2903 		if (!(conn->pkt_type & ESCO_2EV3))
2904 			phys |= BT_PHY_EDR_2M_1SLOT;
2905 
2906 		if (!(conn->pkt_type & ESCO_2EV5))
2907 			phys |= BT_PHY_EDR_2M_3SLOT;
2908 
2909 		/* eSCO logical transport (3 Mb/s): 3-EV3, 3-EV5 */
2910 		if (!(conn->pkt_type & ESCO_3EV3))
2911 			phys |= BT_PHY_EDR_3M_1SLOT;
2912 
2913 		if (!(conn->pkt_type & ESCO_3EV5))
2914 			phys |= BT_PHY_EDR_3M_3SLOT;
2915 
2916 		break;
2917 
2918 	case LE_LINK:
2919 		if (conn->le_tx_phy & HCI_LE_SET_PHY_1M)
2920 			phys |= BT_PHY_LE_1M_TX;
2921 
2922 		if (conn->le_rx_phy & HCI_LE_SET_PHY_1M)
2923 			phys |= BT_PHY_LE_1M_RX;
2924 
2925 		if (conn->le_tx_phy & HCI_LE_SET_PHY_2M)
2926 			phys |= BT_PHY_LE_2M_TX;
2927 
2928 		if (conn->le_rx_phy & HCI_LE_SET_PHY_2M)
2929 			phys |= BT_PHY_LE_2M_RX;
2930 
2931 		if (conn->le_tx_phy & HCI_LE_SET_PHY_CODED)
2932 			phys |= BT_PHY_LE_CODED_TX;
2933 
2934 		if (conn->le_rx_phy & HCI_LE_SET_PHY_CODED)
2935 			phys |= BT_PHY_LE_CODED_RX;
2936 
2937 		break;
2938 	}
2939 
2940 	return phys;
2941 }
2942 
2943 static int abort_conn_sync(struct hci_dev *hdev, void *data)
2944 {
2945 	struct hci_conn *conn;
2946 	u16 handle = PTR_UINT(data);
2947 
2948 	conn = hci_conn_hash_lookup_handle(hdev, handle);
2949 	if (!conn)
2950 		return 0;
2951 
2952 	return hci_abort_conn_sync(hdev, conn, conn->abort_reason);
2953 }
2954 
2955 int hci_abort_conn(struct hci_conn *conn, u8 reason)
2956 {
2957 	struct hci_dev *hdev = conn->hdev;
2958 
2959 	/* If abort_reason has already been set it means the connection is
2960 	 * already being aborted so don't attempt to overwrite it.
2961 	 */
2962 	if (conn->abort_reason)
2963 		return 0;
2964 
2965 	bt_dev_dbg(hdev, "handle 0x%2.2x reason 0x%2.2x", conn->handle, reason);
2966 
2967 	conn->abort_reason = reason;
2968 
2969 	/* If the connection is pending check the command opcode since that
2970 	 * might be blocking on hci_cmd_sync_work while waiting its respective
2971 	 * event so we need to hci_cmd_sync_cancel to cancel it.
2972 	 *
2973 	 * hci_connect_le serializes the connection attempts so only one
2974 	 * connection can be in BT_CONNECT at time.
2975 	 */
2976 	if (conn->state == BT_CONNECT && hdev->req_status == HCI_REQ_PEND) {
2977 		switch (hci_skb_event(hdev->sent_cmd)) {
2978 		case HCI_EV_LE_CONN_COMPLETE:
2979 		case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
2980 		case HCI_EVT_LE_CIS_ESTABLISHED:
2981 			hci_cmd_sync_cancel(hdev, ECANCELED);
2982 			break;
2983 		}
2984 	}
2985 
2986 	return hci_cmd_sync_queue(hdev, abort_conn_sync, UINT_PTR(conn->handle),
2987 				  NULL);
2988 }
2989