xref: /openbmc/linux/net/bluetooth/hci_conn.c (revision a0912892)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4    Copyright 2023 NXP
5 
6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License version 2 as
10    published by the Free Software Foundation;
11 
12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 
21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23    SOFTWARE IS DISCLAIMED.
24 */
25 
26 /* Bluetooth HCI connection handling. */
27 
28 #include <linux/export.h>
29 #include <linux/debugfs.h>
30 
31 #include <net/bluetooth/bluetooth.h>
32 #include <net/bluetooth/hci_core.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/iso.h>
35 #include <net/bluetooth/mgmt.h>
36 
37 #include "hci_request.h"
38 #include "smp.h"
39 #include "a2mp.h"
40 #include "eir.h"
41 
/* One row of the (e)SCO parameter tables below (S/T/D sets) */
struct sco_param {
	u16 pkt_type;       /* allowed/forbidden packet types (ESCO_* bits) */
	u16 max_latency;    /* max latency in ms, 0xffff = don't care */
	u8  retrans_effort; /* retransmission effort, 0xff = don't care */
};
47 
/* Pairs a connection with its ACL handle for work queued via
 * hci_cmd_sync_queue (see hci_setup_sync/hci_enhanced_setup_sync).
 */
struct conn_handle_t {
	struct hci_conn *conn;
	__u16 handle;
};
52 
/* CVSD over eSCO: parameter sets tried in order, indexed by
 * conn->attempt - 1 (see find_next_esco_param).
 */
static const struct sco_param esco_param_cvsd[] = {
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a,	0x01 }, /* S3 */
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007,	0x01 }, /* S2 */
	{ EDR_ESCO_MASK | ESCO_EV3,   0x0007,	0x01 }, /* S1 */
	{ EDR_ESCO_MASK | ESCO_HV3,   0xffff,	0x01 }, /* D1 */
	{ EDR_ESCO_MASK | ESCO_HV1,   0xffff,	0x01 }, /* D0 */
};
60 
/* CVSD over legacy SCO (controller without eSCO support) */
static const struct sco_param sco_param_cvsd[] = {
	{ EDR_ESCO_MASK | ESCO_HV3,   0xffff,	0xff }, /* D1 */
	{ EDR_ESCO_MASK | ESCO_HV1,   0xffff,	0xff }, /* D0 */
};
65 
/* mSBC (transparent air mode) over eSCO, tried in order per attempt */
static const struct sco_param esco_param_msbc[] = {
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d,	0x02 }, /* T2 */
	{ EDR_ESCO_MASK | ESCO_EV3,   0x0008,	0x02 }, /* T1 */
};
70 
/* Clean up after an LE scan-then-connect attempt ended with @status.
 * Resolves the peer to its identity address (if an IRK is known), drops
 * the pending params' connection reference and, for explicit connect
 * requests, reports the failure to userspace and re-files the params on
 * the appropriate pending list before refreshing passive scanning.
 *
 * This function requires the caller holds hdev->lock
 */
static void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status)
{
	struct hci_conn_params *params;
	struct hci_dev *hdev = conn->hdev;
	struct smp_irk *irk;
	bdaddr_t *bdaddr;
	u8 bdaddr_type;

	bdaddr = &conn->dst;
	bdaddr_type = conn->dst_type;

	/* Check if we need to convert to identity address */
	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		bdaddr_type = irk->addr_type;
	}

	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, bdaddr,
					   bdaddr_type);
	if (!params)
		return;

	/* Release the reference the pending entry held on the connection */
	if (params->conn) {
		hci_conn_drop(params->conn);
		hci_conn_put(params->conn);
		params->conn = NULL;
	}

	if (!params->explicit_connect)
		return;

	/* If the status indicates successful cancellation of
	 * the attempt (i.e. Unknown Connection Id) there's no point of
	 * notifying failure since we'll go back to keep trying to
	 * connect. The only exception is explicit connect requests
	 * where a timeout + cancel does indicate an actual failure.
	 */
	if (status && status != HCI_ERROR_UNKNOWN_CONN_ID)
		mgmt_connect_failed(hdev, &conn->dst, conn->type,
				    conn->dst_type, status);

	/* The connection attempt was doing scan for new RPA, and is
	 * in scan phase. If params are not associated with any other
	 * autoconnect action, remove them completely. If they are, just unmark
	 * them as waiting for connection, by clearing explicit_connect field.
	 */
	params->explicit_connect = false;

	hci_pend_le_list_del_init(params);

	switch (params->auto_connect) {
	case HCI_AUTO_CONN_EXPLICIT:
		hci_conn_params_del(hdev, bdaddr, bdaddr_type);
		/* return instead of break to avoid duplicate scan update */
		return;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		hci_pend_le_list_add(params, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		hci_pend_le_list_add(params, &hdev->pend_le_reports);
		break;
	default:
		break;
	}

	hci_update_passive_scan(hdev);
}
141 
/* Final teardown of @conn: drop flagged params/link keys, flush L2CAP
 * channels, unhash the connection, run its type-specific cleanup
 * callback, notify the driver, then release sysfs/debugfs entries and
 * the references held on hdev and conn.
 *
 * NOTE(review): appears to require hdev->lock held — confirm at call
 * sites.
 */
static void hci_conn_cleanup(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags))
		hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type);

	if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
		hci_remove_link_key(hdev, &conn->dst);

	hci_chan_list_flush(conn);

	hci_conn_hash_del(hdev, conn);

	/* Type-specific cleanup (bis_cleanup/cis_cleanup for ISO links) */
	if (conn->cleanup)
		conn->cleanup(conn);

	if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
		switch (conn->setting & SCO_AIRMODE_MASK) {
		case SCO_AIRMODE_CVSD:
		case SCO_AIRMODE_TRANSP:
			if (hdev->notify)
				hdev->notify(hdev, HCI_NOTIFY_DISABLE_SCO);
			break;
		}
	} else {
		if (hdev->notify)
			hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
	}

	hci_conn_del_sysfs(conn);

	debugfs_remove_recursive(conn->debugfs);

	hci_dev_put(hdev);

	hci_conn_put(conn);
}
180 
/* Start an outgoing BR/EDR ACL connection to conn->dst.  Any ongoing
 * inquiry is cancelled first and the attempt is parked in BT_CONNECT2
 * until the cancel completes.  Page-scan parameters are seeded from the
 * inquiry cache when a sufficiently fresh entry exists.
 */
static void hci_acl_create_connection(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct inquiry_entry *ie;
	struct hci_cp_create_conn cp;

	BT_DBG("hcon %p", conn);

	/* Many controllers disallow HCI Create Connection while it is doing
	 * HCI Inquiry. So we cancel the Inquiry first before issuing HCI Create
	 * Connection. This may cause the MGMT discovering state to become false
	 * without user space's request but it is okay since the MGMT Discovery
	 * APIs do not promise that discovery should be done forever. Instead,
	 * the user space monitors the status of MGMT discovering and it may
	 * request for discovery again when this flag becomes false.
	 */
	if (test_bit(HCI_INQUIRY, &hdev->flags)) {
		/* Put this connection to "pending" state so that it will be
		 * executed after the inquiry cancel command complete event.
		 */
		conn->state = BT_CONNECT2;
		hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
		return;
	}

	conn->state = BT_CONNECT;
	conn->out = true;
	conn->role = HCI_ROLE_MASTER;

	conn->attempt++;

	conn->link_policy = hdev->link_policy;

	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.bdaddr, &conn->dst);
	cp.pscan_rep_mode = 0x02; /* default used when the cache has no data */

	ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
	if (ie) {
		if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
			cp.pscan_rep_mode = ie->data.pscan_rep_mode;
			cp.pscan_mode     = ie->data.pscan_mode;
			/* high bit flags the clock offset as valid */
			cp.clock_offset   = ie->data.clock_offset |
					    cpu_to_le16(0x8000);
		}

		memcpy(conn->dev_class, ie->data.dev_class, 3);
	}

	cp.pkt_type = cpu_to_le16(conn->pkt_type);
	if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
		cp.role_switch = 0x01;
	else
		cp.role_switch = 0x00;

	hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
}
238 
/* Begin disconnecting @conn with HCI @reason.  Returns the result of
 * hci_abort_conn().
 */
int hci_disconnect(struct hci_conn *conn, __u8 reason)
{
	BT_DBG("hcon %p", conn);

	/* When we are central of an established connection and it enters
	 * the disconnect timeout, then go ahead and try to read the
	 * current clock offset.  Processing of the result is done
	 * within the event handling and hci_clock_offset_evt function.
	 */
	if (conn->type == ACL_LINK && conn->role == HCI_ROLE_MASTER &&
	    (conn->state == BT_CONNECTED || conn->state == BT_CONFIG)) {
		struct hci_dev *hdev = conn->hdev;
		struct hci_cp_read_clock_offset clkoff_cp;

		clkoff_cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_READ_CLOCK_OFFSET, sizeof(clkoff_cp),
			     &clkoff_cp);
	}

	return hci_abort_conn(conn, reason);
}
260 
261 static void hci_add_sco(struct hci_conn *conn, __u16 handle)
262 {
263 	struct hci_dev *hdev = conn->hdev;
264 	struct hci_cp_add_sco cp;
265 
266 	BT_DBG("hcon %p", conn);
267 
268 	conn->state = BT_CONNECT;
269 	conn->out = true;
270 
271 	conn->attempt++;
272 
273 	cp.handle   = cpu_to_le16(handle);
274 	cp.pkt_type = cpu_to_le16(conn->pkt_type);
275 
276 	hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
277 }
278 
279 static bool find_next_esco_param(struct hci_conn *conn,
280 				 const struct sco_param *esco_param, int size)
281 {
282 	if (!conn->parent)
283 		return false;
284 
285 	for (; conn->attempt <= size; conn->attempt++) {
286 		if (lmp_esco_2m_capable(conn->parent) ||
287 		    (esco_param[conn->attempt - 1].pkt_type & ESCO_2EV3))
288 			break;
289 		BT_DBG("hcon %p skipped attempt %d, eSCO 2M not supported",
290 		       conn, conn->attempt);
291 	}
292 
293 	return conn->attempt <= size;
294 }
295 
296 static int configure_datapath_sync(struct hci_dev *hdev, struct bt_codec *codec)
297 {
298 	int err;
299 	__u8 vnd_len, *vnd_data = NULL;
300 	struct hci_op_configure_data_path *cmd = NULL;
301 
302 	err = hdev->get_codec_config_data(hdev, ESCO_LINK, codec, &vnd_len,
303 					  &vnd_data);
304 	if (err < 0)
305 		goto error;
306 
307 	cmd = kzalloc(sizeof(*cmd) + vnd_len, GFP_KERNEL);
308 	if (!cmd) {
309 		err = -ENOMEM;
310 		goto error;
311 	}
312 
313 	err = hdev->get_data_path_id(hdev, &cmd->data_path_id);
314 	if (err < 0)
315 		goto error;
316 
317 	cmd->vnd_len = vnd_len;
318 	memcpy(cmd->vnd_data, vnd_data, vnd_len);
319 
320 	cmd->direction = 0x00;
321 	__hci_cmd_sync_status(hdev, HCI_CONFIGURE_DATA_PATH,
322 			      sizeof(*cmd) + vnd_len, cmd, HCI_CMD_TIMEOUT);
323 
324 	cmd->direction = 0x01;
325 	err = __hci_cmd_sync_status(hdev, HCI_CONFIGURE_DATA_PATH,
326 				    sizeof(*cmd) + vnd_len, cmd,
327 				    HCI_CMD_TIMEOUT);
328 error:
329 
330 	kfree(cmd);
331 	kfree(vnd_data);
332 	return err;
333 }
334 
335 static int hci_enhanced_setup_sync(struct hci_dev *hdev, void *data)
336 {
337 	struct conn_handle_t *conn_handle = data;
338 	struct hci_conn *conn = conn_handle->conn;
339 	__u16 handle = conn_handle->handle;
340 	struct hci_cp_enhanced_setup_sync_conn cp;
341 	const struct sco_param *param;
342 
343 	kfree(conn_handle);
344 
345 	bt_dev_dbg(hdev, "hcon %p", conn);
346 
347 	/* for offload use case, codec needs to configured before opening SCO */
348 	if (conn->codec.data_path)
349 		configure_datapath_sync(hdev, &conn->codec);
350 
351 	conn->state = BT_CONNECT;
352 	conn->out = true;
353 
354 	conn->attempt++;
355 
356 	memset(&cp, 0x00, sizeof(cp));
357 
358 	cp.handle   = cpu_to_le16(handle);
359 
360 	cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
361 	cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
362 
363 	switch (conn->codec.id) {
364 	case BT_CODEC_MSBC:
365 		if (!find_next_esco_param(conn, esco_param_msbc,
366 					  ARRAY_SIZE(esco_param_msbc)))
367 			return -EINVAL;
368 
369 		param = &esco_param_msbc[conn->attempt - 1];
370 		cp.tx_coding_format.id = 0x05;
371 		cp.rx_coding_format.id = 0x05;
372 		cp.tx_codec_frame_size = __cpu_to_le16(60);
373 		cp.rx_codec_frame_size = __cpu_to_le16(60);
374 		cp.in_bandwidth = __cpu_to_le32(32000);
375 		cp.out_bandwidth = __cpu_to_le32(32000);
376 		cp.in_coding_format.id = 0x04;
377 		cp.out_coding_format.id = 0x04;
378 		cp.in_coded_data_size = __cpu_to_le16(16);
379 		cp.out_coded_data_size = __cpu_to_le16(16);
380 		cp.in_pcm_data_format = 2;
381 		cp.out_pcm_data_format = 2;
382 		cp.in_pcm_sample_payload_msb_pos = 0;
383 		cp.out_pcm_sample_payload_msb_pos = 0;
384 		cp.in_data_path = conn->codec.data_path;
385 		cp.out_data_path = conn->codec.data_path;
386 		cp.in_transport_unit_size = 1;
387 		cp.out_transport_unit_size = 1;
388 		break;
389 
390 	case BT_CODEC_TRANSPARENT:
391 		if (!find_next_esco_param(conn, esco_param_msbc,
392 					  ARRAY_SIZE(esco_param_msbc)))
393 			return false;
394 		param = &esco_param_msbc[conn->attempt - 1];
395 		cp.tx_coding_format.id = 0x03;
396 		cp.rx_coding_format.id = 0x03;
397 		cp.tx_codec_frame_size = __cpu_to_le16(60);
398 		cp.rx_codec_frame_size = __cpu_to_le16(60);
399 		cp.in_bandwidth = __cpu_to_le32(0x1f40);
400 		cp.out_bandwidth = __cpu_to_le32(0x1f40);
401 		cp.in_coding_format.id = 0x03;
402 		cp.out_coding_format.id = 0x03;
403 		cp.in_coded_data_size = __cpu_to_le16(16);
404 		cp.out_coded_data_size = __cpu_to_le16(16);
405 		cp.in_pcm_data_format = 2;
406 		cp.out_pcm_data_format = 2;
407 		cp.in_pcm_sample_payload_msb_pos = 0;
408 		cp.out_pcm_sample_payload_msb_pos = 0;
409 		cp.in_data_path = conn->codec.data_path;
410 		cp.out_data_path = conn->codec.data_path;
411 		cp.in_transport_unit_size = 1;
412 		cp.out_transport_unit_size = 1;
413 		break;
414 
415 	case BT_CODEC_CVSD:
416 		if (conn->parent && lmp_esco_capable(conn->parent)) {
417 			if (!find_next_esco_param(conn, esco_param_cvsd,
418 						  ARRAY_SIZE(esco_param_cvsd)))
419 				return -EINVAL;
420 			param = &esco_param_cvsd[conn->attempt - 1];
421 		} else {
422 			if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
423 				return -EINVAL;
424 			param = &sco_param_cvsd[conn->attempt - 1];
425 		}
426 		cp.tx_coding_format.id = 2;
427 		cp.rx_coding_format.id = 2;
428 		cp.tx_codec_frame_size = __cpu_to_le16(60);
429 		cp.rx_codec_frame_size = __cpu_to_le16(60);
430 		cp.in_bandwidth = __cpu_to_le32(16000);
431 		cp.out_bandwidth = __cpu_to_le32(16000);
432 		cp.in_coding_format.id = 4;
433 		cp.out_coding_format.id = 4;
434 		cp.in_coded_data_size = __cpu_to_le16(16);
435 		cp.out_coded_data_size = __cpu_to_le16(16);
436 		cp.in_pcm_data_format = 2;
437 		cp.out_pcm_data_format = 2;
438 		cp.in_pcm_sample_payload_msb_pos = 0;
439 		cp.out_pcm_sample_payload_msb_pos = 0;
440 		cp.in_data_path = conn->codec.data_path;
441 		cp.out_data_path = conn->codec.data_path;
442 		cp.in_transport_unit_size = 16;
443 		cp.out_transport_unit_size = 16;
444 		break;
445 	default:
446 		return -EINVAL;
447 	}
448 
449 	cp.retrans_effort = param->retrans_effort;
450 	cp.pkt_type = __cpu_to_le16(param->pkt_type);
451 	cp.max_latency = __cpu_to_le16(param->max_latency);
452 
453 	if (hci_send_cmd(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
454 		return -EIO;
455 
456 	return 0;
457 }
458 
/* Issue the legacy HCI Setup Synchronous Connection command for @conn
 * on ACL @handle, picking (e)SCO parameters from the air-mode tables
 * according to conn->attempt.  Returns true if the command was sent.
 */
static bool hci_setup_sync_conn(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_setup_sync_conn cp;
	const struct sco_param *param;

	bt_dev_dbg(hdev, "hcon %p", conn);

	conn->state = BT_CONNECT;
	conn->out = true;

	conn->attempt++;

	cp.handle   = cpu_to_le16(handle);

	cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
	cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
	cp.voice_setting  = cpu_to_le16(conn->setting);

	switch (conn->setting & SCO_AIRMODE_MASK) {
	case SCO_AIRMODE_TRANSP:
		if (!find_next_esco_param(conn, esco_param_msbc,
					  ARRAY_SIZE(esco_param_msbc)))
			return false;
		param = &esco_param_msbc[conn->attempt - 1];
		break;
	case SCO_AIRMODE_CVSD:
		if (conn->parent && lmp_esco_capable(conn->parent)) {
			if (!find_next_esco_param(conn, esco_param_cvsd,
						  ARRAY_SIZE(esco_param_cvsd)))
				return false;
			param = &esco_param_cvsd[conn->attempt - 1];
		} else {
			/* Legacy SCO fallback when the peer lacks eSCO */
			if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
				return false;
			param = &sco_param_cvsd[conn->attempt - 1];
		}
		break;
	default:
		return false;
	}

	cp.retrans_effort = param->retrans_effort;
	cp.pkt_type = __cpu_to_le16(param->pkt_type);
	cp.max_latency = __cpu_to_le16(param->max_latency);

	if (hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
		return false;

	return true;
}
510 
/* Set up a synchronous (SCO/eSCO) connection on ACL @handle.  Uses the
 * Enhanced Setup Synchronous Connection command, queued on the cmd_sync
 * context, when the controller supports it; otherwise falls back to the
 * legacy setup.  Returns true if the request was queued/sent.
 */
bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
{
	int result;
	struct conn_handle_t *conn_handle;

	if (enhanced_sync_conn_capable(conn->hdev)) {
		conn_handle = kzalloc(sizeof(*conn_handle), GFP_KERNEL);

		if (!conn_handle)
			return false;

		conn_handle->conn = conn;
		conn_handle->handle = handle;
		result = hci_cmd_sync_queue(conn->hdev, hci_enhanced_setup_sync,
					    conn_handle, NULL);
		/* on queue failure ownership stays here, so free it */
		if (result < 0)
			kfree(conn_handle);

		return result == 0;
	}

	return hci_setup_sync_conn(conn, handle);
}
534 
/* Send HCI LE Connection Update with the given parameters, also caching
 * them in the stored connection parameters for this peer when present.
 * Returns 0x01 if stored parameters were updated, 0x00 otherwise.
 */
u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
		      u16 to_multiplier)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_conn_params *params;
	struct hci_cp_le_conn_update cp;

	hci_dev_lock(hdev);

	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		params->conn_min_interval = min;
		params->conn_max_interval = max;
		params->conn_latency = latency;
		params->supervision_timeout = to_multiplier;
	}

	hci_dev_unlock(hdev);

	memset(&cp, 0, sizeof(cp));
	cp.handle		= cpu_to_le16(conn->handle);
	cp.conn_interval_min	= cpu_to_le16(min);
	cp.conn_interval_max	= cpu_to_le16(max);
	cp.conn_latency		= cpu_to_le16(latency);
	cp.supervision_timeout	= cpu_to_le16(to_multiplier);
	cp.min_ce_len		= cpu_to_le16(0x0000);
	cp.max_ce_len		= cpu_to_le16(0x0000);

	hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);

	if (params)
		return 0x01;

	return 0x00;
}
570 
/* Start LE link-layer encryption on @conn using long term key @ltk
 * (@key_size bytes) together with the given @ediv/@rand values.
 */
void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
		      __u8 ltk[16], __u8 key_size)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_le_start_enc cp;

	BT_DBG("hcon %p", conn);

	memset(&cp, 0, sizeof(cp));

	cp.handle = cpu_to_le16(conn->handle);
	cp.rand = rand;
	cp.ediv = ediv;
	/* only key_size bytes are valid; the rest stays zeroed */
	memcpy(cp.ltk, ltk, key_size);

	hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
}
588 
/* Continue (or fail) the SCO/eSCO setup hanging off ACL @conn once the
 * ACL setup finished with @status.  On success the first linked child
 * connection is set up; on failure it is torn down.
 *
 * Device _must_ be locked.
 */
void hci_sco_setup(struct hci_conn *conn, __u8 status)
{
	struct hci_link *link;

	link = list_first_entry_or_null(&conn->link_list, struct hci_link, list);
	if (!link || !link->conn)
		return;

	BT_DBG("hcon %p", conn);

	if (!status) {
		if (lmp_esco_capable(conn->hdev))
			hci_setup_sync(link->conn, conn->handle);
		else
			hci_add_sco(link->conn, conn->handle);
	} else {
		hci_connect_cfm(link->conn, status);
		hci_conn_del(link->conn);
	}
}
610 
/* Deferred-disconnect worker: abort the connection once its last user
 * has dropped it (refcnt reached zero).
 */
static void hci_conn_timeout(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     disc_work.work);
	int refcnt = atomic_read(&conn->refcnt);

	BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));

	WARN_ON(refcnt < 0);

	/* FIXME: It was observed that in pairing failed scenario, refcnt
	 * drops below 0. Probably this is because l2cap_conn_del calls
	 * l2cap_chan_del for each channel, and inside l2cap_chan_del conn is
	 * dropped. After that loop hci_chan_del is called which also drops
	 * conn. For now make sure that ACL is alive if refcnt is higher then 0,
	 * otherwise drop it.
	 */
	if (refcnt > 0)
		return;

	hci_abort_conn(conn, hci_proto_disconn_ind(conn));
}
633 
/* Idle-timer worker: enter sniff mode on an ACL link, enabling sniff
 * subrating first when both controller and peer support it.
 */
static void hci_conn_idle(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     idle_work.work);
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("hcon %p mode %d", conn, conn->mode);

	if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
		return;

	/* Only sensible while active and permitted by the link policy */
	if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
		return;

	if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
		struct hci_cp_sniff_subrate cp;
		cp.handle             = cpu_to_le16(conn->handle);
		cp.max_latency        = cpu_to_le16(0);
		cp.min_remote_timeout = cpu_to_le16(0);
		cp.min_local_timeout  = cpu_to_le16(0);
		hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
	}

	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
		struct hci_cp_sniff_mode cp;
		cp.handle       = cpu_to_le16(conn->handle);
		cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
		cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
		cp.attempt      = cpu_to_le16(4);
		cp.timeout      = cpu_to_le16(1);
		hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
	}
}
668 
669 static void hci_conn_auto_accept(struct work_struct *work)
670 {
671 	struct hci_conn *conn = container_of(work, struct hci_conn,
672 					     auto_accept_work.work);
673 
674 	hci_send_cmd(conn->hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
675 		     &conn->dst);
676 }
677 
678 static void le_disable_advertising(struct hci_dev *hdev)
679 {
680 	if (ext_adv_capable(hdev)) {
681 		struct hci_cp_le_set_ext_adv_enable cp;
682 
683 		cp.enable = 0x00;
684 		cp.num_of_sets = 0x00;
685 
686 		hci_send_cmd(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp),
687 			     &cp);
688 	} else {
689 		u8 enable = 0x00;
690 		hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
691 			     &enable);
692 	}
693 }
694 
/* LE connection-attempt timeout worker: stop directed advertising (as
 * peripheral) or abort the outgoing connection (as central).
 */
static void le_conn_timeout(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     le_conn_timeout.work);
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("");

	/* We could end up here due to having done directed advertising,
	 * so clean up the state if necessary. This should however only
	 * happen with broken hardware or if low duty cycle was used
	 * (which doesn't have a timeout of its own).
	 */
	if (conn->role == HCI_ROLE_SLAVE) {
		/* Disable LE Advertising */
		le_disable_advertising(hdev);
		hci_dev_lock(hdev);
		hci_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
		hci_dev_unlock(hdev);
		return;
	}

	hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
}
719 
/* Buffer for HCI LE Set CIG Parameters: fixed header plus up to 0x1f
 * per-CIS entries (the spec maximum CIS count).
 */
struct iso_cig_params {
	struct hci_cp_le_set_cig_params cp;
	struct hci_cis_params cis[0x1f];
};
724 
/* Scratch state passed to hci_conn_hash iterators and cmd_sync
 * callbacks when matching/terminating ISO connections.
 */
struct iso_list_data {
	union {
		u8  cig;	/* CIG id to match (unicast) */
		u8  big;	/* BIG handle to match (broadcast) */
	};
	union {
		u8  cis;	/* CIS id to match */
		u8  bis;	/* BIS id to match */
		u16 sync_handle; /* PA sync handle (broadcast receiver) */
	};
	int count;		/* number of matching connections found */
	bool big_term;		/* BIG was created and must be terminated */
	bool big_sync_term;	/* BIG sync exists and must be terminated */
};
739 
/* Iterator callback: count BIS connections matching d->big and d->bis.
 * NOTE(review): entries are also skipped when d->bis is
 * BT_ISO_QOS_BIS_UNSET, i.e. an unset BIS id never matches — confirm
 * callers rely on this.
 */
static void bis_list(struct hci_conn *conn, void *data)
{
	struct iso_list_data *d = data;

	/* Skip if not broadcast/ANY address */
	if (bacmp(&conn->dst, BDADDR_ANY))
		return;

	if (d->big != conn->iso_qos.bcast.big || d->bis == BT_ISO_QOS_BIS_UNSET ||
	    d->bis != conn->iso_qos.bcast.bis)
		return;

	d->count++;
}
754 
/* hci_cmd_sync callback: remove the per-BIS advertising instance and,
 * if the BIG was actually created, terminate it on the controller.
 */
static int terminate_big_sync(struct hci_dev *hdev, void *data)
{
	struct iso_list_data *d = data;

	bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", d->big, d->bis);

	hci_remove_ext_adv_instance_sync(hdev, d->bis, NULL);

	/* Only terminate BIG if it has been created */
	if (!d->big_term)
		return 0;

	return hci_le_terminate_big_sync(hdev, d->big,
					 HCI_ERROR_LOCAL_HOST_TERM);
}
770 
/* hci_cmd_sync destroy callback: free the iso_list_data passed as @data */
static void terminate_big_destroy(struct hci_dev *hdev, void *data, int err)
{
	kfree(data);
}
775 
/* Queue termination of the BIG (and its advertising instance) behind
 * broadcaster BIS @conn.  Returns 0 or a negative errno.
 */
static int hci_le_terminate_big(struct hci_dev *hdev, struct hci_conn *conn)
{
	struct iso_list_data *d;
	int ret;

	bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", conn->iso_qos.bcast.big,
		   conn->iso_qos.bcast.bis);

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->big = conn->iso_qos.bcast.big;
	d->bis = conn->iso_qos.bcast.bis;
	d->big_term = test_and_clear_bit(HCI_CONN_BIG_CREATED, &conn->flags);

	ret = hci_cmd_sync_queue(hdev, terminate_big_sync, d,
				 terminate_big_destroy);
	/* queueing failed: destroy callback won't run, free here */
	if (ret)
		kfree(d);

	return ret;
}
799 
/* hci_cmd_sync callback (broadcast receiver): terminate the BIG sync if
 * one was established, then the periodic-advertising sync.
 */
static int big_terminate_sync(struct hci_dev *hdev, void *data)
{
	struct iso_list_data *d = data;

	bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", d->big,
		   d->sync_handle);

	if (d->big_sync_term)
		hci_le_big_terminate_sync(hdev, d->big);

	return hci_le_pa_terminate_sync(hdev, d->sync_handle);
}
812 
/* Queue BIG-sync/PA-sync termination for broadcast receiver @conn on
 * BIG @big.  Returns 0 or a negative errno.
 */
static int hci_le_big_terminate(struct hci_dev *hdev, u8 big, struct hci_conn *conn)
{
	struct iso_list_data *d;
	int ret;

	bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", big, conn->sync_handle);

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->big = big;
	d->sync_handle = conn->sync_handle;
	d->big_sync_term = test_and_clear_bit(HCI_CONN_BIG_SYNC, &conn->flags);

	ret = hci_cmd_sync_queue(hdev, big_terminate_sync, d,
				 terminate_big_destroy);
	/* queueing failed: destroy callback won't run, free here */
	if (ret)
		kfree(d);

	return ret;
}
835 
/* Cleanup BIS connection
 *
 * Detects if there are any BIS left connected in a BIG
 * broadcaster: Remove advertising instance and terminate BIG.
 * broadcaster receiver: Terminate BIG sync and terminate PA sync.
 */
static void bis_cleanup(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_conn *bis;

	bt_dev_dbg(hdev, "conn %p", conn);

	if (conn->role == HCI_ROLE_MASTER) {
		if (!test_and_clear_bit(HCI_CONN_PER_ADV, &conn->flags))
			return;

		/* Check if ISO connection is a BIS and terminate advertising
		 * set and BIG if there are no other connections using it.
		 */
		bis = hci_conn_hash_lookup_big(hdev, conn->iso_qos.bcast.big);
		if (bis)
			return;

		hci_le_terminate_big(hdev, conn);
	} else {
		/* Receiver side: only tear down once the last BIS of this
		 * BIG is gone.
		 */
		bis = hci_conn_hash_lookup_big_any_dst(hdev,
						       conn->iso_qos.bcast.big);

		if (bis)
			return;

		hci_le_big_terminate(hdev, conn->iso_qos.bcast.big,
				     conn);
	}
}
872 
/* hci_cmd_sync callback: @data carries the CIG handle encoded directly
 * in the pointer value (ERR_PTR in hci_le_remove_cig, decoded here with
 * PTR_ERR) — no allocation involved.
 */
static int remove_cig_sync(struct hci_dev *hdev, void *data)
{
	u8 handle = PTR_ERR(data);

	return hci_le_remove_cig_sync(hdev, handle);
}
879 
/* Queue removal of CIG @handle, smuggling the handle in the callback's
 * data pointer (see remove_cig_sync).  Returns 0 or a negative errno.
 */
static int hci_le_remove_cig(struct hci_dev *hdev, u8 handle)
{
	bt_dev_dbg(hdev, "handle 0x%2.2x", handle);

	return hci_cmd_sync_queue(hdev, remove_cig_sync, ERR_PTR(handle), NULL);
}
886 
887 static void find_cis(struct hci_conn *conn, void *data)
888 {
889 	struct iso_list_data *d = data;
890 
891 	/* Ignore broadcast or if CIG don't match */
892 	if (!bacmp(&conn->dst, BDADDR_ANY) || d->cig != conn->iso_qos.ucast.cig)
893 		return;
894 
895 	d->count++;
896 }
897 
/* Cleanup CIS connection:
 *
 * Detects if there are any CIS left connected in a CIG and, if not,
 * removes the CIG.
 */
static void cis_cleanup(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct iso_list_data d;

	if (conn->iso_qos.ucast.cig == BT_ISO_QOS_CIG_UNSET)
		return;

	memset(&d, 0, sizeof(d));
	d.cig = conn->iso_qos.ucast.cig;

	/* Check if ISO connection is a CIS and remove CIG if there are
	 * no other connections using it.
	 */
	hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_BOUND, &d);
	hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_CONNECT, &d);
	hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_CONNECTED, &d);
	if (d.count)
		return;

	hci_le_remove_cig(hdev, conn->iso_qos.ucast.cig);
}
924 
/* Allocate a temporary "unset" connection handle, i.e. a value above
 * HCI_CONN_HANDLE_MAX, for a conn whose real handle is not known yet.
 *
 * NOTE(review): this assumes the hash list keeps unset handles in
 * ascending order so the first gap (or the value past the last entry)
 * is free — confirm against hci_conn_hash_add.
 */
static u16 hci_conn_hash_alloc_unset(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn  *c;
	u16 handle = HCI_CONN_HANDLE_MAX + 1;

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		/* Find the first unused handle */
		if (handle == 0xffff || c->handle != handle)
			break;
		handle++;
	}
	rcu_read_unlock();

	return handle;
}
943 
/* Allocate and initialise a new hci_conn of @type to @dst with initial
 * @role, add it to the connection hash and sysfs, and take a reference
 * on @hdev.  Returns the new connection (refcnt 0, handle "unset") or
 * NULL on allocation failure.
 */
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
			      u8 role)
{
	struct hci_conn *conn;

	BT_DBG("%s dst %pMR", hdev->name, dst);

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn)
		return NULL;

	bacpy(&conn->dst, dst);
	bacpy(&conn->src, &hdev->bdaddr);
	/* placeholder handle until the controller assigns the real one */
	conn->handle = hci_conn_hash_alloc_unset(hdev);
	conn->hdev  = hdev;
	conn->type  = type;
	conn->role  = role;
	conn->mode  = HCI_CM_ACTIVE;
	conn->state = BT_OPEN;
	conn->auth_type = HCI_AT_GENERAL_BONDING;
	conn->io_capability = hdev->io_capability;
	conn->remote_auth = 0xff;
	conn->key_type = 0xff;
	conn->rssi = HCI_RSSI_INVALID;
	conn->tx_power = HCI_TX_POWER_INVALID;
	conn->max_tx_power = HCI_TX_POWER_INVALID;

	set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;

	/* Set Default Authenticated payload timeout to 30s */
	conn->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;

	if (conn->role == HCI_ROLE_MASTER)
		conn->out = true;

	switch (type) {
	case ACL_LINK:
		conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
		break;
	case LE_LINK:
		/* conn->src should reflect the local identity address */
		hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
		break;
	case ISO_LINK:
		/* conn->src should reflect the local identity address */
		hci_copy_identity_address(hdev, &conn->src, &conn->src_type);

		/* set proper cleanup function */
		if (!bacmp(dst, BDADDR_ANY))
			conn->cleanup = bis_cleanup;
		else if (conn->role == HCI_ROLE_MASTER)
			conn->cleanup = cis_cleanup;

		break;
	case SCO_LINK:
		if (lmp_esco_capable(hdev))
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
		else
			conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
		break;
	case ESCO_LINK:
		conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
		break;
	}

	skb_queue_head_init(&conn->data_q);

	INIT_LIST_HEAD(&conn->chan_list);
	INIT_LIST_HEAD(&conn->link_list);

	INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
	INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
	INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
	INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);

	atomic_set(&conn->refcnt, 0);

	hci_dev_hold(hdev);

	hci_conn_hash_add(hdev, conn);

	/* The SCO and eSCO connections will only be notified when their
	 * setup has been completed. This is different to ACL links which
	 * can be notified right away.
	 */
	if (conn->type != SCO_LINK && conn->type != ESCO_LINK) {
		if (hdev->notify)
			hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);
	}

	hci_conn_init_sysfs(conn);

	return conn;
}
1040 
/* Detach @conn from the parent/child link topology.  For a parent
 * (no conn->parent) every child link is unlinked recursively; for a
 * child the hci_link entry is removed and the reference it held on the
 * parent is released.
 */
static void hci_conn_unlink(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	bt_dev_dbg(hdev, "hcon %p", conn);

	if (!conn->parent) {
		struct hci_link *link, *t;

		list_for_each_entry_safe(link, t, &conn->link_list, list) {
			struct hci_conn *child = link->conn;

			hci_conn_unlink(child);

			/* If hdev is down it means
			 * hci_dev_close_sync/hci_conn_hash_flush is in progress
			 * and links don't need to be cleanup as all connections
			 * would be cleanup.
			 */
			if (!test_bit(HCI_UP, &hdev->flags))
				continue;

			/* Due to race, SCO connection might be not established
			 * yet at this point. Delete it now, otherwise it is
			 * possible for it to be stuck and can't be deleted.
			 */
			if ((child->type == SCO_LINK ||
			     child->type == ESCO_LINK) &&
			    HCI_CONN_HANDLE_UNSET(child->handle))
				hci_conn_del(child);
		}

		return;
	}

	if (!conn->link)
		return;

	list_del_rcu(&conn->link->list);
	synchronize_rcu();

	/* Release the reference the link held on the parent */
	hci_conn_drop(conn->parent);
	hci_conn_put(conn->parent);
	conn->parent = NULL;

	kfree(conn->link);
	conn->link = NULL;
}
1089 
/* Tear down @conn: unlink it from any parent/children, cancel its pending
 * delayed work, return its unacknowledged packet credits to the matching
 * controller counters, and finally remove it from the connection hash via
 * hci_conn_cleanup().
 */
void hci_conn_del(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);

	hci_conn_unlink(conn);

	cancel_delayed_work_sync(&conn->disc_work);
	cancel_delayed_work_sync(&conn->auto_accept_work);
	cancel_delayed_work_sync(&conn->idle_work);

	if (conn->type == ACL_LINK) {
		/* Unacked frames */
		hdev->acl_cnt += conn->sent;
	} else if (conn->type == LE_LINK) {
		cancel_delayed_work(&conn->le_conn_timeout);

		/* Credit LE frames against the LE buffer pool when the
		 * controller has one, otherwise against the shared ACL pool.
		 */
		if (hdev->le_pkts)
			hdev->le_cnt += conn->sent;
		else
			hdev->acl_cnt += conn->sent;
	} else {
		/* Unacked ISO frames */
		if (conn->type == ISO_LINK) {
			if (hdev->iso_pkts)
				hdev->iso_cnt += conn->sent;
			else if (hdev->le_pkts)
				hdev->le_cnt += conn->sent;
			else
				hdev->acl_cnt += conn->sent;
		}
	}

	if (conn->amp_mgr)
		amp_mgr_put(conn->amp_mgr);

	skb_queue_purge(&conn->data_q);

	/* Remove the connection from the list and cleanup its remaining
	 * state. This is a separate function since for some cases like
	 * BT_CONNECT_SCAN we *only* want the cleanup part without the
	 * rest of hci_conn_del.
	 */
	hci_conn_cleanup(conn);
}
1136 
1137 struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, uint8_t src_type)
1138 {
1139 	int use_src = bacmp(src, BDADDR_ANY);
1140 	struct hci_dev *hdev = NULL, *d;
1141 
1142 	BT_DBG("%pMR -> %pMR", src, dst);
1143 
1144 	read_lock(&hci_dev_list_lock);
1145 
1146 	list_for_each_entry(d, &hci_dev_list, list) {
1147 		if (!test_bit(HCI_UP, &d->flags) ||
1148 		    hci_dev_test_flag(d, HCI_USER_CHANNEL) ||
1149 		    d->dev_type != HCI_PRIMARY)
1150 			continue;
1151 
1152 		/* Simple routing:
1153 		 *   No source address - find interface with bdaddr != dst
1154 		 *   Source address    - find interface with bdaddr == src
1155 		 */
1156 
1157 		if (use_src) {
1158 			bdaddr_t id_addr;
1159 			u8 id_addr_type;
1160 
1161 			if (src_type == BDADDR_BREDR) {
1162 				if (!lmp_bredr_capable(d))
1163 					continue;
1164 				bacpy(&id_addr, &d->bdaddr);
1165 				id_addr_type = BDADDR_BREDR;
1166 			} else {
1167 				if (!lmp_le_capable(d))
1168 					continue;
1169 
1170 				hci_copy_identity_address(d, &id_addr,
1171 							  &id_addr_type);
1172 
1173 				/* Convert from HCI to three-value type */
1174 				if (id_addr_type == ADDR_LE_DEV_PUBLIC)
1175 					id_addr_type = BDADDR_LE_PUBLIC;
1176 				else
1177 					id_addr_type = BDADDR_LE_RANDOM;
1178 			}
1179 
1180 			if (!bacmp(&id_addr, src) && id_addr_type == src_type) {
1181 				hdev = d; break;
1182 			}
1183 		} else {
1184 			if (bacmp(&d->bdaddr, dst)) {
1185 				hdev = d; break;
1186 			}
1187 		}
1188 	}
1189 
1190 	if (hdev)
1191 		hdev = hci_dev_hold(hdev);
1192 
1193 	read_unlock(&hci_dev_list_lock);
1194 	return hdev;
1195 }
1196 EXPORT_SYMBOL(hci_get_route);
1197 
/* This function requires the caller holds hdev->lock.
 *
 * Handle a failed LE connection attempt: undo the pending-connection
 * bookkeeping for @conn and re-enable advertising.
 */
static void hci_le_conn_failed(struct hci_conn *conn, u8 status)
{
	struct hci_dev *hdev = conn->hdev;

	hci_connect_le_scan_cleanup(conn, status);

	/* Enable advertising in case this was a failed connection
	 * attempt as a peripheral.
	 */
	hci_enable_advertising(hdev);
}
1210 
/* This function requires the caller holds hdev->lock.
 *
 * Report a failed connection attempt with HCI @status: run type-specific
 * failure handling, notify upper layers via hci_connect_cfm() and free the
 * connection.  @conn is no longer valid after this returns.
 */
void hci_conn_failed(struct hci_conn *conn, u8 status)
{
	struct hci_dev *hdev = conn->hdev;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	/* Only LE and ACL links need extra failure handling here. */
	switch (conn->type) {
	case LE_LINK:
		hci_le_conn_failed(conn, status);
		break;
	case ACL_LINK:
		mgmt_connect_failed(hdev, &conn->dst, conn->type,
				    conn->dst_type, status);
		break;
	}

	conn->state = BT_CLOSED;
	hci_connect_cfm(conn, status);
	hci_conn_del(conn);
}
1232 
/* This function requires the caller holds hdev->lock.
 *
 * Assign HCI @handle to @conn.  Returns 0 on success (including the
 * no-op case where the handle is unchanged), HCI_ERROR_INVALID_PARAMETERS
 * for an out-of-range handle, or the pending abort reason if the
 * connection is being aborted.
 */
u8 hci_conn_set_handle(struct hci_conn *conn, u16 handle)
{
	struct hci_dev *hdev = conn->hdev;

	bt_dev_dbg(hdev, "hcon %p handle 0x%4.4x", conn, handle);

	/* Nothing to do when the handle is already set to this value. */
	if (conn->handle == handle)
		return 0;

	if (handle > HCI_CONN_HANDLE_MAX) {
		bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x",
			   handle, HCI_CONN_HANDLE_MAX);
		return HCI_ERROR_INVALID_PARAMETERS;
	}

	/* If abort_reason has been sent it means the connection is being
	 * aborted and the handle shall not be changed.
	 */
	if (conn->abort_reason)
		return conn->abort_reason;

	conn->handle = handle;

	return 0;
}
1259 
/* Completion callback for the queued hci_connect_le_sync() request.
 *
 * @data carries the connection handle encoded with ERR_PTR(); the
 * connection is looked up again since it may have been freed while the
 * request was queued.
 */
static void create_le_conn_complete(struct hci_dev *hdev, void *data, int err)
{
	struct hci_conn *conn;
	u16 handle = PTR_ERR(data);

	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn)
		return;

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	if (!err) {
		hci_connect_le_scan_cleanup(conn, 0x00);
		goto done;
	}

	/* Check if connection is still pending */
	if (conn != hci_lookup_le_connect(hdev))
		goto done;

	/* Flush to make sure we send create conn cancel command if needed */
	flush_delayed_work(&conn->le_conn_timeout);
	hci_conn_failed(conn, bt_status(err));

done:
	hci_dev_unlock(hdev);
}
1289 
/* hci_cmd_sync work: issue LE Create Connection for the hci_conn whose
 * handle is encoded in @data with ERR_PTR().  Returns 0 if the connection
 * disappeared in the meantime.
 */
static int hci_connect_le_sync(struct hci_dev *hdev, void *data)
{
	struct hci_conn *conn;
	u16 handle = PTR_ERR(data);

	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn)
		return 0;

	bt_dev_dbg(hdev, "conn %p", conn);

	conn->state = BT_CONNECT;

	return hci_le_create_conn_sync(hdev, conn);
}
1305 
/* Initiate a direct LE connection to @dst.
 *
 * Returns the (held) hci_conn on success or an ERR_PTR():
 * -EOPNOTSUPP when the controller lacks LE support, -ECONNREFUSED when LE
 * is disabled, -EBUSY when another LE connect attempt is running or an
 * established connection to @dst already exists, -ENOMEM on allocation
 * failure.
 */
struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
				u8 dst_type, bool dst_resolved, u8 sec_level,
				u16 conn_timeout, u8 role)
{
	struct hci_conn *conn;
	struct smp_irk *irk;
	int err;

	/* Let's make sure that le is enabled.*/
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (lmp_le_capable(hdev))
			return ERR_PTR(-ECONNREFUSED);

		return ERR_PTR(-EOPNOTSUPP);
	}

	/* Since the controller supports only one LE connection attempt at a
	 * time, we return -EBUSY if there is any connection attempt running.
	 */
	if (hci_lookup_le_connect(hdev))
		return ERR_PTR(-EBUSY);

	/* If there's already a connection object but it's not in
	 * scanning state it means it must already be established, in
	 * which case we can't do anything else except report a failure
	 * to connect.
	 */
	conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
	if (conn && !test_bit(HCI_CONN_SCANNING, &conn->flags)) {
		return ERR_PTR(-EBUSY);
	}

	/* Check if the destination address has been resolved by the controller
	 * since if it did then the identity address shall be used.
	 */
	if (!dst_resolved) {
		/* When given an identity address with existing identity
		 * resolving key, the connection needs to be established
		 * to a resolvable random address.
		 *
		 * Storing the resolvable random address is required here
		 * to handle connection failures. The address will later
		 * be resolved back into the original identity address
		 * from the connect request.
		 */
		irk = hci_find_irk_by_addr(hdev, dst, dst_type);
		if (irk && bacmp(&irk->rpa, BDADDR_ANY)) {
			dst = &irk->rpa;
			dst_type = ADDR_LE_DEV_RANDOM;
		}
	}

	if (conn) {
		bacpy(&conn->dst, dst);
	} else {
		conn = hci_conn_add(hdev, LE_LINK, dst, role);
		if (!conn)
			return ERR_PTR(-ENOMEM);
		hci_conn_hold(conn);
		conn->pending_sec_level = sec_level;
	}

	conn->dst_type = dst_type;
	conn->sec_level = BT_SECURITY_LOW;
	conn->conn_timeout = conn_timeout;

	clear_bit(HCI_CONN_SCANNING, &conn->flags);

	/* Pass the handle (not a pointer) through the sync queue so that
	 * the callbacks cannot dereference a freed conn.
	 */
	err = hci_cmd_sync_queue(hdev, hci_connect_le_sync,
				 ERR_PTR(conn->handle),
				 create_le_conn_complete);
	if (err) {
		hci_conn_del(conn);
		return ERR_PTR(err);
	}

	return conn;
}
1384 
1385 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
1386 {
1387 	struct hci_conn *conn;
1388 
1389 	conn = hci_conn_hash_lookup_le(hdev, addr, type);
1390 	if (!conn)
1391 		return false;
1392 
1393 	if (conn->state != BT_CONNECTED)
1394 		return false;
1395 
1396 	return true;
1397 }
1398 
/* This function requires the caller holds hdev->lock.
 *
 * Ensure connection parameters for @addr exist and are queued on
 * hdev->pend_le_conns so an explicit (user-requested) connection is
 * attempted when the device is seen.  Returns 0, -EISCONN if already
 * connected, or -ENOMEM.
 */
static int hci_explicit_conn_params_set(struct hci_dev *hdev,
					bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	if (is_connected(hdev, addr, addr_type))
		return -EISCONN;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (!params) {
		params = hci_conn_params_add(hdev, addr, addr_type);
		if (!params)
			return -ENOMEM;

		/* If we created new params, mark them to be deleted in
		 * hci_connect_le_scan_cleanup. It's different case than
		 * existing disabled params, those will stay after cleanup.
		 */
		params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
	}

	/* We're trying to connect, so make sure params are at pend_le_conns */
	if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
	    params->auto_connect == HCI_AUTO_CONN_REPORT ||
	    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
		hci_pend_le_list_del_init(params);
		hci_pend_le_list_add(params, &hdev->pend_le_conns);
	}

	params->explicit_connect = true;

	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
	       params->auto_connect);

	return 0;
}
1436 
1437 static int qos_set_big(struct hci_dev *hdev, struct bt_iso_qos *qos)
1438 {
1439 	struct hci_conn *conn;
1440 	u8  big;
1441 
1442 	/* Allocate a BIG if not set */
1443 	if (qos->bcast.big == BT_ISO_QOS_BIG_UNSET) {
1444 		for (big = 0x00; big < 0xef; big++) {
1445 
1446 			conn = hci_conn_hash_lookup_big(hdev, big);
1447 			if (!conn)
1448 				break;
1449 		}
1450 
1451 		if (big == 0xef)
1452 			return -EADDRNOTAVAIL;
1453 
1454 		/* Update BIG */
1455 		qos->bcast.big = big;
1456 	}
1457 
1458 	return 0;
1459 }
1460 
1461 static int qos_set_bis(struct hci_dev *hdev, struct bt_iso_qos *qos)
1462 {
1463 	struct hci_conn *conn;
1464 	u8  bis;
1465 
1466 	/* Allocate BIS if not set */
1467 	if (qos->bcast.bis == BT_ISO_QOS_BIS_UNSET) {
1468 		/* Find an unused adv set to advertise BIS, skip instance 0x00
1469 		 * since it is reserved as general purpose set.
1470 		 */
1471 		for (bis = 0x01; bis < hdev->le_num_of_adv_sets;
1472 		     bis++) {
1473 
1474 			conn = hci_conn_hash_lookup_bis(hdev, BDADDR_ANY, bis);
1475 			if (!conn)
1476 				break;
1477 		}
1478 
1479 		if (bis == hdev->le_num_of_adv_sets)
1480 			return -EADDRNOTAVAIL;
1481 
1482 		/* Update BIS */
1483 		qos->bcast.bis = bis;
1484 	}
1485 
1486 	return 0;
1487 }
1488 
/* This function requires the caller holds hdev->lock.
 *
 * Allocate a BIG/BIS and create the broadcaster-side hci_conn for it.
 * Returns a held hci_conn in BT_CONNECT state, or an ERR_PTR() on LE
 * being unavailable, handle exhaustion, or conflicting BIG settings.
 */
static struct hci_conn *hci_add_bis(struct hci_dev *hdev, bdaddr_t *dst,
				    struct bt_iso_qos *qos, __u8 base_len,
				    __u8 *base)
{
	struct hci_conn *conn;
	int err;

	/* Let's make sure that le is enabled.*/
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (lmp_le_capable(hdev))
			return ERR_PTR(-ECONNREFUSED);
		return ERR_PTR(-EOPNOTSUPP);
	}

	err = qos_set_big(hdev, qos);
	if (err)
		return err;

	err = qos_set_bis(hdev, qos);
	if (err)
		return ERR_PTR(err);

	/* Check if the LE Create BIG command has already been sent
	 *
	 * NOTE(review): qos->bcast.big is passed for BOTH the big and bis
	 * arguments here - verify whether the last argument was meant to be
	 * qos->bcast.bis.
	 */
	conn = hci_conn_hash_lookup_per_adv_bis(hdev, dst, qos->bcast.big,
						qos->bcast.big);
	if (conn)
		return ERR_PTR(-EADDRINUSE);

	/* Check BIS settings against other bound BISes, since all
	 * BISes in a BIG must have the same value for all parameters
	 */
	conn = hci_conn_hash_lookup_big(hdev, qos->bcast.big);

	if (conn && (memcmp(qos, &conn->iso_qos, sizeof(*qos)) ||
		     base_len != conn->le_per_adv_data_len ||
		     memcmp(conn->le_per_adv_data, base, base_len)))
		return ERR_PTR(-EADDRINUSE);

	conn = hci_conn_add(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
	if (!conn)
		return ERR_PTR(-ENOMEM);

	conn->state = BT_CONNECT;

	hci_conn_hold(conn);
	return conn;
}
1537 
/* This function requires the caller holds hdev->lock.
 *
 * Initiate a background LE connection to @dst: register explicit connect
 * params and rely on passive scanning to trigger the actual connection
 * once the device is seen.  Returns a held hci_conn or an ERR_PTR().
 */
struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
				     u8 dst_type, u8 sec_level,
				     u16 conn_timeout,
				     enum conn_reasons conn_reason)
{
	struct hci_conn *conn;

	/* Let's make sure that le is enabled.*/
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (lmp_le_capable(hdev))
			return ERR_PTR(-ECONNREFUSED);

		return ERR_PTR(-EOPNOTSUPP);
	}

	/* Some devices send ATT messages as soon as the physical link is
	 * established. To be able to handle these ATT messages, the user-
	 * space first establishes the connection and then starts the pairing
	 * process.
	 *
	 * So if a hci_conn object already exists for the following connection
	 * attempt, we simply update pending_sec_level and auth_type fields
	 * and return the object found.
	 */
	conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
	if (conn) {
		if (conn->pending_sec_level < sec_level)
			conn->pending_sec_level = sec_level;
		goto done;
	}

	BT_DBG("requesting refresh of dst_addr");

	conn = hci_conn_add(hdev, LE_LINK, dst, HCI_ROLE_MASTER);
	if (!conn)
		return ERR_PTR(-ENOMEM);

	if (hci_explicit_conn_params_set(hdev, dst, dst_type) < 0) {
		hci_conn_del(conn);
		return ERR_PTR(-EBUSY);
	}

	conn->state = BT_CONNECT;
	set_bit(HCI_CONN_SCANNING, &conn->flags);
	conn->dst_type = dst_type;
	conn->sec_level = BT_SECURITY_LOW;
	conn->pending_sec_level = sec_level;
	conn->conn_timeout = conn_timeout;
	conn->conn_reason = conn_reason;

	/* Make sure passive scanning picks up the new pending connection */
	hci_update_passive_scan(hdev);

done:
	hci_conn_hold(conn);
	return conn;
}
1595 
/* Create (or reuse) an ACL connection to @dst.
 *
 * An existing hci_conn is reused; a fresh connection request is issued
 * only when the connection is in BT_OPEN or BT_CLOSED state.  Returns a
 * held hci_conn or an ERR_PTR() when BR/EDR is unavailable or allocation
 * fails.
 */
struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
				 u8 sec_level, u8 auth_type,
				 enum conn_reasons conn_reason)
{
	struct hci_conn *acl;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (lmp_bredr_capable(hdev))
			return ERR_PTR(-ECONNREFUSED);

		return ERR_PTR(-EOPNOTSUPP);
	}

	acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
	if (!acl) {
		acl = hci_conn_add(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
		if (!acl)
			return ERR_PTR(-ENOMEM);
	}

	hci_conn_hold(acl);

	acl->conn_reason = conn_reason;
	if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
		acl->sec_level = BT_SECURITY_LOW;
		acl->pending_sec_level = sec_level;
		acl->auth_type = auth_type;
		hci_acl_create_connection(acl);
	}

	return acl;
}
1628 
/* Link child @conn to @parent (e.g. SCO/CIS on top of an ACL).
 *
 * Takes a hci_conn_hold() on @conn and a hci_conn_get() on @parent; both
 * are released by hci_conn_unlink().  Returns the existing link if @conn
 * is already linked, NULL if @conn already has a different parent or on
 * allocation failure.
 */
static struct hci_link *hci_conn_link(struct hci_conn *parent,
				      struct hci_conn *conn)
{
	struct hci_dev *hdev = parent->hdev;
	struct hci_link *link;

	bt_dev_dbg(hdev, "parent %p hcon %p", parent, conn);

	if (conn->link)
		return conn->link;

	if (conn->parent)
		return NULL;

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return NULL;

	link->conn = hci_conn_hold(conn);
	conn->link = link;
	conn->parent = hci_conn_get(parent);

	/* Use list_add_tail_rcu append to the list */
	list_add_tail_rcu(&link->list, &parent->link_list);

	return link;
}
1656 
/* Establish a SCO/eSCO connection (@type) to @dst on top of an ACL link.
 *
 * The ACL is created (or reused) first, then the SCO connection is linked
 * to it as a child.  When the ACL is already connected the SCO setup is
 * started immediately unless a mode change is pending, in which case it is
 * deferred via HCI_CONN_SCO_SETUP_PEND.  Returns the SCO hci_conn or an
 * ERR_PTR().
 */
struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
				 __u16 setting, struct bt_codec *codec)
{
	struct hci_conn *acl;
	struct hci_conn *sco;
	struct hci_link *link;

	acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING,
			      CONN_REASON_SCO_CONNECT);
	if (IS_ERR(acl))
		return acl;

	sco = hci_conn_hash_lookup_ba(hdev, type, dst);
	if (!sco) {
		sco = hci_conn_add(hdev, type, dst, HCI_ROLE_MASTER);
		if (!sco) {
			hci_conn_drop(acl);
			return ERR_PTR(-ENOMEM);
		}
	}

	link = hci_conn_link(acl, sco);
	if (!link) {
		hci_conn_drop(acl);
		hci_conn_drop(sco);
		return ERR_PTR(-ENOLINK);
	}

	sco->setting = setting;
	sco->codec = *codec;

	if (acl->state == BT_CONNECTED &&
	    (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
		set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
		hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);

		if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
			/* defer SCO setup until mode change completed */
			set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
			return sco;
		}

		hci_sco_setup(acl, 0x00);
	}

	return sco;
}
1704 
/* Send HCI_OP_LE_CREATE_BIG for @conn using the broadcast parameters in
 * @qos.  The number of BISes is taken from the bound ISO connections that
 * share the same BIG.
 */
static int hci_le_create_big(struct hci_conn *conn, struct bt_iso_qos *qos)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_le_create_big cp;
	struct iso_list_data data;

	memset(&cp, 0, sizeof(cp));

	data.big = qos->bcast.big;
	data.bis = qos->bcast.bis;
	data.count = 0;

	/* Create a BIS for each bound connection */
	hci_conn_hash_list_state(hdev, bis_list, ISO_LINK,
				 BT_BOUND, &data);

	cp.handle = qos->bcast.big;
	cp.adv_handle = qos->bcast.bis;
	cp.num_bis  = data.count;
	hci_cpu_to_le24(qos->bcast.out.interval, cp.bis.sdu_interval);
	cp.bis.sdu = cpu_to_le16(qos->bcast.out.sdu);
	cp.bis.latency =  cpu_to_le16(qos->bcast.out.latency);
	cp.bis.rtn  = qos->bcast.out.rtn;
	cp.bis.phy  = qos->bcast.out.phy;
	cp.bis.packing = qos->bcast.packing;
	cp.bis.framing = qos->bcast.framing;
	cp.bis.encryption = qos->bcast.encryption;
	memcpy(cp.bis.bcode, qos->bcast.bcode, sizeof(cp.bis.bcode));

	return hci_send_cmd(hdev, HCI_OP_LE_CREATE_BIG, sizeof(cp), &cp);
}
1736 
/* hci_cmd_sync work: (re)program all CIS(s) of the CIG whose id is encoded
 * in @data with ERR_PTR() by sending HCI_OP_LE_SET_CIG_PARAMS.  Returns 0
 * when the CIG no longer has any connections.
 */
static int set_cig_params_sync(struct hci_dev *hdev, void *data)
{
	u8 cig_id = PTR_ERR(data);
	struct hci_conn *conn;
	struct bt_iso_qos *qos;
	struct iso_cig_params pdu;
	u8 cis_id;

	conn = hci_conn_hash_lookup_cig(hdev, cig_id);
	if (!conn)
		return 0;

	memset(&pdu, 0, sizeof(pdu));

	/* CIG-wide parameters come from the first connection found. */
	qos = &conn->iso_qos;
	pdu.cp.cig_id = cig_id;
	hci_cpu_to_le24(qos->ucast.out.interval, pdu.cp.c_interval);
	hci_cpu_to_le24(qos->ucast.in.interval, pdu.cp.p_interval);
	pdu.cp.sca = qos->ucast.sca;
	pdu.cp.packing = qos->ucast.packing;
	pdu.cp.framing = qos->ucast.framing;
	pdu.cp.c_latency = cpu_to_le16(qos->ucast.out.latency);
	pdu.cp.p_latency = cpu_to_le16(qos->ucast.in.latency);

	/* Reprogram all CIS(s) with the same CIG, valid range are:
	 * num_cis: 0x00 to 0x1F
	 * cis_id: 0x00 to 0xEF
	 */
	for (cis_id = 0x00; cis_id < 0xf0 &&
	     pdu.cp.num_cis < ARRAY_SIZE(pdu.cis); cis_id++) {
		struct hci_cis_params *cis;

		conn = hci_conn_hash_lookup_cis(hdev, NULL, 0, cig_id, cis_id);
		if (!conn)
			continue;

		qos = &conn->iso_qos;

		cis = &pdu.cis[pdu.cp.num_cis++];
		cis->cis_id = cis_id;
		cis->c_sdu  = cpu_to_le16(conn->iso_qos.ucast.out.sdu);
		cis->p_sdu  = cpu_to_le16(conn->iso_qos.ucast.in.sdu);
		/* Fall back to the opposite direction's PHY when unset. */
		cis->c_phy  = qos->ucast.out.phy ? qos->ucast.out.phy :
			      qos->ucast.in.phy;
		cis->p_phy  = qos->ucast.in.phy ? qos->ucast.in.phy :
			      qos->ucast.out.phy;
		cis->c_rtn  = qos->ucast.out.rtn;
		cis->p_rtn  = qos->ucast.in.rtn;
	}

	if (!pdu.cp.num_cis)
		return 0;

	/* Only the populated CIS entries are sent on the wire. */
	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_CIG_PARAMS,
				     sizeof(pdu.cp) +
				     pdu.cp.num_cis * sizeof(pdu.cis[0]), &pdu,
				     HCI_CMD_TIMEOUT);
}
1795 
/* Allocate CIG/CIS identifiers for @conn (when left unset in @qos) and
 * queue the Set CIG Parameters command.  Returns false when no id can be
 * allocated, the requested CIS is already taken, or queueing fails.
 */
static bool hci_le_set_cig_params(struct hci_conn *conn, struct bt_iso_qos *qos)
{
	struct hci_dev *hdev = conn->hdev;
	struct iso_list_data data;

	memset(&data, 0, sizeof(data));

	/* Allocate first still reconfigurable CIG if not set */
	if (qos->ucast.cig == BT_ISO_QOS_CIG_UNSET) {
		for (data.cig = 0x00; data.cig < 0xf0; data.cig++) {
			data.count = 0;

			/* A CIG with connecting or connected CIS(s) can no
			 * longer be reconfigured - skip it.
			 */
			hci_conn_hash_list_state(hdev, find_cis, ISO_LINK,
						 BT_CONNECT, &data);
			if (data.count)
				continue;

			hci_conn_hash_list_state(hdev, find_cis, ISO_LINK,
						 BT_CONNECTED, &data);
			if (!data.count)
				break;
		}

		if (data.cig == 0xf0)
			return false;

		/* Update CIG */
		qos->ucast.cig = data.cig;
	}

	if (qos->ucast.cis != BT_ISO_QOS_CIS_UNSET) {
		if (hci_conn_hash_lookup_cis(hdev, NULL, 0, qos->ucast.cig,
					     qos->ucast.cis))
			return false;
		goto done;
	}

	/* Allocate first available CIS if not set */
	for (data.cig = qos->ucast.cig, data.cis = 0x00; data.cis < 0xf0;
	     data.cis++) {
		if (!hci_conn_hash_lookup_cis(hdev, NULL, 0, data.cig,
					      data.cis)) {
			/* Update CIS */
			qos->ucast.cis = data.cis;
			break;
		}
	}

	if (qos->ucast.cis == BT_ISO_QOS_CIS_UNSET)
		return false;

done:
	/* The CIG id (not a pointer) travels through the sync queue. */
	if (hci_cmd_sync_queue(hdev, set_cig_params_sync,
			       ERR_PTR(qos->ucast.cig), NULL) < 0)
		return false;

	return true;
}
1854 
/* Bind (or rebind) a CIS connection to @dst with the given unicast @qos.
 *
 * Reuses an existing CIS with the same CIG/CIS ids when present; missing
 * per-direction interval/latency values are mirrored from the opposite
 * direction since the spec forbids zero there.  Returns a held hci_conn
 * in BT_BOUND (or BT_CONNECTED) state, or an ERR_PTR().
 */
struct hci_conn *hci_bind_cis(struct hci_dev *hdev, bdaddr_t *dst,
			      __u8 dst_type, struct bt_iso_qos *qos)
{
	struct hci_conn *cis;

	cis = hci_conn_hash_lookup_cis(hdev, dst, dst_type, qos->ucast.cig,
				       qos->ucast.cis);
	if (!cis) {
		cis = hci_conn_add(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
		if (!cis)
			return ERR_PTR(-ENOMEM);
		cis->cleanup = cis_cleanup;
		cis->dst_type = dst_type;
	}

	if (cis->state == BT_CONNECTED)
		return cis;

	/* Check if CIS has been set and the settings matches */
	if (cis->state == BT_BOUND &&
	    !memcmp(&cis->iso_qos, qos, sizeof(*qos)))
		return cis;

	/* Update LINK PHYs according to QoS preference */
	cis->le_tx_phy = qos->ucast.out.phy;
	cis->le_rx_phy = qos->ucast.in.phy;

	/* If output interval is not set use the input interval as it cannot be
	 * 0x000000.
	 */
	if (!qos->ucast.out.interval)
		qos->ucast.out.interval = qos->ucast.in.interval;

	/* If input interval is not set use the output interval as it cannot be
	 * 0x000000.
	 */
	if (!qos->ucast.in.interval)
		qos->ucast.in.interval = qos->ucast.out.interval;

	/* If output latency is not set use the input latency as it cannot be
	 * 0x0000.
	 */
	if (!qos->ucast.out.latency)
		qos->ucast.out.latency = qos->ucast.in.latency;

	/* If input latency is not set use the output latency as it cannot be
	 * 0x0000.
	 */
	if (!qos->ucast.in.latency)
		qos->ucast.in.latency = qos->ucast.out.latency;

	if (!hci_le_set_cig_params(cis, qos)) {
		hci_conn_drop(cis);
		return ERR_PTR(-EINVAL);
	}

	hci_conn_hold(cis);

	cis->iso_qos = *qos;
	cis->state = BT_BOUND;

	return cis;
}
1918 
1919 bool hci_iso_setup_path(struct hci_conn *conn)
1920 {
1921 	struct hci_dev *hdev = conn->hdev;
1922 	struct hci_cp_le_setup_iso_path cmd;
1923 
1924 	memset(&cmd, 0, sizeof(cmd));
1925 
1926 	if (conn->iso_qos.ucast.out.sdu) {
1927 		cmd.handle = cpu_to_le16(conn->handle);
1928 		cmd.direction = 0x00; /* Input (Host to Controller) */
1929 		cmd.path = 0x00; /* HCI path if enabled */
1930 		cmd.codec = 0x03; /* Transparent Data */
1931 
1932 		if (hci_send_cmd(hdev, HCI_OP_LE_SETUP_ISO_PATH, sizeof(cmd),
1933 				 &cmd) < 0)
1934 			return false;
1935 	}
1936 
1937 	if (conn->iso_qos.ucast.in.sdu) {
1938 		cmd.handle = cpu_to_le16(conn->handle);
1939 		cmd.direction = 0x01; /* Output (Controller to Host) */
1940 		cmd.path = 0x00; /* HCI path if enabled */
1941 		cmd.codec = 0x03; /* Transparent Data */
1942 
1943 		if (hci_send_cmd(hdev, HCI_OP_LE_SETUP_ISO_PATH, sizeof(cmd),
1944 				 &cmd) < 0)
1945 			return false;
1946 	}
1947 
1948 	return true;
1949 }
1950 
1951 int hci_conn_check_create_cis(struct hci_conn *conn)
1952 {
1953 	if (conn->type != ISO_LINK || !bacmp(&conn->dst, BDADDR_ANY))
1954 		return -EINVAL;
1955 
1956 	if (!conn->parent || conn->parent->state != BT_CONNECTED ||
1957 	    conn->state != BT_CONNECT || HCI_CONN_HANDLE_UNSET(conn->handle))
1958 		return 1;
1959 
1960 	return 0;
1961 }
1962 
/* hci_cmd_sync work wrapper: issue LE Create CIS for all pending CIS(s). */
static int hci_create_cis_sync(struct hci_dev *hdev, void *data)
{
	return hci_le_create_cis_sync(hdev);
}
1967 
/* Queue a Create CIS request if any CIS is ready for it.
 *
 * Returns -EBUSY when a Create CIS is already in flight (some connection
 * has HCI_CONN_CREATE_CIS set), 0 when nothing is pending, otherwise the
 * result of queueing hci_create_cis_sync().
 */
int hci_le_create_cis_pending(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	bool pending = false;

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_CREATE_CIS, &conn->flags)) {
			rcu_read_unlock();
			return -EBUSY;
		}

		/* 0 from the check means this CIS is ready for Create CIS */
		if (!hci_conn_check_create_cis(conn))
			pending = true;
	}

	rcu_read_unlock();

	if (!pending)
		return 0;

	/* Queue Create CIS */
	return hci_cmd_sync_queue(hdev, hci_create_cis_sync, NULL, NULL);
}
1993 
1994 static void hci_iso_qos_setup(struct hci_dev *hdev, struct hci_conn *conn,
1995 			      struct bt_iso_io_qos *qos, __u8 phy)
1996 {
1997 	/* Only set MTU if PHY is enabled */
1998 	if (!qos->sdu && qos->phy) {
1999 		if (hdev->iso_mtu > 0)
2000 			qos->sdu = hdev->iso_mtu;
2001 		else if (hdev->le_mtu > 0)
2002 			qos->sdu = hdev->le_mtu;
2003 		else
2004 			qos->sdu = hdev->acl_mtu;
2005 	}
2006 
2007 	/* Use the same PHY as ACL if set to any */
2008 	if (qos->phy == BT_ISO_PHY_ANY)
2009 		qos->phy = phy;
2010 
2011 	/* Use LE ACL connection interval if not set */
2012 	if (!qos->interval)
2013 		/* ACL interval unit in 1.25 ms to us */
2014 		qos->interval = conn->le_conn_interval * 1250;
2015 
2016 	/* Use LE ACL connection latency if not set */
2017 	if (!qos->latency)
2018 		qos->latency = conn->le_conn_latency;
2019 }
2020 
/* hci_cmd_sync work: start periodic advertising carrying the BASE and then
 * issue LE Create BIG for the hci_conn passed in @data.
 */
static int create_big_sync(struct hci_dev *hdev, void *data)
{
	struct hci_conn *conn = data;
	struct bt_iso_qos *qos = &conn->iso_qos;
	u16 interval, sync_interval = 0;
	u32 flags = 0;
	int err;

	if (qos->bcast.out.phy == 0x02)
		flags |= MGMT_ADV_FLAG_SEC_2M;

	/* Align intervals */
	interval = (qos->bcast.out.interval / 1250) * qos->bcast.sync_factor;

	if (qos->bcast.bis)
		sync_interval = interval * 4;

	err = hci_start_per_adv_sync(hdev, qos->bcast.bis, conn->le_per_adv_data_len,
				     conn->le_per_adv_data, flags, interval,
				     interval, sync_interval);
	if (err)
		return err;

	return hci_le_create_big(conn, &conn->iso_qos);
}
2046 
/* Completion callback for create_pa_sync(): log a failure and free the
 * command parameters allocated in hci_pa_create_sync().
 */
static void create_pa_complete(struct hci_dev *hdev, void *data, int err)
{
	struct hci_cp_le_pa_create_sync *cp = data;

	bt_dev_dbg(hdev, "");

	if (err)
		bt_dev_err(hdev, "Unable to create PA: %d", err);

	kfree(cp);
}
2058 
/* hci_cmd_sync work: send LE PA Create Sync with the parameters in @data
 * and update passive scanning on success.  Clears HCI_PA_SYNC on failure
 * so another attempt can be made.
 */
static int create_pa_sync(struct hci_dev *hdev, void *data)
{
	struct hci_cp_le_pa_create_sync *cp = data;
	int err;

	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_CREATE_SYNC,
				    sizeof(*cp), cp, HCI_CMD_TIMEOUT);
	if (err) {
		hci_dev_clear_flag(hdev, HCI_PA_SYNC);
		return err;
	}

	return hci_update_passive_scan_sync(hdev);
}
2073 
/* Synchronize to the periodic advertising train of @dst (advertising SID
 * @sid).  Only one PA sync may be pending at a time, guarded by the
 * HCI_PA_SYNC flag.  The allocated parameters are freed by
 * create_pa_complete().  Returns 0 or a negative error (-EBUSY, -ENOMEM).
 */
int hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst, __u8 dst_type,
		       __u8 sid, struct bt_iso_qos *qos)
{
	struct hci_cp_le_pa_create_sync *cp;

	if (hci_dev_test_and_set_flag(hdev, HCI_PA_SYNC))
		return -EBUSY;

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp) {
		hci_dev_clear_flag(hdev, HCI_PA_SYNC);
		return -ENOMEM;
	}

	cp->options = qos->bcast.options;
	cp->sid = sid;
	cp->addr_type = dst_type;
	bacpy(&cp->addr, dst);
	cp->skip = cpu_to_le16(qos->bcast.skip);
	cp->sync_timeout = cpu_to_le16(qos->bcast.sync_timeout);
	cp->sync_cte_type = qos->bcast.sync_cte_type;

	/* Queue start pa_create_sync and scan */
	return hci_cmd_sync_queue(hdev, create_pa_sync, cp, create_pa_complete);
}
2099 
/* Send LE_BIG_CREATE_SYNC to synchronize to up to 0x11 BISes carried
 * by the periodic advertising train identified by @sync_handle.
 *
 * Returns 0 on success, -EINVAL when @num_bis exceeds the command's
 * BIS capacity, or a negative error from qos_set_big()/hci_send_cmd().
 */
int hci_le_big_create_sync(struct hci_dev *hdev, struct bt_iso_qos *qos,
			   __u16 sync_handle, __u8 num_bis, __u8 bis[])
{
	/* Fixed command parameters followed by the variable-length list
	 * of BIS indices.
	 */
	struct _packed {
		struct hci_cp_le_big_create_sync cp;
		__u8  bis[0x11];
	} pdu;
	int err;

	/* The command carries at most 0x11 BIS indices */
	if (num_bis > sizeof(pdu.bis))
		return -EINVAL;

	/* NOTE(review): qos_set_big presumably assigns/validates
	 * qos->bcast.big — confirm against its definition.
	 */
	err = qos_set_big(hdev, qos);
	if (err)
		return err;

	memset(&pdu, 0, sizeof(pdu));
	pdu.cp.handle = qos->bcast.big;
	pdu.cp.sync_handle = cpu_to_le16(sync_handle);
	pdu.cp.encryption = qos->bcast.encryption;
	memcpy(pdu.cp.bcode, qos->bcast.bcode, sizeof(pdu.cp.bcode));
	pdu.cp.mse = qos->bcast.mse;
	pdu.cp.timeout = cpu_to_le16(qos->bcast.timeout);
	pdu.cp.num_bis = num_bis;
	memcpy(pdu.bis, bis, num_bis);

	/* Only the used portion of the BIS array is transmitted */
	return hci_send_cmd(hdev, HCI_OP_LE_BIG_CREATE_SYNC,
			    sizeof(pdu.cp) + num_bis, &pdu);
}
2129 
/* Completion callback for the queued create-BIG procedure.
 *
 * On failure, notify the upper layer and delete the connection; on
 * success there is nothing to do here.
 */
static void create_big_complete(struct hci_dev *hdev, void *data, int err)
{
	struct hci_conn *conn = data;

	bt_dev_dbg(hdev, "conn %p", conn);

	if (!err)
		return;

	bt_dev_err(hdev, "Unable to create BIG: %d", err);

	/* Propagate the failure up the stack and tear the conn down */
	hci_connect_cfm(conn, err);
	hci_conn_del(conn);
}
2142 
2143 struct hci_conn *hci_bind_bis(struct hci_dev *hdev, bdaddr_t *dst,
2144 			      struct bt_iso_qos *qos,
2145 			      __u8 base_len, __u8 *base)
2146 {
2147 	struct hci_conn *conn;
2148 	__u8 eir[HCI_MAX_PER_AD_LENGTH];
2149 
2150 	if (base_len && base)
2151 		base_len = eir_append_service_data(eir, 0,  0x1851,
2152 						   base, base_len);
2153 
2154 	/* We need hci_conn object using the BDADDR_ANY as dst */
2155 	conn = hci_add_bis(hdev, dst, qos, base_len, eir);
2156 	if (IS_ERR(conn))
2157 		return conn;
2158 
2159 	/* Update LINK PHYs according to QoS preference */
2160 	conn->le_tx_phy = qos->bcast.out.phy;
2161 	conn->le_tx_phy = qos->bcast.out.phy;
2162 
2163 	/* Add Basic Announcement into Peridic Adv Data if BASE is set */
2164 	if (base_len && base) {
2165 		memcpy(conn->le_per_adv_data,  eir, sizeof(eir));
2166 		conn->le_per_adv_data_len = base_len;
2167 	}
2168 
2169 	hci_iso_qos_setup(hdev, conn, &qos->bcast.out,
2170 			  conn->le_tx_phy ? conn->le_tx_phy :
2171 			  hdev->le_tx_def_phys);
2172 
2173 	conn->iso_qos = *qos;
2174 	conn->state = BT_BOUND;
2175 
2176 	return conn;
2177 }
2178 
2179 static void bis_mark_per_adv(struct hci_conn *conn, void *data)
2180 {
2181 	struct iso_list_data *d = data;
2182 
2183 	/* Skip if not broadcast/ANY address */
2184 	if (bacmp(&conn->dst, BDADDR_ANY))
2185 		return;
2186 
2187 	if (d->big != conn->iso_qos.bcast.big ||
2188 	    d->bis == BT_ISO_QOS_BIS_UNSET ||
2189 	    d->bis != conn->iso_qos.bcast.bis)
2190 		return;
2191 
2192 	set_bit(HCI_CONN_PER_ADV, &conn->flags);
2193 }
2194 
/* Bind a BIS connection and queue the commands that start periodic
 * advertising and create the BIG.
 *
 * Returns the connection on success or an ERR_PTR on failure.
 */
struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst,
				 __u8 dst_type, struct bt_iso_qos *qos,
				 __u8 base_len, __u8 *base)
{
	struct hci_conn *conn;
	int err;
	struct iso_list_data data;

	conn = hci_bind_bis(hdev, dst, qos, base_len, base);
	if (IS_ERR(conn))
		return conn;

	data.big = qos->bcast.big;
	data.bis = qos->bcast.bis;

	/* Set HCI_CONN_PER_ADV for all bound connections, to mark that
	 * the start periodic advertising and create BIG commands have
	 * been queued
	 */
	hci_conn_hash_list_state(hdev, bis_mark_per_adv, ISO_LINK,
				 BT_BOUND, &data);

	/* Queue start periodic advertising and create BIG */
	err = hci_cmd_sync_queue(hdev, create_big_sync, conn,
				 create_big_complete);
	if (err < 0) {
		/* Queueing failed: release the reference taken above */
		hci_conn_drop(conn);
		return ERR_PTR(err);
	}

	return conn;
}
2227 
/* Connect an isochronous (CIS) channel to a remote device.
 *
 * First establishes the underlying LE ACL connection -- as slave via
 * direct connect when the device is advertising, otherwise via the
 * connect-by-scanning path -- then binds a CIS on top of it and links
 * the two connections.
 *
 * Returns the CIS connection or an ERR_PTR on failure.
 */
struct hci_conn *hci_connect_cis(struct hci_dev *hdev, bdaddr_t *dst,
				 __u8 dst_type, struct bt_iso_qos *qos)
{
	struct hci_conn *le;
	struct hci_conn *cis;
	struct hci_link *link;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		le = hci_connect_le(hdev, dst, dst_type, false,
				    BT_SECURITY_LOW,
				    HCI_LE_CONN_TIMEOUT,
				    HCI_ROLE_SLAVE);
	else
		le = hci_connect_le_scan(hdev, dst, dst_type,
					 BT_SECURITY_LOW,
					 HCI_LE_CONN_TIMEOUT,
					 CONN_REASON_ISO_CONNECT);
	if (IS_ERR(le))
		return le;

	/* Apply QoS PHY preferences, falling back to the controller's
	 * default PHYs when the link has none negotiated yet.
	 */
	hci_iso_qos_setup(hdev, le, &qos->ucast.out,
			  le->le_tx_phy ? le->le_tx_phy : hdev->le_tx_def_phys);
	hci_iso_qos_setup(hdev, le, &qos->ucast.in,
			  le->le_rx_phy ? le->le_rx_phy : hdev->le_rx_def_phys);

	cis = hci_bind_cis(hdev, dst, dst_type, qos);
	if (IS_ERR(cis)) {
		hci_conn_drop(le);
		return cis;
	}

	link = hci_conn_link(le, cis);
	if (!link) {
		hci_conn_drop(le);
		hci_conn_drop(cis);
		return ERR_PTR(-ENOLINK);
	}

	/* Link takes the refcount */
	hci_conn_drop(cis);

	cis->state = BT_CONNECT;

	hci_le_create_cis_pending(hdev);

	return cis;
}
2275 
2276 /* Check link security requirement */
2277 int hci_conn_check_link_mode(struct hci_conn *conn)
2278 {
2279 	BT_DBG("hcon %p", conn);
2280 
2281 	/* In Secure Connections Only mode, it is required that Secure
2282 	 * Connections is used and the link is encrypted with AES-CCM
2283 	 * using a P-256 authenticated combination key.
2284 	 */
2285 	if (hci_dev_test_flag(conn->hdev, HCI_SC_ONLY)) {
2286 		if (!hci_conn_sc_enabled(conn) ||
2287 		    !test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
2288 		    conn->key_type != HCI_LK_AUTH_COMBINATION_P256)
2289 			return 0;
2290 	}
2291 
2292 	 /* AES encryption is required for Level 4:
2293 	  *
2294 	  * BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 3, Part C
2295 	  * page 1319:
2296 	  *
2297 	  * 128-bit equivalent strength for link and encryption keys
2298 	  * required using FIPS approved algorithms (E0 not allowed,
2299 	  * SAFER+ not allowed, and P-192 not allowed; encryption key
2300 	  * not shortened)
2301 	  */
2302 	if (conn->sec_level == BT_SECURITY_FIPS &&
2303 	    !test_bit(HCI_CONN_AES_CCM, &conn->flags)) {
2304 		bt_dev_err(conn->hdev,
2305 			   "Invalid security: Missing AES-CCM usage");
2306 		return 0;
2307 	}
2308 
2309 	if (hci_conn_ssp_enabled(conn) &&
2310 	    !test_bit(HCI_CONN_ENCRYPT, &conn->flags))
2311 		return 0;
2312 
2313 	return 1;
2314 }
2315 
/* Authenticate remote device.
 *
 * Returns 1 if the link is already authenticated at a sufficient
 * security level, 0 if authentication was started or is pending.
 */
static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
	BT_DBG("hcon %p", conn);

	/* Never lower an already pending security level request */
	if (conn->pending_sec_level > sec_level)
		sec_level = conn->pending_sec_level;

	if (sec_level > conn->sec_level)
		conn->pending_sec_level = sec_level;
	else if (test_bit(HCI_CONN_AUTH, &conn->flags))
		return 1;

	/* Make sure we preserve an existing MITM requirement */
	auth_type |= (conn->auth_type & 0x01);

	conn->auth_type = auth_type;

	/* Only issue HCI_Authentication_Requested once */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(cp), &cp);

		/* If we're already encrypted set the REAUTH_PEND flag,
		 * otherwise set the ENCRYPT_PEND.
		 */
		if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
			set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
		else
			set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
	}

	return 0;
}
2352 
2353 /* Encrypt the link */
2354 static void hci_conn_encrypt(struct hci_conn *conn)
2355 {
2356 	BT_DBG("hcon %p", conn);
2357 
2358 	if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
2359 		struct hci_cp_set_conn_encrypt cp;
2360 		cp.handle  = cpu_to_le16(conn->handle);
2361 		cp.encrypt = 0x01;
2362 		hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2363 			     &cp);
2364 	}
2365 }
2366 
/* Enable security for a connection.
 *
 * Returns 1 when the requested security level is already satisfied,
 * 0 when authentication and/or encryption has been initiated (the
 * caller must wait for the corresponding completion events).
 */
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
		      bool initiator)
{
	BT_DBG("hcon %p", conn);

	/* LE links are secured via SMP, not BR/EDR link-level auth */
	if (conn->type == LE_LINK)
		return smp_conn_security(conn, sec_level);

	/* For sdp we don't need the link key. */
	if (sec_level == BT_SECURITY_SDP)
		return 1;

	/* For non 2.1 devices and low security level we don't need the link
	   key. */
	if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
		return 1;

	/* For other security levels we need the link key. */
	if (!test_bit(HCI_CONN_AUTH, &conn->flags))
		goto auth;

	/* An authenticated FIPS approved combination key has sufficient
	 * security for security level 4. */
	if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256 &&
	    sec_level == BT_SECURITY_FIPS)
		goto encrypt;

	/* An authenticated combination key has sufficient security for
	   security level 3. */
	if ((conn->key_type == HCI_LK_AUTH_COMBINATION_P192 ||
	     conn->key_type == HCI_LK_AUTH_COMBINATION_P256) &&
	    sec_level == BT_SECURITY_HIGH)
		goto encrypt;

	/* An unauthenticated combination key has sufficient security for
	   security level 1 and 2. */
	if ((conn->key_type == HCI_LK_UNAUTH_COMBINATION_P192 ||
	     conn->key_type == HCI_LK_UNAUTH_COMBINATION_P256) &&
	    (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW))
		goto encrypt;

	/* A combination key has always sufficient security for the security
	   levels 1 or 2. High security level requires the combination key
	   is generated using maximum PIN code length (16).
	   For pre 2.1 units. */
	if (conn->key_type == HCI_LK_COMBINATION &&
	    (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW ||
	     conn->pin_length == 16))
		goto encrypt;

auth:
	/* Don't start authentication while encryption setup is pending */
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
		return 0;

	if (initiator)
		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

	if (!hci_conn_auth(conn, sec_level, auth_type))
		return 0;

encrypt:
	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags)) {
		/* Ensure that the encryption key size has been read,
		 * otherwise stall the upper layer responses.
		 */
		if (!conn->enc_key_size)
			return 0;

		/* Nothing else needed, all requirements are met */
		return 1;
	}

	hci_conn_encrypt(conn);
	return 0;
}
EXPORT_SYMBOL(hci_conn_security);
2444 
2445 /* Check secure link requirement */
2446 int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
2447 {
2448 	BT_DBG("hcon %p", conn);
2449 
2450 	/* Accept if non-secure or higher security level is required */
2451 	if (sec_level != BT_SECURITY_HIGH && sec_level != BT_SECURITY_FIPS)
2452 		return 1;
2453 
2454 	/* Accept if secure or higher security level is already present */
2455 	if (conn->sec_level == BT_SECURITY_HIGH ||
2456 	    conn->sec_level == BT_SECURITY_FIPS)
2457 		return 1;
2458 
2459 	/* Reject not secure link */
2460 	return 0;
2461 }
2462 EXPORT_SYMBOL(hci_conn_check_secure);
2463 
2464 /* Switch role */
2465 int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
2466 {
2467 	BT_DBG("hcon %p", conn);
2468 
2469 	if (role == conn->role)
2470 		return 1;
2471 
2472 	if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
2473 		struct hci_cp_switch_role cp;
2474 		bacpy(&cp.bdaddr, &conn->dst);
2475 		cp.role = role;
2476 		hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
2477 	}
2478 
2479 	return 0;
2480 }
2481 EXPORT_SYMBOL(hci_conn_switch_role);
2482 
/* Enter active mode.
 *
 * Exits sniff mode when required (connection in power-save mode or
 * @force_active set) and (re)arms the idle timer.
 */
void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("hcon %p mode %d", conn, conn->mode);

	/* Nothing to exit if not sniffing */
	if (conn->mode != HCI_CM_SNIFF)
		goto timer;

	/* Stay in sniff unless power save is off or activity is forced */
	if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
		goto timer;

	/* Only one mode change may be pending at a time */
	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
		struct hci_cp_exit_sniff_mode cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
	}

timer:
	/* Restart the idle timeout on every activity */
	if (hdev->idle_timeout > 0)
		queue_delayed_work(hdev->workqueue, &conn->idle_work,
				   msecs_to_jiffies(hdev->idle_timeout));
}
2507 
/* Drop all connections on the device, notifying each upper layer with
 * a local-host-terminated disconnect before deletion.
 */
void hci_conn_hash_flush(struct hci_dev *hdev)
{
	struct list_head *head = &hdev->conn_hash.list;
	struct hci_conn *conn;

	BT_DBG("hdev %s", hdev->name);

	/* We should not traverse the list here, because hci_conn_del
	 * can remove extra links, which may cause the list traversal
	 * to hit items that have already been released.
	 */
	while ((conn = list_first_entry_or_null(head,
						struct hci_conn,
						list)) != NULL) {
		conn->state = BT_CLOSED;
		hci_disconn_cfm(conn, HCI_ERROR_LOCAL_HOST_TERM);
		hci_conn_del(conn);
	}
}
2528 
2529 /* Check pending connect attempts */
2530 void hci_conn_check_pending(struct hci_dev *hdev)
2531 {
2532 	struct hci_conn *conn;
2533 
2534 	BT_DBG("hdev %s", hdev->name);
2535 
2536 	hci_dev_lock(hdev);
2537 
2538 	conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
2539 	if (conn)
2540 		hci_acl_create_connection(conn);
2541 
2542 	hci_dev_unlock(hdev);
2543 }
2544 
2545 static u32 get_link_mode(struct hci_conn *conn)
2546 {
2547 	u32 link_mode = 0;
2548 
2549 	if (conn->role == HCI_ROLE_MASTER)
2550 		link_mode |= HCI_LM_MASTER;
2551 
2552 	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
2553 		link_mode |= HCI_LM_ENCRYPT;
2554 
2555 	if (test_bit(HCI_CONN_AUTH, &conn->flags))
2556 		link_mode |= HCI_LM_AUTH;
2557 
2558 	if (test_bit(HCI_CONN_SECURE, &conn->flags))
2559 		link_mode |= HCI_LM_SECURE;
2560 
2561 	if (test_bit(HCI_CONN_FIPS, &conn->flags))
2562 		link_mode |= HCI_LM_FIPS;
2563 
2564 	return link_mode;
2565 }
2566 
2567 int hci_get_conn_list(void __user *arg)
2568 {
2569 	struct hci_conn *c;
2570 	struct hci_conn_list_req req, *cl;
2571 	struct hci_conn_info *ci;
2572 	struct hci_dev *hdev;
2573 	int n = 0, size, err;
2574 
2575 	if (copy_from_user(&req, arg, sizeof(req)))
2576 		return -EFAULT;
2577 
2578 	if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
2579 		return -EINVAL;
2580 
2581 	size = sizeof(req) + req.conn_num * sizeof(*ci);
2582 
2583 	cl = kmalloc(size, GFP_KERNEL);
2584 	if (!cl)
2585 		return -ENOMEM;
2586 
2587 	hdev = hci_dev_get(req.dev_id);
2588 	if (!hdev) {
2589 		kfree(cl);
2590 		return -ENODEV;
2591 	}
2592 
2593 	ci = cl->conn_info;
2594 
2595 	hci_dev_lock(hdev);
2596 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2597 		bacpy(&(ci + n)->bdaddr, &c->dst);
2598 		(ci + n)->handle = c->handle;
2599 		(ci + n)->type  = c->type;
2600 		(ci + n)->out   = c->out;
2601 		(ci + n)->state = c->state;
2602 		(ci + n)->link_mode = get_link_mode(c);
2603 		if (++n >= req.conn_num)
2604 			break;
2605 	}
2606 	hci_dev_unlock(hdev);
2607 
2608 	cl->dev_id = hdev->id;
2609 	cl->conn_num = n;
2610 	size = sizeof(req) + n * sizeof(*ci);
2611 
2612 	hci_dev_put(hdev);
2613 
2614 	err = copy_to_user(arg, cl, size);
2615 	kfree(cl);
2616 
2617 	return err ? -EFAULT : 0;
2618 }
2619 
2620 int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
2621 {
2622 	struct hci_conn_info_req req;
2623 	struct hci_conn_info ci;
2624 	struct hci_conn *conn;
2625 	char __user *ptr = arg + sizeof(req);
2626 
2627 	if (copy_from_user(&req, arg, sizeof(req)))
2628 		return -EFAULT;
2629 
2630 	hci_dev_lock(hdev);
2631 	conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
2632 	if (conn) {
2633 		bacpy(&ci.bdaddr, &conn->dst);
2634 		ci.handle = conn->handle;
2635 		ci.type  = conn->type;
2636 		ci.out   = conn->out;
2637 		ci.state = conn->state;
2638 		ci.link_mode = get_link_mode(conn);
2639 	}
2640 	hci_dev_unlock(hdev);
2641 
2642 	if (!conn)
2643 		return -ENOENT;
2644 
2645 	return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
2646 }
2647 
2648 int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
2649 {
2650 	struct hci_auth_info_req req;
2651 	struct hci_conn *conn;
2652 
2653 	if (copy_from_user(&req, arg, sizeof(req)))
2654 		return -EFAULT;
2655 
2656 	hci_dev_lock(hdev);
2657 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
2658 	if (conn)
2659 		req.type = conn->auth_type;
2660 	hci_dev_unlock(hdev);
2661 
2662 	if (!conn)
2663 		return -ENOENT;
2664 
2665 	return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
2666 }
2667 
/* Create a new channel on the given connection.
 *
 * Takes a reference on @conn (released in hci_chan_del()). Returns
 * NULL if the connection is being dropped or on allocation failure.
 */
struct hci_chan *hci_chan_create(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_chan *chan;

	BT_DBG("%s hcon %p", hdev->name, conn);

	/* HCI_CONN_DROP is set by hci_chan_del(); refuse new channels
	 * on a connection that is going away.
	 */
	if (test_bit(HCI_CONN_DROP, &conn->flags)) {
		BT_DBG("Refusing to create new hci_chan");
		return NULL;
	}

	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return NULL;

	chan->conn = hci_conn_get(conn);
	skb_queue_head_init(&chan->data_q);
	chan->state = BT_CONNECTED;

	/* Readers traverse chan_list under RCU */
	list_add_rcu(&chan->list, &conn->chan_list);

	return chan;
}
2692 
/* Delete a channel and release its connection reference.
 *
 * Waits for concurrent RCU readers of the channel list to finish
 * before freeing the channel.
 */
void hci_chan_del(struct hci_chan *chan)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);

	list_del_rcu(&chan->list);

	/* Make sure no RCU reader still sees the channel before it is
	 * freed below.
	 */
	synchronize_rcu();

	/* Prevent new hci_chan's to be created for this hci_conn */
	set_bit(HCI_CONN_DROP, &conn->flags);

	hci_conn_put(conn);

	skb_queue_purge(&chan->data_q);
	kfree(chan);
}
2712 
2713 void hci_chan_list_flush(struct hci_conn *conn)
2714 {
2715 	struct hci_chan *chan, *n;
2716 
2717 	BT_DBG("hcon %p", conn);
2718 
2719 	list_for_each_entry_safe(chan, n, &conn->chan_list, list)
2720 		hci_chan_del(chan);
2721 }
2722 
2723 static struct hci_chan *__hci_chan_lookup_handle(struct hci_conn *hcon,
2724 						 __u16 handle)
2725 {
2726 	struct hci_chan *hchan;
2727 
2728 	list_for_each_entry(hchan, &hcon->chan_list, list) {
2729 		if (hchan->handle == handle)
2730 			return hchan;
2731 	}
2732 
2733 	return NULL;
2734 }
2735 
2736 struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle)
2737 {
2738 	struct hci_conn_hash *h = &hdev->conn_hash;
2739 	struct hci_conn *hcon;
2740 	struct hci_chan *hchan = NULL;
2741 
2742 	rcu_read_lock();
2743 
2744 	list_for_each_entry_rcu(hcon, &h->list, list) {
2745 		hchan = __hci_chan_lookup_handle(hcon, handle);
2746 		if (hchan)
2747 			break;
2748 	}
2749 
2750 	rcu_read_unlock();
2751 
2752 	return hchan;
2753 }
2754 
/* Return the bitmask of PHYs (BT_PHY_*) usable by @conn, derived from
 * the link type and the connection's packet types / LE PHY settings.
 *
 * Note: for the EDR packet types a set bit in pkt_type disables that
 * packet type, hence the negated tests below.
 */
u32 hci_conn_get_phy(struct hci_conn *conn)
{
	u32 phys = 0;

	/* BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 2, Part B page 471:
	 * Table 6.2: Packets defined for synchronous, asynchronous, and
	 * CPB logical transport types.
	 */
	switch (conn->type) {
	case SCO_LINK:
		/* SCO logical transport (1 Mb/s):
		 * HV1, HV2, HV3 and DV.
		 */
		phys |= BT_PHY_BR_1M_1SLOT;

		break;

	case ACL_LINK:
		/* ACL logical transport (1 Mb/s) ptt=0:
		 * DH1, DM3, DH3, DM5 and DH5.
		 */
		phys |= BT_PHY_BR_1M_1SLOT;

		if (conn->pkt_type & (HCI_DM3 | HCI_DH3))
			phys |= BT_PHY_BR_1M_3SLOT;

		if (conn->pkt_type & (HCI_DM5 | HCI_DH5))
			phys |= BT_PHY_BR_1M_5SLOT;

		/* ACL logical transport (2 Mb/s) ptt=1:
		 * 2-DH1, 2-DH3 and 2-DH5.
		 */
		if (!(conn->pkt_type & HCI_2DH1))
			phys |= BT_PHY_EDR_2M_1SLOT;

		if (!(conn->pkt_type & HCI_2DH3))
			phys |= BT_PHY_EDR_2M_3SLOT;

		if (!(conn->pkt_type & HCI_2DH5))
			phys |= BT_PHY_EDR_2M_5SLOT;

		/* ACL logical transport (3 Mb/s) ptt=1:
		 * 3-DH1, 3-DH3 and 3-DH5.
		 */
		if (!(conn->pkt_type & HCI_3DH1))
			phys |= BT_PHY_EDR_3M_1SLOT;

		if (!(conn->pkt_type & HCI_3DH3))
			phys |= BT_PHY_EDR_3M_3SLOT;

		if (!(conn->pkt_type & HCI_3DH5))
			phys |= BT_PHY_EDR_3M_5SLOT;

		break;

	case ESCO_LINK:
		/* eSCO logical transport (1 Mb/s): EV3, EV4 and EV5 */
		phys |= BT_PHY_BR_1M_1SLOT;

		if (!(conn->pkt_type & (ESCO_EV4 | ESCO_EV5)))
			phys |= BT_PHY_BR_1M_3SLOT;

		/* eSCO logical transport (2 Mb/s): 2-EV3, 2-EV5 */
		if (!(conn->pkt_type & ESCO_2EV3))
			phys |= BT_PHY_EDR_2M_1SLOT;

		if (!(conn->pkt_type & ESCO_2EV5))
			phys |= BT_PHY_EDR_2M_3SLOT;

		/* eSCO logical transport (3 Mb/s): 3-EV3, 3-EV5 */
		if (!(conn->pkt_type & ESCO_3EV3))
			phys |= BT_PHY_EDR_3M_1SLOT;

		if (!(conn->pkt_type & ESCO_3EV5))
			phys |= BT_PHY_EDR_3M_3SLOT;

		break;

	case LE_LINK:
		if (conn->le_tx_phy & HCI_LE_SET_PHY_1M)
			phys |= BT_PHY_LE_1M_TX;

		if (conn->le_rx_phy & HCI_LE_SET_PHY_1M)
			phys |= BT_PHY_LE_1M_RX;

		if (conn->le_tx_phy & HCI_LE_SET_PHY_2M)
			phys |= BT_PHY_LE_2M_TX;

		if (conn->le_rx_phy & HCI_LE_SET_PHY_2M)
			phys |= BT_PHY_LE_2M_RX;

		if (conn->le_tx_phy & HCI_LE_SET_PHY_CODED)
			phys |= BT_PHY_LE_CODED_TX;

		if (conn->le_rx_phy & HCI_LE_SET_PHY_CODED)
			phys |= BT_PHY_LE_CODED_RX;

		break;
	}

	return phys;
}
2857 
2858 static int abort_conn_sync(struct hci_dev *hdev, void *data)
2859 {
2860 	struct hci_conn *conn;
2861 	u16 handle = PTR_ERR(data);
2862 
2863 	conn = hci_conn_hash_lookup_handle(hdev, handle);
2864 	if (!conn)
2865 		return 0;
2866 
2867 	return hci_abort_conn_sync(hdev, conn, conn->abort_reason);
2868 }
2869 
/* Request disconnection of a connection with the given reason.
 *
 * The actual abort runs asynchronously via the cmd_sync machinery.
 * Returns 0 when an abort was already in progress or the abort was
 * queued, negative error otherwise.
 */
int hci_abort_conn(struct hci_conn *conn, u8 reason)
{
	struct hci_dev *hdev = conn->hdev;

	/* If abort_reason has already been set it means the connection is
	 * already being aborted so don't attempt to overwrite it.
	 */
	if (conn->abort_reason)
		return 0;

	bt_dev_dbg(hdev, "handle 0x%2.2x reason 0x%2.2x", conn->handle, reason);

	conn->abort_reason = reason;

	/* If the connection is pending check the command opcode since that
	 * might be blocking on hci_cmd_sync_work while waiting its respective
	 * event so we need to hci_cmd_sync_cancel to cancel it.
	 *
	 * hci_connect_le serializes the connection attempts so only one
	 * connection can be in BT_CONNECT at time.
	 */
	if (conn->state == BT_CONNECT && hdev->req_status == HCI_REQ_PEND) {
		switch (hci_skb_event(hdev->sent_cmd)) {
		case HCI_EV_LE_CONN_COMPLETE:
		case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
		case HCI_EVT_LE_CIS_ESTABLISHED:
			hci_cmd_sync_cancel(hdev, -ECANCELED);
			break;
		}
	}

	/* The handle (not a pointer) is smuggled through the data
	 * argument; abort_conn_sync() looks the connection up again.
	 */
	return hci_cmd_sync_queue(hdev, abort_conn_sync, ERR_PTR(conn->handle),
				  NULL);
}
2904