xref: /openbmc/linux/net/bluetooth/hci_conn.c (revision bfce728d)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4    Copyright 2023 NXP
5 
6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License version 2 as
10    published by the Free Software Foundation;
11 
12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 
21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23    SOFTWARE IS DISCLAIMED.
24 */
25 
26 /* Bluetooth HCI connection handling. */
27 
28 #include <linux/export.h>
29 #include <linux/debugfs.h>
30 
31 #include <net/bluetooth/bluetooth.h>
32 #include <net/bluetooth/hci_core.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/iso.h>
35 #include <net/bluetooth/mgmt.h>
36 
37 #include "hci_request.h"
38 #include "smp.h"
39 #include "a2mp.h"
40 #include "eir.h"
41 
/* One (e)SCO negotiation parameter set; successive conn->attempt values
 * index into the tables below, falling back to more conservative sets.
 */
struct sco_param {
	u16 pkt_type;		/* allowed (e)SCO packet types */
	u16 max_latency;	/* maximum latency (0xffff = don't care) */
	u8  retrans_effort;	/* retransmission effort */
};
47 
/* Pairs a connection with an ACL handle for hci_cmd_sync_queue()
 * callbacks; allocated by hci_setup_sync() and freed by the callback.
 */
struct conn_handle_t {
	struct hci_conn *conn;
	__u16 handle;
};
52 
/* CVSD over eSCO: parameter sets tried in order by conn->attempt */
static const struct sco_param esco_param_cvsd[] = {
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a,	0x01 }, /* S3 */
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007,	0x01 }, /* S2 */
	{ EDR_ESCO_MASK | ESCO_EV3,   0x0007,	0x01 }, /* S1 */
	{ EDR_ESCO_MASK | ESCO_HV3,   0xffff,	0x01 }, /* D1 */
	{ EDR_ESCO_MASK | ESCO_HV1,   0xffff,	0x01 }, /* D0 */
};
60 
/* CVSD over legacy SCO (peer lacks eSCO): parameter sets tried in order */
static const struct sco_param sco_param_cvsd[] = {
	{ EDR_ESCO_MASK | ESCO_HV3,   0xffff,	0xff }, /* D1 */
	{ EDR_ESCO_MASK | ESCO_HV1,   0xffff,	0xff }, /* D0 */
};
65 
/* mSBC (and transparent) over eSCO: parameter sets tried in order */
static const struct sco_param esco_param_msbc[] = {
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d,	0x02 }, /* T2 */
	{ EDR_ESCO_MASK | ESCO_EV3,   0x0008,	0x02 }, /* T1 */
};
70 
/* Clean up the pending-connect bookkeeping after a scan-driven LE
 * connection attempt ended with @status (0x00 = success/cancelled).
 *
 * This function requires the caller holds hdev->lock.
 */
static void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status)
{
	struct hci_conn_params *params;
	struct hci_dev *hdev = conn->hdev;
	struct smp_irk *irk;
	bdaddr_t *bdaddr;
	u8 bdaddr_type;

	bdaddr = &conn->dst;
	bdaddr_type = conn->dst_type;

	/* Check if we need to convert to identity address */
	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		bdaddr_type = irk->addr_type;
	}

	/* Nothing to do unless a connect action is still pending for
	 * this (identity) address.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, bdaddr,
					   bdaddr_type);
	if (!params)
		return;

	/* Release the connection reference held by the params entry */
	if (params->conn) {
		hci_conn_drop(params->conn);
		hci_conn_put(params->conn);
		params->conn = NULL;
	}

	if (!params->explicit_connect)
		return;

	/* If the status indicates successful cancellation of
	 * the attempt (i.e. Unknown Connection Id) there's no point of
	 * notifying failure since we'll go back to keep trying to
	 * connect. The only exception is explicit connect requests
	 * where a timeout + cancel does indicate an actual failure.
	 */
	if (status && status != HCI_ERROR_UNKNOWN_CONN_ID)
		mgmt_connect_failed(hdev, &conn->dst, conn->type,
				    conn->dst_type, status);

	/* The connection attempt was doing scan for new RPA, and is
	 * in scan phase. If params are not associated with any other
	 * autoconnect action, remove them completely. If they are, just unmark
	 * them as waiting for connection, by clearing explicit_connect field.
	 */
	params->explicit_connect = false;

	list_del_init(&params->action);

	/* Re-queue the params on the list matching their remaining policy */
	switch (params->auto_connect) {
	case HCI_AUTO_CONN_EXPLICIT:
		hci_conn_params_del(hdev, bdaddr, bdaddr_type);
		/* return instead of break to avoid duplicate scan update */
		return;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		list_add(&params->action, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		list_add(&params->action, &hdev->pend_le_reports);
		break;
	default:
		break;
	}

	hci_update_passive_scan(hdev);
}
141 
/* Final teardown of a connection: delete flagged params/link keys, flush
 * channels, remove from the conn hash, run the type-specific cleanup hook,
 * notify the driver, remove sysfs/debugfs entries and drop the hdev and
 * conn references taken in hci_conn_add().
 *
 * NOTE(review): callers appear to hold hdev->lock (see le_scan_cleanup);
 * confirm before invoking from any new context.
 */
static void hci_conn_cleanup(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags))
		hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type);

	/* Drop the stored link key if a flush was requested */
	if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
		hci_remove_link_key(hdev, &conn->dst);

	hci_chan_list_flush(conn);

	hci_conn_hash_del(hdev, conn);

	/* Type-specific cleanup (e.g. bis_cleanup/cis_cleanup for ISO) */
	if (conn->cleanup)
		conn->cleanup(conn);

	if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
		switch (conn->setting & SCO_AIRMODE_MASK) {
		case SCO_AIRMODE_CVSD:
		case SCO_AIRMODE_TRANSP:
			if (hdev->notify)
				hdev->notify(hdev, HCI_NOTIFY_DISABLE_SCO);
			break;
		}
	} else {
		if (hdev->notify)
			hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
	}

	hci_conn_del_sysfs(conn);

	debugfs_remove_recursive(conn->debugfs);

	/* Release the references taken in hci_conn_add() */
	hci_dev_put(hdev);

	hci_conn_put(conn);
}
180 
/* Deferred cleanup for an LE connection that timed out while still in
 * the scanning phase; scheduled by hci_connect_le_scan_remove().
 */
static void le_scan_cleanup(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     le_scan_cleanup);
	struct hci_dev *hdev = conn->hdev;
	struct hci_conn *c = NULL;

	BT_DBG("%s hcon %p", hdev->name, conn);

	hci_dev_lock(hdev);

	/* Check that the hci_conn is still around */
	rcu_read_lock();
	list_for_each_entry_rcu(c, &hdev->conn_hash.list, list) {
		if (c == conn)
			break;
	}
	rcu_read_unlock();

	/* Only clean up if the conn was still in the hash (c == conn) */
	if (c == conn) {
		hci_connect_le_scan_cleanup(conn, 0x00);
		hci_conn_cleanup(conn);
	}

	hci_dev_unlock(hdev);

	/* Drop the temporary references taken in
	 * hci_connect_le_scan_remove().
	 */
	hci_dev_put(hdev);
	hci_conn_put(conn);
}
209 
/* Schedule removal of a scanning-phase LE connection from process
 * context (see le_scan_cleanup()).
 */
static void hci_connect_le_scan_remove(struct hci_conn *conn)
{
	BT_DBG("%s hcon %p", conn->hdev->name, conn);

	/* We can't call hci_conn_del/hci_conn_cleanup here since that
	 * could deadlock with another hci_conn_del() call that's holding
	 * hci_dev_lock and doing cancel_delayed_work_sync(&conn->disc_work).
	 * Instead, grab temporary extra references to the hci_dev and
	 * hci_conn and perform the necessary cleanup in a separate work
	 * callback.
	 */

	hci_dev_hold(conn->hdev);
	hci_conn_get(conn);

	/* Even though we hold a reference to the hdev, many other
	 * things might get cleaned up meanwhile, including the hdev's
	 * own workqueue, so we can't use that for scheduling.
	 */
	schedule_work(&conn->le_scan_cleanup);
}
231 
232 static void hci_acl_create_connection(struct hci_conn *conn)
233 {
234 	struct hci_dev *hdev = conn->hdev;
235 	struct inquiry_entry *ie;
236 	struct hci_cp_create_conn cp;
237 
238 	BT_DBG("hcon %p", conn);
239 
240 	/* Many controllers disallow HCI Create Connection while it is doing
241 	 * HCI Inquiry. So we cancel the Inquiry first before issuing HCI Create
242 	 * Connection. This may cause the MGMT discovering state to become false
243 	 * without user space's request but it is okay since the MGMT Discovery
244 	 * APIs do not promise that discovery should be done forever. Instead,
245 	 * the user space monitors the status of MGMT discovering and it may
246 	 * request for discovery again when this flag becomes false.
247 	 */
248 	if (test_bit(HCI_INQUIRY, &hdev->flags)) {
249 		/* Put this connection to "pending" state so that it will be
250 		 * executed after the inquiry cancel command complete event.
251 		 */
252 		conn->state = BT_CONNECT2;
253 		hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
254 		return;
255 	}
256 
257 	conn->state = BT_CONNECT;
258 	conn->out = true;
259 	conn->role = HCI_ROLE_MASTER;
260 
261 	conn->attempt++;
262 
263 	conn->link_policy = hdev->link_policy;
264 
265 	memset(&cp, 0, sizeof(cp));
266 	bacpy(&cp.bdaddr, &conn->dst);
267 	cp.pscan_rep_mode = 0x02;
268 
269 	ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
270 	if (ie) {
271 		if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
272 			cp.pscan_rep_mode = ie->data.pscan_rep_mode;
273 			cp.pscan_mode     = ie->data.pscan_mode;
274 			cp.clock_offset   = ie->data.clock_offset |
275 					    cpu_to_le16(0x8000);
276 		}
277 
278 		memcpy(conn->dev_class, ie->data.dev_class, 3);
279 	}
280 
281 	cp.pkt_type = cpu_to_le16(conn->pkt_type);
282 	if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
283 		cp.role_switch = 0x01;
284 	else
285 		cp.role_switch = 0x00;
286 
287 	hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
288 }
289 
290 int hci_disconnect(struct hci_conn *conn, __u8 reason)
291 {
292 	BT_DBG("hcon %p", conn);
293 
294 	/* When we are central of an established connection and it enters
295 	 * the disconnect timeout, then go ahead and try to read the
296 	 * current clock offset.  Processing of the result is done
297 	 * within the event handling and hci_clock_offset_evt function.
298 	 */
299 	if (conn->type == ACL_LINK && conn->role == HCI_ROLE_MASTER &&
300 	    (conn->state == BT_CONNECTED || conn->state == BT_CONFIG)) {
301 		struct hci_dev *hdev = conn->hdev;
302 		struct hci_cp_read_clock_offset clkoff_cp;
303 
304 		clkoff_cp.handle = cpu_to_le16(conn->handle);
305 		hci_send_cmd(hdev, HCI_OP_READ_CLOCK_OFFSET, sizeof(clkoff_cp),
306 			     &clkoff_cp);
307 	}
308 
309 	return hci_abort_conn(conn, reason);
310 }
311 
312 static void hci_add_sco(struct hci_conn *conn, __u16 handle)
313 {
314 	struct hci_dev *hdev = conn->hdev;
315 	struct hci_cp_add_sco cp;
316 
317 	BT_DBG("hcon %p", conn);
318 
319 	conn->state = BT_CONNECT;
320 	conn->out = true;
321 
322 	conn->attempt++;
323 
324 	cp.handle   = cpu_to_le16(handle);
325 	cp.pkt_type = cpu_to_le16(conn->pkt_type);
326 
327 	hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
328 }
329 
330 static bool find_next_esco_param(struct hci_conn *conn,
331 				 const struct sco_param *esco_param, int size)
332 {
333 	if (!conn->parent)
334 		return false;
335 
336 	for (; conn->attempt <= size; conn->attempt++) {
337 		if (lmp_esco_2m_capable(conn->parent) ||
338 		    (esco_param[conn->attempt - 1].pkt_type & ESCO_2EV3))
339 			break;
340 		BT_DBG("hcon %p skipped attempt %d, eSCO 2M not supported",
341 		       conn, conn->attempt);
342 	}
343 
344 	return conn->attempt <= size;
345 }
346 
/* Configure the controller data path for an offloaded (e)SCO codec.
 *
 * Queries the driver for vendor-specific codec data, then issues HCI
 * Configure Data Path once per direction (0x00 and 0x01).
 *
 * Returns 0 on success or a negative errno.
 */
static int configure_datapath_sync(struct hci_dev *hdev, struct bt_codec *codec)
{
	int err;
	__u8 vnd_len, *vnd_data = NULL;
	struct hci_op_configure_data_path *cmd = NULL;

	err = hdev->get_codec_config_data(hdev, ESCO_LINK, codec, &vnd_len,
					  &vnd_data);
	if (err < 0)
		goto error;

	/* Command carries vnd_len bytes of vendor data after the header */
	cmd = kzalloc(sizeof(*cmd) + vnd_len, GFP_KERNEL);
	if (!cmd) {
		err = -ENOMEM;
		goto error;
	}

	err = hdev->get_data_path_id(hdev, &cmd->data_path_id);
	if (err < 0)
		goto error;

	cmd->vnd_len = vnd_len;
	memcpy(cmd->vnd_data, vnd_data, vnd_len);

	/* NOTE(review): the status of the first-direction command is not
	 * checked; only the second determines err. Confirm intentional.
	 */
	cmd->direction = 0x00;
	__hci_cmd_sync_status(hdev, HCI_CONFIGURE_DATA_PATH,
			      sizeof(*cmd) + vnd_len, cmd, HCI_CMD_TIMEOUT);

	cmd->direction = 0x01;
	err = __hci_cmd_sync_status(hdev, HCI_CONFIGURE_DATA_PATH,
				    sizeof(*cmd) + vnd_len, cmd,
				    HCI_CMD_TIMEOUT);
error:

	kfree(cmd);
	kfree(vnd_data);
	return err;
}
385 
386 static int hci_enhanced_setup_sync(struct hci_dev *hdev, void *data)
387 {
388 	struct conn_handle_t *conn_handle = data;
389 	struct hci_conn *conn = conn_handle->conn;
390 	__u16 handle = conn_handle->handle;
391 	struct hci_cp_enhanced_setup_sync_conn cp;
392 	const struct sco_param *param;
393 
394 	kfree(conn_handle);
395 
396 	bt_dev_dbg(hdev, "hcon %p", conn);
397 
398 	/* for offload use case, codec needs to configured before opening SCO */
399 	if (conn->codec.data_path)
400 		configure_datapath_sync(hdev, &conn->codec);
401 
402 	conn->state = BT_CONNECT;
403 	conn->out = true;
404 
405 	conn->attempt++;
406 
407 	memset(&cp, 0x00, sizeof(cp));
408 
409 	cp.handle   = cpu_to_le16(handle);
410 
411 	cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
412 	cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
413 
414 	switch (conn->codec.id) {
415 	case BT_CODEC_MSBC:
416 		if (!find_next_esco_param(conn, esco_param_msbc,
417 					  ARRAY_SIZE(esco_param_msbc)))
418 			return -EINVAL;
419 
420 		param = &esco_param_msbc[conn->attempt - 1];
421 		cp.tx_coding_format.id = 0x05;
422 		cp.rx_coding_format.id = 0x05;
423 		cp.tx_codec_frame_size = __cpu_to_le16(60);
424 		cp.rx_codec_frame_size = __cpu_to_le16(60);
425 		cp.in_bandwidth = __cpu_to_le32(32000);
426 		cp.out_bandwidth = __cpu_to_le32(32000);
427 		cp.in_coding_format.id = 0x04;
428 		cp.out_coding_format.id = 0x04;
429 		cp.in_coded_data_size = __cpu_to_le16(16);
430 		cp.out_coded_data_size = __cpu_to_le16(16);
431 		cp.in_pcm_data_format = 2;
432 		cp.out_pcm_data_format = 2;
433 		cp.in_pcm_sample_payload_msb_pos = 0;
434 		cp.out_pcm_sample_payload_msb_pos = 0;
435 		cp.in_data_path = conn->codec.data_path;
436 		cp.out_data_path = conn->codec.data_path;
437 		cp.in_transport_unit_size = 1;
438 		cp.out_transport_unit_size = 1;
439 		break;
440 
441 	case BT_CODEC_TRANSPARENT:
442 		if (!find_next_esco_param(conn, esco_param_msbc,
443 					  ARRAY_SIZE(esco_param_msbc)))
444 			return false;
445 		param = &esco_param_msbc[conn->attempt - 1];
446 		cp.tx_coding_format.id = 0x03;
447 		cp.rx_coding_format.id = 0x03;
448 		cp.tx_codec_frame_size = __cpu_to_le16(60);
449 		cp.rx_codec_frame_size = __cpu_to_le16(60);
450 		cp.in_bandwidth = __cpu_to_le32(0x1f40);
451 		cp.out_bandwidth = __cpu_to_le32(0x1f40);
452 		cp.in_coding_format.id = 0x03;
453 		cp.out_coding_format.id = 0x03;
454 		cp.in_coded_data_size = __cpu_to_le16(16);
455 		cp.out_coded_data_size = __cpu_to_le16(16);
456 		cp.in_pcm_data_format = 2;
457 		cp.out_pcm_data_format = 2;
458 		cp.in_pcm_sample_payload_msb_pos = 0;
459 		cp.out_pcm_sample_payload_msb_pos = 0;
460 		cp.in_data_path = conn->codec.data_path;
461 		cp.out_data_path = conn->codec.data_path;
462 		cp.in_transport_unit_size = 1;
463 		cp.out_transport_unit_size = 1;
464 		break;
465 
466 	case BT_CODEC_CVSD:
467 		if (conn->parent && lmp_esco_capable(conn->parent)) {
468 			if (!find_next_esco_param(conn, esco_param_cvsd,
469 						  ARRAY_SIZE(esco_param_cvsd)))
470 				return -EINVAL;
471 			param = &esco_param_cvsd[conn->attempt - 1];
472 		} else {
473 			if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
474 				return -EINVAL;
475 			param = &sco_param_cvsd[conn->attempt - 1];
476 		}
477 		cp.tx_coding_format.id = 2;
478 		cp.rx_coding_format.id = 2;
479 		cp.tx_codec_frame_size = __cpu_to_le16(60);
480 		cp.rx_codec_frame_size = __cpu_to_le16(60);
481 		cp.in_bandwidth = __cpu_to_le32(16000);
482 		cp.out_bandwidth = __cpu_to_le32(16000);
483 		cp.in_coding_format.id = 4;
484 		cp.out_coding_format.id = 4;
485 		cp.in_coded_data_size = __cpu_to_le16(16);
486 		cp.out_coded_data_size = __cpu_to_le16(16);
487 		cp.in_pcm_data_format = 2;
488 		cp.out_pcm_data_format = 2;
489 		cp.in_pcm_sample_payload_msb_pos = 0;
490 		cp.out_pcm_sample_payload_msb_pos = 0;
491 		cp.in_data_path = conn->codec.data_path;
492 		cp.out_data_path = conn->codec.data_path;
493 		cp.in_transport_unit_size = 16;
494 		cp.out_transport_unit_size = 16;
495 		break;
496 	default:
497 		return -EINVAL;
498 	}
499 
500 	cp.retrans_effort = param->retrans_effort;
501 	cp.pkt_type = __cpu_to_le16(param->pkt_type);
502 	cp.max_latency = __cpu_to_le16(param->max_latency);
503 
504 	if (hci_send_cmd(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
505 		return -EIO;
506 
507 	return 0;
508 }
509 
/* Issue the legacy HCI Setup Synchronous Connection command for @conn on
 * ACL @handle, picking (e)SCO parameters from the air-mode tables.
 *
 * Returns true if the command was sent, false when no usable parameter
 * set remains or sending failed.
 */
static bool hci_setup_sync_conn(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_setup_sync_conn cp;
	const struct sco_param *param;

	bt_dev_dbg(hdev, "hcon %p", conn);

	conn->state = BT_CONNECT;
	conn->out = true;

	conn->attempt++;

	cp.handle   = cpu_to_le16(handle);

	cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
	cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
	cp.voice_setting  = cpu_to_le16(conn->setting);

	switch (conn->setting & SCO_AIRMODE_MASK) {
	case SCO_AIRMODE_TRANSP:
		if (!find_next_esco_param(conn, esco_param_msbc,
					  ARRAY_SIZE(esco_param_msbc)))
			return false;
		param = &esco_param_msbc[conn->attempt - 1];
		break;
	case SCO_AIRMODE_CVSD:
		/* Fall back to legacy SCO parameters if the peer lacks eSCO */
		if (conn->parent && lmp_esco_capable(conn->parent)) {
			if (!find_next_esco_param(conn, esco_param_cvsd,
						  ARRAY_SIZE(esco_param_cvsd)))
				return false;
			param = &esco_param_cvsd[conn->attempt - 1];
		} else {
			if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
				return false;
			param = &sco_param_cvsd[conn->attempt - 1];
		}
		break;
	default:
		return false;
	}

	cp.retrans_effort = param->retrans_effort;
	cp.pkt_type = __cpu_to_le16(param->pkt_type);
	cp.max_latency = __cpu_to_le16(param->max_latency);

	if (hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
		return false;

	return true;
}
561 
562 bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
563 {
564 	int result;
565 	struct conn_handle_t *conn_handle;
566 
567 	if (enhanced_sync_conn_capable(conn->hdev)) {
568 		conn_handle = kzalloc(sizeof(*conn_handle), GFP_KERNEL);
569 
570 		if (!conn_handle)
571 			return false;
572 
573 		conn_handle->conn = conn;
574 		conn_handle->handle = handle;
575 		result = hci_cmd_sync_queue(conn->hdev, hci_enhanced_setup_sync,
576 					    conn_handle, NULL);
577 		if (result < 0)
578 			kfree(conn_handle);
579 
580 		return result == 0;
581 	}
582 
583 	return hci_setup_sync_conn(conn, handle);
584 }
585 
586 u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
587 		      u16 to_multiplier)
588 {
589 	struct hci_dev *hdev = conn->hdev;
590 	struct hci_conn_params *params;
591 	struct hci_cp_le_conn_update cp;
592 
593 	hci_dev_lock(hdev);
594 
595 	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
596 	if (params) {
597 		params->conn_min_interval = min;
598 		params->conn_max_interval = max;
599 		params->conn_latency = latency;
600 		params->supervision_timeout = to_multiplier;
601 	}
602 
603 	hci_dev_unlock(hdev);
604 
605 	memset(&cp, 0, sizeof(cp));
606 	cp.handle		= cpu_to_le16(conn->handle);
607 	cp.conn_interval_min	= cpu_to_le16(min);
608 	cp.conn_interval_max	= cpu_to_le16(max);
609 	cp.conn_latency		= cpu_to_le16(latency);
610 	cp.supervision_timeout	= cpu_to_le16(to_multiplier);
611 	cp.min_ce_len		= cpu_to_le16(0x0000);
612 	cp.max_ce_len		= cpu_to_le16(0x0000);
613 
614 	hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
615 
616 	if (params)
617 		return 0x01;
618 
619 	return 0x00;
620 }
621 
622 void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
623 		      __u8 ltk[16], __u8 key_size)
624 {
625 	struct hci_dev *hdev = conn->hdev;
626 	struct hci_cp_le_start_enc cp;
627 
628 	BT_DBG("hcon %p", conn);
629 
630 	memset(&cp, 0, sizeof(cp));
631 
632 	cp.handle = cpu_to_le16(conn->handle);
633 	cp.rand = rand;
634 	cp.ediv = ediv;
635 	memcpy(cp.ltk, ltk, key_size);
636 
637 	hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
638 }
639 
640 /* Device _must_ be locked */
641 void hci_sco_setup(struct hci_conn *conn, __u8 status)
642 {
643 	struct hci_link *link;
644 
645 	link = list_first_entry_or_null(&conn->link_list, struct hci_link, list);
646 	if (!link || !link->conn)
647 		return;
648 
649 	BT_DBG("hcon %p", conn);
650 
651 	if (!status) {
652 		if (lmp_esco_capable(conn->hdev))
653 			hci_setup_sync(link->conn, conn->handle);
654 		else
655 			hci_add_sco(link->conn, conn->handle);
656 	} else {
657 		hci_connect_cfm(link->conn, status);
658 		hci_conn_del(link->conn);
659 	}
660 }
661 
/* Delayed-work handler: disconnect a connection whose refcount dropped
 * to zero once the disconnect timeout expired.
 */
static void hci_conn_timeout(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     disc_work.work);
	int refcnt = atomic_read(&conn->refcnt);

	BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));

	WARN_ON(refcnt < 0);

	/* FIXME: It was observed that in pairing failed scenario, refcnt
	 * drops below 0. Probably this is because l2cap_conn_del calls
	 * l2cap_chan_del for each channel, and inside l2cap_chan_del conn is
	 * dropped. After that loop hci_chan_del is called which also drops
	 * conn. For now make sure that ACL is alive if refcnt is higher then 0,
	 * otherwise drop it.
	 */
	if (refcnt > 0)
		return;

	/* LE connections in scanning state need special handling */
	if (conn->state == BT_CONNECT && conn->type == LE_LINK &&
	    test_bit(HCI_CONN_SCANNING, &conn->flags)) {
		hci_connect_le_scan_remove(conn);
		return;
	}

	hci_abort_conn(conn, hci_proto_disconn_ind(conn));
}
691 
/* Enter sniff mode
 *
 * Delayed-work handler: put an idle ACL link into sniff mode (optionally
 * enabling sniff subrating first) when both sides support it and the
 * link policy allows it.
 */
static void hci_conn_idle(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     idle_work.work);
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("hcon %p mode %d", conn, conn->mode);

	if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
		return;

	if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
		return;

	/* Configure sniff subrating first when both sides support it */
	if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
		struct hci_cp_sniff_subrate cp;
		cp.handle             = cpu_to_le16(conn->handle);
		cp.max_latency        = cpu_to_le16(0);
		cp.min_remote_timeout = cpu_to_le16(0);
		cp.min_local_timeout  = cpu_to_le16(0);
		hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
	}

	/* Only request sniff mode if no mode change is already pending */
	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
		struct hci_cp_sniff_mode cp;
		cp.handle       = cpu_to_le16(conn->handle);
		cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
		cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
		cp.attempt      = cpu_to_le16(4);
		cp.timeout      = cpu_to_le16(1);
		hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
	}
}
726 
/* Delayed-work handler: automatically confirm a pending user-confirmation
 * pairing request for this connection's remote address.
 */
static void hci_conn_auto_accept(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     auto_accept_work.work);

	hci_send_cmd(conn->hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
		     &conn->dst);
}
735 
736 static void le_disable_advertising(struct hci_dev *hdev)
737 {
738 	if (ext_adv_capable(hdev)) {
739 		struct hci_cp_le_set_ext_adv_enable cp;
740 
741 		cp.enable = 0x00;
742 		cp.num_of_sets = 0x00;
743 
744 		hci_send_cmd(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp),
745 			     &cp);
746 	} else {
747 		u8 enable = 0x00;
748 		hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
749 			     &enable);
750 	}
751 }
752 
/* Delayed-work handler: an LE connection attempt exceeded its timeout;
 * abort it, or clean up directed-advertising state when we were slave.
 */
static void le_conn_timeout(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     le_conn_timeout.work);
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("");

	/* We could end up here due to having done directed advertising,
	 * so clean up the state if necessary. This should however only
	 * happen with broken hardware or if low duty cycle was used
	 * (which doesn't have a timeout of its own).
	 */
	if (conn->role == HCI_ROLE_SLAVE) {
		/* Disable LE Advertising */
		le_disable_advertising(hdev);
		hci_dev_lock(hdev);
		hci_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
		hci_dev_unlock(hdev);
		return;
	}

	hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
}
777 
/* Scratch data handed to hci_conn_hash_list_state() iterators and to
 * hci_cmd_sync callbacks operating on ISO (CIG/BIG) connections.
 */
struct iso_list_data {
	union {
		u8  cig;	/* CIG ID (unicast) */
		u8  big;	/* BIG handle (broadcast) */
	};
	union {
		u8  cis;	/* CIS ID */
		u8  bis;	/* BIS ID */
		u16 sync_handle;	/* PA sync handle (broadcast receiver) */
	};
	int count;	/* matches found by the iterator callback */
	struct {
		struct hci_cp_le_set_cig_params cp;
		struct hci_cis_params cis[0x11];	/* up to 0x11 CIS per CIG */
	} pdu;
};
794 
795 static void bis_list(struct hci_conn *conn, void *data)
796 {
797 	struct iso_list_data *d = data;
798 
799 	/* Skip if not broadcast/ANY address */
800 	if (bacmp(&conn->dst, BDADDR_ANY))
801 		return;
802 
803 	if (d->big != conn->iso_qos.bcast.big || d->bis == BT_ISO_QOS_BIS_UNSET ||
804 	    d->bis != conn->iso_qos.bcast.bis)
805 		return;
806 
807 	d->count++;
808 }
809 
810 static void find_bis(struct hci_conn *conn, void *data)
811 {
812 	struct iso_list_data *d = data;
813 
814 	/* Ignore unicast */
815 	if (bacmp(&conn->dst, BDADDR_ANY))
816 		return;
817 
818 	d->count++;
819 }
820 
/* hci_cmd_sync callback: remove the advertising instance backing a BIS
 * and terminate the BIG if no other BIS connections remain.
 */
static int terminate_big_sync(struct hci_dev *hdev, void *data)
{
	struct iso_list_data *d = data;

	bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", d->big, d->bis);

	hci_remove_ext_adv_instance_sync(hdev, d->bis, NULL);

	/* Check if ISO connection is a BIS and terminate BIG if there are
	 * no other connections using it.
	 */
	hci_conn_hash_list_state(hdev, find_bis, ISO_LINK, BT_CONNECTED, d);
	if (d->count)
		return 0;

	return hci_le_terminate_big_sync(hdev, d->big,
					 HCI_ERROR_LOCAL_HOST_TERM);
}
839 
/* hci_cmd_sync destroy callback: free the iso_list_data allocated by
 * hci_le_terminate_big()/hci_le_big_terminate(); @err is ignored.
 */
static void terminate_big_destroy(struct hci_dev *hdev, void *data, int err)
{
	kfree(data);
}
844 
845 static int hci_le_terminate_big(struct hci_dev *hdev, u8 big, u8 bis)
846 {
847 	struct iso_list_data *d;
848 	int ret;
849 
850 	bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", big, bis);
851 
852 	d = kzalloc(sizeof(*d), GFP_KERNEL);
853 	if (!d)
854 		return -ENOMEM;
855 
856 	d->big = big;
857 	d->bis = bis;
858 
859 	ret = hci_cmd_sync_queue(hdev, terminate_big_sync, d,
860 				 terminate_big_destroy);
861 	if (ret)
862 		kfree(d);
863 
864 	return ret;
865 }
866 
/* hci_cmd_sync callback (broadcast receiver side): terminate BIG sync
 * and the associated PA sync once no other BIS connections remain.
 */
static int big_terminate_sync(struct hci_dev *hdev, void *data)
{
	struct iso_list_data *d = data;

	bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", d->big,
		   d->sync_handle);

	/* Check if ISO connection is a BIS and terminate BIG if there are
	 * no other connections using it.
	 */
	hci_conn_hash_list_state(hdev, find_bis, ISO_LINK, BT_CONNECTED, d);
	if (d->count)
		return 0;

	hci_le_big_terminate_sync(hdev, d->big);

	return hci_le_pa_terminate_sync(hdev, d->sync_handle);
}
885 
886 static int hci_le_big_terminate(struct hci_dev *hdev, u8 big, u16 sync_handle)
887 {
888 	struct iso_list_data *d;
889 	int ret;
890 
891 	bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", big, sync_handle);
892 
893 	d = kzalloc(sizeof(*d), GFP_KERNEL);
894 	if (!d)
895 		return -ENOMEM;
896 
897 	d->big = big;
898 	d->sync_handle = sync_handle;
899 
900 	ret = hci_cmd_sync_queue(hdev, big_terminate_sync, d,
901 				 terminate_big_destroy);
902 	if (ret)
903 		kfree(d);
904 
905 	return ret;
906 }
907 
/* Cleanup BIS connection
 *
 * Detects if there are any BIS left connected in a BIG.
 * broadcaster: Remove advertising instance and terminate BIG.
 * broadcast receiver: Terminate BIG sync and terminate PA sync.
 */
static void bis_cleanup(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	bt_dev_dbg(hdev, "conn %p", conn);

	if (conn->role == HCI_ROLE_MASTER) {
		/* Only broadcasters with periodic advertising active
		 * need to terminate the BIG.
		 */
		if (!test_and_clear_bit(HCI_CONN_PER_ADV, &conn->flags))
			return;

		hci_le_terminate_big(hdev, conn->iso_qos.bcast.big,
				     conn->iso_qos.bcast.bis);
	} else {
		hci_le_big_terminate(hdev, conn->iso_qos.bcast.big,
				     conn->sync_handle);
	}
}
931 
/* hci_cmd_sync callback: the CIG handle is smuggled inside the data
 * pointer via ERR_PTR() (avoiding an allocation); unpack with PTR_ERR().
 */
static int remove_cig_sync(struct hci_dev *hdev, void *data)
{
	u8 handle = PTR_ERR(data);

	return hci_le_remove_cig_sync(hdev, handle);
}
938 
/* Queue removal of CIG @handle; the handle rides in the callback data
 * pointer via ERR_PTR() (see remove_cig_sync()).
 */
static int hci_le_remove_cig(struct hci_dev *hdev, u8 handle)
{
	bt_dev_dbg(hdev, "handle 0x%2.2x", handle);

	return hci_cmd_sync_queue(hdev, remove_cig_sync, ERR_PTR(handle), NULL);
}
945 
946 static void find_cis(struct hci_conn *conn, void *data)
947 {
948 	struct iso_list_data *d = data;
949 
950 	/* Ignore broadcast */
951 	if (!bacmp(&conn->dst, BDADDR_ANY))
952 		return;
953 
954 	d->count++;
955 }
956 
/* Cleanup CIS connection:
 *
 * Detects if there are any CIS left connected in a CIG and if not,
 * removes the CIG.
 */
static void cis_cleanup(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct iso_list_data d;

	memset(&d, 0, sizeof(d));
	d.cig = conn->iso_qos.ucast.cig;

	/* Check if ISO connection is a CIS and remove CIG if there are
	 * no other connections using it.
	 */
	hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_CONNECTED, &d);
	if (d.count)
		return;

	hci_le_remove_cig(hdev, conn->iso_qos.ucast.cig);
}
978 
/* Allocate and initialise a new hci_conn of @type to @dst with the given
 * @role, register it in the connection hash and sysfs, and take a
 * reference on @hdev (both dropped in hci_conn_cleanup()).
 *
 * Returns the new connection or NULL on allocation failure.
 */
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
			      u8 role)
{
	struct hci_conn *conn;

	BT_DBG("%s dst %pMR", hdev->name, dst);

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn)
		return NULL;

	bacpy(&conn->dst, dst);
	bacpy(&conn->src, &hdev->bdaddr);
	/* The real handle is assigned once the controller confirms it */
	conn->handle = HCI_CONN_HANDLE_UNSET;
	conn->hdev  = hdev;
	conn->type  = type;
	conn->role  = role;
	conn->mode  = HCI_CM_ACTIVE;
	conn->state = BT_OPEN;
	conn->auth_type = HCI_AT_GENERAL_BONDING;
	conn->io_capability = hdev->io_capability;
	conn->remote_auth = 0xff;
	conn->key_type = 0xff;
	conn->rssi = HCI_RSSI_INVALID;
	conn->tx_power = HCI_TX_POWER_INVALID;
	conn->max_tx_power = HCI_TX_POWER_INVALID;

	set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;

	/* Set Default Authenticated payload timeout to 30s */
	conn->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;

	if (conn->role == HCI_ROLE_MASTER)
		conn->out = true;

	/* Per-type packet-type/source-address/cleanup setup */
	switch (type) {
	case ACL_LINK:
		conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
		break;
	case LE_LINK:
		/* conn->src should reflect the local identity address */
		hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
		break;
	case ISO_LINK:
		/* conn->src should reflect the local identity address */
		hci_copy_identity_address(hdev, &conn->src, &conn->src_type);

		/* set proper cleanup function */
		if (!bacmp(dst, BDADDR_ANY))
			conn->cleanup = bis_cleanup;
		else if (conn->role == HCI_ROLE_MASTER)
			conn->cleanup = cis_cleanup;

		break;
	case SCO_LINK:
		if (lmp_esco_capable(hdev))
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
		else
			conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
		break;
	case ESCO_LINK:
		conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
		break;
	}

	skb_queue_head_init(&conn->data_q);

	INIT_LIST_HEAD(&conn->chan_list);
	INIT_LIST_HEAD(&conn->link_list);

	INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
	INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
	INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
	INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);
	INIT_WORK(&conn->le_scan_cleanup, le_scan_cleanup);

	atomic_set(&conn->refcnt, 0);

	hci_dev_hold(hdev);

	hci_conn_hash_add(hdev, conn);

	/* The SCO and eSCO connections will only be notified when their
	 * setup has been completed. This is different to ACL links which
	 * can be notified right away.
	 */
	if (conn->type != SCO_LINK && conn->type != ESCO_LINK) {
		if (hdev->notify)
			hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);
	}

	hci_conn_init_sysfs(conn);

	return conn;
}
1076 
/* Detach a connection from its parent/child links.
 *
 * When @conn has no parent it is itself a parent (ACL/LE) connection
 * and every child on its link_list is unlinked recursively.  Otherwise
 * the parent reference is dropped and the hci_link removed from the
 * parent's list (RCU-safe) and freed.
 *
 * NOTE(review): appears to run under hdev->lock — confirm at call
 * sites.
 */
static void hci_conn_unlink(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	bt_dev_dbg(hdev, "hcon %p", conn);

	if (!conn->parent) {
		struct hci_link *link, *t;

		/* Parent connection: recurse into every child link. */
		list_for_each_entry_safe(link, t, &conn->link_list, list)
			hci_conn_unlink(link->conn);

		return;
	}

	/* Already unlinked: nothing to do. */
	if (!conn->link)
		return;

	hci_conn_put(conn->parent);
	conn->parent = NULL;

	/* Remove from the parent's list before freeing the link. */
	list_del_rcu(&conn->link->list);
	synchronize_rcu();

	kfree(conn->link);
	conn->link = NULL;

	/* Due to race, SCO connection might be not established
	 * yet at this point. Delete it now, otherwise it is
	 * possible for it to be stuck and can't be deleted.
	 */
	if (conn->handle == HCI_CONN_HANDLE_UNSET)
		hci_conn_del(conn);
}
1111 
/* Tear down a connection object: cancel its pending work, return any
 * unacked packet credits to the per-device counters, unlink child or
 * parent connections and finally remove it from the connection hash
 * via hci_conn_cleanup().
 *
 * Always returns 0.
 */
int hci_conn_del(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);

	/* Stop all deferred work that still references this conn. */
	cancel_delayed_work_sync(&conn->disc_work);
	cancel_delayed_work_sync(&conn->auto_accept_work);
	cancel_delayed_work_sync(&conn->idle_work);

	if (conn->type == ACL_LINK) {
		/* Unlink all SCO/eSCO/ISO children of this ACL. */
		hci_conn_unlink(conn);
		/* Unacked frames */
		hdev->acl_cnt += conn->sent;
	} else if (conn->type == LE_LINK) {
		cancel_delayed_work(&conn->le_conn_timeout);

		/* LE shares the ACL credit pool when the controller
		 * reports no dedicated LE buffers.
		 */
		if (hdev->le_pkts)
			hdev->le_cnt += conn->sent;
		else
			hdev->acl_cnt += conn->sent;
	} else {
		struct hci_conn *acl = conn->parent;

		/* Child (SCO/eSCO/ISO) connection: detach from the
		 * parent and release the reference held on it.
		 */
		if (acl) {
			hci_conn_unlink(conn);
			hci_conn_drop(acl);
		}

		/* Unacked ISO frames */
		if (conn->type == ISO_LINK) {
			if (hdev->iso_pkts)
				hdev->iso_cnt += conn->sent;
			else if (hdev->le_pkts)
				hdev->le_cnt += conn->sent;
			else
				hdev->acl_cnt += conn->sent;
		}
	}

	if (conn->amp_mgr)
		amp_mgr_put(conn->amp_mgr);

	skb_queue_purge(&conn->data_q);

	/* Remove the connection from the list and cleanup its remaining
	 * state. This is a separate function since for some cases like
	 * BT_CONNECT_SCAN we *only* want the cleanup part without the
	 * rest of hci_conn_del.
	 */
	hci_conn_cleanup(conn);

	return 0;
}
1166 
1167 struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, uint8_t src_type)
1168 {
1169 	int use_src = bacmp(src, BDADDR_ANY);
1170 	struct hci_dev *hdev = NULL, *d;
1171 
1172 	BT_DBG("%pMR -> %pMR", src, dst);
1173 
1174 	read_lock(&hci_dev_list_lock);
1175 
1176 	list_for_each_entry(d, &hci_dev_list, list) {
1177 		if (!test_bit(HCI_UP, &d->flags) ||
1178 		    hci_dev_test_flag(d, HCI_USER_CHANNEL) ||
1179 		    d->dev_type != HCI_PRIMARY)
1180 			continue;
1181 
1182 		/* Simple routing:
1183 		 *   No source address - find interface with bdaddr != dst
1184 		 *   Source address    - find interface with bdaddr == src
1185 		 */
1186 
1187 		if (use_src) {
1188 			bdaddr_t id_addr;
1189 			u8 id_addr_type;
1190 
1191 			if (src_type == BDADDR_BREDR) {
1192 				if (!lmp_bredr_capable(d))
1193 					continue;
1194 				bacpy(&id_addr, &d->bdaddr);
1195 				id_addr_type = BDADDR_BREDR;
1196 			} else {
1197 				if (!lmp_le_capable(d))
1198 					continue;
1199 
1200 				hci_copy_identity_address(d, &id_addr,
1201 							  &id_addr_type);
1202 
1203 				/* Convert from HCI to three-value type */
1204 				if (id_addr_type == ADDR_LE_DEV_PUBLIC)
1205 					id_addr_type = BDADDR_LE_PUBLIC;
1206 				else
1207 					id_addr_type = BDADDR_LE_RANDOM;
1208 			}
1209 
1210 			if (!bacmp(&id_addr, src) && id_addr_type == src_type) {
1211 				hdev = d; break;
1212 			}
1213 		} else {
1214 			if (bacmp(&d->bdaddr, dst)) {
1215 				hdev = d; break;
1216 			}
1217 		}
1218 	}
1219 
1220 	if (hdev)
1221 		hdev = hci_dev_hold(hdev);
1222 
1223 	read_unlock(&hci_dev_list_lock);
1224 	return hdev;
1225 }
1226 EXPORT_SYMBOL(hci_get_route);
1227 
1228 /* This function requires the caller holds hdev->lock */
1229 static void hci_le_conn_failed(struct hci_conn *conn, u8 status)
1230 {
1231 	struct hci_dev *hdev = conn->hdev;
1232 
1233 	hci_connect_le_scan_cleanup(conn, status);
1234 
1235 	/* Enable advertising in case this was a failed connection
1236 	 * attempt as a peripheral.
1237 	 */
1238 	hci_enable_advertising(hdev);
1239 }
1240 
1241 /* This function requires the caller holds hdev->lock */
1242 void hci_conn_failed(struct hci_conn *conn, u8 status)
1243 {
1244 	struct hci_dev *hdev = conn->hdev;
1245 
1246 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
1247 
1248 	switch (conn->type) {
1249 	case LE_LINK:
1250 		hci_le_conn_failed(conn, status);
1251 		break;
1252 	case ACL_LINK:
1253 		mgmt_connect_failed(hdev, &conn->dst, conn->type,
1254 				    conn->dst_type, status);
1255 		break;
1256 	}
1257 
1258 	conn->state = BT_CLOSED;
1259 	hci_connect_cfm(conn, status);
1260 	hci_conn_del(conn);
1261 }
1262 
/* Completion callback for the queued hci_connect_le_sync() request.
 *
 * On success only the scan/params cleanup runs; on failure the
 * connection is failed and deleted — but only if it is still the
 * pending LE connect attempt (it may have been handled already).
 */
static void create_le_conn_complete(struct hci_dev *hdev, void *data, int err)
{
	struct hci_conn *conn = data;

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	if (!err) {
		hci_connect_le_scan_cleanup(conn, 0x00);
		goto done;
	}

	/* Check if connection is still pending */
	if (conn != hci_lookup_le_connect(hdev))
		goto done;

	/* Flush to make sure we send create conn cancel command if needed */
	flush_delayed_work(&conn->le_conn_timeout);
	hci_conn_failed(conn, bt_status(err));

done:
	hci_dev_unlock(hdev);
}
1287 
/* hci_cmd_sync_queue() callback: issue the LE Create Connection for the
 * hci_conn passed as @data in the cmd_sync context.
 */
static int hci_connect_le_sync(struct hci_dev *hdev, void *data)
{
	bt_dev_dbg(hdev, "conn %p", data);

	return hci_le_create_conn_sync(hdev, data);
}
1296 
/* Create a direct LE connection to @dst (as opposed to the scan-based
 * hci_connect_le_scan()).
 *
 * Returns the connection object with a reference held, or an ERR_PTR:
 *   -ECONNREFUSED / -EOPNOTSUPP  LE disabled / not supported
 *   -EBUSY   another LE connect attempt is running, or the connection
 *            already exists and is past the scanning state
 *   -ENOMEM  allocation failure
 *
 * NOTE(review): appears to require hdev->lock held by the caller —
 * confirm at call sites.
 */
struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
				u8 dst_type, bool dst_resolved, u8 sec_level,
				u16 conn_timeout, u8 role)
{
	struct hci_conn *conn;
	struct smp_irk *irk;
	int err;

	/* Let's make sure that le is enabled.*/
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (lmp_le_capable(hdev))
			return ERR_PTR(-ECONNREFUSED);

		return ERR_PTR(-EOPNOTSUPP);
	}

	/* Since the controller supports only one LE connection attempt at a
	 * time, we return -EBUSY if there is any connection attempt running.
	 */
	if (hci_lookup_le_connect(hdev))
		return ERR_PTR(-EBUSY);

	/* If there's already a connection object but it's not in
	 * scanning state it means it must already be established, in
	 * which case we can't do anything else except report a failure
	 * to connect.
	 *
	 * Note: this lookup deliberately uses the caller-supplied @dst,
	 * before any RPA substitution below.
	 */
	conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
	if (conn && !test_bit(HCI_CONN_SCANNING, &conn->flags)) {
		return ERR_PTR(-EBUSY);
	}

	/* Check if the destination address has been resolved by the controller
	 * since if it did then the identity address shall be used.
	 */
	if (!dst_resolved) {
		/* When given an identity address with existing identity
		 * resolving key, the connection needs to be established
		 * to a resolvable random address.
		 *
		 * Storing the resolvable random address is required here
		 * to handle connection failures. The address will later
		 * be resolved back into the original identity address
		 * from the connect request.
		 */
		irk = hci_find_irk_by_addr(hdev, dst, dst_type);
		if (irk && bacmp(&irk->rpa, BDADDR_ANY)) {
			dst = &irk->rpa;
			dst_type = ADDR_LE_DEV_RANDOM;
		}
	}

	if (conn) {
		/* Reuse the still-scanning conn, updating its target. */
		bacpy(&conn->dst, dst);
	} else {
		conn = hci_conn_add(hdev, LE_LINK, dst, role);
		if (!conn)
			return ERR_PTR(-ENOMEM);
		hci_conn_hold(conn);
		conn->pending_sec_level = sec_level;
	}

	conn->dst_type = dst_type;
	conn->sec_level = BT_SECURITY_LOW;
	conn->conn_timeout = conn_timeout;

	conn->state = BT_CONNECT;
	clear_bit(HCI_CONN_SCANNING, &conn->flags);

	/* Queue the actual Create Connection in the cmd_sync context. */
	err = hci_cmd_sync_queue(hdev, hci_connect_le_sync, conn,
				 create_le_conn_complete);
	if (err) {
		hci_conn_del(conn);
		return ERR_PTR(err);
	}

	return conn;
}
1375 
1376 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
1377 {
1378 	struct hci_conn *conn;
1379 
1380 	conn = hci_conn_hash_lookup_le(hdev, addr, type);
1381 	if (!conn)
1382 		return false;
1383 
1384 	if (conn->state != BT_CONNECTED)
1385 		return false;
1386 
1387 	return true;
1388 }
1389 
1390 /* This function requires the caller holds hdev->lock */
1391 static int hci_explicit_conn_params_set(struct hci_dev *hdev,
1392 					bdaddr_t *addr, u8 addr_type)
1393 {
1394 	struct hci_conn_params *params;
1395 
1396 	if (is_connected(hdev, addr, addr_type))
1397 		return -EISCONN;
1398 
1399 	params = hci_conn_params_lookup(hdev, addr, addr_type);
1400 	if (!params) {
1401 		params = hci_conn_params_add(hdev, addr, addr_type);
1402 		if (!params)
1403 			return -ENOMEM;
1404 
1405 		/* If we created new params, mark them to be deleted in
1406 		 * hci_connect_le_scan_cleanup. It's different case than
1407 		 * existing disabled params, those will stay after cleanup.
1408 		 */
1409 		params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
1410 	}
1411 
1412 	/* We're trying to connect, so make sure params are at pend_le_conns */
1413 	if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
1414 	    params->auto_connect == HCI_AUTO_CONN_REPORT ||
1415 	    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
1416 		list_del_init(&params->action);
1417 		list_add(&params->action, &hdev->pend_le_conns);
1418 	}
1419 
1420 	params->explicit_connect = true;
1421 
1422 	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
1423 	       params->auto_connect);
1424 
1425 	return 0;
1426 }
1427 
1428 static int qos_set_big(struct hci_dev *hdev, struct bt_iso_qos *qos)
1429 {
1430 	struct iso_list_data data;
1431 
1432 	/* Allocate a BIG if not set */
1433 	if (qos->bcast.big == BT_ISO_QOS_BIG_UNSET) {
1434 		for (data.big = 0x00; data.big < 0xef; data.big++) {
1435 			data.count = 0;
1436 			data.bis = 0xff;
1437 
1438 			hci_conn_hash_list_state(hdev, bis_list, ISO_LINK,
1439 						 BT_BOUND, &data);
1440 			if (!data.count)
1441 				break;
1442 		}
1443 
1444 		if (data.big == 0xef)
1445 			return -EADDRNOTAVAIL;
1446 
1447 		/* Update BIG */
1448 		qos->bcast.big = data.big;
1449 	}
1450 
1451 	return 0;
1452 }
1453 
1454 static int qos_set_bis(struct hci_dev *hdev, struct bt_iso_qos *qos)
1455 {
1456 	struct iso_list_data data;
1457 
1458 	/* Allocate BIS if not set */
1459 	if (qos->bcast.bis == BT_ISO_QOS_BIS_UNSET) {
1460 		/* Find an unused adv set to advertise BIS, skip instance 0x00
1461 		 * since it is reserved as general purpose set.
1462 		 */
1463 		for (data.bis = 0x01; data.bis < hdev->le_num_of_adv_sets;
1464 		     data.bis++) {
1465 			data.count = 0;
1466 
1467 			hci_conn_hash_list_state(hdev, bis_list, ISO_LINK,
1468 						 BT_BOUND, &data);
1469 			if (!data.count)
1470 				break;
1471 		}
1472 
1473 		if (data.bis == hdev->le_num_of_adv_sets)
1474 			return -EADDRNOTAVAIL;
1475 
1476 		/* Update BIS */
1477 		qos->bcast.bis = data.bis;
1478 	}
1479 
1480 	return 0;
1481 }
1482 
/* Allocate a broadcast ISO (BIS) connection object for @dst, reserving
 * BIG/BIS identifiers and rejecting duplicates.
 *
 * This function requires the caller holds hdev->lock
 */
static struct hci_conn *hci_add_bis(struct hci_dev *hdev, bdaddr_t *dst,
				    struct bt_iso_qos *qos)
{
	struct hci_conn *conn;
	struct iso_list_data data;
	int err;

	/* Let's make sure that le is enabled.*/
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (lmp_le_capable(hdev))
			return ERR_PTR(-ECONNREFUSED);
		return ERR_PTR(-EOPNOTSUPP);
	}

	/* Allocate BIG and BIS identifiers if still unset. */
	err = qos_set_big(hdev, qos);
	if (err)
		return ERR_PTR(err);

	err = qos_set_bis(hdev, qos);
	if (err)
		return ERR_PTR(err);

	data.big = qos->bcast.big;
	data.bis = qos->bcast.bis;
	data.count = 0;

	/* Check if there is already a matching BIG/BIS */
	hci_conn_hash_list_state(hdev, bis_list, ISO_LINK, BT_BOUND, &data);
	if (data.count)
		return ERR_PTR(-EADDRINUSE);

	conn = hci_conn_hash_lookup_bis(hdev, dst, qos->bcast.big, qos->bcast.bis);
	if (conn)
		return ERR_PTR(-EADDRINUSE);

	conn = hci_conn_add(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
	if (!conn)
		return ERR_PTR(-ENOMEM);

	/* Mark as carrying periodic advertising for the BIS. */
	set_bit(HCI_CONN_PER_ADV, &conn->flags);
	conn->state = BT_CONNECT;

	hci_conn_hold(conn);
	return conn;
}
1529 
/* Establish an LE connection via passive scanning: instead of issuing
 * Create Connection directly, the peer is placed on pend_le_conns and
 * connected once it is seen advertising.
 *
 * Returns the conn with a reference held, or ERR_PTR(-ECONNREFUSED /
 * -EOPNOTSUPP / -ENOMEM / -EBUSY).
 *
 * This function requires the caller holds hdev->lock
 */
struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
				     u8 dst_type, u8 sec_level,
				     u16 conn_timeout,
				     enum conn_reasons conn_reason)
{
	struct hci_conn *conn;

	/* Let's make sure that le is enabled.*/
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (lmp_le_capable(hdev))
			return ERR_PTR(-ECONNREFUSED);

		return ERR_PTR(-EOPNOTSUPP);
	}

	/* Some devices send ATT messages as soon as the physical link is
	 * established. To be able to handle these ATT messages, the user-
	 * space first establishes the connection and then starts the pairing
	 * process.
	 *
	 * So if a hci_conn object already exists for the following connection
	 * attempt, we simply update pending_sec_level and auth_type fields
	 * and return the object found.
	 */
	conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
	if (conn) {
		if (conn->pending_sec_level < sec_level)
			conn->pending_sec_level = sec_level;
		goto done;
	}

	BT_DBG("requesting refresh of dst_addr");

	conn = hci_conn_add(hdev, LE_LINK, dst, HCI_ROLE_MASTER);
	if (!conn)
		return ERR_PTR(-ENOMEM);

	/* Put the peer on pend_le_conns; fails if already connected. */
	if (hci_explicit_conn_params_set(hdev, dst, dst_type) < 0) {
		hci_conn_del(conn);
		return ERR_PTR(-EBUSY);
	}

	conn->state = BT_CONNECT;
	set_bit(HCI_CONN_SCANNING, &conn->flags);
	conn->dst_type = dst_type;
	conn->sec_level = BT_SECURITY_LOW;
	conn->pending_sec_level = sec_level;
	conn->conn_timeout = conn_timeout;
	conn->conn_reason = conn_reason;

	/* Kick passive scanning so the new pending entry takes effect. */
	hci_update_passive_scan(hdev);

done:
	hci_conn_hold(conn);
	return conn;
}
1587 
1588 struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
1589 				 u8 sec_level, u8 auth_type,
1590 				 enum conn_reasons conn_reason)
1591 {
1592 	struct hci_conn *acl;
1593 
1594 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1595 		if (lmp_bredr_capable(hdev))
1596 			return ERR_PTR(-ECONNREFUSED);
1597 
1598 		return ERR_PTR(-EOPNOTSUPP);
1599 	}
1600 
1601 	acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
1602 	if (!acl) {
1603 		acl = hci_conn_add(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
1604 		if (!acl)
1605 			return ERR_PTR(-ENOMEM);
1606 	}
1607 
1608 	hci_conn_hold(acl);
1609 
1610 	acl->conn_reason = conn_reason;
1611 	if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
1612 		acl->sec_level = BT_SECURITY_LOW;
1613 		acl->pending_sec_level = sec_level;
1614 		acl->auth_type = auth_type;
1615 		hci_acl_create_connection(acl);
1616 	}
1617 
1618 	return acl;
1619 }
1620 
1621 static struct hci_link *hci_conn_link(struct hci_conn *parent,
1622 				      struct hci_conn *conn)
1623 {
1624 	struct hci_dev *hdev = parent->hdev;
1625 	struct hci_link *link;
1626 
1627 	bt_dev_dbg(hdev, "parent %p hcon %p", parent, conn);
1628 
1629 	if (conn->link)
1630 		return conn->link;
1631 
1632 	if (conn->parent)
1633 		return NULL;
1634 
1635 	link = kzalloc(sizeof(*link), GFP_KERNEL);
1636 	if (!link)
1637 		return NULL;
1638 
1639 	link->conn = hci_conn_hold(conn);
1640 	conn->link = link;
1641 	conn->parent = hci_conn_get(parent);
1642 
1643 	/* Use list_add_tail_rcu append to the list */
1644 	list_add_tail_rcu(&link->list, &parent->link_list);
1645 
1646 	return link;
1647 }
1648 
/* Set up a (e)SCO connection to @dst: create/reuse the underlying ACL,
 * create/reuse the SCO conn, link them and start SCO setup once the
 * ACL is connected (deferred while a mode change is pending).
 *
 * Returns the SCO conn, an ERR_PTR propagated from the ACL setup,
 * ERR_PTR(-ENOMEM), or NULL when linking fails.
 * NOTE(review): failure paths mix NULL and ERR_PTR — callers must
 * handle both; confirm.
 */
struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
				 __u16 setting, struct bt_codec *codec)
{
	struct hci_conn *acl;
	struct hci_conn *sco;
	struct hci_link *link;

	acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING,
			      CONN_REASON_SCO_CONNECT);
	if (IS_ERR(acl))
		return acl;

	sco = hci_conn_hash_lookup_ba(hdev, type, dst);
	if (!sco) {
		sco = hci_conn_add(hdev, type, dst, HCI_ROLE_MASTER);
		if (!sco) {
			hci_conn_drop(acl);
			return ERR_PTR(-ENOMEM);
		}
	}

	/* Attach the SCO conn as a child of the ACL. */
	link = hci_conn_link(acl, sco);
	if (!link) {
		hci_conn_drop(acl);
		hci_conn_drop(sco);
		return NULL;
	}

	sco->setting = setting;
	sco->codec = *codec;

	if (acl->state == BT_CONNECTED &&
	    (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
		set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
		hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);

		if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
			/* defer SCO setup until mode change completed */
			set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
			return sco;
		}

		hci_sco_setup(acl, 0x00);
	}

	return sco;
}
1696 
1697 static void cis_add(struct iso_list_data *d, struct bt_iso_qos *qos)
1698 {
1699 	struct hci_cis_params *cis = &d->pdu.cis[d->pdu.cp.num_cis];
1700 
1701 	cis->cis_id = qos->ucast.cis;
1702 	cis->c_sdu  = cpu_to_le16(qos->ucast.out.sdu);
1703 	cis->p_sdu  = cpu_to_le16(qos->ucast.in.sdu);
1704 	cis->c_phy  = qos->ucast.out.phy ? qos->ucast.out.phy : qos->ucast.in.phy;
1705 	cis->p_phy  = qos->ucast.in.phy ? qos->ucast.in.phy : qos->ucast.out.phy;
1706 	cis->c_rtn  = qos->ucast.out.rtn;
1707 	cis->p_rtn  = qos->ucast.in.rtn;
1708 
1709 	d->pdu.cp.num_cis++;
1710 }
1711 
1712 static void cis_list(struct hci_conn *conn, void *data)
1713 {
1714 	struct iso_list_data *d = data;
1715 
1716 	/* Skip if broadcast/ANY address */
1717 	if (!bacmp(&conn->dst, BDADDR_ANY))
1718 		return;
1719 
1720 	if (d->cig != conn->iso_qos.ucast.cig || d->cis == BT_ISO_QOS_CIS_UNSET ||
1721 	    d->cis != conn->iso_qos.ucast.cis)
1722 		return;
1723 
1724 	d->count++;
1725 
1726 	if (d->pdu.cp.cig_id == BT_ISO_QOS_CIG_UNSET ||
1727 	    d->count >= ARRAY_SIZE(d->pdu.cis))
1728 		return;
1729 
1730 	cis_add(d, &conn->iso_qos);
1731 }
1732 
/* Send HCI_OP_LE_CREATE_BIG for a single BIS using the broadcast QoS
 * of @qos.  The command struct is wire format: fields are assigned
 * explicitly and everything else left zeroed by the memset.
 */
static int hci_le_create_big(struct hci_conn *conn, struct bt_iso_qos *qos)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_le_create_big cp;

	memset(&cp, 0, sizeof(cp));

	cp.handle = qos->bcast.big;
	cp.adv_handle = qos->bcast.bis;
	cp.num_bis  = 0x01;
	hci_cpu_to_le24(qos->bcast.out.interval, cp.bis.sdu_interval);
	cp.bis.sdu = cpu_to_le16(qos->bcast.out.sdu);
	cp.bis.latency =  cpu_to_le16(qos->bcast.out.latency);
	cp.bis.rtn  = qos->bcast.out.rtn;
	cp.bis.phy  = qos->bcast.out.phy;
	cp.bis.packing = qos->bcast.packing;
	cp.bis.framing = qos->bcast.framing;
	cp.bis.encryption = qos->bcast.encryption;
	memcpy(cp.bis.bcode, qos->bcast.bcode, sizeof(cp.bis.bcode));

	return hci_send_cmd(hdev, HCI_OP_LE_CREATE_BIG, sizeof(cp), &cp);
}
1755 
/* Build and send HCI_OP_LE_SET_CIG_PARAMS for the CIG/CIS in @qos.
 *
 * Allocates a free CIG id when unset (one with no CIS in BT_BOUND or
 * BT_CONNECTED state), allocates a CIS id when unset, and collects all
 * CIS belonging to the CIG into the PDU (via cis_list/cis_add) so they
 * are reprogrammed together.  Returns true when the command was sent.
 */
static bool hci_le_set_cig_params(struct hci_conn *conn, struct bt_iso_qos *qos)
{
	struct hci_dev *hdev = conn->hdev;
	struct iso_list_data data;

	memset(&data, 0, sizeof(data));

	/* Allocate a CIG if not set */
	if (qos->ucast.cig == BT_ISO_QOS_CIG_UNSET) {
		for (data.cig = 0x00; data.cig < 0xff; data.cig++) {
			data.count = 0;
			data.cis = 0xff;

			/* A CIG id is free only if no CIS of that CIG is
			 * bound or connected.
			 */
			hci_conn_hash_list_state(hdev, cis_list, ISO_LINK,
						 BT_BOUND, &data);
			if (data.count)
				continue;

			hci_conn_hash_list_state(hdev, cis_list, ISO_LINK,
						 BT_CONNECTED, &data);
			if (!data.count)
				break;
		}

		if (data.cig == 0xff)
			return false;

		/* Update CIG */
		qos->ucast.cig = data.cig;
	}

	data.pdu.cp.cig_id = qos->ucast.cig;
	hci_cpu_to_le24(qos->ucast.out.interval, data.pdu.cp.c_interval);
	hci_cpu_to_le24(qos->ucast.in.interval, data.pdu.cp.p_interval);
	data.pdu.cp.sca = qos->ucast.sca;
	data.pdu.cp.packing = qos->ucast.packing;
	data.pdu.cp.framing = qos->ucast.framing;
	data.pdu.cp.c_latency = cpu_to_le16(qos->ucast.out.latency);
	data.pdu.cp.p_latency = cpu_to_le16(qos->ucast.in.latency);

	if (qos->ucast.cis != BT_ISO_QOS_CIS_UNSET) {
		data.count = 0;
		data.cig = qos->ucast.cig;
		data.cis = qos->ucast.cis;

		/* The requested CIS id must not already be bound. */
		hci_conn_hash_list_state(hdev, cis_list, ISO_LINK, BT_BOUND,
					 &data);
		if (data.count)
			return false;

		cis_add(&data, qos);
	}

	/* Reprogram all CIS(s) with the same CIG */
	for (data.cig = qos->ucast.cig, data.cis = 0x00; data.cis < 0x11;
	     data.cis++) {
		data.count = 0;

		hci_conn_hash_list_state(hdev, cis_list, ISO_LINK, BT_BOUND,
					 &data);
		if (data.count)
			continue;

		/* Allocate a CIS if not set */
		if (qos->ucast.cis == BT_ISO_QOS_CIS_UNSET) {
			/* Update CIS */
			qos->ucast.cis = data.cis;
			cis_add(&data, qos);
		}
	}

	if (qos->ucast.cis == BT_ISO_QOS_CIS_UNSET || !data.pdu.cp.num_cis)
		return false;

	/* Variable-length command: header plus num_cis CIS entries. */
	if (hci_send_cmd(hdev, HCI_OP_LE_SET_CIG_PARAMS,
			 sizeof(data.pdu.cp) +
			 (data.pdu.cp.num_cis * sizeof(*data.pdu.cis)),
			 &data.pdu) < 0)
		return false;

	return true;
}
1838 
/* Look up or create the CIS connection object for @dst and bind it to
 * the given unicast QoS: mirror unset interval/latency values between
 * the two directions, program the CIG parameters and move the conn to
 * BT_BOUND.
 *
 * Returns the CIS conn, or ERR_PTR(-ENOMEM / -EINVAL).  An already
 * connected CIS, or one already bound with identical QoS, is returned
 * unchanged.
 */
struct hci_conn *hci_bind_cis(struct hci_dev *hdev, bdaddr_t *dst,
			      __u8 dst_type, struct bt_iso_qos *qos)
{
	struct hci_conn *cis;

	cis = hci_conn_hash_lookup_cis(hdev, dst, dst_type, qos->ucast.cig,
				       qos->ucast.cis);
	if (!cis) {
		cis = hci_conn_add(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
		if (!cis)
			return ERR_PTR(-ENOMEM);
		cis->cleanup = cis_cleanup;
		cis->dst_type = dst_type;
	}

	if (cis->state == BT_CONNECTED)
		return cis;

	/* Check if CIS has been set and the settings matches */
	if (cis->state == BT_BOUND &&
	    !memcmp(&cis->iso_qos, qos, sizeof(*qos)))
		return cis;

	/* Update LINK PHYs according to QoS preference */
	cis->le_tx_phy = qos->ucast.out.phy;
	cis->le_rx_phy = qos->ucast.in.phy;

	/* If output interval is not set use the input interval as it cannot be
	 * 0x000000.
	 */
	if (!qos->ucast.out.interval)
		qos->ucast.out.interval = qos->ucast.in.interval;

	/* If input interval is not set use the output interval as it cannot be
	 * 0x000000.
	 */
	if (!qos->ucast.in.interval)
		qos->ucast.in.interval = qos->ucast.out.interval;

	/* If output latency is not set use the input latency as it cannot be
	 * 0x0000.
	 */
	if (!qos->ucast.out.latency)
		qos->ucast.out.latency = qos->ucast.in.latency;

	/* If input latency is not set use the output latency as it cannot be
	 * 0x0000.
	 */
	if (!qos->ucast.in.latency)
		qos->ucast.in.latency = qos->ucast.out.latency;

	if (!hci_le_set_cig_params(cis, qos)) {
		hci_conn_drop(cis);
		return ERR_PTR(-EINVAL);
	}

	cis->iso_qos = *qos;
	cis->state = BT_BOUND;

	return cis;
}
1900 
1901 bool hci_iso_setup_path(struct hci_conn *conn)
1902 {
1903 	struct hci_dev *hdev = conn->hdev;
1904 	struct hci_cp_le_setup_iso_path cmd;
1905 
1906 	memset(&cmd, 0, sizeof(cmd));
1907 
1908 	if (conn->iso_qos.ucast.out.sdu) {
1909 		cmd.handle = cpu_to_le16(conn->handle);
1910 		cmd.direction = 0x00; /* Input (Host to Controller) */
1911 		cmd.path = 0x00; /* HCI path if enabled */
1912 		cmd.codec = 0x03; /* Transparent Data */
1913 
1914 		if (hci_send_cmd(hdev, HCI_OP_LE_SETUP_ISO_PATH, sizeof(cmd),
1915 				 &cmd) < 0)
1916 			return false;
1917 	}
1918 
1919 	if (conn->iso_qos.ucast.in.sdu) {
1920 		cmd.handle = cpu_to_le16(conn->handle);
1921 		cmd.direction = 0x01; /* Output (Controller to Host) */
1922 		cmd.path = 0x00; /* HCI path if enabled */
1923 		cmd.codec = 0x03; /* Transparent Data */
1924 
1925 		if (hci_send_cmd(hdev, HCI_OP_LE_SETUP_ISO_PATH, sizeof(cmd),
1926 				 &cmd) < 0)
1927 			return false;
1928 	}
1929 
1930 	return true;
1931 }
1932 
/* hci_cmd_sync_queue() callback: issue LE Create CIS for the hci_conn
 * passed as @data in the cmd_sync context.
 */
static int hci_create_cis_sync(struct hci_dev *hdev, void *data)
{
	return hci_le_create_cis_sync(hdev, data);
}
1937 
/* Queue LE Create CIS for @conn.
 *
 * For an LE (ACL) parent connection, walk its links and recurse into
 * every child CIS still in BT_BOUND state; for an ISO connection, queue
 * Create CIS for that CIS itself unless it is already connecting.
 *
 * Returns 0 on success, -EINVAL for inapplicable connections, or the
 * error from hci_cmd_sync_queue().
 */
int hci_le_create_cis(struct hci_conn *conn)
{
	struct hci_conn *cis;
	struct hci_link *link, *t;
	struct hci_dev *hdev = conn->hdev;
	int err;

	bt_dev_dbg(hdev, "hcon %p", conn);

	switch (conn->type) {
	case LE_LINK:
		if (conn->state != BT_CONNECTED || list_empty(&conn->link_list))
			return -EINVAL;

		cis = NULL;

		/* hci_conn_link uses list_add_tail_rcu so the list is in
		 * the same order as the connections are requested.
		 */
		list_for_each_entry_safe(link, t, &conn->link_list, list) {
			if (link->conn->state == BT_BOUND) {
				err = hci_le_create_cis(link->conn);
				if (err)
					return err;

				cis = link->conn;
			}
		}

		/* Fail if no bound CIS was found on this ACL. */
		return cis ? 0 : -EINVAL;
	case ISO_LINK:
		cis = conn;
		break;
	default:
		return -EINVAL;
	}

	/* Already connecting: nothing to queue. */
	if (cis->state == BT_CONNECT)
		return 0;

	/* Queue Create CIS */
	err = hci_cmd_sync_queue(hdev, hci_create_cis_sync, cis, NULL);
	if (err)
		return err;

	cis->state = BT_CONNECT;

	return 0;
}
1987 
/* Fill in unset fields of one ISO io QoS direction with defaults taken
 * from the controller MTUs and the LE ACL parameters of @conn.
 */
static void hci_iso_qos_setup(struct hci_dev *hdev, struct hci_conn *conn,
			      struct bt_iso_io_qos *qos, __u8 phy)
{
	/* Only set MTU if PHY is enabled.
	 * Note: this must stay before the PHY substitution below — it
	 * tests the caller-requested qos->phy (including BT_ISO_PHY_ANY),
	 * not the substituted ACL value.
	 */
	if (!qos->sdu && qos->phy) {
		/* Prefer the dedicated ISO MTU, then LE, then ACL. */
		if (hdev->iso_mtu > 0)
			qos->sdu = hdev->iso_mtu;
		else if (hdev->le_mtu > 0)
			qos->sdu = hdev->le_mtu;
		else
			qos->sdu = hdev->acl_mtu;
	}

	/* Use the same PHY as ACL if set to any */
	if (qos->phy == BT_ISO_PHY_ANY)
		qos->phy = phy;

	/* Use LE ACL connection interval if not set */
	if (!qos->interval)
		/* ACL interval unit in 1.25 ms to us */
		qos->interval = conn->le_conn_interval * 1250;

	/* Use LE ACL connection latency if not set */
	if (!qos->latency)
		qos->latency = conn->le_conn_latency;
}
2014 
2015 static void hci_bind_bis(struct hci_conn *conn,
2016 			 struct bt_iso_qos *qos)
2017 {
2018 	/* Update LINK PHYs according to QoS preference */
2019 	conn->le_tx_phy = qos->bcast.out.phy;
2020 	conn->le_tx_phy = qos->bcast.out.phy;
2021 	conn->iso_qos = *qos;
2022 	conn->state = BT_BOUND;
2023 }
2024 
/* hci_cmd_sync_queue() callback: start periodic advertising for the
 * bound BIS connection and then issue LE Create BIG with its ISO QoS.
 */
static int create_big_sync(struct hci_dev *hdev, void *data)
{
	struct hci_conn *conn = data;
	struct bt_iso_qos *qos = &conn->iso_qos;
	u16 interval, sync_interval = 0;
	u32 flags = 0;
	int err;

	/* phy 0x02: advertise on the secondary 2M PHY. */
	if (qos->bcast.out.phy == 0x02)
		flags |= MGMT_ADV_FLAG_SEC_2M;

	/* Align intervals: SDU interval in us -> 1.25 ms adv units. */
	interval = qos->bcast.out.interval / 1250;

	/* presumably sync_interval is in seconds (* 1600 yields 0.625 ms
	 * periodic advertising units) — TODO confirm against spec.
	 */
	if (qos->bcast.bis)
		sync_interval = qos->bcast.sync_interval * 1600;

	err = hci_start_per_adv_sync(hdev, qos->bcast.bis, conn->le_per_adv_data_len,
				     conn->le_per_adv_data, flags, interval,
				     interval, sync_interval);
	if (err)
		return err;

	return hci_le_create_big(conn, &conn->iso_qos);
}
2050 
/* Completion callback for create_pa_sync(): log failures and release
 * the command parameters allocated by hci_pa_create_sync().
 */
static void create_pa_complete(struct hci_dev *hdev, void *data, int err)
{
	bt_dev_dbg(hdev, "");

	if (err)
		bt_dev_err(hdev, "Unable to create PA: %d", err);

	kfree(data);
}
2062 
2063 static int create_pa_sync(struct hci_dev *hdev, void *data)
2064 {
2065 	struct hci_cp_le_pa_create_sync *cp = data;
2066 	int err;
2067 
2068 	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_CREATE_SYNC,
2069 				    sizeof(*cp), cp, HCI_CMD_TIMEOUT);
2070 	if (err) {
2071 		hci_dev_clear_flag(hdev, HCI_PA_SYNC);
2072 		return err;
2073 	}
2074 
2075 	return hci_update_passive_scan_sync(hdev);
2076 }
2077 
2078 int hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst, __u8 dst_type,
2079 		       __u8 sid, struct bt_iso_qos *qos)
2080 {
2081 	struct hci_cp_le_pa_create_sync *cp;
2082 
2083 	if (hci_dev_test_and_set_flag(hdev, HCI_PA_SYNC))
2084 		return -EBUSY;
2085 
2086 	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
2087 	if (!cp) {
2088 		hci_dev_clear_flag(hdev, HCI_PA_SYNC);
2089 		return -ENOMEM;
2090 	}
2091 
2092 	cp->options = qos->bcast.options;
2093 	cp->sid = sid;
2094 	cp->addr_type = dst_type;
2095 	bacpy(&cp->addr, dst);
2096 	cp->skip = cpu_to_le16(qos->bcast.skip);
2097 	cp->sync_timeout = cpu_to_le16(qos->bcast.sync_timeout);
2098 	cp->sync_cte_type = qos->bcast.sync_cte_type;
2099 
2100 	/* Queue start pa_create_sync and scan */
2101 	return hci_cmd_sync_queue(hdev, create_pa_sync, cp, create_pa_complete);
2102 }
2103 
2104 int hci_le_big_create_sync(struct hci_dev *hdev, struct bt_iso_qos *qos,
2105 			   __u16 sync_handle, __u8 num_bis, __u8 bis[])
2106 {
2107 	struct _packed {
2108 		struct hci_cp_le_big_create_sync cp;
2109 		__u8  bis[0x11];
2110 	} pdu;
2111 	int err;
2112 
2113 	if (num_bis > sizeof(pdu.bis))
2114 		return -EINVAL;
2115 
2116 	err = qos_set_big(hdev, qos);
2117 	if (err)
2118 		return err;
2119 
2120 	memset(&pdu, 0, sizeof(pdu));
2121 	pdu.cp.handle = qos->bcast.big;
2122 	pdu.cp.sync_handle = cpu_to_le16(sync_handle);
2123 	pdu.cp.encryption = qos->bcast.encryption;
2124 	memcpy(pdu.cp.bcode, qos->bcast.bcode, sizeof(pdu.cp.bcode));
2125 	pdu.cp.mse = qos->bcast.mse;
2126 	pdu.cp.timeout = cpu_to_le16(qos->bcast.timeout);
2127 	pdu.cp.num_bis = num_bis;
2128 	memcpy(pdu.bis, bis, num_bis);
2129 
2130 	return hci_send_cmd(hdev, HCI_OP_LE_BIG_CREATE_SYNC,
2131 			    sizeof(pdu.cp) + num_bis, &pdu);
2132 }
2133 
/* Completion callback for create_big_sync(): on failure notify the
 * upper layer and delete the BIS connection.
 */
static void create_big_complete(struct hci_dev *hdev, void *data, int err)
{
	struct hci_conn *conn = data;

	bt_dev_dbg(hdev, "conn %p", conn);

	if (!err)
		return;

	bt_dev_err(hdev, "Unable to create BIG: %d", err);
	hci_connect_cfm(conn, err);
	hci_conn_del(conn);
}
2146 
/* Create a broadcast isochronous stream (BIS) as broadcaster.
 *
 * Allocates and binds the BIS hci_conn, optionally embeds a Basic
 * Announcement (service data UUID 0x1851) into the periodic advertising
 * data, and queues the work that starts periodic advertising and
 * creates the BIG.  If the queued work later fails, create_big_complete
 * cleans the connection up asynchronously.
 *
 * Returns the bound hci_conn on success or an ERR_PTR on failure.
 */
struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst,
				 __u8 dst_type, struct bt_iso_qos *qos,
				 __u8 base_len, __u8 *base)
{
	struct hci_conn *conn;
	int err;

	/* We need hci_conn object using the BDADDR_ANY as dst */
	conn = hci_add_bis(hdev, dst, qos);
	if (IS_ERR(conn))
		return conn;

	hci_bind_bis(conn, qos);

	/* Add Basic Announcement into Periodic Adv Data if BASE is set */
	if (base_len && base) {
		base_len = eir_append_service_data(conn->le_per_adv_data, 0,
						   0x1851, base, base_len);
		conn->le_per_adv_data_len = base_len;
	}

	/* Queue start periodic advertising and create BIG */
	err = hci_cmd_sync_queue(hdev, create_big_sync, conn,
				 create_big_complete);
	if (err < 0) {
		hci_conn_drop(conn);
		return ERR_PTR(err);
	}

	/* Prefer the PHY already negotiated on the link, otherwise fall
	 * back to the controller's default TX PHYs.
	 */
	hci_iso_qos_setup(hdev, conn, &qos->bcast.out,
			  conn->le_tx_phy ? conn->le_tx_phy :
			  hdev->le_tx_def_phys);

	return conn;
}
2182 
/* Connect a unicast isochronous stream (CIS) to a remote device.
 *
 * First establishes the underlying LE ACL connection — as peripheral
 * (waiting for the peer) when local advertising is active, otherwise as
 * central via connected scanning — then configures the unicast QoS,
 * binds a CIS on top of the ACL and links the two connections so their
 * lifetimes are tied together.
 *
 * Returns the CIS hci_conn on success, an ERR_PTR if the ACL or CIS
 * could not be created, or NULL if linking ACL and CIS failed.
 */
struct hci_conn *hci_connect_cis(struct hci_dev *hdev, bdaddr_t *dst,
				 __u8 dst_type, struct bt_iso_qos *qos)
{
	struct hci_conn *le;
	struct hci_conn *cis;
	struct hci_link *link;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		le = hci_connect_le(hdev, dst, dst_type, false,
				    BT_SECURITY_LOW,
				    HCI_LE_CONN_TIMEOUT,
				    HCI_ROLE_SLAVE);
	else
		le = hci_connect_le_scan(hdev, dst, dst_type,
					 BT_SECURITY_LOW,
					 HCI_LE_CONN_TIMEOUT,
					 CONN_REASON_ISO_CONNECT);
	if (IS_ERR(le))
		return le;

	/* Use the negotiated PHYs when known, controller defaults otherwise */
	hci_iso_qos_setup(hdev, le, &qos->ucast.out,
			  le->le_tx_phy ? le->le_tx_phy : hdev->le_tx_def_phys);
	hci_iso_qos_setup(hdev, le, &qos->ucast.in,
			  le->le_rx_phy ? le->le_rx_phy : hdev->le_rx_def_phys);

	cis = hci_bind_cis(hdev, dst, dst_type, qos);
	if (IS_ERR(cis)) {
		hci_conn_drop(le);
		return cis;
	}

	link = hci_conn_link(le, cis);
	if (!link) {
		hci_conn_drop(le);
		hci_conn_drop(cis);
		return NULL;
	}

	/* If LE is already connected and CIS handle is already set proceed to
	 * Create CIS immediately.
	 */
	if (le->state == BT_CONNECTED && cis->handle != HCI_CONN_HANDLE_UNSET)
		hci_le_create_cis(cis);

	return cis;
}
2229 
2230 /* Check link security requirement */
2231 int hci_conn_check_link_mode(struct hci_conn *conn)
2232 {
2233 	BT_DBG("hcon %p", conn);
2234 
2235 	/* In Secure Connections Only mode, it is required that Secure
2236 	 * Connections is used and the link is encrypted with AES-CCM
2237 	 * using a P-256 authenticated combination key.
2238 	 */
2239 	if (hci_dev_test_flag(conn->hdev, HCI_SC_ONLY)) {
2240 		if (!hci_conn_sc_enabled(conn) ||
2241 		    !test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
2242 		    conn->key_type != HCI_LK_AUTH_COMBINATION_P256)
2243 			return 0;
2244 	}
2245 
2246 	 /* AES encryption is required for Level 4:
2247 	  *
2248 	  * BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 3, Part C
2249 	  * page 1319:
2250 	  *
2251 	  * 128-bit equivalent strength for link and encryption keys
2252 	  * required using FIPS approved algorithms (E0 not allowed,
2253 	  * SAFER+ not allowed, and P-192 not allowed; encryption key
2254 	  * not shortened)
2255 	  */
2256 	if (conn->sec_level == BT_SECURITY_FIPS &&
2257 	    !test_bit(HCI_CONN_AES_CCM, &conn->flags)) {
2258 		bt_dev_err(conn->hdev,
2259 			   "Invalid security: Missing AES-CCM usage");
2260 		return 0;
2261 	}
2262 
2263 	if (hci_conn_ssp_enabled(conn) &&
2264 	    !test_bit(HCI_CONN_ENCRYPT, &conn->flags))
2265 		return 0;
2266 
2267 	return 1;
2268 }
2269 
/* Authenticate remote device.
 *
 * Raises the pending security level if the request exceeds the current
 * one and, unless the link is already sufficiently authenticated,
 * issues HCI_OP_AUTH_REQUESTED (at most once, guarded by
 * HCI_CONN_AUTH_PEND).
 *
 * Returns 1 when the existing authentication already satisfies the
 * request, 0 when authentication is (or was already) in progress.
 */
static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
	BT_DBG("hcon %p", conn);

	/* Never lower a security level that is already pending */
	if (conn->pending_sec_level > sec_level)
		sec_level = conn->pending_sec_level;

	if (sec_level > conn->sec_level)
		conn->pending_sec_level = sec_level;
	else if (test_bit(HCI_CONN_AUTH, &conn->flags))
		return 1;

	/* Make sure we preserve an existing MITM requirement */
	auth_type |= (conn->auth_type & 0x01);

	conn->auth_type = auth_type;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(cp), &cp);

		/* If we're already encrypted set the REAUTH_PEND flag,
		 * otherwise set the ENCRYPT_PEND.
		 */
		if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
			set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
		else
			set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
	}

	return 0;
}
2306 
2307 /* Encrypt the link */
2308 static void hci_conn_encrypt(struct hci_conn *conn)
2309 {
2310 	BT_DBG("hcon %p", conn);
2311 
2312 	if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
2313 		struct hci_cp_set_conn_encrypt cp;
2314 		cp.handle  = cpu_to_le16(conn->handle);
2315 		cp.encrypt = 0x01;
2316 		hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2317 			     &cp);
2318 	}
2319 }
2320 
/* Enable security on a connection.
 *
 * LE links are delegated to SMP.  For BR/EDR, decide from the stored
 * link key type and the requested level whether the current key is good
 * enough, whether authentication must be (re)started and whether
 * encryption still needs to be enabled.
 *
 * Returns 1 when all requirements are already met, 0 when the request
 * is pending (the caller must wait for the auth/encrypt HCI events).
 */
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
		      bool initiator)
{
	BT_DBG("hcon %p", conn);

	if (conn->type == LE_LINK)
		return smp_conn_security(conn, sec_level);

	/* For sdp we don't need the link key. */
	if (sec_level == BT_SECURITY_SDP)
		return 1;

	/* For non 2.1 devices and low security level we don't need the link
	   key. */
	if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
		return 1;

	/* For other security levels we need the link key. */
	if (!test_bit(HCI_CONN_AUTH, &conn->flags))
		goto auth;

	/* An authenticated FIPS approved combination key has sufficient
	 * security for security level 4. */
	if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256 &&
	    sec_level == BT_SECURITY_FIPS)
		goto encrypt;

	/* An authenticated combination key has sufficient security for
	   security level 3. */
	if ((conn->key_type == HCI_LK_AUTH_COMBINATION_P192 ||
	     conn->key_type == HCI_LK_AUTH_COMBINATION_P256) &&
	    sec_level == BT_SECURITY_HIGH)
		goto encrypt;

	/* An unauthenticated combination key has sufficient security for
	   security level 1 and 2. */
	if ((conn->key_type == HCI_LK_UNAUTH_COMBINATION_P192 ||
	     conn->key_type == HCI_LK_UNAUTH_COMBINATION_P256) &&
	    (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW))
		goto encrypt;

	/* A combination key has always sufficient security for the security
	   levels 1 or 2. High security level requires the combination key
	   is generated using maximum PIN code length (16).
	   For pre 2.1 units. */
	if (conn->key_type == HCI_LK_COMBINATION &&
	    (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW ||
	     conn->pin_length == 16))
		goto encrypt;

auth:
	/* An encryption change is in flight; wait for it before starting
	 * authentication.
	 */
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
		return 0;

	if (initiator)
		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

	if (!hci_conn_auth(conn, sec_level, auth_type))
		return 0;

encrypt:
	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags)) {
		/* Ensure that the encryption key size has been read,
		 * otherwise stall the upper layer responses.
		 */
		if (!conn->enc_key_size)
			return 0;

		/* Nothing else needed, all requirements are met */
		return 1;
	}

	hci_conn_encrypt(conn);
	return 0;
}
EXPORT_SYMBOL(hci_conn_security);
2398 
2399 /* Check secure link requirement */
2400 int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
2401 {
2402 	BT_DBG("hcon %p", conn);
2403 
2404 	/* Accept if non-secure or higher security level is required */
2405 	if (sec_level != BT_SECURITY_HIGH && sec_level != BT_SECURITY_FIPS)
2406 		return 1;
2407 
2408 	/* Accept if secure or higher security level is already present */
2409 	if (conn->sec_level == BT_SECURITY_HIGH ||
2410 	    conn->sec_level == BT_SECURITY_FIPS)
2411 		return 1;
2412 
2413 	/* Reject not secure link */
2414 	return 0;
2415 }
2416 EXPORT_SYMBOL(hci_conn_check_secure);
2417 
2418 /* Switch role */
2419 int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
2420 {
2421 	BT_DBG("hcon %p", conn);
2422 
2423 	if (role == conn->role)
2424 		return 1;
2425 
2426 	if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
2427 		struct hci_cp_switch_role cp;
2428 		bacpy(&cp.bdaddr, &conn->dst);
2429 		cp.role = role;
2430 		hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
2431 	}
2432 
2433 	return 0;
2434 }
2435 EXPORT_SYMBOL(hci_conn_switch_role);
2436 
2437 /* Enter active mode */
2438 void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
2439 {
2440 	struct hci_dev *hdev = conn->hdev;
2441 
2442 	BT_DBG("hcon %p mode %d", conn, conn->mode);
2443 
2444 	if (conn->mode != HCI_CM_SNIFF)
2445 		goto timer;
2446 
2447 	if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
2448 		goto timer;
2449 
2450 	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
2451 		struct hci_cp_exit_sniff_mode cp;
2452 		cp.handle = cpu_to_le16(conn->handle);
2453 		hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
2454 	}
2455 
2456 timer:
2457 	if (hdev->idle_timeout > 0)
2458 		queue_delayed_work(hdev->workqueue, &conn->idle_work,
2459 				   msecs_to_jiffies(hdev->idle_timeout));
2460 }
2461 
/* Drop all connections on the device.
 *
 * Marks every connection closed, notifies the upper layers with
 * HCI_ERROR_LOCAL_HOST_TERM and deletes each connection.  Safe-iteration
 * is required because hci_conn_del removes entries from the list.
 */
void hci_conn_hash_flush(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c, *n;

	BT_DBG("hdev %s", hdev->name);

	list_for_each_entry_safe(c, n, &h->list, list) {
		c->state = BT_CLOSED;

		hci_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);

		/* Unlink before deleting otherwise it is possible that
		 * hci_conn_del removes the link which may cause the list to
		 * contain items already freed.
		 */
		hci_conn_unlink(c);
		hci_conn_del(c);
	}
}
2483 
2484 /* Check pending connect attempts */
2485 void hci_conn_check_pending(struct hci_dev *hdev)
2486 {
2487 	struct hci_conn *conn;
2488 
2489 	BT_DBG("hdev %s", hdev->name);
2490 
2491 	hci_dev_lock(hdev);
2492 
2493 	conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
2494 	if (conn)
2495 		hci_acl_create_connection(conn);
2496 
2497 	hci_dev_unlock(hdev);
2498 }
2499 
2500 static u32 get_link_mode(struct hci_conn *conn)
2501 {
2502 	u32 link_mode = 0;
2503 
2504 	if (conn->role == HCI_ROLE_MASTER)
2505 		link_mode |= HCI_LM_MASTER;
2506 
2507 	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
2508 		link_mode |= HCI_LM_ENCRYPT;
2509 
2510 	if (test_bit(HCI_CONN_AUTH, &conn->flags))
2511 		link_mode |= HCI_LM_AUTH;
2512 
2513 	if (test_bit(HCI_CONN_SECURE, &conn->flags))
2514 		link_mode |= HCI_LM_SECURE;
2515 
2516 	if (test_bit(HCI_CONN_FIPS, &conn->flags))
2517 		link_mode |= HCI_LM_FIPS;
2518 
2519 	return link_mode;
2520 }
2521 
2522 int hci_get_conn_list(void __user *arg)
2523 {
2524 	struct hci_conn *c;
2525 	struct hci_conn_list_req req, *cl;
2526 	struct hci_conn_info *ci;
2527 	struct hci_dev *hdev;
2528 	int n = 0, size, err;
2529 
2530 	if (copy_from_user(&req, arg, sizeof(req)))
2531 		return -EFAULT;
2532 
2533 	if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
2534 		return -EINVAL;
2535 
2536 	size = sizeof(req) + req.conn_num * sizeof(*ci);
2537 
2538 	cl = kmalloc(size, GFP_KERNEL);
2539 	if (!cl)
2540 		return -ENOMEM;
2541 
2542 	hdev = hci_dev_get(req.dev_id);
2543 	if (!hdev) {
2544 		kfree(cl);
2545 		return -ENODEV;
2546 	}
2547 
2548 	ci = cl->conn_info;
2549 
2550 	hci_dev_lock(hdev);
2551 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2552 		bacpy(&(ci + n)->bdaddr, &c->dst);
2553 		(ci + n)->handle = c->handle;
2554 		(ci + n)->type  = c->type;
2555 		(ci + n)->out   = c->out;
2556 		(ci + n)->state = c->state;
2557 		(ci + n)->link_mode = get_link_mode(c);
2558 		if (++n >= req.conn_num)
2559 			break;
2560 	}
2561 	hci_dev_unlock(hdev);
2562 
2563 	cl->dev_id = hdev->id;
2564 	cl->conn_num = n;
2565 	size = sizeof(req) + n * sizeof(*ci);
2566 
2567 	hci_dev_put(hdev);
2568 
2569 	err = copy_to_user(arg, cl, size);
2570 	kfree(cl);
2571 
2572 	return err ? -EFAULT : 0;
2573 }
2574 
2575 int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
2576 {
2577 	struct hci_conn_info_req req;
2578 	struct hci_conn_info ci;
2579 	struct hci_conn *conn;
2580 	char __user *ptr = arg + sizeof(req);
2581 
2582 	if (copy_from_user(&req, arg, sizeof(req)))
2583 		return -EFAULT;
2584 
2585 	hci_dev_lock(hdev);
2586 	conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
2587 	if (conn) {
2588 		bacpy(&ci.bdaddr, &conn->dst);
2589 		ci.handle = conn->handle;
2590 		ci.type  = conn->type;
2591 		ci.out   = conn->out;
2592 		ci.state = conn->state;
2593 		ci.link_mode = get_link_mode(conn);
2594 	}
2595 	hci_dev_unlock(hdev);
2596 
2597 	if (!conn)
2598 		return -ENOENT;
2599 
2600 	return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
2601 }
2602 
2603 int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
2604 {
2605 	struct hci_auth_info_req req;
2606 	struct hci_conn *conn;
2607 
2608 	if (copy_from_user(&req, arg, sizeof(req)))
2609 		return -EFAULT;
2610 
2611 	hci_dev_lock(hdev);
2612 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
2613 	if (conn)
2614 		req.type = conn->auth_type;
2615 	hci_dev_unlock(hdev);
2616 
2617 	if (!conn)
2618 		return -ENOENT;
2619 
2620 	return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
2621 }
2622 
2623 struct hci_chan *hci_chan_create(struct hci_conn *conn)
2624 {
2625 	struct hci_dev *hdev = conn->hdev;
2626 	struct hci_chan *chan;
2627 
2628 	BT_DBG("%s hcon %p", hdev->name, conn);
2629 
2630 	if (test_bit(HCI_CONN_DROP, &conn->flags)) {
2631 		BT_DBG("Refusing to create new hci_chan");
2632 		return NULL;
2633 	}
2634 
2635 	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
2636 	if (!chan)
2637 		return NULL;
2638 
2639 	chan->conn = hci_conn_get(conn);
2640 	skb_queue_head_init(&chan->data_q);
2641 	chan->state = BT_CONNECTED;
2642 
2643 	list_add_rcu(&chan->list, &conn->chan_list);
2644 
2645 	return chan;
2646 }
2647 
/* Remove and free a channel.
 *
 * Unlinks the channel from the connection's RCU-protected list, waits
 * for concurrent readers (e.g. hci_chan_lookup_handle) to finish, then
 * releases the connection reference and frees the channel.
 */
void hci_chan_del(struct hci_chan *chan)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);

	list_del_rcu(&chan->list);

	/* Wait for RCU readers traversing chan_list before freeing below */
	synchronize_rcu();

	/* Prevent new hci_chan's to be created for this hci_conn */
	set_bit(HCI_CONN_DROP, &conn->flags);

	/* Drop the reference taken in hci_chan_create() */
	hci_conn_put(conn);

	skb_queue_purge(&chan->data_q);
	kfree(chan);
}
2667 
2668 void hci_chan_list_flush(struct hci_conn *conn)
2669 {
2670 	struct hci_chan *chan, *n;
2671 
2672 	BT_DBG("hcon %p", conn);
2673 
2674 	list_for_each_entry_safe(chan, n, &conn->chan_list, list)
2675 		hci_chan_del(chan);
2676 }
2677 
2678 static struct hci_chan *__hci_chan_lookup_handle(struct hci_conn *hcon,
2679 						 __u16 handle)
2680 {
2681 	struct hci_chan *hchan;
2682 
2683 	list_for_each_entry(hchan, &hcon->chan_list, list) {
2684 		if (hchan->handle == handle)
2685 			return hchan;
2686 	}
2687 
2688 	return NULL;
2689 }
2690 
2691 struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle)
2692 {
2693 	struct hci_conn_hash *h = &hdev->conn_hash;
2694 	struct hci_conn *hcon;
2695 	struct hci_chan *hchan = NULL;
2696 
2697 	rcu_read_lock();
2698 
2699 	list_for_each_entry_rcu(hcon, &h->list, list) {
2700 		hchan = __hci_chan_lookup_handle(hcon, handle);
2701 		if (hchan)
2702 			break;
2703 	}
2704 
2705 	rcu_read_unlock();
2706 
2707 	return hchan;
2708 }
2709 
/* Report the BT_PHY_* bitmask of PHYs usable on a connection.
 *
 * For ACL/eSCO note the asymmetry in the tests below: the basic-rate
 * packet-type bits are "may be used" flags (positive test), whereas the
 * EDR bits indicate "shall NOT be used", hence the negated tests —
 * per the HCI packet-type definitions (TODO: confirm against the Core
 * Spec packet type tables for the exact bit semantics).
 */
u32 hci_conn_get_phy(struct hci_conn *conn)
{
	u32 phys = 0;

	/* BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 2, Part B page 471:
	 * Table 6.2: Packets defined for synchronous, asynchronous, and
	 * CPB logical transport types.
	 */
	switch (conn->type) {
	case SCO_LINK:
		/* SCO logical transport (1 Mb/s):
		 * HV1, HV2, HV3 and DV.
		 */
		phys |= BT_PHY_BR_1M_1SLOT;

		break;

	case ACL_LINK:
		/* ACL logical transport (1 Mb/s) ptt=0:
		 * DH1, DM3, DH3, DM5 and DH5.
		 */
		phys |= BT_PHY_BR_1M_1SLOT;

		if (conn->pkt_type & (HCI_DM3 | HCI_DH3))
			phys |= BT_PHY_BR_1M_3SLOT;

		if (conn->pkt_type & (HCI_DM5 | HCI_DH5))
			phys |= BT_PHY_BR_1M_5SLOT;

		/* ACL logical transport (2 Mb/s) ptt=1:
		 * 2-DH1, 2-DH3 and 2-DH5.
		 * (EDR bits are exclusion flags, hence the negated tests.)
		 */
		if (!(conn->pkt_type & HCI_2DH1))
			phys |= BT_PHY_EDR_2M_1SLOT;

		if (!(conn->pkt_type & HCI_2DH3))
			phys |= BT_PHY_EDR_2M_3SLOT;

		if (!(conn->pkt_type & HCI_2DH5))
			phys |= BT_PHY_EDR_2M_5SLOT;

		/* ACL logical transport (3 Mb/s) ptt=1:
		 * 3-DH1, 3-DH3 and 3-DH5.
		 */
		if (!(conn->pkt_type & HCI_3DH1))
			phys |= BT_PHY_EDR_3M_1SLOT;

		if (!(conn->pkt_type & HCI_3DH3))
			phys |= BT_PHY_EDR_3M_3SLOT;

		if (!(conn->pkt_type & HCI_3DH5))
			phys |= BT_PHY_EDR_3M_5SLOT;

		break;

	case ESCO_LINK:
		/* eSCO logical transport (1 Mb/s): EV3, EV4 and EV5 */
		phys |= BT_PHY_BR_1M_1SLOT;

		if (!(conn->pkt_type & (ESCO_EV4 | ESCO_EV5)))
			phys |= BT_PHY_BR_1M_3SLOT;

		/* eSCO logical transport (2 Mb/s): 2-EV3, 2-EV5 */
		if (!(conn->pkt_type & ESCO_2EV3))
			phys |= BT_PHY_EDR_2M_1SLOT;

		if (!(conn->pkt_type & ESCO_2EV5))
			phys |= BT_PHY_EDR_2M_3SLOT;

		/* eSCO logical transport (3 Mb/s): 3-EV3, 3-EV5 */
		if (!(conn->pkt_type & ESCO_3EV3))
			phys |= BT_PHY_EDR_3M_1SLOT;

		if (!(conn->pkt_type & ESCO_3EV5))
			phys |= BT_PHY_EDR_3M_3SLOT;

		break;

	case LE_LINK:
		/* LE PHYs are reported per direction (TX/RX) */
		if (conn->le_tx_phy & HCI_LE_SET_PHY_1M)
			phys |= BT_PHY_LE_1M_TX;

		if (conn->le_rx_phy & HCI_LE_SET_PHY_1M)
			phys |= BT_PHY_LE_1M_RX;

		if (conn->le_tx_phy & HCI_LE_SET_PHY_2M)
			phys |= BT_PHY_LE_2M_TX;

		if (conn->le_rx_phy & HCI_LE_SET_PHY_2M)
			phys |= BT_PHY_LE_2M_RX;

		if (conn->le_tx_phy & HCI_LE_SET_PHY_CODED)
			phys |= BT_PHY_LE_CODED_TX;

		if (conn->le_rx_phy & HCI_LE_SET_PHY_CODED)
			phys |= BT_PHY_LE_CODED_RX;

		break;
	}

	return phys;
}
2812 
/* Abort a connection or an in-progress connection attempt.
 *
 * Sends the HCI command appropriate for the current state: Disconnect
 * (or Disconnect Physical Link for AMP) when the link is up, a
 * Create Connection Cancel while still connecting, or a reject command
 * for incoming connections not yet accepted.  HCI_CONN_CANCEL guards
 * against running the abort sequence more than once.
 *
 * Returns 0 or a negative error from sending the HCI command.
 */
int hci_abort_conn(struct hci_conn *conn, u8 reason)
{
	int r = 0;

	/* Abort already in progress */
	if (test_and_set_bit(HCI_CONN_CANCEL, &conn->flags))
		return 0;

	switch (conn->state) {
	case BT_CONNECTED:
	case BT_CONFIG:
		if (conn->type == AMP_LINK) {
			struct hci_cp_disconn_phy_link cp;

			cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
			cp.reason = reason;
			r = hci_send_cmd(conn->hdev, HCI_OP_DISCONN_PHY_LINK,
					 sizeof(cp), &cp);
		} else {
			struct hci_cp_disconnect dc;

			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = reason;
			r = hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT,
					 sizeof(dc), &dc);
		}

		conn->state = BT_DISCONN;

		break;
	case BT_CONNECT:
		if (conn->type == LE_LINK) {
			/* Still only scanning — presumably no controller
			 * command outstanding to cancel (verify against
			 * hci_connect_le_scan flow).
			 */
			if (test_bit(HCI_CONN_SCANNING, &conn->flags))
				break;
			r = hci_send_cmd(conn->hdev,
					 HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
		} else if (conn->type == ACL_LINK) {
			/* Create Connection Cancel requires BT >= 1.2 */
			if (conn->hdev->hci_ver < BLUETOOTH_VER_1_2)
				break;
			r = hci_send_cmd(conn->hdev,
					 HCI_OP_CREATE_CONN_CANCEL,
					 6, &conn->dst);
		}
		break;
	case BT_CONNECT2:
		if (conn->type == ACL_LINK) {
			struct hci_cp_reject_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = reason;

			r = hci_send_cmd(conn->hdev,
					 HCI_OP_REJECT_CONN_REQ,
					 sizeof(rej), &rej);
		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
			struct hci_cp_reject_sync_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);

			/* SCO rejection has its own limited set of
			 * allowed error values (0x0D-0x0F) which isn't
			 * compatible with most values passed to this
			 * function. To be safe hard-code one of the
			 * values that's suitable for SCO.
			 */
			rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;

			r = hci_send_cmd(conn->hdev,
					 HCI_OP_REJECT_SYNC_CONN_REQ,
					 sizeof(rej), &rej);
		}
		break;
	default:
		/* No command in flight for other states; just mark closed */
		conn->state = BT_CLOSED;
		break;
	}

	return r;
}
2891