xref: /openbmc/linux/net/bluetooth/hci_conn.c (revision d54151aa)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4 
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI connection handling. */
26 
27 #include <linux/export.h>
28 #include <linux/debugfs.h>
29 
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/l2cap.h>
33 #include <net/bluetooth/iso.h>
34 #include <net/bluetooth/mgmt.h>
35 
36 #include "hci_request.h"
37 #include "smp.h"
38 #include "a2mp.h"
39 #include "eir.h"
40 
/* One alternate-settings entry for (e)SCO connection setup.  The tables
 * below are walked from most to least preferred entry, one per attempt.
 */
struct sco_param {
	u16 pkt_type;		/* allowed packet types (ESCO_*, EDR_ESCO_MASK bits) */
	u16 max_latency;	/* Max_Latency value sent in the HCI command */
	u8  retrans_effort;	/* Retransmission_Effort value for the command */
};
46 
/* Context handed to hci_enhanced_setup_sync() through hci_cmd_sync_queue();
 * allocated in hci_setup_sync() and freed by the callback.
 */
struct conn_handle_t {
	struct hci_conn *conn;	/* (e)SCO connection being set up */
	__u16 handle;		/* handle of the underlying ACL link */
};
51 
/* eSCO CVSD parameter sets, tried in order from S3 down to D0 */
static const struct sco_param esco_param_cvsd[] = {
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a,	0x01 }, /* S3 */
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007,	0x01 }, /* S2 */
	{ EDR_ESCO_MASK | ESCO_EV3,   0x0007,	0x01 }, /* S1 */
	{ EDR_ESCO_MASK | ESCO_HV3,   0xffff,	0x01 }, /* D1 */
	{ EDR_ESCO_MASK | ESCO_HV1,   0xffff,	0x01 }, /* D0 */
};

/* Legacy SCO CVSD parameter sets (link without eSCO support) */
static const struct sco_param sco_param_cvsd[] = {
	{ EDR_ESCO_MASK | ESCO_HV3,   0xffff,	0xff }, /* D1 */
	{ EDR_ESCO_MASK | ESCO_HV1,   0xffff,	0xff }, /* D0 */
};

/* eSCO mSBC (wideband speech) parameter sets, T2 preferred over T1 */
static const struct sco_param esco_param_msbc[] = {
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d,	0x02 }, /* T2 */
	{ EDR_ESCO_MASK | ESCO_EV3,   0x0008,	0x02 }, /* T1 */
};
69 
/* Clean up the hci_conn_params state after a connect-by-scanning attempt
 * has finished with @status, re-arming or removing the params entry as
 * appropriate for its auto_connect policy.
 *
 * This function requires the caller holds hdev->lock
 */
static void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status)
{
	struct hci_conn_params *params;
	struct hci_dev *hdev = conn->hdev;
	struct smp_irk *irk;
	bdaddr_t *bdaddr;
	u8 bdaddr_type;

	bdaddr = &conn->dst;
	bdaddr_type = conn->dst_type;

	/* Check if we need to convert to identity address */
	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		bdaddr_type = irk->addr_type;
	}

	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, bdaddr,
					   bdaddr_type);
	if (!params)
		return;

	/* Release the connection reference the params entry was holding */
	if (params->conn) {
		hci_conn_drop(params->conn);
		hci_conn_put(params->conn);
		params->conn = NULL;
	}

	if (!params->explicit_connect)
		return;

	/* If the status indicates successful cancellation of
	 * the attempt (i.e. Unknown Connection Id) there's no point of
	 * notifying failure since we'll go back to keep trying to
	 * connect. The only exception is explicit connect requests
	 * where a timeout + cancel does indicate an actual failure.
	 */
	if (status && status != HCI_ERROR_UNKNOWN_CONN_ID)
		mgmt_connect_failed(hdev, &conn->dst, conn->type,
				    conn->dst_type, status);

	/* The connection attempt was doing scan for new RPA, and is
	 * in scan phase. If params are not associated with any other
	 * autoconnect action, remove them completely. If they are, just unmark
	 * them as waiting for connection, by clearing explicit_connect field.
	 */
	params->explicit_connect = false;

	list_del_init(&params->action);

	switch (params->auto_connect) {
	case HCI_AUTO_CONN_EXPLICIT:
		hci_conn_params_del(hdev, bdaddr, bdaddr_type);
		/* return instead of break to avoid duplicate scan update */
		return;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		list_add(&params->action, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		list_add(&params->action, &hdev->pend_le_reports);
		break;
	default:
		break;
	}

	hci_update_passive_scan(hdev);
}
140 
/* Tear down a connection's auxiliary state: remove it from the conn hash,
 * run any type-specific cleanup, notify the driver, delete sysfs/debugfs
 * entries and drop the hdev/conn references taken in hci_conn_add().
 *
 * Callers hold hdev->lock (see le_scan_cleanup()); the statement order
 * below is significant.
 */
static void hci_conn_cleanup(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	/* Param removal deferred to disconnect time: do it now */
	if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags))
		hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type);

	if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
		hci_remove_link_key(hdev, &conn->dst);

	hci_chan_list_flush(conn);

	hci_conn_hash_del(hdev, conn);

	/* Type-specific cleanup, e.g. bis_cleanup/cis_cleanup for ISO links */
	if (conn->cleanup)
		conn->cleanup(conn);

	if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
		switch (conn->setting & SCO_AIRMODE_MASK) {
		case SCO_AIRMODE_CVSD:
		case SCO_AIRMODE_TRANSP:
			if (hdev->notify)
				hdev->notify(hdev, HCI_NOTIFY_DISABLE_SCO);
			break;
		}
	} else {
		if (hdev->notify)
			hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
	}

	hci_conn_del_sysfs(conn);

	debugfs_remove_recursive(conn->debugfs);

	/* Drop the references taken in hci_conn_add() */
	hci_dev_put(hdev);

	hci_conn_put(conn);
}
179 
/* Deferred cleanup for an LE connection that timed out while still in the
 * scanning phase; scheduled by hci_connect_le_scan_remove() on the system
 * workqueue with temporary hdev/conn references held.
 */
static void le_scan_cleanup(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     le_scan_cleanup);
	struct hci_dev *hdev = conn->hdev;
	struct hci_conn *c = NULL;

	BT_DBG("%s hcon %p", hdev->name, conn);

	hci_dev_lock(hdev);

	/* Check that the hci_conn is still around */
	rcu_read_lock();
	list_for_each_entry_rcu(c, &hdev->conn_hash.list, list) {
		if (c == conn)
			break;
	}
	rcu_read_unlock();

	/* Only clean up if the conn was still present in the hash */
	if (c == conn) {
		hci_connect_le_scan_cleanup(conn, 0x00);
		hci_conn_cleanup(conn);
	}

	hci_dev_unlock(hdev);
	/* Drop the temporary references taken in hci_connect_le_scan_remove() */
	hci_dev_put(hdev);
	hci_conn_put(conn);
}
208 
/* Schedule deferred removal of an LE connection that is still in its
 * scanning phase; the actual cleanup happens in le_scan_cleanup(), which
 * releases the references taken here.
 */
static void hci_connect_le_scan_remove(struct hci_conn *conn)
{
	BT_DBG("%s hcon %p", conn->hdev->name, conn);

	/* We can't call hci_conn_del/hci_conn_cleanup here since that
	 * could deadlock with another hci_conn_del() call that's holding
	 * hci_dev_lock and doing cancel_delayed_work_sync(&conn->disc_work).
	 * Instead, grab temporary extra references to the hci_dev and
	 * hci_conn and perform the necessary cleanup in a separate work
	 * callback.
	 */

	hci_dev_hold(conn->hdev);
	hci_conn_get(conn);

	/* Even though we hold a reference to the hdev, many other
	 * things might get cleaned up meanwhile, including the hdev's
	 * own workqueue, so we can't use that for scheduling.
	 */
	schedule_work(&conn->le_scan_cleanup);
}
230 
231 static void hci_acl_create_connection(struct hci_conn *conn)
232 {
233 	struct hci_dev *hdev = conn->hdev;
234 	struct inquiry_entry *ie;
235 	struct hci_cp_create_conn cp;
236 
237 	BT_DBG("hcon %p", conn);
238 
239 	/* Many controllers disallow HCI Create Connection while it is doing
240 	 * HCI Inquiry. So we cancel the Inquiry first before issuing HCI Create
241 	 * Connection. This may cause the MGMT discovering state to become false
242 	 * without user space's request but it is okay since the MGMT Discovery
243 	 * APIs do not promise that discovery should be done forever. Instead,
244 	 * the user space monitors the status of MGMT discovering and it may
245 	 * request for discovery again when this flag becomes false.
246 	 */
247 	if (test_bit(HCI_INQUIRY, &hdev->flags)) {
248 		/* Put this connection to "pending" state so that it will be
249 		 * executed after the inquiry cancel command complete event.
250 		 */
251 		conn->state = BT_CONNECT2;
252 		hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
253 		return;
254 	}
255 
256 	conn->state = BT_CONNECT;
257 	conn->out = true;
258 	conn->role = HCI_ROLE_MASTER;
259 
260 	conn->attempt++;
261 
262 	conn->link_policy = hdev->link_policy;
263 
264 	memset(&cp, 0, sizeof(cp));
265 	bacpy(&cp.bdaddr, &conn->dst);
266 	cp.pscan_rep_mode = 0x02;
267 
268 	ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
269 	if (ie) {
270 		if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
271 			cp.pscan_rep_mode = ie->data.pscan_rep_mode;
272 			cp.pscan_mode     = ie->data.pscan_mode;
273 			cp.clock_offset   = ie->data.clock_offset |
274 					    cpu_to_le16(0x8000);
275 		}
276 
277 		memcpy(conn->dev_class, ie->data.dev_class, 3);
278 	}
279 
280 	cp.pkt_type = cpu_to_le16(conn->pkt_type);
281 	if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
282 		cp.role_switch = 0x01;
283 	else
284 		cp.role_switch = 0x00;
285 
286 	hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
287 }
288 
289 int hci_disconnect(struct hci_conn *conn, __u8 reason)
290 {
291 	BT_DBG("hcon %p", conn);
292 
293 	/* When we are central of an established connection and it enters
294 	 * the disconnect timeout, then go ahead and try to read the
295 	 * current clock offset.  Processing of the result is done
296 	 * within the event handling and hci_clock_offset_evt function.
297 	 */
298 	if (conn->type == ACL_LINK && conn->role == HCI_ROLE_MASTER &&
299 	    (conn->state == BT_CONNECTED || conn->state == BT_CONFIG)) {
300 		struct hci_dev *hdev = conn->hdev;
301 		struct hci_cp_read_clock_offset clkoff_cp;
302 
303 		clkoff_cp.handle = cpu_to_le16(conn->handle);
304 		hci_send_cmd(hdev, HCI_OP_READ_CLOCK_OFFSET, sizeof(clkoff_cp),
305 			     &clkoff_cp);
306 	}
307 
308 	return hci_abort_conn(conn, reason);
309 }
310 
/* Request a legacy SCO link on top of the ACL connection identified by
 * @handle (used when the controller has no eSCO support).
 */
static void hci_add_sco(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_add_sco cp;

	BT_DBG("hcon %p", conn);

	conn->state = BT_CONNECT;
	conn->out = true;

	conn->attempt++;

	cp.handle   = cpu_to_le16(handle);
	cp.pkt_type = cpu_to_le16(conn->pkt_type);

	hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
}
328 
329 static bool find_next_esco_param(struct hci_conn *conn,
330 				 const struct sco_param *esco_param, int size)
331 {
332 	for (; conn->attempt <= size; conn->attempt++) {
333 		if (lmp_esco_2m_capable(conn->link) ||
334 		    (esco_param[conn->attempt - 1].pkt_type & ESCO_2EV3))
335 			break;
336 		BT_DBG("hcon %p skipped attempt %d, eSCO 2M not supported",
337 		       conn, conn->attempt);
338 	}
339 
340 	return conn->attempt <= size;
341 }
342 
343 static int configure_datapath_sync(struct hci_dev *hdev, struct bt_codec *codec)
344 {
345 	int err;
346 	__u8 vnd_len, *vnd_data = NULL;
347 	struct hci_op_configure_data_path *cmd = NULL;
348 
349 	err = hdev->get_codec_config_data(hdev, ESCO_LINK, codec, &vnd_len,
350 					  &vnd_data);
351 	if (err < 0)
352 		goto error;
353 
354 	cmd = kzalloc(sizeof(*cmd) + vnd_len, GFP_KERNEL);
355 	if (!cmd) {
356 		err = -ENOMEM;
357 		goto error;
358 	}
359 
360 	err = hdev->get_data_path_id(hdev, &cmd->data_path_id);
361 	if (err < 0)
362 		goto error;
363 
364 	cmd->vnd_len = vnd_len;
365 	memcpy(cmd->vnd_data, vnd_data, vnd_len);
366 
367 	cmd->direction = 0x00;
368 	__hci_cmd_sync_status(hdev, HCI_CONFIGURE_DATA_PATH,
369 			      sizeof(*cmd) + vnd_len, cmd, HCI_CMD_TIMEOUT);
370 
371 	cmd->direction = 0x01;
372 	err = __hci_cmd_sync_status(hdev, HCI_CONFIGURE_DATA_PATH,
373 				    sizeof(*cmd) + vnd_len, cmd,
374 				    HCI_CMD_TIMEOUT);
375 error:
376 
377 	kfree(cmd);
378 	kfree(vnd_data);
379 	return err;
380 }
381 
382 static int hci_enhanced_setup_sync(struct hci_dev *hdev, void *data)
383 {
384 	struct conn_handle_t *conn_handle = data;
385 	struct hci_conn *conn = conn_handle->conn;
386 	__u16 handle = conn_handle->handle;
387 	struct hci_cp_enhanced_setup_sync_conn cp;
388 	const struct sco_param *param;
389 
390 	kfree(conn_handle);
391 
392 	bt_dev_dbg(hdev, "hcon %p", conn);
393 
394 	/* for offload use case, codec needs to configured before opening SCO */
395 	if (conn->codec.data_path)
396 		configure_datapath_sync(hdev, &conn->codec);
397 
398 	conn->state = BT_CONNECT;
399 	conn->out = true;
400 
401 	conn->attempt++;
402 
403 	memset(&cp, 0x00, sizeof(cp));
404 
405 	cp.handle   = cpu_to_le16(handle);
406 
407 	cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
408 	cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
409 
410 	switch (conn->codec.id) {
411 	case BT_CODEC_MSBC:
412 		if (!find_next_esco_param(conn, esco_param_msbc,
413 					  ARRAY_SIZE(esco_param_msbc)))
414 			return -EINVAL;
415 
416 		param = &esco_param_msbc[conn->attempt - 1];
417 		cp.tx_coding_format.id = 0x05;
418 		cp.rx_coding_format.id = 0x05;
419 		cp.tx_codec_frame_size = __cpu_to_le16(60);
420 		cp.rx_codec_frame_size = __cpu_to_le16(60);
421 		cp.in_bandwidth = __cpu_to_le32(32000);
422 		cp.out_bandwidth = __cpu_to_le32(32000);
423 		cp.in_coding_format.id = 0x04;
424 		cp.out_coding_format.id = 0x04;
425 		cp.in_coded_data_size = __cpu_to_le16(16);
426 		cp.out_coded_data_size = __cpu_to_le16(16);
427 		cp.in_pcm_data_format = 2;
428 		cp.out_pcm_data_format = 2;
429 		cp.in_pcm_sample_payload_msb_pos = 0;
430 		cp.out_pcm_sample_payload_msb_pos = 0;
431 		cp.in_data_path = conn->codec.data_path;
432 		cp.out_data_path = conn->codec.data_path;
433 		cp.in_transport_unit_size = 1;
434 		cp.out_transport_unit_size = 1;
435 		break;
436 
437 	case BT_CODEC_TRANSPARENT:
438 		if (!find_next_esco_param(conn, esco_param_msbc,
439 					  ARRAY_SIZE(esco_param_msbc)))
440 			return false;
441 		param = &esco_param_msbc[conn->attempt - 1];
442 		cp.tx_coding_format.id = 0x03;
443 		cp.rx_coding_format.id = 0x03;
444 		cp.tx_codec_frame_size = __cpu_to_le16(60);
445 		cp.rx_codec_frame_size = __cpu_to_le16(60);
446 		cp.in_bandwidth = __cpu_to_le32(0x1f40);
447 		cp.out_bandwidth = __cpu_to_le32(0x1f40);
448 		cp.in_coding_format.id = 0x03;
449 		cp.out_coding_format.id = 0x03;
450 		cp.in_coded_data_size = __cpu_to_le16(16);
451 		cp.out_coded_data_size = __cpu_to_le16(16);
452 		cp.in_pcm_data_format = 2;
453 		cp.out_pcm_data_format = 2;
454 		cp.in_pcm_sample_payload_msb_pos = 0;
455 		cp.out_pcm_sample_payload_msb_pos = 0;
456 		cp.in_data_path = conn->codec.data_path;
457 		cp.out_data_path = conn->codec.data_path;
458 		cp.in_transport_unit_size = 1;
459 		cp.out_transport_unit_size = 1;
460 		break;
461 
462 	case BT_CODEC_CVSD:
463 		if (lmp_esco_capable(conn->link)) {
464 			if (!find_next_esco_param(conn, esco_param_cvsd,
465 						  ARRAY_SIZE(esco_param_cvsd)))
466 				return -EINVAL;
467 			param = &esco_param_cvsd[conn->attempt - 1];
468 		} else {
469 			if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
470 				return -EINVAL;
471 			param = &sco_param_cvsd[conn->attempt - 1];
472 		}
473 		cp.tx_coding_format.id = 2;
474 		cp.rx_coding_format.id = 2;
475 		cp.tx_codec_frame_size = __cpu_to_le16(60);
476 		cp.rx_codec_frame_size = __cpu_to_le16(60);
477 		cp.in_bandwidth = __cpu_to_le32(16000);
478 		cp.out_bandwidth = __cpu_to_le32(16000);
479 		cp.in_coding_format.id = 4;
480 		cp.out_coding_format.id = 4;
481 		cp.in_coded_data_size = __cpu_to_le16(16);
482 		cp.out_coded_data_size = __cpu_to_le16(16);
483 		cp.in_pcm_data_format = 2;
484 		cp.out_pcm_data_format = 2;
485 		cp.in_pcm_sample_payload_msb_pos = 0;
486 		cp.out_pcm_sample_payload_msb_pos = 0;
487 		cp.in_data_path = conn->codec.data_path;
488 		cp.out_data_path = conn->codec.data_path;
489 		cp.in_transport_unit_size = 16;
490 		cp.out_transport_unit_size = 16;
491 		break;
492 	default:
493 		return -EINVAL;
494 	}
495 
496 	cp.retrans_effort = param->retrans_effort;
497 	cp.pkt_type = __cpu_to_le16(param->pkt_type);
498 	cp.max_latency = __cpu_to_le16(param->max_latency);
499 
500 	if (hci_send_cmd(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
501 		return -EIO;
502 
503 	return 0;
504 }
505 
/* Issue HCI Setup Synchronous Connection for the (e)SCO link @conn on top
 * of the ACL identified by @handle, choosing parameters from the air-mode
 * specific tables (each attempt advances to the next entry).
 *
 * Returns true if the command was sent, false when no parameter set is
 * left to try or sending failed.
 */
static bool hci_setup_sync_conn(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_setup_sync_conn cp;
	const struct sco_param *param;

	bt_dev_dbg(hdev, "hcon %p", conn);

	conn->state = BT_CONNECT;
	conn->out = true;

	conn->attempt++;

	cp.handle   = cpu_to_le16(handle);

	cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
	cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
	cp.voice_setting  = cpu_to_le16(conn->setting);

	switch (conn->setting & SCO_AIRMODE_MASK) {
	case SCO_AIRMODE_TRANSP:
		if (!find_next_esco_param(conn, esco_param_msbc,
					  ARRAY_SIZE(esco_param_msbc)))
			return false;
		param = &esco_param_msbc[conn->attempt - 1];
		break;
	case SCO_AIRMODE_CVSD:
		/* eSCO parameters when the ACL link is eSCO capable,
		 * legacy SCO parameters otherwise.
		 */
		if (lmp_esco_capable(conn->link)) {
			if (!find_next_esco_param(conn, esco_param_cvsd,
						  ARRAY_SIZE(esco_param_cvsd)))
				return false;
			param = &esco_param_cvsd[conn->attempt - 1];
		} else {
			if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
				return false;
			param = &sco_param_cvsd[conn->attempt - 1];
		}
		break;
	default:
		return false;
	}

	cp.retrans_effort = param->retrans_effort;
	cp.pkt_type = __cpu_to_le16(param->pkt_type);
	cp.max_latency = __cpu_to_le16(param->max_latency);

	if (hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
		return false;

	return true;
}
557 
558 bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
559 {
560 	int result;
561 	struct conn_handle_t *conn_handle;
562 
563 	if (enhanced_sync_conn_capable(conn->hdev)) {
564 		conn_handle = kzalloc(sizeof(*conn_handle), GFP_KERNEL);
565 
566 		if (!conn_handle)
567 			return false;
568 
569 		conn_handle->conn = conn;
570 		conn_handle->handle = handle;
571 		result = hci_cmd_sync_queue(conn->hdev, hci_enhanced_setup_sync,
572 					    conn_handle, NULL);
573 		if (result < 0)
574 			kfree(conn_handle);
575 
576 		return result == 0;
577 	}
578 
579 	return hci_setup_sync_conn(conn, handle);
580 }
581 
582 u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
583 		      u16 to_multiplier)
584 {
585 	struct hci_dev *hdev = conn->hdev;
586 	struct hci_conn_params *params;
587 	struct hci_cp_le_conn_update cp;
588 
589 	hci_dev_lock(hdev);
590 
591 	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
592 	if (params) {
593 		params->conn_min_interval = min;
594 		params->conn_max_interval = max;
595 		params->conn_latency = latency;
596 		params->supervision_timeout = to_multiplier;
597 	}
598 
599 	hci_dev_unlock(hdev);
600 
601 	memset(&cp, 0, sizeof(cp));
602 	cp.handle		= cpu_to_le16(conn->handle);
603 	cp.conn_interval_min	= cpu_to_le16(min);
604 	cp.conn_interval_max	= cpu_to_le16(max);
605 	cp.conn_latency		= cpu_to_le16(latency);
606 	cp.supervision_timeout	= cpu_to_le16(to_multiplier);
607 	cp.min_ce_len		= cpu_to_le16(0x0000);
608 	cp.max_ce_len		= cpu_to_le16(0x0000);
609 
610 	hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
611 
612 	if (params)
613 		return 0x01;
614 
615 	return 0x00;
616 }
617 
/* Ask the controller to start LE link-layer encryption on @conn.
 *
 * @ediv/@rand: encryption diversifier and random value, passed through
 *              to the controller unchanged.
 * @ltk/@key_size: Long Term Key and its length; only @key_size bytes are
 *              copied, so key_size must be <= 16 (sizeof(cp.ltk)).
 */
void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
		      __u8 ltk[16], __u8 key_size)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_le_start_enc cp;

	BT_DBG("hcon %p", conn);

	memset(&cp, 0, sizeof(cp));

	cp.handle = cpu_to_le16(conn->handle);
	cp.rand = rand;
	cp.ediv = ediv;
	memcpy(cp.ltk, ltk, key_size);

	hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
}
635 
636 /* Device _must_ be locked */
637 void hci_sco_setup(struct hci_conn *conn, __u8 status)
638 {
639 	struct hci_conn *sco = conn->link;
640 
641 	if (!sco)
642 		return;
643 
644 	BT_DBG("hcon %p", conn);
645 
646 	if (!status) {
647 		if (lmp_esco_capable(conn->hdev))
648 			hci_setup_sync(sco, conn->handle);
649 		else
650 			hci_add_sco(sco, conn->handle);
651 	} else {
652 		hci_connect_cfm(sco, status);
653 		hci_conn_del(sco);
654 	}
655 }
656 
/* Disconnect timer (disc_work): fires once a connection has stayed
 * unused for its disconnect timeout, and tears the link down unless it
 * picked up new users in the meantime.
 */
static void hci_conn_timeout(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     disc_work.work);
	int refcnt = atomic_read(&conn->refcnt);

	BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));

	WARN_ON(refcnt < 0);

	/* FIXME: It was observed that in pairing failed scenario, refcnt
	 * drops below 0. Probably this is because l2cap_conn_del calls
	 * l2cap_chan_del for each channel, and inside l2cap_chan_del conn is
	 * dropped. After that loop hci_chan_del is called which also drops
	 * conn. For now make sure that ACL is alive if refcnt is higher then 0,
	 * otherwise drop it.
	 */
	if (refcnt > 0)
		return;

	/* LE connections in scanning state need special handling */
	if (conn->state == BT_CONNECT && conn->type == LE_LINK &&
	    test_bit(HCI_CONN_SCANNING, &conn->flags)) {
		hci_connect_le_scan_remove(conn);
		return;
	}

	hci_abort_conn(conn, hci_proto_disconn_ind(conn));
}
686 
687 /* Enter sniff mode */
688 static void hci_conn_idle(struct work_struct *work)
689 {
690 	struct hci_conn *conn = container_of(work, struct hci_conn,
691 					     idle_work.work);
692 	struct hci_dev *hdev = conn->hdev;
693 
694 	BT_DBG("hcon %p mode %d", conn, conn->mode);
695 
696 	if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
697 		return;
698 
699 	if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
700 		return;
701 
702 	if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
703 		struct hci_cp_sniff_subrate cp;
704 		cp.handle             = cpu_to_le16(conn->handle);
705 		cp.max_latency        = cpu_to_le16(0);
706 		cp.min_remote_timeout = cpu_to_le16(0);
707 		cp.min_local_timeout  = cpu_to_le16(0);
708 		hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
709 	}
710 
711 	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
712 		struct hci_cp_sniff_mode cp;
713 		cp.handle       = cpu_to_le16(conn->handle);
714 		cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
715 		cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
716 		cp.attempt      = cpu_to_le16(4);
717 		cp.timeout      = cpu_to_le16(1);
718 		hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
719 	}
720 }
721 
/* Deferred auto-accept (auto_accept_work): confirm a pending user
 * confirmation request for this connection's remote address.
 */
static void hci_conn_auto_accept(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     auto_accept_work.work);

	hci_send_cmd(conn->hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
		     &conn->dst);
}
730 
731 static void le_disable_advertising(struct hci_dev *hdev)
732 {
733 	if (ext_adv_capable(hdev)) {
734 		struct hci_cp_le_set_ext_adv_enable cp;
735 
736 		cp.enable = 0x00;
737 		cp.num_of_sets = 0x00;
738 
739 		hci_send_cmd(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp),
740 			     &cp);
741 	} else {
742 		u8 enable = 0x00;
743 		hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
744 			     &enable);
745 	}
746 }
747 
/* LE connection-attempt timer (le_conn_timeout): gives up a connect
 * attempt or directed-advertising wait that did not complete in time.
 */
static void le_conn_timeout(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     le_conn_timeout.work);
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("");

	/* We could end up here due to having done directed advertising,
	 * so clean up the state if necessary. This should however only
	 * happen with broken hardware or if low duty cycle was used
	 * (which doesn't have a timeout of its own).
	 */
	if (conn->role == HCI_ROLE_SLAVE) {
		/* Disable LE Advertising */
		le_disable_advertising(hdev);
		hci_dev_lock(hdev);
		hci_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
		hci_dev_unlock(hdev);
		return;
	}

	hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
}
772 
/* Scratch context passed to hci_conn_hash_list_state() walkers and the
 * cmd_sync callbacks when matching/counting ISO (CIS/BIS) connections.
 */
struct iso_list_data {
	union {
		u8  cig;	/* Connected Isochronous Group ID */
		u8  big;	/* Broadcast Isochronous Group ID */
	};
	union {
		u8  cis;	/* CIS ID within the CIG */
		u8  bis;	/* BIS ID within the BIG */
		u16 sync_handle; /* PA sync handle (broadcast receiver) */
	};
	int count;		/* matching connections found by the walker */
	struct {
		struct hci_cp_le_set_cig_params cp;
		/* NOTE(review): 0x11 presumably reflects the max CIS count
		 * handled here - confirm against the CIG parameter limits.
		 */
		struct hci_cis_params cis[0x11];
	} pdu;
};
789 
/* hci_conn_hash_list_state() callback: count broadcast connections whose
 * BIG and BIS ids match the ones in the iso_list_data context.
 */
static void bis_list(struct hci_conn *conn, void *data)
{
	struct iso_list_data *d = data;

	/* Skip if not broadcast/ANY address */
	if (bacmp(&conn->dst, BDADDR_ANY))
		return;

	/* NOTE(review): a search key with an UNSET BIS id matches nothing
	 * here - confirm that is the intended semantics for callers.
	 */
	if (d->big != conn->iso_qos.big || d->bis == BT_ISO_QOS_BIS_UNSET ||
	    d->bis != conn->iso_qos.bis)
		return;

	d->count++;
}
804 
805 static void find_bis(struct hci_conn *conn, void *data)
806 {
807 	struct iso_list_data *d = data;
808 
809 	/* Ignore unicast */
810 	if (bacmp(&conn->dst, BDADDR_ANY))
811 		return;
812 
813 	d->count++;
814 }
815 
/* hci_cmd_sync work: stop broadcasting a BIS - remove its advertising
 * instance and, when no other BIS connections remain connected,
 * terminate the whole BIG.  @data is an iso_list_data (freed by
 * terminate_big_destroy()).
 */
static int terminate_big_sync(struct hci_dev *hdev, void *data)
{
	struct iso_list_data *d = data;

	bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", d->big, d->bis);

	hci_remove_ext_adv_instance_sync(hdev, d->bis, NULL);

	/* Check if ISO connection is a BIS and terminate BIG if there are
	 * no other connections using it.
	 */
	hci_conn_hash_list_state(hdev, find_bis, ISO_LINK, BT_CONNECTED, d);
	if (d->count)
		return 0;

	return hci_le_terminate_big_sync(hdev, d->big,
					 HCI_ERROR_LOCAL_HOST_TERM);
}
834 
/* hci_cmd_sync destroy callback: release the iso_list_data allocated by
 * hci_le_terminate_big()/hci_le_big_terminate().
 */
static void terminate_big_destroy(struct hci_dev *hdev, void *data, int err)
{
	kfree(data);
}
839 
840 static int hci_le_terminate_big(struct hci_dev *hdev, u8 big, u8 bis)
841 {
842 	struct iso_list_data *d;
843 	int ret;
844 
845 	bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", big, bis);
846 
847 	d = kzalloc(sizeof(*d), GFP_KERNEL);
848 	if (!d)
849 		return -ENOMEM;
850 
851 	d->big = big;
852 	d->bis = bis;
853 
854 	ret = hci_cmd_sync_queue(hdev, terminate_big_sync, d,
855 				 terminate_big_destroy);
856 	if (ret)
857 		kfree(d);
858 
859 	return ret;
860 }
861 
/* hci_cmd_sync work (broadcast receiver side): when no BIS connections
 * are left, terminate the BIG sync and then the periodic advertising
 * sync it rode on.  @data is an iso_list_data (freed by
 * terminate_big_destroy()).
 */
static int big_terminate_sync(struct hci_dev *hdev, void *data)
{
	struct iso_list_data *d = data;

	bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", d->big,
		   d->sync_handle);

	/* Check if ISO connection is a BIS and terminate BIG if there are
	 * no other connections using it.
	 */
	hci_conn_hash_list_state(hdev, find_bis, ISO_LINK, BT_CONNECTED, d);
	if (d->count)
		return 0;

	hci_le_big_terminate_sync(hdev, d->big);

	return hci_le_pa_terminate_sync(hdev, d->sync_handle);
}
880 
881 static int hci_le_big_terminate(struct hci_dev *hdev, u8 big, u16 sync_handle)
882 {
883 	struct iso_list_data *d;
884 	int ret;
885 
886 	bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", big, sync_handle);
887 
888 	d = kzalloc(sizeof(*d), GFP_KERNEL);
889 	if (!d)
890 		return -ENOMEM;
891 
892 	d->big = big;
893 	d->sync_handle = sync_handle;
894 
895 	ret = hci_cmd_sync_queue(hdev, big_terminate_sync, d,
896 				 terminate_big_destroy);
897 	if (ret)
898 		kfree(d);
899 
900 	return ret;
901 }
902 
/* Cleanup BIS connection
 *
 * Detects if there are any BIS left connected in a BIG
 * broadcaster: Remove advertising instance and terminate BIG.
 * broadcast receiver: Terminate BIG sync and terminate PA sync.
 *
 * Called via conn->cleanup from hci_conn_cleanup().
 */
static void bis_cleanup(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	bt_dev_dbg(hdev, "conn %p", conn);

	if (conn->role == HCI_ROLE_MASTER) {
		/* Only tear down when this conn was actually broadcasting
		 * (periodic advertising flag set).
		 */
		if (!test_and_clear_bit(HCI_CONN_PER_ADV, &conn->flags))
			return;

		hci_le_terminate_big(hdev, conn->iso_qos.big,
				     conn->iso_qos.bis);
	} else {
		hci_le_big_terminate(hdev, conn->iso_qos.big,
				     conn->sync_handle);
	}
}
926 
static int remove_cig_sync(struct hci_dev *hdev, void *data)
{
	/* The CIG handle is packed into the data pointer with ERR_PTR() by
	 * hci_le_remove_cig() to avoid an allocation; decode it here.
	 */
	u8 handle = PTR_ERR(data);

	return hci_le_remove_cig_sync(hdev, handle);
}
933 
/* Queue removal of the whole CIG identified by @handle; the handle is
 * smuggled through the data pointer via ERR_PTR() (see remove_cig_sync()).
 */
static int hci_le_remove_cig(struct hci_dev *hdev, u8 handle)
{
	bt_dev_dbg(hdev, "handle 0x%2.2x", handle);

	return hci_cmd_sync_queue(hdev, remove_cig_sync, ERR_PTR(handle), NULL);
}
940 
941 static void find_cis(struct hci_conn *conn, void *data)
942 {
943 	struct iso_list_data *d = data;
944 
945 	/* Ignore broadcast */
946 	if (!bacmp(&conn->dst, BDADDR_ANY))
947 		return;
948 
949 	d->count++;
950 }
951 
/* Cleanup CIS connection:
 *
 * Detects if there any CIS left connected in a CIG and remove it.
 *
 * Called via conn->cleanup from hci_conn_cleanup(); at this point the
 * conn being cleaned up has already been removed from the hash, so it
 * does not count itself below.
 */
static void cis_cleanup(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct iso_list_data d;

	memset(&d, 0, sizeof(d));
	d.cig = conn->iso_qos.cig;

	/* Check if ISO connection is a CIS and remove CIG if there are
	 * no other connections using it.
	 *
	 * NOTE(review): find_cis() counts every unicast ISO connection, not
	 * just those belonging to d.cig - confirm whether per-CIG matching
	 * is intended here.
	 */
	hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_CONNECTED, &d);
	if (d.count)
		return;

	hci_le_remove_cig(hdev, conn->iso_qos.cig);
}
973 
/* Allocate and initialise a new hci_conn of @type to @dst with local
 * @role, register it in hdev's connection hash and take a reference on
 * @hdev (released in hci_conn_cleanup()).
 *
 * Returns the new connection (refcnt 0) or NULL on allocation failure.
 */
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
			      u8 role)
{
	struct hci_conn *conn;

	BT_DBG("%s dst %pMR", hdev->name, dst);

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn)
		return NULL;

	bacpy(&conn->dst, dst);
	bacpy(&conn->src, &hdev->bdaddr);
	conn->handle = HCI_CONN_HANDLE_UNSET;
	conn->hdev  = hdev;
	conn->type  = type;
	conn->role  = role;
	conn->mode  = HCI_CM_ACTIVE;
	conn->state = BT_OPEN;
	conn->auth_type = HCI_AT_GENERAL_BONDING;
	conn->io_capability = hdev->io_capability;
	conn->remote_auth = 0xff;
	conn->key_type = 0xff;
	conn->rssi = HCI_RSSI_INVALID;
	conn->tx_power = HCI_TX_POWER_INVALID;
	conn->max_tx_power = HCI_TX_POWER_INVALID;

	set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;

	/* Set Default Authenticated payload timeout to 30s */
	conn->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;

	if (conn->role == HCI_ROLE_MASTER)
		conn->out = true;

	/* Per-type defaults: packet types, and for LE/ISO the local
	 * identity address as source plus the cleanup callback.
	 */
	switch (type) {
	case ACL_LINK:
		conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
		break;
	case LE_LINK:
		/* conn->src should reflect the local identity address */
		hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
		break;
	case ISO_LINK:
		/* conn->src should reflect the local identity address */
		hci_copy_identity_address(hdev, &conn->src, &conn->src_type);

		/* set proper cleanup function */
		if (!bacmp(dst, BDADDR_ANY))
			conn->cleanup = bis_cleanup;
		else if (conn->role == HCI_ROLE_MASTER)
			conn->cleanup = cis_cleanup;

		break;
	case SCO_LINK:
		if (lmp_esco_capable(hdev))
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
		else
			conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
		break;
	case ESCO_LINK:
		conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
		break;
	}

	skb_queue_head_init(&conn->data_q);

	INIT_LIST_HEAD(&conn->chan_list);

	INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
	INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
	INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
	INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);
	INIT_WORK(&conn->le_scan_cleanup, le_scan_cleanup);

	atomic_set(&conn->refcnt, 0);

	hci_dev_hold(hdev);

	hci_conn_hash_add(hdev, conn);

	/* The SCO and eSCO connections will only be notified when their
	 * setup has been completed. This is different to ACL links which
	 * can be notified right away.
	 */
	if (conn->type != SCO_LINK && conn->type != ESCO_LINK) {
		if (hdev->notify)
			hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);
	}

	hci_conn_init_sysfs(conn);

	return conn;
}
1070 
1071 static bool hci_conn_unlink(struct hci_conn *conn)
1072 {
1073 	if (!conn->link)
1074 		return false;
1075 
1076 	conn->link->link = NULL;
1077 	conn->link = NULL;
1078 
1079 	return true;
1080 }
1081 
/* Tear down and free a connection object: cancel its pending work items,
 * unlink it from any parent/child connection, return unacknowledged
 * packet credits to the controller buffer counters, and remove it from
 * the connection hash via hci_conn_cleanup().
 *
 * Always returns 0.
 */
int hci_conn_del(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);

	/* Make sure no timeout/auto-accept/idle work can run concurrently
	 * with the teardown below.
	 */
	cancel_delayed_work_sync(&conn->disc_work);
	cancel_delayed_work_sync(&conn->auto_accept_work);
	cancel_delayed_work_sync(&conn->idle_work);

	if (conn->type == ACL_LINK) {
		struct hci_conn *link = conn->link;

		if (link) {
			hci_conn_unlink(conn);
			/* Due to race, SCO connection might be not established
			 * yet at this point. Delete it now, otherwise it is
			 * possible for it to be stuck and can't be deleted.
			 */
			if (link->handle == HCI_CONN_HANDLE_UNSET)
				hci_conn_del(link);
		}

		/* Unacked frames */
		hdev->acl_cnt += conn->sent;
	} else if (conn->type == LE_LINK) {
		cancel_delayed_work(&conn->le_conn_timeout);

		/* Return unacked frame credits to whichever buffer pool LE
		 * links share on this controller (dedicated LE buffers if
		 * present, otherwise the ACL buffers).
		 */
		if (hdev->le_pkts)
			hdev->le_cnt += conn->sent;
		else
			hdev->acl_cnt += conn->sent;
	} else {
		/* SCO/eSCO/ISO: detach from the parent link (if any) and
		 * drop the reference held on it.
		 */
		struct hci_conn *acl = conn->link;

		if (acl) {
			hci_conn_unlink(conn);
			hci_conn_drop(acl);
		}

		/* Unacked ISO frames */
		if (conn->type == ISO_LINK) {
			if (hdev->iso_pkts)
				hdev->iso_cnt += conn->sent;
			else if (hdev->le_pkts)
				hdev->le_cnt += conn->sent;
			else
				hdev->acl_cnt += conn->sent;
		}
	}

	if (conn->amp_mgr)
		amp_mgr_put(conn->amp_mgr);

	/* Discard any data queued but not yet sent */
	skb_queue_purge(&conn->data_q);

	/* Remove the connection from the list and cleanup its remaining
	 * state. This is a separate function since for some cases like
	 * BT_CONNECT_SCAN we *only* want the cleanup part without the
	 * rest of hci_conn_del.
	 */
	hci_conn_cleanup(conn);

	return 0;
}
1147 
/* Select the HCI device to route a connection from @src to @dst.
 *
 * With no source address (@src == BDADDR_ANY) the first usable controller
 * whose own address differs from @dst is picked; with a source address
 * the controller whose BR/EDR address or LE identity address matches
 * @src/@src_type is picked.
 *
 * Only powered-up primary controllers not bound to a user channel are
 * considered. Returns a referenced hdev (release with hci_dev_put()) or
 * NULL if no controller matches.
 */
struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, uint8_t src_type)
{
	int use_src = bacmp(src, BDADDR_ANY);
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%pMR -> %pMR", src, dst);

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(d, &hci_dev_list, list) {
		if (!test_bit(HCI_UP, &d->flags) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL) ||
		    d->dev_type != HCI_PRIMARY)
			continue;

		/* Simple routing:
		 *   No source address - find interface with bdaddr != dst
		 *   Source address    - find interface with bdaddr == src
		 */

		if (use_src) {
			bdaddr_t id_addr;
			u8 id_addr_type;

			if (src_type == BDADDR_BREDR) {
				if (!lmp_bredr_capable(d))
					continue;
				bacpy(&id_addr, &d->bdaddr);
				id_addr_type = BDADDR_BREDR;
			} else {
				if (!lmp_le_capable(d))
					continue;

				hci_copy_identity_address(d, &id_addr,
							  &id_addr_type);

				/* Convert from HCI to three-value type */
				if (id_addr_type == ADDR_LE_DEV_PUBLIC)
					id_addr_type = BDADDR_LE_PUBLIC;
				else
					id_addr_type = BDADDR_LE_RANDOM;
			}

			if (!bacmp(&id_addr, src) && id_addr_type == src_type) {
				hdev = d; break;
			}
		} else {
			if (bacmp(&d->bdaddr, dst)) {
				hdev = d; break;
			}
		}
	}

	if (hdev)
		hdev = hci_dev_hold(hdev);

	read_unlock(&hci_dev_list_lock);
	return hdev;
}
EXPORT_SYMBOL(hci_get_route);
1208 
/* This function requires the caller holds hdev->lock.
 *
 * Handle a failed LE connection attempt: clean up the scan based connect
 * state and restart advertising.
 */
static void hci_le_conn_failed(struct hci_conn *conn, u8 status)
{
	/* NOTE(review): hdev is cached up front, presumably because conn
	 * must not be dereferenced after the cleanup call — confirm before
	 * reordering.
	 */
	struct hci_dev *hdev = conn->hdev;

	hci_connect_le_scan_cleanup(conn, status);

	/* Enable advertising in case this was a failed connection
	 * attempt as a peripheral.
	 */
	hci_enable_advertising(hdev);
}
1221 
1222 /* This function requires the caller holds hdev->lock */
1223 void hci_conn_failed(struct hci_conn *conn, u8 status)
1224 {
1225 	struct hci_dev *hdev = conn->hdev;
1226 
1227 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
1228 
1229 	switch (conn->type) {
1230 	case LE_LINK:
1231 		hci_le_conn_failed(conn, status);
1232 		break;
1233 	case ACL_LINK:
1234 		mgmt_connect_failed(hdev, &conn->dst, conn->type,
1235 				    conn->dst_type, status);
1236 		break;
1237 	}
1238 
1239 	conn->state = BT_CLOSED;
1240 	hci_connect_cfm(conn, status);
1241 	hci_conn_del(conn);
1242 }
1243 
/* Completion callback for the queued LE create-connection request.
 *
 * On success only the scan based connect state is cleaned up; on failure
 * the pending connection (if it is still ours) is failed and deleted.
 */
static void create_le_conn_complete(struct hci_dev *hdev, void *data, int err)
{
	struct hci_conn *conn = data;

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	if (!err) {
		hci_connect_le_scan_cleanup(conn, 0x00);
		goto done;
	}

	/* Check if connection is still pending */
	if (conn != hci_lookup_le_connect(hdev))
		goto done;

	/* Flush to make sure we send create conn cancel command if needed */
	flush_delayed_work(&conn->le_conn_timeout);
	hci_conn_failed(conn, bt_status(err));

done:
	hci_dev_unlock(hdev);
}
1268 
/* hci_cmd_sync callback that issues the LE Create Connection request for
 * the hci_conn passed in @data.
 */
static int hci_connect_le_sync(struct hci_dev *hdev, void *data)
{
	bt_dev_dbg(hdev, "conn %p", data);

	return hci_le_create_conn_sync(hdev, data);
}
1277 
1278 struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
1279 				u8 dst_type, bool dst_resolved, u8 sec_level,
1280 				u16 conn_timeout, u8 role)
1281 {
1282 	struct hci_conn *conn;
1283 	struct smp_irk *irk;
1284 	int err;
1285 
1286 	/* Let's make sure that le is enabled.*/
1287 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1288 		if (lmp_le_capable(hdev))
1289 			return ERR_PTR(-ECONNREFUSED);
1290 
1291 		return ERR_PTR(-EOPNOTSUPP);
1292 	}
1293 
1294 	/* Since the controller supports only one LE connection attempt at a
1295 	 * time, we return -EBUSY if there is any connection attempt running.
1296 	 */
1297 	if (hci_lookup_le_connect(hdev))
1298 		return ERR_PTR(-EBUSY);
1299 
1300 	/* If there's already a connection object but it's not in
1301 	 * scanning state it means it must already be established, in
1302 	 * which case we can't do anything else except report a failure
1303 	 * to connect.
1304 	 */
1305 	conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
1306 	if (conn && !test_bit(HCI_CONN_SCANNING, &conn->flags)) {
1307 		return ERR_PTR(-EBUSY);
1308 	}
1309 
1310 	/* Check if the destination address has been resolved by the controller
1311 	 * since if it did then the identity address shall be used.
1312 	 */
1313 	if (!dst_resolved) {
1314 		/* When given an identity address with existing identity
1315 		 * resolving key, the connection needs to be established
1316 		 * to a resolvable random address.
1317 		 *
1318 		 * Storing the resolvable random address is required here
1319 		 * to handle connection failures. The address will later
1320 		 * be resolved back into the original identity address
1321 		 * from the connect request.
1322 		 */
1323 		irk = hci_find_irk_by_addr(hdev, dst, dst_type);
1324 		if (irk && bacmp(&irk->rpa, BDADDR_ANY)) {
1325 			dst = &irk->rpa;
1326 			dst_type = ADDR_LE_DEV_RANDOM;
1327 		}
1328 	}
1329 
1330 	if (conn) {
1331 		bacpy(&conn->dst, dst);
1332 	} else {
1333 		conn = hci_conn_add(hdev, LE_LINK, dst, role);
1334 		if (!conn)
1335 			return ERR_PTR(-ENOMEM);
1336 		hci_conn_hold(conn);
1337 		conn->pending_sec_level = sec_level;
1338 	}
1339 
1340 	conn->dst_type = dst_type;
1341 	conn->sec_level = BT_SECURITY_LOW;
1342 	conn->conn_timeout = conn_timeout;
1343 
1344 	conn->state = BT_CONNECT;
1345 	clear_bit(HCI_CONN_SCANNING, &conn->flags);
1346 
1347 	err = hci_cmd_sync_queue(hdev, hci_connect_le_sync, conn,
1348 				 create_le_conn_complete);
1349 	if (err) {
1350 		hci_conn_del(conn);
1351 		return ERR_PTR(err);
1352 	}
1353 
1354 	return conn;
1355 }
1356 
1357 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
1358 {
1359 	struct hci_conn *conn;
1360 
1361 	conn = hci_conn_hash_lookup_le(hdev, addr, type);
1362 	if (!conn)
1363 		return false;
1364 
1365 	if (conn->state != BT_CONNECTED)
1366 		return false;
1367 
1368 	return true;
1369 }
1370 
/* This function requires the caller holds hdev->lock.
 *
 * Mark @addr for an explicit connection attempt: make sure connection
 * parameters exist (creating HCI_AUTO_CONN_EXPLICIT ones if needed) and
 * queue them on hdev->pend_le_conns so passive scanning will connect.
 *
 * Returns 0 on success, -EISCONN if already connected, -ENOMEM on
 * allocation failure.
 */
static int hci_explicit_conn_params_set(struct hci_dev *hdev,
					bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	if (is_connected(hdev, addr, addr_type))
		return -EISCONN;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (!params) {
		params = hci_conn_params_add(hdev, addr, addr_type);
		if (!params)
			return -ENOMEM;

		/* If we created new params, mark them to be deleted in
		 * hci_connect_le_scan_cleanup. It's different case than
		 * existing disabled params, those will stay after cleanup.
		 */
		params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
	}

	/* We're trying to connect, so make sure params are at pend_le_conns */
	if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
	    params->auto_connect == HCI_AUTO_CONN_REPORT ||
	    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
		list_del_init(&params->action);
		list_add(&params->action, &hdev->pend_le_conns);
	}

	params->explicit_connect = true;

	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
	       params->auto_connect);

	return 0;
}
1408 
/* Allocate a BIG handle for @qos if the caller left it unset.
 *
 * Candidate handles 0x00-0xee are probed for one that has no BIS
 * currently bound to it. Returns 0 on success or -EADDRNOTAVAIL when
 * every handle is taken.
 */
static int qos_set_big(struct hci_dev *hdev, struct bt_iso_qos *qos)
{
	struct iso_list_data data;

	/* Allocate a BIG if not set */
	if (qos->big == BT_ISO_QOS_BIG_UNSET) {
		for (data.big = 0x00; data.big < 0xef; data.big++) {
			data.count = 0;
			data.bis = 0xff;

			/* Count BISes bound to this candidate BIG */
			hci_conn_hash_list_state(hdev, bis_list, ISO_LINK,
						 BT_BOUND, &data);
			if (!data.count)
				break;
		}

		if (data.big == 0xef)
			return -EADDRNOTAVAIL;

		/* Update BIG */
		qos->big = data.big;
	}

	return 0;
}
1434 
/* Allocate an advertising set handle for the BIS in @qos if unset.
 *
 * Probes handles 0x01..le_num_of_adv_sets-1 (0x00 is reserved) for one
 * with no BIS bound. Returns 0 on success or -EADDRNOTAVAIL when all
 * advertising sets are in use.
 */
static int qos_set_bis(struct hci_dev *hdev, struct bt_iso_qos *qos)
{
	struct iso_list_data data;

	/* Allocate BIS if not set */
	if (qos->bis == BT_ISO_QOS_BIS_UNSET) {
		/* Find an unused adv set to advertise BIS, skip instance 0x00
		 * since it is reserved as general purpose set.
		 */
		for (data.bis = 0x01; data.bis < hdev->le_num_of_adv_sets;
		     data.bis++) {
			data.count = 0;

			/* Count BISes bound to this candidate handle */
			hci_conn_hash_list_state(hdev, bis_list, ISO_LINK,
						 BT_BOUND, &data);
			if (!data.count)
				break;
		}

		if (data.bis == hdev->le_num_of_adv_sets)
			return -EADDRNOTAVAIL;

		/* Update BIS */
		qos->bis = data.bis;
	}

	return 0;
}
1463 
/* This function requires the caller holds hdev->lock.
 *
 * Allocate a hci_conn for a new broadcast (BIS) source: assign BIG and
 * BIS handles if unset, verify the pair is not already in use, and return
 * a held connection in BT_CONNECT state with HCI_CONN_PER_ADV set.
 *
 * Returns the connection or an ERR_PTR on failure.
 */
static struct hci_conn *hci_add_bis(struct hci_dev *hdev, bdaddr_t *dst,
				    struct bt_iso_qos *qos)
{
	struct hci_conn *conn;
	struct iso_list_data data;
	int err;

	/* Let's make sure that le is enabled.*/
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (lmp_le_capable(hdev))
			return ERR_PTR(-ECONNREFUSED);
		return ERR_PTR(-EOPNOTSUPP);
	}

	err = qos_set_big(hdev, qos);
	if (err)
		return ERR_PTR(err);

	err = qos_set_bis(hdev, qos);
	if (err)
		return ERR_PTR(err);

	data.big = qos->big;
	data.bis = qos->bis;
	data.count = 0;

	/* Check if there is already a matching BIG/BIS */
	hci_conn_hash_list_state(hdev, bis_list, ISO_LINK, BT_BOUND, &data);
	if (data.count)
		return ERR_PTR(-EADDRINUSE);

	conn = hci_conn_hash_lookup_bis(hdev, dst, qos->big, qos->bis);
	if (conn)
		return ERR_PTR(-EADDRINUSE);

	conn = hci_conn_add(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
	if (!conn)
		return ERR_PTR(-ENOMEM);

	set_bit(HCI_CONN_PER_ADV, &conn->flags);
	conn->state = BT_CONNECT;

	hci_conn_hold(conn);
	return conn;
}
1510 
/* This function requires the caller holds hdev->lock.
 *
 * Start a background (scan based) connection attempt to @dst: the device
 * is queued on pend_le_conns and passive scanning is updated so the
 * controller connects once the device starts advertising.
 *
 * Returns a held hci_conn (new, or pre-existing with an upgraded pending
 * security level) or an ERR_PTR on failure.
 */
struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
				     u8 dst_type, u8 sec_level,
				     u16 conn_timeout,
				     enum conn_reasons conn_reason)
{
	struct hci_conn *conn;

	/* Let's make sure that le is enabled.*/
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (lmp_le_capable(hdev))
			return ERR_PTR(-ECONNREFUSED);

		return ERR_PTR(-EOPNOTSUPP);
	}

	/* Some devices send ATT messages as soon as the physical link is
	 * established. To be able to handle these ATT messages, the user-
	 * space first establishes the connection and then starts the pairing
	 * process.
	 *
	 * So if a hci_conn object already exists for the following connection
	 * attempt, we simply update pending_sec_level and auth_type fields
	 * and return the object found.
	 */
	conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
	if (conn) {
		if (conn->pending_sec_level < sec_level)
			conn->pending_sec_level = sec_level;
		goto done;
	}

	BT_DBG("requesting refresh of dst_addr");

	conn = hci_conn_add(hdev, LE_LINK, dst, HCI_ROLE_MASTER);
	if (!conn)
		return ERR_PTR(-ENOMEM);

	if (hci_explicit_conn_params_set(hdev, dst, dst_type) < 0) {
		hci_conn_del(conn);
		return ERR_PTR(-EBUSY);
	}

	conn->state = BT_CONNECT;
	set_bit(HCI_CONN_SCANNING, &conn->flags);
	conn->dst_type = dst_type;
	conn->sec_level = BT_SECURITY_LOW;
	conn->pending_sec_level = sec_level;
	conn->conn_timeout = conn_timeout;
	conn->conn_reason = conn_reason;

	hci_update_passive_scan(hdev);

done:
	hci_conn_hold(conn);
	return conn;
}
1568 
/* Look up or create an ACL connection to @dst and start BR/EDR paging
 * if the link is not already being set up.
 *
 * Returns a held hci_conn or an ERR_PTR when BR/EDR is unavailable or
 * allocation fails.
 */
struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
				 u8 sec_level, u8 auth_type,
				 enum conn_reasons conn_reason)
{
	struct hci_conn *acl;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (lmp_bredr_capable(hdev))
			return ERR_PTR(-ECONNREFUSED);

		return ERR_PTR(-EOPNOTSUPP);
	}

	acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
	if (!acl) {
		acl = hci_conn_add(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
		if (!acl)
			return ERR_PTR(-ENOMEM);
	}

	hci_conn_hold(acl);

	acl->conn_reason = conn_reason;
	/* Only (re)start paging when the link is not already connecting or
	 * connected.
	 */
	if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
		acl->sec_level = BT_SECURITY_LOW;
		acl->pending_sec_level = sec_level;
		acl->auth_type = auth_type;
		hci_acl_create_connection(acl);
	}

	return acl;
}
1601 
/* Set up a SCO/eSCO connection (@type) to @dst on top of a new or
 * existing ACL link. The two connections are cross-linked; the actual
 * SCO setup is issued once the ACL is connected, or deferred via
 * HCI_CONN_SCO_SETUP_PEND while a mode change is pending.
 *
 * Returns a held SCO conn or an ERR_PTR on failure.
 */
struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
				 __u16 setting, struct bt_codec *codec)
{
	struct hci_conn *acl;
	struct hci_conn *sco;

	acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING,
			      CONN_REASON_SCO_CONNECT);
	if (IS_ERR(acl))
		return acl;

	sco = hci_conn_hash_lookup_ba(hdev, type, dst);
	if (!sco) {
		sco = hci_conn_add(hdev, type, dst, HCI_ROLE_MASTER);
		if (!sco) {
			hci_conn_drop(acl);
			return ERR_PTR(-ENOMEM);
		}
	}

	/* Cross-link the ACL and its SCO child */
	acl->link = sco;
	sco->link = acl;

	hci_conn_hold(sco);

	sco->setting = setting;
	sco->codec = *codec;

	if (acl->state == BT_CONNECTED &&
	    (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
		set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
		hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);

		if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
			/* defer SCO setup until mode change completed */
			set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
			return sco;
		}

		hci_sco_setup(acl, 0x00);
	}

	return sco;
}
1646 
1647 static void cis_add(struct iso_list_data *d, struct bt_iso_qos *qos)
1648 {
1649 	struct hci_cis_params *cis = &d->pdu.cis[d->pdu.cp.num_cis];
1650 
1651 	cis->cis_id = qos->cis;
1652 	cis->c_sdu  = cpu_to_le16(qos->out.sdu);
1653 	cis->p_sdu  = cpu_to_le16(qos->in.sdu);
1654 	cis->c_phy  = qos->out.phy ? qos->out.phy : qos->in.phy;
1655 	cis->p_phy  = qos->in.phy ? qos->in.phy : qos->out.phy;
1656 	cis->c_rtn  = qos->out.rtn;
1657 	cis->p_rtn  = qos->in.rtn;
1658 
1659 	d->pdu.cp.num_cis++;
1660 }
1661 
/* hci_conn_hash_list_state() iterator: count connections whose CIG and
 * CIS ids both match @data, and additionally collect their QoS into the
 * pending Set CIG Parameters PDU when a CIG id has been chosen and the
 * PDU still has room.
 */
static void cis_list(struct hci_conn *conn, void *data)
{
	struct iso_list_data *d = data;

	/* Skip if broadcast/ANY address */
	if (!bacmp(&conn->dst, BDADDR_ANY))
		return;

	if (d->cig != conn->iso_qos.cig || d->cis == BT_ISO_QOS_CIS_UNSET ||
	    d->cis != conn->iso_qos.cis)
		return;

	d->count++;

	if (d->pdu.cp.cig_id == BT_ISO_QOS_CIG_UNSET ||
	    d->count >= ARRAY_SIZE(d->pdu.cis))
		return;

	cis_add(d, &conn->iso_qos);
}
1682 
1683 static int hci_le_create_big(struct hci_conn *conn, struct bt_iso_qos *qos)
1684 {
1685 	struct hci_dev *hdev = conn->hdev;
1686 	struct hci_cp_le_create_big cp;
1687 
1688 	memset(&cp, 0, sizeof(cp));
1689 
1690 	cp.handle = qos->big;
1691 	cp.adv_handle = qos->bis;
1692 	cp.num_bis  = 0x01;
1693 	hci_cpu_to_le24(qos->out.interval, cp.bis.sdu_interval);
1694 	cp.bis.sdu = cpu_to_le16(qos->out.sdu);
1695 	cp.bis.latency =  cpu_to_le16(qos->out.latency);
1696 	cp.bis.rtn  = qos->out.rtn;
1697 	cp.bis.phy  = qos->out.phy;
1698 	cp.bis.packing = qos->packing;
1699 	cp.bis.framing = qos->framing;
1700 	cp.bis.encryption = 0x00;
1701 	memset(&cp.bis.bcode, 0, sizeof(cp.bis.bcode));
1702 
1703 	return hci_send_cmd(hdev, HCI_OP_LE_CREATE_BIG, sizeof(cp), &cp);
1704 }
1705 
/* Program the CIG parameters for @conn's group, allocating CIG/CIS ids
 * in @qos if still unset, and send HCI_OP_LE_SET_CIG_PARAMS with every
 * bound CIS of the CIG (the command always reprograms the whole group).
 *
 * Returns true if the command was sent, false on allocation or send
 * failure.
 */
static bool hci_le_set_cig_params(struct hci_conn *conn, struct bt_iso_qos *qos)
{
	struct hci_dev *hdev = conn->hdev;
	struct iso_list_data data;

	memset(&data, 0, sizeof(data));

	/* Allocate a CIG if not set */
	if (qos->cig == BT_ISO_QOS_CIG_UNSET) {
		/* Pick a CIG id with no CIS either bound or connected */
		for (data.cig = 0x00; data.cig < 0xff; data.cig++) {
			data.count = 0;
			data.cis = 0xff;

			hci_conn_hash_list_state(hdev, cis_list, ISO_LINK,
						 BT_BOUND, &data);
			if (data.count)
				continue;

			hci_conn_hash_list_state(hdev, cis_list, ISO_LINK,
						 BT_CONNECTED, &data);
			if (!data.count)
				break;
		}

		if (data.cig == 0xff)
			return false;

		/* Update CIG */
		qos->cig = data.cig;
	}

	/* Fill in the group-wide parameters of the PDU */
	data.pdu.cp.cig_id = qos->cig;
	hci_cpu_to_le24(qos->out.interval, data.pdu.cp.c_interval);
	hci_cpu_to_le24(qos->in.interval, data.pdu.cp.p_interval);
	data.pdu.cp.sca = qos->sca;
	data.pdu.cp.packing = qos->packing;
	data.pdu.cp.framing = qos->framing;
	data.pdu.cp.c_latency = cpu_to_le16(qos->out.latency);
	data.pdu.cp.p_latency = cpu_to_le16(qos->in.latency);

	/* With an explicit CIS id, make sure it is not already bound and
	 * add it to the PDU directly.
	 */
	if (qos->cis != BT_ISO_QOS_CIS_UNSET) {
		data.count = 0;
		data.cig = qos->cig;
		data.cis = qos->cis;

		hci_conn_hash_list_state(hdev, cis_list, ISO_LINK, BT_BOUND,
					 &data);
		if (data.count)
			return false;

		cis_add(&data, qos);
	}

	/* Reprogram all CIS(s) with the same CIG */
	for (data.cig = qos->cig, data.cis = 0x00; data.cis < 0x11;
	     data.cis++) {
		data.count = 0;

		hci_conn_hash_list_state(hdev, cis_list, ISO_LINK, BT_BOUND,
					 &data);
		if (data.count)
			continue;

		/* Allocate a CIS if not set */
		if (qos->cis == BT_ISO_QOS_CIS_UNSET) {
			/* Update CIS */
			qos->cis = data.cis;
			cis_add(&data, qos);
		}
	}

	if (qos->cis == BT_ISO_QOS_CIS_UNSET || !data.pdu.cp.num_cis)
		return false;

	if (hci_send_cmd(hdev, HCI_OP_LE_SET_CIG_PARAMS,
			 sizeof(data.pdu.cp) +
			 (data.pdu.cp.num_cis * sizeof(*data.pdu.cis)),
			 &data.pdu) < 0)
		return false;

	return true;
}
1788 
/* Look up or create a CIS hci_conn for @dst and program its QoS.
 *
 * Returns the existing conn untouched when it is already connected or
 * already bound with identical QoS. Unset interval/latency values are
 * mirrored from the opposite direction before the CIG is (re)programmed.
 *
 * Returns the conn in BT_BOUND state or an ERR_PTR on failure.
 */
struct hci_conn *hci_bind_cis(struct hci_dev *hdev, bdaddr_t *dst,
			      __u8 dst_type, struct bt_iso_qos *qos)
{
	struct hci_conn *cis;

	cis = hci_conn_hash_lookup_cis(hdev, dst, dst_type);
	if (!cis) {
		cis = hci_conn_add(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
		if (!cis)
			return ERR_PTR(-ENOMEM);
		cis->cleanup = cis_cleanup;
		cis->dst_type = dst_type;
	}

	if (cis->state == BT_CONNECTED)
		return cis;

	/* Check if CIS has been set and the settings matches */
	if (cis->state == BT_BOUND &&
	    !memcmp(&cis->iso_qos, qos, sizeof(*qos)))
		return cis;

	/* Update LINK PHYs according to QoS preference */
	cis->le_tx_phy = qos->out.phy;
	cis->le_rx_phy = qos->in.phy;

	/* If output interval is not set use the input interval as it cannot be
	 * 0x000000.
	 */
	if (!qos->out.interval)
		qos->out.interval = qos->in.interval;

	/* If input interval is not set use the output interval as it cannot be
	 * 0x000000.
	 */
	if (!qos->in.interval)
		qos->in.interval = qos->out.interval;

	/* If output latency is not set use the input latency as it cannot be
	 * 0x0000.
	 */
	if (!qos->out.latency)
		qos->out.latency = qos->in.latency;

	/* If input latency is not set use the output latency as it cannot be
	 * 0x0000.
	 */
	if (!qos->in.latency)
		qos->in.latency = qos->out.latency;

	if (!hci_le_set_cig_params(cis, qos)) {
		hci_conn_drop(cis);
		return ERR_PTR(-EINVAL);
	}

	cis->iso_qos = *qos;
	cis->state = BT_BOUND;

	return cis;
}
1849 
1850 bool hci_iso_setup_path(struct hci_conn *conn)
1851 {
1852 	struct hci_dev *hdev = conn->hdev;
1853 	struct hci_cp_le_setup_iso_path cmd;
1854 
1855 	memset(&cmd, 0, sizeof(cmd));
1856 
1857 	if (conn->iso_qos.out.sdu) {
1858 		cmd.handle = cpu_to_le16(conn->handle);
1859 		cmd.direction = 0x00; /* Input (Host to Controller) */
1860 		cmd.path = 0x00; /* HCI path if enabled */
1861 		cmd.codec = 0x03; /* Transparent Data */
1862 
1863 		if (hci_send_cmd(hdev, HCI_OP_LE_SETUP_ISO_PATH, sizeof(cmd),
1864 				 &cmd) < 0)
1865 			return false;
1866 	}
1867 
1868 	if (conn->iso_qos.in.sdu) {
1869 		cmd.handle = cpu_to_le16(conn->handle);
1870 		cmd.direction = 0x01; /* Output (Controller to Host) */
1871 		cmd.path = 0x00; /* HCI path if enabled */
1872 		cmd.codec = 0x03; /* Transparent Data */
1873 
1874 		if (hci_send_cmd(hdev, HCI_OP_LE_SETUP_ISO_PATH, sizeof(cmd),
1875 				 &cmd) < 0)
1876 			return false;
1877 	}
1878 
1879 	return true;
1880 }
1881 
/* hci_cmd_sync callback: build and send HCI_OP_LE_CREATE_CIS for the CIS
 * in @data plus every other CIS of the same CIG that is ready, since the
 * spec forbids issuing the command again before all CIS from a previous
 * use are established. Sends nothing if any CIS of the CIG is not ready.
 */
static int hci_create_cis_sync(struct hci_dev *hdev, void *data)
{
	struct {
		struct hci_cp_le_create_cis cp;
		struct hci_cis cis[0x1f];
	} cmd;
	struct hci_conn *conn = data;
	u8 cig;

	memset(&cmd, 0, sizeof(cmd));
	/* The triggering CIS always occupies the first slot */
	cmd.cis[0].acl_handle = cpu_to_le16(conn->link->handle);
	cmd.cis[0].cis_handle = cpu_to_le16(conn->handle);
	cmd.cp.num_cis++;
	cig = conn->iso_qos.cig;

	hci_dev_lock(hdev);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
		struct hci_cis *cis = &cmd.cis[cmd.cp.num_cis];

		if (conn == data || conn->type != ISO_LINK ||
		    conn->state == BT_CONNECTED || conn->iso_qos.cig != cig)
			continue;

		/* Check if all CIS(s) belonging to a CIG are ready */
		if (!conn->link || conn->link->state != BT_CONNECTED ||
		    conn->state != BT_CONNECT) {
			cmd.cp.num_cis = 0;
			break;
		}

		/* Group all CIS with state BT_CONNECT since the spec don't
		 * allow to send them individually:
		 *
		 * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
		 * page 2566:
		 *
		 * If the Host issues this command before all the
		 * HCI_LE_CIS_Established events from the previous use of the
		 * command have been generated, the Controller shall return the
		 * error code Command Disallowed (0x0C).
		 */
		cis->acl_handle = cpu_to_le16(conn->link->handle);
		cis->cis_handle = cpu_to_le16(conn->handle);
		cmd.cp.num_cis++;
	}

	rcu_read_unlock();

	hci_dev_unlock(hdev);

	if (!cmd.cp.num_cis)
		return 0;

	return hci_send_cmd(hdev, HCI_OP_LE_CREATE_CIS, sizeof(cmd.cp) +
			    sizeof(cmd.cis[0]) * cmd.cp.num_cis, &cmd);
}
1941 
/* Queue an LE Create CIS request for @conn: either the CIS itself
 * (ISO_LINK) or an LE ACL whose linked CIS should be created. The CIS is
 * moved to BT_CONNECT once the request is queued; a no-op if it already
 * is in that state.
 *
 * Returns 0 on success or a negative error code.
 */
int hci_le_create_cis(struct hci_conn *conn)
{
	struct hci_conn *cis;
	struct hci_dev *hdev = conn->hdev;
	int err;

	switch (conn->type) {
	case LE_LINK:
		if (!conn->link || conn->state != BT_CONNECTED)
			return -EINVAL;
		cis = conn->link;
		break;
	case ISO_LINK:
		cis = conn;
		break;
	default:
		return -EINVAL;
	}

	if (cis->state == BT_CONNECT)
		return 0;

	/* Queue Create CIS */
	err = hci_cmd_sync_queue(hdev, hci_create_cis_sync, cis, NULL);
	if (err)
		return err;

	cis->state = BT_CONNECT;

	return 0;
}
1973 
1974 static void hci_iso_qos_setup(struct hci_dev *hdev, struct hci_conn *conn,
1975 			      struct bt_iso_io_qos *qos, __u8 phy)
1976 {
1977 	/* Only set MTU if PHY is enabled */
1978 	if (!qos->sdu && qos->phy) {
1979 		if (hdev->iso_mtu > 0)
1980 			qos->sdu = hdev->iso_mtu;
1981 		else if (hdev->le_mtu > 0)
1982 			qos->sdu = hdev->le_mtu;
1983 		else
1984 			qos->sdu = hdev->acl_mtu;
1985 	}
1986 
1987 	/* Use the same PHY as ACL if set to any */
1988 	if (qos->phy == BT_ISO_PHY_ANY)
1989 		qos->phy = phy;
1990 
1991 	/* Use LE ACL connection interval if not set */
1992 	if (!qos->interval)
1993 		/* ACL interval unit in 1.25 ms to us */
1994 		qos->interval = conn->le_conn_interval * 1250;
1995 
1996 	/* Use LE ACL connection latency if not set */
1997 	if (!qos->latency)
1998 		qos->latency = conn->le_conn_latency;
1999 }
2000 
2001 static void hci_bind_bis(struct hci_conn *conn,
2002 			 struct bt_iso_qos *qos)
2003 {
2004 	/* Update LINK PHYs according to QoS preference */
2005 	conn->le_tx_phy = qos->out.phy;
2006 	conn->le_tx_phy = qos->out.phy;
2007 	conn->iso_qos = *qos;
2008 	conn->state = BT_BOUND;
2009 }
2010 
/* hci_cmd_sync callback: start the periodic advertising train that
 * carries the BASE and then issue LE Create BIG with @data's QoS.
 */
static int create_big_sync(struct hci_dev *hdev, void *data)
{
	struct hci_conn *conn = data;
	struct bt_iso_qos *qos = &conn->iso_qos;
	u16 interval, sync_interval = 0;
	u32 flags = 0;
	int err;

	if (qos->out.phy == 0x02)
		flags |= MGMT_ADV_FLAG_SEC_2M;

	/* Align intervals */
	interval = qos->out.interval / 1250;

	/* NOTE(review): 1600 * 0.625 ms = 1 s, so sync_interval appears to
	 * be expressed in seconds here — confirm against the qos struct.
	 */
	if (qos->bis)
		sync_interval = qos->sync_interval * 1600;

	err = hci_start_per_adv_sync(hdev, qos->bis, conn->le_per_adv_data_len,
				     conn->le_per_adv_data, flags, interval,
				     interval, sync_interval);
	if (err)
		return err;

	return hci_le_create_big(conn, &conn->iso_qos);
}
2036 
/* Completion handler for the queued PA create-sync request: log failures
 * and free the command parameters allocated by hci_pa_create_sync().
 */
static void create_pa_complete(struct hci_dev *hdev, void *data, int err)
{
	bt_dev_dbg(hdev, "");

	if (err)
		bt_dev_err(hdev, "Unable to create PA: %d", err);

	kfree(data);
}
2048 
/* hci_cmd_sync callback: issue LE Periodic Advertising Create Sync and,
 * on success, update passive scanning so the sync can be established.
 * Clears the HCI_PA_SYNC flag again on failure so another attempt is
 * possible.
 */
static int create_pa_sync(struct hci_dev *hdev, void *data)
{
	struct hci_cp_le_pa_create_sync *cp = data;
	int err;

	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_CREATE_SYNC,
				    sizeof(*cp), cp, HCI_CMD_TIMEOUT);
	if (err) {
		hci_dev_clear_flag(hdev, HCI_PA_SYNC);
		return err;
	}

	return hci_update_passive_scan_sync(hdev);
}
2063 
/* Create a periodic advertising sync to @dst (advertising SID @sid).
 *
 * Only one PA sync may be pending at a time, guarded by the HCI_PA_SYNC
 * flag. The allocated command parameters are freed by
 * create_pa_complete().
 *
 * Returns 0 when queued, -EBUSY if a sync is already pending, -ENOMEM on
 * allocation failure.
 */
int hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst, __u8 dst_type,
		       __u8 sid)
{
	struct hci_cp_le_pa_create_sync *cp;

	if (hci_dev_test_and_set_flag(hdev, HCI_PA_SYNC))
		return -EBUSY;

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp) {
		hci_dev_clear_flag(hdev, HCI_PA_SYNC);
		return -ENOMEM;
	}

	cp->sid = sid;
	cp->addr_type = dst_type;
	bacpy(&cp->addr, dst);

	/* Queue start pa_create_sync and scan */
	return hci_cmd_sync_queue(hdev, create_pa_sync, cp, create_pa_complete);
}
2085 
2086 int hci_le_big_create_sync(struct hci_dev *hdev, struct bt_iso_qos *qos,
2087 			   __u16 sync_handle, __u8 num_bis, __u8 bis[])
2088 {
2089 	struct _packed {
2090 		struct hci_cp_le_big_create_sync cp;
2091 		__u8  bis[0x11];
2092 	} pdu;
2093 	int err;
2094 
2095 	if (num_bis > sizeof(pdu.bis))
2096 		return -EINVAL;
2097 
2098 	err = qos_set_big(hdev, qos);
2099 	if (err)
2100 		return err;
2101 
2102 	memset(&pdu, 0, sizeof(pdu));
2103 	pdu.cp.handle = qos->big;
2104 	pdu.cp.sync_handle = cpu_to_le16(sync_handle);
2105 	pdu.cp.num_bis = num_bis;
2106 	memcpy(pdu.bis, bis, num_bis);
2107 
2108 	return hci_send_cmd(hdev, HCI_OP_LE_BIG_CREATE_SYNC,
2109 			    sizeof(pdu.cp) + num_bis, &pdu);
2110 }
2111 
/* Completion handler for the queued create_big_sync request: on failure
 * notify listeners and dispose of the connection.
 */
static void create_big_complete(struct hci_dev *hdev, void *data, int err)
{
	struct hci_conn *conn = data;

	bt_dev_dbg(hdev, "conn %p", conn);

	if (!err)
		return;

	bt_dev_err(hdev, "Unable to create BIG: %d", err);
	hci_connect_cfm(conn, err);
	hci_conn_del(conn);
}
2124 
/* Create a BIS broadcast source: bind a conn with the given QoS, embed
 * the BASE (if any) into the periodic advertising data and queue the
 * work that starts periodic advertising and creates the BIG.
 *
 * Returns a held conn or an ERR_PTR on failure.
 */
struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst,
				 __u8 dst_type, struct bt_iso_qos *qos,
				 __u8 base_len, __u8 *base)
{
	struct hci_conn *conn;
	int err;

	/* We need hci_conn object using the BDADDR_ANY as dst */
	conn = hci_add_bis(hdev, dst, qos);
	if (IS_ERR(conn))
		return conn;

	hci_bind_bis(conn, qos);

	/* Add Basic Announcement into Periodic Adv Data if BASE is set */
	if (base_len && base) {
		base_len = eir_append_service_data(conn->le_per_adv_data, 0,
						   0x1851, base, base_len);
		conn->le_per_adv_data_len = base_len;
	}

	/* Queue start periodic advertising and create BIG */
	err = hci_cmd_sync_queue(hdev, create_big_sync, conn,
				 create_big_complete);
	if (err < 0) {
		hci_conn_drop(conn);
		return ERR_PTR(err);
	}

	/* NOTE(review): the out-direction QoS defaults are applied after
	 * the sync work (which reads conn->iso_qos) has been queued —
	 * verify this ordering is intentional.
	 */
	hci_iso_qos_setup(hdev, conn, &qos->out,
			  conn->le_tx_phy ? conn->le_tx_phy :
			  hdev->le_tx_def_phys);

	return conn;
}
2160 
/* Establish a Connected Isochronous Stream (CIS): first obtain (or reuse)
 * an LE ACL connection to @dst, then bind a CIS on top of it.
 *
 * Returns the CIS hci_conn or an ERR_PTR on failure; on CIS bind failure
 * the reference taken on the LE link is dropped.
 */
struct hci_conn *hci_connect_cis(struct hci_dev *hdev, bdaddr_t *dst,
				 __u8 dst_type, struct bt_iso_qos *qos)
{
	struct hci_conn *le;
	struct hci_conn *cis;

	/* While advertising we connect as peripheral (direct connect);
	 * otherwise go through the connect-by-scanning path as central.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		le = hci_connect_le(hdev, dst, dst_type, false,
				    BT_SECURITY_LOW,
				    HCI_LE_CONN_TIMEOUT,
				    HCI_ROLE_SLAVE);
	else
		le = hci_connect_le_scan(hdev, dst, dst_type,
					 BT_SECURITY_LOW,
					 HCI_LE_CONN_TIMEOUT,
					 CONN_REASON_ISO_CONNECT);
	if (IS_ERR(le))
		return le;

	/* Use the negotiated PHYs when known, else the controller defaults */
	hci_iso_qos_setup(hdev, le, &qos->out,
			  le->le_tx_phy ? le->le_tx_phy : hdev->le_tx_def_phys);
	hci_iso_qos_setup(hdev, le, &qos->in,
			  le->le_rx_phy ? le->le_rx_phy : hdev->le_rx_def_phys);

	cis = hci_bind_cis(hdev, dst, dst_type, qos);
	if (IS_ERR(cis)) {
		hci_conn_drop(le);
		return cis;
	}

	/* Cross-link the ACL and the CIS so either side can find the other */
	le->link = cis;
	cis->link = le;

	hci_conn_hold(cis);

	/* If LE is already connected and CIS handle is already set proceed to
	 * Create CIS immediately.
	 */
	if (le->state == BT_CONNECTED && cis->handle != HCI_CONN_HANDLE_UNSET)
		hci_le_create_cis(le);

	return cis;
}
2204 
2205 /* Check link security requirement */
2206 int hci_conn_check_link_mode(struct hci_conn *conn)
2207 {
2208 	BT_DBG("hcon %p", conn);
2209 
2210 	/* In Secure Connections Only mode, it is required that Secure
2211 	 * Connections is used and the link is encrypted with AES-CCM
2212 	 * using a P-256 authenticated combination key.
2213 	 */
2214 	if (hci_dev_test_flag(conn->hdev, HCI_SC_ONLY)) {
2215 		if (!hci_conn_sc_enabled(conn) ||
2216 		    !test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
2217 		    conn->key_type != HCI_LK_AUTH_COMBINATION_P256)
2218 			return 0;
2219 	}
2220 
2221 	 /* AES encryption is required for Level 4:
2222 	  *
2223 	  * BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 3, Part C
2224 	  * page 1319:
2225 	  *
2226 	  * 128-bit equivalent strength for link and encryption keys
2227 	  * required using FIPS approved algorithms (E0 not allowed,
2228 	  * SAFER+ not allowed, and P-192 not allowed; encryption key
2229 	  * not shortened)
2230 	  */
2231 	if (conn->sec_level == BT_SECURITY_FIPS &&
2232 	    !test_bit(HCI_CONN_AES_CCM, &conn->flags)) {
2233 		bt_dev_err(conn->hdev,
2234 			   "Invalid security: Missing AES-CCM usage");
2235 		return 0;
2236 	}
2237 
2238 	if (hci_conn_ssp_enabled(conn) &&
2239 	    !test_bit(HCI_CONN_ENCRYPT, &conn->flags))
2240 		return 0;
2241 
2242 	return 1;
2243 }
2244 
/* Authenticate remote device.
 *
 * Returns 1 when the connection is already authenticated at (or above)
 * the requested level, 0 when an authentication request has been issued
 * (or is already pending) and the caller must wait for its completion.
 */
static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
	BT_DBG("hcon %p", conn);

	/* Never lower a level that is already pending */
	if (conn->pending_sec_level > sec_level)
		sec_level = conn->pending_sec_level;

	if (sec_level > conn->sec_level)
		conn->pending_sec_level = sec_level;
	else if (test_bit(HCI_CONN_AUTH, &conn->flags))
		return 1;

	/* Make sure we preserve an existing MITM requirement */
	auth_type |= (conn->auth_type & 0x01);

	conn->auth_type = auth_type;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(cp), &cp);

		/* If we're already encrypted set the REAUTH_PEND flag,
		 * otherwise set the ENCRYPT_PEND.
		 */
		if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
			set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
		else
			set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
	}

	return 0;
}
2281 
2282 /* Encrypt the link */
2283 static void hci_conn_encrypt(struct hci_conn *conn)
2284 {
2285 	BT_DBG("hcon %p", conn);
2286 
2287 	if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
2288 		struct hci_cp_set_conn_encrypt cp;
2289 		cp.handle  = cpu_to_le16(conn->handle);
2290 		cp.encrypt = 0x01;
2291 		hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2292 			     &cp);
2293 	}
2294 }
2295 
/* Enable security.
 *
 * Walk the BR/EDR link-key ladder: decide whether the existing link key
 * is strong enough for @sec_level, and if not, trigger authentication
 * and/or encryption.  LE links are delegated entirely to SMP.
 *
 * Returns 1 when all requirements are already met, 0 when an HCI
 * procedure was started (or is pending) and the caller must wait.
 */
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
		      bool initiator)
{
	BT_DBG("hcon %p", conn);

	if (conn->type == LE_LINK)
		return smp_conn_security(conn, sec_level);

	/* For sdp we don't need the link key. */
	if (sec_level == BT_SECURITY_SDP)
		return 1;

	/* For non 2.1 devices and low security level we don't need the link
	   key. */
	if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
		return 1;

	/* For other security levels we need the link key. */
	if (!test_bit(HCI_CONN_AUTH, &conn->flags))
		goto auth;

	/* An authenticated FIPS approved combination key has sufficient
	 * security for security level 4. */
	if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256 &&
	    sec_level == BT_SECURITY_FIPS)
		goto encrypt;

	/* An authenticated combination key has sufficient security for
	   security level 3. */
	if ((conn->key_type == HCI_LK_AUTH_COMBINATION_P192 ||
	     conn->key_type == HCI_LK_AUTH_COMBINATION_P256) &&
	    sec_level == BT_SECURITY_HIGH)
		goto encrypt;

	/* An unauthenticated combination key has sufficient security for
	   security level 1 and 2. */
	if ((conn->key_type == HCI_LK_UNAUTH_COMBINATION_P192 ||
	     conn->key_type == HCI_LK_UNAUTH_COMBINATION_P256) &&
	    (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW))
		goto encrypt;

	/* A combination key has always sufficient security for the security
	   levels 1 or 2. High security level requires the combination key
	   is generated using maximum PIN code length (16).
	   For pre 2.1 units. */
	if (conn->key_type == HCI_LK_COMBINATION &&
	    (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW ||
	     conn->pin_length == 16))
		goto encrypt;

auth:
	/* An encryption change is already in progress; wait for it */
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
		return 0;

	if (initiator)
		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

	if (!hci_conn_auth(conn, sec_level, auth_type))
		return 0;

encrypt:
	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags)) {
		/* Ensure that the encryption key size has been read,
		 * otherwise stall the upper layer responses.
		 */
		if (!conn->enc_key_size)
			return 0;

		/* Nothing else needed, all requirements are met */
		return 1;
	}

	hci_conn_encrypt(conn);
	return 0;
}
EXPORT_SYMBOL(hci_conn_security);
2373 
2374 /* Check secure link requirement */
2375 int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
2376 {
2377 	BT_DBG("hcon %p", conn);
2378 
2379 	/* Accept if non-secure or higher security level is required */
2380 	if (sec_level != BT_SECURITY_HIGH && sec_level != BT_SECURITY_FIPS)
2381 		return 1;
2382 
2383 	/* Accept if secure or higher security level is already present */
2384 	if (conn->sec_level == BT_SECURITY_HIGH ||
2385 	    conn->sec_level == BT_SECURITY_FIPS)
2386 		return 1;
2387 
2388 	/* Reject not secure link */
2389 	return 0;
2390 }
2391 EXPORT_SYMBOL(hci_conn_check_secure);
2392 
2393 /* Switch role */
2394 int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
2395 {
2396 	BT_DBG("hcon %p", conn);
2397 
2398 	if (role == conn->role)
2399 		return 1;
2400 
2401 	if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
2402 		struct hci_cp_switch_role cp;
2403 		bacpy(&cp.bdaddr, &conn->dst);
2404 		cp.role = role;
2405 		hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
2406 	}
2407 
2408 	return 0;
2409 }
2410 EXPORT_SYMBOL(hci_conn_switch_role);
2411 
/* Enter active mode.
 *
 * Request an exit from sniff mode when the connection is sniffing and
 * either power-save is disabled for it or @force_active is set; in all
 * cases (re)arm the idle-disconnect timer when one is configured.
 */
void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("hcon %p mode %d", conn, conn->mode);

	/* Nothing to exit from when not sniffing */
	if (conn->mode != HCI_CM_SNIFF)
		goto timer;

	if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
		goto timer;

	/* MODE_CHANGE_PEND guards against sending a duplicate request */
	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
		struct hci_cp_exit_sniff_mode cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
	}

timer:
	/* Restart the idle timer; queue_delayed_work also refreshes an
	 * already-queued work item's deadline is NOT guaranteed -- it is
	 * a no-op if pending, matching the original behavior.
	 */
	if (hdev->idle_timeout > 0)
		queue_delayed_work(hdev->workqueue, &conn->idle_work,
				   msecs_to_jiffies(hdev->idle_timeout));
}
2436 
/* Drop all connection on the device.
 *
 * Marks every connection closed, notifies upper layers with a local-host
 * termination reason, and deletes each hci_conn.  Callers are expected to
 * hold whatever locking hci_conn_del requires (same context as the rest
 * of this file's teardown paths).
 */
void hci_conn_hash_flush(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c, *n;

	BT_DBG("hdev %s", hdev->name);

	/* _safe iteration: hci_conn_del removes entries while we walk */
	list_for_each_entry_safe(c, n, &h->list, list) {
		c->state = BT_CLOSED;

		hci_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);

		/* Unlink before deleting otherwise it is possible that
		 * hci_conn_del removes the link which may cause the list to
		 * contain items already freed.
		 */
		hci_conn_unlink(c);
		hci_conn_del(c);
	}
}
2458 
2459 /* Check pending connect attempts */
2460 void hci_conn_check_pending(struct hci_dev *hdev)
2461 {
2462 	struct hci_conn *conn;
2463 
2464 	BT_DBG("hdev %s", hdev->name);
2465 
2466 	hci_dev_lock(hdev);
2467 
2468 	conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
2469 	if (conn)
2470 		hci_acl_create_connection(conn);
2471 
2472 	hci_dev_unlock(hdev);
2473 }
2474 
2475 static u32 get_link_mode(struct hci_conn *conn)
2476 {
2477 	u32 link_mode = 0;
2478 
2479 	if (conn->role == HCI_ROLE_MASTER)
2480 		link_mode |= HCI_LM_MASTER;
2481 
2482 	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
2483 		link_mode |= HCI_LM_ENCRYPT;
2484 
2485 	if (test_bit(HCI_CONN_AUTH, &conn->flags))
2486 		link_mode |= HCI_LM_AUTH;
2487 
2488 	if (test_bit(HCI_CONN_SECURE, &conn->flags))
2489 		link_mode |= HCI_LM_SECURE;
2490 
2491 	if (test_bit(HCI_CONN_FIPS, &conn->flags))
2492 		link_mode |= HCI_LM_FIPS;
2493 
2494 	return link_mode;
2495 }
2496 
2497 int hci_get_conn_list(void __user *arg)
2498 {
2499 	struct hci_conn *c;
2500 	struct hci_conn_list_req req, *cl;
2501 	struct hci_conn_info *ci;
2502 	struct hci_dev *hdev;
2503 	int n = 0, size, err;
2504 
2505 	if (copy_from_user(&req, arg, sizeof(req)))
2506 		return -EFAULT;
2507 
2508 	if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
2509 		return -EINVAL;
2510 
2511 	size = sizeof(req) + req.conn_num * sizeof(*ci);
2512 
2513 	cl = kmalloc(size, GFP_KERNEL);
2514 	if (!cl)
2515 		return -ENOMEM;
2516 
2517 	hdev = hci_dev_get(req.dev_id);
2518 	if (!hdev) {
2519 		kfree(cl);
2520 		return -ENODEV;
2521 	}
2522 
2523 	ci = cl->conn_info;
2524 
2525 	hci_dev_lock(hdev);
2526 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2527 		bacpy(&(ci + n)->bdaddr, &c->dst);
2528 		(ci + n)->handle = c->handle;
2529 		(ci + n)->type  = c->type;
2530 		(ci + n)->out   = c->out;
2531 		(ci + n)->state = c->state;
2532 		(ci + n)->link_mode = get_link_mode(c);
2533 		if (++n >= req.conn_num)
2534 			break;
2535 	}
2536 	hci_dev_unlock(hdev);
2537 
2538 	cl->dev_id = hdev->id;
2539 	cl->conn_num = n;
2540 	size = sizeof(req) + n * sizeof(*ci);
2541 
2542 	hci_dev_put(hdev);
2543 
2544 	err = copy_to_user(arg, cl, size);
2545 	kfree(cl);
2546 
2547 	return err ? -EFAULT : 0;
2548 }
2549 
2550 int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
2551 {
2552 	struct hci_conn_info_req req;
2553 	struct hci_conn_info ci;
2554 	struct hci_conn *conn;
2555 	char __user *ptr = arg + sizeof(req);
2556 
2557 	if (copy_from_user(&req, arg, sizeof(req)))
2558 		return -EFAULT;
2559 
2560 	hci_dev_lock(hdev);
2561 	conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
2562 	if (conn) {
2563 		bacpy(&ci.bdaddr, &conn->dst);
2564 		ci.handle = conn->handle;
2565 		ci.type  = conn->type;
2566 		ci.out   = conn->out;
2567 		ci.state = conn->state;
2568 		ci.link_mode = get_link_mode(conn);
2569 	}
2570 	hci_dev_unlock(hdev);
2571 
2572 	if (!conn)
2573 		return -ENOENT;
2574 
2575 	return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
2576 }
2577 
2578 int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
2579 {
2580 	struct hci_auth_info_req req;
2581 	struct hci_conn *conn;
2582 
2583 	if (copy_from_user(&req, arg, sizeof(req)))
2584 		return -EFAULT;
2585 
2586 	hci_dev_lock(hdev);
2587 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
2588 	if (conn)
2589 		req.type = conn->auth_type;
2590 	hci_dev_unlock(hdev);
2591 
2592 	if (!conn)
2593 		return -ENOENT;
2594 
2595 	return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
2596 }
2597 
2598 struct hci_chan *hci_chan_create(struct hci_conn *conn)
2599 {
2600 	struct hci_dev *hdev = conn->hdev;
2601 	struct hci_chan *chan;
2602 
2603 	BT_DBG("%s hcon %p", hdev->name, conn);
2604 
2605 	if (test_bit(HCI_CONN_DROP, &conn->flags)) {
2606 		BT_DBG("Refusing to create new hci_chan");
2607 		return NULL;
2608 	}
2609 
2610 	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
2611 	if (!chan)
2612 		return NULL;
2613 
2614 	chan->conn = hci_conn_get(conn);
2615 	skb_queue_head_init(&chan->data_q);
2616 	chan->state = BT_CONNECTED;
2617 
2618 	list_add_rcu(&chan->list, &conn->chan_list);
2619 
2620 	return chan;
2621 }
2622 
/* Remove @chan from its connection, wait out concurrent RCU readers,
 * then release its resources.  The synchronize_rcu() before the kfree is
 * what makes the lockless lookups in hci_chan_lookup_handle safe.
 */
void hci_chan_del(struct hci_chan *chan)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);

	list_del_rcu(&chan->list);

	/* Ensure no RCU reader still holds a reference to chan before
	 * it is freed below.
	 */
	synchronize_rcu();

	/* Prevent new hci_chan's to be created for this hci_conn */
	set_bit(HCI_CONN_DROP, &conn->flags);

	hci_conn_put(conn);

	skb_queue_purge(&chan->data_q);
	kfree(chan);
}
2642 
2643 void hci_chan_list_flush(struct hci_conn *conn)
2644 {
2645 	struct hci_chan *chan, *n;
2646 
2647 	BT_DBG("hcon %p", conn);
2648 
2649 	list_for_each_entry_safe(chan, n, &conn->chan_list, list)
2650 		hci_chan_del(chan);
2651 }
2652 
2653 static struct hci_chan *__hci_chan_lookup_handle(struct hci_conn *hcon,
2654 						 __u16 handle)
2655 {
2656 	struct hci_chan *hchan;
2657 
2658 	list_for_each_entry(hchan, &hcon->chan_list, list) {
2659 		if (hchan->handle == handle)
2660 			return hchan;
2661 	}
2662 
2663 	return NULL;
2664 }
2665 
2666 struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle)
2667 {
2668 	struct hci_conn_hash *h = &hdev->conn_hash;
2669 	struct hci_conn *hcon;
2670 	struct hci_chan *hchan = NULL;
2671 
2672 	rcu_read_lock();
2673 
2674 	list_for_each_entry_rcu(hcon, &h->list, list) {
2675 		hchan = __hci_chan_lookup_handle(hcon, handle);
2676 		if (hchan)
2677 			break;
2678 	}
2679 
2680 	rcu_read_unlock();
2681 
2682 	return hchan;
2683 }
2684 
/* Compute the BT_PHY_* bitmask describing which PHYs/packet layouts the
 * connection may use, derived from its link type, negotiated packet
 * types, and (for LE) the per-direction PHY selections.
 *
 * Note on the negated tests below: for BR/EDR ACL and eSCO links the EDR
 * bits in pkt_type are "shall not be used" flags, so a *clear* bit means
 * the packet type is permitted (see the spec table referenced below).
 */
u32 hci_conn_get_phy(struct hci_conn *conn)
{
	u32 phys = 0;

	/* BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 2, Part B page 471:
	 * Table 6.2: Packets defined for synchronous, asynchronous, and
	 * CPB logical transport types.
	 */
	switch (conn->type) {
	case SCO_LINK:
		/* SCO logical transport (1 Mb/s):
		 * HV1, HV2, HV3 and DV.
		 */
		phys |= BT_PHY_BR_1M_1SLOT;

		break;

	case ACL_LINK:
		/* ACL logical transport (1 Mb/s) ptt=0:
		 * DH1, DM3, DH3, DM5 and DH5.
		 */
		phys |= BT_PHY_BR_1M_1SLOT;

		if (conn->pkt_type & (HCI_DM3 | HCI_DH3))
			phys |= BT_PHY_BR_1M_3SLOT;

		if (conn->pkt_type & (HCI_DM5 | HCI_DH5))
			phys |= BT_PHY_BR_1M_5SLOT;

		/* ACL logical transport (2 Mb/s) ptt=1:
		 * 2-DH1, 2-DH3 and 2-DH5.
		 */
		if (!(conn->pkt_type & HCI_2DH1))
			phys |= BT_PHY_EDR_2M_1SLOT;

		if (!(conn->pkt_type & HCI_2DH3))
			phys |= BT_PHY_EDR_2M_3SLOT;

		if (!(conn->pkt_type & HCI_2DH5))
			phys |= BT_PHY_EDR_2M_5SLOT;

		/* ACL logical transport (3 Mb/s) ptt=1:
		 * 3-DH1, 3-DH3 and 3-DH5.
		 */
		if (!(conn->pkt_type & HCI_3DH1))
			phys |= BT_PHY_EDR_3M_1SLOT;

		if (!(conn->pkt_type & HCI_3DH3))
			phys |= BT_PHY_EDR_3M_3SLOT;

		if (!(conn->pkt_type & HCI_3DH5))
			phys |= BT_PHY_EDR_3M_5SLOT;

		break;

	case ESCO_LINK:
		/* eSCO logical transport (1 Mb/s): EV3, EV4 and EV5 */
		phys |= BT_PHY_BR_1M_1SLOT;

		if (!(conn->pkt_type & (ESCO_EV4 | ESCO_EV5)))
			phys |= BT_PHY_BR_1M_3SLOT;

		/* eSCO logical transport (2 Mb/s): 2-EV3, 2-EV5 */
		if (!(conn->pkt_type & ESCO_2EV3))
			phys |= BT_PHY_EDR_2M_1SLOT;

		if (!(conn->pkt_type & ESCO_2EV5))
			phys |= BT_PHY_EDR_2M_3SLOT;

		/* eSCO logical transport (3 Mb/s): 3-EV3, 3-EV5 */
		if (!(conn->pkt_type & ESCO_3EV3))
			phys |= BT_PHY_EDR_3M_1SLOT;

		if (!(conn->pkt_type & ESCO_3EV5))
			phys |= BT_PHY_EDR_3M_3SLOT;

		break;

	case LE_LINK:
		/* LE PHYs are reported per direction (TX/RX) */
		if (conn->le_tx_phy & HCI_LE_SET_PHY_1M)
			phys |= BT_PHY_LE_1M_TX;

		if (conn->le_rx_phy & HCI_LE_SET_PHY_1M)
			phys |= BT_PHY_LE_1M_RX;

		if (conn->le_tx_phy & HCI_LE_SET_PHY_2M)
			phys |= BT_PHY_LE_2M_TX;

		if (conn->le_rx_phy & HCI_LE_SET_PHY_2M)
			phys |= BT_PHY_LE_2M_RX;

		if (conn->le_tx_phy & HCI_LE_SET_PHY_CODED)
			phys |= BT_PHY_LE_CODED_TX;

		if (conn->le_rx_phy & HCI_LE_SET_PHY_CODED)
			phys |= BT_PHY_LE_CODED_RX;

		break;
	}

	return phys;
}
2787 
/* Abort a connection with @reason, issuing whichever HCI command is
 * appropriate for its current state: disconnect an established link,
 * cancel an outgoing attempt, or reject an incoming one.
 *
 * Returns 0 or the hci_send_cmd() error.  The CANCEL flag makes the
 * abort idempotent: a second call is a no-op.
 */
int hci_abort_conn(struct hci_conn *conn, u8 reason)
{
	int r = 0;

	if (test_and_set_bit(HCI_CONN_CANCEL, &conn->flags))
		return 0;

	switch (conn->state) {
	case BT_CONNECTED:
	case BT_CONFIG:
		/* Established links: AMP uses a physical-link disconnect,
		 * everything else the regular HCI Disconnect command.
		 */
		if (conn->type == AMP_LINK) {
			struct hci_cp_disconn_phy_link cp;

			cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
			cp.reason = reason;
			r = hci_send_cmd(conn->hdev, HCI_OP_DISCONN_PHY_LINK,
					 sizeof(cp), &cp);
		} else {
			struct hci_cp_disconnect dc;

			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = reason;
			r = hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT,
					 sizeof(dc), &dc);
		}

		conn->state = BT_DISCONN;

		break;
	case BT_CONNECT:
		/* Outgoing attempt still in progress: cancel it.  An LE
		 * connection that is still only scanning has no controller
		 * command to cancel; pre-1.2 controllers lack Create
		 * Connection Cancel entirely.
		 */
		if (conn->type == LE_LINK) {
			if (test_bit(HCI_CONN_SCANNING, &conn->flags))
				break;
			r = hci_send_cmd(conn->hdev,
					 HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
		} else if (conn->type == ACL_LINK) {
			if (conn->hdev->hci_ver < BLUETOOTH_VER_1_2)
				break;
			r = hci_send_cmd(conn->hdev,
					 HCI_OP_CREATE_CONN_CANCEL,
					 6, &conn->dst);
		}
		break;
	case BT_CONNECT2:
		/* Incoming attempt awaiting our accept: reject it */
		if (conn->type == ACL_LINK) {
			struct hci_cp_reject_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = reason;

			r = hci_send_cmd(conn->hdev,
					 HCI_OP_REJECT_CONN_REQ,
					 sizeof(rej), &rej);
		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
			struct hci_cp_reject_sync_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);

			/* SCO rejection has its own limited set of
			 * allowed error values (0x0D-0x0F) which isn't
			 * compatible with most values passed to this
			 * function. To be safe hard-code one of the
			 * values that's suitable for SCO.
			 */
			rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;

			r = hci_send_cmd(conn->hdev,
					 HCI_OP_REJECT_SYNC_CONN_REQ,
					 sizeof(rej), &rej);
		}
		break;
	default:
		/* Nothing in flight on the controller; just mark closed */
		conn->state = BT_CLOSED;
		break;
	}

	return r;
}
2866