xref: /openbmc/linux/net/bluetooth/hci_conn.c (revision d5a05299)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4    Copyright 2023 NXP
5 
6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License version 2 as
10    published by the Free Software Foundation;
11 
12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 
21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23    SOFTWARE IS DISCLAIMED.
24 */
25 
26 /* Bluetooth HCI connection handling. */
27 
28 #include <linux/export.h>
29 #include <linux/debugfs.h>
30 
31 #include <net/bluetooth/bluetooth.h>
32 #include <net/bluetooth/hci_core.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/iso.h>
35 #include <net/bluetooth/mgmt.h>
36 
37 #include "hci_request.h"
38 #include "smp.h"
39 #include "a2mp.h"
40 #include "eir.h"
41 
/* One row of a (e)SCO parameter table: the allowed packet types, the
 * maximum latency and the retransmission effort passed to the HCI
 * (Enhanced) Setup Synchronous Connection command for one attempt.
 */
struct sco_param {
	u16 pkt_type;		/* (e)SCO packet type bitmask */
	u16 max_latency;	/* max latency, 0xffff = don't care */
	u8  retrans_effort;	/* retransmission effort, 0xff = don't care */
};
47 
/* Pair of connection and ACL handle, heap-allocated so it can be passed
 * as the single data pointer to a hci_cmd_sync_queue() callback
 * (see hci_setup_sync()/hci_enhanced_setup_sync()).
 */
struct conn_handle_t {
	struct hci_conn *conn;
	__u16 handle;
};
52 
/* CVSD over eSCO: parameter sets tried in order, one per conn->attempt,
 * falling back from the S3 settings down to the basic D0 set.
 */
static const struct sco_param esco_param_cvsd[] = {
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a,	0x01 }, /* S3 */
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007,	0x01 }, /* S2 */
	{ EDR_ESCO_MASK | ESCO_EV3,   0x0007,	0x01 }, /* S1 */
	{ EDR_ESCO_MASK | ESCO_HV3,   0xffff,	0x01 }, /* D1 */
	{ EDR_ESCO_MASK | ESCO_HV1,   0xffff,	0x01 }, /* D0 */
};
60 
/* CVSD over legacy SCO (peer not eSCO capable): only the D1/D0 sets. */
static const struct sco_param sco_param_cvsd[] = {
	{ EDR_ESCO_MASK | ESCO_HV3,   0xffff,	0xff }, /* D1 */
	{ EDR_ESCO_MASK | ESCO_HV1,   0xffff,	0xff }, /* D0 */
};
65 
/* mSBC (wideband speech) over eSCO: T2 first, then the T1 fallback. */
static const struct sco_param esco_param_msbc[] = {
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d,	0x02 }, /* T2 */
	{ EDR_ESCO_MASK | ESCO_EV3,   0x0008,	0x02 }, /* T1 */
};
70 
/* Clean up the pending-connection bookkeeping after an LE
 * scan-then-connect attempt finished with the given HCI status.
 *
 * This function requires the caller holds hdev->lock.
 */
static void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status)
{
	struct hci_conn_params *params;
	struct hci_dev *hdev = conn->hdev;
	struct smp_irk *irk;
	bdaddr_t *bdaddr;
	u8 bdaddr_type;

	bdaddr = &conn->dst;
	bdaddr_type = conn->dst_type;

	/* Check if we need to convert to identity address */
	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		bdaddr_type = irk->addr_type;
	}

	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, bdaddr,
					   bdaddr_type);
	if (!params)
		return;

	/* Release the reference the params entry held on this hci_conn */
	if (params->conn) {
		hci_conn_drop(params->conn);
		hci_conn_put(params->conn);
		params->conn = NULL;
	}

	if (!params->explicit_connect)
		return;

	/* If the status indicates successful cancellation of
	 * the attempt (i.e. Unknown Connection Id) there's no point of
	 * notifying failure since we'll go back to keep trying to
	 * connect. The only exception is explicit connect requests
	 * where a timeout + cancel does indicate an actual failure.
	 */
	if (status && status != HCI_ERROR_UNKNOWN_CONN_ID)
		mgmt_connect_failed(hdev, &conn->dst, conn->type,
				    conn->dst_type, status);

	/* The connection attempt was doing scan for new RPA, and is
	 * in scan phase. If params are not associated with any other
	 * autoconnect action, remove them completely. If they are, just unmark
	 * them as waiting for connection, by clearing explicit_connect field.
	 */
	params->explicit_connect = false;

	list_del_init(&params->action);

	switch (params->auto_connect) {
	case HCI_AUTO_CONN_EXPLICIT:
		hci_conn_params_del(hdev, bdaddr, bdaddr_type);
		/* return instead of break to avoid duplicate scan update */
		return;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		list_add(&params->action, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		list_add(&params->action, &hdev->pend_le_reports);
		break;
	default:
		break;
	}

	hci_update_passive_scan(hdev);
}
141 
/* Final teardown of a hci_conn: remove it from the connection hash,
 * run the type-specific cleanup callback, notify the driver, remove
 * sysfs/debugfs entries and drop the hdev and conn references taken
 * when the connection was added.
 */
static void hci_conn_cleanup(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags))
		hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type);

	/* Drop the stored link key if it was flagged as non-persistent */
	if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
		hci_remove_link_key(hdev, &conn->dst);

	hci_chan_list_flush(conn);

	hci_conn_hash_del(hdev, conn);

	/* Type-specific cleanup (e.g. bis_cleanup/cis_cleanup for ISO) */
	if (conn->cleanup)
		conn->cleanup(conn);

	if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
		switch (conn->setting & SCO_AIRMODE_MASK) {
		case SCO_AIRMODE_CVSD:
		case SCO_AIRMODE_TRANSP:
			if (hdev->notify)
				hdev->notify(hdev, HCI_NOTIFY_DISABLE_SCO);
			break;
		}
	} else {
		if (hdev->notify)
			hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
	}

	hci_conn_del_sysfs(conn);

	debugfs_remove_recursive(conn->debugfs);

	/* Balance the hci_dev_hold()/hci_conn reference from hci_conn_add */
	hci_dev_put(hdev);

	hci_conn_put(conn);
}
180 
/* Deferred cleanup scheduled by hci_connect_le_scan_remove(): performs
 * the actual connection teardown outside of the context that could not
 * safely take hdev->lock.  Drops the temporary hdev/conn references
 * taken when the work was scheduled.
 */
static void le_scan_cleanup(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     le_scan_cleanup);
	struct hci_dev *hdev = conn->hdev;
	struct hci_conn *c = NULL;

	BT_DBG("%s hcon %p", hdev->name, conn);

	hci_dev_lock(hdev);

	/* Check that the hci_conn is still around */
	rcu_read_lock();
	list_for_each_entry_rcu(c, &hdev->conn_hash.list, list) {
		if (c == conn)
			break;
	}
	rcu_read_unlock();

	/* Only clean up if the connection is still in the hash; it may
	 * have been deleted by someone else while the work was queued.
	 */
	if (c == conn) {
		hci_connect_le_scan_cleanup(conn, 0x00);
		hci_conn_cleanup(conn);
	}

	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	hci_conn_put(conn);
}
209 
/* Abort an LE connection attempt that is still in the scanning phase by
 * deferring the teardown to the le_scan_cleanup work item.
 */
static void hci_connect_le_scan_remove(struct hci_conn *conn)
{
	BT_DBG("%s hcon %p", conn->hdev->name, conn);

	/* We can't call hci_conn_del/hci_conn_cleanup here since that
	 * could deadlock with another hci_conn_del() call that's holding
	 * hci_dev_lock and doing cancel_delayed_work_sync(&conn->disc_work).
	 * Instead, grab temporary extra references to the hci_dev and
	 * hci_conn and perform the necessary cleanup in a separate work
	 * callback.
	 */

	hci_dev_hold(conn->hdev);
	hci_conn_get(conn);

	/* Even though we hold a reference to the hdev, many other
	 * things might get cleaned up meanwhile, including the hdev's
	 * own workqueue, so we can't use that for scheduling.
	 */
	schedule_work(&conn->le_scan_cleanup);
}
231 
/* Initiate an outgoing BR/EDR ACL connection by sending HCI Create
 * Connection, seeding the page scan parameters from the inquiry cache
 * when a recent entry for the peer exists.
 */
static void hci_acl_create_connection(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct inquiry_entry *ie;
	struct hci_cp_create_conn cp;

	BT_DBG("hcon %p", conn);

	/* Many controllers disallow HCI Create Connection while it is doing
	 * HCI Inquiry. So we cancel the Inquiry first before issuing HCI Create
	 * Connection. This may cause the MGMT discovering state to become false
	 * without user space's request but it is okay since the MGMT Discovery
	 * APIs do not promise that discovery should be done forever. Instead,
	 * the user space monitors the status of MGMT discovering and it may
	 * request for discovery again when this flag becomes false.
	 */
	if (test_bit(HCI_INQUIRY, &hdev->flags)) {
		/* Put this connection to "pending" state so that it will be
		 * executed after the inquiry cancel command complete event.
		 */
		conn->state = BT_CONNECT2;
		hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
		return;
	}

	conn->state = BT_CONNECT;
	conn->out = true;
	conn->role = HCI_ROLE_MASTER;

	conn->attempt++;

	conn->link_policy = hdev->link_policy;

	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.bdaddr, &conn->dst);
	cp.pscan_rep_mode = 0x02;

	/* Use cached page scan parameters if the inquiry entry is fresh
	 * enough; this speeds up paging.
	 */
	ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
	if (ie) {
		if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
			cp.pscan_rep_mode = ie->data.pscan_rep_mode;
			cp.pscan_mode     = ie->data.pscan_mode;
			/* 0x8000 marks the clock offset as valid */
			cp.clock_offset   = ie->data.clock_offset |
					    cpu_to_le16(0x8000);
		}

		memcpy(conn->dev_class, ie->data.dev_class, 3);
	}

	cp.pkt_type = cpu_to_le16(conn->pkt_type);
	if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
		cp.role_switch = 0x01;
	else
		cp.role_switch = 0x00;

	hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
}
289 
/* Start disconnecting a connection with the given HCI reason code.
 * Returns the result of hci_abort_conn().
 */
int hci_disconnect(struct hci_conn *conn, __u8 reason)
{
	BT_DBG("hcon %p", conn);

	/* When we are central of an established connection and it enters
	 * the disconnect timeout, then go ahead and try to read the
	 * current clock offset.  Processing of the result is done
	 * within the event handling and hci_clock_offset_evt function.
	 */
	if (conn->type == ACL_LINK && conn->role == HCI_ROLE_MASTER &&
	    (conn->state == BT_CONNECTED || conn->state == BT_CONFIG)) {
		struct hci_dev *hdev = conn->hdev;
		struct hci_cp_read_clock_offset clkoff_cp;

		clkoff_cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_READ_CLOCK_OFFSET, sizeof(clkoff_cp),
			     &clkoff_cp);
	}

	return hci_abort_conn(conn, reason);
}
311 
312 static void hci_add_sco(struct hci_conn *conn, __u16 handle)
313 {
314 	struct hci_dev *hdev = conn->hdev;
315 	struct hci_cp_add_sco cp;
316 
317 	BT_DBG("hcon %p", conn);
318 
319 	conn->state = BT_CONNECT;
320 	conn->out = true;
321 
322 	conn->attempt++;
323 
324 	cp.handle   = cpu_to_le16(handle);
325 	cp.pkt_type = cpu_to_le16(conn->pkt_type);
326 
327 	hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
328 }
329 
330 static bool find_next_esco_param(struct hci_conn *conn,
331 				 const struct sco_param *esco_param, int size)
332 {
333 	if (!conn->parent)
334 		return false;
335 
336 	for (; conn->attempt <= size; conn->attempt++) {
337 		if (lmp_esco_2m_capable(conn->parent) ||
338 		    (esco_param[conn->attempt - 1].pkt_type & ESCO_2EV3))
339 			break;
340 		BT_DBG("hcon %p skipped attempt %d, eSCO 2M not supported",
341 		       conn, conn->attempt);
342 	}
343 
344 	return conn->attempt <= size;
345 }
346 
347 static int configure_datapath_sync(struct hci_dev *hdev, struct bt_codec *codec)
348 {
349 	int err;
350 	__u8 vnd_len, *vnd_data = NULL;
351 	struct hci_op_configure_data_path *cmd = NULL;
352 
353 	err = hdev->get_codec_config_data(hdev, ESCO_LINK, codec, &vnd_len,
354 					  &vnd_data);
355 	if (err < 0)
356 		goto error;
357 
358 	cmd = kzalloc(sizeof(*cmd) + vnd_len, GFP_KERNEL);
359 	if (!cmd) {
360 		err = -ENOMEM;
361 		goto error;
362 	}
363 
364 	err = hdev->get_data_path_id(hdev, &cmd->data_path_id);
365 	if (err < 0)
366 		goto error;
367 
368 	cmd->vnd_len = vnd_len;
369 	memcpy(cmd->vnd_data, vnd_data, vnd_len);
370 
371 	cmd->direction = 0x00;
372 	__hci_cmd_sync_status(hdev, HCI_CONFIGURE_DATA_PATH,
373 			      sizeof(*cmd) + vnd_len, cmd, HCI_CMD_TIMEOUT);
374 
375 	cmd->direction = 0x01;
376 	err = __hci_cmd_sync_status(hdev, HCI_CONFIGURE_DATA_PATH,
377 				    sizeof(*cmd) + vnd_len, cmd,
378 				    HCI_CMD_TIMEOUT);
379 error:
380 
381 	kfree(cmd);
382 	kfree(vnd_data);
383 	return err;
384 }
385 
386 static int hci_enhanced_setup_sync(struct hci_dev *hdev, void *data)
387 {
388 	struct conn_handle_t *conn_handle = data;
389 	struct hci_conn *conn = conn_handle->conn;
390 	__u16 handle = conn_handle->handle;
391 	struct hci_cp_enhanced_setup_sync_conn cp;
392 	const struct sco_param *param;
393 
394 	kfree(conn_handle);
395 
396 	bt_dev_dbg(hdev, "hcon %p", conn);
397 
398 	/* for offload use case, codec needs to configured before opening SCO */
399 	if (conn->codec.data_path)
400 		configure_datapath_sync(hdev, &conn->codec);
401 
402 	conn->state = BT_CONNECT;
403 	conn->out = true;
404 
405 	conn->attempt++;
406 
407 	memset(&cp, 0x00, sizeof(cp));
408 
409 	cp.handle   = cpu_to_le16(handle);
410 
411 	cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
412 	cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
413 
414 	switch (conn->codec.id) {
415 	case BT_CODEC_MSBC:
416 		if (!find_next_esco_param(conn, esco_param_msbc,
417 					  ARRAY_SIZE(esco_param_msbc)))
418 			return -EINVAL;
419 
420 		param = &esco_param_msbc[conn->attempt - 1];
421 		cp.tx_coding_format.id = 0x05;
422 		cp.rx_coding_format.id = 0x05;
423 		cp.tx_codec_frame_size = __cpu_to_le16(60);
424 		cp.rx_codec_frame_size = __cpu_to_le16(60);
425 		cp.in_bandwidth = __cpu_to_le32(32000);
426 		cp.out_bandwidth = __cpu_to_le32(32000);
427 		cp.in_coding_format.id = 0x04;
428 		cp.out_coding_format.id = 0x04;
429 		cp.in_coded_data_size = __cpu_to_le16(16);
430 		cp.out_coded_data_size = __cpu_to_le16(16);
431 		cp.in_pcm_data_format = 2;
432 		cp.out_pcm_data_format = 2;
433 		cp.in_pcm_sample_payload_msb_pos = 0;
434 		cp.out_pcm_sample_payload_msb_pos = 0;
435 		cp.in_data_path = conn->codec.data_path;
436 		cp.out_data_path = conn->codec.data_path;
437 		cp.in_transport_unit_size = 1;
438 		cp.out_transport_unit_size = 1;
439 		break;
440 
441 	case BT_CODEC_TRANSPARENT:
442 		if (!find_next_esco_param(conn, esco_param_msbc,
443 					  ARRAY_SIZE(esco_param_msbc)))
444 			return false;
445 		param = &esco_param_msbc[conn->attempt - 1];
446 		cp.tx_coding_format.id = 0x03;
447 		cp.rx_coding_format.id = 0x03;
448 		cp.tx_codec_frame_size = __cpu_to_le16(60);
449 		cp.rx_codec_frame_size = __cpu_to_le16(60);
450 		cp.in_bandwidth = __cpu_to_le32(0x1f40);
451 		cp.out_bandwidth = __cpu_to_le32(0x1f40);
452 		cp.in_coding_format.id = 0x03;
453 		cp.out_coding_format.id = 0x03;
454 		cp.in_coded_data_size = __cpu_to_le16(16);
455 		cp.out_coded_data_size = __cpu_to_le16(16);
456 		cp.in_pcm_data_format = 2;
457 		cp.out_pcm_data_format = 2;
458 		cp.in_pcm_sample_payload_msb_pos = 0;
459 		cp.out_pcm_sample_payload_msb_pos = 0;
460 		cp.in_data_path = conn->codec.data_path;
461 		cp.out_data_path = conn->codec.data_path;
462 		cp.in_transport_unit_size = 1;
463 		cp.out_transport_unit_size = 1;
464 		break;
465 
466 	case BT_CODEC_CVSD:
467 		if (conn->parent && lmp_esco_capable(conn->parent)) {
468 			if (!find_next_esco_param(conn, esco_param_cvsd,
469 						  ARRAY_SIZE(esco_param_cvsd)))
470 				return -EINVAL;
471 			param = &esco_param_cvsd[conn->attempt - 1];
472 		} else {
473 			if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
474 				return -EINVAL;
475 			param = &sco_param_cvsd[conn->attempt - 1];
476 		}
477 		cp.tx_coding_format.id = 2;
478 		cp.rx_coding_format.id = 2;
479 		cp.tx_codec_frame_size = __cpu_to_le16(60);
480 		cp.rx_codec_frame_size = __cpu_to_le16(60);
481 		cp.in_bandwidth = __cpu_to_le32(16000);
482 		cp.out_bandwidth = __cpu_to_le32(16000);
483 		cp.in_coding_format.id = 4;
484 		cp.out_coding_format.id = 4;
485 		cp.in_coded_data_size = __cpu_to_le16(16);
486 		cp.out_coded_data_size = __cpu_to_le16(16);
487 		cp.in_pcm_data_format = 2;
488 		cp.out_pcm_data_format = 2;
489 		cp.in_pcm_sample_payload_msb_pos = 0;
490 		cp.out_pcm_sample_payload_msb_pos = 0;
491 		cp.in_data_path = conn->codec.data_path;
492 		cp.out_data_path = conn->codec.data_path;
493 		cp.in_transport_unit_size = 16;
494 		cp.out_transport_unit_size = 16;
495 		break;
496 	default:
497 		return -EINVAL;
498 	}
499 
500 	cp.retrans_effort = param->retrans_effort;
501 	cp.pkt_type = __cpu_to_le16(param->pkt_type);
502 	cp.max_latency = __cpu_to_le16(param->max_latency);
503 
504 	if (hci_send_cmd(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
505 		return -EIO;
506 
507 	return 0;
508 }
509 
/* Send the legacy HCI Setup Synchronous Connection command for the
 * SCO/eSCO link on top of the ACL identified by @handle, choosing the
 * parameter set for the current conn->attempt based on the air mode.
 * Returns true if the command was sent, false if no usable parameter
 * set remains or sending failed.
 */
static bool hci_setup_sync_conn(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_setup_sync_conn cp;
	const struct sco_param *param;

	bt_dev_dbg(hdev, "hcon %p", conn);

	conn->state = BT_CONNECT;
	conn->out = true;

	/* Each call consumes one entry of the parameter fallback table */
	conn->attempt++;

	cp.handle   = cpu_to_le16(handle);

	cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
	cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
	cp.voice_setting  = cpu_to_le16(conn->setting);

	switch (conn->setting & SCO_AIRMODE_MASK) {
	case SCO_AIRMODE_TRANSP:
		if (!find_next_esco_param(conn, esco_param_msbc,
					  ARRAY_SIZE(esco_param_msbc)))
			return false;
		param = &esco_param_msbc[conn->attempt - 1];
		break;
	case SCO_AIRMODE_CVSD:
		/* eSCO table when the peer supports it, plain SCO otherwise */
		if (conn->parent && lmp_esco_capable(conn->parent)) {
			if (!find_next_esco_param(conn, esco_param_cvsd,
						  ARRAY_SIZE(esco_param_cvsd)))
				return false;
			param = &esco_param_cvsd[conn->attempt - 1];
		} else {
			if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
				return false;
			param = &sco_param_cvsd[conn->attempt - 1];
		}
		break;
	default:
		return false;
	}

	cp.retrans_effort = param->retrans_effort;
	cp.pkt_type = __cpu_to_le16(param->pkt_type);
	cp.max_latency = __cpu_to_le16(param->max_latency);

	if (hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
		return false;

	return true;
}
561 
562 bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
563 {
564 	int result;
565 	struct conn_handle_t *conn_handle;
566 
567 	if (enhanced_sync_conn_capable(conn->hdev)) {
568 		conn_handle = kzalloc(sizeof(*conn_handle), GFP_KERNEL);
569 
570 		if (!conn_handle)
571 			return false;
572 
573 		conn_handle->conn = conn;
574 		conn_handle->handle = handle;
575 		result = hci_cmd_sync_queue(conn->hdev, hci_enhanced_setup_sync,
576 					    conn_handle, NULL);
577 		if (result < 0)
578 			kfree(conn_handle);
579 
580 		return result == 0;
581 	}
582 
583 	return hci_setup_sync_conn(conn, handle);
584 }
585 
586 u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
587 		      u16 to_multiplier)
588 {
589 	struct hci_dev *hdev = conn->hdev;
590 	struct hci_conn_params *params;
591 	struct hci_cp_le_conn_update cp;
592 
593 	hci_dev_lock(hdev);
594 
595 	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
596 	if (params) {
597 		params->conn_min_interval = min;
598 		params->conn_max_interval = max;
599 		params->conn_latency = latency;
600 		params->supervision_timeout = to_multiplier;
601 	}
602 
603 	hci_dev_unlock(hdev);
604 
605 	memset(&cp, 0, sizeof(cp));
606 	cp.handle		= cpu_to_le16(conn->handle);
607 	cp.conn_interval_min	= cpu_to_le16(min);
608 	cp.conn_interval_max	= cpu_to_le16(max);
609 	cp.conn_latency		= cpu_to_le16(latency);
610 	cp.supervision_timeout	= cpu_to_le16(to_multiplier);
611 	cp.min_ce_len		= cpu_to_le16(0x0000);
612 	cp.max_ce_len		= cpu_to_le16(0x0000);
613 
614 	hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
615 
616 	if (params)
617 		return 0x01;
618 
619 	return 0x00;
620 }
621 
622 void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
623 		      __u8 ltk[16], __u8 key_size)
624 {
625 	struct hci_dev *hdev = conn->hdev;
626 	struct hci_cp_le_start_enc cp;
627 
628 	BT_DBG("hcon %p", conn);
629 
630 	memset(&cp, 0, sizeof(cp));
631 
632 	cp.handle = cpu_to_le16(conn->handle);
633 	cp.rand = rand;
634 	cp.ediv = ediv;
635 	memcpy(cp.ltk, ltk, key_size);
636 
637 	hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
638 }
639 
640 /* Device _must_ be locked */
641 void hci_sco_setup(struct hci_conn *conn, __u8 status)
642 {
643 	struct hci_link *link;
644 
645 	link = list_first_entry_or_null(&conn->link_list, struct hci_link, list);
646 	if (!link || !link->conn)
647 		return;
648 
649 	BT_DBG("hcon %p", conn);
650 
651 	if (!status) {
652 		if (lmp_esco_capable(conn->hdev))
653 			hci_setup_sync(link->conn, conn->handle);
654 		else
655 			hci_add_sco(link->conn, conn->handle);
656 	} else {
657 		hci_connect_cfm(link->conn, status);
658 		hci_conn_del(link->conn);
659 	}
660 }
661 
/* Delayed work run when a connection's disconnect timeout expires:
 * abort the connection if no one holds a reference to it any more.
 */
static void hci_conn_timeout(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     disc_work.work);
	int refcnt = atomic_read(&conn->refcnt);

	BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));

	WARN_ON(refcnt < 0);

	/* FIXME: It was observed that in pairing failed scenario, refcnt
	 * drops below 0. Probably this is because l2cap_conn_del calls
	 * l2cap_chan_del for each channel, and inside l2cap_chan_del conn is
	 * dropped. After that loop hci_chan_del is called which also drops
	 * conn. For now make sure that ACL is alive if refcnt is higher then 0,
	 * otherwise drop it.
	 */
	if (refcnt > 0)
		return;

	/* LE connections in scanning state need special handling */
	if (conn->state == BT_CONNECT && conn->type == LE_LINK &&
	    test_bit(HCI_CONN_SCANNING, &conn->flags)) {
		hci_connect_le_scan_remove(conn);
		return;
	}

	hci_abort_conn(conn, hci_proto_disconn_ind(conn));
}
691 
/* Enter sniff mode.
 *
 * Delayed work run when an ACL link has been idle for the idle timeout:
 * request sniff subrating (if both sides support it) and put the link
 * into sniff mode to save power.
 */
static void hci_conn_idle(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     idle_work.work);
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("hcon %p mode %d", conn, conn->mode);

	if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
		return;

	/* Nothing to do if already in a low-power mode or sniff is not
	 * allowed by the link policy.
	 */
	if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
		return;

	if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
		struct hci_cp_sniff_subrate cp;
		cp.handle             = cpu_to_le16(conn->handle);
		cp.max_latency        = cpu_to_le16(0);
		cp.min_remote_timeout = cpu_to_le16(0);
		cp.min_local_timeout  = cpu_to_le16(0);
		hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
	}

	/* Only issue Sniff Mode if no mode change is already pending */
	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
		struct hci_cp_sniff_mode cp;
		cp.handle       = cpu_to_le16(conn->handle);
		cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
		cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
		cp.attempt      = cpu_to_le16(4);
		cp.timeout      = cpu_to_le16(1);
		hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
	}
}
726 
727 static void hci_conn_auto_accept(struct work_struct *work)
728 {
729 	struct hci_conn *conn = container_of(work, struct hci_conn,
730 					     auto_accept_work.work);
731 
732 	hci_send_cmd(conn->hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
733 		     &conn->dst);
734 }
735 
736 static void le_disable_advertising(struct hci_dev *hdev)
737 {
738 	if (ext_adv_capable(hdev)) {
739 		struct hci_cp_le_set_ext_adv_enable cp;
740 
741 		cp.enable = 0x00;
742 		cp.num_of_sets = 0x00;
743 
744 		hci_send_cmd(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp),
745 			     &cp);
746 	} else {
747 		u8 enable = 0x00;
748 		hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
749 			     &enable);
750 	}
751 }
752 
/* Delayed work run when an LE connection attempt times out: stop
 * directed advertising (peripheral role) or abort the outgoing
 * connection (central role).
 */
static void le_conn_timeout(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     le_conn_timeout.work);
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("");

	/* We could end up here due to having done directed advertising,
	 * so clean up the state if necessary. This should however only
	 * happen with broken hardware or if low duty cycle was used
	 * (which doesn't have a timeout of its own).
	 */
	if (conn->role == HCI_ROLE_SLAVE) {
		/* Disable LE Advertising */
		le_disable_advertising(hdev);
		hci_dev_lock(hdev);
		hci_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
		hci_dev_unlock(hdev);
		return;
	}

	hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
}
777 
/* Scratch data passed to hci_conn_hash iteration callbacks and
 * cmd_sync callbacks when looking up or terminating ISO (CIS/BIS)
 * connections.  Which union members are meaningful depends on the
 * caller (CIG/CIS for unicast, BIG/BIS or sync_handle for broadcast).
 */
struct iso_list_data {
	union {
		u8  cig;
		u8  big;
	};
	union {
		u8  cis;
		u8  bis;
		u16 sync_handle;
	};
	int count;		/* number of matching connections found */
	struct {
		struct hci_cp_le_set_cig_params cp;
		struct hci_cis_params cis[0x11];
	} pdu;
};
794 
795 static void bis_list(struct hci_conn *conn, void *data)
796 {
797 	struct iso_list_data *d = data;
798 
799 	/* Skip if not broadcast/ANY address */
800 	if (bacmp(&conn->dst, BDADDR_ANY))
801 		return;
802 
803 	if (d->big != conn->iso_qos.bcast.big || d->bis == BT_ISO_QOS_BIS_UNSET ||
804 	    d->bis != conn->iso_qos.bcast.bis)
805 		return;
806 
807 	d->count++;
808 }
809 
810 static void find_bis(struct hci_conn *conn, void *data)
811 {
812 	struct iso_list_data *d = data;
813 
814 	/* Ignore unicast */
815 	if (bacmp(&conn->dst, BDADDR_ANY))
816 		return;
817 
818 	d->count++;
819 }
820 
/* cmd_sync callback (broadcaster side): remove the advertising instance
 * for the BIS and terminate the BIG if no other connected BIS still
 * uses it.
 */
static int terminate_big_sync(struct hci_dev *hdev, void *data)
{
	struct iso_list_data *d = data;

	bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", d->big, d->bis);

	hci_remove_ext_adv_instance_sync(hdev, d->bis, NULL);

	/* Check if ISO connection is a BIS and terminate BIG if there are
	 * no other connections using it.
	 */
	hci_conn_hash_list_state(hdev, find_bis, ISO_LINK, BT_CONNECTED, d);
	if (d->count)
		return 0;

	return hci_le_terminate_big_sync(hdev, d->big,
					 HCI_ERROR_LOCAL_HOST_TERM);
}
839 
/* cmd_sync destroy callback: free the iso_list_data allocated by
 * hci_le_terminate_big()/hci_le_big_terminate() once the queued work
 * has completed.
 */
static void terminate_big_destroy(struct hci_dev *hdev, void *data, int err)
{
	kfree(data);
}
844 
845 static int hci_le_terminate_big(struct hci_dev *hdev, u8 big, u8 bis)
846 {
847 	struct iso_list_data *d;
848 	int ret;
849 
850 	bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", big, bis);
851 
852 	d = kzalloc(sizeof(*d), GFP_KERNEL);
853 	if (!d)
854 		return -ENOMEM;
855 
856 	d->big = big;
857 	d->bis = bis;
858 
859 	ret = hci_cmd_sync_queue(hdev, terminate_big_sync, d,
860 				 terminate_big_destroy);
861 	if (ret)
862 		kfree(d);
863 
864 	return ret;
865 }
866 
/* cmd_sync callback (broadcast receiver side): terminate BIG sync and
 * the associated periodic advertising sync if no other connected BIS
 * still uses the BIG.
 */
static int big_terminate_sync(struct hci_dev *hdev, void *data)
{
	struct iso_list_data *d = data;

	bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", d->big,
		   d->sync_handle);

	/* Check if ISO connection is a BIS and terminate BIG if there are
	 * no other connections using it.
	 */
	hci_conn_hash_list_state(hdev, find_bis, ISO_LINK, BT_CONNECTED, d);
	if (d->count)
		return 0;

	hci_le_big_terminate_sync(hdev, d->big);

	return hci_le_pa_terminate_sync(hdev, d->sync_handle);
}
885 
886 static int hci_le_big_terminate(struct hci_dev *hdev, u8 big, u16 sync_handle)
887 {
888 	struct iso_list_data *d;
889 	int ret;
890 
891 	bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", big, sync_handle);
892 
893 	d = kzalloc(sizeof(*d), GFP_KERNEL);
894 	if (!d)
895 		return -ENOMEM;
896 
897 	d->big = big;
898 	d->sync_handle = sync_handle;
899 
900 	ret = hci_cmd_sync_queue(hdev, big_terminate_sync, d,
901 				 terminate_big_destroy);
902 	if (ret)
903 		kfree(d);
904 
905 	return ret;
906 }
907 
/* Cleanup BIS connection
 *
 * Detects if there are any BIS left connected in a BIG.
 * broadcaster: Remove advertising instance and terminate BIG.
 * broadcast receiver: Terminate BIG sync and terminate PA sync.
 */
static void bis_cleanup(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	bt_dev_dbg(hdev, "conn %p", conn);

	if (conn->role == HCI_ROLE_MASTER) {
		/* Only tear down if this connection was the one doing
		 * periodic advertising.
		 */
		if (!test_and_clear_bit(HCI_CONN_PER_ADV, &conn->flags))
			return;

		hci_le_terminate_big(hdev, conn->iso_qos.bcast.big,
				     conn->iso_qos.bcast.bis);
	} else {
		hci_le_big_terminate(hdev, conn->iso_qos.bcast.big,
				     conn->sync_handle);
	}
}
931 
/* cmd_sync callback: remove a CIG.  The CIG handle is smuggled through
 * the void *data pointer via ERR_PTR() by hci_le_remove_cig(), avoiding
 * an allocation for a single byte.
 */
static int remove_cig_sync(struct hci_dev *hdev, void *data)
{
	u8 handle = PTR_ERR(data);

	return hci_le_remove_cig_sync(hdev, handle);
}
938 
/* Queue removal of the CIG with the given handle; the handle is encoded
 * into the data pointer with ERR_PTR() (decoded in remove_cig_sync()).
 */
static int hci_le_remove_cig(struct hci_dev *hdev, u8 handle)
{
	bt_dev_dbg(hdev, "handle 0x%2.2x", handle);

	return hci_cmd_sync_queue(hdev, remove_cig_sync, ERR_PTR(handle), NULL);
}
945 
946 static void find_cis(struct hci_conn *conn, void *data)
947 {
948 	struct iso_list_data *d = data;
949 
950 	/* Ignore broadcast or if CIG don't match */
951 	if (!bacmp(&conn->dst, BDADDR_ANY) || d->cig != conn->iso_qos.ucast.cig)
952 		return;
953 
954 	d->count++;
955 }
956 
/* Cleanup CIS connection:
 *
 * Detects if there are any CIS left connected in a CIG and, if not,
 * removes the CIG.
 */
static void cis_cleanup(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct iso_list_data d;

	if (conn->iso_qos.ucast.cig == BT_ISO_QOS_CIG_UNSET)
		return;

	memset(&d, 0, sizeof(d));
	d.cig = conn->iso_qos.ucast.cig;

	/* Check if ISO connection is a CIS and remove CIG if there are
	 * no other connections using it.
	 */
	hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_BOUND, &d);
	hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_CONNECT, &d);
	hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_CONNECTED, &d);
	if (d.count)
		return;

	hci_le_remove_cig(hdev, conn->iso_qos.ucast.cig);
}
983 
/* Allocate and initialize a new hci_conn of the given link @type with
 * peer address @dst and local @role, register it in the connection
 * hash, notify the driver (except for SCO/eSCO, which are notified on
 * setup completion) and create its sysfs representation.  Returns the
 * new connection or NULL on allocation failure.  The reference taken
 * with hci_dev_hold() here is released in hci_conn_cleanup().
 */
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
			      u8 role)
{
	struct hci_conn *conn;

	BT_DBG("%s dst %pMR", hdev->name, dst);

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn)
		return NULL;

	bacpy(&conn->dst, dst);
	bacpy(&conn->src, &hdev->bdaddr);
	/* A real handle is assigned once the controller reports one */
	conn->handle = HCI_CONN_HANDLE_UNSET;
	conn->hdev  = hdev;
	conn->type  = type;
	conn->role  = role;
	conn->mode  = HCI_CM_ACTIVE;
	conn->state = BT_OPEN;
	conn->auth_type = HCI_AT_GENERAL_BONDING;
	conn->io_capability = hdev->io_capability;
	conn->remote_auth = 0xff;
	conn->key_type = 0xff;
	conn->rssi = HCI_RSSI_INVALID;
	conn->tx_power = HCI_TX_POWER_INVALID;
	conn->max_tx_power = HCI_TX_POWER_INVALID;

	set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;

	/* Set Default Authenticated payload timeout to 30s */
	conn->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;

	if (conn->role == HCI_ROLE_MASTER)
		conn->out = true;

	/* Link-type specific setup: packet types for BR/EDR and (e)SCO,
	 * identity source address and cleanup callback for LE/ISO.
	 */
	switch (type) {
	case ACL_LINK:
		conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
		break;
	case LE_LINK:
		/* conn->src should reflect the local identity address */
		hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
		break;
	case ISO_LINK:
		/* conn->src should reflect the local identity address */
		hci_copy_identity_address(hdev, &conn->src, &conn->src_type);

		/* set proper cleanup function */
		if (!bacmp(dst, BDADDR_ANY))
			conn->cleanup = bis_cleanup;
		else if (conn->role == HCI_ROLE_MASTER)
			conn->cleanup = cis_cleanup;

		break;
	case SCO_LINK:
		if (lmp_esco_capable(hdev))
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
		else
			conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
		break;
	case ESCO_LINK:
		conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
		break;
	}

	skb_queue_head_init(&conn->data_q);

	INIT_LIST_HEAD(&conn->chan_list);
	INIT_LIST_HEAD(&conn->link_list);

	INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
	INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
	INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
	INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);
	INIT_WORK(&conn->le_scan_cleanup, le_scan_cleanup);

	atomic_set(&conn->refcnt, 0);

	/* Released in hci_conn_cleanup() */
	hci_dev_hold(hdev);

	hci_conn_hash_add(hdev, conn);

	/* The SCO and eSCO connections will only be notified when their
	 * setup has been completed. This is different to ACL links which
	 * can be notified right away.
	 */
	if (conn->type != SCO_LINK && conn->type != ESCO_LINK) {
		if (hdev->notify)
			hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);
	}

	hci_conn_init_sysfs(conn);

	return conn;
}
1081 
/* Detach @conn from the parent/child link topology.
 *
 * If @conn is a parent (conn->parent == NULL) every child link is
 * unlinked recursively; SCO/eSCO children that never received a
 * connection handle are deleted immediately so they cannot get stuck.
 * If @conn is a child, its hci_link is removed from the parent's list
 * and the references taken in hci_conn_link() are released.
 */
static void hci_conn_unlink(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	bt_dev_dbg(hdev, "hcon %p", conn);

	if (!conn->parent) {
		struct hci_link *link, *t;

		list_for_each_entry_safe(link, t, &conn->link_list, list) {
			struct hci_conn *child = link->conn;

			hci_conn_unlink(child);

			/* If hdev is down it means
			 * hci_dev_close_sync/hci_conn_hash_flush is in progress
			 * and links don't need to be cleanup as all connections
			 * would be cleanup.
			 */
			if (!test_bit(HCI_UP, &hdev->flags))
				continue;

			/* Due to race, SCO connection might be not established
			 * yet at this point. Delete it now, otherwise it is
			 * possible for it to be stuck and can't be deleted.
			 */
			if ((child->type == SCO_LINK ||
			     child->type == ESCO_LINK) &&
			    child->handle == HCI_CONN_HANDLE_UNSET)
				hci_conn_del(child);
		}

		return;
	}

	/* Child connection: nothing to do if it was never linked */
	if (!conn->link)
		return;

	/* Remove from the parent's list; wait for concurrent RCU readers
	 * before the link memory is freed below.
	 */
	list_del_rcu(&conn->link->list);
	synchronize_rcu();

	/* Drop the parent references taken by hci_conn_link() */
	hci_conn_drop(conn->parent);
	hci_conn_put(conn->parent);
	conn->parent = NULL;

	kfree(conn->link);
	conn->link = NULL;
}
1130 
/* Delete a connection: unlink it from any parent/child topology, cancel
 * pending work, return unacknowledged packet credits to the controller
 * counters and finally run hci_conn_cleanup().
 */
void hci_conn_del(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);

	hci_conn_unlink(conn);

	cancel_delayed_work_sync(&conn->disc_work);
	cancel_delayed_work_sync(&conn->auto_accept_work);
	cancel_delayed_work_sync(&conn->idle_work);

	if (conn->type == ACL_LINK) {
		/* Unacked frames */
		hdev->acl_cnt += conn->sent;
	} else if (conn->type == LE_LINK) {
		cancel_delayed_work(&conn->le_conn_timeout);

		/* LE credits come from the dedicated LE pool if the
		 * controller has one, otherwise from the shared ACL pool.
		 */
		if (hdev->le_pkts)
			hdev->le_cnt += conn->sent;
		else
			hdev->acl_cnt += conn->sent;
	} else {
		/* Unacked ISO frames */
		if (conn->type == ISO_LINK) {
			if (hdev->iso_pkts)
				hdev->iso_cnt += conn->sent;
			else if (hdev->le_pkts)
				hdev->le_cnt += conn->sent;
			else
				hdev->acl_cnt += conn->sent;
		}
	}

	if (conn->amp_mgr)
		amp_mgr_put(conn->amp_mgr);

	skb_queue_purge(&conn->data_q);

	/* Remove the connection from the list and cleanup its remaining
	 * state. This is a separate function since for some cases like
	 * BT_CONNECT_SCAN we *only* want the cleanup part without the
	 * rest of hci_conn_del.
	 */
	hci_conn_cleanup(conn);
}
1177 
/* Pick the local controller to use for a connection to @dst.
 *
 * @src:      preferred local address, or BDADDR_ANY for "don't care"
 * @src_type: address type of @src (BDADDR_BREDR / BDADDR_LE_*)
 *
 * Only primary controllers that are up and not in user-channel mode are
 * considered.  Returns the matching hci_dev with a reference held (the
 * caller must hci_dev_put() it) or NULL if no controller qualifies.
 */
struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, uint8_t src_type)
{
	int use_src = bacmp(src, BDADDR_ANY);
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%pMR -> %pMR", src, dst);

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(d, &hci_dev_list, list) {
		if (!test_bit(HCI_UP, &d->flags) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL) ||
		    d->dev_type != HCI_PRIMARY)
			continue;

		/* Simple routing:
		 *   No source address - find interface with bdaddr != dst
		 *   Source address    - find interface with bdaddr == src
		 */

		if (use_src) {
			bdaddr_t id_addr;
			u8 id_addr_type;

			if (src_type == BDADDR_BREDR) {
				if (!lmp_bredr_capable(d))
					continue;
				bacpy(&id_addr, &d->bdaddr);
				id_addr_type = BDADDR_BREDR;
			} else {
				if (!lmp_le_capable(d))
					continue;

				hci_copy_identity_address(d, &id_addr,
							  &id_addr_type);

				/* Convert from HCI to three-value type */
				if (id_addr_type == ADDR_LE_DEV_PUBLIC)
					id_addr_type = BDADDR_LE_PUBLIC;
				else
					id_addr_type = BDADDR_LE_RANDOM;
			}

			if (!bacmp(&id_addr, src) && id_addr_type == src_type) {
				hdev = d; break;
			}
		} else {
			if (bacmp(&d->bdaddr, dst)) {
				hdev = d; break;
			}
		}
	}

	/* Grab the reference while still under the list lock */
	if (hdev)
		hdev = hci_dev_hold(hdev);

	read_unlock(&hci_dev_list_lock);
	return hdev;
}
EXPORT_SYMBOL(hci_get_route);
1238 
1239 /* This function requires the caller holds hdev->lock */
1240 static void hci_le_conn_failed(struct hci_conn *conn, u8 status)
1241 {
1242 	struct hci_dev *hdev = conn->hdev;
1243 
1244 	hci_connect_le_scan_cleanup(conn, status);
1245 
1246 	/* Enable advertising in case this was a failed connection
1247 	 * attempt as a peripheral.
1248 	 */
1249 	hci_enable_advertising(hdev);
1250 }
1251 
1252 /* This function requires the caller holds hdev->lock */
1253 void hci_conn_failed(struct hci_conn *conn, u8 status)
1254 {
1255 	struct hci_dev *hdev = conn->hdev;
1256 
1257 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
1258 
1259 	switch (conn->type) {
1260 	case LE_LINK:
1261 		hci_le_conn_failed(conn, status);
1262 		break;
1263 	case ACL_LINK:
1264 		mgmt_connect_failed(hdev, &conn->dst, conn->type,
1265 				    conn->dst_type, status);
1266 		break;
1267 	}
1268 
1269 	conn->state = BT_CLOSED;
1270 	hci_connect_cfm(conn, status);
1271 	hci_conn_del(conn);
1272 }
1273 
/* hci_cmd_sync completion callback for hci_connect_le_sync().
 *
 * On success only the explicit-connect scanning state is cleaned up.
 * On failure, if this connection attempt is still the pending one,
 * flush the connect timeout work (so a queued Create Connection Cancel
 * goes out) and fail the connection.
 */
static void create_le_conn_complete(struct hci_dev *hdev, void *data, int err)
{
	struct hci_conn *conn = data;

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	if (!err) {
		hci_connect_le_scan_cleanup(conn, 0x00);
		goto done;
	}

	/* Check if connection is still pending */
	if (conn != hci_lookup_le_connect(hdev))
		goto done;

	/* Flush to make sure we send create conn cancel command if needed */
	flush_delayed_work(&conn->le_conn_timeout);
	hci_conn_failed(conn, bt_status(err));

done:
	hci_dev_unlock(hdev);
}
1298 
/* hci_cmd_sync callback: issue LE Create Connection for the hci_conn
 * passed as @data.
 */
static int hci_connect_le_sync(struct hci_dev *hdev, void *data)
{
	bt_dev_dbg(hdev, "conn %p", data);

	return hci_le_create_conn_sync(hdev, data);
}
1307 
1308 struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
1309 				u8 dst_type, bool dst_resolved, u8 sec_level,
1310 				u16 conn_timeout, u8 role)
1311 {
1312 	struct hci_conn *conn;
1313 	struct smp_irk *irk;
1314 	int err;
1315 
1316 	/* Let's make sure that le is enabled.*/
1317 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1318 		if (lmp_le_capable(hdev))
1319 			return ERR_PTR(-ECONNREFUSED);
1320 
1321 		return ERR_PTR(-EOPNOTSUPP);
1322 	}
1323 
1324 	/* Since the controller supports only one LE connection attempt at a
1325 	 * time, we return -EBUSY if there is any connection attempt running.
1326 	 */
1327 	if (hci_lookup_le_connect(hdev))
1328 		return ERR_PTR(-EBUSY);
1329 
1330 	/* If there's already a connection object but it's not in
1331 	 * scanning state it means it must already be established, in
1332 	 * which case we can't do anything else except report a failure
1333 	 * to connect.
1334 	 */
1335 	conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
1336 	if (conn && !test_bit(HCI_CONN_SCANNING, &conn->flags)) {
1337 		return ERR_PTR(-EBUSY);
1338 	}
1339 
1340 	/* Check if the destination address has been resolved by the controller
1341 	 * since if it did then the identity address shall be used.
1342 	 */
1343 	if (!dst_resolved) {
1344 		/* When given an identity address with existing identity
1345 		 * resolving key, the connection needs to be established
1346 		 * to a resolvable random address.
1347 		 *
1348 		 * Storing the resolvable random address is required here
1349 		 * to handle connection failures. The address will later
1350 		 * be resolved back into the original identity address
1351 		 * from the connect request.
1352 		 */
1353 		irk = hci_find_irk_by_addr(hdev, dst, dst_type);
1354 		if (irk && bacmp(&irk->rpa, BDADDR_ANY)) {
1355 			dst = &irk->rpa;
1356 			dst_type = ADDR_LE_DEV_RANDOM;
1357 		}
1358 	}
1359 
1360 	if (conn) {
1361 		bacpy(&conn->dst, dst);
1362 	} else {
1363 		conn = hci_conn_add(hdev, LE_LINK, dst, role);
1364 		if (!conn)
1365 			return ERR_PTR(-ENOMEM);
1366 		hci_conn_hold(conn);
1367 		conn->pending_sec_level = sec_level;
1368 	}
1369 
1370 	conn->dst_type = dst_type;
1371 	conn->sec_level = BT_SECURITY_LOW;
1372 	conn->conn_timeout = conn_timeout;
1373 
1374 	conn->state = BT_CONNECT;
1375 	clear_bit(HCI_CONN_SCANNING, &conn->flags);
1376 
1377 	err = hci_cmd_sync_queue(hdev, hci_connect_le_sync, conn,
1378 				 create_le_conn_complete);
1379 	if (err) {
1380 		hci_conn_del(conn);
1381 		return ERR_PTR(err);
1382 	}
1383 
1384 	return conn;
1385 }
1386 
1387 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
1388 {
1389 	struct hci_conn *conn;
1390 
1391 	conn = hci_conn_hash_lookup_le(hdev, addr, type);
1392 	if (!conn)
1393 		return false;
1394 
1395 	if (conn->state != BT_CONNECTED)
1396 		return false;
1397 
1398 	return true;
1399 }
1400 
1401 /* This function requires the caller holds hdev->lock */
1402 static int hci_explicit_conn_params_set(struct hci_dev *hdev,
1403 					bdaddr_t *addr, u8 addr_type)
1404 {
1405 	struct hci_conn_params *params;
1406 
1407 	if (is_connected(hdev, addr, addr_type))
1408 		return -EISCONN;
1409 
1410 	params = hci_conn_params_lookup(hdev, addr, addr_type);
1411 	if (!params) {
1412 		params = hci_conn_params_add(hdev, addr, addr_type);
1413 		if (!params)
1414 			return -ENOMEM;
1415 
1416 		/* If we created new params, mark them to be deleted in
1417 		 * hci_connect_le_scan_cleanup. It's different case than
1418 		 * existing disabled params, those will stay after cleanup.
1419 		 */
1420 		params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
1421 	}
1422 
1423 	/* We're trying to connect, so make sure params are at pend_le_conns */
1424 	if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
1425 	    params->auto_connect == HCI_AUTO_CONN_REPORT ||
1426 	    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
1427 		list_del_init(&params->action);
1428 		list_add(&params->action, &hdev->pend_le_conns);
1429 	}
1430 
1431 	params->explicit_connect = true;
1432 
1433 	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
1434 	       params->auto_connect);
1435 
1436 	return 0;
1437 }
1438 
1439 static int qos_set_big(struct hci_dev *hdev, struct bt_iso_qos *qos)
1440 {
1441 	struct iso_list_data data;
1442 
1443 	/* Allocate a BIG if not set */
1444 	if (qos->bcast.big == BT_ISO_QOS_BIG_UNSET) {
1445 		for (data.big = 0x00; data.big < 0xef; data.big++) {
1446 			data.count = 0;
1447 			data.bis = 0xff;
1448 
1449 			hci_conn_hash_list_state(hdev, bis_list, ISO_LINK,
1450 						 BT_BOUND, &data);
1451 			if (!data.count)
1452 				break;
1453 		}
1454 
1455 		if (data.big == 0xef)
1456 			return -EADDRNOTAVAIL;
1457 
1458 		/* Update BIG */
1459 		qos->bcast.big = data.big;
1460 	}
1461 
1462 	return 0;
1463 }
1464 
1465 static int qos_set_bis(struct hci_dev *hdev, struct bt_iso_qos *qos)
1466 {
1467 	struct iso_list_data data;
1468 
1469 	/* Allocate BIS if not set */
1470 	if (qos->bcast.bis == BT_ISO_QOS_BIS_UNSET) {
1471 		/* Find an unused adv set to advertise BIS, skip instance 0x00
1472 		 * since it is reserved as general purpose set.
1473 		 */
1474 		for (data.bis = 0x01; data.bis < hdev->le_num_of_adv_sets;
1475 		     data.bis++) {
1476 			data.count = 0;
1477 
1478 			hci_conn_hash_list_state(hdev, bis_list, ISO_LINK,
1479 						 BT_BOUND, &data);
1480 			if (!data.count)
1481 				break;
1482 		}
1483 
1484 		if (data.bis == hdev->le_num_of_adv_sets)
1485 			return -EADDRNOTAVAIL;
1486 
1487 		/* Update BIS */
1488 		qos->bcast.bis = data.bis;
1489 	}
1490 
1491 	return 0;
1492 }
1493 
/* This function requires the caller holds hdev->lock */
/* Create a connection object for a new broadcast (BIS) source towards
 * @dst, allocating BIG/BIS handles in @qos as needed.
 *
 * Returns the new hci_conn (with a reference held and state BT_CONNECT)
 * or an ERR_PTR: -ECONNREFUSED/-EOPNOTSUPP if LE is not enabled /
 * supported, -EADDRNOTAVAIL if no BIG/BIS handle is free,
 * -EADDRINUSE if a matching BIG/BIS already exists, -ENOMEM on
 * allocation failure.
 */
static struct hci_conn *hci_add_bis(struct hci_dev *hdev, bdaddr_t *dst,
				    struct bt_iso_qos *qos)
{
	struct hci_conn *conn;
	struct iso_list_data data;
	int err;

	/* Let's make sure that le is enabled.*/
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (lmp_le_capable(hdev))
			return ERR_PTR(-ECONNREFUSED);
		return ERR_PTR(-EOPNOTSUPP);
	}

	err = qos_set_big(hdev, qos);
	if (err)
		return ERR_PTR(err);

	err = qos_set_bis(hdev, qos);
	if (err)
		return ERR_PTR(err);

	data.big = qos->bcast.big;
	data.bis = qos->bcast.bis;
	data.count = 0;

	/* Check if there is already a matching BIG/BIS */
	hci_conn_hash_list_state(hdev, bis_list, ISO_LINK, BT_BOUND, &data);
	if (data.count)
		return ERR_PTR(-EADDRINUSE);

	conn = hci_conn_hash_lookup_bis(hdev, dst, qos->bcast.big, qos->bcast.bis);
	if (conn)
		return ERR_PTR(-EADDRINUSE);

	conn = hci_conn_add(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
	if (!conn)
		return ERR_PTR(-ENOMEM);

	set_bit(HCI_CONN_PER_ADV, &conn->flags);
	conn->state = BT_CONNECT;

	hci_conn_hold(conn);
	return conn;
}
1540 
/* This function requires the caller holds hdev->lock */
/* Connect to an LE device via passive scanning: the connection is
 * established once the device is seen advertising (the params added
 * here put it on the pend_le_conns list).
 *
 * Returns the (new or reused) hci_conn with a reference held, or an
 * ERR_PTR: -ECONNREFUSED/-EOPNOTSUPP if LE is not enabled/supported,
 * -ENOMEM on allocation failure, -EBUSY if the explicit connect params
 * could not be set.
 */
struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
				     u8 dst_type, u8 sec_level,
				     u16 conn_timeout,
				     enum conn_reasons conn_reason)
{
	struct hci_conn *conn;

	/* Let's make sure that le is enabled.*/
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (lmp_le_capable(hdev))
			return ERR_PTR(-ECONNREFUSED);

		return ERR_PTR(-EOPNOTSUPP);
	}

	/* Some devices send ATT messages as soon as the physical link is
	 * established. To be able to handle these ATT messages, the user-
	 * space first establishes the connection and then starts the pairing
	 * process.
	 *
	 * So if a hci_conn object already exists for the following connection
	 * attempt, we simply update pending_sec_level and auth_type fields
	 * and return the object found.
	 */
	conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
	if (conn) {
		if (conn->pending_sec_level < sec_level)
			conn->pending_sec_level = sec_level;
		goto done;
	}

	BT_DBG("requesting refresh of dst_addr");

	conn = hci_conn_add(hdev, LE_LINK, dst, HCI_ROLE_MASTER);
	if (!conn)
		return ERR_PTR(-ENOMEM);

	if (hci_explicit_conn_params_set(hdev, dst, dst_type) < 0) {
		hci_conn_del(conn);
		return ERR_PTR(-EBUSY);
	}

	conn->state = BT_CONNECT;
	set_bit(HCI_CONN_SCANNING, &conn->flags);
	conn->dst_type = dst_type;
	conn->sec_level = BT_SECURITY_LOW;
	conn->pending_sec_level = sec_level;
	conn->conn_timeout = conn_timeout;
	conn->conn_reason = conn_reason;

	/* Kick passive scanning so the new pend_le_conns entry is used */
	hci_update_passive_scan(hdev);

done:
	hci_conn_hold(conn);
	return conn;
}
1598 
/* Establish (or reuse) a BR/EDR ACL connection to @dst.
 *
 * A Create Connection is only issued when the existing/new connection
 * is in BT_OPEN or BT_CLOSED state; otherwise the in-progress or
 * established connection is returned as-is.  Returns the hci_conn with
 * a reference held, or an ERR_PTR (-ECONNREFUSED/-EOPNOTSUPP if BR/EDR
 * is not enabled/supported, -ENOMEM on allocation failure).
 */
struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
				 u8 sec_level, u8 auth_type,
				 enum conn_reasons conn_reason)
{
	struct hci_conn *acl;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (lmp_bredr_capable(hdev))
			return ERR_PTR(-ECONNREFUSED);

		return ERR_PTR(-EOPNOTSUPP);
	}

	acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
	if (!acl) {
		acl = hci_conn_add(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
		if (!acl)
			return ERR_PTR(-ENOMEM);
	}

	hci_conn_hold(acl);

	acl->conn_reason = conn_reason;
	if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
		acl->sec_level = BT_SECURITY_LOW;
		acl->pending_sec_level = sec_level;
		acl->auth_type = auth_type;
		hci_acl_create_connection(acl);
	}

	return acl;
}
1631 
/* Link child connection @conn to @parent via a newly allocated hci_link.
 *
 * Returns the existing link if @conn is already linked, or NULL if
 * @conn already belongs to a different parent or the allocation fails.
 * Takes a hold on @conn (owned by the link) and a get on @parent; both
 * are released again in hci_conn_unlink().
 */
static struct hci_link *hci_conn_link(struct hci_conn *parent,
				      struct hci_conn *conn)
{
	struct hci_dev *hdev = parent->hdev;
	struct hci_link *link;

	bt_dev_dbg(hdev, "parent %p hcon %p", parent, conn);

	if (conn->link)
		return conn->link;

	if (conn->parent)
		return NULL;

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return NULL;

	link->conn = hci_conn_hold(conn);
	conn->link = link;
	conn->parent = hci_conn_get(parent);

	/* Use list_add_tail_rcu append to the list */
	list_add_tail_rcu(&link->list, &parent->link_list);

	return link;
}
1659 
/* Establish a SCO/eSCO connection to @dst, creating (or reusing) the
 * underlying ACL link first and linking the SCO connection to it as a
 * child.
 *
 * @type:    SCO_LINK or ESCO_LINK
 * @setting: SCO voice setting
 * @codec:   codec configuration copied into the connection
 *
 * The actual SCO setup is sent once the ACL is connected; if a mode
 * change is pending it is deferred via HCI_CONN_SCO_SETUP_PEND.
 * Returns the SCO hci_conn, an ERR_PTR from the ACL setup / -ENOMEM,
 * or NULL if linking to the ACL failed.
 */
struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
				 __u16 setting, struct bt_codec *codec)
{
	struct hci_conn *acl;
	struct hci_conn *sco;
	struct hci_link *link;

	acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING,
			      CONN_REASON_SCO_CONNECT);
	if (IS_ERR(acl))
		return acl;

	sco = hci_conn_hash_lookup_ba(hdev, type, dst);
	if (!sco) {
		sco = hci_conn_add(hdev, type, dst, HCI_ROLE_MASTER);
		if (!sco) {
			hci_conn_drop(acl);
			return ERR_PTR(-ENOMEM);
		}
	}

	link = hci_conn_link(acl, sco);
	if (!link) {
		hci_conn_drop(acl);
		hci_conn_drop(sco);
		return NULL;
	}

	sco->setting = setting;
	sco->codec = *codec;

	if (acl->state == BT_CONNECTED &&
	    (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
		set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
		hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);

		if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
			/* defer SCO setup until mode change completed */
			set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
			return sco;
		}

		hci_sco_setup(acl, 0x00);
	}

	return sco;
}
1707 
1708 static void cis_add(struct iso_list_data *d, struct bt_iso_qos *qos)
1709 {
1710 	struct hci_cis_params *cis = &d->pdu.cis[d->pdu.cp.num_cis];
1711 
1712 	cis->cis_id = qos->ucast.cis;
1713 	cis->c_sdu  = cpu_to_le16(qos->ucast.out.sdu);
1714 	cis->p_sdu  = cpu_to_le16(qos->ucast.in.sdu);
1715 	cis->c_phy  = qos->ucast.out.phy ? qos->ucast.out.phy : qos->ucast.in.phy;
1716 	cis->p_phy  = qos->ucast.in.phy ? qos->ucast.in.phy : qos->ucast.out.phy;
1717 	cis->c_rtn  = qos->ucast.out.rtn;
1718 	cis->p_rtn  = qos->ucast.in.rtn;
1719 
1720 	d->pdu.cp.num_cis++;
1721 }
1722 
/* hci_conn_hash iterator: count CIS connections matching d->cig/d->cis
 * and, while a concrete CIG was requested and the PDU has room, collect
 * their parameters into the pending Set CIG Parameters PDU.
 */
static void cis_list(struct hci_conn *conn, void *data)
{
	struct iso_list_data *d = data;

	/* Skip if broadcast/ANY address */
	if (!bacmp(&conn->dst, BDADDR_ANY))
		return;

	if (d->cig != conn->iso_qos.ucast.cig || d->cis == BT_ISO_QOS_CIS_UNSET ||
	    d->cis != conn->iso_qos.ucast.cis)
		return;

	d->count++;

	/* Only build PDU entries for a concrete CIG and while there is
	 * still room in the CIS array.
	 */
	if (d->pdu.cp.cig_id == BT_ISO_QOS_CIG_UNSET ||
	    d->count >= ARRAY_SIZE(d->pdu.cis))
		return;

	cis_add(d, &conn->iso_qos);
}
1743 
/* Send HCI LE Create BIG for @conn using the broadcast settings from
 * @qos.  Only a single BIS is created (num_bis = 0x01).  Returns the
 * hci_send_cmd() result.
 */
static int hci_le_create_big(struct hci_conn *conn, struct bt_iso_qos *qos)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_le_create_big cp;

	memset(&cp, 0, sizeof(cp));

	cp.handle = qos->bcast.big;
	cp.adv_handle = qos->bcast.bis;
	cp.num_bis  = 0x01;
	hci_cpu_to_le24(qos->bcast.out.interval, cp.bis.sdu_interval);
	cp.bis.sdu = cpu_to_le16(qos->bcast.out.sdu);
	cp.bis.latency =  cpu_to_le16(qos->bcast.out.latency);
	cp.bis.rtn  = qos->bcast.out.rtn;
	cp.bis.phy  = qos->bcast.out.phy;
	cp.bis.packing = qos->bcast.packing;
	cp.bis.framing = qos->bcast.framing;
	cp.bis.encryption = qos->bcast.encryption;
	memcpy(cp.bis.bcode, qos->bcast.bcode, sizeof(cp.bis.bcode));

	return hci_send_cmd(hdev, HCI_OP_LE_CREATE_BIG, sizeof(cp), &cp);
}
1766 
/* Build and send an HCI LE Set CIG Parameters command for @conn's CIG.
 *
 * Allocates a CIG (first one with no CIS in BT_CONNECT/BT_CONNECTED
 * state) and a CIS id for @qos when those are unset, and includes every
 * already-bound CIS of the same CIG in the PDU so the whole group is
 * reprogrammed together.  Returns false when no CIG/CIS could be
 * allocated, the requested CIS already exists, or sending the command
 * failed; true otherwise.
 */
static bool hci_le_set_cig_params(struct hci_conn *conn, struct bt_iso_qos *qos)
{
	struct hci_dev *hdev = conn->hdev;
	struct iso_list_data data;

	memset(&data, 0, sizeof(data));

	/* Allocate first still reconfigurable CIG if not set */
	if (qos->ucast.cig == BT_ISO_QOS_CIG_UNSET) {
		for (data.cig = 0x00; data.cig < 0xf0; data.cig++) {
			data.count = 0;

			hci_conn_hash_list_state(hdev, find_cis, ISO_LINK,
						 BT_CONNECT, &data);
			if (data.count)
				continue;

			hci_conn_hash_list_state(hdev, find_cis, ISO_LINK,
						 BT_CONNECTED, &data);
			if (!data.count)
				break;
		}

		if (data.cig == 0xf0)
			return false;

		/* Update CIG */
		qos->ucast.cig = data.cig;
	}

	data.pdu.cp.cig_id = qos->ucast.cig;
	hci_cpu_to_le24(qos->ucast.out.interval, data.pdu.cp.c_interval);
	hci_cpu_to_le24(qos->ucast.in.interval, data.pdu.cp.p_interval);
	data.pdu.cp.sca = qos->ucast.sca;
	data.pdu.cp.packing = qos->ucast.packing;
	data.pdu.cp.framing = qos->ucast.framing;
	data.pdu.cp.c_latency = cpu_to_le16(qos->ucast.out.latency);
	data.pdu.cp.p_latency = cpu_to_le16(qos->ucast.in.latency);

	/* A specific CIS id was requested: fail if it is already bound,
	 * otherwise add it to the PDU.
	 */
	if (qos->ucast.cis != BT_ISO_QOS_CIS_UNSET) {
		data.count = 0;
		data.cig = qos->ucast.cig;
		data.cis = qos->ucast.cis;

		hci_conn_hash_list_state(hdev, cis_list, ISO_LINK, BT_BOUND,
					 &data);
		if (data.count)
			return false;

		cis_add(&data, qos);
	}

	/* Reprogram all CIS(s) with the same CIG */
	for (data.cig = qos->ucast.cig, data.cis = 0x00; data.cis < 0x11;
	     data.cis++) {
		data.count = 0;

		hci_conn_hash_list_state(hdev, cis_list, ISO_LINK, BT_BOUND,
					 &data);
		if (data.count)
			continue;

		/* Allocate a CIS if not set */
		if (qos->ucast.cis == BT_ISO_QOS_CIS_UNSET) {
			/* Update CIS */
			qos->ucast.cis = data.cis;
			cis_add(&data, qos);
		}
	}

	if (qos->ucast.cis == BT_ISO_QOS_CIS_UNSET || !data.pdu.cp.num_cis)
		return false;

	/* Variable-length command: header plus one entry per CIS */
	if (hci_send_cmd(hdev, HCI_OP_LE_SET_CIG_PARAMS,
			 sizeof(data.pdu.cp) +
			 (data.pdu.cp.num_cis * sizeof(*data.pdu.cis)),
			 &data.pdu) < 0)
		return false;

	return true;
}
1848 
/* Bind a CIS connection towards @dst to the QoS settings in @qos.
 *
 * An existing connection with matching CIG/CIS is reused; an already
 * connected, or already bound with identical QoS, connection is
 * returned unchanged.  Unset interval/latency values in @qos are filled
 * in from the opposite direction before the CIG is programmed via
 * hci_le_set_cig_params().  Returns the CIS hci_conn or an ERR_PTR
 * (-ENOMEM, or -EINVAL when setting the CIG parameters failed).
 */
struct hci_conn *hci_bind_cis(struct hci_dev *hdev, bdaddr_t *dst,
			      __u8 dst_type, struct bt_iso_qos *qos)
{
	struct hci_conn *cis;

	cis = hci_conn_hash_lookup_cis(hdev, dst, dst_type, qos->ucast.cig,
				       qos->ucast.cis);
	if (!cis) {
		cis = hci_conn_add(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
		if (!cis)
			return ERR_PTR(-ENOMEM);
		cis->cleanup = cis_cleanup;
		cis->dst_type = dst_type;
	}

	if (cis->state == BT_CONNECTED)
		return cis;

	/* Check if CIS has been set and the settings matches */
	if (cis->state == BT_BOUND &&
	    !memcmp(&cis->iso_qos, qos, sizeof(*qos)))
		return cis;

	/* Update LINK PHYs according to QoS preference */
	cis->le_tx_phy = qos->ucast.out.phy;
	cis->le_rx_phy = qos->ucast.in.phy;

	/* If output interval is not set use the input interval as it cannot be
	 * 0x000000.
	 */
	if (!qos->ucast.out.interval)
		qos->ucast.out.interval = qos->ucast.in.interval;

	/* If input interval is not set use the output interval as it cannot be
	 * 0x000000.
	 */
	if (!qos->ucast.in.interval)
		qos->ucast.in.interval = qos->ucast.out.interval;

	/* If output latency is not set use the input latency as it cannot be
	 * 0x0000.
	 */
	if (!qos->ucast.out.latency)
		qos->ucast.out.latency = qos->ucast.in.latency;

	/* If input latency is not set use the output latency as it cannot be
	 * 0x0000.
	 */
	if (!qos->ucast.in.latency)
		qos->ucast.in.latency = qos->ucast.out.latency;

	if (!hci_le_set_cig_params(cis, qos)) {
		hci_conn_drop(cis);
		return ERR_PTR(-EINVAL);
	}

	cis->iso_qos = *qos;
	cis->state = BT_BOUND;

	return cis;
}
1910 
1911 bool hci_iso_setup_path(struct hci_conn *conn)
1912 {
1913 	struct hci_dev *hdev = conn->hdev;
1914 	struct hci_cp_le_setup_iso_path cmd;
1915 
1916 	memset(&cmd, 0, sizeof(cmd));
1917 
1918 	if (conn->iso_qos.ucast.out.sdu) {
1919 		cmd.handle = cpu_to_le16(conn->handle);
1920 		cmd.direction = 0x00; /* Input (Host to Controller) */
1921 		cmd.path = 0x00; /* HCI path if enabled */
1922 		cmd.codec = 0x03; /* Transparent Data */
1923 
1924 		if (hci_send_cmd(hdev, HCI_OP_LE_SETUP_ISO_PATH, sizeof(cmd),
1925 				 &cmd) < 0)
1926 			return false;
1927 	}
1928 
1929 	if (conn->iso_qos.ucast.in.sdu) {
1930 		cmd.handle = cpu_to_le16(conn->handle);
1931 		cmd.direction = 0x01; /* Output (Controller to Host) */
1932 		cmd.path = 0x00; /* HCI path if enabled */
1933 		cmd.codec = 0x03; /* Transparent Data */
1934 
1935 		if (hci_send_cmd(hdev, HCI_OP_LE_SETUP_ISO_PATH, sizeof(cmd),
1936 				 &cmd) < 0)
1937 			return false;
1938 	}
1939 
1940 	return true;
1941 }
1942 
/* hci_cmd_sync wrapper: @data is the CIS hci_conn to create */
static int hci_create_cis_sync(struct hci_dev *hdev, void *data)
{
	return hci_le_create_cis_sync(hdev, data);
}
1947 
/* Queue creation of the CIS(s) associated with @conn.
 *
 * For an LE (ACL) parent this recurses into every linked child CIS that
 * is still in BT_BOUND state; for an ISO connection the Create CIS is
 * queued directly (no-op if it is already in BT_CONNECT).  Returns 0 on
 * success, -EINVAL for unusable connection types/states, or the
 * hci_cmd_sync_queue() error.
 */
int hci_le_create_cis(struct hci_conn *conn)
{
	struct hci_conn *cis;
	struct hci_link *link, *t;
	struct hci_dev *hdev = conn->hdev;
	int err;

	bt_dev_dbg(hdev, "hcon %p", conn);

	switch (conn->type) {
	case LE_LINK:
		if (conn->state != BT_CONNECTED || list_empty(&conn->link_list))
			return -EINVAL;

		cis = NULL;

		/* hci_conn_link uses list_add_tail_rcu so the list is in
		 * the same order as the connections are requested.
		 */
		list_for_each_entry_safe(link, t, &conn->link_list, list) {
			if (link->conn->state == BT_BOUND) {
				err = hci_le_create_cis(link->conn);
				if (err)
					return err;

				cis = link->conn;
			}
		}

		/* Fail if no bound CIS child was found */
		return cis ? 0 : -EINVAL;
	case ISO_LINK:
		cis = conn;
		break;
	default:
		return -EINVAL;
	}

	/* Already being created: nothing more to do */
	if (cis->state == BT_CONNECT)
		return 0;

	/* Queue Create CIS */
	err = hci_cmd_sync_queue(hdev, hci_create_cis_sync, cis, NULL);
	if (err)
		return err;

	cis->state = BT_CONNECT;

	return 0;
}
1997 
1998 static void hci_iso_qos_setup(struct hci_dev *hdev, struct hci_conn *conn,
1999 			      struct bt_iso_io_qos *qos, __u8 phy)
2000 {
2001 	/* Only set MTU if PHY is enabled */
2002 	if (!qos->sdu && qos->phy) {
2003 		if (hdev->iso_mtu > 0)
2004 			qos->sdu = hdev->iso_mtu;
2005 		else if (hdev->le_mtu > 0)
2006 			qos->sdu = hdev->le_mtu;
2007 		else
2008 			qos->sdu = hdev->acl_mtu;
2009 	}
2010 
2011 	/* Use the same PHY as ACL if set to any */
2012 	if (qos->phy == BT_ISO_PHY_ANY)
2013 		qos->phy = phy;
2014 
2015 	/* Use LE ACL connection interval if not set */
2016 	if (!qos->interval)
2017 		/* ACL interval unit in 1.25 ms to us */
2018 		qos->interval = conn->le_conn_interval * 1250;
2019 
2020 	/* Use LE ACL connection latency if not set */
2021 	if (!qos->latency)
2022 		qos->latency = conn->le_conn_latency;
2023 }
2024 
2025 static void hci_bind_bis(struct hci_conn *conn,
2026 			 struct bt_iso_qos *qos)
2027 {
2028 	/* Update LINK PHYs according to QoS preference */
2029 	conn->le_tx_phy = qos->bcast.out.phy;
2030 	conn->le_tx_phy = qos->bcast.out.phy;
2031 	conn->iso_qos = *qos;
2032 	conn->state = BT_BOUND;
2033 }
2034 
/* hci_cmd_sync callback: start periodic advertising for the BIS and
 * then issue LE Create BIG for @data (the BIS hci_conn).
 */
static int create_big_sync(struct hci_dev *hdev, void *data)
{
	struct hci_conn *conn = data;
	struct bt_iso_qos *qos = &conn->iso_qos;
	u16 interval, sync_interval = 0;
	u32 flags = 0;
	int err;

	if (qos->bcast.out.phy == 0x02)
		flags |= MGMT_ADV_FLAG_SEC_2M;

	/* Align intervals */
	/* out.interval is in us; /1250 converts to 1.25 ms advertising
	 * interval units (see hci_iso_qos_setup()).
	 */
	interval = qos->bcast.out.interval / 1250;

	if (qos->bcast.bis)
		sync_interval = qos->bcast.sync_interval * 1600;

	err = hci_start_per_adv_sync(hdev, qos->bcast.bis, conn->le_per_adv_data_len,
				     conn->le_per_adv_data, flags, interval,
				     interval, sync_interval);
	if (err)
		return err;

	return hci_le_create_big(conn, &conn->iso_qos);
}
2060 
/* Completion callback for create_pa_sync(): log any failure and free
 * the command parameters allocated in hci_pa_create_sync().
 */
static void create_pa_complete(struct hci_dev *hdev, void *data, int err)
{
	bt_dev_dbg(hdev, "");

	if (err)
		bt_dev_err(hdev, "Unable to create PA: %d", err);

	kfree(data);
}
2072 
2073 static int create_pa_sync(struct hci_dev *hdev, void *data)
2074 {
2075 	struct hci_cp_le_pa_create_sync *cp = data;
2076 	int err;
2077 
2078 	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_CREATE_SYNC,
2079 				    sizeof(*cp), cp, HCI_CMD_TIMEOUT);
2080 	if (err) {
2081 		hci_dev_clear_flag(hdev, HCI_PA_SYNC);
2082 		return err;
2083 	}
2084 
2085 	return hci_update_passive_scan_sync(hdev);
2086 }
2087 
/* Start synchronising to a periodic advertising train from @dst.
 *
 * Only one PA sync may be pending at a time (guarded by the HCI_PA_SYNC
 * flag — returns -EBUSY otherwise).  The command parameters are
 * heap-allocated and freed in create_pa_complete() after the queued
 * work has run.  Returns 0 on queueing success, -EBUSY or -ENOMEM.
 */
int hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst, __u8 dst_type,
		       __u8 sid, struct bt_iso_qos *qos)
{
	struct hci_cp_le_pa_create_sync *cp;

	if (hci_dev_test_and_set_flag(hdev, HCI_PA_SYNC))
		return -EBUSY;

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp) {
		hci_dev_clear_flag(hdev, HCI_PA_SYNC);
		return -ENOMEM;
	}

	cp->options = qos->bcast.options;
	cp->sid = sid;
	cp->addr_type = dst_type;
	bacpy(&cp->addr, dst);
	cp->skip = cpu_to_le16(qos->bcast.skip);
	cp->sync_timeout = cpu_to_le16(qos->bcast.sync_timeout);
	cp->sync_cte_type = qos->bcast.sync_cte_type;

	/* Queue start pa_create_sync and scan */
	return hci_cmd_sync_queue(hdev, create_pa_sync, cp, create_pa_complete);
}
2113 
/* Send HCI LE BIG Create Sync to synchronise to @num_bis BIS indices
 * (listed in @bis) of a broadcast identified by @sync_handle, allocating
 * a BIG handle in @qos if unset.  Returns 0 on success or a negative
 * error (-EINVAL if @num_bis exceeds the PDU capacity).
 */
int hci_le_big_create_sync(struct hci_dev *hdev, struct bt_iso_qos *qos,
			   __u16 sync_handle, __u8 num_bis, __u8 bis[])
{
	/* NOTE(review): "_packed" here is only a struct *tag* name, not
	 * the __packed attribute, so this struct has default layout —
	 * confirm the attribute was not intended instead.
	 */
	struct _packed {
		struct hci_cp_le_big_create_sync cp;
		__u8  bis[0x11];
	} pdu;
	int err;

	if (num_bis > sizeof(pdu.bis))
		return -EINVAL;

	err = qos_set_big(hdev, qos);
	if (err)
		return err;

	memset(&pdu, 0, sizeof(pdu));
	pdu.cp.handle = qos->bcast.big;
	pdu.cp.sync_handle = cpu_to_le16(sync_handle);
	pdu.cp.encryption = qos->bcast.encryption;
	memcpy(pdu.cp.bcode, qos->bcast.bcode, sizeof(pdu.cp.bcode));
	pdu.cp.mse = qos->bcast.mse;
	pdu.cp.timeout = cpu_to_le16(qos->bcast.timeout);
	pdu.cp.num_bis = num_bis;
	memcpy(pdu.bis, bis, num_bis);

	/* Variable-length command: header plus one byte per BIS index */
	return hci_send_cmd(hdev, HCI_OP_LE_BIG_CREATE_SYNC,
			    sizeof(pdu.cp) + num_bis, &pdu);
}
2143 
/* Completion callback for the queued BIG create work: on failure, notify
 * the upper layers and tear the connection down.
 */
static void create_big_complete(struct hci_dev *hdev, void *data, int err)
{
	struct hci_conn *conn = data;

	bt_dev_dbg(hdev, "conn %p", conn);

	if (!err)
		return;

	bt_dev_err(hdev, "Unable to create BIG: %d", err);
	hci_connect_cfm(conn, err);
	hci_conn_del(conn);
}
2156 
/* Create a broadcast source (BIS) connection.
 *
 * Allocates the hci_conn for the Broadcast Isochronous Stream, optionally
 * embeds the BASE into the periodic advertising data, then queues the
 * deferred work that starts periodic advertising and creates the BIG.
 *
 * Returns the new hci_conn or an ERR_PTR() on failure.
 */
struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst,
				 __u8 dst_type, struct bt_iso_qos *qos,
				 __u8 base_len, __u8 *base)
{
	struct hci_conn *conn;
	int err;

	/* We need hci_conn object using the BDADDR_ANY as dst */
	conn = hci_add_bis(hdev, dst, qos);
	if (IS_ERR(conn))
		return conn;

	hci_bind_bis(conn, qos);

	/* Add Basic Announcement into Periodic Adv Data if BASE is set.
	 * 0x1851 is the Broadcast Audio Announcement Service UUID.
	 */
	if (base_len && base) {
		base_len = eir_append_service_data(conn->le_per_adv_data, 0,
						   0x1851, base, base_len);
		conn->le_per_adv_data_len = base_len;
	}

	/* Queue start periodic advertising and create BIG */
	err = hci_cmd_sync_queue(hdev, create_big_sync, conn,
				 create_big_complete);
	if (err < 0) {
		hci_conn_drop(conn);
		return ERR_PTR(err);
	}

	/* Use the connection's negotiated TX PHY if known, otherwise the
	 * controller's default TX PHYs.
	 */
	hci_iso_qos_setup(hdev, conn, &qos->bcast.out,
			  conn->le_tx_phy ? conn->le_tx_phy :
			  hdev->le_tx_def_phys);

	return conn;
}
2192 
/* Connect an isochronous stream (CIS) to a remote device.
 *
 * First establishes (or reuses) the underlying LE connection -- in the
 * peripheral role while advertising, otherwise via connected scanning --
 * then binds a CIS on top of it and links the two connections so their
 * lifetimes are tied together.
 *
 * Returns the CIS hci_conn, an ERR_PTR() on failure, or NULL if the LE
 * and CIS connections could not be linked.
 */
struct hci_conn *hci_connect_cis(struct hci_dev *hdev, bdaddr_t *dst,
				 __u8 dst_type, struct bt_iso_qos *qos)
{
	struct hci_conn *le;
	struct hci_conn *cis;
	struct hci_link *link;

	/* While advertising we expect the remote to initiate, so take the
	 * slave role; otherwise actively scan and connect.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		le = hci_connect_le(hdev, dst, dst_type, false,
				    BT_SECURITY_LOW,
				    HCI_LE_CONN_TIMEOUT,
				    HCI_ROLE_SLAVE);
	else
		le = hci_connect_le_scan(hdev, dst, dst_type,
					 BT_SECURITY_LOW,
					 HCI_LE_CONN_TIMEOUT,
					 CONN_REASON_ISO_CONNECT);
	if (IS_ERR(le))
		return le;

	/* Prefer the PHYs already negotiated on the LE link, falling back
	 * to the controller's default TX/RX PHYs.
	 */
	hci_iso_qos_setup(hdev, le, &qos->ucast.out,
			  le->le_tx_phy ? le->le_tx_phy : hdev->le_tx_def_phys);
	hci_iso_qos_setup(hdev, le, &qos->ucast.in,
			  le->le_rx_phy ? le->le_rx_phy : hdev->le_rx_def_phys);

	cis = hci_bind_cis(hdev, dst, dst_type, qos);
	if (IS_ERR(cis)) {
		hci_conn_drop(le);
		return cis;
	}

	/* Tie the LE and CIS connection lifetimes together */
	link = hci_conn_link(le, cis);
	if (!link) {
		hci_conn_drop(le);
		hci_conn_drop(cis);
		return NULL;
	}

	/* If LE is already connected and CIS handle is already set proceed to
	 * Create CIS immediately.
	 */
	if (le->state == BT_CONNECTED && cis->handle != HCI_CONN_HANDLE_UNSET)
		hci_le_create_cis(cis);

	return cis;
}
2239 
2240 /* Check link security requirement */
2241 int hci_conn_check_link_mode(struct hci_conn *conn)
2242 {
2243 	BT_DBG("hcon %p", conn);
2244 
2245 	/* In Secure Connections Only mode, it is required that Secure
2246 	 * Connections is used and the link is encrypted with AES-CCM
2247 	 * using a P-256 authenticated combination key.
2248 	 */
2249 	if (hci_dev_test_flag(conn->hdev, HCI_SC_ONLY)) {
2250 		if (!hci_conn_sc_enabled(conn) ||
2251 		    !test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
2252 		    conn->key_type != HCI_LK_AUTH_COMBINATION_P256)
2253 			return 0;
2254 	}
2255 
2256 	 /* AES encryption is required for Level 4:
2257 	  *
2258 	  * BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 3, Part C
2259 	  * page 1319:
2260 	  *
2261 	  * 128-bit equivalent strength for link and encryption keys
2262 	  * required using FIPS approved algorithms (E0 not allowed,
2263 	  * SAFER+ not allowed, and P-192 not allowed; encryption key
2264 	  * not shortened)
2265 	  */
2266 	if (conn->sec_level == BT_SECURITY_FIPS &&
2267 	    !test_bit(HCI_CONN_AES_CCM, &conn->flags)) {
2268 		bt_dev_err(conn->hdev,
2269 			   "Invalid security: Missing AES-CCM usage");
2270 		return 0;
2271 	}
2272 
2273 	if (hci_conn_ssp_enabled(conn) &&
2274 	    !test_bit(HCI_CONN_ENCRYPT, &conn->flags))
2275 		return 0;
2276 
2277 	return 1;
2278 }
2279 
/* Authenticate remote device.
 *
 * Returns 1 when the link is already authenticated at a sufficient
 * security level; returns 0 when authentication is (or was already)
 * pending.
 */
static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
	BT_DBG("hcon %p", conn);

	/* A stronger already-pending request takes precedence over the
	 * level asked for here.
	 */
	if (conn->pending_sec_level > sec_level)
		sec_level = conn->pending_sec_level;

	if (sec_level > conn->sec_level)
		conn->pending_sec_level = sec_level;
	else if (test_bit(HCI_CONN_AUTH, &conn->flags))
		return 1;

	/* Make sure we preserve an existing MITM requirement*/
	auth_type |= (conn->auth_type & 0x01);

	conn->auth_type = auth_type;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(cp), &cp);

		/* If we're already encrypted set the REAUTH_PEND flag,
		 * otherwise set the ENCRYPT_PEND.
		 */
		if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
			set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
		else
			set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
	}

	return 0;
}
2316 
2317 /* Encrypt the link */
2318 static void hci_conn_encrypt(struct hci_conn *conn)
2319 {
2320 	BT_DBG("hcon %p", conn);
2321 
2322 	if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
2323 		struct hci_cp_set_conn_encrypt cp;
2324 		cp.handle  = cpu_to_le16(conn->handle);
2325 		cp.encrypt = 0x01;
2326 		hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2327 			     &cp);
2328 	}
2329 }
2330 
/* Enable security on a connection.
 *
 * For LE links this is delegated to SMP.  For BR/EDR, decides -- based on
 * the requested security level and the type of link key already present --
 * whether authentication and/or encryption must be initiated.
 *
 * Returns 1 when all requirements for @sec_level are already met, 0 when
 * authentication/encryption was started (or is pending) and the caller
 * must wait for the corresponding HCI events.
 */
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
		      bool initiator)
{
	BT_DBG("hcon %p", conn);

	if (conn->type == LE_LINK)
		return smp_conn_security(conn, sec_level);

	/* For sdp we don't need the link key. */
	if (sec_level == BT_SECURITY_SDP)
		return 1;

	/* For non 2.1 devices and low security level we don't need the link
	   key. */
	if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
		return 1;

	/* For other security levels we need the link key. */
	if (!test_bit(HCI_CONN_AUTH, &conn->flags))
		goto auth;

	/* An authenticated FIPS approved combination key has sufficient
	 * security for security level 4. */
	if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256 &&
	    sec_level == BT_SECURITY_FIPS)
		goto encrypt;

	/* An authenticated combination key has sufficient security for
	   security level 3. */
	if ((conn->key_type == HCI_LK_AUTH_COMBINATION_P192 ||
	     conn->key_type == HCI_LK_AUTH_COMBINATION_P256) &&
	    sec_level == BT_SECURITY_HIGH)
		goto encrypt;

	/* An unauthenticated combination key has sufficient security for
	   security level 1 and 2. */
	if ((conn->key_type == HCI_LK_UNAUTH_COMBINATION_P192 ||
	     conn->key_type == HCI_LK_UNAUTH_COMBINATION_P256) &&
	    (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW))
		goto encrypt;

	/* A combination key has always sufficient security for the security
	   levels 1 or 2. High security level requires the combination key
	   is generated using maximum PIN code length (16).
	   For pre 2.1 units. */
	if (conn->key_type == HCI_LK_COMBINATION &&
	    (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW ||
	     conn->pin_length == 16))
		goto encrypt;

auth:
	/* Don't start authentication while an encryption change is
	 * already in flight.
	 */
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
		return 0;

	if (initiator)
		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

	if (!hci_conn_auth(conn, sec_level, auth_type))
		return 0;

encrypt:
	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags)) {
		/* Ensure that the encryption key size has been read,
		 * otherwise stall the upper layer responses.
		 */
		if (!conn->enc_key_size)
			return 0;

		/* Nothing else needed, all requirements are met */
		return 1;
	}

	hci_conn_encrypt(conn);
	return 0;
}
EXPORT_SYMBOL(hci_conn_security);
2408 
2409 /* Check secure link requirement */
2410 int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
2411 {
2412 	BT_DBG("hcon %p", conn);
2413 
2414 	/* Accept if non-secure or higher security level is required */
2415 	if (sec_level != BT_SECURITY_HIGH && sec_level != BT_SECURITY_FIPS)
2416 		return 1;
2417 
2418 	/* Accept if secure or higher security level is already present */
2419 	if (conn->sec_level == BT_SECURITY_HIGH ||
2420 	    conn->sec_level == BT_SECURITY_FIPS)
2421 		return 1;
2422 
2423 	/* Reject not secure link */
2424 	return 0;
2425 }
2426 EXPORT_SYMBOL(hci_conn_check_secure);
2427 
2428 /* Switch role */
2429 int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
2430 {
2431 	BT_DBG("hcon %p", conn);
2432 
2433 	if (role == conn->role)
2434 		return 1;
2435 
2436 	if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
2437 		struct hci_cp_switch_role cp;
2438 		bacpy(&cp.bdaddr, &conn->dst);
2439 		cp.role = role;
2440 		hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
2441 	}
2442 
2443 	return 0;
2444 }
2445 EXPORT_SYMBOL(hci_conn_switch_role);
2446 
2447 /* Enter active mode */
2448 void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
2449 {
2450 	struct hci_dev *hdev = conn->hdev;
2451 
2452 	BT_DBG("hcon %p mode %d", conn, conn->mode);
2453 
2454 	if (conn->mode != HCI_CM_SNIFF)
2455 		goto timer;
2456 
2457 	if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
2458 		goto timer;
2459 
2460 	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
2461 		struct hci_cp_exit_sniff_mode cp;
2462 		cp.handle = cpu_to_le16(conn->handle);
2463 		hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
2464 	}
2465 
2466 timer:
2467 	if (hdev->idle_timeout > 0)
2468 		queue_delayed_work(hdev->workqueue, &conn->idle_work,
2469 				   msecs_to_jiffies(hdev->idle_timeout));
2470 }
2471 
/* Drop all connections on the device */
void hci_conn_hash_flush(struct hci_dev *hdev)
{
	struct list_head *head = &hdev->conn_hash.list;
	struct hci_conn *conn;

	BT_DBG("hdev %s", hdev->name);

	/* We should not traverse the list here, because hci_conn_del
	 * can remove extra links, which may cause the list traversal
	 * to hit items that have already been released.
	 */
	while ((conn = list_first_entry_or_null(head,
						struct hci_conn,
						list)) != NULL) {
		/* Mark closed, notify upper layers, then free */
		conn->state = BT_CLOSED;
		hci_disconn_cfm(conn, HCI_ERROR_LOCAL_HOST_TERM);
		hci_conn_del(conn);
	}
}
2492 
/* Check pending connect attempts: if an outgoing ACL connection was
 * deferred (state BT_CONNECT2), start creating it now.
 */
void hci_conn_check_pending(struct hci_dev *hdev)
{
	struct hci_conn *conn;

	BT_DBG("hdev %s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
	if (conn)
		hci_acl_create_connection(conn);

	hci_dev_unlock(hdev);
}
2508 
2509 static u32 get_link_mode(struct hci_conn *conn)
2510 {
2511 	u32 link_mode = 0;
2512 
2513 	if (conn->role == HCI_ROLE_MASTER)
2514 		link_mode |= HCI_LM_MASTER;
2515 
2516 	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
2517 		link_mode |= HCI_LM_ENCRYPT;
2518 
2519 	if (test_bit(HCI_CONN_AUTH, &conn->flags))
2520 		link_mode |= HCI_LM_AUTH;
2521 
2522 	if (test_bit(HCI_CONN_SECURE, &conn->flags))
2523 		link_mode |= HCI_LM_SECURE;
2524 
2525 	if (test_bit(HCI_CONN_FIPS, &conn->flags))
2526 		link_mode |= HCI_LM_FIPS;
2527 
2528 	return link_mode;
2529 }
2530 
2531 int hci_get_conn_list(void __user *arg)
2532 {
2533 	struct hci_conn *c;
2534 	struct hci_conn_list_req req, *cl;
2535 	struct hci_conn_info *ci;
2536 	struct hci_dev *hdev;
2537 	int n = 0, size, err;
2538 
2539 	if (copy_from_user(&req, arg, sizeof(req)))
2540 		return -EFAULT;
2541 
2542 	if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
2543 		return -EINVAL;
2544 
2545 	size = sizeof(req) + req.conn_num * sizeof(*ci);
2546 
2547 	cl = kmalloc(size, GFP_KERNEL);
2548 	if (!cl)
2549 		return -ENOMEM;
2550 
2551 	hdev = hci_dev_get(req.dev_id);
2552 	if (!hdev) {
2553 		kfree(cl);
2554 		return -ENODEV;
2555 	}
2556 
2557 	ci = cl->conn_info;
2558 
2559 	hci_dev_lock(hdev);
2560 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2561 		bacpy(&(ci + n)->bdaddr, &c->dst);
2562 		(ci + n)->handle = c->handle;
2563 		(ci + n)->type  = c->type;
2564 		(ci + n)->out   = c->out;
2565 		(ci + n)->state = c->state;
2566 		(ci + n)->link_mode = get_link_mode(c);
2567 		if (++n >= req.conn_num)
2568 			break;
2569 	}
2570 	hci_dev_unlock(hdev);
2571 
2572 	cl->dev_id = hdev->id;
2573 	cl->conn_num = n;
2574 	size = sizeof(req) + n * sizeof(*ci);
2575 
2576 	hci_dev_put(hdev);
2577 
2578 	err = copy_to_user(arg, cl, size);
2579 	kfree(cl);
2580 
2581 	return err ? -EFAULT : 0;
2582 }
2583 
2584 int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
2585 {
2586 	struct hci_conn_info_req req;
2587 	struct hci_conn_info ci;
2588 	struct hci_conn *conn;
2589 	char __user *ptr = arg + sizeof(req);
2590 
2591 	if (copy_from_user(&req, arg, sizeof(req)))
2592 		return -EFAULT;
2593 
2594 	hci_dev_lock(hdev);
2595 	conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
2596 	if (conn) {
2597 		bacpy(&ci.bdaddr, &conn->dst);
2598 		ci.handle = conn->handle;
2599 		ci.type  = conn->type;
2600 		ci.out   = conn->out;
2601 		ci.state = conn->state;
2602 		ci.link_mode = get_link_mode(conn);
2603 	}
2604 	hci_dev_unlock(hdev);
2605 
2606 	if (!conn)
2607 		return -ENOENT;
2608 
2609 	return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
2610 }
2611 
2612 int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
2613 {
2614 	struct hci_auth_info_req req;
2615 	struct hci_conn *conn;
2616 
2617 	if (copy_from_user(&req, arg, sizeof(req)))
2618 		return -EFAULT;
2619 
2620 	hci_dev_lock(hdev);
2621 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
2622 	if (conn)
2623 		req.type = conn->auth_type;
2624 	hci_dev_unlock(hdev);
2625 
2626 	if (!conn)
2627 		return -ENOENT;
2628 
2629 	return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
2630 }
2631 
2632 struct hci_chan *hci_chan_create(struct hci_conn *conn)
2633 {
2634 	struct hci_dev *hdev = conn->hdev;
2635 	struct hci_chan *chan;
2636 
2637 	BT_DBG("%s hcon %p", hdev->name, conn);
2638 
2639 	if (test_bit(HCI_CONN_DROP, &conn->flags)) {
2640 		BT_DBG("Refusing to create new hci_chan");
2641 		return NULL;
2642 	}
2643 
2644 	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
2645 	if (!chan)
2646 		return NULL;
2647 
2648 	chan->conn = hci_conn_get(conn);
2649 	skb_queue_head_init(&chan->data_q);
2650 	chan->state = BT_CONNECTED;
2651 
2652 	list_add_rcu(&chan->list, &conn->chan_list);
2653 
2654 	return chan;
2655 }
2656 
/* Unlink and free an hci_chan.
 *
 * The channel is removed from the connection's list under RCU;
 * synchronize_rcu() guarantees no reader still holds a reference to it
 * before the memory is released.
 */
void hci_chan_del(struct hci_chan *chan)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);

	list_del_rcu(&chan->list);

	synchronize_rcu();

	/* Prevent new hci_chan's to be created for this hci_conn */
	set_bit(HCI_CONN_DROP, &conn->flags);

	/* Drop the reference taken by hci_chan_create() */
	hci_conn_put(conn);

	skb_queue_purge(&chan->data_q);
	kfree(chan);
}
2676 
2677 void hci_chan_list_flush(struct hci_conn *conn)
2678 {
2679 	struct hci_chan *chan, *n;
2680 
2681 	BT_DBG("hcon %p", conn);
2682 
2683 	list_for_each_entry_safe(chan, n, &conn->chan_list, list)
2684 		hci_chan_del(chan);
2685 }
2686 
2687 static struct hci_chan *__hci_chan_lookup_handle(struct hci_conn *hcon,
2688 						 __u16 handle)
2689 {
2690 	struct hci_chan *hchan;
2691 
2692 	list_for_each_entry(hchan, &hcon->chan_list, list) {
2693 		if (hchan->handle == handle)
2694 			return hchan;
2695 	}
2696 
2697 	return NULL;
2698 }
2699 
2700 struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle)
2701 {
2702 	struct hci_conn_hash *h = &hdev->conn_hash;
2703 	struct hci_conn *hcon;
2704 	struct hci_chan *hchan = NULL;
2705 
2706 	rcu_read_lock();
2707 
2708 	list_for_each_entry_rcu(hcon, &h->list, list) {
2709 		hchan = __hci_chan_lookup_handle(hcon, handle);
2710 		if (hchan)
2711 			break;
2712 	}
2713 
2714 	rcu_read_unlock();
2715 
2716 	return hchan;
2717 }
2718 
/* Report the PHYs/packet types usable on this connection as BT_PHY_* bits.
 *
 * Note: for the EDR (2M/3M) packet types in pkt_type the bits have
 * inverted "shall not be used" semantics, hence the negated tests below,
 * while the basic-rate multi-slot bits (DM3/DH3/DM5/DH5) are positive.
 */
u32 hci_conn_get_phy(struct hci_conn *conn)
{
	u32 phys = 0;

	/* BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 2, Part B page 471:
	 * Table 6.2: Packets defined for synchronous, asynchronous, and
	 * CPB logical transport types.
	 */
	switch (conn->type) {
	case SCO_LINK:
		/* SCO logical transport (1 Mb/s):
		 * HV1, HV2, HV3 and DV.
		 */
		phys |= BT_PHY_BR_1M_1SLOT;

		break;

	case ACL_LINK:
		/* ACL logical transport (1 Mb/s) ptt=0:
		 * DH1, DM3, DH3, DM5 and DH5.
		 */
		phys |= BT_PHY_BR_1M_1SLOT;

		if (conn->pkt_type & (HCI_DM3 | HCI_DH3))
			phys |= BT_PHY_BR_1M_3SLOT;

		if (conn->pkt_type & (HCI_DM5 | HCI_DH5))
			phys |= BT_PHY_BR_1M_5SLOT;

		/* ACL logical transport (2 Mb/s) ptt=1:
		 * 2-DH1, 2-DH3 and 2-DH5.
		 */
		if (!(conn->pkt_type & HCI_2DH1))
			phys |= BT_PHY_EDR_2M_1SLOT;

		if (!(conn->pkt_type & HCI_2DH3))
			phys |= BT_PHY_EDR_2M_3SLOT;

		if (!(conn->pkt_type & HCI_2DH5))
			phys |= BT_PHY_EDR_2M_5SLOT;

		/* ACL logical transport (3 Mb/s) ptt=1:
		 * 3-DH1, 3-DH3 and 3-DH5.
		 */
		if (!(conn->pkt_type & HCI_3DH1))
			phys |= BT_PHY_EDR_3M_1SLOT;

		if (!(conn->pkt_type & HCI_3DH3))
			phys |= BT_PHY_EDR_3M_3SLOT;

		if (!(conn->pkt_type & HCI_3DH5))
			phys |= BT_PHY_EDR_3M_5SLOT;

		break;

	case ESCO_LINK:
		/* eSCO logical transport (1 Mb/s): EV3, EV4 and EV5 */
		phys |= BT_PHY_BR_1M_1SLOT;

		if (!(conn->pkt_type & (ESCO_EV4 | ESCO_EV5)))
			phys |= BT_PHY_BR_1M_3SLOT;

		/* eSCO logical transport (2 Mb/s): 2-EV3, 2-EV5 */
		if (!(conn->pkt_type & ESCO_2EV3))
			phys |= BT_PHY_EDR_2M_1SLOT;

		if (!(conn->pkt_type & ESCO_2EV5))
			phys |= BT_PHY_EDR_2M_3SLOT;

		/* eSCO logical transport (3 Mb/s): 3-EV3, 3-EV5 */
		if (!(conn->pkt_type & ESCO_3EV3))
			phys |= BT_PHY_EDR_3M_1SLOT;

		if (!(conn->pkt_type & ESCO_3EV5))
			phys |= BT_PHY_EDR_3M_3SLOT;

		break;

	case LE_LINK:
		/* LE PHYs are reported per direction from the negotiated
		 * TX and RX PHY masks.
		 */
		if (conn->le_tx_phy & HCI_LE_SET_PHY_1M)
			phys |= BT_PHY_LE_1M_TX;

		if (conn->le_rx_phy & HCI_LE_SET_PHY_1M)
			phys |= BT_PHY_LE_1M_RX;

		if (conn->le_tx_phy & HCI_LE_SET_PHY_2M)
			phys |= BT_PHY_LE_2M_TX;

		if (conn->le_rx_phy & HCI_LE_SET_PHY_2M)
			phys |= BT_PHY_LE_2M_RX;

		if (conn->le_tx_phy & HCI_LE_SET_PHY_CODED)
			phys |= BT_PHY_LE_CODED_TX;

		if (conn->le_rx_phy & HCI_LE_SET_PHY_CODED)
			phys |= BT_PHY_LE_CODED_RX;

		break;
	}

	return phys;
}
2821 
/* Abort a connection according to its current state.
 *
 * BT_CONNECTED/BT_CONFIG: issue a disconnect (a physical link disconnect
 * for AMP links) and move to BT_DISCONN.
 * BT_CONNECT: cancel the outstanding create-connection request.
 * BT_CONNECT2: reject the incoming (sync) connection request.
 * Any other state: just mark the connection closed.
 *
 * The HCI_CONN_CANCEL test-and-set guard makes the function idempotent.
 * Returns 0 or a hci_send_cmd() error.
 */
int hci_abort_conn(struct hci_conn *conn, u8 reason)
{
	int r = 0;

	if (test_and_set_bit(HCI_CONN_CANCEL, &conn->flags))
		return 0;

	switch (conn->state) {
	case BT_CONNECTED:
	case BT_CONFIG:
		if (conn->type == AMP_LINK) {
			struct hci_cp_disconn_phy_link cp;

			cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
			cp.reason = reason;
			r = hci_send_cmd(conn->hdev, HCI_OP_DISCONN_PHY_LINK,
					 sizeof(cp), &cp);
		} else {
			struct hci_cp_disconnect dc;

			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = reason;
			r = hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT,
					 sizeof(dc), &dc);
		}

		conn->state = BT_DISCONN;

		break;
	case BT_CONNECT:
		if (conn->type == LE_LINK) {
			/* Still only passively scanning: nothing to cancel */
			if (test_bit(HCI_CONN_SCANNING, &conn->flags))
				break;
			r = hci_send_cmd(conn->hdev,
					 HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
		} else if (conn->type == ACL_LINK) {
			/* Create Connection Cancel exists only from 1.2 on */
			if (conn->hdev->hci_ver < BLUETOOTH_VER_1_2)
				break;
			r = hci_send_cmd(conn->hdev,
					 HCI_OP_CREATE_CONN_CANCEL,
					 6, &conn->dst);
		}
		break;
	case BT_CONNECT2:
		if (conn->type == ACL_LINK) {
			struct hci_cp_reject_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = reason;

			r = hci_send_cmd(conn->hdev,
					 HCI_OP_REJECT_CONN_REQ,
					 sizeof(rej), &rej);
		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
			struct hci_cp_reject_sync_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);

			/* SCO rejection has its own limited set of
			 * allowed error values (0x0D-0x0F) which isn't
			 * compatible with most values passed to this
			 * function. To be safe hard-code one of the
			 * values that's suitable for SCO.
			 */
			rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;

			r = hci_send_cmd(conn->hdev,
					 HCI_OP_REJECT_SYNC_CONN_REQ,
					 sizeof(rej), &rej);
		}
		break;
	default:
		conn->state = BT_CLOSED;
		break;
	}

	return r;
}
2900