xref: /openbmc/linux/net/bluetooth/hci_conn.c (revision 26afbd826ee326e63a334c37fd45e82e50a615ec)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4 
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI connection handling. */
26 
27 #include <linux/export.h>
28 #include <linux/debugfs.h>
29 
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/l2cap.h>
33 
34 #include "hci_request.h"
35 #include "smp.h"
36 #include "a2mp.h"
37 
/* One candidate parameter set for an (e)SCO setup attempt; the tables
 * below are walked in order until the controller accepts an entry.
 */
struct sco_param {
	u16 pkt_type;		/* allowed packet types (ESCO_* / EDR_ESCO_MASK bits) */
	u16 max_latency;	/* max latency for the setup command; 0xffff = don't care */
	u8  retrans_effort;	/* retransmission effort; 0xff = don't care */
};
43 
/* CVSD over eSCO: settings tried most-preferred first (S3..S1), with
 * SCO-style D1/D0 entries as last resorts.
 */
static const struct sco_param esco_param_cvsd[] = {
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a,	0x01 }, /* S3 */
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007,	0x01 }, /* S2 */
	{ EDR_ESCO_MASK | ESCO_EV3,   0x0007,	0x01 }, /* S1 */
	{ EDR_ESCO_MASK | ESCO_HV3,   0xffff,	0x01 }, /* D1 */
	{ EDR_ESCO_MASK | ESCO_HV1,   0xffff,	0x01 }, /* D0 */
};
51 
/* CVSD over legacy SCO links (controller without eSCO support) */
static const struct sco_param sco_param_cvsd[] = {
	{ EDR_ESCO_MASK | ESCO_HV3,   0xffff,	0xff }, /* D1 */
	{ EDR_ESCO_MASK | ESCO_HV1,   0xffff,	0xff }, /* D0 */
};
56 
/* mSBC / transparent eSCO: T2 preferred, T1 as fallback */
static const struct sco_param esco_param_msbc[] = {
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d,	0x02 }, /* T2 */
	{ EDR_ESCO_MASK | ESCO_EV3,   0x0008,	0x02 }, /* T1 */
};
61 
/* This function requires the caller holds hdev->lock */
static void hci_connect_le_scan_cleanup(struct hci_conn *conn)
{
	struct hci_conn_params *params;
	struct hci_dev *hdev = conn->hdev;
	struct smp_irk *irk;
	bdaddr_t *bdaddr;
	u8 bdaddr_type;

	bdaddr = &conn->dst;
	bdaddr_type = conn->dst_type;

	/* Check if we need to convert to identity address */
	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		bdaddr_type = irk->addr_type;
	}

	/* Only attempts waiting for an explicit connect need cleanup;
	 * anything else on pend_le_conns is unrelated to this conn.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, bdaddr,
					   bdaddr_type);
	if (!params || !params->explicit_connect)
		return;

	/* The connection attempt was doing scan for new RPA, and is
	 * in scan phase. If params are not associated with any other
	 * autoconnect action, remove them completely. If they are, just unmark
	 * them as waiting for connection, by clearing explicit_connect field.
	 */
	params->explicit_connect = false;

	/* Detach from the current action list before re-filing the params
	 * according to the remaining auto_connect policy below.
	 */
	list_del_init(&params->action);

	switch (params->auto_connect) {
	case HCI_AUTO_CONN_EXPLICIT:
		hci_conn_params_del(hdev, bdaddr, bdaddr_type);
		/* return instead of break to avoid duplicate scan update */
		return;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		list_add(&params->action, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		list_add(&params->action, &hdev->pend_le_reports);
		break;
	default:
		break;
	}

	/* Re-sync passive scanning with the updated action lists */
	hci_update_passive_scan(hdev);
}
113 
/* Remove @conn from the connection hash and release its remaining
 * state. Shared by hci_conn_del() and le_scan_cleanup(); the latter
 * uses it for connections that never left the scanning phase.
 */
static void hci_conn_cleanup(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	/* Conn params flagged for removal go away together with the conn */
	if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags))
		hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type);

	if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
		hci_remove_link_key(hdev, &conn->dst);

	hci_chan_list_flush(conn);

	hci_conn_hash_del(hdev, conn);

	/* Link-type specific teardown hook, if one was registered */
	if (conn->cleanup)
		conn->cleanup(conn);

	if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
		switch (conn->setting & SCO_AIRMODE_MASK) {
		case SCO_AIRMODE_CVSD:
		case SCO_AIRMODE_TRANSP:
			if (hdev->notify)
				hdev->notify(hdev, HCI_NOTIFY_DISABLE_SCO);
			break;
		}
	} else {
		if (hdev->notify)
			hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
	}

	hci_conn_del_sysfs(conn);

	debugfs_remove_recursive(conn->debugfs);

	/* Drop the hdev reference taken in hci_conn_add() */
	hci_dev_put(hdev);

	hci_conn_put(conn);
}
152 
/* Deferred teardown of a scanning-phase LE connection, scheduled from
 * hci_connect_le_scan_remove(). The extra hdev/conn references taken
 * there are dropped at the end of this callback.
 */
static void le_scan_cleanup(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     le_scan_cleanup);
	struct hci_dev *hdev = conn->hdev;
	struct hci_conn *c = NULL;

	BT_DBG("%s hcon %p", hdev->name, conn);

	hci_dev_lock(hdev);

	/* Check that the hci_conn is still around */
	rcu_read_lock();
	list_for_each_entry_rcu(c, &hdev->conn_hash.list, list) {
		if (c == conn)
			break;
	}
	rcu_read_unlock();

	/* Only clean up if the conn is still in the hash; otherwise it
	 * was already deleted by someone else while the work was queued.
	 */
	if (c == conn) {
		hci_connect_le_scan_cleanup(conn);
		hci_conn_cleanup(conn);
	}

	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	hci_conn_put(conn);
}
181 
/* Abort an LE connection attempt that is still in the scanning phase,
 * deferring the actual teardown to the le_scan_cleanup work item.
 */
static void hci_connect_le_scan_remove(struct hci_conn *conn)
{
	BT_DBG("%s hcon %p", conn->hdev->name, conn);

	/* We can't call hci_conn_del/hci_conn_cleanup here since that
	 * could deadlock with another hci_conn_del() call that's holding
	 * hci_dev_lock and doing cancel_delayed_work_sync(&conn->disc_work).
	 * Instead, grab temporary extra references to the hci_dev and
	 * hci_conn and perform the necessary cleanup in a separate work
	 * callback.
	 */

	hci_dev_hold(conn->hdev);
	hci_conn_get(conn);

	/* Even though we hold a reference to the hdev, many other
	 * things might get cleaned up meanwhile, including the hdev's
	 * own workqueue, so we can't use that for scheduling.
	 */
	schedule_work(&conn->le_scan_cleanup);
}
203 
/* Issue HCI Create Connection for an outgoing ACL link, cancelling an
 * ongoing inquiry first since many controllers cannot do both at once.
 */
static void hci_acl_create_connection(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct inquiry_entry *ie;
	struct hci_cp_create_conn cp;

	BT_DBG("hcon %p", conn);

	/* Many controllers disallow HCI Create Connection while it is doing
	 * HCI Inquiry. So we cancel the Inquiry first before issuing HCI Create
	 * Connection. This may cause the MGMT discovering state to become false
	 * without user space's request but it is okay since the MGMT Discovery
	 * APIs do not promise that discovery should be done forever. Instead,
	 * the user space monitors the status of MGMT discovering and it may
	 * request for discovery again when this flag becomes false.
	 */
	if (test_bit(HCI_INQUIRY, &hdev->flags)) {
		/* Put this connection to "pending" state so that it will be
		 * executed after the inquiry cancel command complete event.
		 */
		conn->state = BT_CONNECT2;
		hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
		return;
	}

	conn->state = BT_CONNECT;
	conn->out = true;
	conn->role = HCI_ROLE_MASTER;

	conn->attempt++;

	conn->link_policy = hdev->link_policy;

	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.bdaddr, &conn->dst);
	cp.pscan_rep_mode = 0x02;

	/* Prefer fresher page-scan parameters from the inquiry cache */
	ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
	if (ie) {
		if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
			cp.pscan_rep_mode = ie->data.pscan_rep_mode;
			cp.pscan_mode     = ie->data.pscan_mode;
			/* bit 15 (0x8000) flags the clock offset as valid */
			cp.clock_offset   = ie->data.clock_offset |
					    cpu_to_le16(0x8000);
		}

		memcpy(conn->dev_class, ie->data.dev_class, 3);
	}

	cp.pkt_type = cpu_to_le16(conn->pkt_type);
	if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
		cp.role_switch = 0x01;
	else
		cp.role_switch = 0x00;

	hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
}
261 
/* Start disconnecting @conn with HCI error code @reason.
 *
 * Returns the result of hci_abort_conn().
 */
int hci_disconnect(struct hci_conn *conn, __u8 reason)
{
	BT_DBG("hcon %p", conn);

	/* When we are central of an established connection and it enters
	 * the disconnect timeout, then go ahead and try to read the
	 * current clock offset.  Processing of the result is done
	 * within the event handling and hci_clock_offset_evt function.
	 */
	if (conn->type == ACL_LINK && conn->role == HCI_ROLE_MASTER &&
	    (conn->state == BT_CONNECTED || conn->state == BT_CONFIG)) {
		struct hci_dev *hdev = conn->hdev;
		struct hci_cp_read_clock_offset clkoff_cp;

		clkoff_cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_READ_CLOCK_OFFSET, sizeof(clkoff_cp),
			     &clkoff_cp);
	}

	return hci_abort_conn(conn, reason);
}
283 
284 static void hci_add_sco(struct hci_conn *conn, __u16 handle)
285 {
286 	struct hci_dev *hdev = conn->hdev;
287 	struct hci_cp_add_sco cp;
288 
289 	BT_DBG("hcon %p", conn);
290 
291 	conn->state = BT_CONNECT;
292 	conn->out = true;
293 
294 	conn->attempt++;
295 
296 	cp.handle   = cpu_to_le16(handle);
297 	cp.pkt_type = cpu_to_le16(conn->pkt_type);
298 
299 	hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
300 }
301 
302 static bool find_next_esco_param(struct hci_conn *conn,
303 				 const struct sco_param *esco_param, int size)
304 {
305 	for (; conn->attempt <= size; conn->attempt++) {
306 		if (lmp_esco_2m_capable(conn->link) ||
307 		    (esco_param[conn->attempt - 1].pkt_type & ESCO_2EV3))
308 			break;
309 		BT_DBG("hcon %p skipped attempt %d, eSCO 2M not supported",
310 		       conn, conn->attempt);
311 	}
312 
313 	return conn->attempt <= size;
314 }
315 
/* Send HCI Enhanced Setup Synchronous Connection for @conn on top of
 * ACL @handle, filling the command parameters according to the codec
 * negotiated for this connection (conn->codec).
 *
 * Returns false when no further parameter set is available for this
 * attempt or the command could not be sent, true otherwise.
 */
static bool hci_enhanced_setup_sync_conn(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_enhanced_setup_sync_conn cp;
	const struct sco_param *param;

	bt_dev_dbg(hdev, "hcon %p", conn);

	/* for offload use case, codec needs to configured before opening SCO */
	if (conn->codec.data_path)
		hci_req_configure_datapath(hdev, &conn->codec);

	conn->state = BT_CONNECT;
	conn->out = true;

	conn->attempt++;

	memset(&cp, 0x00, sizeof(cp));

	cp.handle   = cpu_to_le16(handle);

	cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
	cp.rx_bandwidth   = cpu_to_le32(0x00001f40);

	switch (conn->codec.id) {
	case BT_CODEC_MSBC:
		if (!find_next_esco_param(conn, esco_param_msbc,
					  ARRAY_SIZE(esco_param_msbc)))
			return false;

		/* Air coding 0x05 = mSBC, host side 0x04 = linear PCM
		 * (Bluetooth assigned-numbers coding format IDs)
		 */
		param = &esco_param_msbc[conn->attempt - 1];
		cp.tx_coding_format.id = 0x05;
		cp.rx_coding_format.id = 0x05;
		cp.tx_codec_frame_size = __cpu_to_le16(60);
		cp.rx_codec_frame_size = __cpu_to_le16(60);
		cp.in_bandwidth = __cpu_to_le32(32000);
		cp.out_bandwidth = __cpu_to_le32(32000);
		cp.in_coding_format.id = 0x04;
		cp.out_coding_format.id = 0x04;
		cp.in_coded_data_size = __cpu_to_le16(16);
		cp.out_coded_data_size = __cpu_to_le16(16);
		cp.in_pcm_data_format = 2;
		cp.out_pcm_data_format = 2;
		cp.in_pcm_sample_payload_msb_pos = 0;
		cp.out_pcm_sample_payload_msb_pos = 0;
		cp.in_data_path = conn->codec.data_path;
		cp.out_data_path = conn->codec.data_path;
		cp.in_transport_unit_size = 1;
		cp.out_transport_unit_size = 1;
		break;

	case BT_CODEC_TRANSPARENT:
		/* Transparent data (coding format 0x03) reuses the mSBC
		 * eSCO link parameter table.
		 */
		if (!find_next_esco_param(conn, esco_param_msbc,
					  ARRAY_SIZE(esco_param_msbc)))
			return false;
		param = &esco_param_msbc[conn->attempt - 1];
		cp.tx_coding_format.id = 0x03;
		cp.rx_coding_format.id = 0x03;
		cp.tx_codec_frame_size = __cpu_to_le16(60);
		cp.rx_codec_frame_size = __cpu_to_le16(60);
		cp.in_bandwidth = __cpu_to_le32(0x1f40);
		cp.out_bandwidth = __cpu_to_le32(0x1f40);
		cp.in_coding_format.id = 0x03;
		cp.out_coding_format.id = 0x03;
		cp.in_coded_data_size = __cpu_to_le16(16);
		cp.out_coded_data_size = __cpu_to_le16(16);
		cp.in_pcm_data_format = 2;
		cp.out_pcm_data_format = 2;
		cp.in_pcm_sample_payload_msb_pos = 0;
		cp.out_pcm_sample_payload_msb_pos = 0;
		cp.in_data_path = conn->codec.data_path;
		cp.out_data_path = conn->codec.data_path;
		cp.in_transport_unit_size = 1;
		cp.out_transport_unit_size = 1;
		break;

	case BT_CODEC_CVSD:
		/* eSCO-capable links use the eSCO table; otherwise fall
		 * back to the legacy SCO parameter table.
		 */
		if (lmp_esco_capable(conn->link)) {
			if (!find_next_esco_param(conn, esco_param_cvsd,
						  ARRAY_SIZE(esco_param_cvsd)))
				return false;
			param = &esco_param_cvsd[conn->attempt - 1];
		} else {
			if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
				return false;
			param = &sco_param_cvsd[conn->attempt - 1];
		}
		/* Air coding 2 = CVSD, host side 4 = linear PCM */
		cp.tx_coding_format.id = 2;
		cp.rx_coding_format.id = 2;
		cp.tx_codec_frame_size = __cpu_to_le16(60);
		cp.rx_codec_frame_size = __cpu_to_le16(60);
		cp.in_bandwidth = __cpu_to_le32(16000);
		cp.out_bandwidth = __cpu_to_le32(16000);
		cp.in_coding_format.id = 4;
		cp.out_coding_format.id = 4;
		cp.in_coded_data_size = __cpu_to_le16(16);
		cp.out_coded_data_size = __cpu_to_le16(16);
		cp.in_pcm_data_format = 2;
		cp.out_pcm_data_format = 2;
		cp.in_pcm_sample_payload_msb_pos = 0;
		cp.out_pcm_sample_payload_msb_pos = 0;
		cp.in_data_path = conn->codec.data_path;
		cp.out_data_path = conn->codec.data_path;
		cp.in_transport_unit_size = 16;
		cp.out_transport_unit_size = 16;
		break;
	default:
		return false;
	}

	cp.retrans_effort = param->retrans_effort;
	cp.pkt_type = __cpu_to_le16(param->pkt_type);
	cp.max_latency = __cpu_to_le16(param->max_latency);

	if (hci_send_cmd(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
		return false;

	return true;
}
435 
/* Send the legacy HCI Setup Synchronous Connection command for @conn
 * on top of ACL @handle, choosing parameters by the SCO air mode.
 *
 * Returns false when no further parameter set is available for this
 * attempt or the command could not be sent, true otherwise.
 */
static bool hci_setup_sync_conn(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_setup_sync_conn cp;
	const struct sco_param *param;

	bt_dev_dbg(hdev, "hcon %p", conn);

	conn->state = BT_CONNECT;
	conn->out = true;

	conn->attempt++;

	cp.handle   = cpu_to_le16(handle);

	cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
	cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
	cp.voice_setting  = cpu_to_le16(conn->setting);

	switch (conn->setting & SCO_AIRMODE_MASK) {
	case SCO_AIRMODE_TRANSP:
		if (!find_next_esco_param(conn, esco_param_msbc,
					  ARRAY_SIZE(esco_param_msbc)))
			return false;
		param = &esco_param_msbc[conn->attempt - 1];
		break;
	case SCO_AIRMODE_CVSD:
		/* eSCO-capable links use the eSCO table; otherwise fall
		 * back to the legacy SCO parameter table.
		 */
		if (lmp_esco_capable(conn->link)) {
			if (!find_next_esco_param(conn, esco_param_cvsd,
						  ARRAY_SIZE(esco_param_cvsd)))
				return false;
			param = &esco_param_cvsd[conn->attempt - 1];
		} else {
			if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
				return false;
			param = &sco_param_cvsd[conn->attempt - 1];
		}
		break;
	default:
		return false;
	}

	cp.retrans_effort = param->retrans_effort;
	cp.pkt_type = __cpu_to_le16(param->pkt_type);
	cp.max_latency = __cpu_to_le16(param->max_latency);

	if (hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
		return false;

	return true;
}
487 
488 bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
489 {
490 	if (enhanced_sync_conn_capable(conn->hdev))
491 		return hci_enhanced_setup_sync_conn(conn, handle);
492 
493 	return hci_setup_sync_conn(conn, handle);
494 }
495 
/* Send HCI LE Connection Update with the given interval, latency and
 * supervision timeout, and cache the values in the stored connection
 * parameters when an entry for this peer exists.
 *
 * Returns 0x01 when stored parameters were updated, 0x00 otherwise.
 */
u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
		      u16 to_multiplier)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_conn_params *params;
	struct hci_cp_le_conn_update cp;

	/* hdev->lock protects the conn_params lookup/update only; the
	 * command itself is sent after dropping the lock.
	 */
	hci_dev_lock(hdev);

	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		params->conn_min_interval = min;
		params->conn_max_interval = max;
		params->conn_latency = latency;
		params->supervision_timeout = to_multiplier;
	}

	hci_dev_unlock(hdev);

	memset(&cp, 0, sizeof(cp));
	cp.handle		= cpu_to_le16(conn->handle);
	cp.conn_interval_min	= cpu_to_le16(min);
	cp.conn_interval_max	= cpu_to_le16(max);
	cp.conn_latency		= cpu_to_le16(latency);
	cp.supervision_timeout	= cpu_to_le16(to_multiplier);
	cp.min_ce_len		= cpu_to_le16(0x0000);
	cp.max_ce_len		= cpu_to_le16(0x0000);

	hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);

	if (params)
		return 0x01;

	return 0x00;
}
531 
532 void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
533 		      __u8 ltk[16], __u8 key_size)
534 {
535 	struct hci_dev *hdev = conn->hdev;
536 	struct hci_cp_le_start_enc cp;
537 
538 	BT_DBG("hcon %p", conn);
539 
540 	memset(&cp, 0, sizeof(cp));
541 
542 	cp.handle = cpu_to_le16(conn->handle);
543 	cp.rand = rand;
544 	cp.ediv = ediv;
545 	memcpy(cp.ltk, ltk, key_size);
546 
547 	hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
548 }
549 
550 /* Device _must_ be locked */
551 void hci_sco_setup(struct hci_conn *conn, __u8 status)
552 {
553 	struct hci_conn *sco = conn->link;
554 
555 	if (!sco)
556 		return;
557 
558 	BT_DBG("hcon %p", conn);
559 
560 	if (!status) {
561 		if (lmp_esco_capable(conn->hdev))
562 			hci_setup_sync(sco, conn->handle);
563 		else
564 			hci_add_sco(sco, conn->handle);
565 	} else {
566 		hci_connect_cfm(sco, status);
567 		hci_conn_del(sco);
568 	}
569 }
570 
/* Disconnect timer: fires once a connection's refcount has stayed at
 * zero for its disconnect timeout and tears the link down.
 */
static void hci_conn_timeout(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     disc_work.work);
	int refcnt = atomic_read(&conn->refcnt);

	BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));

	WARN_ON(refcnt < 0);

	/* FIXME: It was observed that in pairing failed scenario, refcnt
	 * drops below 0. Probably this is because l2cap_conn_del calls
	 * l2cap_chan_del for each channel, and inside l2cap_chan_del conn is
	 * dropped. After that loop hci_chan_del is called which also drops
	 * conn. For now make sure that ACL is alive if refcnt is higher then 0,
	 * otherwise drop it.
	 */
	if (refcnt > 0)
		return;

	/* LE connections in scanning state need special handling */
	if (conn->state == BT_CONNECT && conn->type == LE_LINK &&
	    test_bit(HCI_CONN_SCANNING, &conn->flags)) {
		hci_connect_le_scan_remove(conn);
		return;
	}

	hci_abort_conn(conn, hci_proto_disconn_ind(conn));
}
600 
/* Enter sniff mode: idle timer callback that requests sniff mode for
 * an inactive ACL link (and sniff subrating first, when both sides
 * support it).
 */
static void hci_conn_idle(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     idle_work.work);
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("hcon %p mode %d", conn, conn->mode);

	/* Both controller and remote must be sniff capable */
	if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
		return;

	/* Bail out if already in a low-power mode or sniff is disallowed
	 * by the link policy.
	 */
	if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
		return;

	if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
		struct hci_cp_sniff_subrate cp;
		cp.handle             = cpu_to_le16(conn->handle);
		cp.max_latency        = cpu_to_le16(0);
		cp.min_remote_timeout = cpu_to_le16(0);
		cp.min_local_timeout  = cpu_to_le16(0);
		hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
	}

	/* Only one mode-change request may be in flight at a time */
	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
		struct hci_cp_sniff_mode cp;
		cp.handle       = cpu_to_le16(conn->handle);
		cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
		cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
		cp.attempt      = cpu_to_le16(4);
		cp.timeout      = cpu_to_le16(1);
		hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
	}
}
635 
636 static void hci_conn_auto_accept(struct work_struct *work)
637 {
638 	struct hci_conn *conn = container_of(work, struct hci_conn,
639 					     auto_accept_work.work);
640 
641 	hci_send_cmd(conn->hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
642 		     &conn->dst);
643 }
644 
645 static void le_disable_advertising(struct hci_dev *hdev)
646 {
647 	if (ext_adv_capable(hdev)) {
648 		struct hci_cp_le_set_ext_adv_enable cp;
649 
650 		cp.enable = 0x00;
651 		cp.num_of_sets = 0x00;
652 
653 		hci_send_cmd(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp),
654 			     &cp);
655 	} else {
656 		u8 enable = 0x00;
657 		hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
658 			     &enable);
659 	}
660 }
661 
/* LE connection-attempt timeout: give up on establishing the link */
static void le_conn_timeout(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     le_conn_timeout.work);
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("");

	/* We could end up here due to having done directed advertising,
	 * so clean up the state if necessary. This should however only
	 * happen with broken hardware or if low duty cycle was used
	 * (which doesn't have a timeout of its own).
	 */
	if (conn->role == HCI_ROLE_SLAVE) {
		/* Disable LE Advertising */
		le_disable_advertising(hdev);
		hci_dev_lock(hdev);
		hci_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
		hci_dev_unlock(hdev);
		return;
	}

	/* As central, cancel the pending connection attempt */
	hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
}
686 
/* Allocate and initialize a new hci_conn of @type to @dst with the
 * given @role and add it to the device's connection hash. Takes a
 * reference on @hdev that is released in hci_conn_cleanup().
 *
 * Returns the new connection, or NULL on allocation failure.
 */
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
			      u8 role)
{
	struct hci_conn *conn;

	BT_DBG("%s dst %pMR", hdev->name, dst);

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn)
		return NULL;

	bacpy(&conn->dst, dst);
	bacpy(&conn->src, &hdev->bdaddr);
	/* Real handle is assigned once the controller confirms the conn */
	conn->handle = HCI_CONN_HANDLE_UNSET;
	conn->hdev  = hdev;
	conn->type  = type;
	conn->role  = role;
	conn->mode  = HCI_CM_ACTIVE;
	conn->state = BT_OPEN;
	conn->auth_type = HCI_AT_GENERAL_BONDING;
	conn->io_capability = hdev->io_capability;
	conn->remote_auth = 0xff;
	conn->key_type = 0xff;
	conn->rssi = HCI_RSSI_INVALID;
	conn->tx_power = HCI_TX_POWER_INVALID;
	conn->max_tx_power = HCI_TX_POWER_INVALID;

	set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;

	/* Set Default Authenticated payload timeout to 30s */
	conn->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;

	if (conn->role == HCI_ROLE_MASTER)
		conn->out = true;

	/* Link-type specific setup of packet types / source address */
	switch (type) {
	case ACL_LINK:
		conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
		break;
	case LE_LINK:
	case ISO_LINK:
		/* conn->src should reflect the local identity address */
		hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
		break;
	case SCO_LINK:
		if (lmp_esco_capable(hdev))
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
		else
			conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
		break;
	case ESCO_LINK:
		conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
		break;
	}

	skb_queue_head_init(&conn->data_q);

	INIT_LIST_HEAD(&conn->chan_list);

	INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
	INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
	INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
	INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);
	INIT_WORK(&conn->le_scan_cleanup, le_scan_cleanup);

	atomic_set(&conn->refcnt, 0);

	/* Released by hci_conn_cleanup() */
	hci_dev_hold(hdev);

	hci_conn_hash_add(hdev, conn);

	/* The SCO and eSCO connections will only be notified when their
	 * setup has been completed. This is different to ACL links which
	 * can be notified right away.
	 */
	if (conn->type != SCO_LINK && conn->type != ESCO_LINK) {
		if (hdev->notify)
			hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);
	}

	hci_conn_init_sysfs(conn);

	return conn;
}
773 
/* Fully tear down @conn: stop its delayed work, return unacked packet
 * credits to the device, unlink any peer SCO/ACL connection and free
 * the remaining state via hci_conn_cleanup().
 *
 * Always returns 0.
 */
int hci_conn_del(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);

	cancel_delayed_work_sync(&conn->disc_work);
	cancel_delayed_work_sync(&conn->auto_accept_work);
	cancel_delayed_work_sync(&conn->idle_work);

	if (conn->type == ACL_LINK) {
		/* Detach the SCO/eSCO link riding on this ACL, if any */
		struct hci_conn *sco = conn->link;
		if (sco)
			sco->link = NULL;

		/* Unacked frames */
		hdev->acl_cnt += conn->sent;
	} else if (conn->type == LE_LINK) {
		cancel_delayed_work(&conn->le_conn_timeout);

		/* LE traffic may share the ACL buffer pool when the
		 * controller has no dedicated LE buffers.
		 */
		if (hdev->le_pkts)
			hdev->le_cnt += conn->sent;
		else
			hdev->acl_cnt += conn->sent;
	} else {
		/* SCO/eSCO: drop our hold on the parent ACL link */
		struct hci_conn *acl = conn->link;
		if (acl) {
			acl->link = NULL;
			hci_conn_drop(acl);
		}
	}

	if (conn->amp_mgr)
		amp_mgr_put(conn->amp_mgr);

	skb_queue_purge(&conn->data_q);

	/* Remove the connection from the list and cleanup its remaining
	 * state. This is a separate function since for some cases like
	 * BT_CONNECT_SCAN we *only* want the cleanup part without the
	 * rest of hci_conn_del.
	 */
	hci_conn_cleanup(conn);

	return 0;
}
820 
/* Find a powered-up primary controller for routing a connection from
 * @src (or any interface when @src is BDADDR_ANY) towards @dst.
 *
 * Returns a reference-counted hdev (caller must hci_dev_put()) or NULL
 * when no suitable controller exists.
 */
struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, uint8_t src_type)
{
	int use_src = bacmp(src, BDADDR_ANY);
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%pMR -> %pMR", src, dst);

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(d, &hci_dev_list, list) {
		/* Skip devices that are down, claimed by user channel,
		 * or not primary controllers.
		 */
		if (!test_bit(HCI_UP, &d->flags) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL) ||
		    d->dev_type != HCI_PRIMARY)
			continue;

		/* Simple routing:
		 *   No source address - find interface with bdaddr != dst
		 *   Source address    - find interface with bdaddr == src
		 */

		if (use_src) {
			bdaddr_t id_addr;
			u8 id_addr_type;

			if (src_type == BDADDR_BREDR) {
				if (!lmp_bredr_capable(d))
					continue;
				bacpy(&id_addr, &d->bdaddr);
				id_addr_type = BDADDR_BREDR;
			} else {
				if (!lmp_le_capable(d))
					continue;

				hci_copy_identity_address(d, &id_addr,
							  &id_addr_type);

				/* Convert from HCI to three-value type */
				if (id_addr_type == ADDR_LE_DEV_PUBLIC)
					id_addr_type = BDADDR_LE_PUBLIC;
				else
					id_addr_type = BDADDR_LE_RANDOM;
			}

			if (!bacmp(&id_addr, src) && id_addr_type == src_type) {
				hdev = d; break;
			}
		} else {
			if (bacmp(&d->bdaddr, dst)) {
				hdev = d; break;
			}
		}
	}

	/* Hold a reference while still under the list lock */
	if (hdev)
		hdev = hci_dev_hold(hdev);

	read_unlock(&hci_dev_list_lock);
	return hdev;
}
EXPORT_SYMBOL(hci_get_route);
881 
/* This function requires the caller holds hdev->lock */
static void hci_le_conn_failed(struct hci_conn *conn, u8 status)
{
	/* Handle the LE-specific side of a failed connection attempt:
	 * release the params' conn reference, notify userspace where
	 * appropriate and restart passive scanning / advertising.
	 */
	struct hci_dev *hdev = conn->hdev;
	struct hci_conn_params *params;

	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
					   conn->dst_type);
	if (params && params->conn) {
		hci_conn_drop(params->conn);
		hci_conn_put(params->conn);
		params->conn = NULL;
	}

	/* If the status indicates successful cancellation of
	 * the attempt (i.e. Unknown Connection Id) there's no point of
	 * notifying failure since we'll go back to keep trying to
	 * connect. The only exception is explicit connect requests
	 * where a timeout + cancel does indicate an actual failure.
	 */
	if (status != HCI_ERROR_UNKNOWN_CONN_ID ||
	    (params && params->explicit_connect))
		mgmt_connect_failed(hdev, &conn->dst, conn->type,
				    conn->dst_type, status);

	/* Since we may have temporarily stopped the background scanning in
	 * favor of connection establishment, we should restart it.
	 */
	hci_update_passive_scan(hdev);

	/* Enable advertising in case this was a failed connection
	 * attempt as a peripheral.
	 */
	hci_enable_advertising(hdev);
}
917 
918 /* This function requires the caller holds hdev->lock */
919 void hci_conn_failed(struct hci_conn *conn, u8 status)
920 {
921 	struct hci_dev *hdev = conn->hdev;
922 
923 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
924 
925 	switch (conn->type) {
926 	case LE_LINK:
927 		hci_le_conn_failed(conn, status);
928 		break;
929 	case ACL_LINK:
930 		mgmt_connect_failed(hdev, &conn->dst, conn->type,
931 				    conn->dst_type, status);
932 		break;
933 	}
934 
935 	conn->state = BT_CLOSED;
936 	hci_connect_cfm(conn, status);
937 	hci_conn_del(conn);
938 }
939 
/* Completion callback for the queued hci_connect_le_sync request */
static void create_le_conn_complete(struct hci_dev *hdev, void *data, int err)
{
	struct hci_conn *conn = data;

	hci_dev_lock(hdev);

	/* On success only the pending-scan bookkeeping needs cleanup */
	if (!err) {
		hci_connect_le_scan_cleanup(conn);
		goto done;
	}

	bt_dev_err(hdev, "request failed to create LE connection: err %d", err);

	/* Check if connection is still pending */
	if (conn != hci_lookup_le_connect(hdev))
		goto done;

	hci_conn_failed(conn, bt_status(err));

done:
	hci_dev_unlock(hdev);
}
962 
/* hci_cmd_sync callback: issue the actual LE create connection */
static int hci_connect_le_sync(struct hci_dev *hdev, void *data)
{
	struct hci_conn *conn = data;

	bt_dev_dbg(hdev, "conn %p", conn);

	return hci_le_create_conn_sync(hdev, conn);
}
971 
/* Initiate an LE connection to @dst (of @dst_type, with @dst_resolved
 * indicating the controller already resolved the address), queueing
 * the create-connection request on the cmd_sync machinery.
 *
 * Returns the hci_conn — newly allocated and held, or a reused
 * scanning-state one — or an ERR_PTR on failure.
 */
struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
				u8 dst_type, bool dst_resolved, u8 sec_level,
				u16 conn_timeout, u8 role)
{
	struct hci_conn *conn;
	struct smp_irk *irk;
	int err;

	/* Let's make sure that le is enabled.*/
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (lmp_le_capable(hdev))
			return ERR_PTR(-ECONNREFUSED);

		return ERR_PTR(-EOPNOTSUPP);
	}

	/* Since the controller supports only one LE connection attempt at a
	 * time, we return -EBUSY if there is any connection attempt running.
	 */
	if (hci_lookup_le_connect(hdev))
		return ERR_PTR(-EBUSY);

	/* If there's already a connection object but it's not in
	 * scanning state it means it must already be established, in
	 * which case we can't do anything else except report a failure
	 * to connect.
	 */
	conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
	if (conn && !test_bit(HCI_CONN_SCANNING, &conn->flags)) {
		return ERR_PTR(-EBUSY);
	}

	/* Check if the destination address has been resolved by the controller
	 * since if it did then the identity address shall be used.
	 */
	if (!dst_resolved) {
		/* When given an identity address with existing identity
		 * resolving key, the connection needs to be established
		 * to a resolvable random address.
		 *
		 * Storing the resolvable random address is required here
		 * to handle connection failures. The address will later
		 * be resolved back into the original identity address
		 * from the connect request.
		 */
		irk = hci_find_irk_by_addr(hdev, dst, dst_type);
		if (irk && bacmp(&irk->rpa, BDADDR_ANY)) {
			dst = &irk->rpa;
			dst_type = ADDR_LE_DEV_RANDOM;
		}
	}

	/* Reuse the scanning-state conn if one exists, otherwise create
	 * and hold a fresh one.
	 */
	if (conn) {
		bacpy(&conn->dst, dst);
	} else {
		conn = hci_conn_add(hdev, LE_LINK, dst, role);
		if (!conn)
			return ERR_PTR(-ENOMEM);
		hci_conn_hold(conn);
		conn->pending_sec_level = sec_level;
	}

	conn->dst_type = dst_type;
	conn->sec_level = BT_SECURITY_LOW;
	conn->conn_timeout = conn_timeout;

	conn->state = BT_CONNECT;
	clear_bit(HCI_CONN_SCANNING, &conn->flags);

	err = hci_cmd_sync_queue(hdev, hci_connect_le_sync, conn,
				 create_le_conn_complete);
	if (err) {
		hci_conn_del(conn);
		return ERR_PTR(err);
	}

	return conn;
}
1050 
1051 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
1052 {
1053 	struct hci_conn *conn;
1054 
1055 	conn = hci_conn_hash_lookup_le(hdev, addr, type);
1056 	if (!conn)
1057 		return false;
1058 
1059 	if (conn->state != BT_CONNECTED)
1060 		return false;
1061 
1062 	return true;
1063 }
1064 
/* This function requires the caller holds hdev->lock */
/* Ensure connection parameters exist for @addr/@addr_type and are
 * queued on hdev->pend_le_conns so passive scanning will attempt the
 * connection. Marks the params as an explicit connect request.
 *
 * Returns 0 on success, -EISCONN if already connected, -ENOMEM if the
 * params could not be allocated.
 */
static int hci_explicit_conn_params_set(struct hci_dev *hdev,
					bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	/* Nothing to set up if the device is already connected */
	if (is_connected(hdev, addr, addr_type))
		return -EISCONN;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (!params) {
		params = hci_conn_params_add(hdev, addr, addr_type);
		if (!params)
			return -ENOMEM;

		/* If we created new params, mark them to be deleted in
		 * hci_connect_le_scan_cleanup. It's different case than
		 * existing disabled params, those will stay after cleanup.
		 */
		params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
	}

	/* We're trying to connect, so make sure params are at pend_le_conns */
	if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
	    params->auto_connect == HCI_AUTO_CONN_REPORT ||
	    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
		list_del_init(&params->action);
		list_add(&params->action, &hdev->pend_le_conns);
	}

	params->explicit_connect = true;

	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
	       params->auto_connect);

	return 0;
}
1102 
/* This function requires the caller holds hdev->lock */
/* Create an LE connection via passive scanning: the connection object
 * is put in BT_CONNECT/HCI_CONN_SCANNING state and the actual connect
 * happens when the device is seen by the background scan.
 *
 * Returns the hci_conn with a reference held (both for reused and for
 * newly created objects), or an ERR_PTR() on failure.
 */
struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
				     u8 dst_type, u8 sec_level,
				     u16 conn_timeout,
				     enum conn_reasons conn_reason)
{
	struct hci_conn *conn;

	/* Let's make sure that le is enabled.*/
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (lmp_le_capable(hdev))
			return ERR_PTR(-ECONNREFUSED);

		return ERR_PTR(-EOPNOTSUPP);
	}

	/* Some devices send ATT messages as soon as the physical link is
	 * established. To be able to handle these ATT messages, the user-
	 * space first establishes the connection and then starts the pairing
	 * process.
	 *
	 * So if a hci_conn object already exists for the following connection
	 * attempt, we simply update pending_sec_level and auth_type fields
	 * and return the object found.
	 */
	conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
	if (conn) {
		if (conn->pending_sec_level < sec_level)
			conn->pending_sec_level = sec_level;
		goto done;
	}

	BT_DBG("requesting refresh of dst_addr");

	conn = hci_conn_add(hdev, LE_LINK, dst, HCI_ROLE_MASTER);
	if (!conn)
		return ERR_PTR(-ENOMEM);

	/* Queue the address on pend_le_conns; failure means we cannot
	 * schedule the connect attempt, so drop the fresh object again.
	 */
	if (hci_explicit_conn_params_set(hdev, dst, dst_type) < 0) {
		hci_conn_del(conn);
		return ERR_PTR(-EBUSY);
	}

	conn->state = BT_CONNECT;
	set_bit(HCI_CONN_SCANNING, &conn->flags);
	conn->dst_type = dst_type;
	conn->sec_level = BT_SECURITY_LOW;
	conn->pending_sec_level = sec_level;
	conn->conn_timeout = conn_timeout;
	conn->conn_reason = conn_reason;

	/* Kick the background scan so the new pend_le_conns entry is
	 * picked up.
	 */
	hci_update_passive_scan(hdev);

done:
	hci_conn_hold(conn);
	return conn;
}
1160 
/* Create (or reuse) a BR/EDR ACL connection to @dst.
 *
 * Returns the ACL hci_conn with a reference held, or an ERR_PTR() when
 * BR/EDR is disabled (-ECONNREFUSED) or unsupported (-EOPNOTSUPP), or
 * when allocation fails (-ENOMEM). The Create Connection command is
 * only issued when the connection is not already being established.
 */
struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
				 u8 sec_level, u8 auth_type,
				 enum conn_reasons conn_reason)
{
	struct hci_conn *acl;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (lmp_bredr_capable(hdev))
			return ERR_PTR(-ECONNREFUSED);

		return ERR_PTR(-EOPNOTSUPP);
	}

	acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
	if (!acl) {
		acl = hci_conn_add(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
		if (!acl)
			return ERR_PTR(-ENOMEM);
	}

	hci_conn_hold(acl);

	acl->conn_reason = conn_reason;
	/* Only (re)start connection establishment for fresh or closed
	 * objects; anything else is already in progress or connected.
	 */
	if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
		acl->sec_level = BT_SECURITY_LOW;
		acl->pending_sec_level = sec_level;
		acl->auth_type = auth_type;
		hci_acl_create_connection(acl);
	}

	return acl;
}
1193 
/* Create a SCO/eSCO connection to @dst on top of an ACL link.
 *
 * First establishes (or reuses) the underlying ACL connection, then
 * links a SCO object to it. The synchronous connection setup itself is
 * deferred until the ACL is connected and, if a mode change is pending,
 * until that completes (see HCI_CONN_SCO_SETUP_PEND).
 *
 * Returns the SCO hci_conn with a reference held, or an ERR_PTR().
 */
struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
				 __u16 setting, struct bt_codec *codec)
{
	struct hci_conn *acl;
	struct hci_conn *sco;

	acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING,
			      CONN_REASON_SCO_CONNECT);
	if (IS_ERR(acl))
		return acl;

	sco = hci_conn_hash_lookup_ba(hdev, type, dst);
	if (!sco) {
		sco = hci_conn_add(hdev, type, dst, HCI_ROLE_MASTER);
		if (!sco) {
			/* Release the reference taken by hci_connect_acl() */
			hci_conn_drop(acl);
			return ERR_PTR(-ENOMEM);
		}
	}

	/* Cross-link ACL and SCO objects */
	acl->link = sco;
	sco->link = acl;

	hci_conn_hold(sco);

	sco->setting = setting;
	sco->codec = *codec;

	if (acl->state == BT_CONNECTED &&
	    (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
		/* Leave sniff mode before setting up the sync link */
		set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
		hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);

		if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
			/* defer SCO setup until mode change completed */
			set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
			return sco;
		}

		hci_sco_setup(acl, 0x00);
	}

	return sco;
}
1238 
/* Scratch data passed through hci_conn_hash_list_state() callbacks when
 * matching/collecting ISO (CIS) connections and building the LE Set CIG
 * Parameters PDU.
 */
struct iso_list_data {
	u8  cig;	/* CIG ID to match */
	u8  cis;	/* CIS ID to match (0xff used as wildcard by callers) */
	int count;	/* number of matching connections found */
	struct {
		struct hci_cp_le_set_cig_params cp;
		/* room for up to 0x11 CIS entries, matching the
		 * 0x00-0x10 CIS ID scan in hci_le_set_cig_params()
		 */
		struct hci_cis_params cis[0x11];
	} pdu;
};
1248 
/* Append one CIS entry built from @qos to the pending LE Set CIG
 * Parameters PDU in @d and bump its num_cis count. Callers are expected
 * to check for space (see cis_list()); no bounds check happens here.
 */
static void cis_add(struct iso_list_data *d, struct bt_iso_qos *qos)
{
	struct hci_cis_params *cis = &d->pdu.cis[d->pdu.cp.num_cis];

	cis->cis_id = qos->cis;
	cis->c_sdu  = cpu_to_le16(qos->out.sdu);
	cis->p_sdu  = cpu_to_le16(qos->in.sdu);
	cis->c_phy  = qos->out.phy;
	cis->p_phy  = qos->in.phy;
	cis->c_rtn  = qos->out.rtn;
	cis->p_rtn  = qos->in.rtn;

	d->pdu.cp.num_cis++;
}
1263 
/* hci_conn_hash_list_state() callback: count connections whose CIG and
 * CIS IDs match the keys in @data and collect their QoS into the
 * pending Set CIG Parameters PDU.
 *
 * NOTE(review): when d->cis == BT_ISO_QOS_CIS_UNSET this returns before
 * counting anything. The free-CIG scan in hci_le_set_cig_params() sets
 * data.cis = 0xff (presumably BT_ISO_QOS_CIS_UNSET) intending to match
 * any CIS in the CIG; with this early return that scan would count
 * nothing. Confirm this is the intended matching behavior.
 */
static void cis_list(struct hci_conn *conn, void *data)
{
	struct iso_list_data *d = data;

	if (d->cig != conn->iso_qos.cig || d->cis == BT_ISO_QOS_CIS_UNSET ||
	    d->cis != conn->iso_qos.cis)
		return;

	d->count++;

	/* Only collect entries when building a real CIG PDU and while
	 * there is room left in the CIS array.
	 */
	if (d->pdu.cp.cig_id == BT_ISO_QOS_CIG_UNSET ||
	    d->count >= ARRAY_SIZE(d->pdu.cis))
		return;

	cis_add(d, &conn->iso_qos);
}
1280 
/* Build and send the LE Set CIG Parameters command for @conn using
 * @qos. Allocates a free CIG ID (scanning 0x00-0xfe) when the QoS has
 * none set, allocates a free CIS ID (0x00-0x10) when needed, and
 * collects all CIS belonging to the same CIG into one command.
 *
 * May update qos->cig and qos->cis with the allocated IDs.
 * Returns true if the command was queued, false on any failure.
 */
static bool hci_le_set_cig_params(struct hci_conn *conn, struct bt_iso_qos *qos)
{
	struct hci_dev *hdev = conn->hdev;
	struct iso_list_data data;

	memset(&data, 0, sizeof(data));

	/* Allocate a CIG if not set */
	if (qos->cig == BT_ISO_QOS_CIG_UNSET) {
		for (data.cig = 0x00; data.cig < 0xff; data.cig++) {
			data.count = 0;
			data.cis = 0xff;

			/* A CIG is free when no bound or connected ISO
			 * link is using its ID.
			 */
			hci_conn_hash_list_state(hdev, cis_list, ISO_LINK,
						 BT_BOUND, &data);
			if (data.count)
				continue;

			hci_conn_hash_list_state(hdev, cis_list, ISO_LINK,
						 BT_CONNECTED, &data);
			if (!data.count)
				break;
		}

		if (data.cig == 0xff)
			return false;

		/* Update CIG */
		qos->cig = data.cig;
	}

	data.pdu.cp.cig_id = qos->cig;
	hci_cpu_to_le24(qos->out.interval, data.pdu.cp.c_interval);
	hci_cpu_to_le24(qos->in.interval, data.pdu.cp.p_interval);
	data.pdu.cp.sca = qos->sca;
	data.pdu.cp.packing = qos->packing;
	data.pdu.cp.framing = qos->framing;
	data.pdu.cp.c_latency = cpu_to_le16(qos->out.latency);
	data.pdu.cp.p_latency = cpu_to_le16(qos->in.latency);

	if (qos->cis != BT_ISO_QOS_CIS_UNSET) {
		data.count = 0;
		data.cig = qos->cig;
		data.cis = qos->cis;

		/* Reject a CIS ID already bound in this CIG */
		hci_conn_hash_list_state(hdev, cis_list, ISO_LINK, BT_BOUND,
					 &data);
		if (data.count)
			return false;

		cis_add(&data, qos);
	}

	/* Reprogram all CIS(s) with the same CIG */
	for (data.cig = qos->cig, data.cis = 0x00; data.cis < 0x11;
	     data.cis++) {
		data.count = 0;

		hci_conn_hash_list_state(hdev, cis_list, ISO_LINK, BT_BOUND,
					 &data);
		if (data.count)
			continue;

		/* Allocate a CIS if not set */
		if (qos->cis == BT_ISO_QOS_CIS_UNSET) {
			/* Update CIS */
			qos->cis = data.cis;
			cis_add(&data, qos);
		}
	}

	if (qos->cis == BT_ISO_QOS_CIS_UNSET || !data.pdu.cp.num_cis)
		return false;

	if (hci_send_cmd(hdev, HCI_OP_LE_SET_CIG_PARAMS,
			 sizeof(data.pdu.cp) +
			 (data.pdu.cp.num_cis * sizeof(*data.pdu.cis)),
			 &data.pdu) < 0)
		return false;

	return true;
}
1363 
1364 static void find_cis(struct hci_conn *conn, void *data)
1365 {
1366 	struct iso_list_data *d = data;
1367 
1368 	/* Ignore broadcast */
1369 	if (!bacmp(&conn->dst, BDADDR_ANY))
1370 		return;
1371 
1372 	d->count++;
1373 }
1374 
1375 static int remove_cig_sync(struct hci_dev *hdev, void *data)
1376 {
1377 	u8 handle = PTR_ERR(data);
1378 
1379 	return hci_le_remove_cig_sync(hdev, handle);
1380 }
1381 
1382 int hci_le_remove_cig(struct hci_dev *hdev, u8 handle)
1383 {
1384 	bt_dev_dbg(hdev, "handle 0x%2.2x", handle);
1385 
1386 	return hci_cmd_sync_queue(hdev, remove_cig_sync, ERR_PTR(handle), NULL);
1387 }
1388 
/* hci_conn cleanup callback for CIS connections: when the last CIS of a
 * CIG goes away, remove the CIG from the controller as well.
 */
static void cis_cleanup(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct iso_list_data d;

	memset(&d, 0, sizeof(d));
	d.cig = conn->iso_qos.cig;

	/* Check if ISO connection is a CIS and remove CIG if there are
	 * no other connections using it.
	 */
	hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_CONNECTED, &d);
	if (d.count)
		return;

	hci_le_remove_cig(hdev, conn->iso_qos.cig);
}
1406 
/* Bind a CIS connection object for @dst with the given @qos and program
 * the CIG parameters on the controller.
 *
 * Missing QoS fields are filled in by mirroring the opposite direction
 * (interval, latency, PHY) since the spec does not allow them to be
 * zero; @qos may therefore be modified. Returns the CIS hci_conn in
 * BT_BOUND state, the existing object if already connected or bound
 * with identical settings, or an ERR_PTR() on failure.
 */
struct hci_conn *hci_bind_cis(struct hci_dev *hdev, bdaddr_t *dst,
			      __u8 dst_type, struct bt_iso_qos *qos)
{
	struct hci_conn *cis;

	cis = hci_conn_hash_lookup_cis(hdev, dst, dst_type);
	if (!cis) {
		cis = hci_conn_add(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
		if (!cis)
			return ERR_PTR(-ENOMEM);
		cis->cleanup = cis_cleanup;
	}

	if (cis->state == BT_CONNECTED)
		return cis;

	/* Check if CIS has been set and the settings matches */
	if (cis->state == BT_BOUND &&
	    !memcmp(&cis->iso_qos, qos, sizeof(*qos)))
		return cis;

	/* Update LINK PHYs according to QoS preference */
	cis->le_tx_phy = qos->out.phy;
	cis->le_rx_phy = qos->in.phy;

	/* If output interval is not set use the input interval as it cannot be
	 * 0x000000.
	 */
	if (!qos->out.interval)
		qos->out.interval = qos->in.interval;

	/* If input interval is not set use the output interval as it cannot be
	 * 0x000000.
	 */
	if (!qos->in.interval)
		qos->in.interval = qos->out.interval;

	/* If output latency is not set use the input latency as it cannot be
	 * 0x0000.
	 */
	if (!qos->out.latency)
		qos->out.latency = qos->in.latency;

	/* If input latency is not set use the output latency as it cannot be
	 * 0x0000.
	 */
	if (!qos->in.latency)
		qos->in.latency = qos->out.latency;

	/* Mirror PHYs that are disabled as SDU will be set to 0 */
	if (!qos->in.phy)
		qos->in.phy = qos->out.phy;

	if (!qos->out.phy)
		qos->out.phy = qos->in.phy;

	/* Program the CIG; on failure drop the reference this binding
	 * would otherwise have kept.
	 */
	if (!hci_le_set_cig_params(cis, qos)) {
		hci_conn_drop(cis);
		return ERR_PTR(-EINVAL);
	}

	cis->iso_qos = *qos;
	cis->state = BT_BOUND;

	return cis;
}
1473 
1474 bool hci_iso_setup_path(struct hci_conn *conn)
1475 {
1476 	struct hci_dev *hdev = conn->hdev;
1477 	struct hci_cp_le_setup_iso_path cmd;
1478 
1479 	memset(&cmd, 0, sizeof(cmd));
1480 
1481 	if (conn->iso_qos.out.sdu) {
1482 		cmd.handle = cpu_to_le16(conn->handle);
1483 		cmd.direction = 0x00; /* Input (Host to Controller) */
1484 		cmd.path = 0x00; /* HCI path if enabled */
1485 		cmd.codec = 0x03; /* Transparent Data */
1486 
1487 		if (hci_send_cmd(hdev, HCI_OP_LE_SETUP_ISO_PATH, sizeof(cmd),
1488 				 &cmd) < 0)
1489 			return false;
1490 	}
1491 
1492 	if (conn->iso_qos.in.sdu) {
1493 		cmd.handle = cpu_to_le16(conn->handle);
1494 		cmd.direction = 0x01; /* Output (Controller to Host) */
1495 		cmd.path = 0x00; /* HCI path if enabled */
1496 		cmd.codec = 0x03; /* Transparent Data */
1497 
1498 		if (hci_send_cmd(hdev, HCI_OP_LE_SETUP_ISO_PATH, sizeof(cmd),
1499 				 &cmd) < 0)
1500 			return false;
1501 	}
1502 
1503 	return true;
1504 }
1505 
/* hci_cmd_sync_queue() callback: send one LE Create CIS command that
 * covers @data (a CIS hci_conn) plus every other CIS of the same CIG
 * currently in BT_CONNECT state, since the spec requires them to be
 * grouped in a single command.
 *
 * Returns 0 when any CIS of the CIG is not ready yet (the command will
 * have to be issued later), otherwise the hci_send_cmd() result.
 */
static int hci_create_cis_sync(struct hci_dev *hdev, void *data)
{
	struct {
		struct hci_cp_le_create_cis cp;
		struct hci_cis cis[0x1f];
	} cmd;
	struct hci_conn *conn = data;
	u8 cig;

	/* Start with the triggering CIS as entry 0 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.cis[0].acl_handle = cpu_to_le16(conn->link->handle);
	cmd.cis[0].cis_handle = cpu_to_le16(conn->handle);
	cmd.cp.num_cis++;
	cig = conn->iso_qos.cig;

	hci_dev_lock(hdev);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
		struct hci_cis *cis = &cmd.cis[cmd.cp.num_cis];

		/* Skip the triggering CIS, non-ISO links, already
		 * established CIS and other CIGs.
		 */
		if (conn == data || conn->type != ISO_LINK ||
		    conn->state == BT_CONNECTED || conn->iso_qos.cig != cig)
			continue;

		/* Check if all CIS(s) belonging to a CIG are ready */
		if (conn->link->state != BT_CONNECTED ||
		    conn->state != BT_CONNECT) {
			cmd.cp.num_cis = 0;
			break;
		}

		/* Group all CIS with state BT_CONNECT since the spec don't
		 * allow to send them individually:
		 *
		 * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
		 * page 2566:
		 *
		 * If the Host issues this command before all the
		 * HCI_LE_CIS_Established events from the previous use of the
		 * command have been generated, the Controller shall return the
		 * error code Command Disallowed (0x0C).
		 */
		cis->acl_handle = cpu_to_le16(conn->link->handle);
		cis->cis_handle = cpu_to_le16(conn->handle);
		cmd.cp.num_cis++;
	}

	rcu_read_unlock();

	hci_dev_unlock(hdev);

	/* num_cis was reset above: some CIS not ready, try again later */
	if (!cmd.cp.num_cis)
		return 0;

	return hci_send_cmd(hdev, HCI_OP_LE_CREATE_CIS, sizeof(cmd.cp) +
			    sizeof(cmd.cis[0]) * cmd.cp.num_cis, &cmd);
}
1565 
/* Queue the LE Create CIS procedure for @conn, which may be either the
 * CIS itself (ISO_LINK) or the underlying LE ACL (LE_LINK) whose linked
 * CIS should be created.
 *
 * Returns 0 on success or if a create is already in flight, -EINVAL for
 * unusable connection types/states, or the hci_cmd_sync_queue() error.
 */
int hci_le_create_cis(struct hci_conn *conn)
{
	struct hci_conn *cis;
	struct hci_dev *hdev = conn->hdev;
	int err;

	switch (conn->type) {
	case LE_LINK:
		/* The ACL must be connected and linked to a CIS */
		if (!conn->link || conn->state != BT_CONNECTED)
			return -EINVAL;
		cis = conn->link;
		break;
	case ISO_LINK:
		cis = conn;
		break;
	default:
		return -EINVAL;
	}

	/* Already being created */
	if (cis->state == BT_CONNECT)
		return 0;

	/* Queue Create CIS */
	err = hci_cmd_sync_queue(hdev, hci_create_cis_sync, cis, NULL);
	if (err)
		return err;

	cis->state = BT_CONNECT;

	return 0;
}
1597 
/* Fill in unset fields of one direction of an ISO QoS (@qos) with
 * defaults derived from the controller MTUs and the LE ACL connection
 * @conn: SDU from the best available MTU, PHY from @phy when set to
 * "any", and interval/latency from the ACL connection parameters.
 */
static void hci_iso_qos_setup(struct hci_dev *hdev, struct hci_conn *conn,
			      struct bt_iso_io_qos *qos, __u8 phy)
{
	/* Only set MTU if PHY is enabled */
	if (!qos->sdu && qos->phy) {
		/* Prefer the ISO MTU, then LE, then ACL as fallback */
		if (hdev->iso_mtu > 0)
			qos->sdu = hdev->iso_mtu;
		else if (hdev->le_mtu > 0)
			qos->sdu = hdev->le_mtu;
		else
			qos->sdu = hdev->acl_mtu;
	}

	/* Use the same PHY as ACL if set to any */
	if (qos->phy == BT_ISO_PHY_ANY)
		qos->phy = phy;

	/* Use LE ACL connection interval if not set */
	if (!qos->interval)
		/* ACL interval unit in 1.25 ms to us */
		qos->interval = conn->le_conn_interval * 1250;

	/* Use LE ACL connection latency if not set */
	if (!qos->latency)
		qos->latency = conn->le_conn_latency;
}
1624 
/* Establish a CIS to @dst: first create (or reuse) the LE ACL link,
 * derive any missing QoS values from it, bind the CIS and, when
 * everything is already in place, kick off Create CIS immediately.
 *
 * Returns the CIS hci_conn with a reference held, or an ERR_PTR().
 */
struct hci_conn *hci_connect_cis(struct hci_dev *hdev, bdaddr_t *dst,
				 __u8 dst_type, struct bt_iso_qos *qos)
{
	struct hci_conn *le;
	struct hci_conn *cis;

	/* Convert from ISO socket address type to HCI address type  */
	if (dst_type == BDADDR_LE_PUBLIC)
		dst_type = ADDR_LE_DEV_PUBLIC;
	else
		dst_type = ADDR_LE_DEV_RANDOM;

	/* While advertising, connect directly as slave; otherwise go
	 * through passive scanning as master.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		le = hci_connect_le(hdev, dst, dst_type, false,
				    BT_SECURITY_LOW,
				    HCI_LE_CONN_TIMEOUT,
				    HCI_ROLE_SLAVE);
	else
		le = hci_connect_le_scan(hdev, dst, dst_type,
					 BT_SECURITY_LOW,
					 HCI_LE_CONN_TIMEOUT,
					 CONN_REASON_ISO_CONNECT);
	if (IS_ERR(le))
		return le;

	/* Fill QoS defaults per direction from the ACL PHYs */
	hci_iso_qos_setup(hdev, le, &qos->out,
			  le->le_tx_phy ? le->le_tx_phy : hdev->le_tx_def_phys);
	hci_iso_qos_setup(hdev, le, &qos->in,
			  le->le_rx_phy ? le->le_rx_phy : hdev->le_rx_def_phys);

	cis = hci_bind_cis(hdev, dst, dst_type, qos);
	if (IS_ERR(cis)) {
		hci_conn_drop(le);
		return cis;
	}

	/* Cross-link ACL and CIS objects */
	le->link = cis;
	cis->link = le;

	hci_conn_hold(cis);

	/* If LE is already connected and CIS handle is already set proceed to
	 * Create CIS immediately.
	 */
	if (le->state == BT_CONNECTED && cis->handle != HCI_CONN_HANDLE_UNSET)
		hci_le_create_cis(le);

	return cis;
}
1674 
/* Check link security requirement */
/* Verify that the link's current security settings satisfy the host's
 * security policy (SC Only mode, FIPS level, SSP encryption).
 *
 * Returns 1 when the link mode is acceptable, 0 otherwise.
 */
int hci_conn_check_link_mode(struct hci_conn *conn)
{
	BT_DBG("hcon %p", conn);

	/* In Secure Connections Only mode, it is required that Secure
	 * Connections is used and the link is encrypted with AES-CCM
	 * using a P-256 authenticated combination key.
	 */
	if (hci_dev_test_flag(conn->hdev, HCI_SC_ONLY)) {
		if (!hci_conn_sc_enabled(conn) ||
		    !test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
		    conn->key_type != HCI_LK_AUTH_COMBINATION_P256)
			return 0;
	}

	 /* AES encryption is required for Level 4:
	  *
	  * BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 3, Part C
	  * page 1319:
	  *
	  * 128-bit equivalent strength for link and encryption keys
	  * required using FIPS approved algorithms (E0 not allowed,
	  * SAFER+ not allowed, and P-192 not allowed; encryption key
	  * not shortened)
	  */
	if (conn->sec_level == BT_SECURITY_FIPS &&
	    !test_bit(HCI_CONN_AES_CCM, &conn->flags)) {
		bt_dev_err(conn->hdev,
			   "Invalid security: Missing AES-CCM usage");
		return 0;
	}

	/* With SSP, an encrypted link is mandatory */
	if (hci_conn_ssp_enabled(conn) &&
	    !test_bit(HCI_CONN_ENCRYPT, &conn->flags))
		return 0;

	return 1;
}
1714 
/* Authenticate remote device */
/* Request authentication of @conn to at least @sec_level with
 * @auth_type requirements, issuing HCI Authentication Requested when no
 * request is pending yet.
 *
 * Returns 1 when the link is already authenticated at a sufficient
 * level, 0 when authentication has been requested (or is pending).
 */
static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
	BT_DBG("hcon %p", conn);

	/* Never lower an already pending security level */
	if (conn->pending_sec_level > sec_level)
		sec_level = conn->pending_sec_level;

	if (sec_level > conn->sec_level)
		conn->pending_sec_level = sec_level;
	else if (test_bit(HCI_CONN_AUTH, &conn->flags))
		return 1;

	/* Make sure we preserve an existing MITM requirement*/
	auth_type |= (conn->auth_type & 0x01);

	conn->auth_type = auth_type;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(cp), &cp);

		/* If we're already encrypted set the REAUTH_PEND flag,
		 * otherwise set the ENCRYPT_PEND.
		 */
		if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
			set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
		else
			set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
	}

	return 0;
}
1751 
1752 /* Encrypt the link */
1753 static void hci_conn_encrypt(struct hci_conn *conn)
1754 {
1755 	BT_DBG("hcon %p", conn);
1756 
1757 	if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
1758 		struct hci_cp_set_conn_encrypt cp;
1759 		cp.handle  = cpu_to_le16(conn->handle);
1760 		cp.encrypt = 0x01;
1761 		hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1762 			     &cp);
1763 	}
1764 }
1765 
/* Enable security */
/* Raise the security of @conn to @sec_level, authenticating and/or
 * encrypting the link as needed. LE links are handed off to SMP.
 *
 * Returns 1 when the requirement is already met, 0 when a procedure
 * (authentication or encryption) has been started or is pending.
 */
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
		      bool initiator)
{
	BT_DBG("hcon %p", conn);

	if (conn->type == LE_LINK)
		return smp_conn_security(conn, sec_level);

	/* For sdp we don't need the link key. */
	if (sec_level == BT_SECURITY_SDP)
		return 1;

	/* For non 2.1 devices and low security level we don't need the link
	   key. */
	if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
		return 1;

	/* For other security levels we need the link key. */
	if (!test_bit(HCI_CONN_AUTH, &conn->flags))
		goto auth;

	/* An authenticated FIPS approved combination key has sufficient
	 * security for security level 4. */
	if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256 &&
	    sec_level == BT_SECURITY_FIPS)
		goto encrypt;

	/* An authenticated combination key has sufficient security for
	   security level 3. */
	if ((conn->key_type == HCI_LK_AUTH_COMBINATION_P192 ||
	     conn->key_type == HCI_LK_AUTH_COMBINATION_P256) &&
	    sec_level == BT_SECURITY_HIGH)
		goto encrypt;

	/* An unauthenticated combination key has sufficient security for
	   security level 1 and 2. */
	if ((conn->key_type == HCI_LK_UNAUTH_COMBINATION_P192 ||
	     conn->key_type == HCI_LK_UNAUTH_COMBINATION_P256) &&
	    (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW))
		goto encrypt;

	/* A combination key has always sufficient security for the security
	   levels 1 or 2. High security level requires the combination key
	   is generated using maximum PIN code length (16).
	   For pre 2.1 units. */
	if (conn->key_type == HCI_LK_COMBINATION &&
	    (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW ||
	     conn->pin_length == 16))
		goto encrypt;

auth:
	/* Do not start a new authentication while encryption changes
	 * are in flight.
	 */
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
		return 0;

	if (initiator)
		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

	if (!hci_conn_auth(conn, sec_level, auth_type))
		return 0;

encrypt:
	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags)) {
		/* Ensure that the encryption key size has been read,
		 * otherwise stall the upper layer responses.
		 */
		if (!conn->enc_key_size)
			return 0;

		/* Nothing else needed, all requirements are met */
		return 1;
	}

	hci_conn_encrypt(conn);
	return 0;
}
EXPORT_SYMBOL(hci_conn_security);
1843 
1844 /* Check secure link requirement */
1845 int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
1846 {
1847 	BT_DBG("hcon %p", conn);
1848 
1849 	/* Accept if non-secure or higher security level is required */
1850 	if (sec_level != BT_SECURITY_HIGH && sec_level != BT_SECURITY_FIPS)
1851 		return 1;
1852 
1853 	/* Accept if secure or higher security level is already present */
1854 	if (conn->sec_level == BT_SECURITY_HIGH ||
1855 	    conn->sec_level == BT_SECURITY_FIPS)
1856 		return 1;
1857 
1858 	/* Reject not secure link */
1859 	return 0;
1860 }
1861 EXPORT_SYMBOL(hci_conn_check_secure);
1862 
1863 /* Switch role */
1864 int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
1865 {
1866 	BT_DBG("hcon %p", conn);
1867 
1868 	if (role == conn->role)
1869 		return 1;
1870 
1871 	if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
1872 		struct hci_cp_switch_role cp;
1873 		bacpy(&cp.bdaddr, &conn->dst);
1874 		cp.role = role;
1875 		hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
1876 	}
1877 
1878 	return 0;
1879 }
1880 EXPORT_SYMBOL(hci_conn_switch_role);
1881 
/* Enter active mode */
/* Bring @conn out of sniff mode (when allowed by @force_active or the
 * POWER_SAVE flag) and re-arm the idle timer.
 */
void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("hcon %p mode %d", conn, conn->mode);

	/* Nothing to exit if not sniffing */
	if (conn->mode != HCI_CM_SNIFF)
		goto timer;

	if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
		goto timer;

	/* Only one mode change may be outstanding at a time */
	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
		struct hci_cp_exit_sniff_mode cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
	}

timer:
	if (hdev->idle_timeout > 0)
		queue_delayed_work(hdev->workqueue, &conn->idle_work,
				   msecs_to_jiffies(hdev->idle_timeout));
}
1906 
/* Drop all connection on the device */
/* Tear down every connection in hdev's hash: mark it closed, notify the
 * upper layers via hci_disconn_cfm() and delete the object.
 */
void hci_conn_hash_flush(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c, *n;

	BT_DBG("hdev %s", hdev->name);

	/* _safe iteration: hci_conn_del() removes entries from the list */
	list_for_each_entry_safe(c, n, &h->list, list) {
		c->state = BT_CLOSED;

		hci_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
		hci_conn_del(c);
	}
}
1922 
/* Check pending connect attempts */
/* Kick off the next queued BR/EDR connection attempt, if any ACL
 * connection is waiting in BT_CONNECT2 state.
 */
void hci_conn_check_pending(struct hci_dev *hdev)
{
	struct hci_conn *conn;

	BT_DBG("hdev %s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
	if (conn)
		hci_acl_create_connection(conn);

	hci_dev_unlock(hdev);
}
1938 
1939 static u32 get_link_mode(struct hci_conn *conn)
1940 {
1941 	u32 link_mode = 0;
1942 
1943 	if (conn->role == HCI_ROLE_MASTER)
1944 		link_mode |= HCI_LM_MASTER;
1945 
1946 	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
1947 		link_mode |= HCI_LM_ENCRYPT;
1948 
1949 	if (test_bit(HCI_CONN_AUTH, &conn->flags))
1950 		link_mode |= HCI_LM_AUTH;
1951 
1952 	if (test_bit(HCI_CONN_SECURE, &conn->flags))
1953 		link_mode |= HCI_LM_SECURE;
1954 
1955 	if (test_bit(HCI_CONN_FIPS, &conn->flags))
1956 		link_mode |= HCI_LM_FIPS;
1957 
1958 	return link_mode;
1959 }
1960 
/* HCIGETCONNLIST ioctl handler: copy up to req.conn_num connection
 * descriptors of the requested device back to userspace.
 *
 * Returns 0 on success, -EFAULT/-EINVAL/-ENOMEM/-ENODEV on failure.
 */
int hci_get_conn_list(void __user *arg)
{
	struct hci_conn *c;
	struct hci_conn_list_req req, *cl;
	struct hci_conn_info *ci;
	struct hci_dev *hdev;
	int n = 0, size, err;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	/* Bound the request so the allocation below stays sane */
	if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
		return -EINVAL;

	size = sizeof(req) + req.conn_num * sizeof(*ci);

	cl = kmalloc(size, GFP_KERNEL);
	if (!cl)
		return -ENOMEM;

	hdev = hci_dev_get(req.dev_id);
	if (!hdev) {
		kfree(cl);
		return -ENODEV;
	}

	ci = cl->conn_info;

	/* Snapshot the connection list under hdev->lock */
	hci_dev_lock(hdev);
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		bacpy(&(ci + n)->bdaddr, &c->dst);
		(ci + n)->handle = c->handle;
		(ci + n)->type  = c->type;
		(ci + n)->out   = c->out;
		(ci + n)->state = c->state;
		(ci + n)->link_mode = get_link_mode(c);
		if (++n >= req.conn_num)
			break;
	}
	hci_dev_unlock(hdev);

	cl->dev_id = hdev->id;
	cl->conn_num = n;
	/* Only copy back what was actually filled in */
	size = sizeof(req) + n * sizeof(*ci);

	hci_dev_put(hdev);

	err = copy_to_user(arg, cl, size);
	kfree(cl);

	return err ? -EFAULT : 0;
}
2013 
/* HCIGETCONNINFO ioctl handler: look up the connection matching the
 * requested type and address and copy its info to userspace (placed
 * right after the request structure in the user buffer).
 *
 * Returns 0 on success, -ENOENT when no such connection exists,
 * -EFAULT on copy failures.
 */
int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
{
	struct hci_conn_info_req req;
	struct hci_conn_info ci;
	struct hci_conn *conn;
	char __user *ptr = arg + sizeof(req);

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	/* Fill ci under the lock; conn must not be used after unlock */
	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
	if (conn) {
		bacpy(&ci.bdaddr, &conn->dst);
		ci.handle = conn->handle;
		ci.type  = conn->type;
		ci.out   = conn->out;
		ci.state = conn->state;
		ci.link_mode = get_link_mode(conn);
	}
	hci_dev_unlock(hdev);

	if (!conn)
		return -ENOENT;

	return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
}
2041 
2042 int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
2043 {
2044 	struct hci_auth_info_req req;
2045 	struct hci_conn *conn;
2046 
2047 	if (copy_from_user(&req, arg, sizeof(req)))
2048 		return -EFAULT;
2049 
2050 	hci_dev_lock(hdev);
2051 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
2052 	if (conn)
2053 		req.type = conn->auth_type;
2054 	hci_dev_unlock(hdev);
2055 
2056 	if (!conn)
2057 		return -ENOENT;
2058 
2059 	return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
2060 }
2061 
/* Allocate a new hci_chan on @conn and add it to the connection's
 * channel list. The channel keeps its own reference on the connection.
 *
 * Returns the new channel, or NULL on allocation failure or when the
 * connection is being dropped (HCI_CONN_DROP set by hci_chan_del()).
 */
struct hci_chan *hci_chan_create(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_chan *chan;

	BT_DBG("%s hcon %p", hdev->name, conn);

	if (test_bit(HCI_CONN_DROP, &conn->flags)) {
		BT_DBG("Refusing to create new hci_chan");
		return NULL;
	}

	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return NULL;

	/* The channel holds a reference on the connection */
	chan->conn = hci_conn_get(conn);
	skb_queue_head_init(&chan->data_q);
	chan->state = BT_CONNECTED;

	list_add_rcu(&chan->list, &conn->chan_list);

	return chan;
}
2086 
/* Remove @chan from its connection's channel list and free it, dropping
 * the connection reference the channel held. Waits for RCU readers
 * before tearing the channel down.
 */
void hci_chan_del(struct hci_chan *chan)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);

	list_del_rcu(&chan->list);

	/* Make sure no RCU reader still sees the channel before freeing */
	synchronize_rcu();

	/* Prevent new hci_chan's to be created for this hci_conn */
	set_bit(HCI_CONN_DROP, &conn->flags);

	hci_conn_put(conn);

	skb_queue_purge(&chan->data_q);
	kfree(chan);
}
2106 
2107 void hci_chan_list_flush(struct hci_conn *conn)
2108 {
2109 	struct hci_chan *chan, *n;
2110 
2111 	BT_DBG("hcon %p", conn);
2112 
2113 	list_for_each_entry_safe(chan, n, &conn->chan_list, list)
2114 		hci_chan_del(chan);
2115 }
2116 
2117 static struct hci_chan *__hci_chan_lookup_handle(struct hci_conn *hcon,
2118 						 __u16 handle)
2119 {
2120 	struct hci_chan *hchan;
2121 
2122 	list_for_each_entry(hchan, &hcon->chan_list, list) {
2123 		if (hchan->handle == handle)
2124 			return hchan;
2125 	}
2126 
2127 	return NULL;
2128 }
2129 
2130 struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle)
2131 {
2132 	struct hci_conn_hash *h = &hdev->conn_hash;
2133 	struct hci_conn *hcon;
2134 	struct hci_chan *hchan = NULL;
2135 
2136 	rcu_read_lock();
2137 
2138 	list_for_each_entry_rcu(hcon, &h->list, list) {
2139 		hchan = __hci_chan_lookup_handle(hcon, handle);
2140 		if (hchan)
2141 			break;
2142 	}
2143 
2144 	rcu_read_unlock();
2145 
2146 	return hchan;
2147 }
2148 
2149 u32 hci_conn_get_phy(struct hci_conn *conn)
2150 {
2151 	u32 phys = 0;
2152 
2153 	/* BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 2, Part B page 471:
2154 	 * Table 6.2: Packets defined for synchronous, asynchronous, and
2155 	 * CPB logical transport types.
2156 	 */
2157 	switch (conn->type) {
2158 	case SCO_LINK:
2159 		/* SCO logical transport (1 Mb/s):
2160 		 * HV1, HV2, HV3 and DV.
2161 		 */
2162 		phys |= BT_PHY_BR_1M_1SLOT;
2163 
2164 		break;
2165 
2166 	case ACL_LINK:
2167 		/* ACL logical transport (1 Mb/s) ptt=0:
2168 		 * DH1, DM3, DH3, DM5 and DH5.
2169 		 */
2170 		phys |= BT_PHY_BR_1M_1SLOT;
2171 
2172 		if (conn->pkt_type & (HCI_DM3 | HCI_DH3))
2173 			phys |= BT_PHY_BR_1M_3SLOT;
2174 
2175 		if (conn->pkt_type & (HCI_DM5 | HCI_DH5))
2176 			phys |= BT_PHY_BR_1M_5SLOT;
2177 
2178 		/* ACL logical transport (2 Mb/s) ptt=1:
2179 		 * 2-DH1, 2-DH3 and 2-DH5.
2180 		 */
2181 		if (!(conn->pkt_type & HCI_2DH1))
2182 			phys |= BT_PHY_EDR_2M_1SLOT;
2183 
2184 		if (!(conn->pkt_type & HCI_2DH3))
2185 			phys |= BT_PHY_EDR_2M_3SLOT;
2186 
2187 		if (!(conn->pkt_type & HCI_2DH5))
2188 			phys |= BT_PHY_EDR_2M_5SLOT;
2189 
2190 		/* ACL logical transport (3 Mb/s) ptt=1:
2191 		 * 3-DH1, 3-DH3 and 3-DH5.
2192 		 */
2193 		if (!(conn->pkt_type & HCI_3DH1))
2194 			phys |= BT_PHY_EDR_3M_1SLOT;
2195 
2196 		if (!(conn->pkt_type & HCI_3DH3))
2197 			phys |= BT_PHY_EDR_3M_3SLOT;
2198 
2199 		if (!(conn->pkt_type & HCI_3DH5))
2200 			phys |= BT_PHY_EDR_3M_5SLOT;
2201 
2202 		break;
2203 
2204 	case ESCO_LINK:
2205 		/* eSCO logical transport (1 Mb/s): EV3, EV4 and EV5 */
2206 		phys |= BT_PHY_BR_1M_1SLOT;
2207 
2208 		if (!(conn->pkt_type & (ESCO_EV4 | ESCO_EV5)))
2209 			phys |= BT_PHY_BR_1M_3SLOT;
2210 
2211 		/* eSCO logical transport (2 Mb/s): 2-EV3, 2-EV5 */
2212 		if (!(conn->pkt_type & ESCO_2EV3))
2213 			phys |= BT_PHY_EDR_2M_1SLOT;
2214 
2215 		if (!(conn->pkt_type & ESCO_2EV5))
2216 			phys |= BT_PHY_EDR_2M_3SLOT;
2217 
2218 		/* eSCO logical transport (3 Mb/s): 3-EV3, 3-EV5 */
2219 		if (!(conn->pkt_type & ESCO_3EV3))
2220 			phys |= BT_PHY_EDR_3M_1SLOT;
2221 
2222 		if (!(conn->pkt_type & ESCO_3EV5))
2223 			phys |= BT_PHY_EDR_3M_3SLOT;
2224 
2225 		break;
2226 
2227 	case LE_LINK:
2228 		if (conn->le_tx_phy & HCI_LE_SET_PHY_1M)
2229 			phys |= BT_PHY_LE_1M_TX;
2230 
2231 		if (conn->le_rx_phy & HCI_LE_SET_PHY_1M)
2232 			phys |= BT_PHY_LE_1M_RX;
2233 
2234 		if (conn->le_tx_phy & HCI_LE_SET_PHY_2M)
2235 			phys |= BT_PHY_LE_2M_TX;
2236 
2237 		if (conn->le_rx_phy & HCI_LE_SET_PHY_2M)
2238 			phys |= BT_PHY_LE_2M_RX;
2239 
2240 		if (conn->le_tx_phy & HCI_LE_SET_PHY_CODED)
2241 			phys |= BT_PHY_LE_CODED_TX;
2242 
2243 		if (conn->le_rx_phy & HCI_LE_SET_PHY_CODED)
2244 			phys |= BT_PHY_LE_CODED_RX;
2245 
2246 		break;
2247 	}
2248 
2249 	return phys;
2250 }
2251