xref: /openbmc/linux/net/bluetooth/hci_event.c (revision 61ae993c)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4    Copyright 2023 NXP
5 
6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License version 2 as
10    published by the Free Software Foundation;
11 
12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 
21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23    SOFTWARE IS DISCLAIMED.
24 */
25 
26 /* Bluetooth HCI event handling. */
27 
28 #include <asm/unaligned.h>
29 #include <linux/crypto.h>
30 #include <crypto/algapi.h>
31 
32 #include <net/bluetooth/bluetooth.h>
33 #include <net/bluetooth/hci_core.h>
34 #include <net/bluetooth/mgmt.h>
35 
36 #include "hci_request.h"
37 #include "hci_debugfs.h"
38 #include "hci_codec.h"
39 #include "a2mp.h"
40 #include "amp.h"
41 #include "smp.h"
42 #include "msft.h"
43 #include "eir.h"
44 
45 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
46 		 "\x00\x00\x00\x00\x00\x00\x00\x00"
47 
48 #define secs_to_jiffies(_secs) msecs_to_jiffies((_secs) * 1000)
49 
50 /* Handle HCI Event packets */
51 
52 static void *hci_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
53 			     u8 ev, size_t len)
54 {
55 	void *data;
56 
57 	data = skb_pull_data(skb, len);
58 	if (!data)
59 		bt_dev_err(hdev, "Malformed Event: 0x%2.2x", ev);
60 
61 	return data;
62 }
63 
64 static void *hci_cc_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
65 			     u16 op, size_t len)
66 {
67 	void *data;
68 
69 	data = skb_pull_data(skb, len);
70 	if (!data)
71 		bt_dev_err(hdev, "Malformed Command Complete: 0x%4.4x", op);
72 
73 	return data;
74 }
75 
76 static void *hci_le_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
77 				u8 ev, size_t len)
78 {
79 	void *data;
80 
81 	data = skb_pull_data(skb, len);
82 	if (!data)
83 		bt_dev_err(hdev, "Malformed LE Event: 0x%2.2x", ev);
84 
85 	return data;
86 }
87 
/* Command Complete handler for HCI_Inquiry_Cancel: clears the local
 * inquiry state, wakes any waiters blocked on HCI_INQUIRY, and updates
 * the discovery state machine.
 */
static u8 hci_cc_inquiry_cancel(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	/* It is possible that we receive Inquiry Complete event right
	 * before we receive Inquiry Cancel Command Complete event, in
	 * which case the latter event should have status of Command
	 * Disallowed (0x0c). This should not be treated as error, since
	 * we actually achieve what Inquiry Cancel wants to achieve,
	 * which is to end the last Inquiry session.
	 */
	if (rp->status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
		bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
		rp->status = 0x00;
	}

	if (rp->status)
		return rp->status;

	/* Clear the flag before waking waiters; the barrier pairs with
	 * the one implied by wait_on_bit (see wake_up_bit documentation).
	 */
	clear_bit(HCI_INQUIRY, &hdev->flags);
	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	hci_dev_lock(hdev);
	/* Set discovery state to stopped if we're not doing LE active
	 * scanning.
	 */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
	    hdev->le_scan_type != LE_SCAN_ACTIVE)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);

	return rp->status;
}
127 
128 static u8 hci_cc_periodic_inq(struct hci_dev *hdev, void *data,
129 			      struct sk_buff *skb)
130 {
131 	struct hci_ev_status *rp = data;
132 
133 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
134 
135 	if (rp->status)
136 		return rp->status;
137 
138 	hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
139 
140 	return rp->status;
141 }
142 
143 static u8 hci_cc_exit_periodic_inq(struct hci_dev *hdev, void *data,
144 				   struct sk_buff *skb)
145 {
146 	struct hci_ev_status *rp = data;
147 
148 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
149 
150 	if (rp->status)
151 		return rp->status;
152 
153 	hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
154 
155 	hci_conn_check_pending(hdev);
156 
157 	return rp->status;
158 }
159 
160 static u8 hci_cc_remote_name_req_cancel(struct hci_dev *hdev, void *data,
161 					struct sk_buff *skb)
162 {
163 	struct hci_ev_status *rp = data;
164 
165 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
166 
167 	return rp->status;
168 }
169 
170 static u8 hci_cc_role_discovery(struct hci_dev *hdev, void *data,
171 				struct sk_buff *skb)
172 {
173 	struct hci_rp_role_discovery *rp = data;
174 	struct hci_conn *conn;
175 
176 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
177 
178 	if (rp->status)
179 		return rp->status;
180 
181 	hci_dev_lock(hdev);
182 
183 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
184 	if (conn)
185 		conn->role = rp->role;
186 
187 	hci_dev_unlock(hdev);
188 
189 	return rp->status;
190 }
191 
192 static u8 hci_cc_read_link_policy(struct hci_dev *hdev, void *data,
193 				  struct sk_buff *skb)
194 {
195 	struct hci_rp_read_link_policy *rp = data;
196 	struct hci_conn *conn;
197 
198 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
199 
200 	if (rp->status)
201 		return rp->status;
202 
203 	hci_dev_lock(hdev);
204 
205 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
206 	if (conn)
207 		conn->link_policy = __le16_to_cpu(rp->policy);
208 
209 	hci_dev_unlock(hdev);
210 
211 	return rp->status;
212 }
213 
/* Command Complete handler for HCI_Write_Link_Policy_Settings: mirrors
 * the policy that was requested onto the matching connection object.
 */
static u8 hci_cc_write_link_policy(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = data;
	struct hci_conn *conn;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	/* Recover the parameters of the command this event completes. */
	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		/* sent + 2 skips the 16-bit connection handle at the start
		 * of the command payload to reach the policy value.
		 */
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);

	return rp->status;
}
240 
241 static u8 hci_cc_read_def_link_policy(struct hci_dev *hdev, void *data,
242 				      struct sk_buff *skb)
243 {
244 	struct hci_rp_read_def_link_policy *rp = data;
245 
246 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
247 
248 	if (rp->status)
249 		return rp->status;
250 
251 	hdev->link_policy = __le16_to_cpu(rp->policy);
252 
253 	return rp->status;
254 }
255 
256 static u8 hci_cc_write_def_link_policy(struct hci_dev *hdev, void *data,
257 				       struct sk_buff *skb)
258 {
259 	struct hci_ev_status *rp = data;
260 	void *sent;
261 
262 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
263 
264 	if (rp->status)
265 		return rp->status;
266 
267 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
268 	if (!sent)
269 		return rp->status;
270 
271 	hdev->link_policy = get_unaligned_le16(sent);
272 
273 	return rp->status;
274 }
275 
/* Command Complete handler for HCI_Reset: on success, return the local
 * state to post-reset defaults (volatile flags, discovery state,
 * advertising/scan-response data, accept and resolving lists).
 */
static u8 hci_cc_reset(struct hci_dev *hdev, void *data, struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	/* The reset attempt is over whether it succeeded or not. */
	clear_bit(HCI_RESET, &hdev->flags);

	if (rp->status)
		return rp->status;

	/* Reset all non-persistent flags */
	hci_dev_clear_volatile_flags(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	/* Cached advertising and scan-response payloads are gone after a
	 * controller reset.
	 */
	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;

	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
	hdev->scan_rsp_data_len = 0;

	hdev->le_scan_type = LE_SCAN_PASSIVE;

	hdev->ssp_debug_mode = 0;

	/* The controller's accept and resolving lists are cleared by the
	 * reset, so drop our mirrors of them too.
	 */
	hci_bdaddr_list_clear(&hdev->le_accept_list);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);

	return rp->status;
}
310 
/* Command Complete handler for HCI_Read_Stored_Link_Key: records the
 * controller's key-storage capacity, but only when the original command
 * asked for all keys (Read_All_Flag set).
 */
static u8 hci_cc_read_stored_link_key(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_read_stored_link_key *rp = data;
	struct hci_cp_read_stored_link_key *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	/* Need the original command parameters to know what was queried. */
	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
	if (!sent)
		return rp->status;

	if (!rp->status && sent->read_all == 0x01) {
		hdev->stored_max_keys = le16_to_cpu(rp->max_keys);
		hdev->stored_num_keys = le16_to_cpu(rp->num_keys);
	}

	return rp->status;
}
330 
331 static u8 hci_cc_delete_stored_link_key(struct hci_dev *hdev, void *data,
332 					struct sk_buff *skb)
333 {
334 	struct hci_rp_delete_stored_link_key *rp = data;
335 	u16 num_keys;
336 
337 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
338 
339 	if (rp->status)
340 		return rp->status;
341 
342 	num_keys = le16_to_cpu(rp->num_keys);
343 
344 	if (num_keys <= hdev->stored_num_keys)
345 		hdev->stored_num_keys -= num_keys;
346 	else
347 		hdev->stored_num_keys = 0;
348 
349 	return rp->status;
350 }
351 
/* Command Complete handler for HCI_Write_Local_Name: either forwards
 * the result to the management interface (when mgmt is in control) or
 * caches the name we sent on success.
 */
static u8 hci_cc_write_local_name(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	/* The name that was written is only available from the command we
	 * sent, not from the event.
	 */
	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	/* With mgmt active, mgmt owns the name bookkeeping (including the
	 * failure case); otherwise cache the new name only on success.
	 */
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_local_name_complete(hdev, sent, rp->status);
	else if (!rp->status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);

	return rp->status;
}
375 
376 static u8 hci_cc_read_local_name(struct hci_dev *hdev, void *data,
377 				 struct sk_buff *skb)
378 {
379 	struct hci_rp_read_local_name *rp = data;
380 
381 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
382 
383 	if (rp->status)
384 		return rp->status;
385 
386 	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
387 	    hci_dev_test_flag(hdev, HCI_CONFIG))
388 		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
389 
390 	return rp->status;
391 }
392 
/* Command Complete handler for HCI_Write_Authentication_Enable: keeps
 * the HCI_AUTH flag in sync with the requested mode and notifies the
 * management interface of the outcome.
 */
static u8 hci_cc_write_auth_enable(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	/* Only update the flag on success; the command payload is a single
	 * byte holding the requested auth mode.
	 */
	if (!rp->status) {
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	/* mgmt is told about both success and failure. */
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_auth_enable_complete(hdev, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}
423 
424 static u8 hci_cc_write_encrypt_mode(struct hci_dev *hdev, void *data,
425 				    struct sk_buff *skb)
426 {
427 	struct hci_ev_status *rp = data;
428 	__u8 param;
429 	void *sent;
430 
431 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
432 
433 	if (rp->status)
434 		return rp->status;
435 
436 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
437 	if (!sent)
438 		return rp->status;
439 
440 	param = *((__u8 *) sent);
441 
442 	if (param)
443 		set_bit(HCI_ENCRYPT, &hdev->flags);
444 	else
445 		clear_bit(HCI_ENCRYPT, &hdev->flags);
446 
447 	return rp->status;
448 }
449 
/* Command Complete handler for HCI_Write_Scan_Enable: keeps the
 * HCI_ISCAN/HCI_PSCAN flags in sync with the scan mode that was sent;
 * on failure only the discoverable timeout is reset.
 */
static u8 hci_cc_write_scan_enable(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u8 param;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return rp->status;

	/* The command payload is a single byte of SCAN_* bits. */
	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	/* If the controller rejected the command, drop the pending
	 * discoverable timeout but leave the scan flags untouched.
	 */
	if (rp->status) {
		hdev->discov_timeout = 0;
		goto done;
	}

	if (param & SCAN_INQUIRY)
		set_bit(HCI_ISCAN, &hdev->flags);
	else
		clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_PAGE)
		set_bit(HCI_PSCAN, &hdev->flags);
	else
		clear_bit(HCI_PSCAN, &hdev->flags);

done:
	hci_dev_unlock(hdev);

	return rp->status;
}
487 
488 static u8 hci_cc_set_event_filter(struct hci_dev *hdev, void *data,
489 				  struct sk_buff *skb)
490 {
491 	struct hci_ev_status *rp = data;
492 	struct hci_cp_set_event_filter *cp;
493 	void *sent;
494 
495 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
496 
497 	if (rp->status)
498 		return rp->status;
499 
500 	sent = hci_sent_cmd_data(hdev, HCI_OP_SET_EVENT_FLT);
501 	if (!sent)
502 		return rp->status;
503 
504 	cp = (struct hci_cp_set_event_filter *)sent;
505 
506 	if (cp->flt_type == HCI_FLT_CLEAR_ALL)
507 		hci_dev_clear_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
508 	else
509 		hci_dev_set_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
510 
511 	return rp->status;
512 }
513 
514 static u8 hci_cc_read_class_of_dev(struct hci_dev *hdev, void *data,
515 				   struct sk_buff *skb)
516 {
517 	struct hci_rp_read_class_of_dev *rp = data;
518 
519 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
520 
521 	if (rp->status)
522 		return rp->status;
523 
524 	memcpy(hdev->dev_class, rp->dev_class, 3);
525 
526 	bt_dev_dbg(hdev, "class 0x%.2x%.2x%.2x", hdev->dev_class[2],
527 		   hdev->dev_class[1], hdev->dev_class[0]);
528 
529 	return rp->status;
530 }
531 
/* Command Complete handler for HCI_Write_Class_of_Device: caches the
 * class we sent on success and always informs the management interface
 * of the outcome.
 */
static u8 hci_cc_write_class_of_dev(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	/* The 3-byte class value only exists in the command we sent. */
	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (!rp->status)
		memcpy(hdev->dev_class, sent, 3);

	/* mgmt is notified on success and failure alike. */
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_class_of_dev_complete(hdev, sent, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}
556 
557 static u8 hci_cc_read_voice_setting(struct hci_dev *hdev, void *data,
558 				    struct sk_buff *skb)
559 {
560 	struct hci_rp_read_voice_setting *rp = data;
561 	__u16 setting;
562 
563 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
564 
565 	if (rp->status)
566 		return rp->status;
567 
568 	setting = __le16_to_cpu(rp->voice_setting);
569 
570 	if (hdev->voice_setting == setting)
571 		return rp->status;
572 
573 	hdev->voice_setting = setting;
574 
575 	bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);
576 
577 	if (hdev->notify)
578 		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
579 
580 	return rp->status;
581 }
582 
583 static u8 hci_cc_write_voice_setting(struct hci_dev *hdev, void *data,
584 				     struct sk_buff *skb)
585 {
586 	struct hci_ev_status *rp = data;
587 	__u16 setting;
588 	void *sent;
589 
590 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
591 
592 	if (rp->status)
593 		return rp->status;
594 
595 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
596 	if (!sent)
597 		return rp->status;
598 
599 	setting = get_unaligned_le16(sent);
600 
601 	if (hdev->voice_setting == setting)
602 		return rp->status;
603 
604 	hdev->voice_setting = setting;
605 
606 	bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);
607 
608 	if (hdev->notify)
609 		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
610 
611 	return rp->status;
612 }
613 
614 static u8 hci_cc_read_num_supported_iac(struct hci_dev *hdev, void *data,
615 					struct sk_buff *skb)
616 {
617 	struct hci_rp_read_num_supported_iac *rp = data;
618 
619 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
620 
621 	if (rp->status)
622 		return rp->status;
623 
624 	hdev->num_iac = rp->num_iac;
625 
626 	bt_dev_dbg(hdev, "num iac %d", hdev->num_iac);
627 
628 	return rp->status;
629 }
630 
631 static u8 hci_cc_write_ssp_mode(struct hci_dev *hdev, void *data,
632 				struct sk_buff *skb)
633 {
634 	struct hci_ev_status *rp = data;
635 	struct hci_cp_write_ssp_mode *sent;
636 
637 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
638 
639 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
640 	if (!sent)
641 		return rp->status;
642 
643 	hci_dev_lock(hdev);
644 
645 	if (!rp->status) {
646 		if (sent->mode)
647 			hdev->features[1][0] |= LMP_HOST_SSP;
648 		else
649 			hdev->features[1][0] &= ~LMP_HOST_SSP;
650 	}
651 
652 	if (!rp->status) {
653 		if (sent->mode)
654 			hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
655 		else
656 			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
657 	}
658 
659 	hci_dev_unlock(hdev);
660 
661 	return rp->status;
662 }
663 
/* Command Complete handler for HCI_Write_Secure_Connections_Host_Support:
 * keeps the host feature bit and (outside mgmt control) the
 * HCI_SC_ENABLED flag in sync with the requested support value.
 */
static u8 hci_cc_write_sc_support(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_write_sc_support *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (!rp->status) {
		if (sent->support)
			hdev->features[1][0] |= LMP_HOST_SC;
		else
			hdev->features[1][0] &= ~LMP_HOST_SC;
	}

	/* NOTE(review): unlike the feature bit above, the SC_ENABLED flag
	 * is skipped when HCI_MGMT is set — presumably mgmt maintains that
	 * flag itself in that case; confirm against mgmt.c.
	 */
	if (!hci_dev_test_flag(hdev, HCI_MGMT) && !rp->status) {
		if (sent->support)
			hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
	}

	hci_dev_unlock(hdev);

	return rp->status;
}
696 
697 static u8 hci_cc_read_local_version(struct hci_dev *hdev, void *data,
698 				    struct sk_buff *skb)
699 {
700 	struct hci_rp_read_local_version *rp = data;
701 
702 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
703 
704 	if (rp->status)
705 		return rp->status;
706 
707 	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
708 	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
709 		hdev->hci_ver = rp->hci_ver;
710 		hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
711 		hdev->lmp_ver = rp->lmp_ver;
712 		hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
713 		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
714 	}
715 
716 	return rp->status;
717 }
718 
/* Command Complete handler for HCI_Read_Encryption_Key_Size: records
 * the negotiated key size on the connection, treating a failed read as
 * key size 0, and signals waiters of the encryption change.
 */
static u8 hci_cc_read_enc_key_size(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_read_enc_key_size *rp = data;
	struct hci_conn *conn;
	u16 handle;
	u8 status = rp->status;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	handle = le16_to_cpu(rp->handle);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		/* No matching connection: report a synthetic failure. */
		status = 0xFF;
		goto done;
	}

	/* While unexpected, the read_enc_key_size command may fail. The most
	 * secure approach is to then assume the key size is 0 to force a
	 * disconnection.
	 */
	if (status) {
		bt_dev_err(hdev, "failed to read key size for handle %u",
			   handle);
		conn->enc_key_size = 0;
	} else {
		conn->enc_key_size = rp->key_size;
		status = 0;
	}

	/* Always complete the encryption change so anything waiting on it
	 * can proceed (with enc_key_size now set either way).
	 */
	hci_encrypt_cfm(conn, 0);

done:
	hci_dev_unlock(hdev);

	return status;
}
759 
760 static u8 hci_cc_read_local_commands(struct hci_dev *hdev, void *data,
761 				     struct sk_buff *skb)
762 {
763 	struct hci_rp_read_local_commands *rp = data;
764 
765 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
766 
767 	if (rp->status)
768 		return rp->status;
769 
770 	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
771 	    hci_dev_test_flag(hdev, HCI_CONFIG))
772 		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
773 
774 	return rp->status;
775 }
776 
777 static u8 hci_cc_read_auth_payload_timeout(struct hci_dev *hdev, void *data,
778 					   struct sk_buff *skb)
779 {
780 	struct hci_rp_read_auth_payload_to *rp = data;
781 	struct hci_conn *conn;
782 
783 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
784 
785 	if (rp->status)
786 		return rp->status;
787 
788 	hci_dev_lock(hdev);
789 
790 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
791 	if (conn)
792 		conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);
793 
794 	hci_dev_unlock(hdev);
795 
796 	return rp->status;
797 }
798 
/* Command Complete handler for HCI_Write_Authenticated_Payload_Timeout:
 * mirrors the timeout we sent onto the connection and completes any
 * pending encryption-change waiters.
 */
static u8 hci_cc_write_auth_payload_timeout(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_rp_write_auth_payload_to *rp = data;
	struct hci_conn *conn;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (!conn) {
		/* Connection gone: return a synthetic failure status. */
		rp->status = 0xff;
		goto unlock;
	}

	if (!rp->status)
		/* sent + 2 skips the 16-bit handle at the start of the
		 * command payload to reach the timeout value.
		 */
		conn->auth_payload_timeout = get_unaligned_le16(sent + 2);

	hci_encrypt_cfm(conn, 0);

unlock:
	hci_dev_unlock(hdev);

	return rp->status;
}
830 
/* Command Complete handler for HCI_Read_Local_Supported_Features:
 * caches the 8-byte LMP feature mask and derives the supported ACL
 * packet types and (e)SCO link types from it.
 */
static u8 hci_cc_read_local_features(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	/* Multi-slot ACL packets. */
	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	/* SCO HV2/HV3 enable both the legacy packet type and the
	 * corresponding eSCO type.
	 */
	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type  |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type  |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	/* eSCO packet types, gated on the various eSCO feature bits. */
	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	/* EDR eSCO rates (2M/3M, 3-slot). */
	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);

	return rp->status;
}
882 
/* Command Complete handler for HCI_Read_Local_Extended_Features: caches
 * the feature page that was read and tracks the highest supported page,
 * honoring the broken-page-2 quirk.
 */
static u8 hci_cc_read_local_ext_features(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_rp_read_local_ext_features *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (hdev->max_page < rp->max_page) {
		/* Controllers with this quirk advertise page 2 but return
		 * garbage for it, so keep max_page as-is.
		 */
		if (test_bit(HCI_QUIRK_BROKEN_LOCAL_EXT_FEATURES_PAGE_2,
			     &hdev->quirks))
			bt_dev_warn(hdev, "broken local ext features page 2");
		else
			hdev->max_page = rp->max_page;
	}

	/* Bounds-check the page index before caching the 8-byte mask. */
	if (rp->page < HCI_MAX_PAGES)
		memcpy(hdev->features[rp->page], rp->features, 8);

	return rp->status;
}
906 
907 static u8 hci_cc_read_flow_control_mode(struct hci_dev *hdev, void *data,
908 					struct sk_buff *skb)
909 {
910 	struct hci_rp_read_flow_control_mode *rp = data;
911 
912 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
913 
914 	if (rp->status)
915 		return rp->status;
916 
917 	hdev->flow_ctl_mode = rp->mode;
918 
919 	return rp->status;
920 }
921 
/* Command Complete handler for HCI_Read_Buffer_Size: records the ACL
 * and SCO MTUs and packet counts, applying the fixup quirk for
 * controllers that report bogus SCO values, and primes the free-packet
 * counters.
 */
static u8 hci_cc_read_buffer_size(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu  = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	/* Some controllers report unusable SCO buffer sizes; override with
	 * known-good values when the quirk is set.
	 */
	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
		hdev->sco_mtu  = 64;
		hdev->sco_pkts = 8;
	}

	/* Start with all controller buffers available. */
	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);

	return rp->status;
}
950 
951 static u8 hci_cc_read_bd_addr(struct hci_dev *hdev, void *data,
952 			      struct sk_buff *skb)
953 {
954 	struct hci_rp_read_bd_addr *rp = data;
955 
956 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
957 
958 	if (rp->status)
959 		return rp->status;
960 
961 	if (test_bit(HCI_INIT, &hdev->flags))
962 		bacpy(&hdev->bdaddr, &rp->bdaddr);
963 
964 	if (hci_dev_test_flag(hdev, HCI_SETUP))
965 		bacpy(&hdev->setup_addr, &rp->bdaddr);
966 
967 	return rp->status;
968 }
969 
970 static u8 hci_cc_read_local_pairing_opts(struct hci_dev *hdev, void *data,
971 					 struct sk_buff *skb)
972 {
973 	struct hci_rp_read_local_pairing_opts *rp = data;
974 
975 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
976 
977 	if (rp->status)
978 		return rp->status;
979 
980 	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
981 	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
982 		hdev->pairing_opts = rp->pairing_opts;
983 		hdev->max_enc_key_size = rp->max_key_size;
984 	}
985 
986 	return rp->status;
987 }
988 
989 static u8 hci_cc_read_page_scan_activity(struct hci_dev *hdev, void *data,
990 					 struct sk_buff *skb)
991 {
992 	struct hci_rp_read_page_scan_activity *rp = data;
993 
994 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
995 
996 	if (rp->status)
997 		return rp->status;
998 
999 	if (test_bit(HCI_INIT, &hdev->flags)) {
1000 		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
1001 		hdev->page_scan_window = __le16_to_cpu(rp->window);
1002 	}
1003 
1004 	return rp->status;
1005 }
1006 
1007 static u8 hci_cc_write_page_scan_activity(struct hci_dev *hdev, void *data,
1008 					  struct sk_buff *skb)
1009 {
1010 	struct hci_ev_status *rp = data;
1011 	struct hci_cp_write_page_scan_activity *sent;
1012 
1013 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1014 
1015 	if (rp->status)
1016 		return rp->status;
1017 
1018 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
1019 	if (!sent)
1020 		return rp->status;
1021 
1022 	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
1023 	hdev->page_scan_window = __le16_to_cpu(sent->window);
1024 
1025 	return rp->status;
1026 }
1027 
1028 static u8 hci_cc_read_page_scan_type(struct hci_dev *hdev, void *data,
1029 				     struct sk_buff *skb)
1030 {
1031 	struct hci_rp_read_page_scan_type *rp = data;
1032 
1033 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1034 
1035 	if (rp->status)
1036 		return rp->status;
1037 
1038 	if (test_bit(HCI_INIT, &hdev->flags))
1039 		hdev->page_scan_type = rp->type;
1040 
1041 	return rp->status;
1042 }
1043 
1044 static u8 hci_cc_write_page_scan_type(struct hci_dev *hdev, void *data,
1045 				      struct sk_buff *skb)
1046 {
1047 	struct hci_ev_status *rp = data;
1048 	u8 *type;
1049 
1050 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1051 
1052 	if (rp->status)
1053 		return rp->status;
1054 
1055 	type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
1056 	if (type)
1057 		hdev->page_scan_type = *type;
1058 
1059 	return rp->status;
1060 }
1061 
1062 static u8 hci_cc_read_data_block_size(struct hci_dev *hdev, void *data,
1063 				      struct sk_buff *skb)
1064 {
1065 	struct hci_rp_read_data_block_size *rp = data;
1066 
1067 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1068 
1069 	if (rp->status)
1070 		return rp->status;
1071 
1072 	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
1073 	hdev->block_len = __le16_to_cpu(rp->block_len);
1074 	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
1075 
1076 	hdev->block_cnt = hdev->num_blocks;
1077 
1078 	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
1079 	       hdev->block_cnt, hdev->block_len);
1080 
1081 	return rp->status;
1082 }
1083 
/* Command Complete handler for HCI_Read_Clock: stores the local
 * controller clock on hdev, or the per-connection clock and accuracy
 * when the command targeted a connection.
 */
static u8 hci_cc_read_clock(struct hci_dev *hdev, void *data,
			    struct sk_buff *skb)
{
	struct hci_rp_read_clock *rp = data;
	struct hci_cp_read_clock *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);

	/* Need the original command to know which clock was requested. */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!cp)
		goto unlock;

	/* which == 0x00 selects the local clock; store it on hdev. */
	if (cp->which == 0x00) {
		hdev->clock = le32_to_cpu(rp->clock);
		goto unlock;
	}

	/* Otherwise the result belongs to a specific connection. */
	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		conn->clock = le32_to_cpu(rp->clock);
		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
	}

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}
1117 
/* Command Complete handler for HCI_Read_Local_AMP_Info: caches all AMP
 * controller capabilities (bandwidth, latency, PDU size, capability
 * bits, association size and flush timeouts) on hdev.
 */
static u8 hci_cc_read_local_amp_info(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_info *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->amp_status = rp->amp_status;
	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
	hdev->amp_type = rp->amp_type;
	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);

	return rp->status;
}
1141 
1142 static u8 hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, void *data,
1143 				       struct sk_buff *skb)
1144 {
1145 	struct hci_rp_read_inq_rsp_tx_power *rp = data;
1146 
1147 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1148 
1149 	if (rp->status)
1150 		return rp->status;
1151 
1152 	hdev->inq_tx_power = rp->tx_power;
1153 
1154 	return rp->status;
1155 }
1156 
1157 static u8 hci_cc_read_def_err_data_reporting(struct hci_dev *hdev, void *data,
1158 					     struct sk_buff *skb)
1159 {
1160 	struct hci_rp_read_def_err_data_reporting *rp = data;
1161 
1162 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1163 
1164 	if (rp->status)
1165 		return rp->status;
1166 
1167 	hdev->err_data_reporting = rp->err_data_reporting;
1168 
1169 	return rp->status;
1170 }
1171 
1172 static u8 hci_cc_write_def_err_data_reporting(struct hci_dev *hdev, void *data,
1173 					      struct sk_buff *skb)
1174 {
1175 	struct hci_ev_status *rp = data;
1176 	struct hci_cp_write_def_err_data_reporting *cp;
1177 
1178 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1179 
1180 	if (rp->status)
1181 		return rp->status;
1182 
1183 	cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
1184 	if (!cp)
1185 		return rp->status;
1186 
1187 	hdev->err_data_reporting = cp->err_data_reporting;
1188 
1189 	return rp->status;
1190 }
1191 
/* Command Complete handler for HCI_PIN_Code_Request_Reply: notifies the
 * management interface and, on success, records the PIN length on the
 * matching ACL connection.
 */
static u8 hci_cc_pin_code_reply(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	/* mgmt hears about the result even when the command failed. */
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	/* The PIN length only exists in the command we sent. */
	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}
1221 
1222 static u8 hci_cc_pin_code_neg_reply(struct hci_dev *hdev, void *data,
1223 				    struct sk_buff *skb)
1224 {
1225 	struct hci_rp_pin_code_neg_reply *rp = data;
1226 
1227 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1228 
1229 	hci_dev_lock(hdev);
1230 
1231 	if (hci_dev_test_flag(hdev, HCI_MGMT))
1232 		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
1233 						 rp->status);
1234 
1235 	hci_dev_unlock(hdev);
1236 
1237 	return rp->status;
1238 }
1239 
1240 static u8 hci_cc_le_read_buffer_size(struct hci_dev *hdev, void *data,
1241 				     struct sk_buff *skb)
1242 {
1243 	struct hci_rp_le_read_buffer_size *rp = data;
1244 
1245 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1246 
1247 	if (rp->status)
1248 		return rp->status;
1249 
1250 	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
1251 	hdev->le_pkts = rp->le_max_pkt;
1252 
1253 	hdev->le_cnt = hdev->le_pkts;
1254 
1255 	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
1256 
1257 	return rp->status;
1258 }
1259 
1260 static u8 hci_cc_le_read_local_features(struct hci_dev *hdev, void *data,
1261 					struct sk_buff *skb)
1262 {
1263 	struct hci_rp_le_read_local_features *rp = data;
1264 
1265 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1266 
1267 	if (rp->status)
1268 		return rp->status;
1269 
1270 	memcpy(hdev->le_features, rp->features, 8);
1271 
1272 	return rp->status;
1273 }
1274 
1275 static u8 hci_cc_le_read_adv_tx_power(struct hci_dev *hdev, void *data,
1276 				      struct sk_buff *skb)
1277 {
1278 	struct hci_rp_le_read_adv_tx_power *rp = data;
1279 
1280 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1281 
1282 	if (rp->status)
1283 		return rp->status;
1284 
1285 	hdev->adv_tx_power = rp->tx_power;
1286 
1287 	return rp->status;
1288 }
1289 
1290 static u8 hci_cc_user_confirm_reply(struct hci_dev *hdev, void *data,
1291 				    struct sk_buff *skb)
1292 {
1293 	struct hci_rp_user_confirm_reply *rp = data;
1294 
1295 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1296 
1297 	hci_dev_lock(hdev);
1298 
1299 	if (hci_dev_test_flag(hdev, HCI_MGMT))
1300 		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
1301 						 rp->status);
1302 
1303 	hci_dev_unlock(hdev);
1304 
1305 	return rp->status;
1306 }
1307 
1308 static u8 hci_cc_user_confirm_neg_reply(struct hci_dev *hdev, void *data,
1309 					struct sk_buff *skb)
1310 {
1311 	struct hci_rp_user_confirm_reply *rp = data;
1312 
1313 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1314 
1315 	hci_dev_lock(hdev);
1316 
1317 	if (hci_dev_test_flag(hdev, HCI_MGMT))
1318 		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
1319 						     ACL_LINK, 0, rp->status);
1320 
1321 	hci_dev_unlock(hdev);
1322 
1323 	return rp->status;
1324 }
1325 
1326 static u8 hci_cc_user_passkey_reply(struct hci_dev *hdev, void *data,
1327 				    struct sk_buff *skb)
1328 {
1329 	struct hci_rp_user_confirm_reply *rp = data;
1330 
1331 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1332 
1333 	hci_dev_lock(hdev);
1334 
1335 	if (hci_dev_test_flag(hdev, HCI_MGMT))
1336 		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1337 						 0, rp->status);
1338 
1339 	hci_dev_unlock(hdev);
1340 
1341 	return rp->status;
1342 }
1343 
1344 static u8 hci_cc_user_passkey_neg_reply(struct hci_dev *hdev, void *data,
1345 					struct sk_buff *skb)
1346 {
1347 	struct hci_rp_user_confirm_reply *rp = data;
1348 
1349 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1350 
1351 	hci_dev_lock(hdev);
1352 
1353 	if (hci_dev_test_flag(hdev, HCI_MGMT))
1354 		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1355 						     ACL_LINK, 0, rp->status);
1356 
1357 	hci_dev_unlock(hdev);
1358 
1359 	return rp->status;
1360 }
1361 
static u8 hci_cc_read_local_oob_data(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_data *rp = data;

	/* Only the status is handled here; the OOB payload itself is not
	 * cached in hdev.
	 */
	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	return rp->status;
}
1371 
static u8 hci_cc_read_local_oob_ext_data(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_ext_data *rp = data;

	/* Only the status is handled here; the extended OOB payload itself
	 * is not cached in hdev.
	 */
	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	return rp->status;
}
1381 
static u8 hci_cc_le_set_random_addr(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	bdaddr_t *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	/* The event has no parameters, so take the address that was set
	 * from the command we sent.
	 */
	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	bacpy(&hdev->random_addr, sent);

	/* If the programmed address is the current RPA, clear the expired
	 * flag and (re)arm the RPA expiry timer.
	 */
	if (!bacmp(&hdev->rpa, sent)) {
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired,
				   secs_to_jiffies(hdev->rpa_timeout));
	}

	hci_dev_unlock(hdev);

	return rp->status;
}
1411 
1412 static u8 hci_cc_le_set_default_phy(struct hci_dev *hdev, void *data,
1413 				    struct sk_buff *skb)
1414 {
1415 	struct hci_ev_status *rp = data;
1416 	struct hci_cp_le_set_default_phy *cp;
1417 
1418 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1419 
1420 	if (rp->status)
1421 		return rp->status;
1422 
1423 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
1424 	if (!cp)
1425 		return rp->status;
1426 
1427 	hci_dev_lock(hdev);
1428 
1429 	hdev->le_tx_def_phys = cp->tx_phys;
1430 	hdev->le_rx_def_phys = cp->rx_phys;
1431 
1432 	hci_dev_unlock(hdev);
1433 
1434 	return rp->status;
1435 }
1436 
static u8 hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_adv_set_rand_addr *cp;
	struct adv_info *adv;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
	/* Update only for a non-zero adv instance: handle 0x00 shall be
	 * using HCI_OP_LE_SET_RANDOM_ADDR instead, since that allows both
	 * extended and non-extended advertising.
	 */
	if (!cp || !cp->handle)
		return rp->status;

	hci_dev_lock(hdev);

	adv = hci_find_adv_instance(hdev, cp->handle);
	if (adv) {
		bacpy(&adv->random_addr, &cp->bdaddr);
		/* If the instance now uses the current RPA, rearm its
		 * per-instance RPA expiry work.
		 */
		if (!bacmp(&hdev->rpa, &cp->bdaddr)) {
			adv->rpa_expired = false;
			queue_delayed_work(hdev->workqueue,
					   &adv->rpa_expired_cb,
					   secs_to_jiffies(hdev->rpa_timeout));
		}
	}

	hci_dev_unlock(hdev);

	return rp->status;
}
1474 
static u8 hci_cc_le_remove_adv_set(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	u8 *instance;
	int err;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	/* The removed instance number comes from the command we sent */
	instance = hci_sent_cmd_data(hdev, HCI_OP_LE_REMOVE_ADV_SET);
	if (!instance)
		return rp->status;

	hci_dev_lock(hdev);

	err = hci_remove_adv_instance(hdev, *instance);
	if (!err)
		/* Pass along the socket attached to the sent command skb
		 * (may be NULL) so mgmt can attribute the removal.
		 */
		mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd), hdev,
					 *instance);

	hci_dev_unlock(hdev);

	return rp->status;
}
1502 
static u8 hci_cc_le_clear_adv_sets(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct adv_info *adv, *n;
	int err;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	/* Only act if this really was our Clear Advertising Sets command */
	if (!hci_sent_cmd_data(hdev, HCI_OP_LE_CLEAR_ADV_SETS))
		return rp->status;

	hci_dev_lock(hdev);

	/* Drop every local adv instance, announcing each removal to mgmt.
	 * The _safe iterator is needed because entries are deleted while
	 * walking the list.
	 */
	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
		u8 instance = adv->instance;

		err = hci_remove_adv_instance(hdev, instance);
		if (!err)
			mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd),
						 hdev, instance);
	}

	hci_dev_unlock(hdev);

	return rp->status;
}
1533 
1534 static u8 hci_cc_le_read_transmit_power(struct hci_dev *hdev, void *data,
1535 					struct sk_buff *skb)
1536 {
1537 	struct hci_rp_le_read_transmit_power *rp = data;
1538 
1539 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1540 
1541 	if (rp->status)
1542 		return rp->status;
1543 
1544 	hdev->min_le_tx_power = rp->min_le_tx_power;
1545 	hdev->max_le_tx_power = rp->max_le_tx_power;
1546 
1547 	return rp->status;
1548 }
1549 
1550 static u8 hci_cc_le_set_privacy_mode(struct hci_dev *hdev, void *data,
1551 				     struct sk_buff *skb)
1552 {
1553 	struct hci_ev_status *rp = data;
1554 	struct hci_cp_le_set_privacy_mode *cp;
1555 	struct hci_conn_params *params;
1556 
1557 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1558 
1559 	if (rp->status)
1560 		return rp->status;
1561 
1562 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PRIVACY_MODE);
1563 	if (!cp)
1564 		return rp->status;
1565 
1566 	hci_dev_lock(hdev);
1567 
1568 	params = hci_conn_params_lookup(hdev, &cp->bdaddr, cp->bdaddr_type);
1569 	if (params)
1570 		WRITE_ONCE(params->privacy_mode, cp->mode);
1571 
1572 	hci_dev_unlock(hdev);
1573 
1574 	return rp->status;
1575 }
1576 
static u8 hci_cc_le_set_adv_enable(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u8 *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	/* The enable value is recovered from the command we sent */
	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	/* If we're doing connection initiation as peripheral, set a
	 * timeout in case something goes wrong.
	 */
	if (*sent) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

	hci_dev_unlock(hdev);

	return rp->status;
}
1615 
static u8 hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_adv_enable *cp;
	struct hci_cp_ext_adv_set *set;
	struct adv_info *adv = NULL, *n;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
	if (!cp)
		return rp->status;

	/* The per-set entries follow the command header; only the first
	 * set is consulted here.
	 */
	set = (void *)cp->data;

	hci_dev_lock(hdev);

	if (cp->num_of_sets)
		adv = hci_find_adv_instance(hdev, set->handle);

	if (cp->enable) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		if (adv && !adv->periodic)
			adv->enabled = true;

		/* If an LE connection attempt is in flight, arm its
		 * connect timeout.
		 */
		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		if (cp->num_of_sets) {
			if (adv)
				adv->enabled = false;

			/* If just one instance was disabled check if there are
			 * any other instance enabled before clearing HCI_LE_ADV
			 */
			list_for_each_entry_safe(adv, n, &hdev->adv_instances,
						 list) {
				if (adv->enabled)
					goto unlock;
			}
		} else {
			/* All instances shall be considered disabled */
			list_for_each_entry_safe(adv, n, &hdev->adv_instances,
						 list)
				adv->enabled = false;
		}

		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}
1680 
1681 static u8 hci_cc_le_set_scan_param(struct hci_dev *hdev, void *data,
1682 				   struct sk_buff *skb)
1683 {
1684 	struct hci_cp_le_set_scan_param *cp;
1685 	struct hci_ev_status *rp = data;
1686 
1687 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1688 
1689 	if (rp->status)
1690 		return rp->status;
1691 
1692 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1693 	if (!cp)
1694 		return rp->status;
1695 
1696 	hci_dev_lock(hdev);
1697 
1698 	hdev->le_scan_type = cp->type;
1699 
1700 	hci_dev_unlock(hdev);
1701 
1702 	return rp->status;
1703 }
1704 
1705 static u8 hci_cc_le_set_ext_scan_param(struct hci_dev *hdev, void *data,
1706 				       struct sk_buff *skb)
1707 {
1708 	struct hci_cp_le_set_ext_scan_params *cp;
1709 	struct hci_ev_status *rp = data;
1710 	struct hci_cp_le_scan_phy_params *phy_param;
1711 
1712 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1713 
1714 	if (rp->status)
1715 		return rp->status;
1716 
1717 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
1718 	if (!cp)
1719 		return rp->status;
1720 
1721 	phy_param = (void *)cp->data;
1722 
1723 	hci_dev_lock(hdev);
1724 
1725 	hdev->le_scan_type = phy_param->type;
1726 
1727 	hci_dev_unlock(hdev);
1728 
1729 	return rp->status;
1730 }
1731 
1732 static bool has_pending_adv_report(struct hci_dev *hdev)
1733 {
1734 	struct discovery_state *d = &hdev->discovery;
1735 
1736 	return bacmp(&d->last_adv_addr, BDADDR_ANY);
1737 }
1738 
1739 static void clear_pending_adv_report(struct hci_dev *hdev)
1740 {
1741 	struct discovery_state *d = &hdev->discovery;
1742 
1743 	bacpy(&d->last_adv_addr, BDADDR_ANY);
1744 	d->last_adv_data_len = 0;
1745 }
1746 
1747 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1748 				     u8 bdaddr_type, s8 rssi, u32 flags,
1749 				     u8 *data, u8 len)
1750 {
1751 	struct discovery_state *d = &hdev->discovery;
1752 
1753 	if (len > max_adv_len(hdev))
1754 		return;
1755 
1756 	bacpy(&d->last_adv_addr, bdaddr);
1757 	d->last_adv_addr_type = bdaddr_type;
1758 	d->last_adv_rssi = rssi;
1759 	d->last_adv_flags = flags;
1760 	memcpy(d->last_adv_data, data, len);
1761 	d->last_adv_data_len = len;
1762 }
1763 
/* Common bookkeeping once an LE scan enable/disable command completed,
 * shared by the legacy and extended scan enable handlers.
 */
static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
{
	hci_dev_lock(hdev);

	switch (enable) {
	case LE_SCAN_ENABLE:
		hci_dev_set_flag(hdev, HCI_LE_SCAN);
		/* Active scanning starts with a clean report cache */
		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
			clear_pending_adv_report(hdev);
		if (hci_dev_test_flag(hdev, HCI_MESH))
			hci_discovery_set_state(hdev, DISCOVERY_FINDING);
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			/* Flush the still-cached report to userspace */
			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		hci_dev_clear_flag(hdev, HCI_LE_SCAN);

		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request. Mark
		 * therefore discovery as stopped.
		 */
		if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
			 hdev->discovery.state == DISCOVERY_FINDING)
			queue_work(hdev->workqueue, &hdev->reenable_adv_work);

		break;

	default:
		bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
			   enable);
		break;
	}

	hci_dev_unlock(hdev);
}
1819 
1820 static u8 hci_cc_le_set_scan_enable(struct hci_dev *hdev, void *data,
1821 				    struct sk_buff *skb)
1822 {
1823 	struct hci_cp_le_set_scan_enable *cp;
1824 	struct hci_ev_status *rp = data;
1825 
1826 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1827 
1828 	if (rp->status)
1829 		return rp->status;
1830 
1831 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1832 	if (!cp)
1833 		return rp->status;
1834 
1835 	le_set_scan_enable_complete(hdev, cp->enable);
1836 
1837 	return rp->status;
1838 }
1839 
1840 static u8 hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev, void *data,
1841 					struct sk_buff *skb)
1842 {
1843 	struct hci_cp_le_set_ext_scan_enable *cp;
1844 	struct hci_ev_status *rp = data;
1845 
1846 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1847 
1848 	if (rp->status)
1849 		return rp->status;
1850 
1851 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
1852 	if (!cp)
1853 		return rp->status;
1854 
1855 	le_set_scan_enable_complete(hdev, cp->enable);
1856 
1857 	return rp->status;
1858 }
1859 
1860 static u8 hci_cc_le_read_num_adv_sets(struct hci_dev *hdev, void *data,
1861 				      struct sk_buff *skb)
1862 {
1863 	struct hci_rp_le_read_num_supported_adv_sets *rp = data;
1864 
1865 	bt_dev_dbg(hdev, "status 0x%2.2x No of Adv sets %u", rp->status,
1866 		   rp->num_of_sets);
1867 
1868 	if (rp->status)
1869 		return rp->status;
1870 
1871 	hdev->le_num_of_adv_sets = rp->num_of_sets;
1872 
1873 	return rp->status;
1874 }
1875 
1876 static u8 hci_cc_le_read_accept_list_size(struct hci_dev *hdev, void *data,
1877 					  struct sk_buff *skb)
1878 {
1879 	struct hci_rp_le_read_accept_list_size *rp = data;
1880 
1881 	bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);
1882 
1883 	if (rp->status)
1884 		return rp->status;
1885 
1886 	hdev->le_accept_list_size = rp->size;
1887 
1888 	return rp->status;
1889 }
1890 
1891 static u8 hci_cc_le_clear_accept_list(struct hci_dev *hdev, void *data,
1892 				      struct sk_buff *skb)
1893 {
1894 	struct hci_ev_status *rp = data;
1895 
1896 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1897 
1898 	if (rp->status)
1899 		return rp->status;
1900 
1901 	hci_dev_lock(hdev);
1902 	hci_bdaddr_list_clear(&hdev->le_accept_list);
1903 	hci_dev_unlock(hdev);
1904 
1905 	return rp->status;
1906 }
1907 
1908 static u8 hci_cc_le_add_to_accept_list(struct hci_dev *hdev, void *data,
1909 				       struct sk_buff *skb)
1910 {
1911 	struct hci_cp_le_add_to_accept_list *sent;
1912 	struct hci_ev_status *rp = data;
1913 
1914 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1915 
1916 	if (rp->status)
1917 		return rp->status;
1918 
1919 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
1920 	if (!sent)
1921 		return rp->status;
1922 
1923 	hci_dev_lock(hdev);
1924 	hci_bdaddr_list_add(&hdev->le_accept_list, &sent->bdaddr,
1925 			    sent->bdaddr_type);
1926 	hci_dev_unlock(hdev);
1927 
1928 	return rp->status;
1929 }
1930 
1931 static u8 hci_cc_le_del_from_accept_list(struct hci_dev *hdev, void *data,
1932 					 struct sk_buff *skb)
1933 {
1934 	struct hci_cp_le_del_from_accept_list *sent;
1935 	struct hci_ev_status *rp = data;
1936 
1937 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1938 
1939 	if (rp->status)
1940 		return rp->status;
1941 
1942 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST);
1943 	if (!sent)
1944 		return rp->status;
1945 
1946 	hci_dev_lock(hdev);
1947 	hci_bdaddr_list_del(&hdev->le_accept_list, &sent->bdaddr,
1948 			    sent->bdaddr_type);
1949 	hci_dev_unlock(hdev);
1950 
1951 	return rp->status;
1952 }
1953 
1954 static u8 hci_cc_le_read_supported_states(struct hci_dev *hdev, void *data,
1955 					  struct sk_buff *skb)
1956 {
1957 	struct hci_rp_le_read_supported_states *rp = data;
1958 
1959 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1960 
1961 	if (rp->status)
1962 		return rp->status;
1963 
1964 	memcpy(hdev->le_states, rp->le_states, 8);
1965 
1966 	return rp->status;
1967 }
1968 
1969 static u8 hci_cc_le_read_def_data_len(struct hci_dev *hdev, void *data,
1970 				      struct sk_buff *skb)
1971 {
1972 	struct hci_rp_le_read_def_data_len *rp = data;
1973 
1974 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1975 
1976 	if (rp->status)
1977 		return rp->status;
1978 
1979 	hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
1980 	hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
1981 
1982 	return rp->status;
1983 }
1984 
1985 static u8 hci_cc_le_write_def_data_len(struct hci_dev *hdev, void *data,
1986 				       struct sk_buff *skb)
1987 {
1988 	struct hci_cp_le_write_def_data_len *sent;
1989 	struct hci_ev_status *rp = data;
1990 
1991 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1992 
1993 	if (rp->status)
1994 		return rp->status;
1995 
1996 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
1997 	if (!sent)
1998 		return rp->status;
1999 
2000 	hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
2001 	hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
2002 
2003 	return rp->status;
2004 }
2005 
2006 static u8 hci_cc_le_add_to_resolv_list(struct hci_dev *hdev, void *data,
2007 				       struct sk_buff *skb)
2008 {
2009 	struct hci_cp_le_add_to_resolv_list *sent;
2010 	struct hci_ev_status *rp = data;
2011 
2012 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2013 
2014 	if (rp->status)
2015 		return rp->status;
2016 
2017 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
2018 	if (!sent)
2019 		return rp->status;
2020 
2021 	hci_dev_lock(hdev);
2022 	hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
2023 				sent->bdaddr_type, sent->peer_irk,
2024 				sent->local_irk);
2025 	hci_dev_unlock(hdev);
2026 
2027 	return rp->status;
2028 }
2029 
2030 static u8 hci_cc_le_del_from_resolv_list(struct hci_dev *hdev, void *data,
2031 					 struct sk_buff *skb)
2032 {
2033 	struct hci_cp_le_del_from_resolv_list *sent;
2034 	struct hci_ev_status *rp = data;
2035 
2036 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2037 
2038 	if (rp->status)
2039 		return rp->status;
2040 
2041 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
2042 	if (!sent)
2043 		return rp->status;
2044 
2045 	hci_dev_lock(hdev);
2046 	hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
2047 			    sent->bdaddr_type);
2048 	hci_dev_unlock(hdev);
2049 
2050 	return rp->status;
2051 }
2052 
2053 static u8 hci_cc_le_clear_resolv_list(struct hci_dev *hdev, void *data,
2054 				      struct sk_buff *skb)
2055 {
2056 	struct hci_ev_status *rp = data;
2057 
2058 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2059 
2060 	if (rp->status)
2061 		return rp->status;
2062 
2063 	hci_dev_lock(hdev);
2064 	hci_bdaddr_list_clear(&hdev->le_resolv_list);
2065 	hci_dev_unlock(hdev);
2066 
2067 	return rp->status;
2068 }
2069 
2070 static u8 hci_cc_le_read_resolv_list_size(struct hci_dev *hdev, void *data,
2071 					  struct sk_buff *skb)
2072 {
2073 	struct hci_rp_le_read_resolv_list_size *rp = data;
2074 
2075 	bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);
2076 
2077 	if (rp->status)
2078 		return rp->status;
2079 
2080 	hdev->le_resolv_list_size = rp->size;
2081 
2082 	return rp->status;
2083 }
2084 
2085 static u8 hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev, void *data,
2086 					       struct sk_buff *skb)
2087 {
2088 	struct hci_ev_status *rp = data;
2089 	__u8 *sent;
2090 
2091 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2092 
2093 	if (rp->status)
2094 		return rp->status;
2095 
2096 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
2097 	if (!sent)
2098 		return rp->status;
2099 
2100 	hci_dev_lock(hdev);
2101 
2102 	if (*sent)
2103 		hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
2104 	else
2105 		hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);
2106 
2107 	hci_dev_unlock(hdev);
2108 
2109 	return rp->status;
2110 }
2111 
2112 static u8 hci_cc_le_read_max_data_len(struct hci_dev *hdev, void *data,
2113 				      struct sk_buff *skb)
2114 {
2115 	struct hci_rp_le_read_max_data_len *rp = data;
2116 
2117 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2118 
2119 	if (rp->status)
2120 		return rp->status;
2121 
2122 	hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
2123 	hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
2124 	hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
2125 	hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
2126 
2127 	return rp->status;
2128 }
2129 
static u8 hci_cc_write_le_host_supported(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_cp_write_le_host_supported *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	/* The event has no parameters; the written values come from the
	 * command we sent.
	 */
	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	/* Keep the host feature bits and the LE-related flags consistent
	 * with the LE Supported (Host) value that was written.
	 */
	if (sent->le) {
		hdev->features[1][0] |= LMP_HOST_LE;
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	} else {
		hdev->features[1][0] &= ~LMP_HOST_LE;
		hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);
	}

	if (sent->simul)
		hdev->features[1][0] |= LMP_HOST_LE_BREDR;
	else
		hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;

	hci_dev_unlock(hdev);

	return rp->status;
}
2165 
2166 static u8 hci_cc_set_adv_param(struct hci_dev *hdev, void *data,
2167 			       struct sk_buff *skb)
2168 {
2169 	struct hci_cp_le_set_adv_param *cp;
2170 	struct hci_ev_status *rp = data;
2171 
2172 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2173 
2174 	if (rp->status)
2175 		return rp->status;
2176 
2177 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
2178 	if (!cp)
2179 		return rp->status;
2180 
2181 	hci_dev_lock(hdev);
2182 	hdev->adv_addr_type = cp->own_address_type;
2183 	hci_dev_unlock(hdev);
2184 
2185 	return rp->status;
2186 }
2187 
static u8 hci_cc_set_ext_adv_param(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_le_set_ext_adv_params *rp = data;
	struct hci_cp_le_set_ext_adv_params *cp;
	struct adv_info *adv_instance;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);
	hdev->adv_addr_type = cp->own_addr_type;
	/* The selected TX power is stored on the matching adv instance,
	 * or on hdev itself for the default instance 0x00.
	 */
	if (!cp->handle) {
		/* Store in hdev for instance 0 */
		hdev->adv_tx_power = rp->tx_power;
	} else {
		adv_instance = hci_find_adv_instance(hdev, cp->handle);
		if (adv_instance)
			adv_instance->tx_power = rp->tx_power;
	}
	/* Update adv data as tx power is known now */
	hci_update_adv_data(hdev, cp->handle);

	hci_dev_unlock(hdev);

	return rp->status;
}
2221 
2222 static u8 hci_cc_read_rssi(struct hci_dev *hdev, void *data,
2223 			   struct sk_buff *skb)
2224 {
2225 	struct hci_rp_read_rssi *rp = data;
2226 	struct hci_conn *conn;
2227 
2228 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2229 
2230 	if (rp->status)
2231 		return rp->status;
2232 
2233 	hci_dev_lock(hdev);
2234 
2235 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
2236 	if (conn)
2237 		conn->rssi = rp->rssi;
2238 
2239 	hci_dev_unlock(hdev);
2240 
2241 	return rp->status;
2242 }
2243 
2244 static u8 hci_cc_read_tx_power(struct hci_dev *hdev, void *data,
2245 			       struct sk_buff *skb)
2246 {
2247 	struct hci_cp_read_tx_power *sent;
2248 	struct hci_rp_read_tx_power *rp = data;
2249 	struct hci_conn *conn;
2250 
2251 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2252 
2253 	if (rp->status)
2254 		return rp->status;
2255 
2256 	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
2257 	if (!sent)
2258 		return rp->status;
2259 
2260 	hci_dev_lock(hdev);
2261 
2262 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
2263 	if (!conn)
2264 		goto unlock;
2265 
2266 	switch (sent->type) {
2267 	case 0x00:
2268 		conn->tx_power = rp->tx_power;
2269 		break;
2270 	case 0x01:
2271 		conn->max_tx_power = rp->tx_power;
2272 		break;
2273 	}
2274 
2275 unlock:
2276 	hci_dev_unlock(hdev);
2277 	return rp->status;
2278 }
2279 
2280 static u8 hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, void *data,
2281 				      struct sk_buff *skb)
2282 {
2283 	struct hci_ev_status *rp = data;
2284 	u8 *mode;
2285 
2286 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2287 
2288 	if (rp->status)
2289 		return rp->status;
2290 
2291 	mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
2292 	if (mode)
2293 		hdev->ssp_debug_mode = *mode;
2294 
2295 	return rp->status;
2296 }
2297 
2298 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
2299 {
2300 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2301 
2302 	if (status) {
2303 		hci_conn_check_pending(hdev);
2304 		return;
2305 	}
2306 
2307 	set_bit(HCI_INQUIRY, &hdev->flags);
2308 }
2309 
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	bt_dev_dbg(hdev, "bdaddr %pMR hcon %p", &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			/* Status 0x0c on an early attempt keeps the
			 * connection around in BT_CONNECT2 for a retry;
			 * any other failure tears it down.
			 */
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		/* Command accepted: make sure a connection object exists
		 * for the upcoming Connection Complete event.
		 */
		if (!conn) {
			conn = hci_conn_add_unset(hdev, ACL_LINK, &cp->bdaddr,
						  HCI_ROLE_MASTER);
			if (!conn)
				bt_dev_err(hdev, "no memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}
2347 
static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_add_sco *cp;
	struct hci_conn *acl;
	struct hci_link *link;
	__u16 handle;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	/* Only command failures need handling here */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
	if (!cp)
		return;

	/* The command refers to the ACL connection the SCO link is
	 * attached to.
	 */
	handle = __le16_to_cpu(cp->handle);

	bt_dev_dbg(hdev, "handle 0x%4.4x", handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		/* Fail the first (pending) SCO link hanging off the ACL */
		link = list_first_entry_or_null(&acl->link_list,
						struct hci_link, list);
		if (link && link->conn) {
			link->conn->state = BT_CLOSED;

			hci_connect_cfm(link->conn, status);
			hci_conn_del(link->conn);
		}
	}

	hci_dev_unlock(hdev);
}
2384 
2385 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
2386 {
2387 	struct hci_cp_auth_requested *cp;
2388 	struct hci_conn *conn;
2389 
2390 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2391 
2392 	if (!status)
2393 		return;
2394 
2395 	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
2396 	if (!cp)
2397 		return;
2398 
2399 	hci_dev_lock(hdev);
2400 
2401 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2402 	if (conn) {
2403 		if (conn->state == BT_CONFIG) {
2404 			hci_connect_cfm(conn, status);
2405 			hci_conn_drop(conn);
2406 		}
2407 	}
2408 
2409 	hci_dev_unlock(hdev);
2410 }
2411 
2412 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
2413 {
2414 	struct hci_cp_set_conn_encrypt *cp;
2415 	struct hci_conn *conn;
2416 
2417 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2418 
2419 	if (!status)
2420 		return;
2421 
2422 	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
2423 	if (!cp)
2424 		return;
2425 
2426 	hci_dev_lock(hdev);
2427 
2428 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2429 	if (conn) {
2430 		if (conn->state == BT_CONFIG) {
2431 			hci_connect_cfm(conn, status);
2432 			hci_conn_drop(conn);
2433 		}
2434 	}
2435 
2436 	hci_dev_unlock(hdev);
2437 }
2438 
2439 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
2440 				    struct hci_conn *conn)
2441 {
2442 	if (conn->state != BT_CONFIG || !conn->out)
2443 		return 0;
2444 
2445 	if (conn->pending_sec_level == BT_SECURITY_SDP)
2446 		return 0;
2447 
2448 	/* Only request authentication for SSP connections or non-SSP
2449 	 * devices with sec_level MEDIUM or HIGH or if MITM protection
2450 	 * is requested.
2451 	 */
2452 	if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
2453 	    conn->pending_sec_level != BT_SECURITY_FIPS &&
2454 	    conn->pending_sec_level != BT_SECURITY_HIGH &&
2455 	    conn->pending_sec_level != BT_SECURITY_MEDIUM)
2456 		return 0;
2457 
2458 	return 1;
2459 }
2460 
2461 static int hci_resolve_name(struct hci_dev *hdev,
2462 				   struct inquiry_entry *e)
2463 {
2464 	struct hci_cp_remote_name_req cp;
2465 
2466 	memset(&cp, 0, sizeof(cp));
2467 
2468 	bacpy(&cp.bdaddr, &e->data.bdaddr);
2469 	cp.pscan_rep_mode = e->data.pscan_rep_mode;
2470 	cp.pscan_mode = e->data.pscan_mode;
2471 	cp.clock_offset = e->data.clock_offset;
2472 
2473 	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2474 }
2475 
2476 static bool hci_resolve_next_name(struct hci_dev *hdev)
2477 {
2478 	struct discovery_state *discov = &hdev->discovery;
2479 	struct inquiry_entry *e;
2480 
2481 	if (list_empty(&discov->resolve))
2482 		return false;
2483 
2484 	/* We should stop if we already spent too much time resolving names. */
2485 	if (time_after(jiffies, discov->name_resolve_timeout)) {
2486 		bt_dev_warn_ratelimited(hdev, "Name resolve takes too long.");
2487 		return false;
2488 	}
2489 
2490 	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2491 	if (!e)
2492 		return false;
2493 
2494 	if (hci_resolve_name(hdev, e) == 0) {
2495 		e->name_state = NAME_PENDING;
2496 		return true;
2497 	}
2498 
2499 	return false;
2500 }
2501 
/* Process the outcome of a remote name lookup for @bdaddr.
 *
 * If @conn exists and is actually connected, this also marks it as
 * mgmt-connected and sends the mgmt Device Connected event including
 * the name.  It then advances the discovery name-resolution state
 * machine: the resolved entry leaves the resolve list, the next pending
 * name is requested, and discovery is stopped when nothing remains.
 * @name is NULL (with @name_len 0) when the lookup failed.
 */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	/* Update the mgmt connected state if necessary. Be careful with
	 * conn objects that exist but are not (yet) connected however.
	 * Only those in BT_CONFIG or BT_CONNECTED states can be
	 * considered connected.
	 */
	if (conn &&
	    (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, name, name_len);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in a list of found devices names of which
	 * are pending. there is no need to continue resolving a next name as it
	 * will be done upon receiving another Remote Name Request Complete
	 * Event */
	if (!e)
		return;

	list_del(&e->list);

	/* Record whether the name is now known and report it to mgmt. */
	e->name_state = name ? NAME_KNOWN : NAME_NOT_KNOWN;
	mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00, e->data.rssi,
			 name, name_len);

	/* More names still pending: keep resolving. */
	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
2547 
/* Command status handler for HCI_Remote_Name_Request.
 *
 * On failure there will be no Remote Name Request Complete event, so
 * any pending mgmt name lookup is finished here (with no name) and, if
 * the connection still needs outgoing authentication, the
 * Authentication Requested command is sent now instead.
 */
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	/* If successful wait for the name req complete event before
	 * checking for the need to do authentication */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	/* Complete the pending name lookup with "no name" for mgmt. */
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	/* Start authentication now unless one is already pending. */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested auth_cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		auth_cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(auth_cp), &auth_cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
2590 
2591 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
2592 {
2593 	struct hci_cp_read_remote_features *cp;
2594 	struct hci_conn *conn;
2595 
2596 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2597 
2598 	if (!status)
2599 		return;
2600 
2601 	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
2602 	if (!cp)
2603 		return;
2604 
2605 	hci_dev_lock(hdev);
2606 
2607 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2608 	if (conn) {
2609 		if (conn->state == BT_CONFIG) {
2610 			hci_connect_cfm(conn, status);
2611 			hci_conn_drop(conn);
2612 		}
2613 	}
2614 
2615 	hci_dev_unlock(hdev);
2616 }
2617 
2618 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
2619 {
2620 	struct hci_cp_read_remote_ext_features *cp;
2621 	struct hci_conn *conn;
2622 
2623 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2624 
2625 	if (!status)
2626 		return;
2627 
2628 	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
2629 	if (!cp)
2630 		return;
2631 
2632 	hci_dev_lock(hdev);
2633 
2634 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2635 	if (conn) {
2636 		if (conn->state == BT_CONFIG) {
2637 			hci_connect_cfm(conn, status);
2638 			hci_conn_drop(conn);
2639 		}
2640 	}
2641 
2642 	hci_dev_unlock(hdev);
2643 }
2644 
2645 static void hci_setup_sync_conn_status(struct hci_dev *hdev, __u16 handle,
2646 				       __u8 status)
2647 {
2648 	struct hci_conn *acl;
2649 	struct hci_link *link;
2650 
2651 	bt_dev_dbg(hdev, "handle 0x%4.4x status 0x%2.2x", handle, status);
2652 
2653 	hci_dev_lock(hdev);
2654 
2655 	acl = hci_conn_hash_lookup_handle(hdev, handle);
2656 	if (acl) {
2657 		link = list_first_entry_or_null(&acl->link_list,
2658 						struct hci_link, list);
2659 		if (link && link->conn) {
2660 			link->conn->state = BT_CLOSED;
2661 
2662 			hci_connect_cfm(link->conn, status);
2663 			hci_conn_del(link->conn);
2664 		}
2665 	}
2666 
2667 	hci_dev_unlock(hdev);
2668 }
2669 
2670 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2671 {
2672 	struct hci_cp_setup_sync_conn *cp;
2673 
2674 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2675 
2676 	if (!status)
2677 		return;
2678 
2679 	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
2680 	if (!cp)
2681 		return;
2682 
2683 	hci_setup_sync_conn_status(hdev, __le16_to_cpu(cp->handle), status);
2684 }
2685 
2686 static void hci_cs_enhanced_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2687 {
2688 	struct hci_cp_enhanced_setup_sync_conn *cp;
2689 
2690 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2691 
2692 	if (!status)
2693 		return;
2694 
2695 	cp = hci_sent_cmd_data(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN);
2696 	if (!cp)
2697 		return;
2698 
2699 	hci_setup_sync_conn_status(hdev, __le16_to_cpu(cp->handle), status);
2700 }
2701 
2702 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
2703 {
2704 	struct hci_cp_sniff_mode *cp;
2705 	struct hci_conn *conn;
2706 
2707 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2708 
2709 	if (!status)
2710 		return;
2711 
2712 	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
2713 	if (!cp)
2714 		return;
2715 
2716 	hci_dev_lock(hdev);
2717 
2718 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2719 	if (conn) {
2720 		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2721 
2722 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2723 			hci_sco_setup(conn, status);
2724 	}
2725 
2726 	hci_dev_unlock(hdev);
2727 }
2728 
2729 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
2730 {
2731 	struct hci_cp_exit_sniff_mode *cp;
2732 	struct hci_conn *conn;
2733 
2734 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2735 
2736 	if (!status)
2737 		return;
2738 
2739 	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
2740 	if (!cp)
2741 		return;
2742 
2743 	hci_dev_lock(hdev);
2744 
2745 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2746 	if (conn) {
2747 		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2748 
2749 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2750 			hci_sco_setup(conn, status);
2751 	}
2752 
2753 	hci_dev_unlock(hdev);
2754 }
2755 
/* Command status handler for HCI_Disconnect.
 *
 * In the normal case (status 0x00 and not suspended) nothing is done
 * here and the cleanup happens in the Disconnection Complete handler.
 * On failure, or while the controller is suspended, the connection is
 * cleaned up immediately instead.
 */
static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_disconnect *cp;
	struct hci_conn_params *params;
	struct hci_conn *conn;
	bool mgmt_conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	/* Wait for HCI_EV_DISCONN_COMPLETE if status 0x00 and not suspended
	 * otherwise cleanup the connection immediately.
	 */
	if (!status && !hdev->suspended)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (!conn)
		goto unlock;

	if (status) {
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, status);

		/* Re-enable advertising for an LE peripheral link, which
		 * may have been disabled by the connection.
		 */
		if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
			hdev->cur_adv_instance = conn->adv_instance;
			hci_enable_advertising(hdev);
		}

		/* Inform sockets conn is gone before we delete it */
		hci_disconn_cfm(conn, HCI_ERROR_UNSPECIFIED);

		goto done;
	}

	mgmt_conn = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);

	if (conn->type == ACL_LINK) {
		if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
			hci_remove_link_key(hdev, &conn->dst);
	}

	/* Re-arm auto-connection for devices configured to reconnect. */
	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_LINK_LOSS:
			if (cp->reason != HCI_ERROR_CONNECTION_TIMEOUT)
				break;
			fallthrough;

		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			hci_pend_le_list_del_init(params);
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
			break;

		default:
			break;
		}
	}

	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
				 cp->reason, mgmt_conn);

	hci_disconn_cfm(conn, cp->reason);

done:
	/* If the disconnection failed for any reason, the upper layer
	 * does not retry to disconnect in current implementation.
	 * Hence, we need to do some basic cleanup here and re-enable
	 * advertising if necessary.
	 */
	hci_conn_del(conn);
unlock:
	hci_dev_unlock(hdev);
}
2837 
2838 static u8 ev_bdaddr_type(struct hci_dev *hdev, u8 type, bool *resolved)
2839 {
2840 	/* When using controller based address resolution, then the new
2841 	 * address types 0x02 and 0x03 are used. These types need to be
2842 	 * converted back into either public address or random address type
2843 	 */
2844 	switch (type) {
2845 	case ADDR_LE_DEV_PUBLIC_RESOLVED:
2846 		if (resolved)
2847 			*resolved = true;
2848 		return ADDR_LE_DEV_PUBLIC;
2849 	case ADDR_LE_DEV_RANDOM_RESOLVED:
2850 		if (resolved)
2851 			*resolved = true;
2852 		return ADDR_LE_DEV_RANDOM;
2853 	}
2854 
2855 	if (resolved)
2856 		*resolved = false;
2857 	return type;
2858 }
2859 
2860 static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
2861 			      u8 peer_addr_type, u8 own_address_type,
2862 			      u8 filter_policy)
2863 {
2864 	struct hci_conn *conn;
2865 
2866 	conn = hci_conn_hash_lookup_le(hdev, peer_addr,
2867 				       peer_addr_type);
2868 	if (!conn)
2869 		return;
2870 
2871 	own_address_type = ev_bdaddr_type(hdev, own_address_type, NULL);
2872 
2873 	/* Store the initiator and responder address information which
2874 	 * is needed for SMP. These values will not change during the
2875 	 * lifetime of the connection.
2876 	 */
2877 	conn->init_addr_type = own_address_type;
2878 	if (own_address_type == ADDR_LE_DEV_RANDOM)
2879 		bacpy(&conn->init_addr, &hdev->random_addr);
2880 	else
2881 		bacpy(&conn->init_addr, &hdev->bdaddr);
2882 
2883 	conn->resp_addr_type = peer_addr_type;
2884 	bacpy(&conn->resp_addr, peer_addr);
2885 }
2886 
2887 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
2888 {
2889 	struct hci_cp_le_create_conn *cp;
2890 
2891 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2892 
2893 	/* All connection failure handling is taken care of by the
2894 	 * hci_conn_failed function which is triggered by the HCI
2895 	 * request completion callbacks used for connecting.
2896 	 */
2897 	if (status)
2898 		return;
2899 
2900 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
2901 	if (!cp)
2902 		return;
2903 
2904 	hci_dev_lock(hdev);
2905 
2906 	cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2907 			  cp->own_address_type, cp->filter_policy);
2908 
2909 	hci_dev_unlock(hdev);
2910 }
2911 
2912 static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
2913 {
2914 	struct hci_cp_le_ext_create_conn *cp;
2915 
2916 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2917 
2918 	/* All connection failure handling is taken care of by the
2919 	 * hci_conn_failed function which is triggered by the HCI
2920 	 * request completion callbacks used for connecting.
2921 	 */
2922 	if (status)
2923 		return;
2924 
2925 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
2926 	if (!cp)
2927 		return;
2928 
2929 	hci_dev_lock(hdev);
2930 
2931 	cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2932 			  cp->own_addr_type, cp->filter_policy);
2933 
2934 	hci_dev_unlock(hdev);
2935 }
2936 
2937 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
2938 {
2939 	struct hci_cp_le_read_remote_features *cp;
2940 	struct hci_conn *conn;
2941 
2942 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2943 
2944 	if (!status)
2945 		return;
2946 
2947 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
2948 	if (!cp)
2949 		return;
2950 
2951 	hci_dev_lock(hdev);
2952 
2953 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2954 	if (conn) {
2955 		if (conn->state == BT_CONFIG) {
2956 			hci_connect_cfm(conn, status);
2957 			hci_conn_drop(conn);
2958 		}
2959 	}
2960 
2961 	hci_dev_unlock(hdev);
2962 }
2963 
2964 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
2965 {
2966 	struct hci_cp_le_start_enc *cp;
2967 	struct hci_conn *conn;
2968 
2969 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2970 
2971 	if (!status)
2972 		return;
2973 
2974 	hci_dev_lock(hdev);
2975 
2976 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
2977 	if (!cp)
2978 		goto unlock;
2979 
2980 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2981 	if (!conn)
2982 		goto unlock;
2983 
2984 	if (conn->state != BT_CONNECTED)
2985 		goto unlock;
2986 
2987 	hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2988 	hci_conn_drop(conn);
2989 
2990 unlock:
2991 	hci_dev_unlock(hdev);
2992 }
2993 
2994 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
2995 {
2996 	struct hci_cp_switch_role *cp;
2997 	struct hci_conn *conn;
2998 
2999 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
3000 
3001 	if (!status)
3002 		return;
3003 
3004 	cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
3005 	if (!cp)
3006 		return;
3007 
3008 	hci_dev_lock(hdev);
3009 
3010 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
3011 	if (conn)
3012 		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3013 
3014 	hci_dev_unlock(hdev);
3015 }
3016 
/* Handle HCI_EV_INQUIRY_COMPLETE.
 *
 * Clears the HCI_INQUIRY flag (waking anyone waiting on it) and, when
 * mgmt-driven discovery is active, either starts resolving the names of
 * the discovered devices or marks discovery as completed.
 */
static void hci_inquiry_complete_evt(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_status *ev = data;
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_conn_check_pending(hdev);

	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		return;

	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	hci_dev_lock(hdev);

	if (discov->state != DISCOVERY_FINDING)
		goto unlock;

	if (list_empty(&discov->resolve)) {
		/* When BR/EDR inquiry is active and no LE scanning is in
		 * progress, then change discovery state to indicate completion.
		 *
		 * When running LE scanning and BR/EDR inquiry simultaneously
		 * and the LE scan already finished, then change the discovery
		 * state to indicate completion.
		 */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	/* Start resolving the first name that is still unknown. */
	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (e && hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
		discov->name_resolve_timeout = jiffies + NAME_RESOLVE_DURATION;
	} else {
		/* When BR/EDR inquiry is active and no LE scanning is in
		 * progress, then change discovery state to indicate completion.
		 *
		 * When running LE scanning and BR/EDR inquiry simultaneously
		 * and the LE scan already finished, then change the discovery
		 * state to indicate completion.
		 */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
}
3077 
/* Handle HCI_EV_INQUIRY_RESULT.
 *
 * Updates the inquiry cache with each reported device and forwards it
 * to mgmt as a found device.  Results are ignored while periodic
 * inquiry is active.  This (non-RSSI) event format carries no signal
 * strength, hence HCI_RSSI_INVALID.
 */
static void hci_inquiry_result_evt(struct hci_dev *hdev, void *edata,
				   struct sk_buff *skb)
{
	struct hci_ev_inquiry_result *ev = edata;
	struct inquiry_data data;
	int i;

	/* Validate that the skb really contains ev->num result entries. */
	if (!hci_ev_skb_pull(hdev, skb, HCI_EV_INQUIRY_RESULT,
			     flex_array_size(ev, info, ev->num)))
		return;

	bt_dev_dbg(hdev, "num %d", ev->num);

	if (!ev->num)
		return;

	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
		return;

	hci_dev_lock(hdev);

	for (i = 0; i < ev->num; i++) {
		struct inquiry_info *info = &ev->info[i];
		u32 flags;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode	= info->pscan_rep_mode;
		data.pscan_period_mode	= info->pscan_period_mode;
		data.pscan_mode		= info->pscan_mode;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset	= info->clock_offset;
		data.rssi		= HCI_RSSI_INVALID;
		data.ssp_mode		= 0x00;

		flags = hci_inquiry_cache_update(hdev, &data, false);

		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, HCI_RSSI_INVALID,
				  flags, NULL, 0, NULL, 0, 0);
	}

	hci_dev_unlock(hdev);
}
3121 
/* Handle HCI_EV_CONN_COMPLETE for BR/EDR ACL and SCO links.
 *
 * Looks up (or, for auto-connected ACLs and eSCO fallbacks, creates)
 * the hci_conn for the peer, assigns the connection handle on success
 * and finishes connection setup; on failure the attempt is failed via
 * hci_conn_failed().  Duplicate events for an already set-up connection
 * are rejected to avoid state corruption.
 */
static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = data;
	struct hci_conn *conn;
	u8 status = ev->status;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* In case of error status and there is no connection pending
		 * just unlock as there is nothing to cleanup.
		 */
		if (ev->status)
			goto unlock;

		/* Connection may not exist if auto-connected. Check the bredr
		 * allowlist to see if this device is allowed to auto connect.
		 * If link is an ACL type, create a connection class
		 * automatically.
		 *
		 * Auto-connect will only occur if the event filter is
		 * programmed with a given address. Right now, event filter is
		 * only used during suspend.
		 */
		if (ev->link_type == ACL_LINK &&
		    hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
						      &ev->bdaddr,
						      BDADDR_BREDR)) {
			conn = hci_conn_add_unset(hdev, ev->link_type,
						  &ev->bdaddr, HCI_ROLE_SLAVE);
			if (!conn) {
				bt_dev_err(hdev, "no memory for new conn");
				goto unlock;
			}
		} else {
			/* A SCO event may complete a connection that was
			 * requested as eSCO; reuse that object with the
			 * downgraded link type.
			 */
			if (ev->link_type != SCO_LINK)
				goto unlock;

			conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK,
						       &ev->bdaddr);
			if (!conn)
				goto unlock;

			conn->type = SCO_LINK;
		}
	}

	/* The HCI_Connection_Complete event is only sent once per connection.
	 * Processing it more than once per connection can corrupt kernel memory.
	 *
	 * As the connection handle is set here for the first time, it indicates
	 * whether the connection is already set up.
	 */
	if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
		bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
		goto unlock;
	}

	if (!status) {
		status = hci_conn_set_handle(conn, __le16_to_cpu(ev->handle));
		if (status)
			goto done;

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			/* Allow extra time for pairing if the peer is not
			 * SSP-capable and no link key exists yet.
			 */
			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);

		if (test_bit(HCI_AUTH, &hdev->flags))
			set_bit(HCI_CONN_AUTH, &conn->flags);

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);

			hci_update_scan(hdev);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}
	}

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

done:
	if (status) {
		hci_conn_failed(conn, status);
	} else if (ev->link_type == SCO_LINK) {
		switch (conn->setting & SCO_AIRMODE_MASK) {
		case SCO_AIRMODE_CVSD:
			/* Let the driver know a CVSD SCO channel is active. */
			if (hdev->notify)
				hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
			break;
		}

		hci_connect_cfm(conn, status);
	}

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
3252 
3253 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
3254 {
3255 	struct hci_cp_reject_conn_req cp;
3256 
3257 	bacpy(&cp.bdaddr, bdaddr);
3258 	cp.reason = HCI_ERROR_REJ_BAD_ADDR;
3259 	hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
3260 }
3261 
/* Handle HCI_EV_CONN_REQ (incoming BR/EDR connection request).
 *
 * The request is rejected when the remote spoofs the local BD_ADDR
 * (CVE-2020-26555), when no protocol user accepts it, when the peer is
 * on the reject list, or when mgmt policy disallows it.  Otherwise the
 * connection is accepted immediately or, with HCI_PROTO_DEFER, left in
 * BT_CONNECT2 for userspace to decide.
 */
static void hci_conn_request_evt(struct hci_dev *hdev, void *data,
				 struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = data;
	int mask = hdev->link_mode;
	struct inquiry_entry *ie;
	struct hci_conn *conn;
	__u8 flags = 0;

	bt_dev_dbg(hdev, "bdaddr %pMR type 0x%x", &ev->bdaddr, ev->link_type);

	/* Reject incoming connection from device with same BD ADDR against
	 * CVE-2020-26555
	 */
	if (hdev && !bacmp(&hdev->bdaddr, &ev->bdaddr)) {
		bt_dev_dbg(hdev, "Reject connection with same BD_ADDR %pMR\n",
			   &ev->bdaddr);
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* Ask the protocol layers (L2CAP/SCO) whether they accept and
	 * whether they want to defer the decision to userspace.
	 */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
				      &flags);

	if (!(mask & HCI_LM_ACCEPT)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	hci_dev_lock(hdev);

	if (hci_bdaddr_list_lookup(&hdev->reject_list, &ev->bdaddr,
				   BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		goto unlock;
	}

	/* Require HCI_CONNECTABLE or an accept list entry to accept the
	 * connection. These features are only touched through mgmt so
	 * only do the checks if HCI_MGMT is set.
	 */
	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
	    !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
	    !hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr,
					       BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		goto unlock;
	}

	/* Connection accepted */

	/* Refresh the cached class of device for this peer. */
	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		memcpy(ie->data.dev_class, ev->dev_class, 3);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
			&ev->bdaddr);
	if (!conn) {
		conn = hci_conn_add_unset(hdev, ev->link_type, &ev->bdaddr,
					  HCI_ROLE_SLAVE);
		if (!conn) {
			bt_dev_err(hdev, "no memory for new connection");
			goto unlock;
		}
	}

	memcpy(conn->dev_class, ev->dev_class, 3);

	hci_dev_unlock(hdev);

	if (ev->link_type == ACL_LINK ||
	    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
		struct hci_cp_accept_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);

		if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
			cp.role = 0x00; /* Become central */
		else
			cp.role = 0x01; /* Remain peripheral */

		hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
	} else if (!(flags & HCI_PROTO_DEFER)) {
		/* Synchronous link on an eSCO-capable controller. */
		struct hci_cp_accept_sync_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.pkt_type = cpu_to_le16(conn->pkt_type);

		cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.max_latency    = cpu_to_le16(0xffff);
		cp.content_format = cpu_to_le16(hdev->voice_setting);
		cp.retrans_effort = 0xff;

		hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
			     &cp);
	} else {
		/* Deferred: leave in BT_CONNECT2 and let userspace decide. */
		conn->state = BT_CONNECT2;
		hci_connect_cfm(conn, 0);
	}

	return;
unlock:
	hci_dev_unlock(hdev);
}
3369 
3370 static u8 hci_to_mgmt_reason(u8 err)
3371 {
3372 	switch (err) {
3373 	case HCI_ERROR_CONNECTION_TIMEOUT:
3374 		return MGMT_DEV_DISCONN_TIMEOUT;
3375 	case HCI_ERROR_REMOTE_USER_TERM:
3376 	case HCI_ERROR_REMOTE_LOW_RESOURCES:
3377 	case HCI_ERROR_REMOTE_POWER_OFF:
3378 		return MGMT_DEV_DISCONN_REMOTE;
3379 	case HCI_ERROR_LOCAL_HOST_TERM:
3380 		return MGMT_DEV_DISCONN_LOCAL_HOST;
3381 	default:
3382 		return MGMT_DEV_DISCONN_UNKNOWN;
3383 	}
3384 }
3385 
/* Handle HCI_EV_DISCONN_COMPLETE.
 *
 * Marks the connection closed, notifies mgmt and the protocol layers,
 * removes flushable link keys, re-arms auto-connection parameters,
 * restarts advertising for LE peripheral links and finally deletes the
 * connection object.
 */
static void hci_disconn_complete_evt(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = data;
	u8 reason;
	struct hci_conn_params *params;
	struct hci_conn *conn;
	bool mgmt_connected;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->status) {
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, ev->status);
		goto unlock;
	}

	conn->state = BT_CLOSED;

	mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);

	/* A prior authentication failure takes precedence over the
	 * controller-reported disconnect reason.
	 */
	if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
		reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
	else
		reason = hci_to_mgmt_reason(ev->reason);

	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
				reason, mgmt_connected);

	if (conn->type == ACL_LINK) {
		if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
			hci_remove_link_key(hdev, &conn->dst);

		hci_update_scan(hdev);
	}

	/* Re-arm auto-connection for devices configured to reconnect. */
	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_LINK_LOSS:
			if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
				break;
			fallthrough;

		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			hci_pend_le_list_del_init(params);
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
			hci_update_passive_scan(hdev);
			break;

		default:
			break;
		}
	}

	hci_disconn_cfm(conn, ev->reason);

	/* Re-enable advertising if necessary, since it might
	 * have been disabled by the connection. From the
	 * HCI_LE_Set_Advertise_Enable command description in
	 * the core specification (v4.0):
	 * "The Controller shall continue advertising until the Host
	 * issues an LE_Set_Advertise_Enable command with
	 * Advertising_Enable set to 0x00 (Advertising is disabled)
	 * or until a connection is created or until the Advertising
	 * is timed out due to Directed Advertising."
	 */
	if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
		hdev->cur_adv_instance = conn->adv_instance;
		hci_enable_advertising(hdev);
	}

	hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
3470 
/* HCI Authentication Complete event handler.
 *
 * Looks up the ACL connection for ev->handle and updates its
 * authentication state:
 *  - on success, marks the link authenticated and promotes the pending
 *    security level (unless this was a re-authentication attempt on a
 *    legacy, non-SSP link, which cannot succeed);
 *  - on failure, records an authentication failure for
 *    HCI_ERROR_PIN_OR_KEY_MISSING and notifies the management layer.
 *
 * Depending on the connection state it then either requests encryption
 * or confirms the connection/authentication result to the upper layers.
 */
static void hci_auth_complete_evt(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		/* Re-authentication of a legacy (non-SSP) link is not
		 * possible, so do not mark the link authenticated then.
		 */
		if (!hci_conn_ssp_enabled(conn) &&
		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
			bt_dev_info(hdev, "re-auth of legacy device is not possible.");
		} else {
			set_bit(HCI_CONN_AUTH, &conn->flags);
			conn->sec_level = conn->pending_sec_level;
		}
	} else {
		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		mgmt_auth_failed(conn, ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		/* During setup, a successful authentication on an SSP link
		 * is followed by an encryption request; otherwise the
		 * connection setup is completed with the event status.
		 */
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

	/* If encryption was requested while authentication was pending,
	 * either issue the encryption command now or report the failure.
	 */
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
3541 
/* HCI Remote Name Request Complete event handler.
 *
 * Completes any pending remote-name request state (forwarding the name,
 * or an empty result on failure, when the management interface is in
 * use) and, if the connection still needs outgoing authentication,
 * sends an HCI_Authentication_Requested command.
 */
static void hci_remote_name_evt(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto check_auth;

	/* On failure pass a NULL name so the pending request is completed
	 * with an empty result.
	 */
	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	/* Only request authentication once per connection */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3584 
/* HCI Encryption Change event handler.
 *
 * Updates the encryption-related connection flags for the connection
 * identified by ev->handle, verifies that the link still satisfies its
 * security requirements (downgrading ev->status to an authentication
 * failure if it does not), and notifies upper layers via
 * hci_encrypt_cfm(). On newly encrypted ACL links the encryption key
 * size is read (defaulting to HCI_LINK_KEY_SIZE when the controller
 * lacks the command), and for AES-CCM encrypted links with LMP/LE ping
 * support the default authenticated payload timeout is programmed.
 */
static void hci_encrypt_change_evt(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (ev->encrypt) {
			/* Encryption implies authentication */
			set_bit(HCI_CONN_AUTH, &conn->flags);
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
			conn->sec_level = conn->pending_sec_level;

			/* P-256 authentication key implies FIPS */
			if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
				set_bit(HCI_CONN_FIPS, &conn->flags);

			/* encrypt == 0x02 means AES-CCM on BR/EDR; LE links
			 * always use AES-CCM.
			 */
			if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
			    conn->type == LE_LINK)
				set_bit(HCI_CONN_AES_CCM, &conn->flags);
		} else {
			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}
	}

	/* We should disregard the current RPA and generate a new one
	 * whenever the encryption procedure fails.
	 */
	if (ev->status && conn->type == LE_LINK) {
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
	}

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* Check link security requirements are met; note this rewrites
	 * ev->status so the failure path below and hci_encrypt_cfm() see
	 * an authentication failure.
	 */
	if (!hci_conn_check_link_mode(conn))
		ev->status = HCI_ERROR_AUTH_FAILURE;

	if (ev->status && conn->state == BT_CONNECTED) {
		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		/* Notify upper layers so they can cleanup before
		 * disconnecting.
		 */
		hci_encrypt_cfm(conn, ev->status);
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	/* Try reading the encryption key size for encrypted ACL links */
	if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
		struct hci_cp_read_enc_key_size cp;

		/* Only send HCI_Read_Encryption_Key_Size if the
		 * controller really supports it. If it doesn't, assume
		 * the default size (16).
		 */
		if (!(hdev->commands[20] & 0x10)) {
			conn->enc_key_size = HCI_LINK_KEY_SIZE;
			goto notify;
		}

		cp.handle = cpu_to_le16(conn->handle);
		if (hci_send_cmd(hdev, HCI_OP_READ_ENC_KEY_SIZE,
				 sizeof(cp), &cp)) {
			bt_dev_err(hdev, "sending read key size failed");
			conn->enc_key_size = HCI_LINK_KEY_SIZE;
			goto notify;
		}

		/* Notification is deferred until the key size reply */
		goto unlock;
	}

	/* Set the default Authenticated Payload Timeout after
	 * an LE Link is established. As per Core Spec v5.0, Vol 2, Part B
	 * Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be
	 * sent when the link is active and Encryption is enabled, the conn
	 * type can be either LE or ACL and controller must support LMP Ping.
	 * Ensure for AES-CCM encryption as well.
	 */
	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
	    test_bit(HCI_CONN_AES_CCM, &conn->flags) &&
	    ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) ||
	     (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) {
		struct hci_cp_write_auth_payload_to cp;

		cp.handle = cpu_to_le16(conn->handle);
		cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
		if (hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,
				 sizeof(cp), &cp)) {
			bt_dev_err(hdev, "write auth payload timeout failed");
			goto notify;
		}

		goto unlock;
	}

notify:
	hci_encrypt_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);
}
3700 
3701 static void hci_change_link_key_complete_evt(struct hci_dev *hdev, void *data,
3702 					     struct sk_buff *skb)
3703 {
3704 	struct hci_ev_change_link_key_complete *ev = data;
3705 	struct hci_conn *conn;
3706 
3707 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3708 
3709 	hci_dev_lock(hdev);
3710 
3711 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3712 	if (conn) {
3713 		if (!ev->status)
3714 			set_bit(HCI_CONN_SECURE, &conn->flags);
3715 
3716 		clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3717 
3718 		hci_key_change_cfm(conn, ev->status);
3719 	}
3720 
3721 	hci_dev_unlock(hdev);
3722 }
3723 
/* HCI Read Remote Supported Features Complete event handler.
 *
 * Stores the remote feature page 0 and, while the connection is still
 * in BT_CONFIG, continues the setup: read the extended features when
 * both sides support them, otherwise request the remote name (or
 * notify the management layer for already-known devices) and complete
 * the connection if no outgoing authentication is needed.
 */
static void hci_remote_features_evt(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		memcpy(conn->features[0], ev->features, 8);

	/* Setup continuation below only applies while configuring */
	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status && lmp_ext_feat_capable(hdev) &&
	    lmp_ext_feat_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
			     sizeof(cp), &cp);
		goto unlock;
	}

	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, NULL, 0);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3772 
3773 static inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev, u8 ncmd)
3774 {
3775 	cancel_delayed_work(&hdev->cmd_timer);
3776 
3777 	rcu_read_lock();
3778 	if (!test_bit(HCI_RESET, &hdev->flags)) {
3779 		if (ncmd) {
3780 			cancel_delayed_work(&hdev->ncmd_timer);
3781 			atomic_set(&hdev->cmd_cnt, 1);
3782 		} else {
3783 			if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
3784 				queue_delayed_work(hdev->workqueue, &hdev->ncmd_timer,
3785 						   HCI_NCMD_TIMEOUT);
3786 		}
3787 	}
3788 	rcu_read_unlock();
3789 }
3790 
3791 static u8 hci_cc_le_read_buffer_size_v2(struct hci_dev *hdev, void *data,
3792 					struct sk_buff *skb)
3793 {
3794 	struct hci_rp_le_read_buffer_size_v2 *rp = data;
3795 
3796 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3797 
3798 	if (rp->status)
3799 		return rp->status;
3800 
3801 	hdev->le_mtu   = __le16_to_cpu(rp->acl_mtu);
3802 	hdev->le_pkts  = rp->acl_max_pkt;
3803 	hdev->iso_mtu  = __le16_to_cpu(rp->iso_mtu);
3804 	hdev->iso_pkts = rp->iso_max_pkt;
3805 
3806 	hdev->le_cnt  = hdev->le_pkts;
3807 	hdev->iso_cnt = hdev->iso_pkts;
3808 
3809 	BT_DBG("%s acl mtu %d:%d iso mtu %d:%d", hdev->name, hdev->acl_mtu,
3810 	       hdev->acl_pkts, hdev->iso_mtu, hdev->iso_pkts);
3811 
3812 	return rp->status;
3813 }
3814 
3815 static void hci_unbound_cis_failed(struct hci_dev *hdev, u8 cig, u8 status)
3816 {
3817 	struct hci_conn *conn, *tmp;
3818 
3819 	lockdep_assert_held(&hdev->lock);
3820 
3821 	list_for_each_entry_safe(conn, tmp, &hdev->conn_hash.list, list) {
3822 		if (conn->type != ISO_LINK || !bacmp(&conn->dst, BDADDR_ANY) ||
3823 		    conn->state == BT_OPEN || conn->iso_qos.ucast.cig != cig)
3824 			continue;
3825 
3826 		if (HCI_CONN_HANDLE_UNSET(conn->handle))
3827 			hci_conn_failed(conn, status);
3828 	}
3829 }
3830 
/* Command Complete handler for HCI_OP_LE_SET_CIG_PARAMS.
 *
 * Validates the response against the parameters that were sent and
 * either fails the unbound CIS of the CIG (on error) or assigns the
 * returned connection handles to the matching CIS connections,
 * triggering any pending CIS creation afterwards.
 */
static u8 hci_cc_le_set_cig_params(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_le_set_cig_params *rp = data;
	struct hci_cp_le_set_cig_params *cp;
	struct hci_conn *conn;
	u8 status = rp->status;
	bool pending = false;
	int i;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	/* A response that does not match what was sent is treated as a
	 * failure too.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_CIG_PARAMS);
	if (!rp->status && (!cp || rp->num_handles != cp->num_cis ||
			    rp->cig_id != cp->cig_id)) {
		bt_dev_err(hdev, "unexpected Set CIG Parameters response data");
		status = HCI_ERROR_UNSPECIFIED;
	}

	hci_dev_lock(hdev);

	/* BLUETOOTH CORE SPECIFICATION Version 5.4 | Vol 4, Part E page 2554
	 *
	 * If the Status return parameter is non-zero, then the state of the CIG
	 * and its CIS configurations shall not be changed by the command. If
	 * the CIG did not already exist, it shall not be created.
	 */
	if (status) {
		/* Keep current configuration, fail only the unbound CIS */
		hci_unbound_cis_failed(hdev, rp->cig_id, status);
		goto unlock;
	}

	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 2553
	 *
	 * If the Status return parameter is zero, then the Controller shall
	 * set the Connection_Handle arrayed return parameter to the connection
	 * handle(s) corresponding to the CIS configurations specified in
	 * the CIS_IDs command parameter, in the same order.
	 *
	 * Note: cp is non-NULL here, since a NULL cp forces status != 0
	 * above and takes the unlock path.
	 */
	for (i = 0; i < rp->num_handles; ++i) {
		conn = hci_conn_hash_lookup_cis(hdev, NULL, 0, rp->cig_id,
						cp->cis[i].cis_id);
		if (!conn || !bacmp(&conn->dst, BDADDR_ANY))
			continue;

		if (conn->state != BT_BOUND && conn->state != BT_CONNECT)
			continue;

		if (hci_conn_set_handle(conn, __le16_to_cpu(rp->handle[i])))
			continue;

		if (conn->state == BT_CONNECT)
			pending = true;
	}

unlock:
	if (pending)
		hci_le_create_cis_pending(hdev);

	hci_dev_unlock(hdev);

	return rp->status;
}
3895 
3896 static u8 hci_cc_le_setup_iso_path(struct hci_dev *hdev, void *data,
3897 				   struct sk_buff *skb)
3898 {
3899 	struct hci_rp_le_setup_iso_path *rp = data;
3900 	struct hci_cp_le_setup_iso_path *cp;
3901 	struct hci_conn *conn;
3902 
3903 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3904 
3905 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SETUP_ISO_PATH);
3906 	if (!cp)
3907 		return rp->status;
3908 
3909 	hci_dev_lock(hdev);
3910 
3911 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
3912 	if (!conn)
3913 		goto unlock;
3914 
3915 	if (rp->status) {
3916 		hci_connect_cfm(conn, rp->status);
3917 		hci_conn_del(conn);
3918 		goto unlock;
3919 	}
3920 
3921 	switch (cp->direction) {
3922 	/* Input (Host to Controller) */
3923 	case 0x00:
3924 		/* Only confirm connection if output only */
3925 		if (conn->iso_qos.ucast.out.sdu && !conn->iso_qos.ucast.in.sdu)
3926 			hci_connect_cfm(conn, rp->status);
3927 		break;
3928 	/* Output (Controller to Host) */
3929 	case 0x01:
3930 		/* Confirm connection since conn->iso_qos is always configured
3931 		 * last.
3932 		 */
3933 		hci_connect_cfm(conn, rp->status);
3934 		break;
3935 	}
3936 
3937 unlock:
3938 	hci_dev_unlock(hdev);
3939 	return rp->status;
3940 }
3941 
/* Command Status handler for HCI_OP_LE_CREATE_BIG: nothing to do here
 * except logging the status.
 */
static void hci_cs_le_create_big(struct hci_dev *hdev, u8 status)
{
	bt_dev_dbg(hdev, "status 0x%2.2x", status);
}
3946 
3947 static u8 hci_cc_set_per_adv_param(struct hci_dev *hdev, void *data,
3948 				   struct sk_buff *skb)
3949 {
3950 	struct hci_ev_status *rp = data;
3951 	struct hci_cp_le_set_per_adv_params *cp;
3952 
3953 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3954 
3955 	if (rp->status)
3956 		return rp->status;
3957 
3958 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_PARAMS);
3959 	if (!cp)
3960 		return rp->status;
3961 
3962 	/* TODO: set the conn state */
3963 	return rp->status;
3964 }
3965 
/* Command Complete handler for HCI_OP_LE_SET_PER_ADV_ENABLE.
 *
 * Tracks which advertising instances have periodic advertising enabled
 * and maintains the HCI_LE_PER_ADV device flag accordingly.
 */
static u8 hci_cc_le_set_per_adv_enable(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_per_adv_enable *cp;
	struct adv_info *adv = NULL, *n;
	u8 per_adv_cnt = 0;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	adv = hci_find_adv_instance(hdev, cp->handle);

	if (cp->enable) {
		hci_dev_set_flag(hdev, HCI_LE_PER_ADV);

		if (adv)
			adv->enabled = true;
	} else {
		/* If just one instance was disabled check if there are
		 * any other instance enabled before clearing HCI_LE_PER_ADV.
		 * The current periodic adv instance will be marked as
		 * disabled once extended advertising is also disabled.
		 */
		list_for_each_entry_safe(adv, n, &hdev->adv_instances,
					 list) {
			if (adv->periodic && adv->enabled)
				per_adv_cnt++;
		}

		/* > 1 because the instance just disabled still counts as
		 * enabled here (see comment above).
		 */
		if (per_adv_cnt > 1)
			goto unlock;

		hci_dev_clear_flag(hdev, HCI_LE_PER_ADV);
	}

unlock:
	hci_dev_unlock(hdev);

	return rp->status;
}
4015 
/* Builders for hci_cc_table entries: HCI_CC_VL for responses with a
 * variable payload length (min/max bounds), HCI_CC for a fixed length
 * and HCI_CC_STATUS for status-only responses.
 */
#define HCI_CC_VL(_op, _func, _min, _max) \
{ \
	.op = _op, \
	.func = _func, \
	.min_len = _min, \
	.max_len = _max, \
}

#define HCI_CC(_op, _func, _len) \
	HCI_CC_VL(_op, _func, _len, _len)

#define HCI_CC_STATUS(_op, _func) \
	HCI_CC(_op, _func, sizeof(struct hci_ev_status))
4029 
/* Dispatch table mapping Command Complete opcodes to their handlers
 * together with the expected response payload bounds, which are
 * enforced by hci_cc_func().
 */
static const struct hci_cc {
	u16  op;
	u8 (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
	u16  min_len;
	u16  max_len;
} hci_cc_table[] = {
	HCI_CC_STATUS(HCI_OP_INQUIRY_CANCEL, hci_cc_inquiry_cancel),
	HCI_CC_STATUS(HCI_OP_PERIODIC_INQ, hci_cc_periodic_inq),
	HCI_CC_STATUS(HCI_OP_EXIT_PERIODIC_INQ, hci_cc_exit_periodic_inq),
	HCI_CC_STATUS(HCI_OP_REMOTE_NAME_REQ_CANCEL,
		      hci_cc_remote_name_req_cancel),
	HCI_CC(HCI_OP_ROLE_DISCOVERY, hci_cc_role_discovery,
	       sizeof(struct hci_rp_role_discovery)),
	HCI_CC(HCI_OP_READ_LINK_POLICY, hci_cc_read_link_policy,
	       sizeof(struct hci_rp_read_link_policy)),
	HCI_CC(HCI_OP_WRITE_LINK_POLICY, hci_cc_write_link_policy,
	       sizeof(struct hci_rp_write_link_policy)),
	HCI_CC(HCI_OP_READ_DEF_LINK_POLICY, hci_cc_read_def_link_policy,
	       sizeof(struct hci_rp_read_def_link_policy)),
	HCI_CC_STATUS(HCI_OP_WRITE_DEF_LINK_POLICY,
		      hci_cc_write_def_link_policy),
	HCI_CC_STATUS(HCI_OP_RESET, hci_cc_reset),
	HCI_CC(HCI_OP_READ_STORED_LINK_KEY, hci_cc_read_stored_link_key,
	       sizeof(struct hci_rp_read_stored_link_key)),
	HCI_CC(HCI_OP_DELETE_STORED_LINK_KEY, hci_cc_delete_stored_link_key,
	       sizeof(struct hci_rp_delete_stored_link_key)),
	HCI_CC_STATUS(HCI_OP_WRITE_LOCAL_NAME, hci_cc_write_local_name),
	HCI_CC(HCI_OP_READ_LOCAL_NAME, hci_cc_read_local_name,
	       sizeof(struct hci_rp_read_local_name)),
	HCI_CC_STATUS(HCI_OP_WRITE_AUTH_ENABLE, hci_cc_write_auth_enable),
	HCI_CC_STATUS(HCI_OP_WRITE_ENCRYPT_MODE, hci_cc_write_encrypt_mode),
	HCI_CC_STATUS(HCI_OP_WRITE_SCAN_ENABLE, hci_cc_write_scan_enable),
	HCI_CC_STATUS(HCI_OP_SET_EVENT_FLT, hci_cc_set_event_filter),
	HCI_CC(HCI_OP_READ_CLASS_OF_DEV, hci_cc_read_class_of_dev,
	       sizeof(struct hci_rp_read_class_of_dev)),
	HCI_CC_STATUS(HCI_OP_WRITE_CLASS_OF_DEV, hci_cc_write_class_of_dev),
	HCI_CC(HCI_OP_READ_VOICE_SETTING, hci_cc_read_voice_setting,
	       sizeof(struct hci_rp_read_voice_setting)),
	HCI_CC_STATUS(HCI_OP_WRITE_VOICE_SETTING, hci_cc_write_voice_setting),
	HCI_CC(HCI_OP_READ_NUM_SUPPORTED_IAC, hci_cc_read_num_supported_iac,
	       sizeof(struct hci_rp_read_num_supported_iac)),
	HCI_CC_STATUS(HCI_OP_WRITE_SSP_MODE, hci_cc_write_ssp_mode),
	HCI_CC_STATUS(HCI_OP_WRITE_SC_SUPPORT, hci_cc_write_sc_support),
	HCI_CC(HCI_OP_READ_AUTH_PAYLOAD_TO, hci_cc_read_auth_payload_timeout,
	       sizeof(struct hci_rp_read_auth_payload_to)),
	HCI_CC(HCI_OP_WRITE_AUTH_PAYLOAD_TO, hci_cc_write_auth_payload_timeout,
	       sizeof(struct hci_rp_write_auth_payload_to)),
	HCI_CC(HCI_OP_READ_LOCAL_VERSION, hci_cc_read_local_version,
	       sizeof(struct hci_rp_read_local_version)),
	HCI_CC(HCI_OP_READ_LOCAL_COMMANDS, hci_cc_read_local_commands,
	       sizeof(struct hci_rp_read_local_commands)),
	HCI_CC(HCI_OP_READ_LOCAL_FEATURES, hci_cc_read_local_features,
	       sizeof(struct hci_rp_read_local_features)),
	HCI_CC(HCI_OP_READ_LOCAL_EXT_FEATURES, hci_cc_read_local_ext_features,
	       sizeof(struct hci_rp_read_local_ext_features)),
	HCI_CC(HCI_OP_READ_BUFFER_SIZE, hci_cc_read_buffer_size,
	       sizeof(struct hci_rp_read_buffer_size)),
	HCI_CC(HCI_OP_READ_BD_ADDR, hci_cc_read_bd_addr,
	       sizeof(struct hci_rp_read_bd_addr)),
	HCI_CC(HCI_OP_READ_LOCAL_PAIRING_OPTS, hci_cc_read_local_pairing_opts,
	       sizeof(struct hci_rp_read_local_pairing_opts)),
	HCI_CC(HCI_OP_READ_PAGE_SCAN_ACTIVITY, hci_cc_read_page_scan_activity,
	       sizeof(struct hci_rp_read_page_scan_activity)),
	HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
		      hci_cc_write_page_scan_activity),
	HCI_CC(HCI_OP_READ_PAGE_SCAN_TYPE, hci_cc_read_page_scan_type,
	       sizeof(struct hci_rp_read_page_scan_type)),
	HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_TYPE, hci_cc_write_page_scan_type),
	HCI_CC(HCI_OP_READ_DATA_BLOCK_SIZE, hci_cc_read_data_block_size,
	       sizeof(struct hci_rp_read_data_block_size)),
	HCI_CC(HCI_OP_READ_FLOW_CONTROL_MODE, hci_cc_read_flow_control_mode,
	       sizeof(struct hci_rp_read_flow_control_mode)),
	HCI_CC(HCI_OP_READ_LOCAL_AMP_INFO, hci_cc_read_local_amp_info,
	       sizeof(struct hci_rp_read_local_amp_info)),
	HCI_CC(HCI_OP_READ_CLOCK, hci_cc_read_clock,
	       sizeof(struct hci_rp_read_clock)),
	HCI_CC(HCI_OP_READ_ENC_KEY_SIZE, hci_cc_read_enc_key_size,
	       sizeof(struct hci_rp_read_enc_key_size)),
	HCI_CC(HCI_OP_READ_INQ_RSP_TX_POWER, hci_cc_read_inq_rsp_tx_power,
	       sizeof(struct hci_rp_read_inq_rsp_tx_power)),
	HCI_CC(HCI_OP_READ_DEF_ERR_DATA_REPORTING,
	       hci_cc_read_def_err_data_reporting,
	       sizeof(struct hci_rp_read_def_err_data_reporting)),
	HCI_CC_STATUS(HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
		      hci_cc_write_def_err_data_reporting),
	HCI_CC(HCI_OP_PIN_CODE_REPLY, hci_cc_pin_code_reply,
	       sizeof(struct hci_rp_pin_code_reply)),
	HCI_CC(HCI_OP_PIN_CODE_NEG_REPLY, hci_cc_pin_code_neg_reply,
	       sizeof(struct hci_rp_pin_code_neg_reply)),
	HCI_CC(HCI_OP_READ_LOCAL_OOB_DATA, hci_cc_read_local_oob_data,
	       sizeof(struct hci_rp_read_local_oob_data)),
	HCI_CC(HCI_OP_READ_LOCAL_OOB_EXT_DATA, hci_cc_read_local_oob_ext_data,
	       sizeof(struct hci_rp_read_local_oob_ext_data)),
	HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE, hci_cc_le_read_buffer_size,
	       sizeof(struct hci_rp_le_read_buffer_size)),
	HCI_CC(HCI_OP_LE_READ_LOCAL_FEATURES, hci_cc_le_read_local_features,
	       sizeof(struct hci_rp_le_read_local_features)),
	HCI_CC(HCI_OP_LE_READ_ADV_TX_POWER, hci_cc_le_read_adv_tx_power,
	       sizeof(struct hci_rp_le_read_adv_tx_power)),
	HCI_CC(HCI_OP_USER_CONFIRM_REPLY, hci_cc_user_confirm_reply,
	       sizeof(struct hci_rp_user_confirm_reply)),
	HCI_CC(HCI_OP_USER_CONFIRM_NEG_REPLY, hci_cc_user_confirm_neg_reply,
	       sizeof(struct hci_rp_user_confirm_reply)),
	HCI_CC(HCI_OP_USER_PASSKEY_REPLY, hci_cc_user_passkey_reply,
	       sizeof(struct hci_rp_user_confirm_reply)),
	HCI_CC(HCI_OP_USER_PASSKEY_NEG_REPLY, hci_cc_user_passkey_neg_reply,
	       sizeof(struct hci_rp_user_confirm_reply)),
	HCI_CC_STATUS(HCI_OP_LE_SET_RANDOM_ADDR, hci_cc_le_set_random_addr),
	HCI_CC_STATUS(HCI_OP_LE_SET_ADV_ENABLE, hci_cc_le_set_adv_enable),
	HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_PARAM, hci_cc_le_set_scan_param),
	HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_ENABLE, hci_cc_le_set_scan_enable),
	HCI_CC(HCI_OP_LE_READ_ACCEPT_LIST_SIZE,
	       hci_cc_le_read_accept_list_size,
	       sizeof(struct hci_rp_le_read_accept_list_size)),
	HCI_CC_STATUS(HCI_OP_LE_CLEAR_ACCEPT_LIST, hci_cc_le_clear_accept_list),
	HCI_CC_STATUS(HCI_OP_LE_ADD_TO_ACCEPT_LIST,
		      hci_cc_le_add_to_accept_list),
	HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_ACCEPT_LIST,
		      hci_cc_le_del_from_accept_list),
	HCI_CC(HCI_OP_LE_READ_SUPPORTED_STATES, hci_cc_le_read_supported_states,
	       sizeof(struct hci_rp_le_read_supported_states)),
	HCI_CC(HCI_OP_LE_READ_DEF_DATA_LEN, hci_cc_le_read_def_data_len,
	       sizeof(struct hci_rp_le_read_def_data_len)),
	HCI_CC_STATUS(HCI_OP_LE_WRITE_DEF_DATA_LEN,
		      hci_cc_le_write_def_data_len),
	HCI_CC_STATUS(HCI_OP_LE_ADD_TO_RESOLV_LIST,
		      hci_cc_le_add_to_resolv_list),
	HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_RESOLV_LIST,
		      hci_cc_le_del_from_resolv_list),
	HCI_CC_STATUS(HCI_OP_LE_CLEAR_RESOLV_LIST,
		      hci_cc_le_clear_resolv_list),
	HCI_CC(HCI_OP_LE_READ_RESOLV_LIST_SIZE, hci_cc_le_read_resolv_list_size,
	       sizeof(struct hci_rp_le_read_resolv_list_size)),
	HCI_CC_STATUS(HCI_OP_LE_SET_ADDR_RESOLV_ENABLE,
		      hci_cc_le_set_addr_resolution_enable),
	HCI_CC(HCI_OP_LE_READ_MAX_DATA_LEN, hci_cc_le_read_max_data_len,
	       sizeof(struct hci_rp_le_read_max_data_len)),
	HCI_CC_STATUS(HCI_OP_WRITE_LE_HOST_SUPPORTED,
		      hci_cc_write_le_host_supported),
	HCI_CC_STATUS(HCI_OP_LE_SET_ADV_PARAM, hci_cc_set_adv_param),
	HCI_CC(HCI_OP_READ_RSSI, hci_cc_read_rssi,
	       sizeof(struct hci_rp_read_rssi)),
	HCI_CC(HCI_OP_READ_TX_POWER, hci_cc_read_tx_power,
	       sizeof(struct hci_rp_read_tx_power)),
	HCI_CC_STATUS(HCI_OP_WRITE_SSP_DEBUG_MODE, hci_cc_write_ssp_debug_mode),
	HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_PARAMS,
		      hci_cc_le_set_ext_scan_param),
	HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_ENABLE,
		      hci_cc_le_set_ext_scan_enable),
	HCI_CC_STATUS(HCI_OP_LE_SET_DEFAULT_PHY, hci_cc_le_set_default_phy),
	HCI_CC(HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
	       hci_cc_le_read_num_adv_sets,
	       sizeof(struct hci_rp_le_read_num_supported_adv_sets)),
	HCI_CC(HCI_OP_LE_SET_EXT_ADV_PARAMS, hci_cc_set_ext_adv_param,
	       sizeof(struct hci_rp_le_set_ext_adv_params)),
	HCI_CC_STATUS(HCI_OP_LE_SET_EXT_ADV_ENABLE,
		      hci_cc_le_set_ext_adv_enable),
	HCI_CC_STATUS(HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
		      hci_cc_le_set_adv_set_random_addr),
	HCI_CC_STATUS(HCI_OP_LE_REMOVE_ADV_SET, hci_cc_le_remove_adv_set),
	HCI_CC_STATUS(HCI_OP_LE_CLEAR_ADV_SETS, hci_cc_le_clear_adv_sets),
	HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_PARAMS, hci_cc_set_per_adv_param),
	HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_ENABLE,
		      hci_cc_le_set_per_adv_enable),
	HCI_CC(HCI_OP_LE_READ_TRANSMIT_POWER, hci_cc_le_read_transmit_power,
	       sizeof(struct hci_rp_le_read_transmit_power)),
	HCI_CC_STATUS(HCI_OP_LE_SET_PRIVACY_MODE, hci_cc_le_set_privacy_mode),
	HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE_V2, hci_cc_le_read_buffer_size_v2,
	       sizeof(struct hci_rp_le_read_buffer_size_v2)),
	HCI_CC_VL(HCI_OP_LE_SET_CIG_PARAMS, hci_cc_le_set_cig_params,
		  sizeof(struct hci_rp_le_set_cig_params), HCI_MAX_EVENT_SIZE),
	HCI_CC(HCI_OP_LE_SETUP_ISO_PATH, hci_cc_le_setup_iso_path,
	       sizeof(struct hci_rp_le_setup_iso_path)),
};
4204 
4205 static u8 hci_cc_func(struct hci_dev *hdev, const struct hci_cc *cc,
4206 		      struct sk_buff *skb)
4207 {
4208 	void *data;
4209 
4210 	if (skb->len < cc->min_len) {
4211 		bt_dev_err(hdev, "unexpected cc 0x%4.4x length: %u < %u",
4212 			   cc->op, skb->len, cc->min_len);
4213 		return HCI_ERROR_UNSPECIFIED;
4214 	}
4215 
4216 	/* Just warn if the length is over max_len size it still be possible to
4217 	 * partially parse the cc so leave to callback to decide if that is
4218 	 * acceptable.
4219 	 */
4220 	if (skb->len > cc->max_len)
4221 		bt_dev_warn(hdev, "unexpected cc 0x%4.4x length: %u > %u",
4222 			    cc->op, skb->len, cc->max_len);
4223 
4224 	data = hci_cc_skb_pull(hdev, skb, cc->op, cc->min_len);
4225 	if (!data)
4226 		return HCI_ERROR_UNSPECIFIED;
4227 
4228 	return cc->func(hdev, data, skb);
4229 }
4230 
/* HCI Command Complete event handler.
 *
 * Dispatches the completion to the matching entry in hci_cc_table,
 * falling back to treating byte 0 of the payload as the status for
 * unknown (e.g. vendor specific) opcodes, then updates the command
 * counter/timers and completes any waiting request.
 */
static void hci_cmd_complete_evt(struct hci_dev *hdev, void *data,
				 struct sk_buff *skb, u16 *opcode, u8 *status,
				 hci_req_complete_t *req_complete,
				 hci_req_complete_skb_t *req_complete_skb)
{
	struct hci_ev_cmd_complete *ev = data;
	int i;

	*opcode = __le16_to_cpu(ev->opcode);

	bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);

	for (i = 0; i < ARRAY_SIZE(hci_cc_table); i++) {
		if (hci_cc_table[i].op == *opcode) {
			*status = hci_cc_func(hdev, &hci_cc_table[i], skb);
			break;
		}
	}

	if (i == ARRAY_SIZE(hci_cc_table)) {
		/* Unknown opcode, assume byte 0 contains the status, so
		 * that e.g. __hci_cmd_sync() properly returns errors
		 * for vendor specific commands send by HCI drivers.
		 * If a vendor doesn't actually follow this convention we may
		 * need to introduce a vendor CC table in order to properly set
		 * the status.
		 */
		*status = skb->data[0];
	}

	handle_cmd_cnt_and_timer(hdev, ev->ncmd);

	hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
			     req_complete_skb);

	if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
		bt_dev_err(hdev,
			   "unexpected event for opcode 0x%4.4x", *opcode);
		return;
	}

	/* Kick the command work queue if credits and queued commands exist */
	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
		queue_work(hdev->workqueue, &hdev->cmd_work);
}
4275 
/* Command Status handler for HCI_OP_LE_CREATE_CIS.
 *
 * Only acts on failure: every CIS connection referenced by the sent
 * command is closed and deleted, and if any of them still had a CIS
 * creation pending, the next queued CIS creation is triggered.
 */
static void hci_cs_le_create_cis(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_create_cis *cp;
	bool pending = false;
	int i;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CIS);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	/* Remove connection if command failed */
	/* Note: the sent command's num_cis field is consumed as the loop
	 * counter while i indexes the cis[] array.
	 */
	for (i = 0; cp->num_cis; cp->num_cis--, i++) {
		struct hci_conn *conn;
		u16 handle;

		handle = __le16_to_cpu(cp->cis[i].cis_handle);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (conn) {
			if (test_and_clear_bit(HCI_CONN_CREATE_CIS,
					       &conn->flags))
				pending = true;
			conn->state = BT_CLOSED;
			hci_connect_cfm(conn, status);
			hci_conn_del(conn);
		}
	}

	if (pending)
		hci_le_create_cis_pending(hdev);

	hci_dev_unlock(hdev);
}
4316 
/* Entry builder for hci_cs_table */
#define HCI_CS(_op, _func) \
{ \
	.op = _op, \
	.func = _func, \
}

/* Dispatch table mapping Command Status opcodes to their handlers */
static const struct hci_cs {
	u16  op;
	void (*func)(struct hci_dev *hdev, __u8 status);
} hci_cs_table[] = {
	HCI_CS(HCI_OP_INQUIRY, hci_cs_inquiry),
	HCI_CS(HCI_OP_CREATE_CONN, hci_cs_create_conn),
	HCI_CS(HCI_OP_DISCONNECT, hci_cs_disconnect),
	HCI_CS(HCI_OP_ADD_SCO, hci_cs_add_sco),
	HCI_CS(HCI_OP_AUTH_REQUESTED, hci_cs_auth_requested),
	HCI_CS(HCI_OP_SET_CONN_ENCRYPT, hci_cs_set_conn_encrypt),
	HCI_CS(HCI_OP_REMOTE_NAME_REQ, hci_cs_remote_name_req),
	HCI_CS(HCI_OP_READ_REMOTE_FEATURES, hci_cs_read_remote_features),
	HCI_CS(HCI_OP_READ_REMOTE_EXT_FEATURES,
	       hci_cs_read_remote_ext_features),
	HCI_CS(HCI_OP_SETUP_SYNC_CONN, hci_cs_setup_sync_conn),
	HCI_CS(HCI_OP_ENHANCED_SETUP_SYNC_CONN,
	       hci_cs_enhanced_setup_sync_conn),
	HCI_CS(HCI_OP_SNIFF_MODE, hci_cs_sniff_mode),
	HCI_CS(HCI_OP_EXIT_SNIFF_MODE, hci_cs_exit_sniff_mode),
	HCI_CS(HCI_OP_SWITCH_ROLE, hci_cs_switch_role),
	HCI_CS(HCI_OP_LE_CREATE_CONN, hci_cs_le_create_conn),
	HCI_CS(HCI_OP_LE_READ_REMOTE_FEATURES, hci_cs_le_read_remote_features),
	HCI_CS(HCI_OP_LE_START_ENC, hci_cs_le_start_enc),
	HCI_CS(HCI_OP_LE_EXT_CREATE_CONN, hci_cs_le_ext_create_conn),
	HCI_CS(HCI_OP_LE_CREATE_CIS, hci_cs_le_create_cis),
	HCI_CS(HCI_OP_LE_CREATE_BIG, hci_cs_le_create_big),
};
4350 
/* Handle the HCI Command Status event.
 *
 * Reports the acted-on opcode and status via *opcode and *status,
 * dispatches any opcode-specific handler from hci_cs_table, updates
 * the outstanding-command credit/timer state and, where appropriate,
 * flags the originating request as complete.  Finally re-arms the
 * command work queue if credits and queued commands remain.
 */
static void hci_cmd_status_evt(struct hci_dev *hdev, void *data,
			       struct sk_buff *skb, u16 *opcode, u8 *status,
			       hci_req_complete_t *req_complete,
			       hci_req_complete_skb_t *req_complete_skb)
{
	struct hci_ev_cmd_status *ev = data;
	int i;

	*opcode = __le16_to_cpu(ev->opcode);
	*status = ev->status;

	bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);

	/* Run the opcode-specific status handler, if one is registered */
	for (i = 0; i < ARRAY_SIZE(hci_cs_table); i++) {
		if (hci_cs_table[i].op == *opcode) {
			hci_cs_table[i].func(hdev, ev->status);
			break;
		}
	}

	handle_cmd_cnt_and_timer(hdev, ev->ncmd);

	/* Indicate request completion if the command failed. Also, if
	 * we're not waiting for a special event and we get a success
	 * command status we should try to flag the request as completed
	 * (since for this kind of commands there will not be a command
	 * complete event).
	 */
	if (ev->status || (hdev->sent_cmd && !hci_skb_event(hdev->sent_cmd))) {
		hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
				     req_complete_skb);
		/* More events are expected for this command; do not kick
		 * the command queue yet.
		 */
		if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
			bt_dev_err(hdev, "unexpected event for opcode 0x%4.4x",
				   *opcode);
			return;
		}
	}

	/* Send the next queued command if credits are available */
	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
		queue_work(hdev->workqueue, &hdev->cmd_work);
}
4392 
4393 static void hci_hardware_error_evt(struct hci_dev *hdev, void *data,
4394 				   struct sk_buff *skb)
4395 {
4396 	struct hci_ev_hardware_error *ev = data;
4397 
4398 	bt_dev_dbg(hdev, "code 0x%2.2x", ev->code);
4399 
4400 	hdev->hw_error_code = ev->code;
4401 
4402 	queue_work(hdev->req_workqueue, &hdev->error_reset);
4403 }
4404 
4405 static void hci_role_change_evt(struct hci_dev *hdev, void *data,
4406 				struct sk_buff *skb)
4407 {
4408 	struct hci_ev_role_change *ev = data;
4409 	struct hci_conn *conn;
4410 
4411 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4412 
4413 	hci_dev_lock(hdev);
4414 
4415 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4416 	if (conn) {
4417 		if (!ev->status)
4418 			conn->role = ev->role;
4419 
4420 		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
4421 
4422 		hci_role_switch_cfm(conn, ev->status, ev->role);
4423 	}
4424 
4425 	hci_dev_unlock(hdev);
4426 }
4427 
/* Handle the HCI Number of Completed Packets event.
 *
 * For every reported connection handle, return the completed packet
 * count to the matching per-link-type transmit quota (capped at its
 * configured maximum), decrease the connection's in-flight counter,
 * and finally kick the TX work so more queued data can be sent.
 */
static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_num_comp_pkts *ev = data;
	int i;

	/* Make sure the skb actually carries ev->num handle entries */
	if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_PKTS,
			     flex_array_size(ev, handles, ev->num)))
		return;

	/* This event is only valid with packet-based flow control */
	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
		bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	bt_dev_dbg(hdev, "num %d", ev->num);

	for (i = 0; i < ev->num; i++) {
		struct hci_comp_pkts_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16  handle, count;

		handle = __le16_to_cpu(info->handle);
		count  = __le16_to_cpu(info->count);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= count;

		switch (conn->type) {
		case ACL_LINK:
			hdev->acl_cnt += count;
			if (hdev->acl_cnt > hdev->acl_pkts)
				hdev->acl_cnt = hdev->acl_pkts;
			break;

		case LE_LINK:
			/* LE links share the ACL quota when the controller
			 * reports no dedicated LE buffers (le_pkts == 0).
			 */
			if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		case SCO_LINK:
			hdev->sco_cnt += count;
			if (hdev->sco_cnt > hdev->sco_pkts)
				hdev->sco_cnt = hdev->sco_pkts;
			break;

		case ISO_LINK:
			/* ISO links fall back to the LE quota and then to
			 * the ACL quota when no dedicated buffers exist.
			 */
			if (hdev->iso_pkts) {
				hdev->iso_cnt += count;
				if (hdev->iso_cnt > hdev->iso_pkts)
					hdev->iso_cnt = hdev->iso_pkts;
			} else if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		default:
			bt_dev_err(hdev, "unknown type %d conn %p",
				   conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
4509 
4510 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
4511 						 __u16 handle)
4512 {
4513 	struct hci_chan *chan;
4514 
4515 	switch (hdev->dev_type) {
4516 	case HCI_PRIMARY:
4517 		return hci_conn_hash_lookup_handle(hdev, handle);
4518 	case HCI_AMP:
4519 		chan = hci_chan_lookup_handle(hdev, handle);
4520 		if (chan)
4521 			return chan->conn;
4522 		break;
4523 	default:
4524 		bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
4525 		break;
4526 	}
4527 
4528 	return NULL;
4529 }
4530 
/* Handle the HCI Number of Completed Data Blocks event.
 *
 * Block-based counterpart of hci_num_comp_pkts_evt: returns completed
 * data blocks to the shared block quota for every reported handle,
 * decreases each connection's in-flight counter and reschedules the
 * TX work.
 */
static void hci_num_comp_blocks_evt(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_num_comp_blocks *ev = data;
	int i;

	/* Make sure the skb actually carries ev->num_hndl entries */
	if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_BLOCKS,
			     flex_array_size(ev, handles, ev->num_hndl)))
		return;

	/* This event is only valid with block-based flow control */
	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
		bt_dev_err(hdev, "wrong event for mode %d",
			   hdev->flow_ctl_mode);
		return;
	}

	bt_dev_dbg(hdev, "num_blocks %d num_hndl %d", ev->num_blocks,
		   ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_blocks_info *info = &ev->handles[i];
		struct hci_conn *conn = NULL;
		__u16  handle, block_count;

		handle = __le16_to_cpu(info->handle);
		block_count = __le16_to_cpu(info->blocks);

		conn = __hci_conn_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= block_count;

		switch (conn->type) {
		case ACL_LINK:
		case AMP_LINK:
			/* ACL and AMP links draw from one shared block pool,
			 * capped at the controller-reported total.
			 */
			hdev->block_cnt += block_count;
			if (hdev->block_cnt > hdev->num_blocks)
				hdev->block_cnt = hdev->num_blocks;
			break;

		default:
			bt_dev_err(hdev, "unknown type %d conn %p",
				   conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
4581 
4582 static void hci_mode_change_evt(struct hci_dev *hdev, void *data,
4583 				struct sk_buff *skb)
4584 {
4585 	struct hci_ev_mode_change *ev = data;
4586 	struct hci_conn *conn;
4587 
4588 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4589 
4590 	hci_dev_lock(hdev);
4591 
4592 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4593 	if (conn) {
4594 		conn->mode = ev->mode;
4595 
4596 		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
4597 					&conn->flags)) {
4598 			if (conn->mode == HCI_CM_ACTIVE)
4599 				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4600 			else
4601 				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4602 		}
4603 
4604 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
4605 			hci_sco_setup(conn, ev->status);
4606 	}
4607 
4608 	hci_dev_unlock(hdev);
4609 }
4610 
/* Handle the HCI PIN Code Request event.
 *
 * Rejects the request outright when the device is not bondable and we
 * did not initiate the authentication; otherwise, with the management
 * interface active, forwards the request to user space, indicating
 * whether a secure (16 digit) PIN is required for the pending
 * security level.
 */
static void hci_pin_code_request_evt(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_pin_code_req *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	if (conn->state == BT_CONNECTED) {
		/* Set the longer pairing timeout while pairing is in
		 * progress; the hold/drop pair presumably re-arms the
		 * disconnect timer with the new value — confirm against
		 * hci_conn_drop.
		 */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
		hci_conn_drop(conn);
	}

	if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
	    !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
	} else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
		u8 secure;

		/* High security requires a full 16 digit PIN */
		if (conn->pending_sec_level == BT_SECURITY_HIGH)
			secure = 1;
		else
			secure = 0;

		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
	}

unlock:
	hci_dev_unlock(hdev);
}
4649 
4650 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
4651 {
4652 	if (key_type == HCI_LK_CHANGED_COMBINATION)
4653 		return;
4654 
4655 	conn->pin_length = pin_len;
4656 	conn->key_type = key_type;
4657 
4658 	switch (key_type) {
4659 	case HCI_LK_LOCAL_UNIT:
4660 	case HCI_LK_REMOTE_UNIT:
4661 	case HCI_LK_DEBUG_COMBINATION:
4662 		return;
4663 	case HCI_LK_COMBINATION:
4664 		if (pin_len == 16)
4665 			conn->pending_sec_level = BT_SECURITY_HIGH;
4666 		else
4667 			conn->pending_sec_level = BT_SECURITY_MEDIUM;
4668 		break;
4669 	case HCI_LK_UNAUTH_COMBINATION_P192:
4670 	case HCI_LK_UNAUTH_COMBINATION_P256:
4671 		conn->pending_sec_level = BT_SECURITY_MEDIUM;
4672 		break;
4673 	case HCI_LK_AUTH_COMBINATION_P192:
4674 		conn->pending_sec_level = BT_SECURITY_HIGH;
4675 		break;
4676 	case HCI_LK_AUTH_COMBINATION_P256:
4677 		conn->pending_sec_level = BT_SECURITY_FIPS;
4678 		break;
4679 	}
4680 }
4681 
/* Handle the HCI Link Key Request event.
 *
 * Looks up a stored link key for the peer and replies with it, unless
 * the key is too weak for the security level the connection requires,
 * in which case a negative reply is sent so a fresh pairing takes
 * place.
 */
static void hci_link_key_request_evt(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	bt_dev_dbg(hdev, "");

	/* Key storage is only handled when mgmt is in control */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		bt_dev_dbg(hdev, "link key not found for %pMR", &ev->bdaddr);
		goto not_found;
	}

	bt_dev_dbg(hdev, "found key type %u for %pMR", key->type, &ev->bdaddr);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);

		/* Refuse an unauthenticated key when MITM protection was
		 * requested for this connection.
		 */
		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
			bt_dev_dbg(hdev, "ignoring unauthenticated key");
			goto not_found;
		}

		/* Combination keys from short PINs are too weak for high
		 * or FIPS security.
		 */
		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
		    (conn->pending_sec_level == BT_SECURITY_HIGH ||
		     conn->pending_sec_level == BT_SECURITY_FIPS)) {
			bt_dev_dbg(hdev, "ignoring key unauthenticated for high security");
			goto not_found;
		}

		conn_set_key(conn, key->type, key->pin_len);
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}
4739 
/* Handle the HCI Link Key Notification event.
 *
 * Stores the newly created link key, notifies user space about it and
 * applies the key's security implications to the connection.  NULL
 * (all-zero) keys are rejected outright to defend against
 * CVE-2020-26555.
 */
static void hci_link_key_notify_evt(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = data;
	struct hci_conn *conn;
	struct link_key *key;
	bool persistent;
	u8 pin_len = 0;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Ignore NULL link key against CVE-2020-26555 */
	if (!crypto_memneq(ev->link_key, ZERO_KEY, HCI_LINK_KEY_SIZE)) {
		bt_dev_dbg(hdev, "Ignore NULL link key (ZERO KEY) for %pMR",
			   &ev->bdaddr);
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	hci_conn_hold(conn);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(conn);

	set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
	conn_set_key(conn, ev->key_type, conn->pin_length);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	/* NOTE(review): pin_len is always 0 here even though
	 * conn->pin_length was used above — confirm hci_add_link_key
	 * derives the PIN length itself where it matters.
	 */
	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
			        ev->key_type, pin_len, &persistent);
	if (!key)
		goto unlock;

	/* Update connection information since adding the key will have
	 * fixed up the type in the case of changed combination keys.
	 */
	if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
		conn_set_key(conn, key->type, key->pin_len);

	mgmt_new_link_key(hdev, key, persistent);

	/* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
	 * is set. If it's not set simply remove the key from the kernel
	 * list (we've still notified user space about it but with
	 * store_hint being 0).
	 */
	if (key->type == HCI_LK_DEBUG_COMBINATION &&
	    !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
		goto unlock;
	}

	if (persistent)
		clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
	else
		set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);

unlock:
	hci_dev_unlock(hdev);
}
4809 
4810 static void hci_clock_offset_evt(struct hci_dev *hdev, void *data,
4811 				 struct sk_buff *skb)
4812 {
4813 	struct hci_ev_clock_offset *ev = data;
4814 	struct hci_conn *conn;
4815 
4816 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4817 
4818 	hci_dev_lock(hdev);
4819 
4820 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4821 	if (conn && !ev->status) {
4822 		struct inquiry_entry *ie;
4823 
4824 		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4825 		if (ie) {
4826 			ie->data.clock_offset = ev->clock_offset;
4827 			ie->timestamp = jiffies;
4828 		}
4829 	}
4830 
4831 	hci_dev_unlock(hdev);
4832 }
4833 
4834 static void hci_pkt_type_change_evt(struct hci_dev *hdev, void *data,
4835 				    struct sk_buff *skb)
4836 {
4837 	struct hci_ev_pkt_type_change *ev = data;
4838 	struct hci_conn *conn;
4839 
4840 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4841 
4842 	hci_dev_lock(hdev);
4843 
4844 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4845 	if (conn && !ev->status)
4846 		conn->pkt_type = __le16_to_cpu(ev->pkt_type);
4847 
4848 	hci_dev_unlock(hdev);
4849 }
4850 
4851 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, void *data,
4852 				   struct sk_buff *skb)
4853 {
4854 	struct hci_ev_pscan_rep_mode *ev = data;
4855 	struct inquiry_entry *ie;
4856 
4857 	bt_dev_dbg(hdev, "");
4858 
4859 	hci_dev_lock(hdev);
4860 
4861 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4862 	if (ie) {
4863 		ie->data.pscan_rep_mode = ev->pscan_rep_mode;
4864 		ie->timestamp = jiffies;
4865 	}
4866 
4867 	hci_dev_unlock(hdev);
4868 }
4869 
/* Handle the HCI Inquiry Result with RSSI event.
 *
 * The event exists in two wire formats — with and without the
 * pscan_mode field — which are distinguished by the total payload
 * length.  Each result is added to the inquiry cache and reported to
 * user space via mgmt_device_found.
 */
static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, void *edata,
					     struct sk_buff *skb)
{
	struct hci_ev_inquiry_result_rssi *ev = edata;
	struct inquiry_data data;
	int i;

	bt_dev_dbg(hdev, "num_rsp %d", ev->num);

	if (!ev->num)
		return;

	/* Results from periodic inquiry are not reported */
	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
		return;

	hci_dev_lock(hdev);

	/* Format including the pscan_mode field */
	if (skb->len == array_size(ev->num,
				   sizeof(struct inquiry_info_rssi_pscan))) {
		struct inquiry_info_rssi_pscan *info;

		for (i = 0; i < ev->num; i++) {
			u32 flags;

			info = hci_ev_skb_pull(hdev, skb,
					       HCI_EV_INQUIRY_RESULT_WITH_RSSI,
					       sizeof(*info));
			if (!info) {
				bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
					   HCI_EV_INQUIRY_RESULT_WITH_RSSI);
				goto unlock;
			}

			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode	= info->pscan_rep_mode;
			data.pscan_period_mode	= info->pscan_period_mode;
			data.pscan_mode		= info->pscan_mode;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset	= info->clock_offset;
			data.rssi		= info->rssi;
			data.ssp_mode		= 0x00;

			flags = hci_inquiry_cache_update(hdev, &data, false);

			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  flags, NULL, 0, NULL, 0, 0);
		}
	/* Format without the pscan_mode field */
	} else if (skb->len == array_size(ev->num,
					  sizeof(struct inquiry_info_rssi))) {
		struct inquiry_info_rssi *info;

		for (i = 0; i < ev->num; i++) {
			u32 flags;

			info = hci_ev_skb_pull(hdev, skb,
					       HCI_EV_INQUIRY_RESULT_WITH_RSSI,
					       sizeof(*info));
			if (!info) {
				bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
					   HCI_EV_INQUIRY_RESULT_WITH_RSSI);
				goto unlock;
			}

			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode	= info->pscan_rep_mode;
			data.pscan_period_mode	= info->pscan_period_mode;
			data.pscan_mode		= 0x00;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset	= info->clock_offset;
			data.rssi		= info->rssi;
			data.ssp_mode		= 0x00;

			flags = hci_inquiry_cache_update(hdev, &data, false);

			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  flags, NULL, 0, NULL, 0, 0);
		}
	} else {
		bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
			   HCI_EV_INQUIRY_RESULT_WITH_RSSI);
	}
unlock:
	hci_dev_unlock(hdev);
}
4956 
/* Handle the HCI Read Remote Extended Features Complete event.
 *
 * Stores the reported feature page, derives the SSP/SC enabled flags
 * from the remote host features (page 1) and, while the connection is
 * still being configured, continues setup with either a remote name
 * request or a connected notification plus optional authentication.
 */
static void hci_remote_ext_features_evt(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->page < HCI_MAX_PAGES)
		memcpy(conn->features[ev->page], ev->features, 8);

	/* Page 1 carries the remote host features (SSP/SC support) */
	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

		if (ev->features[0] & LMP_HOST_SSP) {
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		} else {
			/* It is mandatory by the Bluetooth specification that
			 * Extended Inquiry Results are only used when Secure
			 * Simple Pairing is enabled, but some devices violate
			 * this.
			 *
			 * To make these devices work, the internal SSP
			 * enabled flag needs to be cleared if the remote host
			 * features do not indicate SSP support */
			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		}

		if (ev->features[0] & LMP_HOST_SC)
			set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
	}

	if (conn->state != BT_CONFIG)
		goto unlock;

	/* Resolve the remote name first if mgmt has not been told about
	 * the connection yet; otherwise announce the connection now.
	 */
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, NULL, 0);

	/* Setup is done unless outgoing authentication is still needed */
	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
5020 
/* Handle the HCI Synchronous Connection Complete event.
 *
 * Finalizes SCO/eSCO connection setup: assigns the handle on success,
 * retries eSCO setup with downgraded packet types on specific failure
 * codes, notifies drivers of the negotiated air mode for HCI-routed
 * audio and finally runs the connect confirmation callbacks.
 */
static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = data;
	struct hci_conn *conn;
	u8 status = ev->status;

	switch (ev->link_type) {
	case SCO_LINK:
	case ESCO_LINK:
		break;
	default:
		/* As per Core 5.3 Vol 4 Part E 7.7.35 (p.2219), Link_Type
		 * for HCI_Synchronous_Connection_Complete is limited to
		 * either SCO or eSCO
		 */
		bt_dev_err(hdev, "Ignoring connect complete event for invalid link type");
		return;
	}

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		/* When the link type in the event indicates SCO connection
		 * and lookup of the connection object fails, then check
		 * if an eSCO connection object exists.
		 *
		 * The core limits the synchronous connections to either
		 * SCO or eSCO. The eSCO connection is preferred and tried
		 * to be setup first and until successfully established,
		 * the link type will be hinted as eSCO.
		 */
		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;
	}

	/* The HCI_Synchronous_Connection_Complete event is only sent once per connection.
	 * Processing it more than once per connection can corrupt kernel memory.
	 *
	 * As the connection handle is set here for the first time, it indicates
	 * whether the connection is already set up.
	 */
	if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
		bt_dev_err(hdev, "Ignoring HCI_Sync_Conn_Complete event for existing connection");
		goto unlock;
	}

	switch (status) {
	case 0x00:
		/* hci_conn_set_handle may reject an invalid handle; treat
		 * that as a failed connection.
		 */
		status = hci_conn_set_handle(conn, __le16_to_cpu(ev->handle));
		if (status) {
			conn->state = BT_CLOSED;
			break;
		}

		conn->state  = BT_CONNECTED;
		conn->type   = ev->link_type;

		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);
		break;

	case 0x10:	/* Connection Accept Timeout */
	case 0x0d:	/* Connection Rejected due to Limited Resources */
	case 0x11:	/* Unsupported Feature or Parameter Value */
	case 0x1c:	/* SCO interval rejected */
	case 0x1a:	/* Unsupported Remote Feature */
	case 0x1e:	/* Invalid LMP Parameters */
	case 0x1f:	/* Unspecified error */
	case 0x20:	/* Unsupported LMP Parameter value */
		/* For these failures a retry with reduced packet types may
		 * still succeed on outgoing connections.
		 */
		if (conn->out) {
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
			if (hci_setup_sync(conn, conn->parent->handle))
				goto unlock;
		}
		fallthrough;

	default:
		conn->state = BT_CLOSED;
		break;
	}

	bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode);
	/* Notify only in case of SCO over HCI transport data path which
	 * is zero and non-zero value shall be non-HCI transport data path
	 */
	if (conn->codec.data_path == 0 && hdev->notify) {
		switch (ev->air_mode) {
		case 0x02:
			hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
			break;
		case 0x03:
			hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP);
			break;
		}
	}

	hci_connect_cfm(conn, status);
	if (status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
5133 
5134 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
5135 {
5136 	size_t parsed = 0;
5137 
5138 	while (parsed < eir_len) {
5139 		u8 field_len = eir[0];
5140 
5141 		if (field_len == 0)
5142 			return parsed;
5143 
5144 		parsed += field_len + 1;
5145 		eir += field_len + 1;
5146 	}
5147 
5148 	return eir_len;
5149 }
5150 
/* Handle the HCI Extended Inquiry Result event.
 *
 * Adds each result (which carries EIR data in addition to the basic
 * inquiry fields) to the inquiry cache and reports it to user space
 * together with the significant part of the EIR data.
 */
static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, void *edata,
					    struct sk_buff *skb)
{
	struct hci_ev_ext_inquiry_result *ev = edata;
	struct inquiry_data data;
	size_t eir_len;
	int i;

	/* Make sure the skb actually carries ev->num result entries */
	if (!hci_ev_skb_pull(hdev, skb, HCI_EV_EXTENDED_INQUIRY_RESULT,
			     flex_array_size(ev, info, ev->num)))
		return;

	bt_dev_dbg(hdev, "num %d", ev->num);

	if (!ev->num)
		return;

	/* Results from periodic inquiry are not reported */
	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
		return;

	hci_dev_lock(hdev);

	for (i = 0; i < ev->num; i++) {
		struct extended_inquiry_info *info = &ev->info[i];
		u32 flags;
		bool name_known;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode	= info->pscan_rep_mode;
		data.pscan_period_mode	= info->pscan_period_mode;
		data.pscan_mode		= 0x00;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset	= info->clock_offset;
		data.rssi		= info->rssi;
		data.ssp_mode		= 0x01;

		/* A complete name inside the EIR data makes a remote name
		 * request unnecessary.
		 */
		if (hci_dev_test_flag(hdev, HCI_MGMT))
			name_known = eir_get_data(info->data,
						  sizeof(info->data),
						  EIR_NAME_COMPLETE, NULL);
		else
			name_known = true;

		flags = hci_inquiry_cache_update(hdev, &data, name_known);

		eir_len = eir_get_length(info->data, sizeof(info->data));

		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, info->rssi,
				  flags, info->data, eir_len, NULL, 0, 0);
	}

	hci_dev_unlock(hdev);
}
5205 
/* Handle the HCI Encryption Key Refresh Complete event.
 *
 * Only acted upon for LE links (BR/EDR is handled via the auth
 * complete event): promotes the pending security level on success,
 * disconnects on failure of an established link, and runs the
 * connect or auth confirmation callbacks depending on the connection
 * state.
 */
static void hci_key_refresh_complete_evt(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x handle 0x%4.4x", ev->status,
		   __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* For BR/EDR the necessary steps are taken through the
	 * auth_complete event.
	 */
	if (conn->type != LE_LINK)
		goto unlock;

	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* A refresh failure on an established link means the encryption
	 * could not be maintained; drop the link.
	 */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
5255 
5256 static u8 hci_get_auth_req(struct hci_conn *conn)
5257 {
5258 	/* If remote requests no-bonding follow that lead */
5259 	if (conn->remote_auth == HCI_AT_NO_BONDING ||
5260 	    conn->remote_auth == HCI_AT_NO_BONDING_MITM)
5261 		return conn->remote_auth | (conn->auth_type & 0x01);
5262 
5263 	/* If both remote and local have enough IO capabilities, require
5264 	 * MITM protection
5265 	 */
5266 	if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
5267 	    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
5268 		return conn->remote_auth | 0x01;
5269 
5270 	/* No MITM protection possible so ignore remote requirement */
5271 	return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
5272 }
5273 
/* Determine the OOB data present value for an IO Capability reply.
 *
 * Returns 0x00 when no usable OOB data is stored for the peer, 0x01
 * when valid P-192 values are stored (legacy pairing), 0x02 when valid
 * P-256 values are stored under Secure Connections Only mode, or the
 * stored present value when Secure Connections is enabled but not
 * restricted to SC Only.
 */
static u8 bredr_oob_data_present(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
	if (!data)
		return 0x00;

	if (bredr_sc_enabled(hdev)) {
		/* When Secure Connections is enabled, then just
		 * return the present value stored with the OOB
		 * data. The stored value contains the right present
		 * information. However it can only be trusted when
		 * not in Secure Connection Only mode.
		 */
		if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
			return data->present;

		/* When Secure Connections Only mode is enabled, then
		 * the P-256 values are required. If they are not
		 * available, then do not declare that OOB data is
		 * present.
		 */
		if (!crypto_memneq(data->rand256, ZERO_KEY, 16) ||
		    !crypto_memneq(data->hash256, ZERO_KEY, 16))
			return 0x00;

		return 0x02;
	}

	/* When Secure Connections is not enabled or actually
	 * not supported by the hardware, then check that if
	 * P-192 data values are present.
	 */
	if (!crypto_memneq(data->rand192, ZERO_KEY, 16) ||
	    !crypto_memneq(data->hash192, ZERO_KEY, 16))
		return 0x00;

	return 0x01;
}
5315 
/* Handle the HCI IO Capability Request event.
 *
 * Replies with our IO capability, derived authentication requirements
 * and OOB data availability when pairing is allowed; otherwise sends a
 * negative reply rejecting the pairing attempt.
 */
static void hci_io_capa_request_evt(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn || !hci_conn_ssp_enabled(conn))
		goto unlock;

	/* Keep the connection alive for the duration of the exchange */
	hci_conn_hold(conn);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	/* Allow pairing if we're pairable, the initiators of the
	 * pairing or if the remote is not requesting bonding.
	 */
	if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
	    test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
				HCI_IO_DISPLAY_YESNO : conn->io_capability;

		/* If we are initiators, there is no remote information yet */
		if (conn->remote_auth == 0xff) {
			/* Request MITM protection if our IO caps allow it
			 * except for the no-bonding case.
			 */
			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
			    conn->auth_type != HCI_AT_NO_BONDING)
				conn->auth_type |= 0x01;
		} else {
			conn->auth_type = hci_get_auth_req(conn);
		}

		/* If we're not bondable, force one of the non-bondable
		 * authentication requirement values.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
			conn->auth_type &= HCI_AT_NO_BONDING_MITM;

		cp.authentication = conn->auth_type;
		cp.oob_data = bredr_oob_data_present(conn);

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
5385 
5386 static void hci_io_capa_reply_evt(struct hci_dev *hdev, void *data,
5387 				  struct sk_buff *skb)
5388 {
5389 	struct hci_ev_io_capa_reply *ev = data;
5390 	struct hci_conn *conn;
5391 
5392 	bt_dev_dbg(hdev, "");
5393 
5394 	hci_dev_lock(hdev);
5395 
5396 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5397 	if (!conn)
5398 		goto unlock;
5399 
5400 	conn->remote_cap = ev->capability;
5401 	conn->remote_auth = ev->authentication;
5402 
5403 unlock:
5404 	hci_dev_unlock(hdev);
5405 }
5406 
/* Handle HCI_EV_USER_CONFIRM_REQ (SSP numeric comparison): depending on
 * the local/remote MITM requirements and IO capabilities either reject
 * the request, auto-accept it (possibly after a configurable delay), or
 * forward it to user space via mgmt_user_confirm_request().
 */
static void hci_user_confirm_request_evt(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	/* Without the management interface there is nobody to answer */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Bit 0 of the authentication requirements is the MITM flag */
	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. We check the security level here since it doesn't
	 * necessarily match conn->auth_type.
	 */
	if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
	    conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
		bt_dev_dbg(hdev, "Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). The exception is if neither
		 * side had MITM or if the local IO capability is
		 * NoInputNoOutput, in which case we do auto-accept
		 */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
		    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
		    (loc_mitm || rem_mitm)) {
			bt_dev_dbg(hdev, "Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		/* If there already exists link key in local host, leave the
		 * decision to user space since the remote device could be
		 * legitimate or malicious.
		 */
		if (hci_find_link_key(hdev, &ev->bdaddr)) {
			bt_dev_dbg(hdev, "Local host already has link key");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		/* Optionally delay the auto-accept; the delayed work sends
		 * the confirm reply once it runs.
		 */
		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			queue_delayed_work(conn->hdev->workqueue,
					   &conn->auto_accept_work, delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	/* Let user space decide; confirm_hint==1 asks for a plain
	 * authorization instead of a numeric comparison.
	 */
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
				  le32_to_cpu(ev->passkey), confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
5491 
5492 static void hci_user_passkey_request_evt(struct hci_dev *hdev, void *data,
5493 					 struct sk_buff *skb)
5494 {
5495 	struct hci_ev_user_passkey_req *ev = data;
5496 
5497 	bt_dev_dbg(hdev, "");
5498 
5499 	if (hci_dev_test_flag(hdev, HCI_MGMT))
5500 		mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
5501 }
5502 
5503 static void hci_user_passkey_notify_evt(struct hci_dev *hdev, void *data,
5504 					struct sk_buff *skb)
5505 {
5506 	struct hci_ev_user_passkey_notify *ev = data;
5507 	struct hci_conn *conn;
5508 
5509 	bt_dev_dbg(hdev, "");
5510 
5511 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5512 	if (!conn)
5513 		return;
5514 
5515 	conn->passkey_notify = __le32_to_cpu(ev->passkey);
5516 	conn->passkey_entered = 0;
5517 
5518 	if (hci_dev_test_flag(hdev, HCI_MGMT))
5519 		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5520 					 conn->dst_type, conn->passkey_notify,
5521 					 conn->passkey_entered);
5522 }
5523 
5524 static void hci_keypress_notify_evt(struct hci_dev *hdev, void *data,
5525 				    struct sk_buff *skb)
5526 {
5527 	struct hci_ev_keypress_notify *ev = data;
5528 	struct hci_conn *conn;
5529 
5530 	bt_dev_dbg(hdev, "");
5531 
5532 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5533 	if (!conn)
5534 		return;
5535 
5536 	switch (ev->type) {
5537 	case HCI_KEYPRESS_STARTED:
5538 		conn->passkey_entered = 0;
5539 		return;
5540 
5541 	case HCI_KEYPRESS_ENTERED:
5542 		conn->passkey_entered++;
5543 		break;
5544 
5545 	case HCI_KEYPRESS_ERASED:
5546 		conn->passkey_entered--;
5547 		break;
5548 
5549 	case HCI_KEYPRESS_CLEARED:
5550 		conn->passkey_entered = 0;
5551 		break;
5552 
5553 	case HCI_KEYPRESS_COMPLETED:
5554 		return;
5555 	}
5556 
5557 	if (hci_dev_test_flag(hdev, HCI_MGMT))
5558 		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5559 					 conn->dst_type, conn->passkey_notify,
5560 					 conn->passkey_entered);
5561 }
5562 
/* Handle HCI_EV_SIMPLE_PAIR_COMPLETE: report SSP pairing failures to
 * user space (when we were not the initiator) and drop the connection
 * reference held for the pairing procedure.
 */
static void hci_simple_pair_complete_evt(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_ev_simple_pair_complete *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	/* Only relevant for connections that actually used SSP */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn || !hci_conn_ssp_enabled(conn))
		goto unlock;

	/* Reset the authentication requirement to unknown */
	conn->remote_auth = 0xff;

	/* To avoid duplicate auth_failed events to user space we check
	 * the HCI_CONN_AUTH_PEND flag which will be set if we
	 * initiated the authentication. A traditional auth_complete
	 * event gets always produced as initiator and is also mapped to
	 * the mgmt_auth_failed event */
	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
		mgmt_auth_failed(conn, ev->status);

	/* Release the reference taken when pairing started */
	hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}
5593 
/* Handle HCI_EV_REMOTE_HOST_FEATURES: cache the remote host features on
 * the connection (features page 1) and refresh the SSP support bit in
 * the inquiry cache entry for this address.
 */
static void hci_remote_host_features_evt(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_ev_remote_host_features *ev = data;
	struct inquiry_entry *ie;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	/* Page 1 of conn->features holds the host-supported features */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn)
		memcpy(conn->features[1], ev->features, 8);

	/* Keep the cached inquiry entry's SSP mode in sync */
	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

	hci_dev_unlock(hdev);
}
5615 
/* Handle HCI_EV_REMOTE_OOB_DATA_REQUEST: answer the controller's request
 * for out-of-band pairing data. Reply negatively when no OOB data is
 * stored for the peer; otherwise send either the extended (P-192 +
 * P-256) or the legacy (P-192 only) reply depending on whether BR/EDR
 * Secure Connections is enabled.
 */
static void hci_remote_oob_data_request_evt(struct hci_dev *hdev, void *edata,
					    struct sk_buff *skb)
{
	struct hci_ev_remote_oob_data_request *ev = edata;
	struct oob_data *data;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	/* No stored OOB data for this peer: reject the request */
	data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
	if (!data) {
		struct hci_cp_remote_oob_data_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
			     sizeof(cp), &cp);
		goto unlock;
	}

	if (bredr_sc_enabled(hdev)) {
		struct hci_cp_remote_oob_ext_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* In SC Only mode the legacy P-192 values are zeroed so
		 * that only the P-256 values can be used.
		 */
		if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
			memset(cp.hash192, 0, sizeof(cp.hash192));
			memset(cp.rand192, 0, sizeof(cp.rand192));
		} else {
			memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
			memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
		}
		memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
		memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
			     sizeof(cp), &cp);
	} else {
		/* Legacy reply carries only the P-192 hash and randomizer */
		struct hci_cp_remote_oob_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		memcpy(cp.hash, data->hash192, sizeof(cp.hash));
		memcpy(cp.rand, data->rand192, sizeof(cp.rand));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
5669 
5670 #if IS_ENABLED(CONFIG_BT_HS)
5671 static void hci_chan_selected_evt(struct hci_dev *hdev, void *data,
5672 				  struct sk_buff *skb)
5673 {
5674 	struct hci_ev_channel_selected *ev = data;
5675 	struct hci_conn *hcon;
5676 
5677 	bt_dev_dbg(hdev, "handle 0x%2.2x", ev->phy_handle);
5678 
5679 	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5680 	if (!hcon)
5681 		return;
5682 
5683 	amp_read_loc_assoc_final_data(hdev, hcon);
5684 }
5685 
/* Handle HCI_EV_PHY_LINK_COMPLETE (AMP): finalize the AMP physical link.
 * On failure the connection object is deleted; on success the link is
 * marked connected, registered in debugfs/sysfs and confirmed towards
 * the AMP manager.
 */
static void hci_phy_link_complete_evt(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_phy_link_complete *ev = data;
	struct hci_conn *hcon, *bredr_hcon;

	bt_dev_dbg(hdev, "handle 0x%2.2x status 0x%2.2x", ev->phy_handle,
		   ev->status);

	hci_dev_lock(hdev);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		goto unlock;

	/* Only links managed by an AMP manager are handled here */
	if (!hcon->amp_mgr)
		goto unlock;

	if (ev->status) {
		hci_conn_del(hcon);
		goto unlock;
	}

	/* The AMP link inherits its destination from the BR/EDR link
	 * that the AMP manager was created for.
	 */
	bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;

	hcon->state = BT_CONNECTED;
	bacpy(&hcon->dst, &bredr_hcon->dst);

	/* NOTE(review): the hold/drop pair appears to be used to re-arm
	 * the idle disconnect handling with the updated disc_timeout --
	 * confirm against hci_conn_drop() semantics.
	 */
	hci_conn_hold(hcon);
	hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(hcon);

	hci_debugfs_create_conn(hcon);
	hci_conn_add_sysfs(hcon);

	amp_physical_cfm(bredr_hcon, hcon);

unlock:
	hci_dev_unlock(hdev);
}
5726 
/* Handle HCI_EV_LOGICAL_LINK_COMPLETE (AMP): create an AMP hci_chan for
 * the new logical link and, if an L2CAP channel is waiting on the AMP
 * manager, confirm the logical link to it.
 *
 * NOTE(review): unlike most handlers in this file the lookups here are
 * done without taking hci_dev_lock -- confirm this is intentional.
 */
static void hci_loglink_complete_evt(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_logical_link_complete *ev = data;
	struct hci_conn *hcon;
	struct hci_chan *hchan;
	struct amp_mgr *mgr;

	bt_dev_dbg(hdev, "log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
		   le16_to_cpu(ev->handle), ev->phy_handle, ev->status);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	/* Create AMP hchan */
	hchan = hci_chan_create(hcon);
	if (!hchan)
		return;

	hchan->handle = le16_to_cpu(ev->handle);
	hchan->amp = true;

	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);

	mgr = hcon->amp_mgr;
	if (mgr && mgr->bredr_chan) {
		struct l2cap_chan *bredr_chan = mgr->bredr_chan;

		l2cap_chan_lock(bredr_chan);

		/* Switch the channel's MTU to the AMP controller's block
		 * MTU and confirm the logical link.
		 */
		bredr_chan->conn->mtu = hdev->block_mtu;
		l2cap_logical_cfm(bredr_chan, hchan, 0);
		hci_conn_hold(hcon);

		l2cap_chan_unlock(bredr_chan);
	}
}
5765 
5766 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev, void *data,
5767 					     struct sk_buff *skb)
5768 {
5769 	struct hci_ev_disconn_logical_link_complete *ev = data;
5770 	struct hci_chan *hchan;
5771 
5772 	bt_dev_dbg(hdev, "handle 0x%4.4x status 0x%2.2x",
5773 		   le16_to_cpu(ev->handle), ev->status);
5774 
5775 	if (ev->status)
5776 		return;
5777 
5778 	hci_dev_lock(hdev);
5779 
5780 	hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
5781 	if (!hchan || !hchan->amp)
5782 		goto unlock;
5783 
5784 	amp_destroy_logical_link(hchan, ev->reason);
5785 
5786 unlock:
5787 	hci_dev_unlock(hdev);
5788 }
5789 
5790 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev, void *data,
5791 					     struct sk_buff *skb)
5792 {
5793 	struct hci_ev_disconn_phy_link_complete *ev = data;
5794 	struct hci_conn *hcon;
5795 
5796 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5797 
5798 	if (ev->status)
5799 		return;
5800 
5801 	hci_dev_lock(hdev);
5802 
5803 	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5804 	if (hcon && hcon->type == AMP_LINK) {
5805 		hcon->state = BT_CLOSED;
5806 		hci_disconn_cfm(hcon, ev->reason);
5807 		hci_conn_del(hcon);
5808 	}
5809 
5810 	hci_dev_unlock(hdev);
5811 }
5812 #endif
5813 
/* Fill in the initiator/responder address information of a new LE
 * connection based on our role (conn->out), the peer address from the
 * connection complete event and, if present, the Local RPA reported by
 * the controller.
 */
static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr,
				u8 bdaddr_type, bdaddr_t *local_rpa)
{
	/* Outgoing connection: the peer is the responder, we are the
	 * initiator.
	 */
	if (conn->out) {
		conn->dst_type = bdaddr_type;
		conn->resp_addr_type = bdaddr_type;
		bacpy(&conn->resp_addr, bdaddr);

		/* Check if the controller has set a Local RPA then it must be
		 * used instead of hdev->rpa.
		 */
		if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
			conn->init_addr_type = ADDR_LE_DEV_RANDOM;
			bacpy(&conn->init_addr, local_rpa);
		} else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) {
			conn->init_addr_type = ADDR_LE_DEV_RANDOM;
			bacpy(&conn->init_addr, &conn->hdev->rpa);
		} else {
			hci_copy_identity_address(conn->hdev, &conn->init_addr,
						  &conn->init_addr_type);
		}
	} else {
		/* Incoming connection: we are the responder */
		conn->resp_addr_type = conn->hdev->adv_addr_type;
		/* Check if the controller has set a Local RPA then it must be
		 * used instead of hdev->rpa.
		 */
		if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
			conn->resp_addr_type = ADDR_LE_DEV_RANDOM;
			bacpy(&conn->resp_addr, local_rpa);
		} else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
			/* In case of ext adv, resp_addr will be updated in
			 * Adv Terminated event.
			 */
			if (!ext_adv_capable(conn->hdev))
				bacpy(&conn->resp_addr,
				      &conn->hdev->random_addr);
		} else {
			bacpy(&conn->resp_addr, &conn->hdev->bdaddr);
		}

		conn->init_addr_type = bdaddr_type;
		bacpy(&conn->init_addr, bdaddr);

		/* For incoming connections, set the default minimum
		 * and maximum connection interval. They will be used
		 * to check if the parameters are in range and if not
		 * trigger the connection update procedure.
		 */
		conn->le_conn_min_interval = conn->hdev->le_conn_min_interval;
		conn->le_conn_max_interval = conn->hdev->le_conn_max_interval;
	}
}
5866 
/* Common handler for the legacy and enhanced LE Connection Complete
 * events. Looks up (or creates) the hci_conn for the peer, resolves its
 * identity address, validates the connection handle, and transitions
 * the connection into BT_CONFIG/BT_CONNECTED while notifying mgmt and
 * cleaning up any pending LE connection parameters.
 *
 * local_rpa is NULL for the legacy event which does not carry one.
 */
static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
				 bdaddr_t *bdaddr, u8 bdaddr_type,
				 bdaddr_t *local_rpa, u8 role, u16 handle,
				 u16 interval, u16 latency,
				 u16 supervision_timeout)
{
	struct hci_conn_params *params;
	struct hci_conn *conn;
	struct smp_irk *irk;
	u8 addr_type;

	hci_dev_lock(hdev);

	/* All controllers implicitly stop advertising in the event of a
	 * connection, so ensure that the state bit is cleared.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr);
	if (!conn) {
		/* In case of error status and there is no connection pending
		 * just unlock as there is nothing to cleanup.
		 */
		if (status)
			goto unlock;

		conn = hci_conn_add_unset(hdev, LE_LINK, bdaddr, role);
		if (!conn) {
			bt_dev_err(hdev, "no memory for new connection");
			goto unlock;
		}

		conn->dst_type = bdaddr_type;

		/* If we didn't have a hci_conn object previously
		 * but we're in central role this must be something
		 * initiated using an accept list. Since accept list based
		 * connections are not "first class citizens" we don't
		 * have full tracking of them. Therefore, we go ahead
		 * with a "best effort" approach of determining the
		 * initiator address based on the HCI_PRIVACY flag.
		 */
		if (conn->out) {
			conn->resp_addr_type = bdaddr_type;
			bacpy(&conn->resp_addr, bdaddr);
			if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
				bacpy(&conn->init_addr, &hdev->rpa);
			} else {
				hci_copy_identity_address(hdev,
							  &conn->init_addr,
							  &conn->init_addr_type);
			}
		}
	} else {
		/* The connection attempt succeeded or failed before the
		 * timeout fired; stop it either way.
		 */
		cancel_delayed_work(&conn->le_conn_timeout);
	}

	/* The HCI_LE_Connection_Complete event is only sent once per connection.
	 * Processing it more than once per connection can corrupt kernel memory.
	 *
	 * As the connection handle is set here for the first time, it indicates
	 * whether the connection is already set up.
	 */
	if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
		bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
		goto unlock;
	}

	le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa);

	/* Lookup the identity address from the stored connection
	 * address and address type.
	 *
	 * When establishing connections to an identity address, the
	 * connection procedure will store the resolvable random
	 * address first. Now if it can be converted back into the
	 * identity address, start using the identity address from
	 * now on.
	 */
	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
	if (irk) {
		bacpy(&conn->dst, &irk->bdaddr);
		conn->dst_type = irk->addr_type;
	}

	conn->dst_type = ev_bdaddr_type(hdev, conn->dst_type, NULL);

	/* All connection failure handling is taken care of by the
	 * hci_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */
	if (status || hci_conn_set_handle(conn, handle))
		goto unlock;

	/* Drop the connection if it has been aborted */
	if (test_bit(HCI_CONN_CANCEL, &conn->flags)) {
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
		addr_type = BDADDR_LE_PUBLIC;
	else
		addr_type = BDADDR_LE_RANDOM;

	/* Drop the connection if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->reject_list, &conn->dst, addr_type)) {
		hci_conn_drop(conn);
		goto unlock;
	}

	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, NULL, 0);

	conn->sec_level = BT_SECURITY_LOW;
	conn->state = BT_CONFIG;

	/* Store current advertising instance as connection advertising instance
	 * when software rotation is in use so it can be re-enabled when
	 * disconnected.
	 */
	if (!ext_adv_capable(hdev))
		conn->adv_instance = hdev->cur_adv_instance;

	conn->le_conn_interval = interval;
	conn->le_conn_latency = latency;
	conn->le_supv_timeout = supervision_timeout;

	hci_debugfs_create_conn(conn);
	hci_conn_add_sysfs(conn);

	/* The remote features procedure is defined for central
	 * role only. So only in case of an initiated connection
	 * request the remote features.
	 *
	 * If the local controller supports peripheral-initiated features
	 * exchange, then requesting the remote features in peripheral
	 * role is possible. Otherwise just transition into the
	 * connected state without requesting the remote features.
	 */
	if (conn->out ||
	    (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) {
		struct hci_cp_le_read_remote_features cp;

		cp.handle = __cpu_to_le16(conn->handle);

		hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
			     sizeof(cp), &cp);

		hci_conn_hold(conn);
	} else {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, status);
	}

	/* The pending connection attempt has been satisfied; release the
	 * reference the parameters held on the connection.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
					   conn->dst_type);
	if (params) {
		hci_pend_le_list_del_init(params);
		if (params->conn) {
			hci_conn_drop(params->conn);
			hci_conn_put(params->conn);
			params->conn = NULL;
		}
	}

unlock:
	hci_update_passive_scan(hdev);
	hci_dev_unlock(hdev);
}
6038 
/* Handle HCI_EV_LE_CONN_COMPLETE: thin wrapper around
 * le_conn_complete_evt(). The legacy event carries no Local RPA, hence
 * the NULL argument.
 */
static void hci_le_conn_complete_evt(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_le_conn_complete *ev = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
			     NULL, ev->role, le16_to_cpu(ev->handle),
			     le16_to_cpu(ev->interval),
			     le16_to_cpu(ev->latency),
			     le16_to_cpu(ev->supervision_timeout));
}
6052 
/* Handle HCI_EV_LE_ENHANCED_CONN_COMPLETE: like the legacy event but
 * additionally passes the controller-reported Local RPA through to
 * le_conn_complete_evt().
 */
static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_ev_le_enh_conn_complete *ev = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
			     &ev->local_rpa, ev->role, le16_to_cpu(ev->handle),
			     le16_to_cpu(ev->interval),
			     le16_to_cpu(ev->latency),
			     le16_to_cpu(ev->supervision_timeout));
}
6066 
/* Handle HCI_EVT_LE_EXT_ADV_SET_TERM: clean up the advertising instance
 * whose set was terminated. On error the instance is removed entirely;
 * on success (terminated because a connection was created) the instance
 * is disabled and its random address is propagated to the new
 * connection's response address.
 */
static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_evt_le_ext_adv_set_term *ev = data;
	struct hci_conn *conn;
	struct adv_info *adv, *n;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	/* The Bluetooth Core 5.3 specification clearly states that this event
	 * shall not be sent when the Host disables the advertising set. So in
	 * case of HCI_ERROR_CANCELLED_BY_HOST, just ignore the event.
	 *
	 * When the Host disables an advertising set, all cleanup is done via
	 * its command callback and not needed to be duplicated here.
	 */
	if (ev->status == HCI_ERROR_CANCELLED_BY_HOST) {
		bt_dev_warn_ratelimited(hdev, "Unexpected advertising set terminated event");
		return;
	}

	hci_dev_lock(hdev);

	adv = hci_find_adv_instance(hdev, ev->handle);

	if (ev->status) {
		if (!adv)
			goto unlock;

		/* Remove advertising as it has been terminated */
		hci_remove_adv_instance(hdev, ev->handle);
		mgmt_advertising_removed(NULL, hdev, ev->handle);

		/* If any other instance is still enabled, advertising as a
		 * whole continues and the flag must be kept.
		 */
		list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
			if (adv->enabled)
				goto unlock;
		}

		/* We are no longer advertising, clear HCI_LE_ADV */
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
		goto unlock;
	}

	if (adv)
		adv->enabled = false;

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
	if (conn) {
		/* Store handle in the connection so the correct advertising
		 * instance can be re-enabled when disconnected.
		 */
		conn->adv_instance = ev->handle;

		/* resp_addr only needs fixing up when we advertised with a
		 * random address and it has not been set yet.
		 */
		if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM ||
		    bacmp(&conn->resp_addr, BDADDR_ANY))
			goto unlock;

		/* Handle 0 is the default instance using hdev->random_addr */
		if (!ev->handle) {
			bacpy(&conn->resp_addr, &hdev->random_addr);
			goto unlock;
		}

		if (adv)
			bacpy(&conn->resp_addr, &adv->random_addr);
	}

unlock:
	hci_dev_unlock(hdev);
}
6136 
6137 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev, void *data,
6138 					    struct sk_buff *skb)
6139 {
6140 	struct hci_ev_le_conn_update_complete *ev = data;
6141 	struct hci_conn *conn;
6142 
6143 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6144 
6145 	if (ev->status)
6146 		return;
6147 
6148 	hci_dev_lock(hdev);
6149 
6150 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6151 	if (conn) {
6152 		conn->le_conn_interval = le16_to_cpu(ev->interval);
6153 		conn->le_conn_latency = le16_to_cpu(ev->latency);
6154 		conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
6155 	}
6156 
6157 	hci_dev_unlock(hdev);
6158 }
6159 
/* This function requires the caller holds hdev->lock.
 *
 * Decide whether an advertising report from addr should trigger an LE
 * connection attempt, and if so start it. Returns the new hci_conn on
 * success, or NULL when no connection should or could be created
 * (non-connectable advertising, blocked/suspended device, busy
 * controller, or no matching pending connection parameters).
 */
static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
					      bdaddr_t *addr,
					      u8 addr_type, bool addr_resolved,
					      u8 adv_type)
{
	struct hci_conn *conn;
	struct hci_conn_params *params;

	/* If the event is not connectable don't proceed further */
	if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
		return NULL;

	/* Ignore if the device is blocked or hdev is suspended */
	if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type) ||
	    hdev->suspended)
		return NULL;

	/* Most controller will fail if we try to create new connections
	 * while we have an existing one in peripheral role.
	 */
	if (hdev->conn_hash.le_num_peripheral > 0 &&
	    (!test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) ||
	     !(hdev->le_states[3] & 0x10)))
		return NULL;

	/* If we're not connectable only connect devices that we have in
	 * our pend_le_conns list.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
					   addr_type);
	if (!params)
		return NULL;

	if (!params->explicit_connect) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
			/* Only devices advertising with ADV_DIRECT_IND are
			 * triggering a connection attempt. This is allowing
			 * incoming connections from peripheral devices.
			 */
			if (adv_type != LE_ADV_DIRECT_IND)
				return NULL;
			break;
		case HCI_AUTO_CONN_ALWAYS:
			/* Devices advertising with ADV_IND or ADV_DIRECT_IND
			 * are triggering a connection attempt. This means
			 * that incoming connections from peripheral device are
			 * accepted and also outgoing connections to peripheral
			 * devices are established when found.
			 */
			break;
		default:
			return NULL;
		}
	}

	conn = hci_connect_le(hdev, addr, addr_type, addr_resolved,
			      BT_SECURITY_LOW, hdev->def_le_autoconnect_timeout,
			      HCI_ROLE_MASTER);
	if (!IS_ERR(conn)) {
		/* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
		 * by higher layer that tried to connect, if no then
		 * store the pointer since we don't really have any
		 * other owner of the object besides the params that
		 * triggered it. This way we can abort the connection if
		 * the parameters get removed and keep the reference
		 * count consistent once the connection is established.
		 */

		if (!params->explicit_connect)
			params->conn = hci_conn_get(conn);

		return conn;
	}

	switch (PTR_ERR(conn)) {
	case -EBUSY:
		/* If hci_connect() returns -EBUSY it means there is already
		 * an LE connection attempt going on. Since controllers don't
		 * support more than one connection attempt at the time, we
		 * don't consider this an error case.
		 */
		break;
	default:
		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
		return NULL;
	}

	return NULL;
}
6251 
6252 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
6253 			       u8 bdaddr_type, bdaddr_t *direct_addr,
6254 			       u8 direct_addr_type, s8 rssi, u8 *data, u8 len,
6255 			       bool ext_adv, bool ctl_time, u64 instant)
6256 {
6257 	struct discovery_state *d = &hdev->discovery;
6258 	struct smp_irk *irk;
6259 	struct hci_conn *conn;
6260 	bool match, bdaddr_resolved;
6261 	u32 flags;
6262 	u8 *ptr;
6263 
6264 	switch (type) {
6265 	case LE_ADV_IND:
6266 	case LE_ADV_DIRECT_IND:
6267 	case LE_ADV_SCAN_IND:
6268 	case LE_ADV_NONCONN_IND:
6269 	case LE_ADV_SCAN_RSP:
6270 		break;
6271 	default:
6272 		bt_dev_err_ratelimited(hdev, "unknown advertising packet "
6273 				       "type: 0x%02x", type);
6274 		return;
6275 	}
6276 
6277 	if (len > max_adv_len(hdev)) {
6278 		bt_dev_err_ratelimited(hdev,
6279 				       "adv larger than maximum supported");
6280 		return;
6281 	}
6282 
6283 	/* Find the end of the data in case the report contains padded zero
6284 	 * bytes at the end causing an invalid length value.
6285 	 *
6286 	 * When data is NULL, len is 0 so there is no need for extra ptr
6287 	 * check as 'ptr < data + 0' is already false in such case.
6288 	 */
6289 	for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
6290 		if (ptr + 1 + *ptr > data + len)
6291 			break;
6292 	}
6293 
6294 	/* Adjust for actual length. This handles the case when remote
6295 	 * device is advertising with incorrect data length.
6296 	 */
6297 	len = ptr - data;
6298 
6299 	/* If the direct address is present, then this report is from
6300 	 * a LE Direct Advertising Report event. In that case it is
6301 	 * important to see if the address is matching the local
6302 	 * controller address.
6303 	 */
6304 	if (!hci_dev_test_flag(hdev, HCI_MESH) && direct_addr) {
6305 		direct_addr_type = ev_bdaddr_type(hdev, direct_addr_type,
6306 						  &bdaddr_resolved);
6307 
6308 		/* Only resolvable random addresses are valid for these
6309 		 * kind of reports and others can be ignored.
6310 		 */
6311 		if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
6312 			return;
6313 
6314 		/* If the controller is not using resolvable random
6315 		 * addresses, then this report can be ignored.
6316 		 */
6317 		if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
6318 			return;
6319 
6320 		/* If the local IRK of the controller does not match
6321 		 * with the resolvable random address provided, then
6322 		 * this report can be ignored.
6323 		 */
6324 		if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
6325 			return;
6326 	}
6327 
6328 	/* Check if we need to convert to identity address */
6329 	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
6330 	if (irk) {
6331 		bdaddr = &irk->bdaddr;
6332 		bdaddr_type = irk->addr_type;
6333 	}
6334 
6335 	bdaddr_type = ev_bdaddr_type(hdev, bdaddr_type, &bdaddr_resolved);
6336 
6337 	/* Check if we have been requested to connect to this device.
6338 	 *
6339 	 * direct_addr is set only for directed advertising reports (it is NULL
6340 	 * for advertising reports) and is already verified to be RPA above.
6341 	 */
6342 	conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, bdaddr_resolved,
6343 				     type);
6344 	if (!ext_adv && conn && type == LE_ADV_IND &&
6345 	    len <= max_adv_len(hdev)) {
6346 		/* Store report for later inclusion by
6347 		 * mgmt_device_connected
6348 		 */
6349 		memcpy(conn->le_adv_data, data, len);
6350 		conn->le_adv_data_len = len;
6351 	}
6352 
6353 	if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
6354 		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
6355 	else
6356 		flags = 0;
6357 
6358 	/* All scan results should be sent up for Mesh systems */
6359 	if (hci_dev_test_flag(hdev, HCI_MESH)) {
6360 		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6361 				  rssi, flags, data, len, NULL, 0, instant);
6362 		return;
6363 	}
6364 
6365 	/* Passive scanning shouldn't trigger any device found events,
6366 	 * except for devices marked as CONN_REPORT for which we do send
6367 	 * device found events, or advertisement monitoring requested.
6368 	 */
6369 	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
6370 		if (type == LE_ADV_DIRECT_IND)
6371 			return;
6372 
6373 		if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
6374 					       bdaddr, bdaddr_type) &&
6375 		    idr_is_empty(&hdev->adv_monitors_idr))
6376 			return;
6377 
6378 		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6379 				  rssi, flags, data, len, NULL, 0, 0);
6380 		return;
6381 	}
6382 
6383 	/* When receiving a scan response, then there is no way to
6384 	 * know if the remote device is connectable or not. However
6385 	 * since scan responses are merged with a previously seen
6386 	 * advertising report, the flags field from that report
6387 	 * will be used.
6388 	 *
6389 	 * In the unlikely case that a controller just sends a scan
6390 	 * response event that doesn't match the pending report, then
6391 	 * it is marked as a standalone SCAN_RSP.
6392 	 */
6393 	if (type == LE_ADV_SCAN_RSP)
6394 		flags = MGMT_DEV_FOUND_SCAN_RSP;
6395 
6396 	/* If there's nothing pending either store the data from this
6397 	 * event or send an immediate device found event if the data
6398 	 * should not be stored for later.
6399 	 */
6400 	if (!ext_adv &&	!has_pending_adv_report(hdev)) {
6401 		/* If the report will trigger a SCAN_REQ store it for
6402 		 * later merging.
6403 		 */
6404 		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
6405 			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
6406 						 rssi, flags, data, len);
6407 			return;
6408 		}
6409 
6410 		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6411 				  rssi, flags, data, len, NULL, 0, 0);
6412 		return;
6413 	}
6414 
6415 	/* Check if the pending report is for the same device as the new one */
6416 	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
6417 		 bdaddr_type == d->last_adv_addr_type);
6418 
6419 	/* If the pending data doesn't match this report or this isn't a
6420 	 * scan response (e.g. we got a duplicate ADV_IND) then force
6421 	 * sending of the pending data.
6422 	 */
6423 	if (type != LE_ADV_SCAN_RSP || !match) {
6424 		/* Send out whatever is in the cache, but skip duplicates */
6425 		if (!match)
6426 			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
6427 					  d->last_adv_addr_type, NULL,
6428 					  d->last_adv_rssi, d->last_adv_flags,
6429 					  d->last_adv_data,
6430 					  d->last_adv_data_len, NULL, 0, 0);
6431 
6432 		/* If the new report will trigger a SCAN_REQ store it for
6433 		 * later merging.
6434 		 */
6435 		if (!ext_adv && (type == LE_ADV_IND ||
6436 				 type == LE_ADV_SCAN_IND)) {
6437 			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
6438 						 rssi, flags, data, len);
6439 			return;
6440 		}
6441 
6442 		/* The advertising reports cannot be merged, so clear
6443 		 * the pending report and send out a device found event.
6444 		 */
6445 		clear_pending_adv_report(hdev);
6446 		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6447 				  rssi, flags, data, len, NULL, 0, 0);
6448 		return;
6449 	}
6450 
6451 	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
6452 	 * the new event is a SCAN_RSP. We can therefore proceed with
6453 	 * sending a merged device found event.
6454 	 */
6455 	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
6456 			  d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
6457 			  d->last_adv_data, d->last_adv_data_len, data, len, 0);
6458 	clear_pending_adv_report(hdev);
6459 }
6460 
/* Handle HCI LE Advertising Report event (0x02): walk each report in
 * the event, validate its length against the remaining skb data, and
 * forward it to process_adv_report() together with the trailing RSSI
 * byte.
 */
static void hci_le_adv_report_evt(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_le_advertising_report *ev = data;
	u64 instant = jiffies;	/* one timestamp shared by all reports */

	if (!ev->num)
		return;

	hci_dev_lock(hdev);

	while (ev->num--) {
		struct hci_ev_le_advertising_info *info;
		s8 rssi;

		/* Pull the fixed-size report header */
		info = hci_le_ev_skb_pull(hdev, skb,
					  HCI_EV_LE_ADVERTISING_REPORT,
					  sizeof(*info));
		if (!info)
			break;

		/* Pull the variable-length data plus one extra byte for
		 * the RSSI that follows it.
		 */
		if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_ADVERTISING_REPORT,
					info->length + 1))
			break;

		if (info->length <= max_adv_len(hdev)) {
			/* RSSI is the byte right after the adv data */
			rssi = info->data[info->length];
			process_adv_report(hdev, info->type, &info->bdaddr,
					   info->bdaddr_type, NULL, 0, rssi,
					   info->data, info->length, false,
					   false, instant);
		} else {
			bt_dev_err(hdev, "Dropping invalid advertising data");
		}
	}

	hci_dev_unlock(hdev);
}
6499 
6500 static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
6501 {
6502 	if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
6503 		switch (evt_type) {
6504 		case LE_LEGACY_ADV_IND:
6505 			return LE_ADV_IND;
6506 		case LE_LEGACY_ADV_DIRECT_IND:
6507 			return LE_ADV_DIRECT_IND;
6508 		case LE_LEGACY_ADV_SCAN_IND:
6509 			return LE_ADV_SCAN_IND;
6510 		case LE_LEGACY_NONCONN_IND:
6511 			return LE_ADV_NONCONN_IND;
6512 		case LE_LEGACY_SCAN_RSP_ADV:
6513 		case LE_LEGACY_SCAN_RSP_ADV_SCAN:
6514 			return LE_ADV_SCAN_RSP;
6515 		}
6516 
6517 		goto invalid;
6518 	}
6519 
6520 	if (evt_type & LE_EXT_ADV_CONN_IND) {
6521 		if (evt_type & LE_EXT_ADV_DIRECT_IND)
6522 			return LE_ADV_DIRECT_IND;
6523 
6524 		return LE_ADV_IND;
6525 	}
6526 
6527 	if (evt_type & LE_EXT_ADV_SCAN_RSP)
6528 		return LE_ADV_SCAN_RSP;
6529 
6530 	if (evt_type & LE_EXT_ADV_SCAN_IND)
6531 		return LE_ADV_SCAN_IND;
6532 
6533 	if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
6534 	    evt_type & LE_EXT_ADV_DIRECT_IND)
6535 		return LE_ADV_NONCONN_IND;
6536 
6537 invalid:
6538 	bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x",
6539 			       evt_type);
6540 
6541 	return LE_ADV_INVALID;
6542 }
6543 
/* Handle HCI LE Extended Advertising Report event (0x0d): validate each
 * report against the remaining skb data, translate the extended event
 * type to its legacy equivalent, and hand the report to
 * process_adv_report().
 */
static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_le_ext_adv_report *ev = data;
	u64 instant = jiffies;	/* one timestamp shared by all reports */

	if (!ev->num)
		return;

	hci_dev_lock(hdev);

	while (ev->num--) {
		struct hci_ev_le_ext_adv_info *info;
		u8 legacy_evt_type;
		u16 evt_type;

		/* Pull the fixed-size report header */
		info = hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
					  sizeof(*info));
		if (!info)
			break;

		/* Pull the variable-length advertising data */
		if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
					info->length))
			break;

		evt_type = __le16_to_cpu(info->type) & LE_EXT_ADV_EVT_TYPE_MASK;
		legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);
		/* Reports whose type cannot be mapped are silently dropped
		 * (ext_evt_type_to_legacy already logged them).
		 */
		if (legacy_evt_type != LE_ADV_INVALID) {
			process_adv_report(hdev, legacy_evt_type, &info->bdaddr,
					   info->bdaddr_type, NULL, 0,
					   info->rssi, info->data, info->length,
					   !(evt_type & LE_EXT_ADV_LEGACY_PDU),
					   false, instant);
		}
	}

	hci_dev_unlock(hdev);
}
6582 
6583 static int hci_le_pa_term_sync(struct hci_dev *hdev, __le16 handle)
6584 {
6585 	struct hci_cp_le_pa_term_sync cp;
6586 
6587 	memset(&cp, 0, sizeof(cp));
6588 	cp.handle = handle;
6589 
6590 	return hci_send_cmd(hdev, HCI_OP_LE_PA_TERM_SYNC, sizeof(cp), &cp);
6591 }
6592 
/* Handle HCI LE Periodic Advertising Sync Established event (0x0e).
 *
 * If the ISO layer does not accept the source the sync is terminated
 * again; on a deferred-accept failure a placeholder connection is
 * created so the failure can be delivered to the ISO layer.
 *
 * (The "estabilished" spelling is historical and kept for consistency
 * with the rest of this file.)
 */
static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_ev_le_pa_sync_established *ev = data;
	int mask = hdev->link_mode;
	__u8 flags = 0;
	struct hci_conn *pa_sync;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	/* The PA sync creation procedure is over, successful or not */
	hci_dev_clear_flag(hdev, HCI_PA_SYNC);

	/* Ask the ISO layer whether this advertiser should be accepted */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ISO_LINK, &flags);
	if (!(mask & HCI_LM_ACCEPT)) {
		hci_le_pa_term_sync(hdev, ev->handle);
		goto unlock;
	}

	if (!(flags & HCI_PROTO_DEFER))
		goto unlock;

	if (ev->status) {
		/* Add connection to indicate the failed PA sync event */
		pa_sync = hci_conn_add_unset(hdev, ISO_LINK, BDADDR_ANY,
					     HCI_ROLE_SLAVE);

		if (!pa_sync)
			goto unlock;

		set_bit(HCI_CONN_PA_SYNC_FAILED, &pa_sync->flags);

		/* Notify iso layer */
		hci_connect_cfm(pa_sync, ev->status);
	}

unlock:
	hci_dev_unlock(hdev);
}
6633 
6634 static void hci_le_per_adv_report_evt(struct hci_dev *hdev, void *data,
6635 				      struct sk_buff *skb)
6636 {
6637 	struct hci_ev_le_per_adv_report *ev = data;
6638 	int mask = hdev->link_mode;
6639 	__u8 flags = 0;
6640 
6641 	bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle));
6642 
6643 	hci_dev_lock(hdev);
6644 
6645 	mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, ISO_LINK, &flags);
6646 	if (!(mask & HCI_LM_ACCEPT))
6647 		hci_le_pa_term_sync(hdev, ev->sync_handle);
6648 
6649 	hci_dev_unlock(hdev);
6650 }
6651 
6652 static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev, void *data,
6653 					    struct sk_buff *skb)
6654 {
6655 	struct hci_ev_le_remote_feat_complete *ev = data;
6656 	struct hci_conn *conn;
6657 
6658 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6659 
6660 	hci_dev_lock(hdev);
6661 
6662 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6663 	if (conn) {
6664 		if (!ev->status)
6665 			memcpy(conn->features[0], ev->features, 8);
6666 
6667 		if (conn->state == BT_CONFIG) {
6668 			__u8 status;
6669 
6670 			/* If the local controller supports peripheral-initiated
6671 			 * features exchange, but the remote controller does
6672 			 * not, then it is possible that the error code 0x1a
6673 			 * for unsupported remote feature gets returned.
6674 			 *
6675 			 * In this specific case, allow the connection to
6676 			 * transition into connected state and mark it as
6677 			 * successful.
6678 			 */
6679 			if (!conn->out && ev->status == 0x1a &&
6680 			    (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES))
6681 				status = 0x00;
6682 			else
6683 				status = ev->status;
6684 
6685 			conn->state = BT_CONNECTED;
6686 			hci_connect_cfm(conn, status);
6687 			hci_conn_drop(conn);
6688 		}
6689 	}
6690 
6691 	hci_dev_unlock(hdev);
6692 }
6693 
/* Handle HCI LE Long Term Key Request event (0x05): look up a stored
 * LTK for the connection and reply with it, or send a negative reply
 * when no matching key exists.
 */
static void hci_le_ltk_request_evt(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
	if (!ltk)
		goto not_found;

	if (smp_ltk_is_sc(ltk)) {
		/* With SC both EDiv and Rand are set to zero */
		if (ev->ediv || ev->rand)
			goto not_found;
	} else {
		/* For non-SC keys check that EDiv and Rand match */
		if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
			goto not_found;
	}

	/* Copy the key and zero-pad up to the full reply size */
	memcpy(cp.ltk, ltk->val, ltk->enc_size);
	memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
	cp.handle = cpu_to_le16(conn->handle);

	conn->pending_sec_level = smp_ltk_sec_level(ltk);

	conn->enc_key_size = ltk->enc_size;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
	 * temporary key used to encrypt a connection following
	 * pairing. It is used during the Encrypted Session Setup to
	 * distribute the keys. Later, security can be re-established
	 * using a distributed LTK.
	 */
	if (ltk->type == SMP_STK) {
		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
		list_del_rcu(&ltk->list);
		kfree_rcu(ltk, rcu);
	} else {
		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}
6758 
6759 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
6760 				      u8 reason)
6761 {
6762 	struct hci_cp_le_conn_param_req_neg_reply cp;
6763 
6764 	cp.handle = cpu_to_le16(handle);
6765 	cp.reason = reason;
6766 
6767 	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
6768 		     &cp);
6769 }
6770 
/* Handle HCI LE Remote Connection Parameter Request event (0x06):
 * validate the requested parameters, optionally update/store them for
 * the peer (central role only) and notify mgmt, then accept or reject
 * the request.
 */
static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev, void *data,
					     struct sk_buff *skb)
{
	struct hci_ev_le_remote_conn_param_req *ev = data;
	struct hci_cp_le_conn_param_req_reply cp;
	struct hci_conn *hcon;
	u16 handle, min, max, latency, timeout;

	bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));

	handle = le16_to_cpu(ev->handle);
	min = le16_to_cpu(ev->interval_min);
	max = le16_to_cpu(ev->interval_max);
	latency = le16_to_cpu(ev->latency);
	timeout = le16_to_cpu(ev->timeout);

	/* NOTE(review): hcon is looked up and later dereferenced without
	 * holding hci_dev_lock here - presumably safe in the event-handling
	 * context, but worth confirming.
	 */
	hcon = hci_conn_hash_lookup_handle(hdev, handle);
	if (!hcon || hcon->state != BT_CONNECTED)
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_UNKNOWN_CONN_ID);

	/* Reject parameters outside the ranges allowed by the spec */
	if (hci_check_conn_params(min, max, latency, timeout))
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_INVALID_LL_PARAMS);

	if (hcon->role == HCI_ROLE_MASTER) {
		struct hci_conn_params *params;
		u8 store_hint;

		hci_dev_lock(hdev);

		/* Remember the new parameters for this peer if we track it,
		 * and tell userspace whether they are worth persisting.
		 */
		params = hci_conn_params_lookup(hdev, &hcon->dst,
						hcon->dst_type);
		if (params) {
			params->conn_min_interval = min;
			params->conn_max_interval = max;
			params->conn_latency = latency;
			params->supervision_timeout = timeout;
			store_hint = 0x01;
		} else {
			store_hint = 0x00;
		}

		hci_dev_unlock(hdev);

		mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency, timeout);
	}

	/* Accept the request, echoing the requested parameters back */
	cp.handle = ev->handle;
	cp.interval_min = ev->interval_min;
	cp.interval_max = ev->interval_max;
	cp.latency = ev->latency;
	cp.timeout = ev->timeout;
	cp.min_ce_len = 0;
	cp.max_ce_len = 0;

	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
}
6830 
6831 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, void *data,
6832 					 struct sk_buff *skb)
6833 {
6834 	struct hci_ev_le_direct_adv_report *ev = data;
6835 	u64 instant = jiffies;
6836 	int i;
6837 
6838 	if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_DIRECT_ADV_REPORT,
6839 				flex_array_size(ev, info, ev->num)))
6840 		return;
6841 
6842 	if (!ev->num)
6843 		return;
6844 
6845 	hci_dev_lock(hdev);
6846 
6847 	for (i = 0; i < ev->num; i++) {
6848 		struct hci_ev_le_direct_adv_info *info = &ev->info[i];
6849 
6850 		process_adv_report(hdev, info->type, &info->bdaddr,
6851 				   info->bdaddr_type, &info->direct_addr,
6852 				   info->direct_addr_type, info->rssi, NULL, 0,
6853 				   false, false, instant);
6854 	}
6855 
6856 	hci_dev_unlock(hdev);
6857 }
6858 
6859 static void hci_le_phy_update_evt(struct hci_dev *hdev, void *data,
6860 				  struct sk_buff *skb)
6861 {
6862 	struct hci_ev_le_phy_update_complete *ev = data;
6863 	struct hci_conn *conn;
6864 
6865 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6866 
6867 	if (ev->status)
6868 		return;
6869 
6870 	hci_dev_lock(hdev);
6871 
6872 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6873 	if (!conn)
6874 		goto unlock;
6875 
6876 	conn->le_tx_phy = ev->tx_phy;
6877 	conn->le_rx_phy = ev->rx_phy;
6878 
6879 unlock:
6880 	hci_dev_unlock(hdev);
6881 }
6882 
/* Handle HCI LE CIS Established event (0x19): fill in the negotiated
 * ISO QoS for the connection and either complete the connection setup
 * or tear it down on failure.
 *
 * The c_* event fields describe the Central-to-Peripheral direction and
 * the p_* fields the Peripheral-to-Central direction, so which one maps
 * to "in" vs "out" depends on our role on this CIS.
 */
static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_evt_le_cis_established *ev = data;
	struct hci_conn *conn;
	struct bt_iso_qos *qos;
	bool pending = false;
	u16 handle = __le16_to_cpu(ev->handle);

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		bt_dev_err(hdev,
			   "Unable to find connection with handle 0x%4.4x",
			   handle);
		goto unlock;
	}

	if (conn->type != ISO_LINK) {
		bt_dev_err(hdev,
			   "Invalid connection link type handle 0x%4.4x",
			   handle);
		goto unlock;
	}

	qos = &conn->iso_qos;

	/* A CIS creation was in flight for this connection; remember to
	 * kick off the next pending one before returning.
	 */
	pending = test_and_clear_bit(HCI_CONN_CREATE_CIS, &conn->flags);

	/* Convert ISO Interval (1.25 ms slots) to SDU Interval (us) */
	qos->ucast.in.interval = le16_to_cpu(ev->interval) * 1250;
	qos->ucast.out.interval = qos->ucast.in.interval;

	switch (conn->role) {
	case HCI_ROLE_SLAVE:
		/* Convert Transport Latency (us) to Latency (msec) */
		qos->ucast.in.latency =
			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->c_latency),
					  1000);
		qos->ucast.out.latency =
			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency),
					  1000);
		qos->ucast.in.sdu = le16_to_cpu(ev->c_mtu);
		qos->ucast.out.sdu = le16_to_cpu(ev->p_mtu);
		qos->ucast.in.phy = ev->c_phy;
		qos->ucast.out.phy = ev->p_phy;
		break;
	case HCI_ROLE_MASTER:
		/* Convert Transport Latency (us) to Latency (msec) */
		qos->ucast.out.latency =
			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->c_latency),
					  1000);
		qos->ucast.in.latency =
			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency),
					  1000);
		qos->ucast.out.sdu = le16_to_cpu(ev->c_mtu);
		qos->ucast.in.sdu = le16_to_cpu(ev->p_mtu);
		qos->ucast.out.phy = ev->c_phy;
		qos->ucast.in.phy = ev->p_phy;
		break;
	}

	if (!ev->status) {
		conn->state = BT_CONNECTED;
		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);
		hci_iso_setup_path(conn);
		goto unlock;
	}

	/* Establishment failed: notify the ISO layer and drop the conn */
	conn->state = BT_CLOSED;
	hci_connect_cfm(conn, ev->status);
	hci_conn_del(conn);

unlock:
	if (pending)
		hci_le_create_cis_pending(hdev);

	hci_dev_unlock(hdev);
}
6966 
6967 static void hci_le_reject_cis(struct hci_dev *hdev, __le16 handle)
6968 {
6969 	struct hci_cp_le_reject_cis cp;
6970 
6971 	memset(&cp, 0, sizeof(cp));
6972 	cp.handle = handle;
6973 	cp.reason = HCI_ERROR_REJ_BAD_ADDR;
6974 	hci_send_cmd(hdev, HCI_OP_LE_REJECT_CIS, sizeof(cp), &cp);
6975 }
6976 
6977 static void hci_le_accept_cis(struct hci_dev *hdev, __le16 handle)
6978 {
6979 	struct hci_cp_le_accept_cis cp;
6980 
6981 	memset(&cp, 0, sizeof(cp));
6982 	cp.handle = handle;
6983 	hci_send_cmd(hdev, HCI_OP_LE_ACCEPT_CIS, sizeof(cp), &cp);
6984 }
6985 
/* Handle HCI LE CIS Request event (0x1a): look up the parent ACL,
 * consult the ISO layer, and either reject the CIS or create/accept a
 * connection for it (possibly deferring the accept to userspace).
 */
static void hci_le_cis_req_evt(struct hci_dev *hdev, void *data,
			       struct sk_buff *skb)
{
	struct hci_evt_le_cis_req *ev = data;
	u16 acl_handle, cis_handle;
	struct hci_conn *acl, *cis;
	int mask;
	__u8 flags = 0;

	acl_handle = __le16_to_cpu(ev->acl_handle);
	cis_handle = __le16_to_cpu(ev->cis_handle);

	bt_dev_dbg(hdev, "acl 0x%4.4x handle 0x%4.4x cig 0x%2.2x cis 0x%2.2x",
		   acl_handle, cis_handle, ev->cig_id, ev->cis_id);

	hci_dev_lock(hdev);

	/* Without the parent ACL there is nothing to attach the CIS to */
	acl = hci_conn_hash_lookup_handle(hdev, acl_handle);
	if (!acl)
		goto unlock;

	mask = hci_proto_connect_ind(hdev, &acl->dst, ISO_LINK, &flags);
	if (!(mask & HCI_LM_ACCEPT)) {
		hci_le_reject_cis(hdev, ev->cis_handle);
		goto unlock;
	}

	/* Create the CIS connection if it does not already exist */
	cis = hci_conn_hash_lookup_handle(hdev, cis_handle);
	if (!cis) {
		cis = hci_conn_add(hdev, ISO_LINK, &acl->dst, HCI_ROLE_SLAVE,
				   cis_handle);
		if (!cis) {
			hci_le_reject_cis(hdev, ev->cis_handle);
			goto unlock;
		}
	}

	cis->iso_qos.ucast.cig = ev->cig_id;
	cis->iso_qos.ucast.cis = ev->cis_id;

	if (!(flags & HCI_PROTO_DEFER)) {
		hci_le_accept_cis(hdev, ev->cis_handle);
	} else {
		/* Defer: let the ISO layer decide whether to accept */
		cis->state = BT_CONNECT2;
		hci_connect_cfm(cis, 0);
	}

unlock:
	hci_dev_unlock(hdev);
}
7036 
7037 static int hci_iso_term_big_sync(struct hci_dev *hdev, void *data)
7038 {
7039 	u8 handle = PTR_UINT(data);
7040 
7041 	return hci_le_terminate_big_sync(hdev, handle,
7042 					 HCI_ERROR_LOCAL_HOST_TERM);
7043 }
7044 
/* Handle HCI LE Create BIG Complete event (0x1b): assign the newly
 * created BIS handles to the bound broadcast connections and complete
 * or tear them down depending on status. If no connection remained
 * bound to the BIG, the BIG itself is terminated.
 */
static void hci_le_create_big_complete_evt(struct hci_dev *hdev, void *data,
					   struct sk_buff *skb)
{
	struct hci_evt_le_create_big_complete *ev = data;
	struct hci_conn *conn;
	__u8 i = 0;	/* index into ev->bis_handle[], counts matched conns */

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_CREATE_BIG_COMPLETE,
				flex_array_size(ev, bis_handle, ev->num_bis)))
		return;

	hci_dev_lock(hdev);
	rcu_read_lock();

	/* Connect all BISes that are bound to the BIG */
	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
		if (bacmp(&conn->dst, BDADDR_ANY) ||
		    conn->type != ISO_LINK ||
		    conn->iso_qos.bcast.big != ev->handle)
			continue;

		if (hci_conn_set_handle(conn,
					__le16_to_cpu(ev->bis_handle[i++])))
			continue;

		if (!ev->status) {
			conn->state = BT_CONNECTED;
			set_bit(HCI_CONN_BIG_CREATED, &conn->flags);
			/* The RCU read lock is dropped around the sleeping
			 * setup calls and re-taken to continue the walk.
			 */
			rcu_read_unlock();
			hci_debugfs_create_conn(conn);
			hci_conn_add_sysfs(conn);
			hci_iso_setup_path(conn);
			rcu_read_lock();
			continue;
		}

		/* Creation failed: notify and delete the connection */
		hci_connect_cfm(conn, ev->status);
		rcu_read_unlock();
		hci_conn_del(conn);
		rcu_read_lock();
	}

	rcu_read_unlock();

	if (!ev->status && !i)
		/* If no BISes have been connected for the BIG,
		 * terminate. This is in case all bound connections
		 * have been closed before the BIG creation
		 * has completed.
		 */
		hci_cmd_sync_queue(hdev, hci_iso_term_big_sync,
				   UINT_PTR(ev->handle), NULL);

	hci_dev_unlock(hdev);
}
7102 
7103 static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
7104 					    struct sk_buff *skb)
7105 {
7106 	struct hci_evt_le_big_sync_estabilished *ev = data;
7107 	struct hci_conn *bis;
7108 	struct hci_conn *pa_sync;
7109 	int i;
7110 
7111 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
7112 
7113 	if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_BIG_SYNC_ESTABILISHED,
7114 				flex_array_size(ev, bis, ev->num_bis)))
7115 		return;
7116 
7117 	hci_dev_lock(hdev);
7118 
7119 	if (!ev->status) {
7120 		pa_sync = hci_conn_hash_lookup_pa_sync_big_handle(hdev, ev->handle);
7121 		if (pa_sync)
7122 			/* Also mark the BIG sync established event on the
7123 			 * associated PA sync hcon
7124 			 */
7125 			set_bit(HCI_CONN_BIG_SYNC, &pa_sync->flags);
7126 	}
7127 
7128 	for (i = 0; i < ev->num_bis; i++) {
7129 		u16 handle = le16_to_cpu(ev->bis[i]);
7130 		__le32 interval;
7131 
7132 		bis = hci_conn_hash_lookup_handle(hdev, handle);
7133 		if (!bis) {
7134 			bis = hci_conn_add(hdev, ISO_LINK, BDADDR_ANY,
7135 					   HCI_ROLE_SLAVE, handle);
7136 			if (!bis)
7137 				continue;
7138 		}
7139 
7140 		if (ev->status != 0x42)
7141 			/* Mark PA sync as established */
7142 			set_bit(HCI_CONN_PA_SYNC, &bis->flags);
7143 
7144 		bis->iso_qos.bcast.big = ev->handle;
7145 		memset(&interval, 0, sizeof(interval));
7146 		memcpy(&interval, ev->latency, sizeof(ev->latency));
7147 		bis->iso_qos.bcast.in.interval = le32_to_cpu(interval);
7148 		/* Convert ISO Interval (1.25 ms slots) to latency (ms) */
7149 		bis->iso_qos.bcast.in.latency = le16_to_cpu(ev->interval) * 125 / 100;
7150 		bis->iso_qos.bcast.in.sdu = le16_to_cpu(ev->max_pdu);
7151 
7152 		if (!ev->status) {
7153 			set_bit(HCI_CONN_BIG_SYNC, &bis->flags);
7154 			hci_iso_setup_path(bis);
7155 		}
7156 	}
7157 
7158 	/* In case BIG sync failed, notify each failed connection to
7159 	 * the user after all hci connections have been added
7160 	 */
7161 	if (ev->status)
7162 		for (i = 0; i < ev->num_bis; i++) {
7163 			u16 handle = le16_to_cpu(ev->bis[i]);
7164 
7165 			bis = hci_conn_hash_lookup_handle(hdev, handle);
7166 
7167 			set_bit(HCI_CONN_BIG_SYNC_FAILED, &bis->flags);
7168 			hci_connect_cfm(bis, ev->status);
7169 		}
7170 
7171 	hci_dev_unlock(hdev);
7172 }
7173 
/* Handle HCI LE BIGInfo Advertising Report event (0x22): terminate the
 * PA sync if the ISO layer rejects it; otherwise, on a deferred accept,
 * create a PA sync connection for the sync handle (if none exists yet)
 * and notify the ISO layer.
 */
static void hci_le_big_info_adv_report_evt(struct hci_dev *hdev, void *data,
					   struct sk_buff *skb)
{
	struct hci_evt_le_big_info_adv_report *ev = data;
	int mask = hdev->link_mode;
	__u8 flags = 0;
	struct hci_conn *pa_sync;

	bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle));

	hci_dev_lock(hdev);

	mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, ISO_LINK, &flags);
	if (!(mask & HCI_LM_ACCEPT)) {
		hci_le_pa_term_sync(hdev, ev->sync_handle);
		goto unlock;
	}

	if (!(flags & HCI_PROTO_DEFER))
		goto unlock;

	/* Nothing to do if a PA sync connection already exists */
	pa_sync = hci_conn_hash_lookup_pa_sync_handle
			(hdev,
			le16_to_cpu(ev->sync_handle));

	if (pa_sync)
		goto unlock;

	/* Add connection to indicate the PA sync event */
	pa_sync = hci_conn_add_unset(hdev, ISO_LINK, BDADDR_ANY,
				     HCI_ROLE_SLAVE);

	if (!pa_sync)
		goto unlock;

	pa_sync->sync_handle = le16_to_cpu(ev->sync_handle);
	set_bit(HCI_CONN_PA_SYNC, &pa_sync->flags);

	/* Notify iso layer */
	hci_connect_cfm(pa_sync, 0x00);

unlock:
	hci_dev_unlock(hdev);
}
7218 
/* Declare a variable-length LE subevent handler entry */
#define HCI_LE_EV_VL(_op, _func, _min_len, _max_len) \
[_op] = { \
	.func = _func, \
	.min_len = _min_len, \
	.max_len = _max_len, \
}

/* Declare a fixed-length LE subevent handler entry */
#define HCI_LE_EV(_op, _func, _len) \
	HCI_LE_EV_VL(_op, _func, _len, _len)

/* Declare an LE subevent handler entry carrying only a status byte */
#define HCI_LE_EV_STATUS(_op, _func) \
	HCI_LE_EV(_op, _func, sizeof(struct hci_ev_status))

/* Entries in this table shall have their position according to the subevent
 * opcode they handle so the use of the macros above is recommended since it
 * does attempt to initialize at its proper index using Designated
 * Initializers; that way events without a callback function can be omitted.
 */
static const struct hci_le_ev {
	void (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
	u16  min_len;
	u16  max_len;
} hci_le_ev_table[U8_MAX + 1] = {
	/* [0x01 = HCI_EV_LE_CONN_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_CONN_COMPLETE, hci_le_conn_complete_evt,
		  sizeof(struct hci_ev_le_conn_complete)),
	/* [0x02 = HCI_EV_LE_ADVERTISING_REPORT] */
	HCI_LE_EV_VL(HCI_EV_LE_ADVERTISING_REPORT, hci_le_adv_report_evt,
		     sizeof(struct hci_ev_le_advertising_report),
		     HCI_MAX_EVENT_SIZE),
	/* [0x03 = HCI_EV_LE_CONN_UPDATE_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_CONN_UPDATE_COMPLETE,
		  hci_le_conn_update_complete_evt,
		  sizeof(struct hci_ev_le_conn_update_complete)),
	/* [0x04 = HCI_EV_LE_REMOTE_FEAT_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_REMOTE_FEAT_COMPLETE,
		  hci_le_remote_feat_complete_evt,
		  sizeof(struct hci_ev_le_remote_feat_complete)),
	/* [0x05 = HCI_EV_LE_LTK_REQ] */
	HCI_LE_EV(HCI_EV_LE_LTK_REQ, hci_le_ltk_request_evt,
		  sizeof(struct hci_ev_le_ltk_req)),
	/* [0x06 = HCI_EV_LE_REMOTE_CONN_PARAM_REQ] */
	HCI_LE_EV(HCI_EV_LE_REMOTE_CONN_PARAM_REQ,
		  hci_le_remote_conn_param_req_evt,
		  sizeof(struct hci_ev_le_remote_conn_param_req)),
	/* [0x0a = HCI_EV_LE_ENHANCED_CONN_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_ENHANCED_CONN_COMPLETE,
		  hci_le_enh_conn_complete_evt,
		  sizeof(struct hci_ev_le_enh_conn_complete)),
	/* [0x0b = HCI_EV_LE_DIRECT_ADV_REPORT] */
	HCI_LE_EV_VL(HCI_EV_LE_DIRECT_ADV_REPORT, hci_le_direct_adv_report_evt,
		     sizeof(struct hci_ev_le_direct_adv_report),
		     HCI_MAX_EVENT_SIZE),
	/* [0x0c = HCI_EV_LE_PHY_UPDATE_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_PHY_UPDATE_COMPLETE, hci_le_phy_update_evt,
		  sizeof(struct hci_ev_le_phy_update_complete)),
	/* [0x0d = HCI_EV_LE_EXT_ADV_REPORT] */
	HCI_LE_EV_VL(HCI_EV_LE_EXT_ADV_REPORT, hci_le_ext_adv_report_evt,
		     sizeof(struct hci_ev_le_ext_adv_report),
		     HCI_MAX_EVENT_SIZE),
	/* [0x0e = HCI_EV_LE_PA_SYNC_ESTABLISHED] */
	HCI_LE_EV(HCI_EV_LE_PA_SYNC_ESTABLISHED,
		  hci_le_pa_sync_estabilished_evt,
		  sizeof(struct hci_ev_le_pa_sync_established)),
	/* [0x0f = HCI_EV_LE_PER_ADV_REPORT] */
	HCI_LE_EV_VL(HCI_EV_LE_PER_ADV_REPORT,
				 hci_le_per_adv_report_evt,
				 sizeof(struct hci_ev_le_per_adv_report),
				 HCI_MAX_EVENT_SIZE),
	/* [0x12 = HCI_EV_LE_EXT_ADV_SET_TERM] */
	HCI_LE_EV(HCI_EV_LE_EXT_ADV_SET_TERM, hci_le_ext_adv_term_evt,
		  sizeof(struct hci_evt_le_ext_adv_set_term)),
	/* [0x19 = HCI_EVT_LE_CIS_ESTABLISHED] */
	HCI_LE_EV(HCI_EVT_LE_CIS_ESTABLISHED, hci_le_cis_estabilished_evt,
		  sizeof(struct hci_evt_le_cis_established)),
	/* [0x1a = HCI_EVT_LE_CIS_REQ] */
	HCI_LE_EV(HCI_EVT_LE_CIS_REQ, hci_le_cis_req_evt,
		  sizeof(struct hci_evt_le_cis_req)),
	/* [0x1b = HCI_EVT_LE_CREATE_BIG_COMPLETE] */
	HCI_LE_EV_VL(HCI_EVT_LE_CREATE_BIG_COMPLETE,
		     hci_le_create_big_complete_evt,
		     sizeof(struct hci_evt_le_create_big_complete),
		     HCI_MAX_EVENT_SIZE),
	/* [0x1d = HCI_EV_LE_BIG_SYNC_ESTABILISHED] */
	HCI_LE_EV_VL(HCI_EVT_LE_BIG_SYNC_ESTABILISHED,
		     hci_le_big_sync_established_evt,
		     sizeof(struct hci_evt_le_big_sync_estabilished),
		     HCI_MAX_EVENT_SIZE),
	/* [0x22 = HCI_EVT_LE_BIG_INFO_ADV_REPORT] */
	HCI_LE_EV_VL(HCI_EVT_LE_BIG_INFO_ADV_REPORT,
		     hci_le_big_info_adv_report_evt,
		     sizeof(struct hci_evt_le_big_info_adv_report),
		     HCI_MAX_EVENT_SIZE),
};
7313 
/* Dispatch an HCI LE Meta event to its subevent handler from
 * hci_le_ev_table, after completing any pending LE command that was
 * waiting for this subevent and validating the payload length.
 */
static void hci_le_meta_evt(struct hci_dev *hdev, void *data,
			    struct sk_buff *skb, u16 *opcode, u8 *status,
			    hci_req_complete_t *req_complete,
			    hci_req_complete_skb_t *req_complete_skb)
{
	struct hci_ev_le_meta *ev = data;
	const struct hci_le_ev *subev;

	bt_dev_dbg(hdev, "subevent 0x%2.2x", ev->subevent);

	/* Only match event if command OGF is for LE */
	if (hdev->sent_cmd &&
	    hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) == 0x08 &&
	    hci_skb_event(hdev->sent_cmd) == ev->subevent) {
		*opcode = hci_skb_opcode(hdev->sent_cmd);
		hci_req_cmd_complete(hdev, *opcode, 0x00, req_complete,
				     req_complete_skb);
	}

	/* Subevents without a registered handler are ignored */
	subev = &hci_le_ev_table[ev->subevent];
	if (!subev->func)
		return;

	if (skb->len < subev->min_len) {
		bt_dev_err(hdev, "unexpected subevent 0x%2.2x length: %u < %u",
			   ev->subevent, skb->len, subev->min_len);
		return;
	}

	/* Just warn if the length is over max_len size: it may still be
	 * possible to partially parse the event, so leave it to the callback
	 * to decide if that is acceptable.
	 */
	if (skb->len > subev->max_len)
		bt_dev_warn(hdev, "unexpected subevent 0x%2.2x length: %u > %u",
			    ev->subevent, skb->len, subev->max_len);
	data = hci_le_ev_skb_pull(hdev, skb, ev->subevent, subev->min_len);
	if (!data)
		return;

	subev->func(hdev, data, skb);
}
7356 
7357 static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
7358 				 u8 event, struct sk_buff *skb)
7359 {
7360 	struct hci_ev_cmd_complete *ev;
7361 	struct hci_event_hdr *hdr;
7362 
7363 	if (!skb)
7364 		return false;
7365 
7366 	hdr = hci_ev_skb_pull(hdev, skb, event, sizeof(*hdr));
7367 	if (!hdr)
7368 		return false;
7369 
7370 	if (event) {
7371 		if (hdr->evt != event)
7372 			return false;
7373 		return true;
7374 	}
7375 
7376 	/* Check if request ended in Command Status - no way to retrieve
7377 	 * any extra parameters in this case.
7378 	 */
7379 	if (hdr->evt == HCI_EV_CMD_STATUS)
7380 		return false;
7381 
7382 	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
7383 		bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
7384 			   hdr->evt);
7385 		return false;
7386 	}
7387 
7388 	ev = hci_cc_skb_pull(hdev, skb, opcode, sizeof(*ev));
7389 	if (!ev)
7390 		return false;
7391 
7392 	if (opcode != __le16_to_cpu(ev->opcode)) {
7393 		BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
7394 		       __le16_to_cpu(ev->opcode));
7395 		return false;
7396 	}
7397 
7398 	return true;
7399 }
7400 
7401 static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
7402 				  struct sk_buff *skb)
7403 {
7404 	struct hci_ev_le_advertising_info *adv;
7405 	struct hci_ev_le_direct_adv_info *direct_adv;
7406 	struct hci_ev_le_ext_adv_info *ext_adv;
7407 	const struct hci_ev_conn_complete *conn_complete = (void *)skb->data;
7408 	const struct hci_ev_conn_request *conn_request = (void *)skb->data;
7409 
7410 	hci_dev_lock(hdev);
7411 
7412 	/* If we are currently suspended and this is the first BT event seen,
7413 	 * save the wake reason associated with the event.
7414 	 */
7415 	if (!hdev->suspended || hdev->wake_reason)
7416 		goto unlock;
7417 
7418 	/* Default to remote wake. Values for wake_reason are documented in the
7419 	 * Bluez mgmt api docs.
7420 	 */
7421 	hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE;
7422 
7423 	/* Once configured for remote wakeup, we should only wake up for
7424 	 * reconnections. It's useful to see which device is waking us up so
7425 	 * keep track of the bdaddr of the connection event that woke us up.
7426 	 */
7427 	if (event == HCI_EV_CONN_REQUEST) {
7428 		bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
7429 		hdev->wake_addr_type = BDADDR_BREDR;
7430 	} else if (event == HCI_EV_CONN_COMPLETE) {
7431 		bacpy(&hdev->wake_addr, &conn_request->bdaddr);
7432 		hdev->wake_addr_type = BDADDR_BREDR;
7433 	} else if (event == HCI_EV_LE_META) {
7434 		struct hci_ev_le_meta *le_ev = (void *)skb->data;
7435 		u8 subevent = le_ev->subevent;
7436 		u8 *ptr = &skb->data[sizeof(*le_ev)];
7437 		u8 num_reports = *ptr;
7438 
7439 		if ((subevent == HCI_EV_LE_ADVERTISING_REPORT ||
7440 		     subevent == HCI_EV_LE_DIRECT_ADV_REPORT ||
7441 		     subevent == HCI_EV_LE_EXT_ADV_REPORT) &&
7442 		    num_reports) {
7443 			adv = (void *)(ptr + 1);
7444 			direct_adv = (void *)(ptr + 1);
7445 			ext_adv = (void *)(ptr + 1);
7446 
7447 			switch (subevent) {
7448 			case HCI_EV_LE_ADVERTISING_REPORT:
7449 				bacpy(&hdev->wake_addr, &adv->bdaddr);
7450 				hdev->wake_addr_type = adv->bdaddr_type;
7451 				break;
7452 			case HCI_EV_LE_DIRECT_ADV_REPORT:
7453 				bacpy(&hdev->wake_addr, &direct_adv->bdaddr);
7454 				hdev->wake_addr_type = direct_adv->bdaddr_type;
7455 				break;
7456 			case HCI_EV_LE_EXT_ADV_REPORT:
7457 				bacpy(&hdev->wake_addr, &ext_adv->bdaddr);
7458 				hdev->wake_addr_type = ext_adv->bdaddr_type;
7459 				break;
7460 			}
7461 		}
7462 	} else {
7463 		hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED;
7464 	}
7465 
7466 unlock:
7467 	hci_dev_unlock(hdev);
7468 }
7469 
/* Variable-length event entry: parameter length must be at least _min_len;
 * anything above _max_len only triggers a warning in hci_event_func().
 */
#define HCI_EV_VL(_op, _func, _min_len, _max_len) \
[_op] = { \
	.req = false, \
	.func = _func, \
	.min_len = _min_len, \
	.max_len = _max_len, \
}

/* Fixed-length event entry (min_len == max_len) */
#define HCI_EV(_op, _func, _len) \
	HCI_EV_VL(_op, _func, _len, _len)

/* Event whose only parameter is a single status byte */
#define HCI_EV_STATUS(_op, _func) \
	HCI_EV(_op, _func, sizeof(struct hci_ev_status))

/* Request-completing variant: .req = true selects the func_req union member,
 * which additionally receives the opcode/status and completion callbacks.
 */
#define HCI_EV_REQ_VL(_op, _func, _min_len, _max_len) \
[_op] = { \
	.req = true, \
	.func_req = _func, \
	.min_len = _min_len, \
	.max_len = _max_len, \
}

/* Fixed-length request-completing event entry */
#define HCI_EV_REQ(_op, _func, _len) \
	HCI_EV_REQ_VL(_op, _func, _len, _len)
7494 
/* Entries in this table shall have their position according to the event opcode
 * they handle so the use of the macros above is recommend since it does attempt
 * to initialize at its proper index using Designated Initializers that way
 * events without a callback function don't have entered.
 */
static const struct hci_ev {
	bool req;	/* true: func_req union member is valid */
	union {
		void (*func)(struct hci_dev *hdev, void *data,
			     struct sk_buff *skb);
		void (*func_req)(struct hci_dev *hdev, void *data,
				 struct sk_buff *skb, u16 *opcode, u8 *status,
				 hci_req_complete_t *req_complete,
				 hci_req_complete_skb_t *req_complete_skb);
	};
	u16  min_len;	/* minimum required parameter length */
	u16  max_len;	/* lengths above this only warn */
} hci_ev_table[U8_MAX + 1] = {
	/* [0x01 = HCI_EV_INQUIRY_COMPLETE] */
	HCI_EV_STATUS(HCI_EV_INQUIRY_COMPLETE, hci_inquiry_complete_evt),
	/* [0x02 = HCI_EV_INQUIRY_RESULT] */
	HCI_EV_VL(HCI_EV_INQUIRY_RESULT, hci_inquiry_result_evt,
		  sizeof(struct hci_ev_inquiry_result), HCI_MAX_EVENT_SIZE),
	/* [0x03 = HCI_EV_CONN_COMPLETE] */
	HCI_EV(HCI_EV_CONN_COMPLETE, hci_conn_complete_evt,
	       sizeof(struct hci_ev_conn_complete)),
	/* [0x04 = HCI_EV_CONN_REQUEST] */
	HCI_EV(HCI_EV_CONN_REQUEST, hci_conn_request_evt,
	       sizeof(struct hci_ev_conn_request)),
	/* [0x05 = HCI_EV_DISCONN_COMPLETE] */
	HCI_EV(HCI_EV_DISCONN_COMPLETE, hci_disconn_complete_evt,
	       sizeof(struct hci_ev_disconn_complete)),
	/* [0x06 = HCI_EV_AUTH_COMPLETE] */
	HCI_EV(HCI_EV_AUTH_COMPLETE, hci_auth_complete_evt,
	       sizeof(struct hci_ev_auth_complete)),
	/* [0x07 = HCI_EV_REMOTE_NAME] */
	HCI_EV(HCI_EV_REMOTE_NAME, hci_remote_name_evt,
	       sizeof(struct hci_ev_remote_name)),
	/* [0x08 = HCI_EV_ENCRYPT_CHANGE] */
	HCI_EV(HCI_EV_ENCRYPT_CHANGE, hci_encrypt_change_evt,
	       sizeof(struct hci_ev_encrypt_change)),
	/* [0x09 = HCI_EV_CHANGE_LINK_KEY_COMPLETE] */
	HCI_EV(HCI_EV_CHANGE_LINK_KEY_COMPLETE,
	       hci_change_link_key_complete_evt,
	       sizeof(struct hci_ev_change_link_key_complete)),
	/* [0x0b = HCI_EV_REMOTE_FEATURES] */
	HCI_EV(HCI_EV_REMOTE_FEATURES, hci_remote_features_evt,
	       sizeof(struct hci_ev_remote_features)),
	/* [0x0e = HCI_EV_CMD_COMPLETE] */
	HCI_EV_REQ_VL(HCI_EV_CMD_COMPLETE, hci_cmd_complete_evt,
		      sizeof(struct hci_ev_cmd_complete), HCI_MAX_EVENT_SIZE),
	/* [0x0f = HCI_EV_CMD_STATUS] */
	HCI_EV_REQ(HCI_EV_CMD_STATUS, hci_cmd_status_evt,
		   sizeof(struct hci_ev_cmd_status)),
	/* [0x10 = HCI_EV_HARDWARE_ERROR] */
	HCI_EV(HCI_EV_HARDWARE_ERROR, hci_hardware_error_evt,
	       sizeof(struct hci_ev_hardware_error)),
	/* [0x12 = HCI_EV_ROLE_CHANGE] */
	HCI_EV(HCI_EV_ROLE_CHANGE, hci_role_change_evt,
	       sizeof(struct hci_ev_role_change)),
	/* [0x13 = HCI_EV_NUM_COMP_PKTS] */
	HCI_EV_VL(HCI_EV_NUM_COMP_PKTS, hci_num_comp_pkts_evt,
		  sizeof(struct hci_ev_num_comp_pkts), HCI_MAX_EVENT_SIZE),
	/* [0x14 = HCI_EV_MODE_CHANGE] */
	HCI_EV(HCI_EV_MODE_CHANGE, hci_mode_change_evt,
	       sizeof(struct hci_ev_mode_change)),
	/* [0x16 = HCI_EV_PIN_CODE_REQ] */
	HCI_EV(HCI_EV_PIN_CODE_REQ, hci_pin_code_request_evt,
	       sizeof(struct hci_ev_pin_code_req)),
	/* [0x17 = HCI_EV_LINK_KEY_REQ] */
	HCI_EV(HCI_EV_LINK_KEY_REQ, hci_link_key_request_evt,
	       sizeof(struct hci_ev_link_key_req)),
	/* [0x18 = HCI_EV_LINK_KEY_NOTIFY] */
	HCI_EV(HCI_EV_LINK_KEY_NOTIFY, hci_link_key_notify_evt,
	       sizeof(struct hci_ev_link_key_notify)),
	/* [0x1c = HCI_EV_CLOCK_OFFSET] */
	HCI_EV(HCI_EV_CLOCK_OFFSET, hci_clock_offset_evt,
	       sizeof(struct hci_ev_clock_offset)),
	/* [0x1d = HCI_EV_PKT_TYPE_CHANGE] */
	HCI_EV(HCI_EV_PKT_TYPE_CHANGE, hci_pkt_type_change_evt,
	       sizeof(struct hci_ev_pkt_type_change)),
	/* [0x20 = HCI_EV_PSCAN_REP_MODE] */
	HCI_EV(HCI_EV_PSCAN_REP_MODE, hci_pscan_rep_mode_evt,
	       sizeof(struct hci_ev_pscan_rep_mode)),
	/* [0x22 = HCI_EV_INQUIRY_RESULT_WITH_RSSI] */
	HCI_EV_VL(HCI_EV_INQUIRY_RESULT_WITH_RSSI,
		  hci_inquiry_result_with_rssi_evt,
		  sizeof(struct hci_ev_inquiry_result_rssi),
		  HCI_MAX_EVENT_SIZE),
	/* [0x23 = HCI_EV_REMOTE_EXT_FEATURES] */
	HCI_EV(HCI_EV_REMOTE_EXT_FEATURES, hci_remote_ext_features_evt,
	       sizeof(struct hci_ev_remote_ext_features)),
	/* [0x2c = HCI_EV_SYNC_CONN_COMPLETE] */
	HCI_EV(HCI_EV_SYNC_CONN_COMPLETE, hci_sync_conn_complete_evt,
	       sizeof(struct hci_ev_sync_conn_complete)),
	/* [0x2d = HCI_EV_EXTENDED_INQUIRY_RESULT] */
	HCI_EV_VL(HCI_EV_EXTENDED_INQUIRY_RESULT,
		  hci_extended_inquiry_result_evt,
		  sizeof(struct hci_ev_ext_inquiry_result), HCI_MAX_EVENT_SIZE),
	/* [0x30 = HCI_EV_KEY_REFRESH_COMPLETE] */
	HCI_EV(HCI_EV_KEY_REFRESH_COMPLETE, hci_key_refresh_complete_evt,
	       sizeof(struct hci_ev_key_refresh_complete)),
	/* [0x31 = HCI_EV_IO_CAPA_REQUEST] */
	HCI_EV(HCI_EV_IO_CAPA_REQUEST, hci_io_capa_request_evt,
	       sizeof(struct hci_ev_io_capa_request)),
	/* [0x32 = HCI_EV_IO_CAPA_REPLY] */
	HCI_EV(HCI_EV_IO_CAPA_REPLY, hci_io_capa_reply_evt,
	       sizeof(struct hci_ev_io_capa_reply)),
	/* [0x33 = HCI_EV_USER_CONFIRM_REQUEST] */
	HCI_EV(HCI_EV_USER_CONFIRM_REQUEST, hci_user_confirm_request_evt,
	       sizeof(struct hci_ev_user_confirm_req)),
	/* [0x34 = HCI_EV_USER_PASSKEY_REQUEST] */
	HCI_EV(HCI_EV_USER_PASSKEY_REQUEST, hci_user_passkey_request_evt,
	       sizeof(struct hci_ev_user_passkey_req)),
	/* [0x35 = HCI_EV_REMOTE_OOB_DATA_REQUEST] */
	HCI_EV(HCI_EV_REMOTE_OOB_DATA_REQUEST, hci_remote_oob_data_request_evt,
	       sizeof(struct hci_ev_remote_oob_data_request)),
	/* [0x36 = HCI_EV_SIMPLE_PAIR_COMPLETE] */
	HCI_EV(HCI_EV_SIMPLE_PAIR_COMPLETE, hci_simple_pair_complete_evt,
	       sizeof(struct hci_ev_simple_pair_complete)),
	/* [0x3b = HCI_EV_USER_PASSKEY_NOTIFY] */
	HCI_EV(HCI_EV_USER_PASSKEY_NOTIFY, hci_user_passkey_notify_evt,
	       sizeof(struct hci_ev_user_passkey_notify)),
	/* [0x3c = HCI_EV_KEYPRESS_NOTIFY] */
	HCI_EV(HCI_EV_KEYPRESS_NOTIFY, hci_keypress_notify_evt,
	       sizeof(struct hci_ev_keypress_notify)),
	/* [0x3d = HCI_EV_REMOTE_HOST_FEATURES] */
	HCI_EV(HCI_EV_REMOTE_HOST_FEATURES, hci_remote_host_features_evt,
	       sizeof(struct hci_ev_remote_host_features)),
	/* [0x3e = HCI_EV_LE_META] */
	HCI_EV_REQ_VL(HCI_EV_LE_META, hci_le_meta_evt,
		      sizeof(struct hci_ev_le_meta), HCI_MAX_EVENT_SIZE),
#if IS_ENABLED(CONFIG_BT_HS)
	/* [0x46 = HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE] */
	HCI_EV(HCI_EV_PHY_LINK_COMPLETE, hci_phy_link_complete_evt,
	       sizeof(struct hci_ev_phy_link_complete)),
	/* [0x41 = HCI_EV_CHANNEL_SELECTED] */
	HCI_EV(HCI_EV_CHANNEL_SELECTED, hci_chan_selected_evt,
	       sizeof(struct hci_ev_channel_selected)),
	/* [0x46 = HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE] */
	HCI_EV(HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE,
	       hci_disconn_loglink_complete_evt,
	       sizeof(struct hci_ev_disconn_logical_link_complete)),
	/* [0x45 = HCI_EV_LOGICAL_LINK_COMPLETE] */
	HCI_EV(HCI_EV_LOGICAL_LINK_COMPLETE, hci_loglink_complete_evt,
	       sizeof(struct hci_ev_logical_link_complete)),
	/* [0x42 = HCI_EV_DISCONN_PHY_LINK_COMPLETE] */
	HCI_EV(HCI_EV_DISCONN_PHY_LINK_COMPLETE,
	       hci_disconn_phylink_complete_evt,
	       sizeof(struct hci_ev_disconn_phy_link_complete)),
#endif
	/* [0x48 = HCI_EV_NUM_COMP_BLOCKS] */
	HCI_EV(HCI_EV_NUM_COMP_BLOCKS, hci_num_comp_blocks_evt,
	       sizeof(struct hci_ev_num_comp_blocks)),
	/* [0xff = HCI_EV_VENDOR] */
	HCI_EV_VL(HCI_EV_VENDOR, msft_vendor_evt, 0, HCI_MAX_EVENT_SIZE),
};
7652 
7653 static void hci_event_func(struct hci_dev *hdev, u8 event, struct sk_buff *skb,
7654 			   u16 *opcode, u8 *status,
7655 			   hci_req_complete_t *req_complete,
7656 			   hci_req_complete_skb_t *req_complete_skb)
7657 {
7658 	const struct hci_ev *ev = &hci_ev_table[event];
7659 	void *data;
7660 
7661 	if (!ev->func)
7662 		return;
7663 
7664 	if (skb->len < ev->min_len) {
7665 		bt_dev_err(hdev, "unexpected event 0x%2.2x length: %u < %u",
7666 			   event, skb->len, ev->min_len);
7667 		return;
7668 	}
7669 
7670 	/* Just warn if the length is over max_len size it still be
7671 	 * possible to partially parse the event so leave to callback to
7672 	 * decide if that is acceptable.
7673 	 */
7674 	if (skb->len > ev->max_len)
7675 		bt_dev_warn_ratelimited(hdev,
7676 					"unexpected event 0x%2.2x length: %u > %u",
7677 					event, skb->len, ev->max_len);
7678 
7679 	data = hci_ev_skb_pull(hdev, skb, event, ev->min_len);
7680 	if (!data)
7681 		return;
7682 
7683 	if (ev->req)
7684 		ev->func_req(hdev, data, skb, opcode, status, req_complete,
7685 			     req_complete_skb);
7686 	else
7687 		ev->func(hdev, data, skb);
7688 }
7689 
/* Top-level HCI event entry point: validate the header, keep a copy of the
 * raw event in hdev->recv_event, complete any non-LE command waiting on
 * this event, dispatch to the per-event handler via hci_event_func(), and
 * finally fire the request-completion callbacks.  Consumes @skb.
 */
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	hci_req_complete_t req_complete = NULL;
	hci_req_complete_skb_t req_complete_skb = NULL;
	struct sk_buff *orig_skb = NULL;
	u8 status = 0, event, req_evt = 0;
	u16 opcode = HCI_OP_NOP;

	if (skb->len < sizeof(*hdr)) {
		bt_dev_err(hdev, "Malformed HCI Event");
		goto done;
	}

	/* Replace the previously stored last-received event with this one */
	kfree_skb(hdev->recv_event);
	hdev->recv_event = skb_clone(skb, GFP_KERNEL);

	event = hdr->evt;
	if (!event) {
		bt_dev_warn(hdev, "Received unexpected HCI Event 0x%2.2x",
			    event);
		goto done;
	}

	/* Only match event if command OGF is not for LE */
	if (hdev->sent_cmd &&
	    hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) != 0x08 &&
	    hci_skb_event(hdev->sent_cmd) == event) {
		hci_req_cmd_complete(hdev, hci_skb_opcode(hdev->sent_cmd),
				     status, &req_complete, &req_complete_skb);
		req_evt = event;
	}

	/* If it looks like we might end up having to call
	 * req_complete_skb, store a pristine copy of the skb since the
	 * various handlers may modify the original one through
	 * skb_pull() calls, etc.
	 */
	if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
	    event == HCI_EV_CMD_COMPLETE)
		orig_skb = skb_clone(skb, GFP_KERNEL);

	/* From here on skb->data points at the event parameters */
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* Store wake reason if we're suspended */
	hci_store_wake_reason(hdev, event, skb);

	bt_dev_dbg(hdev, "event 0x%2.2x", event);

	hci_event_func(hdev, event, skb, &opcode, &status, &req_complete,
		       &req_complete_skb);

	if (req_complete) {
		req_complete(hdev, status, opcode);
	} else if (req_complete_skb) {
		/* Only hand the pristine copy over if it really is the
		 * matching completion event; otherwise pass NULL.
		 */
		if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
			kfree_skb(orig_skb);
			orig_skb = NULL;
		}
		req_complete_skb(hdev, status, opcode, orig_skb);
	}

done:
	kfree_skb(orig_skb);
	kfree_skb(skb);
	hdev->stat.evt_rx++;
}
7757