/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
   Copyright 2023 NXP

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI event handling. */

#include <asm/unaligned.h>
#include <linux/crypto.h>
#include <crypto/algapi.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "hci_codec.h"
#include "a2mp.h"
#include "amp.h"
#include "smp.h"
#include "msft.h"
#include "eir.h"

#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"

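/* Local helper: convert a timeout in seconds to jiffies for delayed-work
 * scheduling (e.g. the RPA expiry work queued below).
 */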
#define secs_to_jiffies(_secs) msecs_to_jiffies((_secs) * 1000)

/* Handle HCI Event packets */

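/* The following pull helpers validate that a packet payload is at least
 * @len bytes before it is parsed. On a short packet they log the malformed
 * event/command and return NULL, which callers treat as "abort parsing".
 */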
static void *hci_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
			     u8 ev, size_t len)
{
	void *data;

	data = skb_pull_data(skb, len);
	if (!data)
		bt_dev_err(hdev, "Malformed Event: 0x%2.2x", ev);

	return data;
}

static void *hci_cc_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
			     u16 op, size_t len)
{
	void *data;

	data = skb_pull_data(skb, len);
	if (!data)
		bt_dev_err(hdev, "Malformed Command Complete: 0x%4.4x", op);

	return data;
}

static void *hci_le_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
				u8 ev, size_t len)
{
	void *data;

	data = skb_pull_data(skb, len);
	if (!data)
		bt_dev_err(hdev, "Malformed LE Event: 0x%2.2x", ev);

	return data;
}

static u8 hci_cc_inquiry_cancel(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	/* It is possible that we receive the Inquiry Complete event right
	 * before the Inquiry Cancel Command Complete event, in which case
	 * the latter should have a status of Command Disallowed (0x0c).
	 * This should not be treated as an error, since we actually achieve
	 * what Inquiry Cancel set out to achieve, which is to end the last
	 * Inquiry session.
	 */
	if (rp->status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
		bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
		rp->status = 0x00;
	}

	if (rp->status)
		return rp->status;

	clear_bit(HCI_INQUIRY, &hdev->flags);
	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	hci_dev_lock(hdev);
	/* Set discovery state to stopped if we're not doing LE active
	 * scanning.
	 */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
	    hdev->le_scan_type != LE_SCAN_ACTIVE)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);

	return rp->status;
}

static u8 hci_cc_periodic_inq(struct hci_dev *hdev, void *data,
			      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);

	return rp->status;
}

static u8 hci_cc_exit_periodic_inq(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);

	hci_conn_check_pending(hdev);

	return rp->status;
}

static u8 hci_cc_remote_name_req_cancel(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	return rp->status;
}

static u8 hci_cc_role_discovery(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_rp_role_discovery *rp = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->role = rp->role;

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_link_policy(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_rp_read_link_policy *rp = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = __le16_to_cpu(rp->policy);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_write_link_policy(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = data;
	struct hci_conn *conn;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_def_link_policy(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_read_def_link_policy *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->link_policy = __le16_to_cpu(rp->policy);

	return rp->status;
}

static u8 hci_cc_write_def_link_policy(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
	if (!sent)
		return rp->status;

	hdev->link_policy = get_unaligned_le16(sent);

	return rp->status;
}

static u8 hci_cc_reset(struct hci_dev *hdev, void *data, struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	clear_bit(HCI_RESET, &hdev->flags);

	if (rp->status)
		return rp->status;

	/* Reset all non-persistent flags */
	hci_dev_clear_volatile_flags(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;

	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
	hdev->scan_rsp_data_len = 0;

	hdev->le_scan_type = LE_SCAN_PASSIVE;

	hdev->ssp_debug_mode = 0;

	hci_bdaddr_list_clear(&hdev->le_accept_list);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);

	return rp->status;
}

static u8 hci_cc_read_stored_link_key(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_read_stored_link_key *rp = data;
	struct hci_cp_read_stored_link_key *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
	if (!sent)
		return rp->status;

	if (!rp->status && sent->read_all == 0x01) {
		hdev->stored_max_keys = le16_to_cpu(rp->max_keys);
		hdev->stored_num_keys = le16_to_cpu(rp->num_keys);
	}

	return rp->status;
}

static u8 hci_cc_delete_stored_link_key(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_delete_stored_link_key *rp = data;
	u16 num_keys;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	num_keys = le16_to_cpu(rp->num_keys);

	if (num_keys <= hdev->stored_num_keys)
		hdev->stored_num_keys -= num_keys;
	else
		hdev->stored_num_keys = 0;

	return rp->status;
}

static u8 hci_cc_write_local_name(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_local_name_complete(hdev, sent, rp->status);
	else if (!rp->status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_local_name(struct hci_dev *hdev, void *data,
				 struct sk_buff *skb)
{
	struct hci_rp_read_local_name *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG))
		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);

	return rp->status;
}

static u8 hci_cc_write_auth_enable(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (!rp->status) {
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_auth_enable_complete(hdev, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_write_encrypt_mode(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u8 param;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
	if (!sent)
		return rp->status;

	param = *((__u8 *) sent);

	if (param)
		set_bit(HCI_ENCRYPT, &hdev->flags);
	else
		clear_bit(HCI_ENCRYPT, &hdev->flags);

	return rp->status;
}

static u8 hci_cc_write_scan_enable(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u8 param;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return rp->status;

	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	if (rp->status) {
		hdev->discov_timeout = 0;
		goto done;
	}

	if (param & SCAN_INQUIRY)
		set_bit(HCI_ISCAN, &hdev->flags);
	else
		clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_PAGE)
		set_bit(HCI_PSCAN, &hdev->flags);
	else
		clear_bit(HCI_PSCAN, &hdev->flags);

done:
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_set_event_filter(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_set_event_filter *cp;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_SET_EVENT_FLT);
	if (!sent)
		return rp->status;

	cp = (struct hci_cp_set_event_filter *)sent;

	if (cp->flt_type == HCI_FLT_CLEAR_ALL)
		hci_dev_clear_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
	else
		hci_dev_set_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);

	return rp->status;
}

static u8 hci_cc_read_class_of_dev(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_read_class_of_dev *rp = data;

	if (WARN_ON(!hdev))
		return HCI_ERROR_UNSPECIFIED;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	memcpy(hdev->dev_class, rp->dev_class, 3);

	bt_dev_dbg(hdev, "class 0x%.2x%.2x%.2x", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);

	return rp->status;
}

static u8 hci_cc_write_class_of_dev(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (!rp->status)
		memcpy(hdev->dev_class, sent, 3);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_class_of_dev_complete(hdev, sent, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_voice_setting(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_read_voice_setting *rp = data;
	__u16 setting;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	setting = __le16_to_cpu(rp->voice_setting);

	if (hdev->voice_setting == setting)
		return rp->status;

	hdev->voice_setting = setting;

	bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);

	return rp->status;
}

static u8 hci_cc_write_voice_setting(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u16 setting;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
	if (!sent)
		return rp->status;

	setting = get_unaligned_le16(sent);

	if (hdev->voice_setting == setting)
		return rp->status;

	hdev->voice_setting = setting;

	bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);

	return rp->status;
}

static u8 hci_cc_read_num_supported_iac(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_read_num_supported_iac *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->num_iac = rp->num_iac;

	bt_dev_dbg(hdev, "num iac %d", hdev->num_iac);

	return rp->status;
}

static u8 hci_cc_write_ssp_mode(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_write_ssp_mode *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (!rp->status) {
		if (sent->mode)
			hdev->features[1][0] |= LMP_HOST_SSP;
		else
			hdev->features[1][0] &= ~LMP_HOST_SSP;
	}

	if (!rp->status) {
		if (sent->mode)
			hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
	}

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_write_sc_support(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_write_sc_support *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (!rp->status) {
		if (sent->support)
			hdev->features[1][0] |= LMP_HOST_SC;
		else
			hdev->features[1][0] &= ~LMP_HOST_SC;
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT) && !rp->status) {
		if (sent->support)
			hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
	}

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_local_version(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_read_local_version *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hdev->hci_ver = rp->hci_ver;
		hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
		hdev->lmp_ver = rp->lmp_ver;
		hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
	}

	return rp->status;
}

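/* Connections whose encryption key ends up shorter than
 * hdev->min_enc_key_size are reported below as authentication failures, so
 * weak keys are rejected instead of silently accepted. A failed read is
 * treated as key size 0, which forces a disconnection.
 */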
static u8 hci_cc_read_enc_key_size(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_read_enc_key_size *rp = data;
	struct hci_conn *conn;
	u16 handle;
	u8 status = rp->status;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	handle = le16_to_cpu(rp->handle);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		status = 0xFF;
		goto done;
	}

	/* While unexpected, the read_enc_key_size command may fail. The most
	 * secure approach is to then assume the key size is 0 to force a
	 * disconnection.
	 */
	if (status) {
		bt_dev_err(hdev, "failed to read key size for handle %u",
			   handle);
		conn->enc_key_size = 0;
	} else {
		conn->enc_key_size = rp->key_size;
		status = 0;

		if (conn->enc_key_size < hdev->min_enc_key_size) {
			/* In the slave role, conn->state has already been set
			 * to BT_CONNECTED, but the l2cap conn req might not be
			 * received yet; at this point the l2cap layer does
			 * almost nothing with the non-zero status.
			 * So we also clear the encrypt-related bits, and the
			 * l2cap conn req handler will then pick up the correct
			 * security state later on.
			 */
			status = HCI_ERROR_AUTH_FAILURE;
			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}
	}

	hci_encrypt_cfm(conn, status);

done:
	hci_dev_unlock(hdev);

	return status;
}

static u8 hci_cc_read_local_commands(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_local_commands *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG))
		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));

	return rp->status;
}

static u8 hci_cc_read_auth_payload_timeout(struct hci_dev *hdev, void *data,
					   struct sk_buff *skb)
{
	struct hci_rp_read_auth_payload_to *rp = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_write_auth_payload_timeout(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_rp_write_auth_payload_to *rp = data;
	struct hci_conn *conn;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (!conn) {
		rp->status = 0xff;
		goto unlock;
	}

	if (!rp->status)
		conn->auth_payload_timeout = get_unaligned_le16(sent + 2);

unlock:
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_local_features(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by the device.
	 */

	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type  |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type  |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);

	return rp->status;
}

static u8 hci_cc_read_local_ext_features(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_rp_read_local_ext_features *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (hdev->max_page < rp->max_page) {
		if (test_bit(HCI_QUIRK_BROKEN_LOCAL_EXT_FEATURES_PAGE_2,
			     &hdev->quirks))
			bt_dev_warn(hdev, "broken local ext features page 2");
		else
			hdev->max_page = rp->max_page;
	}

	if (rp->page < HCI_MAX_PAGES)
		memcpy(hdev->features[rp->page], rp->features, 8);

	return rp->status;
}

static u8 hci_cc_read_flow_control_mode(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_read_flow_control_mode *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->flow_ctl_mode = rp->mode;

	return rp->status;
}

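/* Seed the ACL/SCO flow-control state: the controller reports its buffer
 * sizes and maximum packet counts here, and the outstanding-packet counters
 * (acl_cnt/sco_cnt) start out equal to those maximums.
 */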
static u8 hci_cc_read_buffer_size(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu  = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
		hdev->sco_mtu  = 64;
		hdev->sco_pkts = 8;
	}

	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);

	return rp->status;
}

static u8 hci_cc_read_bd_addr(struct hci_dev *hdev, void *data,
			      struct sk_buff *skb)
{
	struct hci_rp_read_bd_addr *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (test_bit(HCI_INIT, &hdev->flags))
		bacpy(&hdev->bdaddr, &rp->bdaddr);

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		bacpy(&hdev->setup_addr, &rp->bdaddr);

	return rp->status;
}

static u8 hci_cc_read_local_pairing_opts(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_rp_read_local_pairing_opts *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hdev->pairing_opts = rp->pairing_opts;
		hdev->max_enc_key_size = rp->max_key_size;
	}

	return rp->status;
}

static u8 hci_cc_read_page_scan_activity(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_activity *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (test_bit(HCI_INIT, &hdev->flags)) {
		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
		hdev->page_scan_window = __le16_to_cpu(rp->window);
	}

	return rp->status;
}

static u8 hci_cc_write_page_scan_activity(struct hci_dev *hdev, void *data,
					  struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_write_page_scan_activity *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
	if (!sent)
		return rp->status;

	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
	hdev->page_scan_window = __le16_to_cpu(sent->window);

	return rp->status;
}

static u8 hci_cc_read_page_scan_type(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_type *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->page_scan_type = rp->type;

	return rp->status;
}

static u8 hci_cc_write_page_scan_type(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	u8 *type;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
	if (type)
		hdev->page_scan_type = *type;

	return rp->status;
}

static u8 hci_cc_read_data_block_size(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_read_data_block_size *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
	hdev->block_len = __le16_to_cpu(rp->block_len);
	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);

	hdev->block_cnt = hdev->num_blocks;

	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
	       hdev->block_cnt, hdev->block_len);

	return rp->status;
}

static u8 hci_cc_read_clock(struct hci_dev *hdev, void *data,
			    struct sk_buff *skb)
{
	struct hci_rp_read_clock *rp = data;
	struct hci_cp_read_clock *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!cp)
		goto unlock;

	if (cp->which == 0x00) {
		hdev->clock = le32_to_cpu(rp->clock);
		goto unlock;
	}

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		conn->clock = le32_to_cpu(rp->clock);
		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
	}

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}

static u8 hci_cc_read_local_amp_info(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_info *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->amp_status = rp->amp_status;
	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
	hdev->amp_type = rp->amp_type;
	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);

	return rp->status;
}

static u8 hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_rp_read_inq_rsp_tx_power *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->inq_tx_power = rp->tx_power;

	return rp->status;
}

static u8 hci_cc_read_def_err_data_reporting(struct hci_dev *hdev, void *data,
					     struct sk_buff *skb)
{
	struct hci_rp_read_def_err_data_reporting *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->err_data_reporting = rp->err_data_reporting;

	return rp->status;
}

static u8 hci_cc_write_def_err_data_reporting(struct hci_dev *hdev, void *data,
					      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_write_def_err_data_reporting *cp;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
	if (!cp)
		return rp->status;

	hdev->err_data_reporting = cp->err_data_reporting;

	return rp->status;
}

static u8 hci_cc_pin_code_reply(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}

static u8 hci_cc_pin_code_neg_reply(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_pin_code_neg_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
						 rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_read_buffer_size(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_le_read_buffer_size *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
	hdev->le_pkts = rp->le_max_pkt;

	hdev->le_cnt = hdev->le_pkts;

	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);

	return rp->status;
}

static u8 hci_cc_le_read_local_features(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_local_features *rp = data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return rp->status;

	memcpy(hdev->le_features, rp->features, 8);

	return rp->status;
}

static u8 hci_cc_le_read_adv_tx_power(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_le_read_adv_tx_power *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->adv_tx_power = rp->tx_power;

	return rp->status;
}

static u8 hci_cc_user_confirm_reply(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
						 rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_user_confirm_neg_reply(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_user_passkey_reply(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
						 0, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_user_passkey_neg_reply(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_local_oob_data(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_data *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	return rp->status;
}

static u8 hci_cc_read_local_oob_ext_data(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_ext_data *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	return rp->status;
}

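/* If the new random address is the locally generated RPA, (re)arm the
 * rpa_expired delayed work so the address gets rotated again after
 * hdev->rpa_timeout seconds.
 */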
static u8 hci_cc_le_set_random_addr(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	bdaddr_t *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	bacpy(&hdev->random_addr, sent);

	if (!bacmp(&hdev->rpa, sent)) {
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired,
				   secs_to_jiffies(hdev->rpa_timeout));
	}

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_set_default_phy(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_default_phy *cp;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	hdev->le_tx_def_phys = cp->tx_phys;
	hdev->le_rx_def_phys = cp->rx_phys;

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_adv_set_rand_addr *cp;
	struct adv_info *adv;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
	/* Only update the adv instance for a non-zero handle, since
	 * handle 0x00 shall use HCI_OP_LE_SET_RANDOM_ADDR, which covers
	 * both extended and non-extended advertising.
	 */
	if (!cp || !cp->handle)
		return rp->status;

	hci_dev_lock(hdev);

	adv = hci_find_adv_instance(hdev, cp->handle);
	if (adv) {
		bacpy(&adv->random_addr, &cp->bdaddr);
		if (!bacmp(&hdev->rpa, &cp->bdaddr)) {
			adv->rpa_expired = false;
			queue_delayed_work(hdev->workqueue,
					   &adv->rpa_expired_cb,
					   secs_to_jiffies(hdev->rpa_timeout));
		}
	}

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_remove_adv_set(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	u8 *instance;
	int err;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	instance = hci_sent_cmd_data(hdev, HCI_OP_LE_REMOVE_ADV_SET);
	if (!instance)
		return rp->status;

	hci_dev_lock(hdev);

	err = hci_remove_adv_instance(hdev, *instance);
	if (!err)
		mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd), hdev,
					 *instance);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_clear_adv_sets(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct adv_info *adv, *n;
	int err;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (!hci_sent_cmd_data(hdev, HCI_OP_LE_CLEAR_ADV_SETS))
		return rp->status;

	hci_dev_lock(hdev);

	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
		u8 instance = adv->instance;

		err = hci_remove_adv_instance(hdev, instance);
		if (!err)
			mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd),
						 hdev, instance);
	}

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_read_transmit_power(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_transmit_power *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->min_le_tx_power = rp->min_le_tx_power;
	hdev->max_le_tx_power = rp->max_le_tx_power;

	return rp->status;
}

static u8 hci_cc_le_set_privacy_mode(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_privacy_mode *cp;
	struct hci_conn_params *params;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PRIVACY_MODE);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	params = hci_conn_params_lookup(hdev, &cp->bdaddr, cp->bdaddr_type);
	if (params)
		WRITE_ONCE(params->privacy_mode, cp->mode);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_set_adv_enable(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u8 *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	/* If we're doing connection initiation as peripheral, set a
	 * timeout in case something goes wrong.
	 */
	if (*sent) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

	hci_dev_unlock(hdev);

	return rp->status;
}

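/* Extended advertising can enable or disable individual advertising sets;
 * HCI_LE_ADV is only cleared once no set remains enabled.
 */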
static u8 hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_adv_enable *cp;
	struct hci_cp_ext_adv_set *set;
	struct adv_info *adv = NULL, *n;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
	if (!cp)
		return rp->status;

	set = (void *)cp->data;

	hci_dev_lock(hdev);

	if (cp->num_of_sets)
		adv = hci_find_adv_instance(hdev, set->handle);

	if (cp->enable) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		if (adv && !adv->periodic)
			adv->enabled = true;

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		if (cp->num_of_sets) {
			if (adv)
				adv->enabled = false;

			/* If just one instance was disabled, check whether any
			 * other instance is still enabled before clearing
			 * HCI_LE_ADV.
			 */
			list_for_each_entry_safe(adv, n, &hdev->adv_instances,
						 list) {
				if (adv->enabled)
					goto unlock;
			}
		} else {
			/* All instances shall be considered disabled */
			list_for_each_entry_safe(adv, n, &hdev->adv_instances,
						 list)
				adv->enabled = false;
		}

		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}

static u8 hci_cc_le_set_scan_param(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_param *cp;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	hdev->le_scan_type = cp->type;

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_set_ext_scan_param(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_scan_params *cp;
	struct hci_ev_status *rp = data;
	struct hci_cp_le_scan_phy_params *phy_param;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
	if (!cp)
		return rp->status;

	phy_param = (void *)cp->data;

	hci_dev_lock(hdev);

	hdev->le_scan_type = phy_param->type;

	hci_dev_unlock(hdev);

	return rp->status;
}

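/* During active scanning an advertising report is buffered in the discovery
 * state so it can be combined with the matching scan response, or flushed
 * when scanning is disabled (see le_set_scan_enable_complete below).
 */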
static bool has_pending_adv_report(struct hci_dev *hdev)
{
	struct discovery_state *d = &hdev->discovery;

	return bacmp(&d->last_adv_addr, BDADDR_ANY);
}

static void clear_pending_adv_report(struct hci_dev *hdev)
{
	struct discovery_state *d = &hdev->discovery;

	bacpy(&d->last_adv_addr, BDADDR_ANY);
	d->last_adv_data_len = 0;
}

static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 bdaddr_type, s8 rssi, u32 flags,
				     u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;

	if (len > max_adv_len(hdev))
		return;

	bacpy(&d->last_adv_addr, bdaddr);
	d->last_adv_addr_type = bdaddr_type;
	d->last_adv_rssi = rssi;
	d->last_adv_flags = flags;
	memcpy(d->last_adv_data, data, len);
	d->last_adv_data_len = len;
}

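/* Common completion handling for both the legacy and the extended
 * scan-enable commands.
 */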
static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
{
	hci_dev_lock(hdev);

	switch (enable) {
	case LE_SCAN_ENABLE:
		hci_dev_set_flag(hdev, HCI_LE_SCAN);
		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
			clear_pending_adv_report(hdev);
		if (hci_dev_test_flag(hdev, HCI_MESH))
			hci_discovery_set_state(hdev, DISCOVERY_FINDING);
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		hci_dev_clear_flag(hdev, HCI_LE_SCAN);

		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request; therefore,
		 * mark discovery as stopped.
		 */
1818 		if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
1819 			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1820 		else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
1821 			 hdev->discovery.state == DISCOVERY_FINDING)
1822 			queue_work(hdev->workqueue, &hdev->reenable_adv_work);
1823 
1824 		break;
1825 
1826 	default:
1827 		bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
1828 			   enable);
1829 		break;
1830 	}
1831 
1832 	hci_dev_unlock(hdev);
1833 }
1834 
1835 static u8 hci_cc_le_set_scan_enable(struct hci_dev *hdev, void *data,
1836 				    struct sk_buff *skb)
1837 {
1838 	struct hci_cp_le_set_scan_enable *cp;
1839 	struct hci_ev_status *rp = data;
1840 
1841 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1842 
1843 	if (rp->status)
1844 		return rp->status;
1845 
1846 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1847 	if (!cp)
1848 		return rp->status;
1849 
1850 	le_set_scan_enable_complete(hdev, cp->enable);
1851 
1852 	return rp->status;
1853 }
1854 
1855 static u8 hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev, void *data,
1856 					struct sk_buff *skb)
1857 {
1858 	struct hci_cp_le_set_ext_scan_enable *cp;
1859 	struct hci_ev_status *rp = data;
1860 
1861 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1862 
1863 	if (rp->status)
1864 		return rp->status;
1865 
1866 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
1867 	if (!cp)
1868 		return rp->status;
1869 
1870 	le_set_scan_enable_complete(hdev, cp->enable);
1871 
1872 	return rp->status;
1873 }
1874 
1875 static u8 hci_cc_le_read_num_adv_sets(struct hci_dev *hdev, void *data,
1876 				      struct sk_buff *skb)
1877 {
1878 	struct hci_rp_le_read_num_supported_adv_sets *rp = data;
1879 
1880 	bt_dev_dbg(hdev, "status 0x%2.2x No of Adv sets %u", rp->status,
1881 		   rp->num_of_sets);
1882 
1883 	if (rp->status)
1884 		return rp->status;
1885 
1886 	hdev->le_num_of_adv_sets = rp->num_of_sets;
1887 
1888 	return rp->status;
1889 }
1890 
1891 static u8 hci_cc_le_read_accept_list_size(struct hci_dev *hdev, void *data,
1892 					  struct sk_buff *skb)
1893 {
1894 	struct hci_rp_le_read_accept_list_size *rp = data;
1895 
1896 	bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);
1897 
1898 	if (rp->status)
1899 		return rp->status;
1900 
1901 	hdev->le_accept_list_size = rp->size;
1902 
1903 	return rp->status;
1904 }
1905 
1906 static u8 hci_cc_le_clear_accept_list(struct hci_dev *hdev, void *data,
1907 				      struct sk_buff *skb)
1908 {
1909 	struct hci_ev_status *rp = data;
1910 
1911 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1912 
1913 	if (rp->status)
1914 		return rp->status;
1915 
1916 	hci_dev_lock(hdev);
1917 	hci_bdaddr_list_clear(&hdev->le_accept_list);
1918 	hci_dev_unlock(hdev);
1919 
1920 	return rp->status;
1921 }
1922 
1923 static u8 hci_cc_le_add_to_accept_list(struct hci_dev *hdev, void *data,
1924 				       struct sk_buff *skb)
1925 {
1926 	struct hci_cp_le_add_to_accept_list *sent;
1927 	struct hci_ev_status *rp = data;
1928 
1929 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1930 
1931 	if (rp->status)
1932 		return rp->status;
1933 
1934 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
1935 	if (!sent)
1936 		return rp->status;
1937 
1938 	hci_dev_lock(hdev);
1939 	hci_bdaddr_list_add(&hdev->le_accept_list, &sent->bdaddr,
1940 			    sent->bdaddr_type);
1941 	hci_dev_unlock(hdev);
1942 
1943 	return rp->status;
1944 }
1945 
1946 static u8 hci_cc_le_del_from_accept_list(struct hci_dev *hdev, void *data,
1947 					 struct sk_buff *skb)
1948 {
1949 	struct hci_cp_le_del_from_accept_list *sent;
1950 	struct hci_ev_status *rp = data;
1951 
1952 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1953 
1954 	if (rp->status)
1955 		return rp->status;
1956 
1957 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST);
1958 	if (!sent)
1959 		return rp->status;
1960 
1961 	hci_dev_lock(hdev);
1962 	hci_bdaddr_list_del(&hdev->le_accept_list, &sent->bdaddr,
1963 			    sent->bdaddr_type);
1964 	hci_dev_unlock(hdev);
1965 
1966 	return rp->status;
1967 }
1968 
1969 static u8 hci_cc_le_read_supported_states(struct hci_dev *hdev, void *data,
1970 					  struct sk_buff *skb)
1971 {
1972 	struct hci_rp_le_read_supported_states *rp = data;
1973 
1974 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1975 
1976 	if (rp->status)
1977 		return rp->status;
1978 
1979 	memcpy(hdev->le_states, rp->le_states, 8);
1980 
1981 	return rp->status;
1982 }
1983 
1984 static u8 hci_cc_le_read_def_data_len(struct hci_dev *hdev, void *data,
1985 				      struct sk_buff *skb)
1986 {
1987 	struct hci_rp_le_read_def_data_len *rp = data;
1988 
1989 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1990 
1991 	if (rp->status)
1992 		return rp->status;
1993 
1994 	hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
1995 	hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
1996 
1997 	return rp->status;
1998 }
1999 
2000 static u8 hci_cc_le_write_def_data_len(struct hci_dev *hdev, void *data,
2001 				       struct sk_buff *skb)
2002 {
2003 	struct hci_cp_le_write_def_data_len *sent;
2004 	struct hci_ev_status *rp = data;
2005 
2006 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2007 
2008 	if (rp->status)
2009 		return rp->status;
2010 
2011 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
2012 	if (!sent)
2013 		return rp->status;
2014 
2015 	hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
2016 	hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
2017 
2018 	return rp->status;
2019 }
2020 
2021 static u8 hci_cc_le_add_to_resolv_list(struct hci_dev *hdev, void *data,
2022 				       struct sk_buff *skb)
2023 {
2024 	struct hci_cp_le_add_to_resolv_list *sent;
2025 	struct hci_ev_status *rp = data;
2026 
2027 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2028 
2029 	if (rp->status)
2030 		return rp->status;
2031 
2032 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
2033 	if (!sent)
2034 		return rp->status;
2035 
2036 	hci_dev_lock(hdev);
2037 	hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
2038 				sent->bdaddr_type, sent->peer_irk,
2039 				sent->local_irk);
2040 	hci_dev_unlock(hdev);
2041 
2042 	return rp->status;
2043 }
2044 
2045 static u8 hci_cc_le_del_from_resolv_list(struct hci_dev *hdev, void *data,
2046 					 struct sk_buff *skb)
2047 {
2048 	struct hci_cp_le_del_from_resolv_list *sent;
2049 	struct hci_ev_status *rp = data;
2050 
2051 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2052 
2053 	if (rp->status)
2054 		return rp->status;
2055 
2056 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
2057 	if (!sent)
2058 		return rp->status;
2059 
2060 	hci_dev_lock(hdev);
2061 	hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
2062 			    sent->bdaddr_type);
2063 	hci_dev_unlock(hdev);
2064 
2065 	return rp->status;
2066 }
2067 
2068 static u8 hci_cc_le_clear_resolv_list(struct hci_dev *hdev, void *data,
2069 				      struct sk_buff *skb)
2070 {
2071 	struct hci_ev_status *rp = data;
2072 
2073 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2074 
2075 	if (rp->status)
2076 		return rp->status;
2077 
2078 	hci_dev_lock(hdev);
2079 	hci_bdaddr_list_clear(&hdev->le_resolv_list);
2080 	hci_dev_unlock(hdev);
2081 
2082 	return rp->status;
2083 }
2084 
2085 static u8 hci_cc_le_read_resolv_list_size(struct hci_dev *hdev, void *data,
2086 					  struct sk_buff *skb)
2087 {
2088 	struct hci_rp_le_read_resolv_list_size *rp = data;
2089 
2090 	bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);
2091 
2092 	if (rp->status)
2093 		return rp->status;
2094 
2095 	hdev->le_resolv_list_size = rp->size;
2096 
2097 	return rp->status;
2098 }
2099 
2100 static u8 hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev, void *data,
2101 					       struct sk_buff *skb)
2102 {
2103 	struct hci_ev_status *rp = data;
2104 	__u8 *sent;
2105 
2106 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2107 
2108 	if (rp->status)
2109 		return rp->status;
2110 
2111 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
2112 	if (!sent)
2113 		return rp->status;
2114 
2115 	hci_dev_lock(hdev);
2116 
2117 	if (*sent)
2118 		hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
2119 	else
2120 		hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);
2121 
2122 	hci_dev_unlock(hdev);
2123 
2124 	return rp->status;
2125 }
2126 
2127 static u8 hci_cc_le_read_max_data_len(struct hci_dev *hdev, void *data,
2128 				      struct sk_buff *skb)
2129 {
2130 	struct hci_rp_le_read_max_data_len *rp = data;
2131 
2132 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2133 
2134 	if (rp->status)
2135 		return rp->status;
2136 
2137 	hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
2138 	hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
2139 	hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
2140 	hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
2141 
2142 	return rp->status;
2143 }
2144 
2145 static u8 hci_cc_write_le_host_supported(struct hci_dev *hdev, void *data,
2146 					 struct sk_buff *skb)
2147 {
2148 	struct hci_cp_write_le_host_supported *sent;
2149 	struct hci_ev_status *rp = data;
2150 
2151 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2152 
2153 	if (rp->status)
2154 		return rp->status;
2155 
2156 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
2157 	if (!sent)
2158 		return rp->status;
2159 
2160 	hci_dev_lock(hdev);
2161 
2162 	if (sent->le) {
2163 		hdev->features[1][0] |= LMP_HOST_LE;
2164 		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
2165 	} else {
2166 		hdev->features[1][0] &= ~LMP_HOST_LE;
2167 		hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
2168 		hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2169 	}
2170 
2171 	if (sent->simul)
2172 		hdev->features[1][0] |= LMP_HOST_LE_BREDR;
2173 	else
2174 		hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
2175 
2176 	hci_dev_unlock(hdev);
2177 
2178 	return rp->status;
2179 }
2180 
2181 static u8 hci_cc_set_adv_param(struct hci_dev *hdev, void *data,
2182 			       struct sk_buff *skb)
2183 {
2184 	struct hci_cp_le_set_adv_param *cp;
2185 	struct hci_ev_status *rp = data;
2186 
2187 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2188 
2189 	if (rp->status)
2190 		return rp->status;
2191 
2192 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
2193 	if (!cp)
2194 		return rp->status;
2195 
2196 	hci_dev_lock(hdev);
2197 	hdev->adv_addr_type = cp->own_address_type;
2198 	hci_dev_unlock(hdev);
2199 
2200 	return rp->status;
2201 }
2202 
2203 static u8 hci_cc_set_ext_adv_param(struct hci_dev *hdev, void *data,
2204 				   struct sk_buff *skb)
2205 {
2206 	struct hci_rp_le_set_ext_adv_params *rp = data;
2207 	struct hci_cp_le_set_ext_adv_params *cp;
2208 	struct adv_info *adv_instance;
2209 
2210 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2211 
2212 	if (rp->status)
2213 		return rp->status;
2214 
2215 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
2216 	if (!cp)
2217 		return rp->status;
2218 
2219 	hci_dev_lock(hdev);
2220 	hdev->adv_addr_type = cp->own_addr_type;
2221 	if (!cp->handle) {
2222 		/* Store in hdev for instance 0 */
2223 		hdev->adv_tx_power = rp->tx_power;
2224 	} else {
2225 		adv_instance = hci_find_adv_instance(hdev, cp->handle);
2226 		if (adv_instance)
2227 			adv_instance->tx_power = rp->tx_power;
2228 	}
2229 	/* Update adv data as the tx power is now known */
2230 	hci_update_adv_data(hdev, cp->handle);
2231 
2232 	hci_dev_unlock(hdev);
2233 
2234 	return rp->status;
2235 }
2236 
2237 static u8 hci_cc_read_rssi(struct hci_dev *hdev, void *data,
2238 			   struct sk_buff *skb)
2239 {
2240 	struct hci_rp_read_rssi *rp = data;
2241 	struct hci_conn *conn;
2242 
2243 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2244 
2245 	if (rp->status)
2246 		return rp->status;
2247 
2248 	hci_dev_lock(hdev);
2249 
2250 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
2251 	if (conn)
2252 		conn->rssi = rp->rssi;
2253 
2254 	hci_dev_unlock(hdev);
2255 
2256 	return rp->status;
2257 }
2258 
2259 static u8 hci_cc_read_tx_power(struct hci_dev *hdev, void *data,
2260 			       struct sk_buff *skb)
2261 {
2262 	struct hci_cp_read_tx_power *sent;
2263 	struct hci_rp_read_tx_power *rp = data;
2264 	struct hci_conn *conn;
2265 
2266 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2267 
2268 	if (rp->status)
2269 		return rp->status;
2270 
2271 	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
2272 	if (!sent)
2273 		return rp->status;
2274 
2275 	hci_dev_lock(hdev);
2276 
2277 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
2278 	if (!conn)
2279 		goto unlock;
2280 
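	/* Per the Read_Transmit_Power_Level command, type 0x00 requests the
	 * current transmit power level and 0x01 the maximum.
	 */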
2281 	switch (sent->type) {
2282 	case 0x00:
2283 		conn->tx_power = rp->tx_power;
2284 		break;
2285 	case 0x01:
2286 		conn->max_tx_power = rp->tx_power;
2287 		break;
2288 	}
2289 
2290 unlock:
2291 	hci_dev_unlock(hdev);
2292 	return rp->status;
2293 }
2294 
2295 static u8 hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, void *data,
2296 				      struct sk_buff *skb)
2297 {
2298 	struct hci_ev_status *rp = data;
2299 	u8 *mode;
2300 
2301 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2302 
2303 	if (rp->status)
2304 		return rp->status;
2305 
2306 	mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
2307 	if (mode)
2308 		hdev->ssp_debug_mode = *mode;
2309 
2310 	return rp->status;
2311 }
2312 
2313 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
2314 {
2315 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2316 
2317 	if (status) {
2318 		hci_conn_check_pending(hdev);
2319 		return;
2320 	}
2321 
2322 	if (hci_sent_cmd_data(hdev, HCI_OP_INQUIRY))
2323 		set_bit(HCI_INQUIRY, &hdev->flags);
2324 }
2325 
2326 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
2327 {
2328 	struct hci_cp_create_conn *cp;
2329 	struct hci_conn *conn;
2330 
2331 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2332 
2333 	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
2334 	if (!cp)
2335 		return;
2336 
2337 	hci_dev_lock(hdev);
2338 
2339 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2340 
2341 	bt_dev_dbg(hdev, "bdaddr %pMR hcon %p", &cp->bdaddr, conn);
2342 
2343 	if (status) {
2344 		if (conn && conn->state == BT_CONNECT) {
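			/* Status 0x0c (Command Disallowed) usually means
			 * another operation such as an inquiry is still in
			 * progress; park the connection in BT_CONNECT2 so it
			 * can be retried, giving up after two attempts.
			 */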
2345 			if (status != 0x0c || conn->attempt > 2) {
2346 				conn->state = BT_CLOSED;
2347 				hci_connect_cfm(conn, status);
2348 				hci_conn_del(conn);
2349 			} else
2350 				conn->state = BT_CONNECT2;
2351 		}
2352 	} else {
2353 		if (!conn) {
2354 			conn = hci_conn_add_unset(hdev, ACL_LINK, &cp->bdaddr,
2355 						  HCI_ROLE_MASTER);
2356 			if (!conn)
2357 				bt_dev_err(hdev, "no memory for new connection");
2358 		}
2359 	}
2360 
2361 	hci_dev_unlock(hdev);
2362 }
2363 
2364 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
2365 {
2366 	struct hci_cp_add_sco *cp;
2367 	struct hci_conn *acl;
2368 	struct hci_link *link;
2369 	__u16 handle;
2370 
2371 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2372 
2373 	if (!status)
2374 		return;
2375 
2376 	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
2377 	if (!cp)
2378 		return;
2379 
2380 	handle = __le16_to_cpu(cp->handle);
2381 
2382 	bt_dev_dbg(hdev, "handle 0x%4.4x", handle);
2383 
2384 	hci_dev_lock(hdev);
2385 
2386 	acl = hci_conn_hash_lookup_handle(hdev, handle);
2387 	if (acl) {
2388 		link = list_first_entry_or_null(&acl->link_list,
2389 						struct hci_link, list);
2390 		if (link && link->conn) {
2391 			link->conn->state = BT_CLOSED;
2392 
2393 			hci_connect_cfm(link->conn, status);
2394 			hci_conn_del(link->conn);
2395 		}
2396 	}
2397 
2398 	hci_dev_unlock(hdev);
2399 }
2400 
2401 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
2402 {
2403 	struct hci_cp_auth_requested *cp;
2404 	struct hci_conn *conn;
2405 
2406 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2407 
2408 	if (!status)
2409 		return;
2410 
2411 	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
2412 	if (!cp)
2413 		return;
2414 
2415 	hci_dev_lock(hdev);
2416 
2417 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2418 	if (conn) {
2419 		if (conn->state == BT_CONFIG) {
2420 			hci_connect_cfm(conn, status);
2421 			hci_conn_drop(conn);
2422 		}
2423 	}
2424 
2425 	hci_dev_unlock(hdev);
2426 }
2427 
2428 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
2429 {
2430 	struct hci_cp_set_conn_encrypt *cp;
2431 	struct hci_conn *conn;
2432 
2433 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2434 
2435 	if (!status)
2436 		return;
2437 
2438 	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
2439 	if (!cp)
2440 		return;
2441 
2442 	hci_dev_lock(hdev);
2443 
2444 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2445 	if (conn) {
2446 		if (conn->state == BT_CONFIG) {
2447 			hci_connect_cfm(conn, status);
2448 			hci_conn_drop(conn);
2449 		}
2450 	}
2451 
2452 	hci_dev_unlock(hdev);
2453 }
2454 
2455 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
2456 				    struct hci_conn *conn)
2457 {
2458 	if (conn->state != BT_CONFIG || !conn->out)
2459 		return 0;
2460 
2461 	if (conn->pending_sec_level == BT_SECURITY_SDP)
2462 		return 0;
2463 
2464 	/* Only request authentication for SSP connections or non-SSP
2465 	 * devices with sec_level MEDIUM or HIGH or if MITM protection
2466 	 * is requested.
2467 	 */
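	/* Note: bit 0 of auth_type mirrors the MITM bit of the HCI
	 * Authentication_Requirements coding, so odd values request MITM
	 * protection.
	 */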
2468 	if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
2469 	    conn->pending_sec_level != BT_SECURITY_FIPS &&
2470 	    conn->pending_sec_level != BT_SECURITY_HIGH &&
2471 	    conn->pending_sec_level != BT_SECURITY_MEDIUM)
2472 		return 0;
2473 
2474 	return 1;
2475 }
2476 
2477 static int hci_resolve_name(struct hci_dev *hdev,
2478 			    struct inquiry_entry *e)
2479 {
2480 	struct hci_cp_remote_name_req cp;
2481 
2482 	memset(&cp, 0, sizeof(cp));
2483 
2484 	bacpy(&cp.bdaddr, &e->data.bdaddr);
2485 	cp.pscan_rep_mode = e->data.pscan_rep_mode;
2486 	cp.pscan_mode = e->data.pscan_mode;
2487 	cp.clock_offset = e->data.clock_offset;
2488 
2489 	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2490 }
2491 
2492 static bool hci_resolve_next_name(struct hci_dev *hdev)
2493 {
2494 	struct discovery_state *discov = &hdev->discovery;
2495 	struct inquiry_entry *e;
2496 
2497 	if (list_empty(&discov->resolve))
2498 		return false;
2499 
2500 	/* We should stop if we already spent too much time resolving names. */
2501 	if (time_after(jiffies, discov->name_resolve_timeout)) {
2502 		bt_dev_warn_ratelimited(hdev, "Name resolve takes too long.");
2503 		return false;
2504 	}
2505 
2506 	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2507 	if (!e)
2508 		return false;
2509 
2510 	if (hci_resolve_name(hdev, e) == 0) {
2511 		e->name_state = NAME_PENDING;
2512 		return true;
2513 	}
2514 
2515 	return false;
2516 }
2517 
2518 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
2519 				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
2520 {
2521 	struct discovery_state *discov = &hdev->discovery;
2522 	struct inquiry_entry *e;
2523 
2524 	/* Update the mgmt connected state if necessary. Be careful,
2525 	 * however, with conn objects that exist but are not (yet)
2526 	 * connected: only those in the BT_CONFIG or BT_CONNECTED states
2527 	 * can be considered connected.
2528 	 */
2529 	if (conn &&
2530 	    (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
2531 	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2532 		mgmt_device_connected(hdev, conn, name, name_len);
2533 
2534 	if (discov->state == DISCOVERY_STOPPED)
2535 		return;
2536 
2537 	if (discov->state == DISCOVERY_STOPPING)
2538 		goto discov_complete;
2539 
2540 	if (discov->state != DISCOVERY_RESOLVING)
2541 		return;
2542 
2543 	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
2544 	/* If the device was not found in the list of devices whose names
2545 	 * are pending, there is no need to continue resolving the next
2546 	 * name, as it will be done upon receiving another Remote Name
2547 	 * Request Complete event. */
2548 	if (!e)
2549 		return;
2550 
2551 	list_del(&e->list);
2552 
2553 	e->name_state = name ? NAME_KNOWN : NAME_NOT_KNOWN;
2554 	mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00, e->data.rssi,
2555 			 name, name_len);
2556 
2557 	if (hci_resolve_next_name(hdev))
2558 		return;
2559 
2560 discov_complete:
2561 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2562 }
2563 
2564 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
2565 {
2566 	struct hci_cp_remote_name_req *cp;
2567 	struct hci_conn *conn;
2568 
2569 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2570 
2571 	/* If successful, wait for the name req complete event before
2572 	 * checking whether authentication is needed. */
2573 	if (!status)
2574 		return;
2575 
2576 	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
2577 	if (!cp)
2578 		return;
2579 
2580 	hci_dev_lock(hdev);
2581 
2582 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2583 
2584 	if (hci_dev_test_flag(hdev, HCI_MGMT))
2585 		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
2586 
2587 	if (!conn)
2588 		goto unlock;
2589 
2590 	if (!hci_outgoing_auth_needed(hdev, conn))
2591 		goto unlock;
2592 
2593 	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2594 		struct hci_cp_auth_requested auth_cp;
2595 
2596 		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2597 
2598 		auth_cp.handle = __cpu_to_le16(conn->handle);
2599 		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
2600 			     sizeof(auth_cp), &auth_cp);
2601 	}
2602 
2603 unlock:
2604 	hci_dev_unlock(hdev);
2605 }
2606 
2607 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
2608 {
2609 	struct hci_cp_read_remote_features *cp;
2610 	struct hci_conn *conn;
2611 
2612 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2613 
2614 	if (!status)
2615 		return;
2616 
2617 	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
2618 	if (!cp)
2619 		return;
2620 
2621 	hci_dev_lock(hdev);
2622 
2623 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2624 	if (conn) {
2625 		if (conn->state == BT_CONFIG) {
2626 			hci_connect_cfm(conn, status);
2627 			hci_conn_drop(conn);
2628 		}
2629 	}
2630 
2631 	hci_dev_unlock(hdev);
2632 }
2633 
2634 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
2635 {
2636 	struct hci_cp_read_remote_ext_features *cp;
2637 	struct hci_conn *conn;
2638 
2639 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2640 
2641 	if (!status)
2642 		return;
2643 
2644 	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
2645 	if (!cp)
2646 		return;
2647 
2648 	hci_dev_lock(hdev);
2649 
2650 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2651 	if (conn) {
2652 		if (conn->state == BT_CONFIG) {
2653 			hci_connect_cfm(conn, status);
2654 			hci_conn_drop(conn);
2655 		}
2656 	}
2657 
2658 	hci_dev_unlock(hdev);
2659 }
2660 
2661 static void hci_setup_sync_conn_status(struct hci_dev *hdev, __u16 handle,
2662 				       __u8 status)
2663 {
2664 	struct hci_conn *acl;
2665 	struct hci_link *link;
2666 
2667 	bt_dev_dbg(hdev, "handle 0x%4.4x status 0x%2.2x", handle, status);
2668 
2669 	hci_dev_lock(hdev);
2670 
2671 	acl = hci_conn_hash_lookup_handle(hdev, handle);
2672 	if (acl) {
2673 		link = list_first_entry_or_null(&acl->link_list,
2674 						struct hci_link, list);
2675 		if (link && link->conn) {
2676 			link->conn->state = BT_CLOSED;
2677 
2678 			hci_connect_cfm(link->conn, status);
2679 			hci_conn_del(link->conn);
2680 		}
2681 	}
2682 
2683 	hci_dev_unlock(hdev);
2684 }
2685 
2686 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2687 {
2688 	struct hci_cp_setup_sync_conn *cp;
2689 
2690 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2691 
2692 	if (!status)
2693 		return;
2694 
2695 	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
2696 	if (!cp)
2697 		return;
2698 
2699 	hci_setup_sync_conn_status(hdev, __le16_to_cpu(cp->handle), status);
2700 }
2701 
2702 static void hci_cs_enhanced_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2703 {
2704 	struct hci_cp_enhanced_setup_sync_conn *cp;
2705 
2706 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2707 
2708 	if (!status)
2709 		return;
2710 
2711 	cp = hci_sent_cmd_data(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN);
2712 	if (!cp)
2713 		return;
2714 
2715 	hci_setup_sync_conn_status(hdev, __le16_to_cpu(cp->handle), status);
2716 }
2717 
2718 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
2719 {
2720 	struct hci_cp_sniff_mode *cp;
2721 	struct hci_conn *conn;
2722 
2723 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2724 
2725 	if (!status)
2726 		return;
2727 
2728 	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
2729 	if (!cp)
2730 		return;
2731 
2732 	hci_dev_lock(hdev);
2733 
2734 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2735 	if (conn) {
2736 		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2737 
2738 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2739 			hci_sco_setup(conn, status);
2740 	}
2741 
2742 	hci_dev_unlock(hdev);
2743 }
2744 
2745 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
2746 {
2747 	struct hci_cp_exit_sniff_mode *cp;
2748 	struct hci_conn *conn;
2749 
2750 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2751 
2752 	if (!status)
2753 		return;
2754 
2755 	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
2756 	if (!cp)
2757 		return;
2758 
2759 	hci_dev_lock(hdev);
2760 
2761 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2762 	if (conn) {
2763 		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2764 
2765 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2766 			hci_sco_setup(conn, status);
2767 	}
2768 
2769 	hci_dev_unlock(hdev);
2770 }
2771 
2772 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
2773 {
2774 	struct hci_cp_disconnect *cp;
2775 	struct hci_conn_params *params;
2776 	struct hci_conn *conn;
2777 	bool mgmt_conn;
2778 
2779 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2780 
2781 	/* Wait for HCI_EV_DISCONN_COMPLETE if status is 0x00 and the device
2782 	 * is not suspended; otherwise clean up the connection immediately.
2783 	 */
2784 	if (!status && !hdev->suspended)
2785 		return;
2786 
2787 	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
2788 	if (!cp)
2789 		return;
2790 
2791 	hci_dev_lock(hdev);
2792 
2793 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2794 	if (!conn)
2795 		goto unlock;
2796 
2797 	if (status) {
2798 		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2799 				       conn->dst_type, status);
2800 
2801 		if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
2802 			hdev->cur_adv_instance = conn->adv_instance;
2803 			hci_enable_advertising(hdev);
2804 		}
2805 
2806 		/* Inform sockets conn is gone before we delete it */
2807 		hci_disconn_cfm(conn, HCI_ERROR_UNSPECIFIED);
2808 
2809 		goto done;
2810 	}
2811 
2812 	mgmt_conn = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
2813 
2814 	if (conn->type == ACL_LINK) {
2815 		if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
2816 			hci_remove_link_key(hdev, &conn->dst);
2817 	}
2818 
2819 	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
2820 	if (params) {
2821 		switch (params->auto_connect) {
2822 		case HCI_AUTO_CONN_LINK_LOSS:
2823 			if (cp->reason != HCI_ERROR_CONNECTION_TIMEOUT)
2824 				break;
2825 			fallthrough;
2826 
2827 		case HCI_AUTO_CONN_DIRECT:
2828 		case HCI_AUTO_CONN_ALWAYS:
2829 			hci_pend_le_list_del_init(params);
2830 			hci_pend_le_list_add(params, &hdev->pend_le_conns);
2831 			break;
2832 
2833 		default:
2834 			break;
2835 		}
2836 	}
2837 
2838 	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
2839 				 cp->reason, mgmt_conn);
2840 
2841 	hci_disconn_cfm(conn, cp->reason);
2842 
2843 done:
2844 	/* If the disconnection failed for any reason, the upper layer
2845 	 * does not retry the disconnect in the current implementation.
2846 	 * Hence, we need to do some basic cleanup here and re-enable
2847 	 * advertising if necessary.
2848 	 */
2849 	hci_conn_del(conn);
2850 unlock:
2851 	hci_dev_unlock(hdev);
2852 }
2853 
2854 static u8 ev_bdaddr_type(struct hci_dev *hdev, u8 type, bool *resolved)
2855 {
2856 	/* When using controller-based address resolution, the new address
2857 	 * types 0x02 and 0x03 are used. These types need to be converted
2858 	 * back into either the public or the random address type.
2859 	 */
2860 	switch (type) {
2861 	case ADDR_LE_DEV_PUBLIC_RESOLVED:
2862 		if (resolved)
2863 			*resolved = true;
2864 		return ADDR_LE_DEV_PUBLIC;
2865 	case ADDR_LE_DEV_RANDOM_RESOLVED:
2866 		if (resolved)
2867 			*resolved = true;
2868 		return ADDR_LE_DEV_RANDOM;
2869 	}
2870 
2871 	if (resolved)
2872 		*resolved = false;
2873 	return type;
2874 }
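
/* Illustrative sketch (not part of the upstream flow): how a caller might
 * use ev_bdaddr_type() to normalize an event address type while learning
 * whether the controller resolved an RPA. The function name is
 * hypothetical.
 */
static void __maybe_unused ev_bdaddr_type_example(struct hci_dev *hdev,
						  u8 ev_type)
{
	bool resolved;
	u8 type = ev_bdaddr_type(hdev, ev_type, &resolved);

	/* type is now ADDR_LE_DEV_PUBLIC or ADDR_LE_DEV_RANDOM, and
	 * resolved records whether 0x02/0x03 was seen.
	 */
	bt_dev_dbg(hdev, "addr type 0x%2.2x resolved %d", type, resolved);
}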
2875 
2876 static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
2877 			      u8 peer_addr_type, u8 own_address_type,
2878 			      u8 filter_policy)
2879 {
2880 	struct hci_conn *conn;
2881 
2882 	conn = hci_conn_hash_lookup_le(hdev, peer_addr,
2883 				       peer_addr_type);
2884 	if (!conn)
2885 		return;
2886 
2887 	own_address_type = ev_bdaddr_type(hdev, own_address_type, NULL);
2888 
2889 	/* Store the initiator and responder address information which
2890 	 * is needed for SMP. These values will not change during the
2891 	 * lifetime of the connection.
2892 	 */
2893 	conn->init_addr_type = own_address_type;
2894 	if (own_address_type == ADDR_LE_DEV_RANDOM)
2895 		bacpy(&conn->init_addr, &hdev->random_addr);
2896 	else
2897 		bacpy(&conn->init_addr, &hdev->bdaddr);
2898 
2899 	conn->resp_addr_type = peer_addr_type;
2900 	bacpy(&conn->resp_addr, peer_addr);
2901 }
2902 
2903 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
2904 {
2905 	struct hci_cp_le_create_conn *cp;
2906 
2907 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2908 
2909 	/* All connection failure handling is taken care of by the
2910 	 * hci_conn_failed function which is triggered by the HCI
2911 	 * request completion callbacks used for connecting.
2912 	 */
2913 	if (status)
2914 		return;
2915 
2916 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
2917 	if (!cp)
2918 		return;
2919 
2920 	hci_dev_lock(hdev);
2921 
2922 	cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2923 			  cp->own_address_type, cp->filter_policy);
2924 
2925 	hci_dev_unlock(hdev);
2926 }
2927 
2928 static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
2929 {
2930 	struct hci_cp_le_ext_create_conn *cp;
2931 
2932 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2933 
2934 	/* All connection failure handling is taken care of by the
2935 	 * hci_conn_failed function which is triggered by the HCI
2936 	 * request completion callbacks used for connecting.
2937 	 */
2938 	if (status)
2939 		return;
2940 
2941 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
2942 	if (!cp)
2943 		return;
2944 
2945 	hci_dev_lock(hdev);
2946 
2947 	cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2948 			  cp->own_addr_type, cp->filter_policy);
2949 
2950 	hci_dev_unlock(hdev);
2951 }
2952 
2953 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
2954 {
2955 	struct hci_cp_le_read_remote_features *cp;
2956 	struct hci_conn *conn;
2957 
2958 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2959 
2960 	if (!status)
2961 		return;
2962 
2963 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
2964 	if (!cp)
2965 		return;
2966 
2967 	hci_dev_lock(hdev);
2968 
2969 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2970 	if (conn) {
2971 		if (conn->state == BT_CONFIG) {
2972 			hci_connect_cfm(conn, status);
2973 			hci_conn_drop(conn);
2974 		}
2975 	}
2976 
2977 	hci_dev_unlock(hdev);
2978 }
2979 
2980 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
2981 {
2982 	struct hci_cp_le_start_enc *cp;
2983 	struct hci_conn *conn;
2984 
2985 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2986 
2987 	if (!status)
2988 		return;
2989 
2990 	hci_dev_lock(hdev);
2991 
2992 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
2993 	if (!cp)
2994 		goto unlock;
2995 
2996 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2997 	if (!conn)
2998 		goto unlock;
2999 
3000 	if (conn->state != BT_CONNECTED)
3001 		goto unlock;
3002 
3003 	hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3004 	hci_conn_drop(conn);
3005 
3006 unlock:
3007 	hci_dev_unlock(hdev);
3008 }
3009 
3010 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
3011 {
3012 	struct hci_cp_switch_role *cp;
3013 	struct hci_conn *conn;
3014 
3015 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
3016 
3017 	if (!status)
3018 		return;
3019 
3020 	cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
3021 	if (!cp)
3022 		return;
3023 
3024 	hci_dev_lock(hdev);
3025 
3026 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
3027 	if (conn)
3028 		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3029 
3030 	hci_dev_unlock(hdev);
3031 }
3032 
3033 static void hci_inquiry_complete_evt(struct hci_dev *hdev, void *data,
3034 				     struct sk_buff *skb)
3035 {
3036 	struct hci_ev_status *ev = data;
3037 	struct discovery_state *discov = &hdev->discovery;
3038 	struct inquiry_entry *e;
3039 
3040 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3041 
3042 	hci_conn_check_pending(hdev);
3043 
3044 	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
3045 		return;
3046 
3047 	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
3048 	wake_up_bit(&hdev->flags, HCI_INQUIRY);
3049 
3050 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
3051 		return;
3052 
3053 	hci_dev_lock(hdev);
3054 
3055 	if (discov->state != DISCOVERY_FINDING)
3056 		goto unlock;
3057 
3058 	if (list_empty(&discov->resolve)) {
3059 		/* When BR/EDR inquiry is active and no LE scanning is in
3060 		 * progress, change the discovery state to indicate completion.
3061 		 *
3062 		 * When running LE scanning and BR/EDR inquiry simultaneously
3063 		 * and the LE scan has already finished, change the discovery
3064 		 * state to indicate completion.
3065 		 */
3066 		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
3067 		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
3068 			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3069 		goto unlock;
3070 	}
3071 
3072 	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
3073 	if (e && hci_resolve_name(hdev, e) == 0) {
3074 		e->name_state = NAME_PENDING;
3075 		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
3076 		discov->name_resolve_timeout = jiffies + NAME_RESOLVE_DURATION;
3077 	} else {
3078 		/* When BR/EDR inquiry is active and no LE scanning is in
3079 		 * progress, change the discovery state to indicate completion.
3080 		 *
3081 		 * When running LE scanning and BR/EDR inquiry simultaneously
3082 		 * and the LE scan has already finished, change the discovery
3083 		 * state to indicate completion.
3084 		 */
3085 		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
3086 		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
3087 			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3088 	}
3089 
3090 unlock:
3091 	hci_dev_unlock(hdev);
3092 }
3093 
3094 static void hci_inquiry_result_evt(struct hci_dev *hdev, void *edata,
3095 				   struct sk_buff *skb)
3096 {
3097 	struct hci_ev_inquiry_result *ev = edata;
3098 	struct inquiry_data data;
3099 	int i;
3100 
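	/* flex_array_size() multiplies ev->num by sizeof(ev->info[0]) with
	 * overflow protection, so a malicious num cannot wrap the length
	 * handed to the skb pull.
	 */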
3101 	if (!hci_ev_skb_pull(hdev, skb, HCI_EV_INQUIRY_RESULT,
3102 			     flex_array_size(ev, info, ev->num)))
3103 		return;
3104 
3105 	bt_dev_dbg(hdev, "num %d", ev->num);
3106 
3107 	if (!ev->num)
3108 		return;
3109 
3110 	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
3111 		return;
3112 
3113 	hci_dev_lock(hdev);
3114 
3115 	for (i = 0; i < ev->num; i++) {
3116 		struct inquiry_info *info = &ev->info[i];
3117 		u32 flags;
3118 
3119 		bacpy(&data.bdaddr, &info->bdaddr);
3120 		data.pscan_rep_mode	= info->pscan_rep_mode;
3121 		data.pscan_period_mode	= info->pscan_period_mode;
3122 		data.pscan_mode		= info->pscan_mode;
3123 		memcpy(data.dev_class, info->dev_class, 3);
3124 		data.clock_offset	= info->clock_offset;
3125 		data.rssi		= HCI_RSSI_INVALID;
3126 		data.ssp_mode		= 0x00;
3127 
3128 		flags = hci_inquiry_cache_update(hdev, &data, false);
3129 
3130 		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3131 				  info->dev_class, HCI_RSSI_INVALID,
3132 				  flags, NULL, 0, NULL, 0, 0);
3133 	}
3134 
3135 	hci_dev_unlock(hdev);
3136 }
3137 
3138 static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
3139 				  struct sk_buff *skb)
3140 {
3141 	struct hci_ev_conn_complete *ev = data;
3142 	struct hci_conn *conn;
3143 	u8 status = ev->status;
3144 
3145 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
3146 
3147 	hci_dev_lock(hdev);
3148 
3149 	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
3150 	if (!conn) {
3151 		/* In case of error status and there is no connection pending
3152 		 * just unlock as there is nothing to cleanup.
3153 		 */
3154 		if (ev->status)
3155 			goto unlock;
3156 
3157 		/* The connection may not exist if it was auto-connected.
3158 		 * Check the BR/EDR allowlist to see if this device is allowed
3159 		 * to auto-connect. If the link is an ACL type, create the
3160 		 * connection automatically.
3161 		 *
3162 		 * Auto-connect will only occur if the event filter is
3163 		 * programmed with a given address. Right now, the event
3164 		 * filter is only used during suspend.
3165 		 */
3166 		if (ev->link_type == ACL_LINK &&
3167 		    hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
3168 						      &ev->bdaddr,
3169 						      BDADDR_BREDR)) {
3170 			conn = hci_conn_add_unset(hdev, ev->link_type,
3171 						  &ev->bdaddr, HCI_ROLE_SLAVE);
3172 			if (!conn) {
3173 				bt_dev_err(hdev, "no memory for new conn");
3174 				goto unlock;
3175 			}
3176 		} else {
3177 			if (ev->link_type != SCO_LINK)
3178 				goto unlock;
3179 
3180 			conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK,
3181 						       &ev->bdaddr);
3182 			if (!conn)
3183 				goto unlock;
3184 
3185 			conn->type = SCO_LINK;
3186 		}
3187 	}
3188 
3189 	/* The HCI_Connection_Complete event is only sent once per connection.
3190 	 * Processing it more than once per connection can corrupt kernel memory.
3191 	 *
3192 	 * As the connection handle is set here for the first time, it indicates
3193 	 * whether the connection is already set up.
3194 	 */
3195 	if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
3196 		bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
3197 		goto unlock;
3198 	}
3199 
3200 	if (!status) {
3201 		status = hci_conn_set_handle(conn, __le16_to_cpu(ev->handle));
3202 		if (status)
3203 			goto done;
3204 
3205 		if (conn->type == ACL_LINK) {
3206 			conn->state = BT_CONFIG;
3207 			hci_conn_hold(conn);
3208 
3209 			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
3210 			    !hci_find_link_key(hdev, &ev->bdaddr))
3211 				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
3212 			else
3213 				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3214 		} else
3215 			conn->state = BT_CONNECTED;
3216 
3217 		hci_debugfs_create_conn(conn);
3218 		hci_conn_add_sysfs(conn);
3219 
3220 		if (test_bit(HCI_AUTH, &hdev->flags))
3221 			set_bit(HCI_CONN_AUTH, &conn->flags);
3222 
3223 		if (test_bit(HCI_ENCRYPT, &hdev->flags))
3224 			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
3225 
3226 		/* Get remote features */
3227 		if (conn->type == ACL_LINK) {
3228 			struct hci_cp_read_remote_features cp;
3229 			cp.handle = ev->handle;
3230 			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
3231 				     sizeof(cp), &cp);
3232 
3233 			hci_update_scan(hdev);
3234 		}
3235 
3236 		/* Set packet type for incoming connection */
3237 		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
3238 			struct hci_cp_change_conn_ptype cp;
3239 			cp.handle = ev->handle;
3240 			cp.pkt_type = cpu_to_le16(conn->pkt_type);
3241 			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
3242 				     &cp);
3243 		}
3244 	}
3245 
3246 	if (conn->type == ACL_LINK)
3247 		hci_sco_setup(conn, ev->status);
3248 
3249 done:
3250 	if (status) {
3251 		hci_conn_failed(conn, status);
3252 	} else if (ev->link_type == SCO_LINK) {
3253 		switch (conn->setting & SCO_AIRMODE_MASK) {
3254 		case SCO_AIRMODE_CVSD:
3255 			if (hdev->notify)
3256 				hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
3257 			break;
3258 		}
3259 
3260 		hci_connect_cfm(conn, status);
3261 	}
3262 
3263 unlock:
3264 	hci_dev_unlock(hdev);
3265 
3266 	hci_conn_check_pending(hdev);
3267 }
3268 
3269 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
3270 {
3271 	struct hci_cp_reject_conn_req cp;
3272 
3273 	bacpy(&cp.bdaddr, bdaddr);
3274 	cp.reason = HCI_ERROR_REJ_BAD_ADDR;
3275 	hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
3276 }
3277 
3278 static void hci_conn_request_evt(struct hci_dev *hdev, void *data,
3279 				 struct sk_buff *skb)
3280 {
3281 	struct hci_ev_conn_request *ev = data;
3282 	int mask = hdev->link_mode;
3283 	struct inquiry_entry *ie;
3284 	struct hci_conn *conn;
3285 	__u8 flags = 0;
3286 
3287 	bt_dev_dbg(hdev, "bdaddr %pMR type 0x%x", &ev->bdaddr, ev->link_type);
3288 
3289 	/* Reject an incoming connection from a device with the same BD_ADDR
3290 	 * to guard against CVE-2020-26555.
3291 	 */
3292 	if (hdev && !bacmp(&hdev->bdaddr, &ev->bdaddr)) {
3293 		bt_dev_dbg(hdev, "Reject connection with same BD_ADDR %pMR",
3294 			   &ev->bdaddr);
3295 		hci_reject_conn(hdev, &ev->bdaddr);
3296 		return;
3297 	}
3298 
3299 	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
3300 				      &flags);
3301 
3302 	if (!(mask & HCI_LM_ACCEPT)) {
3303 		hci_reject_conn(hdev, &ev->bdaddr);
3304 		return;
3305 	}
3306 
3307 	hci_dev_lock(hdev);
3308 
3309 	if (hci_bdaddr_list_lookup(&hdev->reject_list, &ev->bdaddr,
3310 				   BDADDR_BREDR)) {
3311 		hci_reject_conn(hdev, &ev->bdaddr);
3312 		goto unlock;
3313 	}
3314 
3315 	/* Require HCI_CONNECTABLE or an accept list entry to accept the
3316 	 * connection. These features are only touched through mgmt so
3317 	 * only do the checks if HCI_MGMT is set.
3318 	 */
3319 	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
3320 	    !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
3321 	    !hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr,
3322 					       BDADDR_BREDR)) {
3323 		hci_reject_conn(hdev, &ev->bdaddr);
3324 		goto unlock;
3325 	}
3326 
3327 	/* Connection accepted */
3328 
3329 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3330 	if (ie)
3331 		memcpy(ie->data.dev_class, ev->dev_class, 3);
3332 
3333 	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
3334 				       &ev->bdaddr);
3335 	if (!conn) {
3336 		conn = hci_conn_add_unset(hdev, ev->link_type, &ev->bdaddr,
3337 					  HCI_ROLE_SLAVE);
3338 		if (!conn) {
3339 			bt_dev_err(hdev, "no memory for new connection");
3340 			goto unlock;
3341 		}
3342 	}
3343 
3344 	memcpy(conn->dev_class, ev->dev_class, 3);
3345 
3346 	hci_dev_unlock(hdev);
3347 
3348 	if (ev->link_type == ACL_LINK ||
3349 	    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
3350 		struct hci_cp_accept_conn_req cp;
3351 		conn->state = BT_CONNECT;
3352 
3353 		bacpy(&cp.bdaddr, &ev->bdaddr);
3354 
3355 		if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
3356 			cp.role = 0x00; /* Become central */
3357 		else
3358 			cp.role = 0x01; /* Remain peripheral */
3359 
3360 		hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
3361 	} else if (!(flags & HCI_PROTO_DEFER)) {
3362 		struct hci_cp_accept_sync_conn_req cp;
3363 		conn->state = BT_CONNECT;
3364 
3365 		bacpy(&cp.bdaddr, &ev->bdaddr);
3366 		cp.pkt_type = cpu_to_le16(conn->pkt_type);
3367 
3368 		cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
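		/* 0x00001f40 is 8000 octets/s (64 kbit/s), while 0xffff for
		 * max_latency and 0xff for retrans_effort mean "don't care".
		 */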
3369 		cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
3370 		cp.max_latency    = cpu_to_le16(0xffff);
3371 		cp.content_format = cpu_to_le16(hdev->voice_setting);
3372 		cp.retrans_effort = 0xff;
3373 
3374 		hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
3375 			     &cp);
3376 	} else {
3377 		conn->state = BT_CONNECT2;
3378 		hci_connect_cfm(conn, 0);
3379 	}
3380 
3381 	return;
3382 unlock:
3383 	hci_dev_unlock(hdev);
3384 }
3385 
3386 static u8 hci_to_mgmt_reason(u8 err)
3387 {
3388 	switch (err) {
3389 	case HCI_ERROR_CONNECTION_TIMEOUT:
3390 		return MGMT_DEV_DISCONN_TIMEOUT;
3391 	case HCI_ERROR_REMOTE_USER_TERM:
3392 	case HCI_ERROR_REMOTE_LOW_RESOURCES:
3393 	case HCI_ERROR_REMOTE_POWER_OFF:
3394 		return MGMT_DEV_DISCONN_REMOTE;
3395 	case HCI_ERROR_LOCAL_HOST_TERM:
3396 		return MGMT_DEV_DISCONN_LOCAL_HOST;
3397 	default:
3398 		return MGMT_DEV_DISCONN_UNKNOWN;
3399 	}
3400 }
3401 
3402 static void hci_disconn_complete_evt(struct hci_dev *hdev, void *data,
3403 				     struct sk_buff *skb)
3404 {
3405 	struct hci_ev_disconn_complete *ev = data;
3406 	u8 reason;
3407 	struct hci_conn_params *params;
3408 	struct hci_conn *conn;
3409 	bool mgmt_connected;
3410 
3411 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3412 
3413 	hci_dev_lock(hdev);
3414 
3415 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3416 	if (!conn)
3417 		goto unlock;
3418 
3419 	if (ev->status) {
3420 		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
3421 				       conn->dst_type, ev->status);
3422 		goto unlock;
3423 	}
3424 
3425 	conn->state = BT_CLOSED;
3426 
3427 	mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
3428 
3429 	if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
3430 		reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
3431 	else
3432 		reason = hci_to_mgmt_reason(ev->reason);
3433 
3434 	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
3435 				reason, mgmt_connected);
3436 
3437 	if (conn->type == ACL_LINK) {
3438 		if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
3439 			hci_remove_link_key(hdev, &conn->dst);
3440 
3441 		hci_update_scan(hdev);
3442 	}
3443 
3444 	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
3445 	if (params) {
3446 		switch (params->auto_connect) {
3447 		case HCI_AUTO_CONN_LINK_LOSS:
3448 			if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
3449 				break;
3450 			fallthrough;
3451 
3452 		case HCI_AUTO_CONN_DIRECT:
3453 		case HCI_AUTO_CONN_ALWAYS:
3454 			hci_pend_le_list_del_init(params);
3455 			hci_pend_le_list_add(params, &hdev->pend_le_conns);
3456 			hci_update_passive_scan(hdev);
3457 			break;
3458 
3459 		default:
3460 			break;
3461 		}
3462 	}
3463 
3464 	hci_disconn_cfm(conn, ev->reason);
3465 
3466 	/* Re-enable advertising if necessary, since it might
3467 	 * have been disabled by the connection. From the
3468 	 * HCI_LE_Set_Advertise_Enable command description in
3469 	 * the core specification (v4.0):
3470 	 * "The Controller shall continue advertising until the Host
3471 	 * issues an LE_Set_Advertise_Enable command with
3472 	 * Advertising_Enable set to 0x00 (Advertising is disabled)
3473 	 * or until a connection is created or until the Advertising
3474 	 * is timed out due to Directed Advertising."
3475 	 */
3476 	if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
3477 		hdev->cur_adv_instance = conn->adv_instance;
3478 		hci_enable_advertising(hdev);
3479 	}
3480 
3481 	hci_conn_del(conn);
3482 
3483 unlock:
3484 	hci_dev_unlock(hdev);
3485 }
3486 
3487 static void hci_auth_complete_evt(struct hci_dev *hdev, void *data,
3488 				  struct sk_buff *skb)
3489 {
3490 	struct hci_ev_auth_complete *ev = data;
3491 	struct hci_conn *conn;
3492 
3493 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3494 
3495 	hci_dev_lock(hdev);
3496 
3497 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3498 	if (!conn)
3499 		goto unlock;
3500 
3501 	if (!ev->status) {
3502 		clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3503 
3504 		if (!hci_conn_ssp_enabled(conn) &&
3505 		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
3506 			bt_dev_info(hdev, "re-auth of legacy device is not possible.");
3507 		} else {
3508 			set_bit(HCI_CONN_AUTH, &conn->flags);
3509 			conn->sec_level = conn->pending_sec_level;
3510 		}
3511 	} else {
3512 		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3513 			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3514 
3515 		mgmt_auth_failed(conn, ev->status);
3516 	}
3517 
3518 	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3519 	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
3520 
3521 	if (conn->state == BT_CONFIG) {
3522 		if (!ev->status && hci_conn_ssp_enabled(conn)) {
3523 			struct hci_cp_set_conn_encrypt cp;
3524 			cp.handle  = ev->handle;
3525 			cp.encrypt = 0x01;
3526 			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
3527 				     &cp);
3528 		} else {
3529 			conn->state = BT_CONNECTED;
3530 			hci_connect_cfm(conn, ev->status);
3531 			hci_conn_drop(conn);
3532 		}
3533 	} else {
3534 		hci_auth_cfm(conn, ev->status);
3535 
3536 		hci_conn_hold(conn);
3537 		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3538 		hci_conn_drop(conn);
3539 	}
3540 
3541 	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
3542 		if (!ev->status) {
3543 			struct hci_cp_set_conn_encrypt cp;
3544 			cp.handle  = ev->handle;
3545 			cp.encrypt = 0x01;
3546 			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
3547 				     &cp);
3548 		} else {
3549 			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3550 			hci_encrypt_cfm(conn, ev->status);
3551 		}
3552 	}
3553 
3554 unlock:
3555 	hci_dev_unlock(hdev);
3556 }
3557 
3558 static void hci_remote_name_evt(struct hci_dev *hdev, void *data,
3559 				struct sk_buff *skb)
3560 {
3561 	struct hci_ev_remote_name *ev = data;
3562 	struct hci_conn *conn;
3563 
3564 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3565 
3566 	hci_conn_check_pending(hdev);
3567 
3568 	hci_dev_lock(hdev);
3569 
3570 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3571 
3572 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
3573 		goto check_auth;
3574 
3575 	if (ev->status == 0)
3576 		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
3577 				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
3578 	else
3579 		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
3580 
3581 check_auth:
3582 	if (!conn)
3583 		goto unlock;
3584 
3585 	if (!hci_outgoing_auth_needed(hdev, conn))
3586 		goto unlock;
3587 
3588 	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3589 		struct hci_cp_auth_requested cp;
3590 
3591 		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
3592 
3593 		cp.handle = __cpu_to_le16(conn->handle);
3594 		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
3595 	}
3596 
3597 unlock:
3598 	hci_dev_unlock(hdev);
3599 }
3600 
3601 static void hci_encrypt_change_evt(struct hci_dev *hdev, void *data,
3602 				   struct sk_buff *skb)
3603 {
3604 	struct hci_ev_encrypt_change *ev = data;
3605 	struct hci_conn *conn;
3606 
3607 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3608 
3609 	hci_dev_lock(hdev);
3610 
3611 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3612 	if (!conn)
3613 		goto unlock;
3614 
3615 	if (!ev->status) {
3616 		if (ev->encrypt) {
3617 			/* Encryption implies authentication */
3618 			set_bit(HCI_CONN_AUTH, &conn->flags);
3619 			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
3620 			conn->sec_level = conn->pending_sec_level;
3621 
3622 			/* P-256 authentication key implies FIPS */
3623 			if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
3624 				set_bit(HCI_CONN_FIPS, &conn->flags);
3625 
3626 			if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
3627 			    conn->type == LE_LINK)
3628 				set_bit(HCI_CONN_AES_CCM, &conn->flags);
3629 		} else {
3630 			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
3631 			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
3632 		}
3633 	}
3634 
3635 	/* We should disregard the current RPA and generate a new one
3636 	 * whenever the encryption procedure fails.
3637 	 */
3638 	if (ev->status && conn->type == LE_LINK) {
3639 		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
3640 		hci_adv_instances_set_rpa_expired(hdev, true);
3641 	}
3642 
3643 	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3644 
3645 	/* Check link security requirements are met */
3646 	if (!hci_conn_check_link_mode(conn))
3647 		ev->status = HCI_ERROR_AUTH_FAILURE;
3648 
3649 	if (ev->status && conn->state == BT_CONNECTED) {
3650 		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3651 			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3652 
3653 		/* Notify upper layers so they can cleanup before
3654 		 * disconnecting.
3655 		 */
3656 		hci_encrypt_cfm(conn, ev->status);
3657 		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3658 		hci_conn_drop(conn);
3659 		goto unlock;
3660 	}
3661 
3662 	/* Try reading the encryption key size for encrypted ACL links */
3663 	if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
3664 		struct hci_cp_read_enc_key_size cp;
3665 
3666 		/* Only send HCI_Read_Encryption_Key_Size if the
3667 		 * controller really supports it. If it doesn't, assume
3668 		 * the default size (16).
3669 		 */
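		/* commands[20] bit 4 is the supported-commands bit for
		 * HCI_Read_Encryption_Key_Size.
		 */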
3670 		if (!(hdev->commands[20] & 0x10)) {
3671 			conn->enc_key_size = HCI_LINK_KEY_SIZE;
3672 			goto notify;
3673 		}
3674 
3675 		cp.handle = cpu_to_le16(conn->handle);
3676 		if (hci_send_cmd(hdev, HCI_OP_READ_ENC_KEY_SIZE,
3677 				 sizeof(cp), &cp)) {
3678 			bt_dev_err(hdev, "sending read key size failed");
3679 			conn->enc_key_size = HCI_LINK_KEY_SIZE;
3680 			goto notify;
3681 		}
3682 
3683 		goto unlock;
3684 	}
3685 
3686 	/* Set the default Authenticated Payload Timeout after
3687 	 * an LE link is established. As per Core Spec v5.0, Vol 2, Part B,
3688 	 * Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be
3689 	 * sent when the link is active and encryption is enabled. The conn
3690 	 * type can be either LE or ACL, and the controller must support
3691 	 * LMP Ping; AES-CCM encryption must be in use as well.
3692 	 */
3693 	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
3694 	    test_bit(HCI_CONN_AES_CCM, &conn->flags) &&
3695 	    ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) ||
3696 	     (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) {
3697 		struct hci_cp_write_auth_payload_to cp;
3698 
3699 		cp.handle = cpu_to_le16(conn->handle);
3700 		cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
3701 		if (hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,
3702 				 sizeof(cp), &cp))
3703 			bt_dev_err(hdev, "write auth payload timeout failed");
3704 	}
3705 
3706 notify:
3707 	hci_encrypt_cfm(conn, ev->status);
3708 
3709 unlock:
3710 	hci_dev_unlock(hdev);
3711 }
3712 
3713 static void hci_change_link_key_complete_evt(struct hci_dev *hdev, void *data,
3714 					     struct sk_buff *skb)
3715 {
3716 	struct hci_ev_change_link_key_complete *ev = data;
3717 	struct hci_conn *conn;
3718 
3719 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3720 
3721 	hci_dev_lock(hdev);
3722 
3723 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3724 	if (conn) {
3725 		if (!ev->status)
3726 			set_bit(HCI_CONN_SECURE, &conn->flags);
3727 
3728 		clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3729 
3730 		hci_key_change_cfm(conn, ev->status);
3731 	}
3732 
3733 	hci_dev_unlock(hdev);
3734 }
3735 
3736 static void hci_remote_features_evt(struct hci_dev *hdev, void *data,
3737 				    struct sk_buff *skb)
3738 {
3739 	struct hci_ev_remote_features *ev = data;
3740 	struct hci_conn *conn;
3741 
3742 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3743 
3744 	hci_dev_lock(hdev);
3745 
3746 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3747 	if (!conn)
3748 		goto unlock;
3749 
3750 	if (!ev->status)
3751 		memcpy(conn->features[0], ev->features, 8);
3752 
3753 	if (conn->state != BT_CONFIG)
3754 		goto unlock;
3755 
3756 	if (!ev->status && lmp_ext_feat_capable(hdev) &&
3757 	    lmp_ext_feat_capable(conn)) {
3758 		struct hci_cp_read_remote_ext_features cp;
3759 		cp.handle = ev->handle;
3760 		cp.page = 0x01;
3761 		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
3762 			     sizeof(cp), &cp);
3763 		goto unlock;
3764 	}
3765 
3766 	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
3767 		struct hci_cp_remote_name_req cp;
3768 		memset(&cp, 0, sizeof(cp));
3769 		bacpy(&cp.bdaddr, &conn->dst);
3770 		cp.pscan_rep_mode = 0x02;
3771 		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3772 	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3773 		mgmt_device_connected(hdev, conn, NULL, 0);
3774 
3775 	if (!hci_outgoing_auth_needed(hdev, conn)) {
3776 		conn->state = BT_CONNECTED;
3777 		hci_connect_cfm(conn, ev->status);
3778 		hci_conn_drop(conn);
3779 	}
3780 
3781 unlock:
3782 	hci_dev_unlock(hdev);
3783 }
3784 
3785 static inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev, u8 ncmd)
3786 {
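	/* ncmd is the Num_HCI_Command_Packets value from the event: zero
	 * means the controller cannot accept more commands for now, so the
	 * ncmd timer is armed as a safeguard against controllers that never
	 * report a non-zero value again.
	 */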
3787 	cancel_delayed_work(&hdev->cmd_timer);
3788 
3789 	rcu_read_lock();
3790 	if (!test_bit(HCI_RESET, &hdev->flags)) {
3791 		if (ncmd) {
3792 			cancel_delayed_work(&hdev->ncmd_timer);
3793 			atomic_set(&hdev->cmd_cnt, 1);
3794 		} else {
3795 			if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
3796 				queue_delayed_work(hdev->workqueue, &hdev->ncmd_timer,
3797 						   HCI_NCMD_TIMEOUT);
3798 		}
3799 	}
3800 	rcu_read_unlock();
3801 }
3802 
3803 static u8 hci_cc_le_read_buffer_size_v2(struct hci_dev *hdev, void *data,
3804 					struct sk_buff *skb)
3805 {
3806 	struct hci_rp_le_read_buffer_size_v2 *rp = data;
3807 
3808 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3809 
3810 	if (rp->status)
3811 		return rp->status;
3812 
3813 	hdev->le_mtu   = __le16_to_cpu(rp->acl_mtu);
3814 	hdev->le_pkts  = rp->acl_max_pkt;
3815 	hdev->iso_mtu  = __le16_to_cpu(rp->iso_mtu);
3816 	hdev->iso_pkts = rp->iso_max_pkt;
3817 
3818 	hdev->le_cnt  = hdev->le_pkts;
3819 	hdev->iso_cnt = hdev->iso_pkts;
3820 
3821 	BT_DBG("%s le mtu %d:%d iso mtu %d:%d", hdev->name, hdev->le_mtu,
3822 	       hdev->le_pkts, hdev->iso_mtu, hdev->iso_pkts);
3823 
3824 	return rp->status;
3825 }
3826 
3827 static void hci_unbound_cis_failed(struct hci_dev *hdev, u8 cig, u8 status)
3828 {
3829 	struct hci_conn *conn, *tmp;
3830 
3831 	lockdep_assert_held(&hdev->lock);
3832 
3833 	list_for_each_entry_safe(conn, tmp, &hdev->conn_hash.list, list) {
3834 		if (conn->type != ISO_LINK || !bacmp(&conn->dst, BDADDR_ANY) ||
3835 		    conn->state == BT_OPEN || conn->iso_qos.ucast.cig != cig)
3836 			continue;
3837 
3838 		if (HCI_CONN_HANDLE_UNSET(conn->handle))
3839 			hci_conn_failed(conn, status);
3840 	}
3841 }
3842 
3843 static u8 hci_cc_le_set_cig_params(struct hci_dev *hdev, void *data,
3844 				   struct sk_buff *skb)
3845 {
3846 	struct hci_rp_le_set_cig_params *rp = data;
3847 	struct hci_cp_le_set_cig_params *cp;
3848 	struct hci_conn *conn;
3849 	u8 status = rp->status;
3850 	bool pending = false;
3851 	int i;
3852 
3853 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3854 
3855 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_CIG_PARAMS);
3856 	if (!rp->status && (!cp || rp->num_handles != cp->num_cis ||
3857 			    rp->cig_id != cp->cig_id)) {
3858 		bt_dev_err(hdev, "unexpected Set CIG Parameters response data");
3859 		status = HCI_ERROR_UNSPECIFIED;
3860 	}
3861 
3862 	hci_dev_lock(hdev);
3863 
3864 	/* BLUETOOTH CORE SPECIFICATION Version 5.4 | Vol 4, Part E page 2554
3865 	 *
3866 	 * If the Status return parameter is non-zero, then the state of the CIG
3867 	 * and its CIS configurations shall not be changed by the command. If
3868 	 * the CIG did not already exist, it shall not be created.
3869 	 */
3870 	if (status) {
3871 		/* Keep current configuration, fail only the unbound CIS */
3872 		hci_unbound_cis_failed(hdev, rp->cig_id, status);
3873 		goto unlock;
3874 	}
3875 
3876 	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 2553
3877 	 *
3878 	 * If the Status return parameter is zero, then the Controller shall
3879 	 * set the Connection_Handle arrayed return parameter to the connection
3880 	 * handle(s) corresponding to the CIS configurations specified in
3881 	 * the CIS_IDs command parameter, in the same order.
3882 	 */
3883 	for (i = 0; i < rp->num_handles; ++i) {
3884 		conn = hci_conn_hash_lookup_cis(hdev, NULL, 0, rp->cig_id,
3885 						cp->cis[i].cis_id);
3886 		if (!conn || !bacmp(&conn->dst, BDADDR_ANY))
3887 			continue;
3888 
3889 		if (conn->state != BT_BOUND && conn->state != BT_CONNECT)
3890 			continue;
3891 
3892 		if (hci_conn_set_handle(conn, __le16_to_cpu(rp->handle[i])))
3893 			continue;
3894 
3895 		if (conn->state == BT_CONNECT)
3896 			pending = true;
3897 	}
3898 
3899 unlock:
3900 	if (pending)
3901 		hci_le_create_cis_pending(hdev);
3902 
3903 	hci_dev_unlock(hdev);
3904 
3905 	return rp->status;
3906 }
3907 
3908 static u8 hci_cc_le_setup_iso_path(struct hci_dev *hdev, void *data,
3909 				   struct sk_buff *skb)
3910 {
3911 	struct hci_rp_le_setup_iso_path *rp = data;
3912 	struct hci_cp_le_setup_iso_path *cp;
3913 	struct hci_conn *conn;
3914 
3915 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3916 
3917 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SETUP_ISO_PATH);
3918 	if (!cp)
3919 		return rp->status;
3920 
3921 	hci_dev_lock(hdev);
3922 
3923 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
3924 	if (!conn)
3925 		goto unlock;
3926 
3927 	if (rp->status) {
3928 		hci_connect_cfm(conn, rp->status);
3929 		hci_conn_del(conn);
3930 		goto unlock;
3931 	}
3932 
3933 	switch (cp->direction) {
3934 	/* Input (Host to Controller) */
3935 	case 0x00:
3936 		/* Only confirm connection if output only */
3937 		if (conn->iso_qos.ucast.out.sdu && !conn->iso_qos.ucast.in.sdu)
3938 			hci_connect_cfm(conn, rp->status);
3939 		break;
3940 	/* Output (Controller to Host) */
3941 	case 0x01:
3942 		/* Confirm connection since conn->iso_qos is always configured
3943 		 * last.
3944 		 */
3945 		hci_connect_cfm(conn, rp->status);
3946 		break;
3947 	}
3948 
3949 unlock:
3950 	hci_dev_unlock(hdev);
3951 	return rp->status;
3952 }
3953 
3954 static void hci_cs_le_create_big(struct hci_dev *hdev, u8 status)
3955 {
3956 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
3957 }
3958 
3959 static u8 hci_cc_set_per_adv_param(struct hci_dev *hdev, void *data,
3960 				   struct sk_buff *skb)
3961 {
3962 	struct hci_ev_status *rp = data;
3963 	struct hci_cp_le_set_per_adv_params *cp;
3964 
3965 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3966 
3967 	if (rp->status)
3968 		return rp->status;
3969 
3970 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_PARAMS);
3971 	if (!cp)
3972 		return rp->status;
3973 
3974 	/* TODO: set the conn state */
3975 	return rp->status;
3976 }
3977 
3978 static u8 hci_cc_le_set_per_adv_enable(struct hci_dev *hdev, void *data,
3979 				       struct sk_buff *skb)
3980 {
3981 	struct hci_ev_status *rp = data;
3982 	struct hci_cp_le_set_per_adv_enable *cp;
3983 	struct adv_info *adv = NULL, *n;
3984 	u8 per_adv_cnt = 0;
3985 
3986 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3987 
3988 	if (rp->status)
3989 		return rp->status;
3990 
3991 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE);
3992 	if (!cp)
3993 		return rp->status;
3994 
3995 	hci_dev_lock(hdev);
3996 
3997 	adv = hci_find_adv_instance(hdev, cp->handle);
3998 
3999 	if (cp->enable) {
4000 		hci_dev_set_flag(hdev, HCI_LE_PER_ADV);
4001 
4002 		if (adv)
4003 			adv->enabled = true;
4004 	} else {
4005 		/* If just one instance was disabled, check whether any other
4006 		 * instances are still enabled before clearing HCI_LE_PER_ADV.
4007 		 * The current periodic adv instance will be marked as
4008 		 * disabled once extended advertising is also disabled.
4009 		 */
4010 		list_for_each_entry_safe(adv, n, &hdev->adv_instances,
4011 					 list) {
4012 			if (adv->periodic && adv->enabled)
4013 				per_adv_cnt++;
4014 		}
4015 
4016 		if (per_adv_cnt > 1)
4017 			goto unlock;
4018 
4019 		hci_dev_clear_flag(hdev, HCI_LE_PER_ADV);
4020 	}
4021 
4022 unlock:
4023 	hci_dev_unlock(hdev);
4024 
4025 	return rp->status;
4026 }
4027 
4028 #define HCI_CC_VL(_op, _func, _min, _max) \
4029 { \
4030 	.op = _op, \
4031 	.func = _func, \
4032 	.min_len = _min, \
4033 	.max_len = _max, \
4034 }
4035 
4036 #define HCI_CC(_op, _func, _len) \
4037 	HCI_CC_VL(_op, _func, _len, _len)
4038 
4039 #define HCI_CC_STATUS(_op, _func) \
4040 	HCI_CC(_op, _func, sizeof(struct hci_ev_status))
4041 
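/* For illustration, HCI_CC_STATUS(HCI_OP_RESET, hci_cc_reset) expands to
 * the following table entry:
 *
 *	{
 *		.op      = HCI_OP_RESET,
 *		.func    = hci_cc_reset,
 *		.min_len = sizeof(struct hci_ev_status),
 *		.max_len = sizeof(struct hci_ev_status),
 *	}
 */
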
4042 static const struct hci_cc {
4043 	u16  op;
4044 	u8 (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
4045 	u16  min_len;
4046 	u16  max_len;
4047 } hci_cc_table[] = {
4048 	HCI_CC_STATUS(HCI_OP_INQUIRY_CANCEL, hci_cc_inquiry_cancel),
4049 	HCI_CC_STATUS(HCI_OP_PERIODIC_INQ, hci_cc_periodic_inq),
4050 	HCI_CC_STATUS(HCI_OP_EXIT_PERIODIC_INQ, hci_cc_exit_periodic_inq),
4051 	HCI_CC_STATUS(HCI_OP_REMOTE_NAME_REQ_CANCEL,
4052 		      hci_cc_remote_name_req_cancel),
4053 	HCI_CC(HCI_OP_ROLE_DISCOVERY, hci_cc_role_discovery,
4054 	       sizeof(struct hci_rp_role_discovery)),
4055 	HCI_CC(HCI_OP_READ_LINK_POLICY, hci_cc_read_link_policy,
4056 	       sizeof(struct hci_rp_read_link_policy)),
4057 	HCI_CC(HCI_OP_WRITE_LINK_POLICY, hci_cc_write_link_policy,
4058 	       sizeof(struct hci_rp_write_link_policy)),
4059 	HCI_CC(HCI_OP_READ_DEF_LINK_POLICY, hci_cc_read_def_link_policy,
4060 	       sizeof(struct hci_rp_read_def_link_policy)),
4061 	HCI_CC_STATUS(HCI_OP_WRITE_DEF_LINK_POLICY,
4062 		      hci_cc_write_def_link_policy),
4063 	HCI_CC_STATUS(HCI_OP_RESET, hci_cc_reset),
4064 	HCI_CC(HCI_OP_READ_STORED_LINK_KEY, hci_cc_read_stored_link_key,
4065 	       sizeof(struct hci_rp_read_stored_link_key)),
4066 	HCI_CC(HCI_OP_DELETE_STORED_LINK_KEY, hci_cc_delete_stored_link_key,
4067 	       sizeof(struct hci_rp_delete_stored_link_key)),
4068 	HCI_CC_STATUS(HCI_OP_WRITE_LOCAL_NAME, hci_cc_write_local_name),
4069 	HCI_CC(HCI_OP_READ_LOCAL_NAME, hci_cc_read_local_name,
4070 	       sizeof(struct hci_rp_read_local_name)),
4071 	HCI_CC_STATUS(HCI_OP_WRITE_AUTH_ENABLE, hci_cc_write_auth_enable),
4072 	HCI_CC_STATUS(HCI_OP_WRITE_ENCRYPT_MODE, hci_cc_write_encrypt_mode),
4073 	HCI_CC_STATUS(HCI_OP_WRITE_SCAN_ENABLE, hci_cc_write_scan_enable),
4074 	HCI_CC_STATUS(HCI_OP_SET_EVENT_FLT, hci_cc_set_event_filter),
4075 	HCI_CC(HCI_OP_READ_CLASS_OF_DEV, hci_cc_read_class_of_dev,
4076 	       sizeof(struct hci_rp_read_class_of_dev)),
4077 	HCI_CC_STATUS(HCI_OP_WRITE_CLASS_OF_DEV, hci_cc_write_class_of_dev),
4078 	HCI_CC(HCI_OP_READ_VOICE_SETTING, hci_cc_read_voice_setting,
4079 	       sizeof(struct hci_rp_read_voice_setting)),
4080 	HCI_CC_STATUS(HCI_OP_WRITE_VOICE_SETTING, hci_cc_write_voice_setting),
4081 	HCI_CC(HCI_OP_READ_NUM_SUPPORTED_IAC, hci_cc_read_num_supported_iac,
4082 	       sizeof(struct hci_rp_read_num_supported_iac)),
4083 	HCI_CC_STATUS(HCI_OP_WRITE_SSP_MODE, hci_cc_write_ssp_mode),
4084 	HCI_CC_STATUS(HCI_OP_WRITE_SC_SUPPORT, hci_cc_write_sc_support),
4085 	HCI_CC(HCI_OP_READ_AUTH_PAYLOAD_TO, hci_cc_read_auth_payload_timeout,
4086 	       sizeof(struct hci_rp_read_auth_payload_to)),
4087 	HCI_CC(HCI_OP_WRITE_AUTH_PAYLOAD_TO, hci_cc_write_auth_payload_timeout,
4088 	       sizeof(struct hci_rp_write_auth_payload_to)),
4089 	HCI_CC(HCI_OP_READ_LOCAL_VERSION, hci_cc_read_local_version,
4090 	       sizeof(struct hci_rp_read_local_version)),
4091 	HCI_CC(HCI_OP_READ_LOCAL_COMMANDS, hci_cc_read_local_commands,
4092 	       sizeof(struct hci_rp_read_local_commands)),
4093 	HCI_CC(HCI_OP_READ_LOCAL_FEATURES, hci_cc_read_local_features,
4094 	       sizeof(struct hci_rp_read_local_features)),
4095 	HCI_CC(HCI_OP_READ_LOCAL_EXT_FEATURES, hci_cc_read_local_ext_features,
4096 	       sizeof(struct hci_rp_read_local_ext_features)),
4097 	HCI_CC(HCI_OP_READ_BUFFER_SIZE, hci_cc_read_buffer_size,
4098 	       sizeof(struct hci_rp_read_buffer_size)),
4099 	HCI_CC(HCI_OP_READ_BD_ADDR, hci_cc_read_bd_addr,
4100 	       sizeof(struct hci_rp_read_bd_addr)),
4101 	HCI_CC(HCI_OP_READ_LOCAL_PAIRING_OPTS, hci_cc_read_local_pairing_opts,
4102 	       sizeof(struct hci_rp_read_local_pairing_opts)),
4103 	HCI_CC(HCI_OP_READ_PAGE_SCAN_ACTIVITY, hci_cc_read_page_scan_activity,
4104 	       sizeof(struct hci_rp_read_page_scan_activity)),
4105 	HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
4106 		      hci_cc_write_page_scan_activity),
4107 	HCI_CC(HCI_OP_READ_PAGE_SCAN_TYPE, hci_cc_read_page_scan_type,
4108 	       sizeof(struct hci_rp_read_page_scan_type)),
4109 	HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_TYPE, hci_cc_write_page_scan_type),
4110 	HCI_CC(HCI_OP_READ_DATA_BLOCK_SIZE, hci_cc_read_data_block_size,
4111 	       sizeof(struct hci_rp_read_data_block_size)),
4112 	HCI_CC(HCI_OP_READ_FLOW_CONTROL_MODE, hci_cc_read_flow_control_mode,
4113 	       sizeof(struct hci_rp_read_flow_control_mode)),
4114 	HCI_CC(HCI_OP_READ_LOCAL_AMP_INFO, hci_cc_read_local_amp_info,
4115 	       sizeof(struct hci_rp_read_local_amp_info)),
4116 	HCI_CC(HCI_OP_READ_CLOCK, hci_cc_read_clock,
4117 	       sizeof(struct hci_rp_read_clock)),
4118 	HCI_CC(HCI_OP_READ_ENC_KEY_SIZE, hci_cc_read_enc_key_size,
4119 	       sizeof(struct hci_rp_read_enc_key_size)),
4120 	HCI_CC(HCI_OP_READ_INQ_RSP_TX_POWER, hci_cc_read_inq_rsp_tx_power,
4121 	       sizeof(struct hci_rp_read_inq_rsp_tx_power)),
4122 	HCI_CC(HCI_OP_READ_DEF_ERR_DATA_REPORTING,
4123 	       hci_cc_read_def_err_data_reporting,
4124 	       sizeof(struct hci_rp_read_def_err_data_reporting)),
4125 	HCI_CC_STATUS(HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
4126 		      hci_cc_write_def_err_data_reporting),
4127 	HCI_CC(HCI_OP_PIN_CODE_REPLY, hci_cc_pin_code_reply,
4128 	       sizeof(struct hci_rp_pin_code_reply)),
4129 	HCI_CC(HCI_OP_PIN_CODE_NEG_REPLY, hci_cc_pin_code_neg_reply,
4130 	       sizeof(struct hci_rp_pin_code_neg_reply)),
4131 	HCI_CC(HCI_OP_READ_LOCAL_OOB_DATA, hci_cc_read_local_oob_data,
4132 	       sizeof(struct hci_rp_read_local_oob_data)),
4133 	HCI_CC(HCI_OP_READ_LOCAL_OOB_EXT_DATA, hci_cc_read_local_oob_ext_data,
4134 	       sizeof(struct hci_rp_read_local_oob_ext_data)),
4135 	HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE, hci_cc_le_read_buffer_size,
4136 	       sizeof(struct hci_rp_le_read_buffer_size)),
4137 	HCI_CC(HCI_OP_LE_READ_LOCAL_FEATURES, hci_cc_le_read_local_features,
4138 	       sizeof(struct hci_rp_le_read_local_features)),
4139 	HCI_CC(HCI_OP_LE_READ_ADV_TX_POWER, hci_cc_le_read_adv_tx_power,
4140 	       sizeof(struct hci_rp_le_read_adv_tx_power)),
4141 	HCI_CC(HCI_OP_USER_CONFIRM_REPLY, hci_cc_user_confirm_reply,
4142 	       sizeof(struct hci_rp_user_confirm_reply)),
4143 	HCI_CC(HCI_OP_USER_CONFIRM_NEG_REPLY, hci_cc_user_confirm_neg_reply,
4144 	       sizeof(struct hci_rp_user_confirm_reply)),
4145 	HCI_CC(HCI_OP_USER_PASSKEY_REPLY, hci_cc_user_passkey_reply,
4146 	       sizeof(struct hci_rp_user_confirm_reply)),
4147 	HCI_CC(HCI_OP_USER_PASSKEY_NEG_REPLY, hci_cc_user_passkey_neg_reply,
4148 	       sizeof(struct hci_rp_user_confirm_reply)),
4149 	HCI_CC_STATUS(HCI_OP_LE_SET_RANDOM_ADDR, hci_cc_le_set_random_addr),
4150 	HCI_CC_STATUS(HCI_OP_LE_SET_ADV_ENABLE, hci_cc_le_set_adv_enable),
4151 	HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_PARAM, hci_cc_le_set_scan_param),
4152 	HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_ENABLE, hci_cc_le_set_scan_enable),
4153 	HCI_CC(HCI_OP_LE_READ_ACCEPT_LIST_SIZE,
4154 	       hci_cc_le_read_accept_list_size,
4155 	       sizeof(struct hci_rp_le_read_accept_list_size)),
4156 	HCI_CC_STATUS(HCI_OP_LE_CLEAR_ACCEPT_LIST, hci_cc_le_clear_accept_list),
4157 	HCI_CC_STATUS(HCI_OP_LE_ADD_TO_ACCEPT_LIST,
4158 		      hci_cc_le_add_to_accept_list),
4159 	HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_ACCEPT_LIST,
4160 		      hci_cc_le_del_from_accept_list),
4161 	HCI_CC(HCI_OP_LE_READ_SUPPORTED_STATES, hci_cc_le_read_supported_states,
4162 	       sizeof(struct hci_rp_le_read_supported_states)),
4163 	HCI_CC(HCI_OP_LE_READ_DEF_DATA_LEN, hci_cc_le_read_def_data_len,
4164 	       sizeof(struct hci_rp_le_read_def_data_len)),
4165 	HCI_CC_STATUS(HCI_OP_LE_WRITE_DEF_DATA_LEN,
4166 		      hci_cc_le_write_def_data_len),
4167 	HCI_CC_STATUS(HCI_OP_LE_ADD_TO_RESOLV_LIST,
4168 		      hci_cc_le_add_to_resolv_list),
4169 	HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_RESOLV_LIST,
4170 		      hci_cc_le_del_from_resolv_list),
4171 	HCI_CC_STATUS(HCI_OP_LE_CLEAR_RESOLV_LIST,
4172 		      hci_cc_le_clear_resolv_list),
4173 	HCI_CC(HCI_OP_LE_READ_RESOLV_LIST_SIZE, hci_cc_le_read_resolv_list_size,
4174 	       sizeof(struct hci_rp_le_read_resolv_list_size)),
4175 	HCI_CC_STATUS(HCI_OP_LE_SET_ADDR_RESOLV_ENABLE,
4176 		      hci_cc_le_set_addr_resolution_enable),
4177 	HCI_CC(HCI_OP_LE_READ_MAX_DATA_LEN, hci_cc_le_read_max_data_len,
4178 	       sizeof(struct hci_rp_le_read_max_data_len)),
4179 	HCI_CC_STATUS(HCI_OP_WRITE_LE_HOST_SUPPORTED,
4180 		      hci_cc_write_le_host_supported),
4181 	HCI_CC_STATUS(HCI_OP_LE_SET_ADV_PARAM, hci_cc_set_adv_param),
4182 	HCI_CC(HCI_OP_READ_RSSI, hci_cc_read_rssi,
4183 	       sizeof(struct hci_rp_read_rssi)),
4184 	HCI_CC(HCI_OP_READ_TX_POWER, hci_cc_read_tx_power,
4185 	       sizeof(struct hci_rp_read_tx_power)),
4186 	HCI_CC_STATUS(HCI_OP_WRITE_SSP_DEBUG_MODE, hci_cc_write_ssp_debug_mode),
4187 	HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_PARAMS,
4188 		      hci_cc_le_set_ext_scan_param),
4189 	HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_ENABLE,
4190 		      hci_cc_le_set_ext_scan_enable),
4191 	HCI_CC_STATUS(HCI_OP_LE_SET_DEFAULT_PHY, hci_cc_le_set_default_phy),
4192 	HCI_CC(HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
4193 	       hci_cc_le_read_num_adv_sets,
4194 	       sizeof(struct hci_rp_le_read_num_supported_adv_sets)),
4195 	HCI_CC(HCI_OP_LE_SET_EXT_ADV_PARAMS, hci_cc_set_ext_adv_param,
4196 	       sizeof(struct hci_rp_le_set_ext_adv_params)),
4197 	HCI_CC_STATUS(HCI_OP_LE_SET_EXT_ADV_ENABLE,
4198 		      hci_cc_le_set_ext_adv_enable),
4199 	HCI_CC_STATUS(HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
4200 		      hci_cc_le_set_adv_set_random_addr),
4201 	HCI_CC_STATUS(HCI_OP_LE_REMOVE_ADV_SET, hci_cc_le_remove_adv_set),
4202 	HCI_CC_STATUS(HCI_OP_LE_CLEAR_ADV_SETS, hci_cc_le_clear_adv_sets),
4203 	HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_PARAMS, hci_cc_set_per_adv_param),
4204 	HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_ENABLE,
4205 		      hci_cc_le_set_per_adv_enable),
4206 	HCI_CC(HCI_OP_LE_READ_TRANSMIT_POWER, hci_cc_le_read_transmit_power,
4207 	       sizeof(struct hci_rp_le_read_transmit_power)),
4208 	HCI_CC_STATUS(HCI_OP_LE_SET_PRIVACY_MODE, hci_cc_le_set_privacy_mode),
4209 	HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE_V2, hci_cc_le_read_buffer_size_v2,
4210 	       sizeof(struct hci_rp_le_read_buffer_size_v2)),
4211 	HCI_CC_VL(HCI_OP_LE_SET_CIG_PARAMS, hci_cc_le_set_cig_params,
4212 		  sizeof(struct hci_rp_le_set_cig_params), HCI_MAX_EVENT_SIZE),
4213 	HCI_CC(HCI_OP_LE_SETUP_ISO_PATH, hci_cc_le_setup_iso_path,
4214 	       sizeof(struct hci_rp_le_setup_iso_path)),
4215 };
4216 
4217 static u8 hci_cc_func(struct hci_dev *hdev, const struct hci_cc *cc,
4218 		      struct sk_buff *skb)
4219 {
4220 	void *data;
4221 
4222 	if (skb->len < cc->min_len) {
4223 		bt_dev_err(hdev, "unexpected cc 0x%4.4x length: %u < %u",
4224 			   cc->op, skb->len, cc->min_len);
4225 		return HCI_ERROR_UNSPECIFIED;
4226 	}
4227 
4228 	/* Just warn if the length is over max_len; it may still be possible
4229 	 * to partially parse the cc, so leave it to the callback to decide
4230 	 * whether that is acceptable.
4231 	 */
4232 	if (skb->len > cc->max_len)
4233 		bt_dev_warn(hdev, "unexpected cc 0x%4.4x length: %u > %u",
4234 			    cc->op, skb->len, cc->max_len);
4235 
4236 	data = hci_cc_skb_pull(hdev, skb, cc->op, cc->min_len);
4237 	if (!data)
4238 		return HCI_ERROR_UNSPECIFIED;
4239 
4240 	return cc->func(hdev, data, skb);
4241 }
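
/* A minimal sketch (hypothetical handler, not part of this file) of how a
 * variable-length Command Complete is typically consumed: hci_cc_func()
 * has already pulled min_len bytes into @data, and any per-element tail
 * is still in @skb for the callback to pull itself:
 *
 *	static u8 hci_cc_example(struct hci_dev *hdev, void *data,
 *				 struct sk_buff *skb)
 *	{
 *		struct hci_rp_example *rp = data;	// fixed header
 *		void *elem;
 *
 *		while ((elem = skb_pull_data(skb, ELEM_SIZE)))
 *			process_element(hdev, elem);	// variable tail
 *
 *		return rp->status;
 *	}
 *
 * This is why hci_cc_le_set_cig_params() is registered with HCI_CC_VL
 * rather than HCI_CC in the table above.
 */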
4242 
4243 static void hci_cmd_complete_evt(struct hci_dev *hdev, void *data,
4244 				 struct sk_buff *skb, u16 *opcode, u8 *status,
4245 				 hci_req_complete_t *req_complete,
4246 				 hci_req_complete_skb_t *req_complete_skb)
4247 {
4248 	struct hci_ev_cmd_complete *ev = data;
4249 	int i;
4250 
4251 	*opcode = __le16_to_cpu(ev->opcode);
4252 
4253 	bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);
4254 
4255 	for (i = 0; i < ARRAY_SIZE(hci_cc_table); i++) {
4256 		if (hci_cc_table[i].op == *opcode) {
4257 			*status = hci_cc_func(hdev, &hci_cc_table[i], skb);
4258 			break;
4259 		}
4260 	}
4261 
4262 	if (i == ARRAY_SIZE(hci_cc_table)) {
4263 		/* Unknown opcode, assume byte 0 contains the status, so
4264 		 * that e.g. __hci_cmd_sync() properly returns errors
4265 		 * for vendor specific commands sent by HCI drivers.
4266 		 * If a vendor doesn't actually follow this convention we may
4267 		 * need to introduce a vendor CC table in order to properly set
4268 		 * the status.
4269 		 */
4270 		*status = skb->data[0];
4271 	}
4272 
4273 	handle_cmd_cnt_and_timer(hdev, ev->ncmd);
4274 
4275 	hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
4276 			     req_complete_skb);
4277 
4278 	if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
4279 		bt_dev_err(hdev,
4280 			   "unexpected event for opcode 0x%4.4x", *opcode);
4281 		return;
4282 	}
4283 
4284 	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
4285 		queue_work(hdev->workqueue, &hdev->cmd_work);
4286 }
4287 
4288 static void hci_cs_le_create_cis(struct hci_dev *hdev, u8 status)
4289 {
4290 	struct hci_cp_le_create_cis *cp;
4291 	bool pending = false;
4292 	int i;
4293 
4294 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
4295 
4296 	if (!status)
4297 		return;
4298 
4299 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CIS);
4300 	if (!cp)
4301 		return;
4302 
4303 	hci_dev_lock(hdev);
4304 
4305 	/* Remove connection if command failed */
4306 	for (i = 0; cp->num_cis; cp->num_cis--, i++) {
4307 		struct hci_conn *conn;
4308 		u16 handle;
4309 
4310 		handle = __le16_to_cpu(cp->cis[i].cis_handle);
4311 
4312 		conn = hci_conn_hash_lookup_handle(hdev, handle);
4313 		if (conn) {
4314 			if (test_and_clear_bit(HCI_CONN_CREATE_CIS,
4315 					       &conn->flags))
4316 				pending = true;
4317 			conn->state = BT_CLOSED;
4318 			hci_connect_cfm(conn, status);
4319 			hci_conn_del(conn);
4320 		}
4321 	}
4322 
4323 	if (pending)
4324 		hci_le_create_cis_pending(hdev);
4325 
4326 	hci_dev_unlock(hdev);
4327 }
4328 
4329 #define HCI_CS(_op, _func) \
4330 { \
4331 	.op = _op, \
4332 	.func = _func, \
4333 }
4334 
4335 static const struct hci_cs {
4336 	u16  op;
4337 	void (*func)(struct hci_dev *hdev, __u8 status);
4338 } hci_cs_table[] = {
4339 	HCI_CS(HCI_OP_INQUIRY, hci_cs_inquiry),
4340 	HCI_CS(HCI_OP_CREATE_CONN, hci_cs_create_conn),
4341 	HCI_CS(HCI_OP_DISCONNECT, hci_cs_disconnect),
4342 	HCI_CS(HCI_OP_ADD_SCO, hci_cs_add_sco),
4343 	HCI_CS(HCI_OP_AUTH_REQUESTED, hci_cs_auth_requested),
4344 	HCI_CS(HCI_OP_SET_CONN_ENCRYPT, hci_cs_set_conn_encrypt),
4345 	HCI_CS(HCI_OP_REMOTE_NAME_REQ, hci_cs_remote_name_req),
4346 	HCI_CS(HCI_OP_READ_REMOTE_FEATURES, hci_cs_read_remote_features),
4347 	HCI_CS(HCI_OP_READ_REMOTE_EXT_FEATURES,
4348 	       hci_cs_read_remote_ext_features),
4349 	HCI_CS(HCI_OP_SETUP_SYNC_CONN, hci_cs_setup_sync_conn),
4350 	HCI_CS(HCI_OP_ENHANCED_SETUP_SYNC_CONN,
4351 	       hci_cs_enhanced_setup_sync_conn),
4352 	HCI_CS(HCI_OP_SNIFF_MODE, hci_cs_sniff_mode),
4353 	HCI_CS(HCI_OP_EXIT_SNIFF_MODE, hci_cs_exit_sniff_mode),
4354 	HCI_CS(HCI_OP_SWITCH_ROLE, hci_cs_switch_role),
4355 	HCI_CS(HCI_OP_LE_CREATE_CONN, hci_cs_le_create_conn),
4356 	HCI_CS(HCI_OP_LE_READ_REMOTE_FEATURES, hci_cs_le_read_remote_features),
4357 	HCI_CS(HCI_OP_LE_START_ENC, hci_cs_le_start_enc),
4358 	HCI_CS(HCI_OP_LE_EXT_CREATE_CONN, hci_cs_le_ext_create_conn),
4359 	HCI_CS(HCI_OP_LE_CREATE_CIS, hci_cs_le_create_cis),
4360 	HCI_CS(HCI_OP_LE_CREATE_BIG, hci_cs_le_create_big),
4361 };
4362 
4363 static void hci_cmd_status_evt(struct hci_dev *hdev, void *data,
4364 			       struct sk_buff *skb, u16 *opcode, u8 *status,
4365 			       hci_req_complete_t *req_complete,
4366 			       hci_req_complete_skb_t *req_complete_skb)
4367 {
4368 	struct hci_ev_cmd_status *ev = data;
4369 	int i;
4370 
4371 	*opcode = __le16_to_cpu(ev->opcode);
4372 	*status = ev->status;
4373 
4374 	bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);
4375 
4376 	for (i = 0; i < ARRAY_SIZE(hci_cs_table); i++) {
4377 		if (hci_cs_table[i].op == *opcode) {
4378 			hci_cs_table[i].func(hdev, ev->status);
4379 			break;
4380 		}
4381 	}
4382 
4383 	handle_cmd_cnt_and_timer(hdev, ev->ncmd);
4384 
4385 	/* Indicate request completion if the command failed. Also, if
4386 	 * we're not waiting for a special event and we get a success
4387 	 * command status we should try to flag the request as completed
4388 	 * (since for this kind of command there will not be a command
4389 	 * complete event).
4390 	 */
4391 	if (ev->status || (hdev->sent_cmd && !hci_skb_event(hdev->sent_cmd))) {
4392 		hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
4393 				     req_complete_skb);
4394 		if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
4395 			bt_dev_err(hdev, "unexpected event for opcode 0x%4.4x",
4396 				   *opcode);
4397 			return;
4398 		}
4399 	}
4400 
4401 	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
4402 		queue_work(hdev->workqueue, &hdev->cmd_work);
4403 }
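
/* Illustrative flow: HCI_OP_CREATE_CONN never produces a Command Complete;
 * the controller answers with a Command Status (status 0x00) and the real
 * outcome arrives later in a Connection Complete event. A non-zero status,
 * by contrast, ends the request right here, which is why
 * hci_req_cmd_complete() above is also called when ev->status != 0.
 */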
4404 
4405 static void hci_hardware_error_evt(struct hci_dev *hdev, void *data,
4406 				   struct sk_buff *skb)
4407 {
4408 	struct hci_ev_hardware_error *ev = data;
4409 
4410 	bt_dev_dbg(hdev, "code 0x%2.2x", ev->code);
4411 
4412 	hdev->hw_error_code = ev->code;
4413 
4414 	queue_work(hdev->req_workqueue, &hdev->error_reset);
4415 }
4416 
4417 static void hci_role_change_evt(struct hci_dev *hdev, void *data,
4418 				struct sk_buff *skb)
4419 {
4420 	struct hci_ev_role_change *ev = data;
4421 	struct hci_conn *conn;
4422 
4423 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4424 
4425 	hci_dev_lock(hdev);
4426 
4427 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4428 	if (conn) {
4429 		if (!ev->status)
4430 			conn->role = ev->role;
4431 
4432 		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
4433 
4434 		hci_role_switch_cfm(conn, ev->status, ev->role);
4435 	}
4436 
4437 	hci_dev_unlock(hdev);
4438 }
4439 
4440 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data,
4441 				  struct sk_buff *skb)
4442 {
4443 	struct hci_ev_num_comp_pkts *ev = data;
4444 	int i;
4445 
4446 	if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_PKTS,
4447 			     flex_array_size(ev, handles, ev->num)))
4448 		return;
4449 
4450 	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
4451 		bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
4452 		return;
4453 	}
4454 
4455 	bt_dev_dbg(hdev, "num %d", ev->num);
4456 
4457 	for (i = 0; i < ev->num; i++) {
4458 		struct hci_comp_pkts_info *info = &ev->handles[i];
4459 		struct hci_conn *conn;
4460 		__u16  handle, count;
4461 
4462 		handle = __le16_to_cpu(info->handle);
4463 		count  = __le16_to_cpu(info->count);
4464 
4465 		conn = hci_conn_hash_lookup_handle(hdev, handle);
4466 		if (!conn)
4467 			continue;
4468 
4469 		conn->sent -= count;
4470 
4471 		switch (conn->type) {
4472 		case ACL_LINK:
4473 			hdev->acl_cnt += count;
4474 			if (hdev->acl_cnt > hdev->acl_pkts)
4475 				hdev->acl_cnt = hdev->acl_pkts;
4476 			break;
4477 
4478 		case LE_LINK:
4479 			if (hdev->le_pkts) {
4480 				hdev->le_cnt += count;
4481 				if (hdev->le_cnt > hdev->le_pkts)
4482 					hdev->le_cnt = hdev->le_pkts;
4483 			} else {
4484 				hdev->acl_cnt += count;
4485 				if (hdev->acl_cnt > hdev->acl_pkts)
4486 					hdev->acl_cnt = hdev->acl_pkts;
4487 			}
4488 			break;
4489 
4490 		case SCO_LINK:
4491 			hdev->sco_cnt += count;
4492 			if (hdev->sco_cnt > hdev->sco_pkts)
4493 				hdev->sco_cnt = hdev->sco_pkts;
4494 			break;
4495 
4496 		case ISO_LINK:
4497 			if (hdev->iso_pkts) {
4498 				hdev->iso_cnt += count;
4499 				if (hdev->iso_cnt > hdev->iso_pkts)
4500 					hdev->iso_cnt = hdev->iso_pkts;
4501 			} else if (hdev->le_pkts) {
4502 				hdev->le_cnt += count;
4503 				if (hdev->le_cnt > hdev->le_pkts)
4504 					hdev->le_cnt = hdev->le_pkts;
4505 			} else {
4506 				hdev->acl_cnt += count;
4507 				if (hdev->acl_cnt > hdev->acl_pkts)
4508 					hdev->acl_cnt = hdev->acl_pkts;
4509 			}
4510 			break;
4511 
4512 		default:
4513 			bt_dev_err(hdev, "unknown type %d conn %p",
4514 				   conn->type, conn);
4515 			break;
4516 		}
4517 	}
4518 
4519 	queue_work(hdev->workqueue, &hdev->tx_work);
4520 }
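
/* The per-link-type accounting above repeats one pattern: return @count
 * credits but never exceed the controller's advertised buffer count. As a
 * minimal sketch (hypothetical helper, not in this file), each case is
 * equivalent to:
 *
 *	static inline void hci_return_credits(unsigned int *cnt,
 *					      unsigned int max, __u16 count)
 *	{
 *		*cnt += count;
 *		if (*cnt > max)
 *			*cnt = max;	// clamp to controller capacity
 *	}
 *
 * ISO and LE links additionally fall back to the ACL pool when the
 * controller reported no dedicated buffers for them.
 */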
4521 
4522 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
4523 						 __u16 handle)
4524 {
4525 	struct hci_chan *chan;
4526 
4527 	switch (hdev->dev_type) {
4528 	case HCI_PRIMARY:
4529 		return hci_conn_hash_lookup_handle(hdev, handle);
4530 	case HCI_AMP:
4531 		chan = hci_chan_lookup_handle(hdev, handle);
4532 		if (chan)
4533 			return chan->conn;
4534 		break;
4535 	default:
4536 		bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
4537 		break;
4538 	}
4539 
4540 	return NULL;
4541 }
4542 
4543 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, void *data,
4544 				    struct sk_buff *skb)
4545 {
4546 	struct hci_ev_num_comp_blocks *ev = data;
4547 	int i;
4548 
4549 	if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_BLOCKS,
4550 			     flex_array_size(ev, handles, ev->num_hndl)))
4551 		return;
4552 
4553 	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
4554 		bt_dev_err(hdev, "wrong event for mode %d",
4555 			   hdev->flow_ctl_mode);
4556 		return;
4557 	}
4558 
4559 	bt_dev_dbg(hdev, "num_blocks %d num_hndl %d", ev->num_blocks,
4560 		   ev->num_hndl);
4561 
4562 	for (i = 0; i < ev->num_hndl; i++) {
4563 		struct hci_comp_blocks_info *info = &ev->handles[i];
4564 		struct hci_conn *conn = NULL;
4565 		__u16  handle, block_count;
4566 
4567 		handle = __le16_to_cpu(info->handle);
4568 		block_count = __le16_to_cpu(info->blocks);
4569 
4570 		conn = __hci_conn_lookup_handle(hdev, handle);
4571 		if (!conn)
4572 			continue;
4573 
4574 		conn->sent -= block_count;
4575 
4576 		switch (conn->type) {
4577 		case ACL_LINK:
4578 		case AMP_LINK:
4579 			hdev->block_cnt += block_count;
4580 			if (hdev->block_cnt > hdev->num_blocks)
4581 				hdev->block_cnt = hdev->num_blocks;
4582 			break;
4583 
4584 		default:
4585 			bt_dev_err(hdev, "unknown type %d conn %p",
4586 				   conn->type, conn);
4587 			break;
4588 		}
4589 	}
4590 
4591 	queue_work(hdev->workqueue, &hdev->tx_work);
4592 }
4593 
4594 static void hci_mode_change_evt(struct hci_dev *hdev, void *data,
4595 				struct sk_buff *skb)
4596 {
4597 	struct hci_ev_mode_change *ev = data;
4598 	struct hci_conn *conn;
4599 
4600 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4601 
4602 	hci_dev_lock(hdev);
4603 
4604 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4605 	if (conn) {
4606 		conn->mode = ev->mode;
4607 
4608 		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
4609 					&conn->flags)) {
4610 			if (conn->mode == HCI_CM_ACTIVE)
4611 				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4612 			else
4613 				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4614 		}
4615 
4616 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
4617 			hci_sco_setup(conn, ev->status);
4618 	}
4619 
4620 	hci_dev_unlock(hdev);
4621 }
4622 
4623 static void hci_pin_code_request_evt(struct hci_dev *hdev, void *data,
4624 				     struct sk_buff *skb)
4625 {
4626 	struct hci_ev_pin_code_req *ev = data;
4627 	struct hci_conn *conn;
4628 
4629 	bt_dev_dbg(hdev, "");
4630 
4631 	hci_dev_lock(hdev);
4632 
4633 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4634 	if (!conn)
4635 		goto unlock;
4636 
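	/* Holding and immediately dropping the connection below re-arms
	 * the idle disconnect timer with the updated disc_timeout,
	 * effectively keeping the link alive for HCI_PAIRING_TIMEOUT
	 * while the PIN exchange is in progress.
	 */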
4637 	if (conn->state == BT_CONNECTED) {
4638 		hci_conn_hold(conn);
4639 		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
4640 		hci_conn_drop(conn);
4641 	}
4642 
4643 	if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
4644 	    !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
4645 		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
4646 			     sizeof(ev->bdaddr), &ev->bdaddr);
4647 	} else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
4648 		u8 secure;
4649 
4650 		if (conn->pending_sec_level == BT_SECURITY_HIGH)
4651 			secure = 1;
4652 		else
4653 			secure = 0;
4654 
4655 		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
4656 	}
4657 
4658 unlock:
4659 	hci_dev_unlock(hdev);
4660 }
4661 
4662 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
4663 {
4664 	if (key_type == HCI_LK_CHANGED_COMBINATION)
4665 		return;
4666 
4667 	conn->pin_length = pin_len;
4668 	conn->key_type = key_type;
4669 
4670 	switch (key_type) {
4671 	case HCI_LK_LOCAL_UNIT:
4672 	case HCI_LK_REMOTE_UNIT:
4673 	case HCI_LK_DEBUG_COMBINATION:
4674 		return;
4675 	case HCI_LK_COMBINATION:
4676 		if (pin_len == 16)
4677 			conn->pending_sec_level = BT_SECURITY_HIGH;
4678 		else
4679 			conn->pending_sec_level = BT_SECURITY_MEDIUM;
4680 		break;
4681 	case HCI_LK_UNAUTH_COMBINATION_P192:
4682 	case HCI_LK_UNAUTH_COMBINATION_P256:
4683 		conn->pending_sec_level = BT_SECURITY_MEDIUM;
4684 		break;
4685 	case HCI_LK_AUTH_COMBINATION_P192:
4686 		conn->pending_sec_level = BT_SECURITY_HIGH;
4687 		break;
4688 	case HCI_LK_AUTH_COMBINATION_P256:
4689 		conn->pending_sec_level = BT_SECURITY_FIPS;
4690 		break;
4691 	}
4692 }
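
/* Summary of the mapping above (link-key type -> pending security level):
 * unauthenticated combination keys cap at BT_SECURITY_MEDIUM, an
 * authenticated P-192 key yields BT_SECURITY_HIGH and an authenticated
 * P-256 key (Secure Connections) yields BT_SECURITY_FIPS. A legacy
 * combination key with a full 16-character PIN is also treated as HIGH,
 * presumably because such a PIN offers comparable strength.
 */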
4693 
4694 static void hci_link_key_request_evt(struct hci_dev *hdev, void *data,
4695 				     struct sk_buff *skb)
4696 {
4697 	struct hci_ev_link_key_req *ev = data;
4698 	struct hci_cp_link_key_reply cp;
4699 	struct hci_conn *conn;
4700 	struct link_key *key;
4701 
4702 	bt_dev_dbg(hdev, "");
4703 
4704 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
4705 		return;
4706 
4707 	hci_dev_lock(hdev);
4708 
4709 	key = hci_find_link_key(hdev, &ev->bdaddr);
4710 	if (!key) {
4711 		bt_dev_dbg(hdev, "link key not found for %pMR", &ev->bdaddr);
4712 		goto not_found;
4713 	}
4714 
4715 	bt_dev_dbg(hdev, "found key type %u for %pMR", key->type, &ev->bdaddr);
4716 
4717 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4718 	if (conn) {
4719 		clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
4720 
4721 		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
4722 		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
4723 		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
4724 			bt_dev_dbg(hdev, "ignoring unauthenticated key");
4725 			goto not_found;
4726 		}
4727 
4728 		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
4729 		    (conn->pending_sec_level == BT_SECURITY_HIGH ||
4730 		     conn->pending_sec_level == BT_SECURITY_FIPS)) {
4731 			bt_dev_dbg(hdev, "ignoring key unauthenticated for high security");
4732 			goto not_found;
4733 		}
4734 
4735 		conn_set_key(conn, key->type, key->pin_len);
4736 	}
4737 
4738 	bacpy(&cp.bdaddr, &ev->bdaddr);
4739 	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
4740 
4741 	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
4742 
4743 	hci_dev_unlock(hdev);
4744 
4745 	return;
4746 
4747 not_found:
4748 	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
4749 	hci_dev_unlock(hdev);
4750 }
4751 
4752 static void hci_link_key_notify_evt(struct hci_dev *hdev, void *data,
4753 				    struct sk_buff *skb)
4754 {
4755 	struct hci_ev_link_key_notify *ev = data;
4756 	struct hci_conn *conn;
4757 	struct link_key *key;
4758 	bool persistent;
4759 	u8 pin_len = 0;
4760 
4761 	bt_dev_dbg(hdev, "");
4762 
4763 	hci_dev_lock(hdev);
4764 
4765 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4766 	if (!conn)
4767 		goto unlock;
4768 
4769 	/* Ignore a NULL link key to guard against CVE-2020-26555 */
4770 	if (!crypto_memneq(ev->link_key, ZERO_KEY, HCI_LINK_KEY_SIZE)) {
4771 		bt_dev_dbg(hdev, "Ignore NULL link key (ZERO KEY) for %pMR",
4772 			   &ev->bdaddr);
4773 		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
4774 		hci_conn_drop(conn);
4775 		goto unlock;
4776 	}
4777 
4778 	hci_conn_hold(conn);
4779 	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
4780 	hci_conn_drop(conn);
4781 
4782 	set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
4783 	conn_set_key(conn, ev->key_type, conn->pin_length);
4784 
4785 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
4786 		goto unlock;
4787 
4788 	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
4789 			        ev->key_type, pin_len, &persistent);
4790 	if (!key)
4791 		goto unlock;
4792 
4793 	/* Update connection information since adding the key will have
4794 	 * fixed up the type in the case of changed combination keys.
4795 	 */
4796 	if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
4797 		conn_set_key(conn, key->type, key->pin_len);
4798 
4799 	mgmt_new_link_key(hdev, key, persistent);
4800 
4801 	/* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
4802 	 * is set. If it's not set, simply remove the key from the kernel
4803 	 * list (we've still notified user space about it but with
4804 	 * store_hint being 0).
4805 	 */
4806 	if (key->type == HCI_LK_DEBUG_COMBINATION &&
4807 	    !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
4808 		list_del_rcu(&key->list);
4809 		kfree_rcu(key, rcu);
4810 		goto unlock;
4811 	}
4812 
4813 	if (persistent)
4814 		clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4815 	else
4816 		set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4817 
4818 unlock:
4819 	hci_dev_unlock(hdev);
4820 }
4821 
4822 static void hci_clock_offset_evt(struct hci_dev *hdev, void *data,
4823 				 struct sk_buff *skb)
4824 {
4825 	struct hci_ev_clock_offset *ev = data;
4826 	struct hci_conn *conn;
4827 
4828 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4829 
4830 	hci_dev_lock(hdev);
4831 
4832 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4833 	if (conn && !ev->status) {
4834 		struct inquiry_entry *ie;
4835 
4836 		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4837 		if (ie) {
4838 			ie->data.clock_offset = ev->clock_offset;
4839 			ie->timestamp = jiffies;
4840 		}
4841 	}
4842 
4843 	hci_dev_unlock(hdev);
4844 }
4845 
4846 static void hci_pkt_type_change_evt(struct hci_dev *hdev, void *data,
4847 				    struct sk_buff *skb)
4848 {
4849 	struct hci_ev_pkt_type_change *ev = data;
4850 	struct hci_conn *conn;
4851 
4852 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4853 
4854 	hci_dev_lock(hdev);
4855 
4856 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4857 	if (conn && !ev->status)
4858 		conn->pkt_type = __le16_to_cpu(ev->pkt_type);
4859 
4860 	hci_dev_unlock(hdev);
4861 }
4862 
4863 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, void *data,
4864 				   struct sk_buff *skb)
4865 {
4866 	struct hci_ev_pscan_rep_mode *ev = data;
4867 	struct inquiry_entry *ie;
4868 
4869 	bt_dev_dbg(hdev, "");
4870 
4871 	hci_dev_lock(hdev);
4872 
4873 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4874 	if (ie) {
4875 		ie->data.pscan_rep_mode = ev->pscan_rep_mode;
4876 		ie->timestamp = jiffies;
4877 	}
4878 
4879 	hci_dev_unlock(hdev);
4880 }
4881 
4882 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, void *edata,
4883 					     struct sk_buff *skb)
4884 {
4885 	struct hci_ev_inquiry_result_rssi *ev = edata;
4886 	struct inquiry_data data;
4887 	int i;
4888 
4889 	bt_dev_dbg(hdev, "num_rsp %d", ev->num);
4890 
4891 	if (!ev->num)
4892 		return;
4893 
4894 	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
4895 		return;
4896 
4897 	hci_dev_lock(hdev);
4898 
4899 	if (skb->len == array_size(ev->num,
4900 				   sizeof(struct inquiry_info_rssi_pscan))) {
4901 		struct inquiry_info_rssi_pscan *info;
4902 
4903 		for (i = 0; i < ev->num; i++) {
4904 			u32 flags;
4905 
4906 			info = hci_ev_skb_pull(hdev, skb,
4907 					       HCI_EV_INQUIRY_RESULT_WITH_RSSI,
4908 					       sizeof(*info));
4909 			if (!info) {
4910 				bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
4911 					   HCI_EV_INQUIRY_RESULT_WITH_RSSI);
4912 				goto unlock;
4913 			}
4914 
4915 			bacpy(&data.bdaddr, &info->bdaddr);
4916 			data.pscan_rep_mode	= info->pscan_rep_mode;
4917 			data.pscan_period_mode	= info->pscan_period_mode;
4918 			data.pscan_mode		= info->pscan_mode;
4919 			memcpy(data.dev_class, info->dev_class, 3);
4920 			data.clock_offset	= info->clock_offset;
4921 			data.rssi		= info->rssi;
4922 			data.ssp_mode		= 0x00;
4923 
4924 			flags = hci_inquiry_cache_update(hdev, &data, false);
4925 
4926 			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4927 					  info->dev_class, info->rssi,
4928 					  flags, NULL, 0, NULL, 0, 0);
4929 		}
4930 	} else if (skb->len == array_size(ev->num,
4931 					  sizeof(struct inquiry_info_rssi))) {
4932 		struct inquiry_info_rssi *info;
4933 
4934 		for (i = 0; i < ev->num; i++) {
4935 			u32 flags;
4936 
4937 			info = hci_ev_skb_pull(hdev, skb,
4938 					       HCI_EV_INQUIRY_RESULT_WITH_RSSI,
4939 					       sizeof(*info));
4940 			if (!info) {
4941 				bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
4942 					   HCI_EV_INQUIRY_RESULT_WITH_RSSI);
4943 				goto unlock;
4944 			}
4945 
4946 			bacpy(&data.bdaddr, &info->bdaddr);
4947 			data.pscan_rep_mode	= info->pscan_rep_mode;
4948 			data.pscan_period_mode	= info->pscan_period_mode;
4949 			data.pscan_mode		= 0x00;
4950 			memcpy(data.dev_class, info->dev_class, 3);
4951 			data.clock_offset	= info->clock_offset;
4952 			data.rssi		= info->rssi;
4953 			data.ssp_mode		= 0x00;
4954 
4955 			flags = hci_inquiry_cache_update(hdev, &data, false);
4956 
4957 			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4958 					  info->dev_class, info->rssi,
4959 					  flags, NULL, 0, NULL, 0, 0);
4960 		}
4961 	} else {
4962 		bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
4963 			   HCI_EV_INQUIRY_RESULT_WITH_RSSI);
4964 	}
4965 unlock:
4966 	hci_dev_unlock(hdev);
4967 }
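
/* The length check above distinguishes the two wire formats of this event:
 * struct inquiry_info_rssi is 14 bytes per response (bdaddr 6,
 * pscan_rep_mode 1, pscan_period_mode 1, dev_class 3, clock_offset 2,
 * rssi 1), while some controllers emit a non-standard 15-byte variant with
 * an extra pscan_mode byte, handled by the first branch.
 */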
4968 
4969 static void hci_remote_ext_features_evt(struct hci_dev *hdev, void *data,
4970 					struct sk_buff *skb)
4971 {
4972 	struct hci_ev_remote_ext_features *ev = data;
4973 	struct hci_conn *conn;
4974 
4975 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4976 
4977 	hci_dev_lock(hdev);
4978 
4979 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4980 	if (!conn)
4981 		goto unlock;
4982 
4983 	if (ev->page < HCI_MAX_PAGES)
4984 		memcpy(conn->features[ev->page], ev->features, 8);
4985 
4986 	if (!ev->status && ev->page == 0x01) {
4987 		struct inquiry_entry *ie;
4988 
4989 		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4990 		if (ie)
4991 			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4992 
4993 		if (ev->features[0] & LMP_HOST_SSP) {
4994 			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4995 		} else {
4996 			/* The Bluetooth specification mandates that Extended
4997 			 * Inquiry Results be used only when Secure Simple
4998 			 * Pairing is enabled, but some devices violate this.
4999 			 *
5000 			 * To make these devices work, the internal SSP
5001 			 * enabled flag needs to be cleared if the remote host
5002 			 * features do not indicate SSP support.
5003 			 */
5004 			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
5005 		}
5006 
5007 		if (ev->features[0] & LMP_HOST_SC)
5008 			set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
5009 	}
5010 
5011 	if (conn->state != BT_CONFIG)
5012 		goto unlock;
5013 
5014 	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
5015 		struct hci_cp_remote_name_req cp;
5016 		memset(&cp, 0, sizeof(cp));
5017 		bacpy(&cp.bdaddr, &conn->dst);
5018 		cp.pscan_rep_mode = 0x02;
5019 		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
5020 	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
5021 		mgmt_device_connected(hdev, conn, NULL, 0);
5022 
5023 	if (!hci_outgoing_auth_needed(hdev, conn)) {
5024 		conn->state = BT_CONNECTED;
5025 		hci_connect_cfm(conn, ev->status);
5026 		hci_conn_drop(conn);
5027 	}
5028 
5029 unlock:
5030 	hci_dev_unlock(hdev);
5031 }
5032 
5033 static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data,
5034 				       struct sk_buff *skb)
5035 {
5036 	struct hci_ev_sync_conn_complete *ev = data;
5037 	struct hci_conn *conn;
5038 	u8 status = ev->status;
5039 
5040 	switch (ev->link_type) {
5041 	case SCO_LINK:
5042 	case ESCO_LINK:
5043 		break;
5044 	default:
5045 		/* As per Core 5.3 Vol 4 Part E 7.7.35 (p.2219), Link_Type
5046 		 * for HCI_Synchronous_Connection_Complete is limited to
5047 		 * either SCO or eSCO
5048 		 */
5049 		bt_dev_err(hdev, "Ignoring connect complete event for invalid link type");
5050 		return;
5051 	}
5052 
5053 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
5054 
5055 	hci_dev_lock(hdev);
5056 
5057 	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
5058 	if (!conn) {
5059 		if (ev->link_type == ESCO_LINK)
5060 			goto unlock;
5061 
5062 		/* When the link type in the event indicates SCO connection
5063 		 * and lookup of the connection object fails, then check
5064 		 * if an eSCO connection object exists.
5065 		 *
5066 		 * The core limits the synchronous connections to either
5067 		 * SCO or eSCO. The eSCO connection is preferred and
5068 		 * attempted first; until it is successfully established,
5069 		 * the link type will be hinted as eSCO.
5070 		 */
5071 		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
5072 		if (!conn)
5073 			goto unlock;
5074 	}
5075 
5076 	/* The HCI_Synchronous_Connection_Complete event is only sent once per connection.
5077 	 * Processing it more than once per connection can corrupt kernel memory.
5078 	 *
5079 	 * As the connection handle is set here for the first time, it indicates
5080 	 * whether the connection is already set up.
5081 	 */
5082 	if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
5083 		bt_dev_err(hdev, "Ignoring HCI_Sync_Conn_Complete event for existing connection");
5084 		goto unlock;
5085 	}
5086 
5087 	switch (status) {
5088 	case 0x00:
5089 		status = hci_conn_set_handle(conn, __le16_to_cpu(ev->handle));
5090 		if (status) {
5091 			conn->state = BT_CLOSED;
5092 			break;
5093 		}
5094 
5095 		conn->state  = BT_CONNECTED;
5096 		conn->type   = ev->link_type;
5097 
5098 		hci_debugfs_create_conn(conn);
5099 		hci_conn_add_sysfs(conn);
5100 		break;
5101 
5102 	case 0x10:	/* Connection Accept Timeout */
5103 	case 0x0d:	/* Connection Rejected due to Limited Resources */
5104 	case 0x11:	/* Unsupported Feature or Parameter Value */
5105 	case 0x1c:	/* SCO interval rejected */
5106 	case 0x1a:	/* Unsupported Remote Feature */
5107 	case 0x1e:	/* Invalid LMP Parameters */
5108 	case 0x1f:	/* Unspecified error */
5109 	case 0x20:	/* Unsupported LMP Parameter value */
5110 		if (conn->out) {
5111 			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
5112 					(hdev->esco_type & EDR_ESCO_MASK);
5113 			if (hci_setup_sync(conn, conn->parent->handle))
5114 				goto unlock;
5115 		}
5116 		fallthrough;
5117 
5118 	default:
5119 		conn->state = BT_CLOSED;
5120 		break;
5121 	}
5122 
5123 	bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode);
5124 	/* Notify only for SCO over the HCI transport data path (value
5125 	 * zero); a non-zero value indicates a non-HCI transport data path.
5126 	 */
5127 	if (conn->codec.data_path == 0 && hdev->notify) {
5128 		switch (ev->air_mode) {
5129 		case 0x02:
5130 			hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
5131 			break;
5132 		case 0x03:
5133 			hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP);
5134 			break;
5135 		}
5136 	}
5137 
5138 	hci_connect_cfm(conn, status);
5139 	if (status)
5140 		hci_conn_del(conn);
5141 
5142 unlock:
5143 	hci_dev_unlock(hdev);
5144 }
5145 
5146 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
5147 {
5148 	size_t parsed = 0;
5149 
5150 	while (parsed < eir_len) {
5151 		u8 field_len = eir[0];
5152 
5153 		if (field_len == 0)
5154 			return parsed;
5155 
5156 		parsed += field_len + 1;
5157 		eir += field_len + 1;
5158 	}
5159 
5160 	return eir_len;
5161 }
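
/* EIR data is a sequence of length-prefixed fields, where each length byte
 * covers the type byte plus the payload. For example, a Complete Local Name
 * of "BT" is encoded as:
 *
 *	0x03 0x09 'B' 'T'	// len=3: type (0x09) + 2 name bytes
 *	0x00 ...		// len=0 terminates the significant part
 *
 * which is why the loop above stops at a zero field_len and otherwise
 * advances by field_len + 1.
 */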
5162 
5163 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, void *edata,
5164 					    struct sk_buff *skb)
5165 {
5166 	struct hci_ev_ext_inquiry_result *ev = edata;
5167 	struct inquiry_data data;
5168 	size_t eir_len;
5169 	int i;
5170 
5171 	if (!hci_ev_skb_pull(hdev, skb, HCI_EV_EXTENDED_INQUIRY_RESULT,
5172 			     flex_array_size(ev, info, ev->num)))
5173 		return;
5174 
5175 	bt_dev_dbg(hdev, "num %d", ev->num);
5176 
5177 	if (!ev->num)
5178 		return;
5179 
5180 	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
5181 		return;
5182 
5183 	hci_dev_lock(hdev);
5184 
5185 	for (i = 0; i < ev->num; i++) {
5186 		struct extended_inquiry_info *info = &ev->info[i];
5187 		u32 flags;
5188 		bool name_known;
5189 
5190 		bacpy(&data.bdaddr, &info->bdaddr);
5191 		data.pscan_rep_mode	= info->pscan_rep_mode;
5192 		data.pscan_period_mode	= info->pscan_period_mode;
5193 		data.pscan_mode		= 0x00;
5194 		memcpy(data.dev_class, info->dev_class, 3);
5195 		data.clock_offset	= info->clock_offset;
5196 		data.rssi		= info->rssi;
5197 		data.ssp_mode		= 0x01;
5198 
5199 		if (hci_dev_test_flag(hdev, HCI_MGMT))
5200 			name_known = eir_get_data(info->data,
5201 						  sizeof(info->data),
5202 						  EIR_NAME_COMPLETE, NULL);
5203 		else
5204 			name_known = true;
5205 
5206 		flags = hci_inquiry_cache_update(hdev, &data, name_known);
5207 
5208 		eir_len = eir_get_length(info->data, sizeof(info->data));
5209 
5210 		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
5211 				  info->dev_class, info->rssi,
5212 				  flags, info->data, eir_len, NULL, 0, 0);
5213 	}
5214 
5215 	hci_dev_unlock(hdev);
5216 }
5217 
5218 static void hci_key_refresh_complete_evt(struct hci_dev *hdev, void *data,
5219 					 struct sk_buff *skb)
5220 {
5221 	struct hci_ev_key_refresh_complete *ev = data;
5222 	struct hci_conn *conn;
5223 
5224 	bt_dev_dbg(hdev, "status 0x%2.2x handle 0x%4.4x", ev->status,
5225 		   __le16_to_cpu(ev->handle));
5226 
5227 	hci_dev_lock(hdev);
5228 
5229 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5230 	if (!conn)
5231 		goto unlock;
5232 
5233 	/* For BR/EDR the necessary steps are taken through the
5234 	 * auth_complete event.
5235 	 */
5236 	if (conn->type != LE_LINK)
5237 		goto unlock;
5238 
5239 	if (!ev->status)
5240 		conn->sec_level = conn->pending_sec_level;
5241 
5242 	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
5243 
5244 	if (ev->status && conn->state == BT_CONNECTED) {
5245 		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
5246 		hci_conn_drop(conn);
5247 		goto unlock;
5248 	}
5249 
5250 	if (conn->state == BT_CONFIG) {
5251 		if (!ev->status)
5252 			conn->state = BT_CONNECTED;
5253 
5254 		hci_connect_cfm(conn, ev->status);
5255 		hci_conn_drop(conn);
5256 	} else {
5257 		hci_auth_cfm(conn, ev->status);
5258 
5259 		hci_conn_hold(conn);
5260 		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
5261 		hci_conn_drop(conn);
5262 	}
5263 
5264 unlock:
5265 	hci_dev_unlock(hdev);
5266 }
5267 
5268 static u8 hci_get_auth_req(struct hci_conn *conn)
5269 {
5270 	/* If remote requests no-bonding follow that lead */
5271 	if (conn->remote_auth == HCI_AT_NO_BONDING ||
5272 	    conn->remote_auth == HCI_AT_NO_BONDING_MITM)
5273 		return conn->remote_auth | (conn->auth_type & 0x01);
5274 
5275 	/* If both remote and local have enough IO capabilities, require
5276 	 * MITM protection
5277 	 */
5278 	if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
5279 	    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
5280 		return conn->remote_auth | 0x01;
5281 
5282 	/* No MITM protection possible so ignore remote requirement */
5283 	return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
5284 }
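
/* In the authentication requirements above, bit 0 encodes the MITM
 * requirement (e.g. HCI_AT_GENERAL_BONDING is 0x04 and
 * HCI_AT_GENERAL_BONDING_MITM is 0x05). So, for example, a remote
 * requesting general bonding while both sides have usable IO capabilities
 * results in 0x04 | 0x01 == 0x05, i.e. general bonding with MITM.
 */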
5285 
5286 static u8 bredr_oob_data_present(struct hci_conn *conn)
5287 {
5288 	struct hci_dev *hdev = conn->hdev;
5289 	struct oob_data *data;
5290 
5291 	data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
5292 	if (!data)
5293 		return 0x00;
5294 
5295 	if (bredr_sc_enabled(hdev)) {
5296 		/* When Secure Connections is enabled, then just
5297 		 * return the present value stored with the OOB
5298 		 * data. The stored value contains the right present
5299 		 * information. However, it can only be trusted when
5300 		 * not in Secure Connections Only mode.
5301 		 */
5302 		if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
5303 			return data->present;
5304 
5305 		/* When Secure Connections Only mode is enabled, then
5306 		 * the P-256 values are required. If they are not
5307 		 * available, then do not declare that OOB data is
5308 		 * present.
5309 		 */
5310 		if (!crypto_memneq(data->rand256, ZERO_KEY, 16) ||
5311 		    !crypto_memneq(data->hash256, ZERO_KEY, 16))
5312 			return 0x00;
5313 
5314 		return 0x02;
5315 	}
5316 
5317 	/* When Secure Connections is not enabled or not actually
5318 	 * supported by the hardware, then check whether the
5319 	 * P-192 data values are present.
5320 	 */
5321 	if (!crypto_memneq(data->rand192, ZERO_KEY, 16) ||
5322 	    !crypto_memneq(data->hash192, ZERO_KEY, 16))
5323 		return 0x00;
5324 
5325 	return 0x01;
5326 }
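
/* The return value feeds the OOB_Data_Present field of the IO Capability
 * Reply: 0x00 means no OOB data, 0x01 means P-192 data is available and
 * 0x02 means P-256 data is available, matching the checks above.
 */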
5327 
5328 static void hci_io_capa_request_evt(struct hci_dev *hdev, void *data,
5329 				    struct sk_buff *skb)
5330 {
5331 	struct hci_ev_io_capa_request *ev = data;
5332 	struct hci_conn *conn;
5333 
5334 	bt_dev_dbg(hdev, "");
5335 
5336 	hci_dev_lock(hdev);
5337 
5338 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5339 	if (!conn || !hci_conn_ssp_enabled(conn))
5340 		goto unlock;
5341 
5342 	hci_conn_hold(conn);
5343 
5344 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
5345 		goto unlock;
5346 
5347 	/* Allow pairing if we're pairable, if we are the initiator of
5348 	 * the pairing, or if the remote is not requesting bonding.
5349 	 */
5350 	if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
5351 	    test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
5352 	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
5353 		struct hci_cp_io_capability_reply cp;
5354 
5355 		bacpy(&cp.bdaddr, &ev->bdaddr);
5356 		/* Change the IO capability from KeyboardDisplay to DisplayYesNo,
5357 		 * as KeyboardDisplay is not supported by the BT spec. */
5358 		cp.capability = (conn->io_capability == 0x04) ?
5359 				HCI_IO_DISPLAY_YESNO : conn->io_capability;
5360 
5361 		/* If we are initiators, there is no remote information yet */
5362 		if (conn->remote_auth == 0xff) {
5363 			/* Request MITM protection if our IO caps allow it
5364 			 * except for the no-bonding case.
5365 			 */
5366 			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
5367 			    conn->auth_type != HCI_AT_NO_BONDING)
5368 				conn->auth_type |= 0x01;
5369 		} else {
5370 			conn->auth_type = hci_get_auth_req(conn);
5371 		}
5372 
5373 		/* If we're not bondable, force one of the non-bondable
5374 		 * authentication requirement values.
5375 		 */
5376 		if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
5377 			conn->auth_type &= HCI_AT_NO_BONDING_MITM;
5378 
5379 		cp.authentication = conn->auth_type;
5380 		cp.oob_data = bredr_oob_data_present(conn);
5381 
5382 		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
5383 			     sizeof(cp), &cp);
5384 	} else {
5385 		struct hci_cp_io_capability_neg_reply cp;
5386 
5387 		bacpy(&cp.bdaddr, &ev->bdaddr);
5388 		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
5389 
5390 		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
5391 			     sizeof(cp), &cp);
5392 	}
5393 
5394 unlock:
5395 	hci_dev_unlock(hdev);
5396 }
5397 
5398 static void hci_io_capa_reply_evt(struct hci_dev *hdev, void *data,
5399 				  struct sk_buff *skb)
5400 {
5401 	struct hci_ev_io_capa_reply *ev = data;
5402 	struct hci_conn *conn;
5403 
5404 	bt_dev_dbg(hdev, "");
5405 
5406 	hci_dev_lock(hdev);
5407 
5408 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5409 	if (!conn)
5410 		goto unlock;
5411 
5412 	conn->remote_cap = ev->capability;
5413 	conn->remote_auth = ev->authentication;
5414 
5415 unlock:
5416 	hci_dev_unlock(hdev);
5417 }
5418 
5419 static void hci_user_confirm_request_evt(struct hci_dev *hdev, void *data,
5420 					 struct sk_buff *skb)
5421 {
5422 	struct hci_ev_user_confirm_req *ev = data;
5423 	int loc_mitm, rem_mitm, confirm_hint = 0;
5424 	struct hci_conn *conn;
5425 
5426 	bt_dev_dbg(hdev, "");
5427 
5428 	hci_dev_lock(hdev);
5429 
5430 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
5431 		goto unlock;
5432 
5433 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5434 	if (!conn)
5435 		goto unlock;
5436 
5437 	loc_mitm = (conn->auth_type & 0x01);
5438 	rem_mitm = (conn->remote_auth & 0x01);
5439 
5440 	/* If we require MITM but the remote device can't provide that
5441 	 * (it has NoInputNoOutput) then reject the confirmation
5442 	 * request. We check the security level here since it doesn't
5443 	 * necessarily match conn->auth_type.
5444 	 */
5445 	if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
5446 	    conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
5447 		bt_dev_dbg(hdev, "Rejecting request: remote device can't provide MITM");
5448 		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
5449 			     sizeof(ev->bdaddr), &ev->bdaddr);
5450 		goto unlock;
5451 	}
5452 
5453 	/* If neither side requires MITM protection, auto-accept */
5454 	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
5455 	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
5456 
5457 		/* If we're not the initiator, request authorization to
5458 		 * proceed from user space (mgmt_user_confirm with
5459 		 * confirm_hint set to 1). The exception is if neither
5460 		 * side requires MITM or if the local IO capability is
5461 		 * NoInputNoOutput, in which case we do auto-accept.
5462 		 */
5463 		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
5464 		    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
5465 		    (loc_mitm || rem_mitm)) {
5466 			bt_dev_dbg(hdev, "Confirming auto-accept as acceptor");
5467 			confirm_hint = 1;
5468 			goto confirm;
5469 		}
5470 
5471 		/* If a link key already exists on the local host, leave the
5472 		 * decision to user space since the remote device could be
5473 		 * legitimate or malicious.
5474 		 */
5475 		if (hci_find_link_key(hdev, &ev->bdaddr)) {
5476 			bt_dev_dbg(hdev, "Local host already has link key");
5477 			confirm_hint = 1;
5478 			goto confirm;
5479 		}
5480 
5481 		BT_DBG("Auto-accept of user confirmation with %ums delay",
5482 		       hdev->auto_accept_delay);
5483 
5484 		if (hdev->auto_accept_delay > 0) {
5485 			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
5486 			queue_delayed_work(conn->hdev->workqueue,
5487 					   &conn->auto_accept_work, delay);
5488 			goto unlock;
5489 		}
5490 
5491 		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
5492 			     sizeof(ev->bdaddr), &ev->bdaddr);
5493 		goto unlock;
5494 	}
5495 
5496 confirm:
5497 	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
5498 				  le32_to_cpu(ev->passkey), confirm_hint);
5499 
5500 unlock:
5501 	hci_dev_unlock(hdev);
5502 }
5503 
5504 static void hci_user_passkey_request_evt(struct hci_dev *hdev, void *data,
5505 					 struct sk_buff *skb)
5506 {
5507 	struct hci_ev_user_passkey_req *ev = data;
5508 
5509 	bt_dev_dbg(hdev, "");
5510 
5511 	if (hci_dev_test_flag(hdev, HCI_MGMT))
5512 		mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
5513 }
5514 
5515 static void hci_user_passkey_notify_evt(struct hci_dev *hdev, void *data,
5516 					struct sk_buff *skb)
5517 {
5518 	struct hci_ev_user_passkey_notify *ev = data;
5519 	struct hci_conn *conn;
5520 
5521 	bt_dev_dbg(hdev, "");
5522 
5523 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5524 	if (!conn)
5525 		return;
5526 
5527 	conn->passkey_notify = __le32_to_cpu(ev->passkey);
5528 	conn->passkey_entered = 0;
5529 
5530 	if (hci_dev_test_flag(hdev, HCI_MGMT))
5531 		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5532 					 conn->dst_type, conn->passkey_notify,
5533 					 conn->passkey_entered);
5534 }
5535 
5536 static void hci_keypress_notify_evt(struct hci_dev *hdev, void *data,
5537 				    struct sk_buff *skb)
5538 {
5539 	struct hci_ev_keypress_notify *ev = data;
5540 	struct hci_conn *conn;
5541 
5542 	bt_dev_dbg(hdev, "");
5543 
5544 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5545 	if (!conn)
5546 		return;
5547 
5548 	switch (ev->type) {
5549 	case HCI_KEYPRESS_STARTED:
5550 		conn->passkey_entered = 0;
5551 		return;
5552 
5553 	case HCI_KEYPRESS_ENTERED:
5554 		conn->passkey_entered++;
5555 		break;
5556 
5557 	case HCI_KEYPRESS_ERASED:
5558 		conn->passkey_entered--;
5559 		break;
5560 
5561 	case HCI_KEYPRESS_CLEARED:
5562 		conn->passkey_entered = 0;
5563 		break;
5564 
5565 	case HCI_KEYPRESS_COMPLETED:
5566 		return;
5567 	}
5568 
5569 	if (hci_dev_test_flag(hdev, HCI_MGMT))
5570 		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5571 					 conn->dst_type, conn->passkey_notify,
5572 					 conn->passkey_entered);
5573 }
5574 
5575 static void hci_simple_pair_complete_evt(struct hci_dev *hdev, void *data,
5576 					 struct sk_buff *skb)
5577 {
5578 	struct hci_ev_simple_pair_complete *ev = data;
5579 	struct hci_conn *conn;
5580 
5581 	bt_dev_dbg(hdev, "");
5582 
5583 	hci_dev_lock(hdev);
5584 
5585 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5586 	if (!conn || !hci_conn_ssp_enabled(conn))
5587 		goto unlock;
5588 
5589 	/* Reset the authentication requirement to unknown */
5590 	conn->remote_auth = 0xff;
5591 
5592 	/* To avoid duplicate auth_failed events to user space we check
5593 	 * the HCI_CONN_AUTH_PEND flag, which will be set if we
5594 	 * initiated the authentication. A traditional auth_complete
5595 	 * event is always produced as initiator and is also mapped to
5596 	 * the mgmt_auth_failed event. */
5597 	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
5598 		mgmt_auth_failed(conn, ev->status);
5599 
5600 	hci_conn_drop(conn);
5601 
5602 unlock:
5603 	hci_dev_unlock(hdev);
5604 }
5605 
5606 static void hci_remote_host_features_evt(struct hci_dev *hdev, void *data,
5607 					 struct sk_buff *skb)
5608 {
5609 	struct hci_ev_remote_host_features *ev = data;
5610 	struct inquiry_entry *ie;
5611 	struct hci_conn *conn;
5612 
5613 	bt_dev_dbg(hdev, "");
5614 
5615 	hci_dev_lock(hdev);
5616 
5617 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5618 	if (conn)
5619 		memcpy(conn->features[1], ev->features, 8);
5620 
5621 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
5622 	if (ie)
5623 		ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
5624 
5625 	hci_dev_unlock(hdev);
5626 }
5627 
5628 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev, void *edata,
5629 					    struct sk_buff *skb)
5630 {
5631 	struct hci_ev_remote_oob_data_request *ev = edata;
5632 	struct oob_data *data;
5633 
5634 	bt_dev_dbg(hdev, "");
5635 
5636 	hci_dev_lock(hdev);
5637 
5638 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
5639 		goto unlock;
5640 
5641 	data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
5642 	if (!data) {
5643 		struct hci_cp_remote_oob_data_neg_reply cp;
5644 
5645 		bacpy(&cp.bdaddr, &ev->bdaddr);
5646 		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
5647 			     sizeof(cp), &cp);
5648 		goto unlock;
5649 	}
5650 
5651 	if (bredr_sc_enabled(hdev)) {
5652 		struct hci_cp_remote_oob_ext_data_reply cp;
5653 
5654 		bacpy(&cp.bdaddr, &ev->bdaddr);
5655 		if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
5656 			memset(cp.hash192, 0, sizeof(cp.hash192));
5657 			memset(cp.rand192, 0, sizeof(cp.rand192));
5658 		} else {
5659 			memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
5660 			memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
5661 		}
5662 		memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
5663 		memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
5664 
5665 		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
5666 			     sizeof(cp), &cp);
5667 	} else {
5668 		struct hci_cp_remote_oob_data_reply cp;
5669 
5670 		bacpy(&cp.bdaddr, &ev->bdaddr);
5671 		memcpy(cp.hash, data->hash192, sizeof(cp.hash));
5672 		memcpy(cp.rand, data->rand192, sizeof(cp.rand));
5673 
5674 		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
5675 			     sizeof(cp), &cp);
5676 	}
5677 
5678 unlock:
5679 	hci_dev_unlock(hdev);
5680 }
5681 
5682 #if IS_ENABLED(CONFIG_BT_HS)
5683 static void hci_chan_selected_evt(struct hci_dev *hdev, void *data,
5684 				  struct sk_buff *skb)
5685 {
5686 	struct hci_ev_channel_selected *ev = data;
5687 	struct hci_conn *hcon;
5688 
5689 	bt_dev_dbg(hdev, "handle 0x%2.2x", ev->phy_handle);
5690 
5691 	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5692 	if (!hcon)
5693 		return;
5694 
5695 	amp_read_loc_assoc_final_data(hdev, hcon);
5696 }
5697 
5698 static void hci_phy_link_complete_evt(struct hci_dev *hdev, void *data,
5699 				      struct sk_buff *skb)
5700 {
5701 	struct hci_ev_phy_link_complete *ev = data;
5702 	struct hci_conn *hcon, *bredr_hcon;
5703 
5704 	bt_dev_dbg(hdev, "handle 0x%2.2x status 0x%2.2x", ev->phy_handle,
5705 		   ev->status);
5706 
5707 	hci_dev_lock(hdev);
5708 
5709 	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5710 	if (!hcon)
5711 		goto unlock;
5712 
5713 	if (!hcon->amp_mgr)
5714 		goto unlock;
5715 
5716 	if (ev->status) {
5717 		hci_conn_del(hcon);
5718 		goto unlock;
5719 	}
5720 
5721 	bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
5722 
5723 	hcon->state = BT_CONNECTED;
5724 	bacpy(&hcon->dst, &bredr_hcon->dst);
5725 
5726 	hci_conn_hold(hcon);
5727 	hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
5728 	hci_conn_drop(hcon);
5729 
5730 	hci_debugfs_create_conn(hcon);
5731 	hci_conn_add_sysfs(hcon);
5732 
5733 	amp_physical_cfm(bredr_hcon, hcon);
5734 
5735 unlock:
5736 	hci_dev_unlock(hdev);
5737 }
5738 
5739 static void hci_loglink_complete_evt(struct hci_dev *hdev, void *data,
5740 				     struct sk_buff *skb)
5741 {
5742 	struct hci_ev_logical_link_complete *ev = data;
5743 	struct hci_conn *hcon;
5744 	struct hci_chan *hchan;
5745 	struct amp_mgr *mgr;
5746 
5747 	bt_dev_dbg(hdev, "log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
5748 		   le16_to_cpu(ev->handle), ev->phy_handle, ev->status);
5749 
5750 	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5751 	if (!hcon)
5752 		return;
5753 
5754 	/* Create AMP hchan */
5755 	hchan = hci_chan_create(hcon);
5756 	if (!hchan)
5757 		return;
5758 
5759 	hchan->handle = le16_to_cpu(ev->handle);
5760 	hchan->amp = true;
5761 
5762 	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
5763 
5764 	mgr = hcon->amp_mgr;
5765 	if (mgr && mgr->bredr_chan) {
5766 		struct l2cap_chan *bredr_chan = mgr->bredr_chan;
5767 
5768 		l2cap_chan_lock(bredr_chan);
5769 
5770 		bredr_chan->conn->mtu = hdev->block_mtu;
5771 		l2cap_logical_cfm(bredr_chan, hchan, 0);
5772 		hci_conn_hold(hcon);
5773 
5774 		l2cap_chan_unlock(bredr_chan);
5775 	}
5776 }
5777 
5778 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev, void *data,
5779 					     struct sk_buff *skb)
5780 {
5781 	struct hci_ev_disconn_logical_link_complete *ev = data;
5782 	struct hci_chan *hchan;
5783 
5784 	bt_dev_dbg(hdev, "handle 0x%4.4x status 0x%2.2x",
5785 		   le16_to_cpu(ev->handle), ev->status);
5786 
5787 	if (ev->status)
5788 		return;
5789 
5790 	hci_dev_lock(hdev);
5791 
5792 	hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
5793 	if (!hchan || !hchan->amp)
5794 		goto unlock;
5795 
5796 	amp_destroy_logical_link(hchan, ev->reason);
5797 
5798 unlock:
5799 	hci_dev_unlock(hdev);
5800 }
5801 
5802 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev, void *data,
5803 					     struct sk_buff *skb)
5804 {
5805 	struct hci_ev_disconn_phy_link_complete *ev = data;
5806 	struct hci_conn *hcon;
5807 
5808 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5809 
5810 	if (ev->status)
5811 		return;
5812 
5813 	hci_dev_lock(hdev);
5814 
5815 	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5816 	if (hcon && hcon->type == AMP_LINK) {
5817 		hcon->state = BT_CLOSED;
5818 		hci_disconn_cfm(hcon, ev->reason);
5819 		hci_conn_del(hcon);
5820 	}
5821 
5822 	hci_dev_unlock(hdev);
5823 }
5824 #endif
5825 
5826 static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr,
5827 				u8 bdaddr_type, bdaddr_t *local_rpa)
5828 {
5829 	if (conn->out) {
5830 		conn->dst_type = bdaddr_type;
5831 		conn->resp_addr_type = bdaddr_type;
5832 		bacpy(&conn->resp_addr, bdaddr);
5833 
5834 		/* If the controller has set a Local RPA, then it must be
5835 		 * used instead of hdev->rpa.
5836 		 */
5837 		if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5838 			conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5839 			bacpy(&conn->init_addr, local_rpa);
5840 		} else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) {
5841 			conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5842 			bacpy(&conn->init_addr, &conn->hdev->rpa);
5843 		} else {
5844 			hci_copy_identity_address(conn->hdev, &conn->init_addr,
5845 						  &conn->init_addr_type);
5846 		}
5847 	} else {
5848 		conn->resp_addr_type = conn->hdev->adv_addr_type;
5849 		/* If the controller has set a Local RPA, then it must be
5850 		 * used instead of hdev->rpa.
5851 		 */
5852 		if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5853 			conn->resp_addr_type = ADDR_LE_DEV_RANDOM;
5854 			bacpy(&conn->resp_addr, local_rpa);
5855 		} else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
5856 			/* In case of ext adv, resp_addr will be updated in
5857 			 * Adv Terminated event.
5858 			 */
5859 			if (!ext_adv_capable(conn->hdev))
5860 				bacpy(&conn->resp_addr,
5861 				      &conn->hdev->random_addr);
5862 		} else {
5863 			bacpy(&conn->resp_addr, &conn->hdev->bdaddr);
5864 		}
5865 
5866 		conn->init_addr_type = bdaddr_type;
5867 		bacpy(&conn->init_addr, bdaddr);
5868 
5869 		/* For incoming connections, set the default minimum
5870 		 * and maximum connection interval. They will be used
5871 		 * to check if the parameters are in range and if not
5872 		 * trigger the connection update procedure.
5873 		 */
5874 		conn->le_conn_min_interval = conn->hdev->le_conn_min_interval;
5875 		conn->le_conn_max_interval = conn->hdev->le_conn_max_interval;
5876 	}
5877 }
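
/* Illustration with hypothetical addresses (not part of the handler
 * above): for an outgoing connection with HCI_PRIVACY set and no
 * controller provided Local RPA, init_addr ends up as hdev->rpa and
 * resp_addr as the peer bdaddr from the event; for an incoming
 * connection the roles are mirrored and resp_addr is taken from the
 * local advertising address instead.
 */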
5878 
5879 static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
5880 				 bdaddr_t *bdaddr, u8 bdaddr_type,
5881 				 bdaddr_t *local_rpa, u8 role, u16 handle,
5882 				 u16 interval, u16 latency,
5883 				 u16 supervision_timeout)
5884 {
5885 	struct hci_conn_params *params;
5886 	struct hci_conn *conn;
5887 	struct smp_irk *irk;
5888 	u8 addr_type;
5889 
5890 	hci_dev_lock(hdev);
5891 
5892 	/* All controllers implicitly stop advertising in the event of a
5893 	 * connection, so ensure that the state bit is cleared.
5894 	 */
5895 	hci_dev_clear_flag(hdev, HCI_LE_ADV);
5896 
5897 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr);
5898 	if (!conn) {
5899 		/* In case of an error status with no pending connection,
5900 		 * just unlock as there is nothing to clean up.
5901 		 */
5902 		if (status)
5903 			goto unlock;
5904 
5905 		conn = hci_conn_add_unset(hdev, LE_LINK, bdaddr, role);
5906 		if (!conn) {
5907 			bt_dev_err(hdev, "no memory for new connection");
5908 			goto unlock;
5909 		}
5910 
5911 		conn->dst_type = bdaddr_type;
5912 
5913 		/* If we didn't have an hci_conn object previously
5914 		 * but we're in the central role, this must be something
5915 		 * initiated using an accept list. Since accept list based
5916 		 * connections are not "first class citizens" we don't
5917 		 * have full tracking of them. Therefore, we go ahead
5918 		 * with a "best effort" approach of determining the
5919 		 * initiator address based on the HCI_PRIVACY flag.
5920 		 */
5921 		if (conn->out) {
5922 			conn->resp_addr_type = bdaddr_type;
5923 			bacpy(&conn->resp_addr, bdaddr);
5924 			if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
5925 				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5926 				bacpy(&conn->init_addr, &hdev->rpa);
5927 			} else {
5928 				hci_copy_identity_address(hdev,
5929 							  &conn->init_addr,
5930 							  &conn->init_addr_type);
5931 			}
5932 		}
5933 	} else {
5934 		cancel_delayed_work(&conn->le_conn_timeout);
5935 	}
5936 
5937 	/* The HCI_LE_Connection_Complete event is only sent once per connection.
5938 	 * Processing it more than once per connection can corrupt kernel memory.
5939 	 *
5940 	 * As the connection handle is set here for the first time, it indicates
5941 	 * whether the connection is already set up.
5942 	 */
5943 	if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
5944 		bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
5945 		goto unlock;
5946 	}
5947 
5948 	le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa);
5949 
5950 	/* Lookup the identity address from the stored connection
5951 	 * address and address type.
5952 	 *
5953 	 * When establishing connections to an identity address, the
5954 	 * connection procedure will store the resolvable random
5955 	 * address first. Now if it can be converted back into the
5956 	 * identity address, start using the identity address from
5957 	 * now on.
5958 	 */
5959 	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
5960 	if (irk) {
5961 		bacpy(&conn->dst, &irk->bdaddr);
5962 		conn->dst_type = irk->addr_type;
5963 	}
5964 
5965 	conn->dst_type = ev_bdaddr_type(hdev, conn->dst_type, NULL);
5966 
5967 	/* All connection failure handling is taken care of by the
5968 	 * hci_conn_failed function which is triggered by the HCI
5969 	 * request completion callbacks used for connecting.
5970 	 */
5971 	if (status || hci_conn_set_handle(conn, handle))
5972 		goto unlock;
5973 
5974 	/* Drop the connection if it has been aborted */
5975 	if (test_bit(HCI_CONN_CANCEL, &conn->flags)) {
5976 		hci_conn_drop(conn);
5977 		goto unlock;
5978 	}
5979 
5980 	if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
5981 		addr_type = BDADDR_LE_PUBLIC;
5982 	else
5983 		addr_type = BDADDR_LE_RANDOM;
5984 
5985 	/* Drop the connection if the device is blocked */
5986 	if (hci_bdaddr_list_lookup(&hdev->reject_list, &conn->dst, addr_type)) {
5987 		hci_conn_drop(conn);
5988 		goto unlock;
5989 	}
5990 
5991 	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
5992 		mgmt_device_connected(hdev, conn, NULL, 0);
5993 
5994 	conn->sec_level = BT_SECURITY_LOW;
5995 	conn->state = BT_CONFIG;
5996 
5997 	/* Store current advertising instance as connection advertising instance
5998 	 * when software rotation is in use so it can be re-enabled when
5999 	 * disconnected.
6000 	 */
6001 	if (!ext_adv_capable(hdev))
6002 		conn->adv_instance = hdev->cur_adv_instance;
6003 
6004 	conn->le_conn_interval = interval;
6005 	conn->le_conn_latency = latency;
6006 	conn->le_supv_timeout = supervision_timeout;
6007 
6008 	hci_debugfs_create_conn(conn);
6009 	hci_conn_add_sysfs(conn);
6010 
6011 	/* The remote features procedure is defined for central
6012 	 * role only. So only in case of an initiated connection
6013 	 * request the remote features.
6014 	 *
6015 	 * If the local controller supports peripheral-initiated features
6016 	 * exchange, then requesting the remote features in peripheral
6017 	 * role is possible. Otherwise just transition into the
6018 	 * connected state without requesting the remote features.
6019 	 */
6020 	if (conn->out ||
6021 	    (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) {
6022 		struct hci_cp_le_read_remote_features cp;
6023 
6024 		cp.handle = __cpu_to_le16(conn->handle);
6025 
6026 		hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
6027 			     sizeof(cp), &cp);
6028 
6029 		hci_conn_hold(conn);
6030 	} else {
6031 		conn->state = BT_CONNECTED;
6032 		hci_connect_cfm(conn, status);
6033 	}
6034 
6035 	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
6036 					   conn->dst_type);
6037 	if (params) {
6038 		hci_pend_le_list_del_init(params);
6039 		if (params->conn) {
6040 			hci_conn_drop(params->conn);
6041 			hci_conn_put(params->conn);
6042 			params->conn = NULL;
6043 		}
6044 	}
6045 
6046 unlock:
6047 	hci_update_passive_scan(hdev);
6048 	hci_dev_unlock(hdev);
6049 }
6050 
6051 static void hci_le_conn_complete_evt(struct hci_dev *hdev, void *data,
6052 				     struct sk_buff *skb)
6053 {
6054 	struct hci_ev_le_conn_complete *ev = data;
6055 
6056 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6057 
6058 	le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
6059 			     NULL, ev->role, le16_to_cpu(ev->handle),
6060 			     le16_to_cpu(ev->interval),
6061 			     le16_to_cpu(ev->latency),
6062 			     le16_to_cpu(ev->supervision_timeout));
6063 }
6064 
6065 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev, void *data,
6066 					 struct sk_buff *skb)
6067 {
6068 	struct hci_ev_le_enh_conn_complete *ev = data;
6069 
6070 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6071 
6072 	le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
6073 			     &ev->local_rpa, ev->role, le16_to_cpu(ev->handle),
6074 			     le16_to_cpu(ev->interval),
6075 			     le16_to_cpu(ev->latency),
6076 			     le16_to_cpu(ev->supervision_timeout));
6077 }
6078 
6079 static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, void *data,
6080 				    struct sk_buff *skb)
6081 {
6082 	struct hci_evt_le_ext_adv_set_term *ev = data;
6083 	struct hci_conn *conn;
6084 	struct adv_info *adv, *n;
6085 
6086 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6087 
6088 	/* The Bluetooth Core 5.3 specification clearly states that this event
6089 	 * shall not be sent when the Host disables the advertising set. So in
6090 	 * case of HCI_ERROR_CANCELLED_BY_HOST, just ignore the event.
6091 	 *
6092 	 * When the Host disables an advertising set, all cleanup is done via
6093 	 * its command callback and does not need to be duplicated here.
6094 	 */
6095 	if (ev->status == HCI_ERROR_CANCELLED_BY_HOST) {
6096 		bt_dev_warn_ratelimited(hdev, "Unexpected advertising set terminated event");
6097 		return;
6098 	}
6099 
6100 	hci_dev_lock(hdev);
6101 
6102 	adv = hci_find_adv_instance(hdev, ev->handle);
6103 
6104 	if (ev->status) {
6105 		if (!adv)
6106 			goto unlock;
6107 
6108 		/* Remove advertising as it has been terminated */
6109 		hci_remove_adv_instance(hdev, ev->handle);
6110 		mgmt_advertising_removed(NULL, hdev, ev->handle);
6111 
6112 		list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
6113 			if (adv->enabled)
6114 				goto unlock;
6115 		}
6116 
6117 		/* We are no longer advertising, clear HCI_LE_ADV */
6118 		hci_dev_clear_flag(hdev, HCI_LE_ADV);
6119 		goto unlock;
6120 	}
6121 
6122 	if (adv)
6123 		adv->enabled = false;
6124 
6125 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
6126 	if (conn) {
6127 		/* Store handle in the connection so the correct advertising
6128 		 * instance can be re-enabled when disconnected.
6129 		 */
6130 		conn->adv_instance = ev->handle;
6131 
6132 		if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM ||
6133 		    bacmp(&conn->resp_addr, BDADDR_ANY))
6134 			goto unlock;
6135 
6136 		if (!ev->handle) {
6137 			bacpy(&conn->resp_addr, &hdev->random_addr);
6138 			goto unlock;
6139 		}
6140 
6141 		if (adv)
6142 			bacpy(&conn->resp_addr, &adv->random_addr);
6143 	}
6144 
6145 unlock:
6146 	hci_dev_unlock(hdev);
6147 }
6148 
6149 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev, void *data,
6150 					    struct sk_buff *skb)
6151 {
6152 	struct hci_ev_le_conn_update_complete *ev = data;
6153 	struct hci_conn *conn;
6154 
6155 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6156 
6157 	if (ev->status)
6158 		return;
6159 
6160 	hci_dev_lock(hdev);
6161 
6162 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6163 	if (conn) {
6164 		conn->le_conn_interval = le16_to_cpu(ev->interval);
6165 		conn->le_conn_latency = le16_to_cpu(ev->latency);
6166 		conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
6167 	}
6168 
6169 	hci_dev_unlock(hdev);
6170 }
6171 
6172 /* This function requires the caller holds hdev->lock */
6173 static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
6174 					      bdaddr_t *addr,
6175 					      u8 addr_type, bool addr_resolved,
6176 					      u8 adv_type)
6177 {
6178 	struct hci_conn *conn;
6179 	struct hci_conn_params *params;
6180 
6181 	/* If the event is not connectable don't proceed further */
6182 	if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
6183 		return NULL;
6184 
6185 	/* Ignore if the device is blocked or hdev is suspended */
6186 	if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type) ||
6187 	    hdev->suspended)
6188 		return NULL;
6189 
6190 	/* Most controllers will fail if we try to create new connections
6191 	 * while we have an existing one in peripheral role.
6192 	 */
6193 	if (hdev->conn_hash.le_num_peripheral > 0 &&
6194 	    (!test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) ||
6195 	     !(hdev->le_states[3] & 0x10)))
6196 		return NULL;
6197 
6198 	/* If we're not connectable only connect devices that we have in
6199 	 * our pend_le_conns list.
6200 	 */
6201 	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
6202 					   addr_type);
6203 	if (!params)
6204 		return NULL;
6205 
6206 	if (!params->explicit_connect) {
6207 		switch (params->auto_connect) {
6208 		case HCI_AUTO_CONN_DIRECT:
6209 			/* Only devices advertising with ADV_DIRECT_IND
6210 			 * trigger a connection attempt. This allows
6211 			 * incoming connections from peripheral devices.
6212 			 */
6213 			if (adv_type != LE_ADV_DIRECT_IND)
6214 				return NULL;
6215 			break;
6216 		case HCI_AUTO_CONN_ALWAYS:
6217 			/* Devices advertising with ADV_IND or ADV_DIRECT_IND
6218 			 * trigger a connection attempt. This means that
6219 			 * incoming connections from peripheral devices are
6220 			 * accepted and also outgoing connections to peripheral
6221 			 * devices are established when found.
6222 			 */
6223 			break;
6224 		default:
6225 			return NULL;
6226 		}
6227 	}
6228 
6229 	conn = hci_connect_le(hdev, addr, addr_type, addr_resolved,
6230 			      BT_SECURITY_LOW, hdev->def_le_autoconnect_timeout,
6231 			      HCI_ROLE_MASTER);
6232 	if (!IS_ERR(conn)) {
6233 		/* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
6234 		 * by the higher layer that tried to connect; if not, then
6235 		 * store the pointer since we don't really have any
6236 		 * other owner of the object besides the params that
6237 		 * triggered it. This way we can abort the connection if
6238 		 * the parameters get removed and keep the reference
6239 		 * count consistent once the connection is established.
6240 		 */
6241 
6242 		if (!params->explicit_connect)
6243 			params->conn = hci_conn_get(conn);
6244 
6245 		return conn;
6246 	}
6247 
6248 	switch (PTR_ERR(conn)) {
6249 	case -EBUSY:
6250 		/* If hci_connect() returns -EBUSY it means there is already
6251 		 * an LE connection attempt going on. Since controllers don't
6252 		 * support more than one connection attempt at a time, we
6253 		 * don't consider this an error case.
6254 		 */
6255 		break;
6256 	default:
6257 		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
6258 		return NULL;
6259 	}
6260 
6261 	return NULL;
6262 }
6263 
6264 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
6265 			       u8 bdaddr_type, bdaddr_t *direct_addr,
6266 			       u8 direct_addr_type, s8 rssi, u8 *data, u8 len,
6267 			       bool ext_adv, bool ctl_time, u64 instant)
6268 {
6269 	struct discovery_state *d = &hdev->discovery;
6270 	struct smp_irk *irk;
6271 	struct hci_conn *conn;
6272 	bool match, bdaddr_resolved;
6273 	u32 flags;
6274 	u8 *ptr;
6275 
6276 	switch (type) {
6277 	case LE_ADV_IND:
6278 	case LE_ADV_DIRECT_IND:
6279 	case LE_ADV_SCAN_IND:
6280 	case LE_ADV_NONCONN_IND:
6281 	case LE_ADV_SCAN_RSP:
6282 		break;
6283 	default:
6284 		bt_dev_err_ratelimited(hdev, "unknown advertising packet "
6285 				       "type: 0x%02x", type);
6286 		return;
6287 	}
6288 
6289 	if (len > max_adv_len(hdev)) {
6290 		bt_dev_err_ratelimited(hdev,
6291 				       "adv larger than maximum supported");
6292 		return;
6293 	}
6294 
6295 	/* Find the end of the data in case the report contains padded zero
6296 	 * bytes at the end causing an invalid length value.
6297 	 *
6298 	 * When data is NULL, len is 0 so there is no need for extra ptr
6299 	 * check as 'ptr < data + 0' is already false in such a case.
6300 	 */
6301 	for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
6302 		if (ptr + 1 + *ptr > data + len)
6303 			break;
6304 	}
6305 
6306 	/* Adjust for actual length. This handles the case when remote
6307 	 * device is advertising with incorrect data length.
6308 	 */
6309 	len = ptr - data;
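
	/* Worked example of the walk above (an illustrative report, not
	 * taken from this handler): for the bytes 02 01 06 04 09 'B' 'o'
	 * 'b' 00 00 the first AD structure is length 0x02/type 0x01
	 * (Flags) and the second length 0x04/type 0x09 (Complete Local
	 * Name "Bob"), so the walk stops at the first zero length byte
	 * and len shrinks from 10 to 8.
	 */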
6310 
6311 	/* If the direct address is present, then this report is from
6312 	 * an LE Direct Advertising Report event. In that case it is
6313 	 * important to see if the address is matching the local
6314 	 * controller address.
6315 	 */
6316 	if (!hci_dev_test_flag(hdev, HCI_MESH) && direct_addr) {
6317 		direct_addr_type = ev_bdaddr_type(hdev, direct_addr_type,
6318 						  &bdaddr_resolved);
6319 
6320 		/* Only resolvable random addresses are valid for these
6321 		 * kinds of reports and others can be ignored.
6322 		 */
6323 		if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
6324 			return;
6325 
6326 		/* If the controller is not using resolvable random
6327 		 * addresses, then this report can be ignored.
6328 		 */
6329 		if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
6330 			return;
6331 
6332 		/* If the local IRK of the controller does not match
6333 		 * with the resolvable random address provided, then
6334 		 * this report can be ignored.
6335 		 */
6336 		if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
6337 			return;
6338 	}
6339 
6340 	/* Check if we need to convert to identity address */
6341 	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
6342 	if (irk) {
6343 		bdaddr = &irk->bdaddr;
6344 		bdaddr_type = irk->addr_type;
6345 	}
6346 
6347 	bdaddr_type = ev_bdaddr_type(hdev, bdaddr_type, &bdaddr_resolved);
6348 
6349 	/* Check if we have been requested to connect to this device.
6350 	 *
6351 	 * direct_addr is set only for directed advertising reports (it is NULL
6352 	 * for advertising reports) and is already verified to be RPA above.
6353 	 */
6354 	conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, bdaddr_resolved,
6355 				     type);
6356 	if (!ext_adv && conn && type == LE_ADV_IND &&
6357 	    len <= max_adv_len(hdev)) {
6358 		/* Store report for later inclusion by
6359 		 * mgmt_device_connected
6360 		 */
6361 		memcpy(conn->le_adv_data, data, len);
6362 		conn->le_adv_data_len = len;
6363 	}
6364 
6365 	if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
6366 		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
6367 	else
6368 		flags = 0;
6369 
6370 	/* All scan results should be sent up for Mesh systems */
6371 	if (hci_dev_test_flag(hdev, HCI_MESH)) {
6372 		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6373 				  rssi, flags, data, len, NULL, 0, instant);
6374 		return;
6375 	}
6376 
6377 	/* Passive scanning shouldn't trigger any device found events,
6378 	 * except for devices marked as CONN_REPORT for which we do send
6379 	 * device found events, or advertisement monitoring requested.
6380 	 */
6381 	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
6382 		if (type == LE_ADV_DIRECT_IND)
6383 			return;
6384 
6385 		if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
6386 					       bdaddr, bdaddr_type) &&
6387 		    idr_is_empty(&hdev->adv_monitors_idr))
6388 			return;
6389 
6390 		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6391 				  rssi, flags, data, len, NULL, 0, 0);
6392 		return;
6393 	}
6394 
6395 	/* When receiving a scan response, there is no way to know
6396 	 * if the remote device is connectable or not. However,
6397 	 * since scan responses are merged with a previously seen
6398 	 * advertising report, the flags field from that report
6399 	 * will be used.
6400 	 *
6401 	 * In the unlikely case that a controller just sends a scan
6402 	 * response event that doesn't match the pending report,
6403 	 * it is marked as a standalone SCAN_RSP.
6404 	 */
6405 	if (type == LE_ADV_SCAN_RSP)
6406 		flags = MGMT_DEV_FOUND_SCAN_RSP;
6407 
6408 	/* If there's nothing pending either store the data from this
6409 	 * event or send an immediate device found event if the data
6410 	 * should not be stored for later.
6411 	 */
6412 	if (!ext_adv && !has_pending_adv_report(hdev)) {
6413 		/* If the report will trigger a SCAN_REQ store it for
6414 		 * later merging.
6415 		 */
6416 		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
6417 			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
6418 						 rssi, flags, data, len);
6419 			return;
6420 		}
6421 
6422 		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6423 				  rssi, flags, data, len, NULL, 0, 0);
6424 		return;
6425 	}
6426 
6427 	/* Check if the pending report is for the same device as the new one */
6428 	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
6429 		 bdaddr_type == d->last_adv_addr_type);
6430 
6431 	/* If the pending data doesn't match this report or this isn't a
6432 	 * scan response (e.g. we got a duplicate ADV_IND) then force
6433 	 * sending of the pending data.
6434 	 */
6435 	if (type != LE_ADV_SCAN_RSP || !match) {
6436 		/* Send out whatever is in the cache, but skip duplicates */
6437 		if (!match)
6438 			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
6439 					  d->last_adv_addr_type, NULL,
6440 					  d->last_adv_rssi, d->last_adv_flags,
6441 					  d->last_adv_data,
6442 					  d->last_adv_data_len, NULL, 0, 0);
6443 
6444 		/* If the new report will trigger a SCAN_REQ store it for
6445 		 * later merging.
6446 		 */
6447 		if (!ext_adv && (type == LE_ADV_IND ||
6448 				 type == LE_ADV_SCAN_IND)) {
6449 			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
6450 						 rssi, flags, data, len);
6451 			return;
6452 		}
6453 
6454 		/* The advertising reports cannot be merged, so clear
6455 		 * the pending report and send out a device found event.
6456 		 */
6457 		clear_pending_adv_report(hdev);
6458 		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6459 				  rssi, flags, data, len, NULL, 0, 0);
6460 		return;
6461 	}
6462 
6463 	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
6464 	 * the new event is a SCAN_RSP. We can therefore proceed with
6465 	 * sending a merged device found event.
6466 	 */
6467 	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
6468 			  d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
6469 			  d->last_adv_data, d->last_adv_data_len, data, len, 0);
6470 	clear_pending_adv_report(hdev);
6471 }
6472 
6473 static void hci_le_adv_report_evt(struct hci_dev *hdev, void *data,
6474 				  struct sk_buff *skb)
6475 {
6476 	struct hci_ev_le_advertising_report *ev = data;
6477 	u64 instant = jiffies;
6478 
6479 	if (!ev->num)
6480 		return;
6481 
6482 	hci_dev_lock(hdev);
6483 
6484 	while (ev->num--) {
6485 		struct hci_ev_le_advertising_info *info;
6486 		s8 rssi;
6487 
6488 		info = hci_le_ev_skb_pull(hdev, skb,
6489 					  HCI_EV_LE_ADVERTISING_REPORT,
6490 					  sizeof(*info));
6491 		if (!info)
6492 			break;
6493 
6494 		if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_ADVERTISING_REPORT,
6495 					info->length + 1))
6496 			break;
6497 
6498 		if (info->length <= max_adv_len(hdev)) {
6499 			rssi = info->data[info->length];
6500 			process_adv_report(hdev, info->type, &info->bdaddr,
6501 					   info->bdaddr_type, NULL, 0, rssi,
6502 					   info->data, info->length, false,
6503 					   false, instant);
6504 		} else {
6505 			bt_dev_err(hdev, "Dropping invalid advertising data");
6506 		}
6507 	}
6508 
6509 	hci_dev_unlock(hdev);
6510 }
6511 
6512 static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
6513 {
6514 	if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
6515 		switch (evt_type) {
6516 		case LE_LEGACY_ADV_IND:
6517 			return LE_ADV_IND;
6518 		case LE_LEGACY_ADV_DIRECT_IND:
6519 			return LE_ADV_DIRECT_IND;
6520 		case LE_LEGACY_ADV_SCAN_IND:
6521 			return LE_ADV_SCAN_IND;
6522 		case LE_LEGACY_NONCONN_IND:
6523 			return LE_ADV_NONCONN_IND;
6524 		case LE_LEGACY_SCAN_RSP_ADV:
6525 		case LE_LEGACY_SCAN_RSP_ADV_SCAN:
6526 			return LE_ADV_SCAN_RSP;
6527 		}
6528 
6529 		goto invalid;
6530 	}
6531 
6532 	if (evt_type & LE_EXT_ADV_CONN_IND) {
6533 		if (evt_type & LE_EXT_ADV_DIRECT_IND)
6534 			return LE_ADV_DIRECT_IND;
6535 
6536 		return LE_ADV_IND;
6537 	}
6538 
6539 	if (evt_type & LE_EXT_ADV_SCAN_RSP)
6540 		return LE_ADV_SCAN_RSP;
6541 
6542 	if (evt_type & LE_EXT_ADV_SCAN_IND)
6543 		return LE_ADV_SCAN_IND;
6544 
6545 	if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
6546 	    evt_type & LE_EXT_ADV_DIRECT_IND)
6547 		return LE_ADV_NONCONN_IND;
6548 
6549 invalid:
6550 	bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x",
6551 			       evt_type);
6552 
6553 	return LE_ADV_INVALID;
6554 }
6555 
6556 static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, void *data,
6557 				      struct sk_buff *skb)
6558 {
6559 	struct hci_ev_le_ext_adv_report *ev = data;
6560 	u64 instant = jiffies;
6561 
6562 	if (!ev->num)
6563 		return;
6564 
6565 	hci_dev_lock(hdev);
6566 
6567 	while (ev->num--) {
6568 		struct hci_ev_le_ext_adv_info *info;
6569 		u8 legacy_evt_type;
6570 		u16 evt_type;
6571 
6572 		info = hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
6573 					  sizeof(*info));
6574 		if (!info)
6575 			break;
6576 
6577 		if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
6578 					info->length))
6579 			break;
6580 
6581 		evt_type = __le16_to_cpu(info->type) & LE_EXT_ADV_EVT_TYPE_MASK;
6582 		legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);
6583 		if (legacy_evt_type != LE_ADV_INVALID) {
6584 			process_adv_report(hdev, legacy_evt_type, &info->bdaddr,
6585 					   info->bdaddr_type, NULL, 0,
6586 					   info->rssi, info->data, info->length,
6587 					   !(evt_type & LE_EXT_ADV_LEGACY_PDU),
6588 					   false, instant);
6589 		}
6590 	}
6591 
6592 	hci_dev_unlock(hdev);
6593 }
6594 
6595 static int hci_le_pa_term_sync(struct hci_dev *hdev, __le16 handle)
6596 {
6597 	struct hci_cp_le_pa_term_sync cp;
6598 
6599 	memset(&cp, 0, sizeof(cp));
6600 	cp.handle = handle;
6601 
6602 	return hci_send_cmd(hdev, HCI_OP_LE_PA_TERM_SYNC, sizeof(cp), &cp);
6603 }
6604 
6605 static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data,
6606 					    struct sk_buff *skb)
6607 {
6608 	struct hci_ev_le_pa_sync_established *ev = data;
6609 	int mask = hdev->link_mode;
6610 	__u8 flags = 0;
6611 	struct hci_conn *pa_sync;
6612 
6613 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6614 
6615 	hci_dev_lock(hdev);
6616 
6617 	hci_dev_clear_flag(hdev, HCI_PA_SYNC);
6618 
6619 	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ISO_LINK, &flags);
6620 	if (!(mask & HCI_LM_ACCEPT)) {
6621 		hci_le_pa_term_sync(hdev, ev->handle);
6622 		goto unlock;
6623 	}
6624 
6625 	if (!(flags & HCI_PROTO_DEFER))
6626 		goto unlock;
6627 
6628 	if (ev->status) {
6629 		/* Add connection to indicate the failed PA sync event */
6630 		pa_sync = hci_conn_add_unset(hdev, ISO_LINK, BDADDR_ANY,
6631 					     HCI_ROLE_SLAVE);
6632 
6633 		if (!pa_sync)
6634 			goto unlock;
6635 
6636 		set_bit(HCI_CONN_PA_SYNC_FAILED, &pa_sync->flags);
6637 
6638 		/* Notify iso layer */
6639 		hci_connect_cfm(pa_sync, ev->status);
6640 	}
6641 
6642 unlock:
6643 	hci_dev_unlock(hdev);
6644 }
6645 
6646 static void hci_le_per_adv_report_evt(struct hci_dev *hdev, void *data,
6647 				      struct sk_buff *skb)
6648 {
6649 	struct hci_ev_le_per_adv_report *ev = data;
6650 	int mask = hdev->link_mode;
6651 	__u8 flags = 0;
6652 
6653 	bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle));
6654 
6655 	hci_dev_lock(hdev);
6656 
6657 	mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, ISO_LINK, &flags);
6658 	if (!(mask & HCI_LM_ACCEPT))
6659 		hci_le_pa_term_sync(hdev, ev->sync_handle);
6660 
6661 	hci_dev_unlock(hdev);
6662 }
6663 
6664 static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev, void *data,
6665 					    struct sk_buff *skb)
6666 {
6667 	struct hci_ev_le_remote_feat_complete *ev = data;
6668 	struct hci_conn *conn;
6669 
6670 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6671 
6672 	hci_dev_lock(hdev);
6673 
6674 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6675 	if (conn) {
6676 		if (!ev->status)
6677 			memcpy(conn->features[0], ev->features, 8);
6678 
6679 		if (conn->state == BT_CONFIG) {
6680 			__u8 status;
6681 
6682 			/* If the local controller supports peripheral-initiated
6683 			 * features exchange, but the remote controller does
6684 			 * not, then it is possible that the error code 0x1a
6685 			 * for unsupported remote feature gets returned.
6686 			 *
6687 			 * In this specific case, allow the connection to
6688 			 * transition into connected state and mark it as
6689 			 * successful.
6690 			 */
6691 			if (!conn->out && ev->status == 0x1a &&
6692 			    (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES))
6693 				status = 0x00;
6694 			else
6695 				status = ev->status;
6696 
6697 			conn->state = BT_CONNECTED;
6698 			hci_connect_cfm(conn, status);
6699 			hci_conn_drop(conn);
6700 		}
6701 	}
6702 
6703 	hci_dev_unlock(hdev);
6704 }
6705 
6706 static void hci_le_ltk_request_evt(struct hci_dev *hdev, void *data,
6707 				   struct sk_buff *skb)
6708 {
6709 	struct hci_ev_le_ltk_req *ev = data;
6710 	struct hci_cp_le_ltk_reply cp;
6711 	struct hci_cp_le_ltk_neg_reply neg;
6712 	struct hci_conn *conn;
6713 	struct smp_ltk *ltk;
6714 
6715 	bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));
6716 
6717 	hci_dev_lock(hdev);
6718 
6719 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6720 	if (!conn)
6721 		goto not_found;
6722 
6723 	ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
6724 	if (!ltk)
6725 		goto not_found;
6726 
6727 	if (smp_ltk_is_sc(ltk)) {
6728 		/* With SC both EDiv and Rand are set to zero */
6729 		if (ev->ediv || ev->rand)
6730 			goto not_found;
6731 	} else {
6732 		/* For non-SC keys check that EDiv and Rand match */
6733 		if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
6734 			goto not_found;
6735 	}
6736 
6737 	memcpy(cp.ltk, ltk->val, ltk->enc_size);
6738 	memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
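	/* e.g. with a negotiated enc_size of 7 (an illustrative value, it
	 * comes from the stored LTK), the first 7 bytes of the key are
	 * copied and the remaining 9 bytes of the 16-byte cp.ltk are zero
	 * padded.
	 */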
6739 	cp.handle = cpu_to_le16(conn->handle);
6740 
6741 	conn->pending_sec_level = smp_ltk_sec_level(ltk);
6742 
6743 	conn->enc_key_size = ltk->enc_size;
6744 
6745 	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
6746 
6747 	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
6748 	 * temporary key used to encrypt a connection following
6749 	 * pairing. It is used during the Encrypted Session Setup to
6750 	 * distribute the keys. Later, security can be re-established
6751 	 * using a distributed LTK.
6752 	 */
6753 	if (ltk->type == SMP_STK) {
6754 		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
6755 		list_del_rcu(&ltk->list);
6756 		kfree_rcu(ltk, rcu);
6757 	} else {
6758 		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
6759 	}
6760 
6761 	hci_dev_unlock(hdev);
6762 
6763 	return;
6764 
6765 not_found:
6766 	neg.handle = ev->handle;
6767 	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
6768 	hci_dev_unlock(hdev);
6769 }
6770 
6771 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
6772 				      u8 reason)
6773 {
6774 	struct hci_cp_le_conn_param_req_neg_reply cp;
6775 
6776 	cp.handle = cpu_to_le16(handle);
6777 	cp.reason = reason;
6778 
6779 	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
6780 		     &cp);
6781 }
6782 
6783 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev, void *data,
6784 					     struct sk_buff *skb)
6785 {
6786 	struct hci_ev_le_remote_conn_param_req *ev = data;
6787 	struct hci_cp_le_conn_param_req_reply cp;
6788 	struct hci_conn *hcon;
6789 	u16 handle, min, max, latency, timeout;
6790 
6791 	bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));
6792 
6793 	handle = le16_to_cpu(ev->handle);
6794 	min = le16_to_cpu(ev->interval_min);
6795 	max = le16_to_cpu(ev->interval_max);
6796 	latency = le16_to_cpu(ev->latency);
6797 	timeout = le16_to_cpu(ev->timeout);
6798 
6799 	hcon = hci_conn_hash_lookup_handle(hdev, handle);
6800 	if (!hcon || hcon->state != BT_CONNECTED)
6801 		return send_conn_param_neg_reply(hdev, handle,
6802 						 HCI_ERROR_UNKNOWN_CONN_ID);
6803 
6804 	if (hci_check_conn_params(min, max, latency, timeout))
6805 		return send_conn_param_neg_reply(hdev, handle,
6806 						 HCI_ERROR_INVALID_LL_PARAMS);
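
	/* A rough summary of what hci_check_conn_params() enforces (see the
	 * helper for the authoritative check): a connection interval of
	 * 0x0006-0x0C80 in 1.25 ms units (7.5 ms to 4 s), a latency of at
	 * most 0x01F3 that also fits within the timeout, and a supervision
	 * timeout of 0x000A-0x0C80 in 10 ms units (100 ms to 32 s).
	 */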
6807 
6808 	if (hcon->role == HCI_ROLE_MASTER) {
6809 		struct hci_conn_params *params;
6810 		u8 store_hint;
6811 
6812 		hci_dev_lock(hdev);
6813 
6814 		params = hci_conn_params_lookup(hdev, &hcon->dst,
6815 						hcon->dst_type);
6816 		if (params) {
6817 			params->conn_min_interval = min;
6818 			params->conn_max_interval = max;
6819 			params->conn_latency = latency;
6820 			params->supervision_timeout = timeout;
6821 			store_hint = 0x01;
6822 		} else {
6823 			store_hint = 0x00;
6824 		}
6825 
6826 		hci_dev_unlock(hdev);
6827 
6828 		mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
6829 				    store_hint, min, max, latency, timeout);
6830 	}
6831 
6832 	cp.handle = ev->handle;
6833 	cp.interval_min = ev->interval_min;
6834 	cp.interval_max = ev->interval_max;
6835 	cp.latency = ev->latency;
6836 	cp.timeout = ev->timeout;
6837 	cp.min_ce_len = 0;
6838 	cp.max_ce_len = 0;
6839 
6840 	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
6841 }
6842 
6843 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, void *data,
6844 					 struct sk_buff *skb)
6845 {
6846 	struct hci_ev_le_direct_adv_report *ev = data;
6847 	u64 instant = jiffies;
6848 	int i;
6849 
6850 	if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_DIRECT_ADV_REPORT,
6851 				flex_array_size(ev, info, ev->num)))
6852 		return;
6853 
6854 	if (!ev->num)
6855 		return;
6856 
6857 	hci_dev_lock(hdev);
6858 
6859 	for (i = 0; i < ev->num; i++) {
6860 		struct hci_ev_le_direct_adv_info *info = &ev->info[i];
6861 
6862 		process_adv_report(hdev, info->type, &info->bdaddr,
6863 				   info->bdaddr_type, &info->direct_addr,
6864 				   info->direct_addr_type, info->rssi, NULL, 0,
6865 				   false, false, instant);
6866 	}
6867 
6868 	hci_dev_unlock(hdev);
6869 }
6870 
6871 static void hci_le_phy_update_evt(struct hci_dev *hdev, void *data,
6872 				  struct sk_buff *skb)
6873 {
6874 	struct hci_ev_le_phy_update_complete *ev = data;
6875 	struct hci_conn *conn;
6876 
6877 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6878 
6879 	if (ev->status)
6880 		return;
6881 
6882 	hci_dev_lock(hdev);
6883 
6884 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6885 	if (!conn)
6886 		goto unlock;
6887 
6888 	conn->le_tx_phy = ev->tx_phy;
6889 	conn->le_rx_phy = ev->rx_phy;
6890 
6891 unlock:
6892 	hci_dev_unlock(hdev);
6893 }
6894 
6895 static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data,
6896 					struct sk_buff *skb)
6897 {
6898 	struct hci_evt_le_cis_established *ev = data;
6899 	struct hci_conn *conn;
6900 	struct bt_iso_qos *qos;
6901 	bool pending = false;
6902 	u16 handle = __le16_to_cpu(ev->handle);
6903 
6904 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6905 
6906 	hci_dev_lock(hdev);
6907 
6908 	conn = hci_conn_hash_lookup_handle(hdev, handle);
6909 	if (!conn) {
6910 		bt_dev_err(hdev,
6911 			   "Unable to find connection with handle 0x%4.4x",
6912 			   handle);
6913 		goto unlock;
6914 	}
6915 
6916 	if (conn->type != ISO_LINK) {
6917 		bt_dev_err(hdev,
6918 			   "Invalid connection link type handle 0x%4.4x",
6919 			   handle);
6920 		goto unlock;
6921 	}
6922 
6923 	qos = &conn->iso_qos;
6924 
6925 	pending = test_and_clear_bit(HCI_CONN_CREATE_CIS, &conn->flags);
6926 
6927 	/* Convert ISO Interval (1.25 ms slots) to SDU Interval (us) */
6928 	qos->ucast.in.interval = le16_to_cpu(ev->interval) * 1250;
6929 	qos->ucast.out.interval = qos->ucast.in.interval;
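	/* e.g. an ISO Interval of 8 slots is 8 * 1.25 ms = 10 ms, stored as
	 * 10000 us (a worked illustration, not a value read here).
	 */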
6930 
6931 	switch (conn->role) {
6932 	case HCI_ROLE_SLAVE:
6933 		/* Convert Transport Latency (us) to Latency (msec) */
6934 		qos->ucast.in.latency =
6935 			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->c_latency),
6936 					  1000);
6937 		qos->ucast.out.latency =
6938 			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency),
6939 					  1000);
6940 		qos->ucast.in.sdu = le16_to_cpu(ev->c_mtu);
6941 		qos->ucast.out.sdu = le16_to_cpu(ev->p_mtu);
6942 		qos->ucast.in.phy = ev->c_phy;
6943 		qos->ucast.out.phy = ev->p_phy;
6944 		break;
6945 	case HCI_ROLE_MASTER:
6946 		/* Convert Transport Latency (us) to Latency (msec) */
6947 		qos->ucast.out.latency =
6948 			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->c_latency),
6949 					  1000);
6950 		qos->ucast.in.latency =
6951 			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency),
6952 					  1000);
6953 		qos->ucast.out.sdu = le16_to_cpu(ev->c_mtu);
6954 		qos->ucast.in.sdu = le16_to_cpu(ev->p_mtu);
6955 		qos->ucast.out.phy = ev->c_phy;
6956 		qos->ucast.in.phy = ev->p_phy;
6957 		break;
6958 	}
6959 
6960 	if (!ev->status) {
6961 		conn->state = BT_CONNECTED;
6962 		hci_debugfs_create_conn(conn);
6963 		hci_conn_add_sysfs(conn);
6964 		hci_iso_setup_path(conn);
6965 		goto unlock;
6966 	}
6967 
6968 	conn->state = BT_CLOSED;
6969 	hci_connect_cfm(conn, ev->status);
6970 	hci_conn_del(conn);
6971 
6972 unlock:
6973 	if (pending)
6974 		hci_le_create_cis_pending(hdev);
6975 
6976 	hci_dev_unlock(hdev);
6977 }
6978 
6979 static void hci_le_reject_cis(struct hci_dev *hdev, __le16 handle)
6980 {
6981 	struct hci_cp_le_reject_cis cp;
6982 
6983 	memset(&cp, 0, sizeof(cp));
6984 	cp.handle = handle;
6985 	cp.reason = HCI_ERROR_REJ_BAD_ADDR;
6986 	hci_send_cmd(hdev, HCI_OP_LE_REJECT_CIS, sizeof(cp), &cp);
6987 }
6988 
6989 static void hci_le_accept_cis(struct hci_dev *hdev, __le16 handle)
6990 {
6991 	struct hci_cp_le_accept_cis cp;
6992 
6993 	memset(&cp, 0, sizeof(cp));
6994 	cp.handle = handle;
6995 	hci_send_cmd(hdev, HCI_OP_LE_ACCEPT_CIS, sizeof(cp), &cp);
6996 }
6997 
6998 static void hci_le_cis_req_evt(struct hci_dev *hdev, void *data,
6999 			       struct sk_buff *skb)
7000 {
7001 	struct hci_evt_le_cis_req *ev = data;
7002 	u16 acl_handle, cis_handle;
7003 	struct hci_conn *acl, *cis;
7004 	int mask;
7005 	__u8 flags = 0;
7006 
7007 	acl_handle = __le16_to_cpu(ev->acl_handle);
7008 	cis_handle = __le16_to_cpu(ev->cis_handle);
7009 
7010 	bt_dev_dbg(hdev, "acl 0x%4.4x handle 0x%4.4x cig 0x%2.2x cis 0x%2.2x",
7011 		   acl_handle, cis_handle, ev->cig_id, ev->cis_id);
7012 
7013 	hci_dev_lock(hdev);
7014 
7015 	acl = hci_conn_hash_lookup_handle(hdev, acl_handle);
7016 	if (!acl)
7017 		goto unlock;
7018 
7019 	mask = hci_proto_connect_ind(hdev, &acl->dst, ISO_LINK, &flags);
7020 	if (!(mask & HCI_LM_ACCEPT)) {
7021 		hci_le_reject_cis(hdev, ev->cis_handle);
7022 		goto unlock;
7023 	}
7024 
7025 	cis = hci_conn_hash_lookup_handle(hdev, cis_handle);
7026 	if (!cis) {
7027 		cis = hci_conn_add(hdev, ISO_LINK, &acl->dst, HCI_ROLE_SLAVE,
7028 				   cis_handle);
7029 		if (!cis) {
7030 			hci_le_reject_cis(hdev, ev->cis_handle);
7031 			goto unlock;
7032 		}
7033 	}
7034 
7035 	cis->iso_qos.ucast.cig = ev->cig_id;
7036 	cis->iso_qos.ucast.cis = ev->cis_id;
7037 
7038 	if (!(flags & HCI_PROTO_DEFER)) {
7039 		hci_le_accept_cis(hdev, ev->cis_handle);
7040 	} else {
7041 		cis->state = BT_CONNECT2;
7042 		hci_connect_cfm(cis, 0);
7043 	}
7044 
7045 unlock:
7046 	hci_dev_unlock(hdev);
7047 }
7048 
7049 static int hci_iso_term_big_sync(struct hci_dev *hdev, void *data)
7050 {
7051 	u8 handle = PTR_UINT(data);
7052 
7053 	return hci_le_terminate_big_sync(hdev, handle,
7054 					 HCI_ERROR_LOCAL_HOST_TERM);
7055 }
7056 
7057 static void hci_le_create_big_complete_evt(struct hci_dev *hdev, void *data,
7058 					   struct sk_buff *skb)
7059 {
7060 	struct hci_evt_le_create_big_complete *ev = data;
7061 	struct hci_conn *conn;
7062 	__u8 i = 0;
7063 
7064 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
7065 
7066 	if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_CREATE_BIG_COMPLETE,
7067 				flex_array_size(ev, bis_handle, ev->num_bis)))
7068 		return;
7069 
7070 	hci_dev_lock(hdev);
7071 	rcu_read_lock();
7072 
7073 	/* Connect all BISes that are bound to the BIG */
7074 	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
7075 		if (bacmp(&conn->dst, BDADDR_ANY) ||
7076 		    conn->type != ISO_LINK ||
7077 		    conn->iso_qos.bcast.big != ev->handle)
7078 			continue;
7079 
7080 		if (hci_conn_set_handle(conn,
7081 					__le16_to_cpu(ev->bis_handle[i++])))
7082 			continue;
7083 
7084 		if (!ev->status) {
7085 			conn->state = BT_CONNECTED;
7086 			set_bit(HCI_CONN_BIG_CREATED, &conn->flags);
7087 			rcu_read_unlock();
7088 			hci_debugfs_create_conn(conn);
7089 			hci_conn_add_sysfs(conn);
7090 			hci_iso_setup_path(conn);
7091 			rcu_read_lock();
7092 			continue;
7093 		}
7094 
7095 		hci_connect_cfm(conn, ev->status);
7096 		rcu_read_unlock();
7097 		hci_conn_del(conn);
7098 		rcu_read_lock();
7099 	}
7100 
7101 	rcu_read_unlock();
7102 
7103 	if (!ev->status && !i)
7104 		/* If no BISes have been connected for the BIG,
7105 		 * terminate. This is in case all bound connections
7106 		 * have been closed before the BIG creation
7107 		 * has completed.
7108 		 */
7109 		hci_cmd_sync_queue(hdev, hci_iso_term_big_sync,
7110 				   UINT_PTR(ev->handle), NULL);
7111 
7112 	hci_dev_unlock(hdev);
7113 }
7114 
7115 static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
7116 					    struct sk_buff *skb)
7117 {
7118 	struct hci_evt_le_big_sync_estabilished *ev = data;
7119 	struct hci_conn *bis;
7120 	struct hci_conn *pa_sync;
7121 	int i;
7122 
7123 	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
7124 
7125 	if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_BIG_SYNC_ESTABILISHED,
7126 				flex_array_size(ev, bis, ev->num_bis)))
7127 		return;
7128 
7129 	hci_dev_lock(hdev);
7130 
7131 	if (!ev->status) {
7132 		pa_sync = hci_conn_hash_lookup_pa_sync_big_handle(hdev, ev->handle);
7133 		if (pa_sync)
7134 			/* Also mark the BIG sync established event on the
7135 			 * associated PA sync hcon
7136 			 */
7137 			set_bit(HCI_CONN_BIG_SYNC, &pa_sync->flags);
7138 	}
7139 
7140 	for (i = 0; i < ev->num_bis; i++) {
7141 		u16 handle = le16_to_cpu(ev->bis[i]);
7142 		__le32 interval;
7143 
7144 		bis = hci_conn_hash_lookup_handle(hdev, handle);
7145 		if (!bis) {
7146 			bis = hci_conn_add(hdev, ISO_LINK, BDADDR_ANY,
7147 					   HCI_ROLE_SLAVE, handle);
7148 			if (!bis)
7149 				continue;
7150 		}
7151 
7152 		if (ev->status != 0x42)
7153 			/* Mark PA sync as established */
7154 			set_bit(HCI_CONN_PA_SYNC, &bis->flags);
7155 
7156 		bis->iso_qos.bcast.big = ev->handle;
7157 		memset(&interval, 0, sizeof(interval));
7158 		memcpy(&interval, ev->latency, sizeof(ev->latency));
7159 		bis->iso_qos.bcast.in.interval = le32_to_cpu(interval);
7160 		/* Convert ISO Interval (1.25 ms slots) to latency (ms) */
7161 		bis->iso_qos.bcast.in.latency = le16_to_cpu(ev->interval) * 125 / 100;
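		/* e.g. 24 slots * 125 / 100 = 30 ms (a worked illustration) */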
7162 		bis->iso_qos.bcast.in.sdu = le16_to_cpu(ev->max_pdu);
7163 
7164 		if (!ev->status) {
7165 			set_bit(HCI_CONN_BIG_SYNC, &bis->flags);
7166 			hci_iso_setup_path(bis);
7167 		}
7168 	}
7169 
7170 	/* In case the BIG sync failed, notify each failed connection to
7171 	 * the user after all hci connections have been added.
7172 	 */
7173 	if (ev->status)
7174 		for (i = 0; i < ev->num_bis; i++) {
7175 			u16 handle = le16_to_cpu(ev->bis[i]);
7176 
7177 			bis = hci_conn_hash_lookup_handle(hdev, handle);
7178 
7179 			set_bit(HCI_CONN_BIG_SYNC_FAILED, &bis->flags);
7180 			hci_connect_cfm(bis, ev->status);
7181 		}
7182 
7183 	hci_dev_unlock(hdev);
7184 }
7185 
7186 static void hci_le_big_info_adv_report_evt(struct hci_dev *hdev, void *data,
7187 					   struct sk_buff *skb)
7188 {
7189 	struct hci_evt_le_big_info_adv_report *ev = data;
7190 	int mask = hdev->link_mode;
7191 	__u8 flags = 0;
7192 	struct hci_conn *pa_sync;
7193 
7194 	bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle));
7195 
7196 	hci_dev_lock(hdev);
7197 
7198 	mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, ISO_LINK, &flags);
7199 	if (!(mask & HCI_LM_ACCEPT)) {
7200 		hci_le_pa_term_sync(hdev, ev->sync_handle);
7201 		goto unlock;
7202 	}
7203 
7204 	if (!(flags & HCI_PROTO_DEFER))
7205 		goto unlock;
7206 
7207 	pa_sync = hci_conn_hash_lookup_pa_sync_handle(hdev,
7208 						      le16_to_cpu(ev->sync_handle));
7210 
7211 	if (pa_sync)
7212 		goto unlock;
7213 
7214 	/* Add connection to indicate the PA sync event */
7215 	pa_sync = hci_conn_add_unset(hdev, ISO_LINK, BDADDR_ANY,
7216 				     HCI_ROLE_SLAVE);
7217 
7218 	if (!pa_sync)
7219 		goto unlock;
7220 
7221 	pa_sync->sync_handle = le16_to_cpu(ev->sync_handle);
7222 	set_bit(HCI_CONN_PA_SYNC, &pa_sync->flags);
7223 
7224 	/* Notify iso layer */
7225 	hci_connect_cfm(pa_sync, 0x00);
7226 
7227 unlock:
7228 	hci_dev_unlock(hdev);
7229 }
7230 
7231 #define HCI_LE_EV_VL(_op, _func, _min_len, _max_len) \
7232 [_op] = { \
7233 	.func = _func, \
7234 	.min_len = _min_len, \
7235 	.max_len = _max_len, \
7236 }
7237 
7238 #define HCI_LE_EV(_op, _func, _len) \
7239 	HCI_LE_EV_VL(_op, _func, _len, _len)
7240 
7241 #define HCI_LE_EV_STATUS(_op, _func) \
7242 	HCI_LE_EV(_op, _func, sizeof(struct hci_ev_status))
7243 
7244 /* Entries in this table shall be positioned according to the subevent
7245  * opcode they handle, so use of the macros above is recommended since they
7246  * initialize each entry at its proper index using designated initializers;
7247  * that way events without a callback function can be omitted.
7248  */
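/* As an expansion sketch (not additional code), the first entry below,
 *
 *	HCI_LE_EV(HCI_EV_LE_CONN_COMPLETE, hci_le_conn_complete_evt,
 *		  sizeof(struct hci_ev_le_conn_complete)),
 *
 * becomes the designated initializer
 *
 *	[0x01] = { .func = hci_le_conn_complete_evt,
 *		   .min_len = sizeof(struct hci_ev_le_conn_complete),
 *		   .max_len = sizeof(struct hci_ev_le_conn_complete) },
 *
 * so any subevent without an entry is simply left zeroed.
 */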
7249 static const struct hci_le_ev {
7250 	void (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
7251 	u16  min_len;
7252 	u16  max_len;
7253 } hci_le_ev_table[U8_MAX + 1] = {
7254 	/* [0x01 = HCI_EV_LE_CONN_COMPLETE] */
7255 	HCI_LE_EV(HCI_EV_LE_CONN_COMPLETE, hci_le_conn_complete_evt,
7256 		  sizeof(struct hci_ev_le_conn_complete)),
7257 	/* [0x02 = HCI_EV_LE_ADVERTISING_REPORT] */
7258 	HCI_LE_EV_VL(HCI_EV_LE_ADVERTISING_REPORT, hci_le_adv_report_evt,
7259 		     sizeof(struct hci_ev_le_advertising_report),
7260 		     HCI_MAX_EVENT_SIZE),
7261 	/* [0x03 = HCI_EV_LE_CONN_UPDATE_COMPLETE] */
7262 	HCI_LE_EV(HCI_EV_LE_CONN_UPDATE_COMPLETE,
7263 		  hci_le_conn_update_complete_evt,
7264 		  sizeof(struct hci_ev_le_conn_update_complete)),
7265 	/* [0x04 = HCI_EV_LE_REMOTE_FEAT_COMPLETE] */
7266 	HCI_LE_EV(HCI_EV_LE_REMOTE_FEAT_COMPLETE,
7267 		  hci_le_remote_feat_complete_evt,
7268 		  sizeof(struct hci_ev_le_remote_feat_complete)),
7269 	/* [0x05 = HCI_EV_LE_LTK_REQ] */
7270 	HCI_LE_EV(HCI_EV_LE_LTK_REQ, hci_le_ltk_request_evt,
7271 		  sizeof(struct hci_ev_le_ltk_req)),
7272 	/* [0x06 = HCI_EV_LE_REMOTE_CONN_PARAM_REQ] */
7273 	HCI_LE_EV(HCI_EV_LE_REMOTE_CONN_PARAM_REQ,
7274 		  hci_le_remote_conn_param_req_evt,
7275 		  sizeof(struct hci_ev_le_remote_conn_param_req)),
7276 	/* [0x0a = HCI_EV_LE_ENHANCED_CONN_COMPLETE] */
7277 	HCI_LE_EV(HCI_EV_LE_ENHANCED_CONN_COMPLETE,
7278 		  hci_le_enh_conn_complete_evt,
7279 		  sizeof(struct hci_ev_le_enh_conn_complete)),
7280 	/* [0x0b = HCI_EV_LE_DIRECT_ADV_REPORT] */
7281 	HCI_LE_EV_VL(HCI_EV_LE_DIRECT_ADV_REPORT, hci_le_direct_adv_report_evt,
7282 		     sizeof(struct hci_ev_le_direct_adv_report),
7283 		     HCI_MAX_EVENT_SIZE),
7284 	/* [0x0c = HCI_EV_LE_PHY_UPDATE_COMPLETE] */
7285 	HCI_LE_EV(HCI_EV_LE_PHY_UPDATE_COMPLETE, hci_le_phy_update_evt,
7286 		  sizeof(struct hci_ev_le_phy_update_complete)),
7287 	/* [0x0d = HCI_EV_LE_EXT_ADV_REPORT] */
7288 	HCI_LE_EV_VL(HCI_EV_LE_EXT_ADV_REPORT, hci_le_ext_adv_report_evt,
7289 		     sizeof(struct hci_ev_le_ext_adv_report),
7290 		     HCI_MAX_EVENT_SIZE),
7291 	/* [0x0e = HCI_EV_LE_PA_SYNC_ESTABLISHED] */
7292 	HCI_LE_EV(HCI_EV_LE_PA_SYNC_ESTABLISHED,
7293 		  hci_le_pa_sync_estabilished_evt,
7294 		  sizeof(struct hci_ev_le_pa_sync_established)),
7295 	/* [0x0f = HCI_EV_LE_PER_ADV_REPORT] */
7296 	HCI_LE_EV_VL(HCI_EV_LE_PER_ADV_REPORT,
7297 		     hci_le_per_adv_report_evt,
7298 		     sizeof(struct hci_ev_le_per_adv_report),
7299 		     HCI_MAX_EVENT_SIZE),
7300 	/* [0x12 = HCI_EV_LE_EXT_ADV_SET_TERM] */
7301 	HCI_LE_EV(HCI_EV_LE_EXT_ADV_SET_TERM, hci_le_ext_adv_term_evt,
7302 		  sizeof(struct hci_evt_le_ext_adv_set_term)),
7303 	/* [0x19 = HCI_EVT_LE_CIS_ESTABLISHED] */
7304 	HCI_LE_EV(HCI_EVT_LE_CIS_ESTABLISHED, hci_le_cis_estabilished_evt,
7305 		  sizeof(struct hci_evt_le_cis_established)),
7306 	/* [0x1a = HCI_EVT_LE_CIS_REQ] */
7307 	HCI_LE_EV(HCI_EVT_LE_CIS_REQ, hci_le_cis_req_evt,
7308 		  sizeof(struct hci_evt_le_cis_req)),
7309 	/* [0x1b = HCI_EVT_LE_CREATE_BIG_COMPLETE] */
7310 	HCI_LE_EV_VL(HCI_EVT_LE_CREATE_BIG_COMPLETE,
7311 		     hci_le_create_big_complete_evt,
7312 		     sizeof(struct hci_evt_le_create_big_complete),
7313 		     HCI_MAX_EVENT_SIZE),
7314 	/* [0x1d = HCI_EVT_LE_BIG_SYNC_ESTABILISHED] */
7315 	HCI_LE_EV_VL(HCI_EVT_LE_BIG_SYNC_ESTABILISHED,
7316 		     hci_le_big_sync_established_evt,
7317 		     sizeof(struct hci_evt_le_big_sync_estabilished),
7318 		     HCI_MAX_EVENT_SIZE),
7319 	/* [0x22 = HCI_EVT_LE_BIG_INFO_ADV_REPORT] */
7320 	HCI_LE_EV_VL(HCI_EVT_LE_BIG_INFO_ADV_REPORT,
7321 		     hci_le_big_info_adv_report_evt,
7322 		     sizeof(struct hci_evt_le_big_info_adv_report),
7323 		     HCI_MAX_EVENT_SIZE),
7324 };
7325 
7326 static void hci_le_meta_evt(struct hci_dev *hdev, void *data,
7327 			    struct sk_buff *skb, u16 *opcode, u8 *status,
7328 			    hci_req_complete_t *req_complete,
7329 			    hci_req_complete_skb_t *req_complete_skb)
7330 {
7331 	struct hci_ev_le_meta *ev = data;
7332 	const struct hci_le_ev *subev;
7333 
7334 	bt_dev_dbg(hdev, "subevent 0x%2.2x", ev->subevent);
7335 
7336 	/* Only match event if command OGF is for LE */
7337 	if (hdev->sent_cmd &&
7338 	    hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) == 0x08 &&
7339 	    hci_skb_event(hdev->sent_cmd) == ev->subevent) {
7340 		*opcode = hci_skb_opcode(hdev->sent_cmd);
7341 		hci_req_cmd_complete(hdev, *opcode, 0x00, req_complete,
7342 				     req_complete_skb);
7343 	}
7344 
7345 	subev = &hci_le_ev_table[ev->subevent];
7346 	if (!subev->func)
7347 		return;
7348 
7349 	if (skb->len < subev->min_len) {
7350 		bt_dev_err(hdev, "unexpected subevent 0x%2.2x length: %u < %u",
7351 			   ev->subevent, skb->len, subev->min_len);
7352 		return;
7353 	}
7354 
7355 	/* Just warn if the length is over max_len since it may still be
7356 	 * possible to partially parse the event, so leave it to the
7357 	 * callback to decide whether that is acceptable.
7358 	 */
7359 	if (skb->len > subev->max_len)
7360 		bt_dev_warn(hdev, "unexpected subevent 0x%2.2x length: %u > %u",
7361 			    ev->subevent, skb->len, subev->max_len);

7362 	data = hci_le_ev_skb_pull(hdev, skb, ev->subevent, subev->min_len);
7363 	if (!data)
7364 		return;
7365 
7366 	subev->func(hdev, data, skb);
7367 }
7368 
7369 static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
7370 				 u8 event, struct sk_buff *skb)
7371 {
7372 	struct hci_ev_cmd_complete *ev;
7373 	struct hci_event_hdr *hdr;
7374 
7375 	if (!skb)
7376 		return false;
7377 
7378 	hdr = hci_ev_skb_pull(hdev, skb, event, sizeof(*hdr));
7379 	if (!hdr)
7380 		return false;
7381 
7382 	if (event) {
7383 		if (hdr->evt != event)
7384 			return false;
7385 		return true;
7386 	}
7387 
7388 	/* Check if request ended in Command Status - no way to retrieve
7389 	 * any extra parameters in this case.
7390 	 */
7391 	if (hdr->evt == HCI_EV_CMD_STATUS)
7392 		return false;
7393 
7394 	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
7395 		bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
7396 			   hdr->evt);
7397 		return false;
7398 	}
7399 
7400 	ev = hci_cc_skb_pull(hdev, skb, opcode, sizeof(*ev));
7401 	if (!ev)
7402 		return false;
7403 
7404 	if (opcode != __le16_to_cpu(ev->opcode)) {
7405 		BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
7406 		       __le16_to_cpu(ev->opcode));
7407 		return false;
7408 	}
7409 
7410 	return true;
7411 }
7412 
7413 static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
7414 				  struct sk_buff *skb)
7415 {
7416 	struct hci_ev_le_advertising_info *adv;
7417 	struct hci_ev_le_direct_adv_info *direct_adv;
7418 	struct hci_ev_le_ext_adv_info *ext_adv;
7419 	const struct hci_ev_conn_complete *conn_complete = (void *)skb->data;
7420 	const struct hci_ev_conn_request *conn_request = (void *)skb->data;
7421 
7422 	hci_dev_lock(hdev);
7423 
7424 	/* If we are currently suspended and this is the first BT event seen,
7425 	 * save the wake reason associated with the event.
7426 	 */
7427 	if (!hdev->suspended || hdev->wake_reason)
7428 		goto unlock;
7429 
7430 	/* Default to remote wake. Values for wake_reason are documented in the
7431 	 * BlueZ mgmt API docs.
7432 	 */
7433 	hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE;
7434 
7435 	/* Once configured for remote wakeup, we should only wake up for
7436 	 * reconnections. It's useful to see which device is waking us up so
7437 	 * keep track of the bdaddr of the connection event that woke us up.
7438 	 */
7439 	if (event == HCI_EV_CONN_REQUEST) {
7440 		bacpy(&hdev->wake_addr, &conn_request->bdaddr);
7441 		hdev->wake_addr_type = BDADDR_BREDR;
7442 	} else if (event == HCI_EV_CONN_COMPLETE) {
7443 		bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
7444 		hdev->wake_addr_type = BDADDR_BREDR;
7445 	} else if (event == HCI_EV_LE_META) {
7446 		struct hci_ev_le_meta *le_ev = (void *)skb->data;
7447 		u8 subevent = le_ev->subevent;
7448 		u8 *ptr = &skb->data[sizeof(*le_ev)];
7449 		u8 num_reports = *ptr;
7450 
7451 		if ((subevent == HCI_EV_LE_ADVERTISING_REPORT ||
7452 		     subevent == HCI_EV_LE_DIRECT_ADV_REPORT ||
7453 		     subevent == HCI_EV_LE_EXT_ADV_REPORT) &&
7454 		    num_reports) {
7455 			adv = (void *)(ptr + 1);
7456 			direct_adv = (void *)(ptr + 1);
7457 			ext_adv = (void *)(ptr + 1);
7458 
7459 			switch (subevent) {
7460 			case HCI_EV_LE_ADVERTISING_REPORT:
7461 				bacpy(&hdev->wake_addr, &adv->bdaddr);
7462 				hdev->wake_addr_type = adv->bdaddr_type;
7463 				break;
7464 			case HCI_EV_LE_DIRECT_ADV_REPORT:
7465 				bacpy(&hdev->wake_addr, &direct_adv->bdaddr);
7466 				hdev->wake_addr_type = direct_adv->bdaddr_type;
7467 				break;
7468 			case HCI_EV_LE_EXT_ADV_REPORT:
7469 				bacpy(&hdev->wake_addr, &ext_adv->bdaddr);
7470 				hdev->wake_addr_type = ext_adv->bdaddr_type;
7471 				break;
7472 			}
7473 		}
7474 	} else {
7475 		hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED;
7476 	}
7477 
7478 unlock:
7479 	hci_dev_unlock(hdev);
7480 }
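
/* Illustrative sketch (editor's example, not part of the original file):
 * the skb layout assumed by the LE meta branch above, for a legacy
 * advertising report waking the host:
 *
 *	u8 subevent;      struct hci_ev_le_meta, consumed via le_ev
 *	u8 num_reports;   first byte past the meta header (*ptr)
 *	report[0];        struct hci_ev_le_advertising_info at (ptr + 1),
 *	                  carrying the bdaddr/bdaddr_type copied above
 *
 * which is why &skb->data[sizeof(*le_ev)] addresses num_reports and
 * (ptr + 1) the first report entry.
 */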
7481 
7482 #define HCI_EV_VL(_op, _func, _min_len, _max_len) \
7483 [_op] = { \
7484 	.req = false, \
7485 	.func = _func, \
7486 	.min_len = _min_len, \
7487 	.max_len = _max_len, \
7488 }
7489 
7490 #define HCI_EV(_op, _func, _len) \
7491 	HCI_EV_VL(_op, _func, _len, _len)
7492 
7493 #define HCI_EV_STATUS(_op, _func) \
7494 	HCI_EV(_op, _func, sizeof(struct hci_ev_status))
7495 
7496 #define HCI_EV_REQ_VL(_op, _func, _min_len, _max_len) \
7497 [_op] = { \
7498 	.req = true, \
7499 	.func_req = _func, \
7500 	.min_len = _min_len, \
7501 	.max_len = _max_len, \
7502 }
7503 
7504 #define HCI_EV_REQ(_op, _func, _len) \
7505 	HCI_EV_REQ_VL(_op, _func, _len, _len)
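
/* For reference (editor's expansion example, not part of the original
 * file): a fixed-length entry such as
 *
 *	HCI_EV(HCI_EV_ROLE_CHANGE, hci_role_change_evt,
 *	       sizeof(struct hci_ev_role_change))
 *
 * expands through HCI_EV_VL() to the designated initializer
 *
 *	[HCI_EV_ROLE_CHANGE] = {
 *		.req = false,
 *		.func = hci_role_change_evt,
 *		.min_len = sizeof(struct hci_ev_role_change),
 *		.max_len = sizeof(struct hci_ev_role_change),
 *	},
 */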
7506 
7507 /* Entries in this table shall be positioned according to the event opcode
7508  * they handle, so use of the macros above is recommended: they initialize
7509  * each entry at its proper index using designated initializers, and events
7510  * without a callback function need no entry at all.
7511  */
7512 static const struct hci_ev {
7513 	bool req;
7514 	union {
7515 		void (*func)(struct hci_dev *hdev, void *data,
7516 			     struct sk_buff *skb);
7517 		void (*func_req)(struct hci_dev *hdev, void *data,
7518 				 struct sk_buff *skb, u16 *opcode, u8 *status,
7519 				 hci_req_complete_t *req_complete,
7520 				 hci_req_complete_skb_t *req_complete_skb);
7521 	};
7522 	u16  min_len;
7523 	u16  max_len;
7524 } hci_ev_table[U8_MAX + 1] = {
7525 	/* [0x01 = HCI_EV_INQUIRY_COMPLETE] */
7526 	HCI_EV_STATUS(HCI_EV_INQUIRY_COMPLETE, hci_inquiry_complete_evt),
7527 	/* [0x02 = HCI_EV_INQUIRY_RESULT] */
7528 	HCI_EV_VL(HCI_EV_INQUIRY_RESULT, hci_inquiry_result_evt,
7529 		  sizeof(struct hci_ev_inquiry_result), HCI_MAX_EVENT_SIZE),
7530 	/* [0x03 = HCI_EV_CONN_COMPLETE] */
7531 	HCI_EV(HCI_EV_CONN_COMPLETE, hci_conn_complete_evt,
7532 	       sizeof(struct hci_ev_conn_complete)),
7533 	/* [0x04 = HCI_EV_CONN_REQUEST] */
7534 	HCI_EV(HCI_EV_CONN_REQUEST, hci_conn_request_evt,
7535 	       sizeof(struct hci_ev_conn_request)),
7536 	/* [0x05 = HCI_EV_DISCONN_COMPLETE] */
7537 	HCI_EV(HCI_EV_DISCONN_COMPLETE, hci_disconn_complete_evt,
7538 	       sizeof(struct hci_ev_disconn_complete)),
7539 	/* [0x06 = HCI_EV_AUTH_COMPLETE] */
7540 	HCI_EV(HCI_EV_AUTH_COMPLETE, hci_auth_complete_evt,
7541 	       sizeof(struct hci_ev_auth_complete)),
7542 	/* [0x07 = HCI_EV_REMOTE_NAME] */
7543 	HCI_EV(HCI_EV_REMOTE_NAME, hci_remote_name_evt,
7544 	       sizeof(struct hci_ev_remote_name)),
7545 	/* [0x08 = HCI_EV_ENCRYPT_CHANGE] */
7546 	HCI_EV(HCI_EV_ENCRYPT_CHANGE, hci_encrypt_change_evt,
7547 	       sizeof(struct hci_ev_encrypt_change)),
7548 	/* [0x09 = HCI_EV_CHANGE_LINK_KEY_COMPLETE] */
7549 	HCI_EV(HCI_EV_CHANGE_LINK_KEY_COMPLETE,
7550 	       hci_change_link_key_complete_evt,
7551 	       sizeof(struct hci_ev_change_link_key_complete)),
7552 	/* [0x0b = HCI_EV_REMOTE_FEATURES] */
7553 	HCI_EV(HCI_EV_REMOTE_FEATURES, hci_remote_features_evt,
7554 	       sizeof(struct hci_ev_remote_features)),
7555 	/* [0x0e = HCI_EV_CMD_COMPLETE] */
7556 	HCI_EV_REQ_VL(HCI_EV_CMD_COMPLETE, hci_cmd_complete_evt,
7557 		      sizeof(struct hci_ev_cmd_complete), HCI_MAX_EVENT_SIZE),
7558 	/* [0x0f = HCI_EV_CMD_STATUS] */
7559 	HCI_EV_REQ(HCI_EV_CMD_STATUS, hci_cmd_status_evt,
7560 		   sizeof(struct hci_ev_cmd_status)),
7561 	/* [0x10 = HCI_EV_HARDWARE_ERROR] */
7562 	HCI_EV(HCI_EV_HARDWARE_ERROR, hci_hardware_error_evt,
7563 	       sizeof(struct hci_ev_hardware_error)),
7564 	/* [0x12 = HCI_EV_ROLE_CHANGE] */
7565 	HCI_EV(HCI_EV_ROLE_CHANGE, hci_role_change_evt,
7566 	       sizeof(struct hci_ev_role_change)),
7567 	/* [0x13 = HCI_EV_NUM_COMP_PKTS] */
7568 	HCI_EV_VL(HCI_EV_NUM_COMP_PKTS, hci_num_comp_pkts_evt,
7569 		  sizeof(struct hci_ev_num_comp_pkts), HCI_MAX_EVENT_SIZE),
7570 	/* [0x14 = HCI_EV_MODE_CHANGE] */
7571 	HCI_EV(HCI_EV_MODE_CHANGE, hci_mode_change_evt,
7572 	       sizeof(struct hci_ev_mode_change)),
7573 	/* [0x16 = HCI_EV_PIN_CODE_REQ] */
7574 	HCI_EV(HCI_EV_PIN_CODE_REQ, hci_pin_code_request_evt,
7575 	       sizeof(struct hci_ev_pin_code_req)),
7576 	/* [0x17 = HCI_EV_LINK_KEY_REQ] */
7577 	HCI_EV(HCI_EV_LINK_KEY_REQ, hci_link_key_request_evt,
7578 	       sizeof(struct hci_ev_link_key_req)),
7579 	/* [0x18 = HCI_EV_LINK_KEY_NOTIFY] */
7580 	HCI_EV(HCI_EV_LINK_KEY_NOTIFY, hci_link_key_notify_evt,
7581 	       sizeof(struct hci_ev_link_key_notify)),
7582 	/* [0x1c = HCI_EV_CLOCK_OFFSET] */
7583 	HCI_EV(HCI_EV_CLOCK_OFFSET, hci_clock_offset_evt,
7584 	       sizeof(struct hci_ev_clock_offset)),
7585 	/* [0x1d = HCI_EV_PKT_TYPE_CHANGE] */
7586 	HCI_EV(HCI_EV_PKT_TYPE_CHANGE, hci_pkt_type_change_evt,
7587 	       sizeof(struct hci_ev_pkt_type_change)),
7588 	/* [0x20 = HCI_EV_PSCAN_REP_MODE] */
7589 	HCI_EV(HCI_EV_PSCAN_REP_MODE, hci_pscan_rep_mode_evt,
7590 	       sizeof(struct hci_ev_pscan_rep_mode)),
7591 	/* [0x22 = HCI_EV_INQUIRY_RESULT_WITH_RSSI] */
7592 	HCI_EV_VL(HCI_EV_INQUIRY_RESULT_WITH_RSSI,
7593 		  hci_inquiry_result_with_rssi_evt,
7594 		  sizeof(struct hci_ev_inquiry_result_rssi),
7595 		  HCI_MAX_EVENT_SIZE),
7596 	/* [0x23 = HCI_EV_REMOTE_EXT_FEATURES] */
7597 	HCI_EV(HCI_EV_REMOTE_EXT_FEATURES, hci_remote_ext_features_evt,
7598 	       sizeof(struct hci_ev_remote_ext_features)),
7599 	/* [0x2c = HCI_EV_SYNC_CONN_COMPLETE] */
7600 	HCI_EV(HCI_EV_SYNC_CONN_COMPLETE, hci_sync_conn_complete_evt,
7601 	       sizeof(struct hci_ev_sync_conn_complete)),
7602 	/* [0x2d = HCI_EV_EXTENDED_INQUIRY_RESULT] */
7603 	HCI_EV_VL(HCI_EV_EXTENDED_INQUIRY_RESULT,
7604 		  hci_extended_inquiry_result_evt,
7605 		  sizeof(struct hci_ev_ext_inquiry_result), HCI_MAX_EVENT_SIZE),
7606 	/* [0x30 = HCI_EV_KEY_REFRESH_COMPLETE] */
7607 	HCI_EV(HCI_EV_KEY_REFRESH_COMPLETE, hci_key_refresh_complete_evt,
7608 	       sizeof(struct hci_ev_key_refresh_complete)),
7609 	/* [0x31 = HCI_EV_IO_CAPA_REQUEST] */
7610 	HCI_EV(HCI_EV_IO_CAPA_REQUEST, hci_io_capa_request_evt,
7611 	       sizeof(struct hci_ev_io_capa_request)),
7612 	/* [0x32 = HCI_EV_IO_CAPA_REPLY] */
7613 	HCI_EV(HCI_EV_IO_CAPA_REPLY, hci_io_capa_reply_evt,
7614 	       sizeof(struct hci_ev_io_capa_reply)),
7615 	/* [0x33 = HCI_EV_USER_CONFIRM_REQUEST] */
7616 	HCI_EV(HCI_EV_USER_CONFIRM_REQUEST, hci_user_confirm_request_evt,
7617 	       sizeof(struct hci_ev_user_confirm_req)),
7618 	/* [0x34 = HCI_EV_USER_PASSKEY_REQUEST] */
7619 	HCI_EV(HCI_EV_USER_PASSKEY_REQUEST, hci_user_passkey_request_evt,
7620 	       sizeof(struct hci_ev_user_passkey_req)),
7621 	/* [0x35 = HCI_EV_REMOTE_OOB_DATA_REQUEST] */
7622 	HCI_EV(HCI_EV_REMOTE_OOB_DATA_REQUEST, hci_remote_oob_data_request_evt,
7623 	       sizeof(struct hci_ev_remote_oob_data_request)),
7624 	/* [0x36 = HCI_EV_SIMPLE_PAIR_COMPLETE] */
7625 	HCI_EV(HCI_EV_SIMPLE_PAIR_COMPLETE, hci_simple_pair_complete_evt,
7626 	       sizeof(struct hci_ev_simple_pair_complete)),
7627 	/* [0x3b = HCI_EV_USER_PASSKEY_NOTIFY] */
7628 	HCI_EV(HCI_EV_USER_PASSKEY_NOTIFY, hci_user_passkey_notify_evt,
7629 	       sizeof(struct hci_ev_user_passkey_notify)),
7630 	/* [0x3c = HCI_EV_KEYPRESS_NOTIFY] */
7631 	HCI_EV(HCI_EV_KEYPRESS_NOTIFY, hci_keypress_notify_evt,
7632 	       sizeof(struct hci_ev_keypress_notify)),
7633 	/* [0x3d = HCI_EV_REMOTE_HOST_FEATURES] */
7634 	HCI_EV(HCI_EV_REMOTE_HOST_FEATURES, hci_remote_host_features_evt,
7635 	       sizeof(struct hci_ev_remote_host_features)),
7636 	/* [0x3e = HCI_EV_LE_META] */
7637 	HCI_EV_REQ_VL(HCI_EV_LE_META, hci_le_meta_evt,
7638 		      sizeof(struct hci_ev_le_meta), HCI_MAX_EVENT_SIZE),
7639 #if IS_ENABLED(CONFIG_BT_HS)
7640 	/* [0x40 = HCI_EV_PHY_LINK_COMPLETE] */
7641 	HCI_EV(HCI_EV_PHY_LINK_COMPLETE, hci_phy_link_complete_evt,
7642 	       sizeof(struct hci_ev_phy_link_complete)),
7643 	/* [0x41 = HCI_EV_CHANNEL_SELECTED] */
7644 	HCI_EV(HCI_EV_CHANNEL_SELECTED, hci_chan_selected_evt,
7645 	       sizeof(struct hci_ev_channel_selected)),
7646 	/* [0x42 = HCI_EV_DISCONN_PHY_LINK_COMPLETE] */
7647 	HCI_EV(HCI_EV_DISCONN_PHY_LINK_COMPLETE,
7648 	       hci_disconn_phylink_complete_evt,
7649 	       sizeof(struct hci_ev_disconn_phy_link_complete)),
7650 	/* [0x45 = HCI_EV_LOGICAL_LINK_COMPLETE] */
7651 	HCI_EV(HCI_EV_LOGICAL_LINK_COMPLETE, hci_loglink_complete_evt,
7652 	       sizeof(struct hci_ev_logical_link_complete)),
7653 	/* [0x46 = HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE] */
7654 	HCI_EV(HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE,
7655 	       hci_disconn_loglink_complete_evt,
7656 	       sizeof(struct hci_ev_disconn_logical_link_complete)),
7657 #endif
7658 	/* [0x48 = HCI_EV_NUM_COMP_BLOCKS] */
7659 	HCI_EV(HCI_EV_NUM_COMP_BLOCKS, hci_num_comp_blocks_evt,
7660 	       sizeof(struct hci_ev_num_comp_blocks)),
7661 	/* [0xff = HCI_EV_VENDOR] */
7662 	HCI_EV_VL(HCI_EV_VENDOR, msft_vendor_evt, 0, HCI_MAX_EVENT_SIZE),
7663 };
7664 
7665 static void hci_event_func(struct hci_dev *hdev, u8 event, struct sk_buff *skb,
7666 			   u16 *opcode, u8 *status,
7667 			   hci_req_complete_t *req_complete,
7668 			   hci_req_complete_skb_t *req_complete_skb)
7669 {
7670 	const struct hci_ev *ev = &hci_ev_table[event];
7671 	void *data;
7672 
7673 	if (!ev->func)
7674 		return;
7675 
7676 	if (skb->len < ev->min_len) {
7677 		bt_dev_err(hdev, "unexpected event 0x%2.2x length: %u < %u",
7678 			   event, skb->len, ev->min_len);
7679 		return;
7680 	}
7681 
7682 	/* Just warn if the length is over the max_len size; it may still be
7683 	 * possible to partially parse the event, so leave it to the callback
7684 	 * to decide if that is acceptable.
7685 	 */
7686 	if (skb->len > ev->max_len)
7687 		bt_dev_warn_ratelimited(hdev,
7688 					"unexpected event 0x%2.2x length: %u > %u",
7689 					event, skb->len, ev->max_len);
7690 
7691 	data = hci_ev_skb_pull(hdev, skb, event, ev->min_len);
7692 	if (!data)
7693 		return;
7694 
7695 	if (ev->req)
7696 		ev->func_req(hdev, data, skb, opcode, status, req_complete,
7697 			     req_complete_skb);
7698 	else
7699 		ev->func(hdev, data, skb);
7700 }
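
/* Illustrative walk-through (editor's example, not part of the original
 * file): for a Disconnection Complete event the lookup above resolves to
 *
 *	ev = &hci_ev_table[HCI_EV_DISCONN_COMPLETE];
 *
 * where min_len == max_len == sizeof(struct hci_ev_disconn_complete), so
 * a well-formed packet is pulled once and dispatched via
 *
 *	ev->func(hdev, data, skb);	 // hci_disconn_complete_evt()
 *
 * with data pointing at the fixed-size event parameters.  Events with no
 * table entry leave ev->func NULL and are silently ignored.
 */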
7701 
7702 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
7703 {
7704 	struct hci_event_hdr *hdr = (void *) skb->data;
7705 	hci_req_complete_t req_complete = NULL;
7706 	hci_req_complete_skb_t req_complete_skb = NULL;
7707 	struct sk_buff *orig_skb = NULL;
7708 	u8 status = 0, event, req_evt = 0;
7709 	u16 opcode = HCI_OP_NOP;
7710 
7711 	if (skb->len < sizeof(*hdr)) {
7712 		bt_dev_err(hdev, "Malformed HCI Event");
7713 		goto done;
7714 	}
7715 
7716 	kfree_skb(hdev->recv_event);
7717 	hdev->recv_event = skb_clone(skb, GFP_KERNEL);
7718 
7719 	event = hdr->evt;
7720 	if (!event) {
7721 		bt_dev_warn(hdev, "Received unexpected HCI Event 0x%2.2x",
7722 			    event);
7723 		goto done;
7724 	}
7725 
7726 	/* Only match event if command OGF is not for LE */
7727 	if (hdev->sent_cmd &&
7728 	    hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) != 0x08 &&
7729 	    hci_skb_event(hdev->sent_cmd) == event) {
7730 		hci_req_cmd_complete(hdev, hci_skb_opcode(hdev->sent_cmd),
7731 				     status, &req_complete, &req_complete_skb);
7732 		req_evt = event;
7733 	}
7734 
7735 	/* If it looks like we might end up having to call
7736 	 * req_complete_skb, store a pristine copy of the skb since the
7737 	 * various handlers may modify the original one through
7738 	 * skb_pull() calls, etc.
7739 	 */
7740 	if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
7741 	    event == HCI_EV_CMD_COMPLETE)
7742 		orig_skb = skb_clone(skb, GFP_KERNEL);
7743 
7744 	skb_pull(skb, HCI_EVENT_HDR_SIZE);
7745 
7746 	/* Store wake reason if we're suspended */
7747 	hci_store_wake_reason(hdev, event, skb);
7748 
7749 	bt_dev_dbg(hdev, "event 0x%2.2x", event);
7750 
7751 	hci_event_func(hdev, event, skb, &opcode, &status, &req_complete,
7752 		       &req_complete_skb);
7753 
7754 	if (req_complete) {
7755 		req_complete(hdev, status, opcode);
7756 	} else if (req_complete_skb) {
7757 		if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
7758 			kfree_skb(orig_skb);
7759 			orig_skb = NULL;
7760 		}
7761 		req_complete_skb(hdev, status, opcode, orig_skb);
7762 	}
7763 
7764 done:
7765 	kfree_skb(orig_skb);
7766 	kfree_skb(skb);
7767 	hdev->stat.evt_rx++;
7768 }
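
/* Sketch of the ingress path (editor's example under the assumption of
 * standard transport-driver usage; not part of this file): a driver that
 * received an event buffer from its hardware would typically do
 *
 *	struct sk_buff *skb = bt_skb_alloc(len, GFP_ATOMIC);
 *
 *	if (skb) {
 *		skb_put_data(skb, buf, len);
 *		hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
 *		hci_recv_frame(hdev, skb);
 *	}
 *
 * after which the core hands the skb to hci_event_packet() above.
 */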
7769