xref: /openbmc/linux/net/bluetooth/hci_event.c (revision c2a5a45c)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4 
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI event handling. */
26 
27 #include <asm/unaligned.h>
28 
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
32 
33 #include "hci_request.h"
34 #include "hci_debugfs.h"
35 #include "a2mp.h"
36 #include "amp.h"
37 #include "smp.h"
38 #include "msft.h"
39 
40 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
41 		 "\x00\x00\x00\x00\x00\x00\x00\x00"
42 
43 /* Handle HCI Event packets */
44 
45 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb,
46 				  u8 *new_status)
47 {
48 	__u8 status = *((__u8 *) skb->data);
49 
50 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
51 
52 	/* It is possible that we receive Inquiry Complete event right
53 	 * before we receive Inquiry Cancel Command Complete event, in
54 	 * which case the latter event should have status of Command
55 	 * Disallowed (0x0c). This should not be treated as error, since
56 	 * we actually achieve what Inquiry Cancel wants to achieve,
57 	 * which is to end the last Inquiry session.
58 	 */
59 	if (status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
60 		bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
61 		status = 0x00;
62 	}
63 
64 	*new_status = status;
65 
66 	if (status)
67 		return;
68 
69 	clear_bit(HCI_INQUIRY, &hdev->flags);
70 	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
71 	wake_up_bit(&hdev->flags, HCI_INQUIRY);
72 
73 	hci_dev_lock(hdev);
74 	/* Set discovery state to stopped if we're not doing LE active
75 	 * scanning.
76 	 */
77 	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
78 	    hdev->le_scan_type != LE_SCAN_ACTIVE)
79 		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
80 	hci_dev_unlock(hdev);
81 
82 	hci_conn_check_pending(hdev);
83 }
84 
85 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
86 {
87 	__u8 status = *((__u8 *) skb->data);
88 
89 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
90 
91 	if (status)
92 		return;
93 
94 	hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
95 }
96 
97 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
98 {
99 	__u8 status = *((__u8 *) skb->data);
100 
101 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
102 
103 	if (status)
104 		return;
105 
106 	hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
107 
108 	hci_conn_check_pending(hdev);
109 }
110 
111 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
112 					  struct sk_buff *skb)
113 {
114 	BT_DBG("%s", hdev->name);
115 }
116 
117 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
118 {
119 	struct hci_rp_role_discovery *rp = (void *) skb->data;
120 	struct hci_conn *conn;
121 
122 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
123 
124 	if (rp->status)
125 		return;
126 
127 	hci_dev_lock(hdev);
128 
129 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
130 	if (conn)
131 		conn->role = rp->role;
132 
133 	hci_dev_unlock(hdev);
134 }
135 
136 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
137 {
138 	struct hci_rp_read_link_policy *rp = (void *) skb->data;
139 	struct hci_conn *conn;
140 
141 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
142 
143 	if (rp->status)
144 		return;
145 
146 	hci_dev_lock(hdev);
147 
148 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
149 	if (conn)
150 		conn->link_policy = __le16_to_cpu(rp->policy);
151 
152 	hci_dev_unlock(hdev);
153 }
154 
155 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
156 {
157 	struct hci_rp_write_link_policy *rp = (void *) skb->data;
158 	struct hci_conn *conn;
159 	void *sent;
160 
161 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
162 
163 	if (rp->status)
164 		return;
165 
166 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
167 	if (!sent)
168 		return;
169 
170 	hci_dev_lock(hdev);
171 
172 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
173 	if (conn)
174 		conn->link_policy = get_unaligned_le16(sent + 2);
175 
176 	hci_dev_unlock(hdev);
177 }
178 
179 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
180 					struct sk_buff *skb)
181 {
182 	struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
183 
184 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
185 
186 	if (rp->status)
187 		return;
188 
189 	hdev->link_policy = __le16_to_cpu(rp->policy);
190 }
191 
192 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
193 					 struct sk_buff *skb)
194 {
195 	__u8 status = *((__u8 *) skb->data);
196 	void *sent;
197 
198 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
199 
200 	if (status)
201 		return;
202 
203 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
204 	if (!sent)
205 		return;
206 
207 	hdev->link_policy = get_unaligned_le16(sent);
208 }
209 
210 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
211 {
212 	__u8 status = *((__u8 *) skb->data);
213 
214 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
215 
216 	clear_bit(HCI_RESET, &hdev->flags);
217 
218 	if (status)
219 		return;
220 
221 	/* Reset all non-persistent flags */
222 	hci_dev_clear_volatile_flags(hdev);
223 
224 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
225 
226 	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
227 	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
228 
229 	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
230 	hdev->adv_data_len = 0;
231 
232 	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
233 	hdev->scan_rsp_data_len = 0;
234 
235 	hdev->le_scan_type = LE_SCAN_PASSIVE;
236 
237 	hdev->ssp_debug_mode = 0;
238 
239 	hci_bdaddr_list_clear(&hdev->le_white_list);
240 	hci_bdaddr_list_clear(&hdev->le_resolv_list);
241 }
242 
243 static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
244 					struct sk_buff *skb)
245 {
246 	struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
247 	struct hci_cp_read_stored_link_key *sent;
248 
249 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
250 
251 	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
252 	if (!sent)
253 		return;
254 
255 	if (!rp->status && sent->read_all == 0x01) {
256 		hdev->stored_max_keys = rp->max_keys;
257 		hdev->stored_num_keys = rp->num_keys;
258 	}
259 }
260 
261 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
262 					  struct sk_buff *skb)
263 {
264 	struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;
265 
266 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
267 
268 	if (rp->status)
269 		return;
270 
271 	if (rp->num_keys <= hdev->stored_num_keys)
272 		hdev->stored_num_keys -= rp->num_keys;
273 	else
274 		hdev->stored_num_keys = 0;
275 }
276 
277 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
278 {
279 	__u8 status = *((__u8 *) skb->data);
280 	void *sent;
281 
282 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
283 
284 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
285 	if (!sent)
286 		return;
287 
288 	hci_dev_lock(hdev);
289 
290 	if (hci_dev_test_flag(hdev, HCI_MGMT))
291 		mgmt_set_local_name_complete(hdev, sent, status);
292 	else if (!status)
293 		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
294 
295 	hci_dev_unlock(hdev);
296 }
297 
298 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
299 {
300 	struct hci_rp_read_local_name *rp = (void *) skb->data;
301 
302 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
303 
304 	if (rp->status)
305 		return;
306 
307 	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
308 	    hci_dev_test_flag(hdev, HCI_CONFIG))
309 		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
310 }
311 
312 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
313 {
314 	__u8 status = *((__u8 *) skb->data);
315 	void *sent;
316 
317 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
318 
319 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
320 	if (!sent)
321 		return;
322 
323 	hci_dev_lock(hdev);
324 
325 	if (!status) {
326 		__u8 param = *((__u8 *) sent);
327 
328 		if (param == AUTH_ENABLED)
329 			set_bit(HCI_AUTH, &hdev->flags);
330 		else
331 			clear_bit(HCI_AUTH, &hdev->flags);
332 	}
333 
334 	if (hci_dev_test_flag(hdev, HCI_MGMT))
335 		mgmt_auth_enable_complete(hdev, status);
336 
337 	hci_dev_unlock(hdev);
338 }
339 
340 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
341 {
342 	__u8 status = *((__u8 *) skb->data);
343 	__u8 param;
344 	void *sent;
345 
346 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
347 
348 	if (status)
349 		return;
350 
351 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
352 	if (!sent)
353 		return;
354 
355 	param = *((__u8 *) sent);
356 
357 	if (param)
358 		set_bit(HCI_ENCRYPT, &hdev->flags);
359 	else
360 		clear_bit(HCI_ENCRYPT, &hdev->flags);
361 }
362 
363 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
364 {
365 	__u8 status = *((__u8 *) skb->data);
366 	__u8 param;
367 	void *sent;
368 
369 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
370 
371 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
372 	if (!sent)
373 		return;
374 
375 	param = *((__u8 *) sent);
376 
377 	hci_dev_lock(hdev);
378 
379 	if (status) {
380 		hdev->discov_timeout = 0;
381 		goto done;
382 	}
383 
384 	if (param & SCAN_INQUIRY)
385 		set_bit(HCI_ISCAN, &hdev->flags);
386 	else
387 		clear_bit(HCI_ISCAN, &hdev->flags);
388 
389 	if (param & SCAN_PAGE)
390 		set_bit(HCI_PSCAN, &hdev->flags);
391 	else
392 		clear_bit(HCI_PSCAN, &hdev->flags);
393 
394 done:
395 	hci_dev_unlock(hdev);
396 }
397 
398 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
399 {
400 	struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
401 
402 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
403 
404 	if (rp->status)
405 		return;
406 
407 	memcpy(hdev->dev_class, rp->dev_class, 3);
408 
409 	BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
410 	       hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
411 }
412 
413 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
414 {
415 	__u8 status = *((__u8 *) skb->data);
416 	void *sent;
417 
418 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
419 
420 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
421 	if (!sent)
422 		return;
423 
424 	hci_dev_lock(hdev);
425 
426 	if (status == 0)
427 		memcpy(hdev->dev_class, sent, 3);
428 
429 	if (hci_dev_test_flag(hdev, HCI_MGMT))
430 		mgmt_set_class_of_dev_complete(hdev, sent, status);
431 
432 	hci_dev_unlock(hdev);
433 }
434 
435 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
436 {
437 	struct hci_rp_read_voice_setting *rp = (void *) skb->data;
438 	__u16 setting;
439 
440 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
441 
442 	if (rp->status)
443 		return;
444 
445 	setting = __le16_to_cpu(rp->voice_setting);
446 
447 	if (hdev->voice_setting == setting)
448 		return;
449 
450 	hdev->voice_setting = setting;
451 
452 	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
453 
454 	if (hdev->notify)
455 		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
456 }
457 
458 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
459 				       struct sk_buff *skb)
460 {
461 	__u8 status = *((__u8 *) skb->data);
462 	__u16 setting;
463 	void *sent;
464 
465 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
466 
467 	if (status)
468 		return;
469 
470 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
471 	if (!sent)
472 		return;
473 
474 	setting = get_unaligned_le16(sent);
475 
476 	if (hdev->voice_setting == setting)
477 		return;
478 
479 	hdev->voice_setting = setting;
480 
481 	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
482 
483 	if (hdev->notify)
484 		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
485 }
486 
487 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
488 					  struct sk_buff *skb)
489 {
490 	struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
491 
492 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
493 
494 	if (rp->status)
495 		return;
496 
497 	hdev->num_iac = rp->num_iac;
498 
499 	BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
500 }
501 
502 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
503 {
504 	__u8 status = *((__u8 *) skb->data);
505 	struct hci_cp_write_ssp_mode *sent;
506 
507 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
508 
509 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
510 	if (!sent)
511 		return;
512 
513 	hci_dev_lock(hdev);
514 
515 	if (!status) {
516 		if (sent->mode)
517 			hdev->features[1][0] |= LMP_HOST_SSP;
518 		else
519 			hdev->features[1][0] &= ~LMP_HOST_SSP;
520 	}
521 
522 	if (hci_dev_test_flag(hdev, HCI_MGMT))
523 		mgmt_ssp_enable_complete(hdev, sent->mode, status);
524 	else if (!status) {
525 		if (sent->mode)
526 			hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
527 		else
528 			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
529 	}
530 
531 	hci_dev_unlock(hdev);
532 }
533 
534 static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
535 {
536 	u8 status = *((u8 *) skb->data);
537 	struct hci_cp_write_sc_support *sent;
538 
539 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
540 
541 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
542 	if (!sent)
543 		return;
544 
545 	hci_dev_lock(hdev);
546 
547 	if (!status) {
548 		if (sent->support)
549 			hdev->features[1][0] |= LMP_HOST_SC;
550 		else
551 			hdev->features[1][0] &= ~LMP_HOST_SC;
552 	}
553 
554 	if (!hci_dev_test_flag(hdev, HCI_MGMT) && !status) {
555 		if (sent->support)
556 			hci_dev_set_flag(hdev, HCI_SC_ENABLED);
557 		else
558 			hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
559 	}
560 
561 	hci_dev_unlock(hdev);
562 }
563 
564 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
565 {
566 	struct hci_rp_read_local_version *rp = (void *) skb->data;
567 
568 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
569 
570 	if (rp->status)
571 		return;
572 
573 	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
574 	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
575 		hdev->hci_ver = rp->hci_ver;
576 		hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
577 		hdev->lmp_ver = rp->lmp_ver;
578 		hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
579 		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
580 	}
581 }
582 
583 static void hci_cc_read_local_commands(struct hci_dev *hdev,
584 				       struct sk_buff *skb)
585 {
586 	struct hci_rp_read_local_commands *rp = (void *) skb->data;
587 
588 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
589 
590 	if (rp->status)
591 		return;
592 
593 	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
594 	    hci_dev_test_flag(hdev, HCI_CONFIG))
595 		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
596 }
597 
598 static void hci_cc_read_auth_payload_timeout(struct hci_dev *hdev,
599 					     struct sk_buff *skb)
600 {
601 	struct hci_rp_read_auth_payload_to *rp = (void *)skb->data;
602 	struct hci_conn *conn;
603 
604 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
605 
606 	if (rp->status)
607 		return;
608 
609 	hci_dev_lock(hdev);
610 
611 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
612 	if (conn)
613 		conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);
614 
615 	hci_dev_unlock(hdev);
616 }
617 
618 static void hci_cc_write_auth_payload_timeout(struct hci_dev *hdev,
619 					      struct sk_buff *skb)
620 {
621 	struct hci_rp_write_auth_payload_to *rp = (void *)skb->data;
622 	struct hci_conn *conn;
623 	void *sent;
624 
625 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
626 
627 	if (rp->status)
628 		return;
629 
630 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
631 	if (!sent)
632 		return;
633 
634 	hci_dev_lock(hdev);
635 
636 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
637 	if (conn)
638 		conn->auth_payload_timeout = get_unaligned_le16(sent + 2);
639 
640 	hci_dev_unlock(hdev);
641 }
642 
643 static void hci_cc_read_local_features(struct hci_dev *hdev,
644 				       struct sk_buff *skb)
645 {
646 	struct hci_rp_read_local_features *rp = (void *) skb->data;
647 
648 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
649 
650 	if (rp->status)
651 		return;
652 
653 	memcpy(hdev->features, rp->features, 8);
654 
655 	/* Adjust default settings according to features
656 	 * supported by device. */
657 
658 	if (hdev->features[0][0] & LMP_3SLOT)
659 		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
660 
661 	if (hdev->features[0][0] & LMP_5SLOT)
662 		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
663 
664 	if (hdev->features[0][1] & LMP_HV2) {
665 		hdev->pkt_type  |= (HCI_HV2);
666 		hdev->esco_type |= (ESCO_HV2);
667 	}
668 
669 	if (hdev->features[0][1] & LMP_HV3) {
670 		hdev->pkt_type  |= (HCI_HV3);
671 		hdev->esco_type |= (ESCO_HV3);
672 	}
673 
674 	if (lmp_esco_capable(hdev))
675 		hdev->esco_type |= (ESCO_EV3);
676 
677 	if (hdev->features[0][4] & LMP_EV4)
678 		hdev->esco_type |= (ESCO_EV4);
679 
680 	if (hdev->features[0][4] & LMP_EV5)
681 		hdev->esco_type |= (ESCO_EV5);
682 
683 	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
684 		hdev->esco_type |= (ESCO_2EV3);
685 
686 	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
687 		hdev->esco_type |= (ESCO_3EV3);
688 
689 	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
690 		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
691 }
692 
693 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
694 					   struct sk_buff *skb)
695 {
696 	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
697 
698 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
699 
700 	if (rp->status)
701 		return;
702 
703 	if (hdev->max_page < rp->max_page)
704 		hdev->max_page = rp->max_page;
705 
706 	if (rp->page < HCI_MAX_PAGES)
707 		memcpy(hdev->features[rp->page], rp->features, 8);
708 }
709 
710 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
711 					  struct sk_buff *skb)
712 {
713 	struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
714 
715 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
716 
717 	if (rp->status)
718 		return;
719 
720 	hdev->flow_ctl_mode = rp->mode;
721 }
722 
723 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
724 {
725 	struct hci_rp_read_buffer_size *rp = (void *) skb->data;
726 
727 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
728 
729 	if (rp->status)
730 		return;
731 
732 	hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
733 	hdev->sco_mtu  = rp->sco_mtu;
734 	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
735 	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
736 
737 	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
738 		hdev->sco_mtu  = 64;
739 		hdev->sco_pkts = 8;
740 	}
741 
742 	hdev->acl_cnt = hdev->acl_pkts;
743 	hdev->sco_cnt = hdev->sco_pkts;
744 
745 	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
746 	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
747 }
748 
749 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
750 {
751 	struct hci_rp_read_bd_addr *rp = (void *) skb->data;
752 
753 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
754 
755 	if (rp->status)
756 		return;
757 
758 	if (test_bit(HCI_INIT, &hdev->flags))
759 		bacpy(&hdev->bdaddr, &rp->bdaddr);
760 
761 	if (hci_dev_test_flag(hdev, HCI_SETUP))
762 		bacpy(&hdev->setup_addr, &rp->bdaddr);
763 }
764 
765 static void hci_cc_read_local_pairing_opts(struct hci_dev *hdev,
766 					   struct sk_buff *skb)
767 {
768 	struct hci_rp_read_local_pairing_opts *rp = (void *) skb->data;
769 
770 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
771 
772 	if (rp->status)
773 		return;
774 
775 	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
776 	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
777 		hdev->pairing_opts = rp->pairing_opts;
778 		hdev->max_enc_key_size = rp->max_key_size;
779 	}
780 }
781 
782 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
783 					   struct sk_buff *skb)
784 {
785 	struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
786 
787 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
788 
789 	if (rp->status)
790 		return;
791 
792 	if (test_bit(HCI_INIT, &hdev->flags)) {
793 		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
794 		hdev->page_scan_window = __le16_to_cpu(rp->window);
795 	}
796 }
797 
798 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
799 					    struct sk_buff *skb)
800 {
801 	u8 status = *((u8 *) skb->data);
802 	struct hci_cp_write_page_scan_activity *sent;
803 
804 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
805 
806 	if (status)
807 		return;
808 
809 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
810 	if (!sent)
811 		return;
812 
813 	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
814 	hdev->page_scan_window = __le16_to_cpu(sent->window);
815 }
816 
817 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
818 					   struct sk_buff *skb)
819 {
820 	struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
821 
822 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
823 
824 	if (rp->status)
825 		return;
826 
827 	if (test_bit(HCI_INIT, &hdev->flags))
828 		hdev->page_scan_type = rp->type;
829 }
830 
831 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
832 					struct sk_buff *skb)
833 {
834 	u8 status = *((u8 *) skb->data);
835 	u8 *type;
836 
837 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
838 
839 	if (status)
840 		return;
841 
842 	type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
843 	if (type)
844 		hdev->page_scan_type = *type;
845 }
846 
847 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
848 					struct sk_buff *skb)
849 {
850 	struct hci_rp_read_data_block_size *rp = (void *) skb->data;
851 
852 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
853 
854 	if (rp->status)
855 		return;
856 
857 	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
858 	hdev->block_len = __le16_to_cpu(rp->block_len);
859 	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
860 
861 	hdev->block_cnt = hdev->num_blocks;
862 
863 	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
864 	       hdev->block_cnt, hdev->block_len);
865 }
866 
867 static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
868 {
869 	struct hci_rp_read_clock *rp = (void *) skb->data;
870 	struct hci_cp_read_clock *cp;
871 	struct hci_conn *conn;
872 
873 	BT_DBG("%s", hdev->name);
874 
875 	if (skb->len < sizeof(*rp))
876 		return;
877 
878 	if (rp->status)
879 		return;
880 
881 	hci_dev_lock(hdev);
882 
883 	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
884 	if (!cp)
885 		goto unlock;
886 
887 	if (cp->which == 0x00) {
888 		hdev->clock = le32_to_cpu(rp->clock);
889 		goto unlock;
890 	}
891 
892 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
893 	if (conn) {
894 		conn->clock = le32_to_cpu(rp->clock);
895 		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
896 	}
897 
898 unlock:
899 	hci_dev_unlock(hdev);
900 }
901 
902 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
903 				       struct sk_buff *skb)
904 {
905 	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
906 
907 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
908 
909 	if (rp->status)
910 		return;
911 
912 	hdev->amp_status = rp->amp_status;
913 	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
914 	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
915 	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
916 	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
917 	hdev->amp_type = rp->amp_type;
918 	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
919 	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
920 	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
921 	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
922 }
923 
924 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
925 					 struct sk_buff *skb)
926 {
927 	struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
928 
929 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
930 
931 	if (rp->status)
932 		return;
933 
934 	hdev->inq_tx_power = rp->tx_power;
935 }
936 
937 static void hci_cc_read_def_err_data_reporting(struct hci_dev *hdev,
938 					       struct sk_buff *skb)
939 {
940 	struct hci_rp_read_def_err_data_reporting *rp = (void *)skb->data;
941 
942 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
943 
944 	if (rp->status)
945 		return;
946 
947 	hdev->err_data_reporting = rp->err_data_reporting;
948 }
949 
950 static void hci_cc_write_def_err_data_reporting(struct hci_dev *hdev,
951 						struct sk_buff *skb)
952 {
953 	__u8 status = *((__u8 *)skb->data);
954 	struct hci_cp_write_def_err_data_reporting *cp;
955 
956 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
957 
958 	if (status)
959 		return;
960 
961 	cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
962 	if (!cp)
963 		return;
964 
965 	hdev->err_data_reporting = cp->err_data_reporting;
966 }
967 
968 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
969 {
970 	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
971 	struct hci_cp_pin_code_reply *cp;
972 	struct hci_conn *conn;
973 
974 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
975 
976 	hci_dev_lock(hdev);
977 
978 	if (hci_dev_test_flag(hdev, HCI_MGMT))
979 		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
980 
981 	if (rp->status)
982 		goto unlock;
983 
984 	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
985 	if (!cp)
986 		goto unlock;
987 
988 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
989 	if (conn)
990 		conn->pin_length = cp->pin_len;
991 
992 unlock:
993 	hci_dev_unlock(hdev);
994 }
995 
996 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
997 {
998 	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
999 
1000 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1001 
1002 	hci_dev_lock(hdev);
1003 
1004 	if (hci_dev_test_flag(hdev, HCI_MGMT))
1005 		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
1006 						 rp->status);
1007 
1008 	hci_dev_unlock(hdev);
1009 }
1010 
1011 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
1012 				       struct sk_buff *skb)
1013 {
1014 	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
1015 
1016 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1017 
1018 	if (rp->status)
1019 		return;
1020 
1021 	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
1022 	hdev->le_pkts = rp->le_max_pkt;
1023 
1024 	hdev->le_cnt = hdev->le_pkts;
1025 
1026 	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
1027 }
1028 
1029 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
1030 					  struct sk_buff *skb)
1031 {
1032 	struct hci_rp_le_read_local_features *rp = (void *) skb->data;
1033 
1034 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1035 
1036 	if (rp->status)
1037 		return;
1038 
1039 	memcpy(hdev->le_features, rp->features, 8);
1040 }
1041 
1042 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
1043 					struct sk_buff *skb)
1044 {
1045 	struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
1046 
1047 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1048 
1049 	if (rp->status)
1050 		return;
1051 
1052 	hdev->adv_tx_power = rp->tx_power;
1053 }
1054 
1055 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
1056 {
1057 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1058 
1059 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1060 
1061 	hci_dev_lock(hdev);
1062 
1063 	if (hci_dev_test_flag(hdev, HCI_MGMT))
1064 		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
1065 						 rp->status);
1066 
1067 	hci_dev_unlock(hdev);
1068 }
1069 
1070 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
1071 					  struct sk_buff *skb)
1072 {
1073 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1074 
1075 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1076 
1077 	hci_dev_lock(hdev);
1078 
1079 	if (hci_dev_test_flag(hdev, HCI_MGMT))
1080 		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
1081 						     ACL_LINK, 0, rp->status);
1082 
1083 	hci_dev_unlock(hdev);
1084 }
1085 
1086 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
1087 {
1088 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1089 
1090 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1091 
1092 	hci_dev_lock(hdev);
1093 
1094 	if (hci_dev_test_flag(hdev, HCI_MGMT))
1095 		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1096 						 0, rp->status);
1097 
1098 	hci_dev_unlock(hdev);
1099 }
1100 
1101 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
1102 					  struct sk_buff *skb)
1103 {
1104 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1105 
1106 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1107 
1108 	hci_dev_lock(hdev);
1109 
1110 	if (hci_dev_test_flag(hdev, HCI_MGMT))
1111 		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1112 						     ACL_LINK, 0, rp->status);
1113 
1114 	hci_dev_unlock(hdev);
1115 }
1116 
1117 static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
1118 				       struct sk_buff *skb)
1119 {
1120 	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1121 
1122 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1123 }
1124 
1125 static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
1126 					   struct sk_buff *skb)
1127 {
1128 	struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
1129 
1130 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1131 }
1132 
1133 static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
1134 {
1135 	__u8 status = *((__u8 *) skb->data);
1136 	bdaddr_t *sent;
1137 
1138 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1139 
1140 	if (status)
1141 		return;
1142 
1143 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1144 	if (!sent)
1145 		return;
1146 
1147 	hci_dev_lock(hdev);
1148 
1149 	bacpy(&hdev->random_addr, sent);
1150 
1151 	hci_dev_unlock(hdev);
1152 }
1153 
/* Command Complete handler for HCI_OP_LE_SET_DEFAULT_PHY.
 *
 * On success, mirror the TX/RX PHY preferences from the sent command
 * into hdev so later requests know the controller's default PHYs.
 */
static void hci_cc_le_set_default_phy(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_le_set_default_phy *cp;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	hdev->le_tx_def_phys = cp->tx_phys;
	hdev->le_rx_def_phys = cp->rx_phys;

	hci_dev_unlock(hdev);
}
1175 
1176 static void hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev,
1177                                               struct sk_buff *skb)
1178 {
1179 	__u8 status = *((__u8 *) skb->data);
1180 	struct hci_cp_le_set_adv_set_rand_addr *cp;
1181 	struct adv_info *adv_instance;
1182 
1183 	if (status)
1184 		return;
1185 
1186 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
1187 	if (!cp)
1188 		return;
1189 
1190 	hci_dev_lock(hdev);
1191 
1192 	if (!hdev->cur_adv_instance) {
1193 		/* Store in hdev for instance 0 (Set adv and Directed advs) */
1194 		bacpy(&hdev->random_addr, &cp->bdaddr);
1195 	} else {
1196 		adv_instance = hci_find_adv_instance(hdev,
1197 						     hdev->cur_adv_instance);
1198 		if (adv_instance)
1199 			bacpy(&adv_instance->random_addr, &cp->bdaddr);
1200 	}
1201 
1202 	hci_dev_unlock(hdev);
1203 }
1204 
/* Command Complete handler for HCI_OP_LE_READ_TRANSMIT_POWER.
 *
 * On success, cache the controller's minimum and maximum LE transmit
 * power levels. No locking is taken here; these fields are simple
 * scalar caches written only from the event path.
 */
static void hci_cc_le_read_transmit_power(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_le_read_transmit_power *rp = (void *)skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->min_le_tx_power = rp->min_le_tx_power;
	hdev->max_le_tx_power = rp->max_le_tx_power;
}
1218 
/* Command Complete handler for HCI_OP_LE_SET_ADV_ENABLE.
 *
 * Track the HCI_LE_ADV device flag according to the enable value we
 * actually sent. When advertising was enabled while a peripheral-role
 * connection attempt is pending, arm the LE connection timeout so the
 * attempt cannot hang forever.
 */
static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	/* If we're doing connection initiation as peripheral. Set a
	 * timeout in case something goes wrong.
	 */
	if (*sent) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

	hci_dev_unlock(hdev);
}
1253 
/* Command Complete handler for HCI_OP_LE_SET_EXT_ADV_ENABLE.
 *
 * Extended-advertising counterpart of hci_cc_le_set_adv_enable():
 * maintain HCI_LE_ADV and arm the LE connection timeout when enabling
 * while a connection attempt is pending.
 */
static void hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_adv_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	if (cp->enable) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

	hci_dev_unlock(hdev);
}
1287 
/* Command Complete handler for HCI_OP_LE_SET_SCAN_PARAM.
 *
 * On success, remember the scan type (active/passive) that was sent so
 * later scan handling knows which mode the controller is using.
 */
static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_param *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	hdev->le_scan_type = cp->type;

	hci_dev_unlock(hdev);
}
1308 
/* Command Complete handler for HCI_OP_LE_SET_EXT_SCAN_PARAMS.
 *
 * Extended counterpart of hci_cc_le_set_scan_param(). The scan type is
 * taken from the first per-PHY parameter block trailing the command
 * (assumes all PHYs were configured with the same type — the host
 * builds the command that way).
 */
static void hci_cc_le_set_ext_scan_param(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_scan_params *cp;
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_le_scan_phy_params *phy_param;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
	if (!cp)
		return;

	/* First PHY parameter block follows the fixed command header */
	phy_param = (void *)cp->data;

	hci_dev_lock(hdev);

	hdev->le_scan_type = phy_param->type;

	hci_dev_unlock(hdev);
}
1333 
1334 static bool has_pending_adv_report(struct hci_dev *hdev)
1335 {
1336 	struct discovery_state *d = &hdev->discovery;
1337 
1338 	return bacmp(&d->last_adv_addr, BDADDR_ANY);
1339 }
1340 
1341 static void clear_pending_adv_report(struct hci_dev *hdev)
1342 {
1343 	struct discovery_state *d = &hdev->discovery;
1344 
1345 	bacpy(&d->last_adv_addr, BDADDR_ANY);
1346 	d->last_adv_data_len = 0;
1347 }
1348 
1349 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1350 				     u8 bdaddr_type, s8 rssi, u32 flags,
1351 				     u8 *data, u8 len)
1352 {
1353 	struct discovery_state *d = &hdev->discovery;
1354 
1355 	if (len > HCI_MAX_AD_LENGTH)
1356 		return;
1357 
1358 	bacpy(&d->last_adv_addr, bdaddr);
1359 	d->last_adv_addr_type = bdaddr_type;
1360 	d->last_adv_rssi = rssi;
1361 	d->last_adv_flags = flags;
1362 	memcpy(d->last_adv_data, data, len);
1363 	d->last_adv_data_len = len;
1364 }
1365 
/* Common completion logic for the (legacy and extended) LE scan enable
 * commands.
 *
 * On enable: set HCI_LE_SCAN and, for active scans, discard any stale
 * buffered advertising report. On disable: flush a still-pending
 * report to mgmt, cancel the scan-disable timer, clear HCI_LE_SCAN and
 * reconcile the discovery state / re-enable advertising as needed.
 */
static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
{
	hci_dev_lock(hdev);

	switch (enable) {
	case LE_SCAN_ENABLE:
		hci_dev_set_flag(hdev, HCI_LE_SCAN);
		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
			clear_pending_adv_report(hdev);
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		hci_dev_clear_flag(hdev, HCI_LE_SCAN);

		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request. Mark
		 * therefore discovery as stopped. If this was not
		 * because of a connect request advertising might have
		 * been disabled because of active scanning, so
		 * re-enable it again if necessary.
		 */
		if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
			 hdev->discovery.state == DISCOVERY_FINDING)
			hci_req_reenable_advertising(hdev);

		break;

	default:
		bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
			   enable);
		break;
	}

	hci_dev_unlock(hdev);
}
1422 
/* Command Complete handler for HCI_OP_LE_SET_SCAN_ENABLE.
 *
 * On success, hand the enable value from the sent command to the
 * shared le_set_scan_enable_complete() state machine.
 */
static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	le_set_scan_enable_complete(hdev, cp->enable);
}
1440 
/* Command Complete handler for HCI_OP_LE_SET_EXT_SCAN_ENABLE.
 *
 * Extended-scan counterpart of hci_cc_le_set_scan_enable(); both feed
 * the same le_set_scan_enable_complete() state machine.
 */
static void hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
	if (!cp)
		return;

	le_set_scan_enable_complete(hdev, cp->enable);
}
1458 
/* Command Complete handler for HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS.
 *
 * On success, cache how many advertising sets the controller supports.
 */
static void hci_cc_le_read_num_adv_sets(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_rp_le_read_num_supported_adv_sets *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x No of Adv sets %u", hdev->name, rp->status,
	       rp->num_of_sets);

	if (rp->status)
		return;

	hdev->le_num_of_adv_sets = rp->num_of_sets;
}
1472 
/* Command Complete handler for HCI_OP_LE_READ_WHITE_LIST_SIZE.
 *
 * On success, cache the controller's white list capacity.
 */
static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);

	if (rp->status)
		return;

	hdev->le_white_list_size = rp->size;
}
1485 
/* Command Complete handler for HCI_OP_LE_CLEAR_WHITE_LIST.
 *
 * On success, drop the host's shadow copy of the controller's white
 * list so both stay in sync.
 */
static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	hci_bdaddr_list_clear(&hdev->le_white_list);
}
1498 
/* Command Complete handler for HCI_OP_LE_ADD_TO_WHITE_LIST.
 *
 * On success, mirror the added entry (recovered from the sent command)
 * into the host's shadow white list.
 */
static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_cp_le_add_to_white_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
	if (!sent)
		return;

	hci_bdaddr_list_add(&hdev->le_white_list, &sent->bdaddr,
			   sent->bdaddr_type);
}
1517 
/* Command Complete handler for HCI_OP_LE_DEL_FROM_WHITE_LIST.
 *
 * On success, remove the corresponding entry from the host's shadow
 * white list to keep it consistent with the controller.
 */
static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_cp_le_del_from_white_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
	if (!sent)
		return;

	hci_bdaddr_list_del(&hdev->le_white_list, &sent->bdaddr,
			    sent->bdaddr_type);
}
1536 
/* Command Complete handler for HCI_OP_LE_READ_SUPPORTED_STATES.
 *
 * On success, cache the 8-byte LE supported-states bitmask reported by
 * the controller.
 */
static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_rp_le_read_supported_states *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->le_states, rp->le_states, 8);
}
1549 
/* Command Complete handler for HCI_OP_LE_READ_DEF_DATA_LEN.
 *
 * On success, cache the suggested default data length parameters
 * (little-endian on the wire, hence the le16_to_cpu conversions).
 */
static void hci_cc_le_read_def_data_len(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
	hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
}
1563 
/* Command Complete handler for HCI_OP_LE_WRITE_DEF_DATA_LEN.
 *
 * On success, update the cached default data length from the values we
 * sent (the event carries only a status, so read back the command).
 */
static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_write_def_data_len *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
	if (!sent)
		return;

	hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
	hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
}
1582 
/* Command Complete handler for HCI_OP_LE_ADD_TO_RESOLV_LIST.
 *
 * On success, mirror the new entry (address, type and both IRKs from
 * the sent command) into the host's shadow resolving list.
 */
static void hci_cc_le_add_to_resolv_list(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_add_to_resolv_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
	if (!sent)
		return;

	hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
				sent->bdaddr_type, sent->peer_irk,
				sent->local_irk);
}
1602 
/* Command Complete handler for HCI_OP_LE_DEL_FROM_RESOLV_LIST.
 *
 * On success, remove the matching entry from the host's shadow
 * resolving list.
 */
static void hci_cc_le_del_from_resolv_list(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_cp_le_del_from_resolv_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
	if (!sent)
		return;

	hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
			    sent->bdaddr_type);
}
1621 
/* Command Complete handler for HCI_OP_LE_CLEAR_RESOLV_LIST.
 *
 * On success, drop the host's shadow copy of the resolving list.
 */
static void hci_cc_le_clear_resolv_list(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	hci_bdaddr_list_clear(&hdev->le_resolv_list);
}
1634 
/* Command Complete handler for HCI_OP_LE_READ_RESOLV_LIST_SIZE.
 *
 * On success, cache the controller's resolving list capacity.
 */
static void hci_cc_le_read_resolv_list_size(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_le_read_resolv_list_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);

	if (rp->status)
		return;

	hdev->le_resolv_list_size = rp->size;
}
1647 
/* Command Complete handler for HCI_OP_LE_SET_ADDR_RESOLV_ENABLE.
 *
 * On success, toggle the HCI_LL_RPA_RESOLUTION flag to match the
 * enable value that was sent, so the host knows whether the controller
 * is resolving RPAs in the link layer.
 */
static void hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (*sent)
		hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
	else
		hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);

	hci_dev_unlock(hdev);
}
1671 
/* Command Complete handler for HCI_OP_LE_READ_MAX_DATA_LEN.
 *
 * On success, cache the controller's maximum supported TX/RX payload
 * lengths and times for the LE data length extension.
 */
static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
	hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
	hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
	hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
}
1687 
/* Command Complete handler for HCI_OP_WRITE_LE_HOST_SUPPORTED.
 *
 * On success, bring the cached host feature bits and the HCI_LE_ENABLED
 * flag in line with the LE / simultaneous LE+BR/EDR host support values
 * that were written. Disabling LE also clears HCI_ADVERTISING since
 * advertising cannot continue without LE.
 */
static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_cp_write_le_host_supported *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (sent->le) {
		hdev->features[1][0] |= LMP_HOST_LE;
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	} else {
		hdev->features[1][0] &= ~LMP_HOST_LE;
		hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);
	}

	if (sent->simul)
		hdev->features[1][0] |= LMP_HOST_LE_BREDR;
	else
		hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;

	hci_dev_unlock(hdev);
}
1721 
/* Command Complete handler for HCI_OP_LE_SET_ADV_PARAM.
 *
 * On success, remember which own-address type advertising was
 * configured with; later code needs it to interpret the advertiser's
 * address.
 */
static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_le_set_adv_param *cp;
	u8 status = *((u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
	if (!cp)
		return;

	hci_dev_lock(hdev);
	hdev->adv_addr_type = cp->own_address_type;
	hci_dev_unlock(hdev);
}
1740 
/* Command Complete handler for HCI_OP_LE_SET_EXT_ADV_PARAMS.
 *
 * On success, record the configured own-address type and the selected
 * TX power returned by the controller — on hdev for instance 0, on the
 * matching adv_info otherwise — then refresh the advertising data,
 * which may embed the now-known TX power.
 */
static void hci_cc_set_ext_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_le_set_ext_adv_params *rp = (void *) skb->data;
	struct hci_cp_le_set_ext_adv_params *cp;
	struct adv_info *adv_instance;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
	if (!cp)
		return;

	hci_dev_lock(hdev);
	hdev->adv_addr_type = cp->own_addr_type;
	if (!hdev->cur_adv_instance) {
		/* Store in hdev for instance 0 */
		hdev->adv_tx_power = rp->tx_power;
	} else {
		adv_instance = hci_find_adv_instance(hdev,
						     hdev->cur_adv_instance);
		if (adv_instance)
			adv_instance->tx_power = rp->tx_power;
	}
	/* Update adv data as tx power is known now */
	hci_req_update_adv_data(hdev, hdev->cur_adv_instance);

	hci_dev_unlock(hdev);
}
1772 
/* Command Complete handler for HCI_OP_READ_RSSI.
 *
 * On success, store the reported RSSI on the connection identified by
 * the handle in the reply (if it still exists).
 */
static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_rssi *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->rssi = rp->rssi;

	hci_dev_unlock(hdev);
}
1791 
/* Command Complete handler for HCI_OP_READ_TX_POWER.
 *
 * On success, store the reported TX power on the connection. The sent
 * command's type field selects which value was read: 0x00 = current
 * transmit power, 0x01 = maximum transmit power.
 */
static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_read_tx_power *sent;
	struct hci_rp_read_tx_power *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (!conn)
		goto unlock;

	switch (sent->type) {
	case 0x00:
		conn->tx_power = rp->tx_power;
		break;
	case 0x01:
		conn->max_tx_power = rp->tx_power;
		break;
	}

unlock:
	hci_dev_unlock(hdev);
}
1825 
/* Command Complete handler for HCI_OP_WRITE_SSP_DEBUG_MODE.
 *
 * On success, cache the debug mode value from the sent command so the
 * host knows whether SSP debug keys are in use.
 */
static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	u8 *mode;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
	if (mode)
		hdev->ssp_debug_mode = *mode;
}
1840 
/* Command Status handler for HCI_OP_INQUIRY.
 *
 * On failure, kick any pending connection attempts that were waiting
 * for the inquiry to finish. On success, mark the device as currently
 * inquiring via the HCI_INQUIRY flag.
 */
static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status) {
		hci_conn_check_pending(hdev);
		return;
	}

	set_bit(HCI_INQUIRY, &hdev->flags);
}
1852 
/* Command Status handler for HCI_OP_CREATE_CONN.
 *
 * On failure, close (or re-queue) the matching outgoing ACL connection
 * object; status 0x0c (Command Disallowed) with few attempts gets one
 * more chance via BT_CONNECT2. On success, make sure a connection
 * object exists for the address so later events can find it.
 */
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			/* 0x0c == Command Disallowed: retry up to twice
			 * before giving up on the connection.
			 */
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
					    HCI_ROLE_MASTER);
			if (!conn)
				bt_dev_err(hdev, "no memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}
1890 
/* Command Status handler for HCI_OP_ADD_SCO.
 *
 * Only failures need handling (success is finalized by the connection
 * complete event): tear down the SCO connection linked to the ACL
 * handle the command was issued on.
 */
static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_add_sco *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}
1925 
/* Command Status handler for HCI_OP_AUTH_REQUESTED.
 *
 * Only failures need handling: if the connection is still in the
 * configuration phase, report the failure to upper layers and drop the
 * reference taken for the authentication attempt.
 */
static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_auth_requested *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
1952 
/* Command Status handler for HCI_OP_SET_CONN_ENCRYPT.
 *
 * Mirror of hci_cs_auth_requested(): on failure during the config
 * phase, notify upper layers and drop the connection reference.
 */
static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_set_conn_encrypt *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
1979 
1980 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1981 				    struct hci_conn *conn)
1982 {
1983 	if (conn->state != BT_CONFIG || !conn->out)
1984 		return 0;
1985 
1986 	if (conn->pending_sec_level == BT_SECURITY_SDP)
1987 		return 0;
1988 
1989 	/* Only request authentication for SSP connections or non-SSP
1990 	 * devices with sec_level MEDIUM or HIGH or if MITM protection
1991 	 * is requested.
1992 	 */
1993 	if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1994 	    conn->pending_sec_level != BT_SECURITY_FIPS &&
1995 	    conn->pending_sec_level != BT_SECURITY_HIGH &&
1996 	    conn->pending_sec_level != BT_SECURITY_MEDIUM)
1997 		return 0;
1998 
1999 	return 1;
2000 }
2001 
2002 static int hci_resolve_name(struct hci_dev *hdev,
2003 				   struct inquiry_entry *e)
2004 {
2005 	struct hci_cp_remote_name_req cp;
2006 
2007 	memset(&cp, 0, sizeof(cp));
2008 
2009 	bacpy(&cp.bdaddr, &e->data.bdaddr);
2010 	cp.pscan_rep_mode = e->data.pscan_rep_mode;
2011 	cp.pscan_mode = e->data.pscan_mode;
2012 	cp.clock_offset = e->data.clock_offset;
2013 
2014 	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2015 }
2016 
2017 static bool hci_resolve_next_name(struct hci_dev *hdev)
2018 {
2019 	struct discovery_state *discov = &hdev->discovery;
2020 	struct inquiry_entry *e;
2021 
2022 	if (list_empty(&discov->resolve))
2023 		return false;
2024 
2025 	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2026 	if (!e)
2027 		return false;
2028 
2029 	if (hci_resolve_name(hdev, e) == 0) {
2030 		e->name_state = NAME_PENDING;
2031 		return true;
2032 	}
2033 
2034 	return false;
2035 }
2036 
/* Process a resolved (or failed) remote name for an inquiry entry and
 * advance the name-resolution phase of discovery.
 *
 * Notifies mgmt of the connection (once) when the conn is genuinely
 * connected, records the name result on the matching NAME_PENDING
 * cache entry, and either kicks off resolution of the next entry or
 * marks discovery as stopped when nothing is left.
 */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	/* Update the mgmt connected state if necessary. Be careful with
	 * conn objects that exist but are not (yet) connected however.
	 * Only those in BT_CONFIG or BT_CONNECTED states can be
	 * considered connected.
	 */
	if (conn &&
	    (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, name, name_len);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in a list of found devices names of which
	 * are pending. there is no need to continue resolving a next name as it
	 * will be done upon receiving another Remote Name Request Complete
	 * Event */
	if (!e)
		return;

	list_del(&e->list);
	if (name) {
		e->name_state = NAME_KNOWN;
		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
				 e->data.rssi, name, name_len);
	} else {
		/* NULL name means the request failed */
		e->name_state = NAME_NOT_KNOWN;
	}

	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
2085 
/* Command Status handler for HCI_OP_REMOTE_NAME_REQ.
 *
 * Only failures are processed here (success is handled by the Remote
 * Name Request Complete event): report the failed resolution to the
 * discovery machinery, then — if an outgoing connection on that
 * address still needs it — issue the pending authentication request.
 */
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* If successful wait for the name req complete event before
	 * checking for the need to do authentication */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	/* NULL name tells the discovery code the resolution failed */
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested auth_cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		auth_cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(auth_cp), &auth_cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
2128 
/* Command Status handler for HCI_OP_READ_REMOTE_FEATURES.
 *
 * Only failures need handling: if the connection is still in the
 * config phase, report the failure upstream and drop the reference.
 */
static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_read_remote_features *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
2155 
/* Command Status handler for HCI_OP_READ_REMOTE_EXT_FEATURES.
 *
 * Extended-features counterpart of hci_cs_read_remote_features() with
 * identical failure handling.
 */
static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_read_remote_ext_features *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
2182 
/* Command Status handler for HCI_OP_SETUP_SYNC_CONN.
 *
 * Only failures need handling: tear down the SCO/eSCO connection
 * linked to the ACL handle the command was issued on (same pattern as
 * hci_cs_add_sco()).
 */
static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_setup_sync_conn *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}
2217 
/* Command Status handler for HCI_OP_SNIFF_MODE.
 *
 * Only failures need handling: clear the pending mode-change flag and
 * complete any SCO setup that was deferred behind the mode change.
 */
static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_sniff_mode *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, status);
	}

	hci_dev_unlock(hdev);
}
2244 
/* Command Status handler for HCI_OP_EXIT_SNIFF_MODE.
 *
 * Mirror of hci_cs_sniff_mode() for leaving sniff mode: on failure,
 * clear the pending mode-change flag and run any deferred SCO setup.
 */
static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_exit_sniff_mode *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, status);
	}

	hci_dev_unlock(hdev);
}
2271 
2272 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
2273 {
2274 	struct hci_cp_disconnect *cp;
2275 	struct hci_conn *conn;
2276 
2277 	if (!status)
2278 		return;
2279 
2280 	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
2281 	if (!cp)
2282 		return;
2283 
2284 	hci_dev_lock(hdev);
2285 
2286 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2287 	if (conn) {
2288 		u8 type = conn->type;
2289 
2290 		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2291 				       conn->dst_type, status);
2292 
2293 		/* If the disconnection failed for any reason, the upper layer
2294 		 * does not retry to disconnect in current implementation.
2295 		 * Hence, we need to do some basic cleanup here and re-enable
2296 		 * advertising if necessary.
2297 		 */
2298 		hci_conn_del(conn);
2299 		if (type == LE_LINK)
2300 			hci_req_reenable_advertising(hdev);
2301 	}
2302 
2303 	hci_dev_unlock(hdev);
2304 }
2305 
/* Common post-processing once an LE Create Connection (legacy or
 * extended) command has been accepted by the controller.
 *
 * Normalizes the own-address type when controller-based address
 * resolution is active, records the initiator/responder addresses for
 * SMP, and arms the LE connection timeout for directed (non-white-list)
 * connection attempts.
 */
static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
			      u8 peer_addr_type, u8 own_address_type,
			      u8 filter_policy)
{
	struct hci_conn *conn;

	conn = hci_conn_hash_lookup_le(hdev, peer_addr,
				       peer_addr_type);
	if (!conn)
		return;

	/* When using controller based address resolution, then the new
	 * address types 0x02 and 0x03 are used. These types need to be
	 * converted back into either public address or random address type
	 */
	if (use_ll_privacy(hdev) &&
	    hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) {
		switch (own_address_type) {
		case ADDR_LE_DEV_PUBLIC_RESOLVED:
			own_address_type = ADDR_LE_DEV_PUBLIC;
			break;
		case ADDR_LE_DEV_RANDOM_RESOLVED:
			own_address_type = ADDR_LE_DEV_RANDOM;
			break;
		}
	}

	/* Store the initiator and responder address information which
	 * is needed for SMP. These values will not change during the
	 * lifetime of the connection.
	 */
	conn->init_addr_type = own_address_type;
	if (own_address_type == ADDR_LE_DEV_RANDOM)
		bacpy(&conn->init_addr, &hdev->random_addr);
	else
		bacpy(&conn->init_addr, &hdev->bdaddr);

	conn->resp_addr_type = peer_addr_type;
	bacpy(&conn->resp_addr, peer_addr);

	/* We don't want the connection attempt to stick around
	 * indefinitely since LE doesn't have a page timeout concept
	 * like BR/EDR. Set a timer for any connection that doesn't use
	 * the white list for connecting.
	 */
	if (filter_policy == HCI_LE_USE_PEER_ADDR)
		queue_delayed_work(conn->hdev->workqueue,
				   &conn->le_conn_timeout,
				   conn->conn_timeout);
}
2356 
2357 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
2358 {
2359 	struct hci_cp_le_create_conn *cp;
2360 
2361 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
2362 
2363 	/* All connection failure handling is taken care of by the
2364 	 * hci_le_conn_failed function which is triggered by the HCI
2365 	 * request completion callbacks used for connecting.
2366 	 */
2367 	if (status)
2368 		return;
2369 
2370 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
2371 	if (!cp)
2372 		return;
2373 
2374 	hci_dev_lock(hdev);
2375 
2376 	cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2377 			  cp->own_address_type, cp->filter_policy);
2378 
2379 	hci_dev_unlock(hdev);
2380 }
2381 
2382 static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
2383 {
2384 	struct hci_cp_le_ext_create_conn *cp;
2385 
2386 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
2387 
2388 	/* All connection failure handling is taken care of by the
2389 	 * hci_le_conn_failed function which is triggered by the HCI
2390 	 * request completion callbacks used for connecting.
2391 	 */
2392 	if (status)
2393 		return;
2394 
2395 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
2396 	if (!cp)
2397 		return;
2398 
2399 	hci_dev_lock(hdev);
2400 
2401 	cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2402 			  cp->own_addr_type, cp->filter_policy);
2403 
2404 	hci_dev_unlock(hdev);
2405 }
2406 
2407 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
2408 {
2409 	struct hci_cp_le_read_remote_features *cp;
2410 	struct hci_conn *conn;
2411 
2412 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
2413 
2414 	if (!status)
2415 		return;
2416 
2417 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
2418 	if (!cp)
2419 		return;
2420 
2421 	hci_dev_lock(hdev);
2422 
2423 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2424 	if (conn) {
2425 		if (conn->state == BT_CONFIG) {
2426 			hci_connect_cfm(conn, status);
2427 			hci_conn_drop(conn);
2428 		}
2429 	}
2430 
2431 	hci_dev_unlock(hdev);
2432 }
2433 
/* Handle command status for HCI_OP_LE_START_ENC.
 *
 * Only a failing status is processed here; a successful command is
 * followed by an Encryption Change event. If encryption could not be
 * started on an established link, the link is terminated since the
 * requested security level can no longer be reached.
 */
static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_start_enc *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (!conn)
		goto unlock;

	if (conn->state != BT_CONNECTED)
		goto unlock;

	/* Encryption failed on a live connection: disconnect with an
	 * authentication failure and release the reference.
	 */
	hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
	hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}
2463 
2464 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
2465 {
2466 	struct hci_cp_switch_role *cp;
2467 	struct hci_conn *conn;
2468 
2469 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
2470 
2471 	if (!status)
2472 		return;
2473 
2474 	cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
2475 	if (!cp)
2476 		return;
2477 
2478 	hci_dev_lock(hdev);
2479 
2480 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2481 	if (conn)
2482 		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2483 
2484 	hci_dev_unlock(hdev);
2485 }
2486 
2487 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2488 {
2489 	__u8 status = *((__u8 *) skb->data);
2490 	struct discovery_state *discov = &hdev->discovery;
2491 	struct inquiry_entry *e;
2492 
2493 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
2494 
2495 	hci_conn_check_pending(hdev);
2496 
2497 	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
2498 		return;
2499 
2500 	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
2501 	wake_up_bit(&hdev->flags, HCI_INQUIRY);
2502 
2503 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
2504 		return;
2505 
2506 	hci_dev_lock(hdev);
2507 
2508 	if (discov->state != DISCOVERY_FINDING)
2509 		goto unlock;
2510 
2511 	if (list_empty(&discov->resolve)) {
2512 		/* When BR/EDR inquiry is active and no LE scanning is in
2513 		 * progress, then change discovery state to indicate completion.
2514 		 *
2515 		 * When running LE scanning and BR/EDR inquiry simultaneously
2516 		 * and the LE scan already finished, then change the discovery
2517 		 * state to indicate completion.
2518 		 */
2519 		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2520 		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2521 			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2522 		goto unlock;
2523 	}
2524 
2525 	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2526 	if (e && hci_resolve_name(hdev, e) == 0) {
2527 		e->name_state = NAME_PENDING;
2528 		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
2529 	} else {
2530 		/* When BR/EDR inquiry is active and no LE scanning is in
2531 		 * progress, then change discovery state to indicate completion.
2532 		 *
2533 		 * When running LE scanning and BR/EDR inquiry simultaneously
2534 		 * and the LE scan already finished, then change the discovery
2535 		 * state to indicate completion.
2536 		 */
2537 		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2538 		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2539 			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2540 	}
2541 
2542 unlock:
2543 	hci_dev_unlock(hdev);
2544 }
2545 
2546 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2547 {
2548 	struct inquiry_data data;
2549 	struct inquiry_info *info = (void *) (skb->data + 1);
2550 	int num_rsp = *((__u8 *) skb->data);
2551 
2552 	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2553 
2554 	if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
2555 		return;
2556 
2557 	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
2558 		return;
2559 
2560 	hci_dev_lock(hdev);
2561 
2562 	for (; num_rsp; num_rsp--, info++) {
2563 		u32 flags;
2564 
2565 		bacpy(&data.bdaddr, &info->bdaddr);
2566 		data.pscan_rep_mode	= info->pscan_rep_mode;
2567 		data.pscan_period_mode	= info->pscan_period_mode;
2568 		data.pscan_mode		= info->pscan_mode;
2569 		memcpy(data.dev_class, info->dev_class, 3);
2570 		data.clock_offset	= info->clock_offset;
2571 		data.rssi		= HCI_RSSI_INVALID;
2572 		data.ssp_mode		= 0x00;
2573 
2574 		flags = hci_inquiry_cache_update(hdev, &data, false);
2575 
2576 		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2577 				  info->dev_class, HCI_RSSI_INVALID,
2578 				  flags, NULL, 0, NULL, 0);
2579 	}
2580 
2581 	hci_dev_unlock(hdev);
2582 }
2583 
/* Handle the HCI Connection Complete event (BR/EDR ACL and SCO).
 *
 * Locates (or, for special cases, creates) the hci_conn matching the
 * event, records the connection handle, starts remote feature
 * discovery for ACL links and notifies upper layers of success or
 * failure.
 */
static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* Connection may not exist if auto-connected. Check the bredr
		 * allowlist to see if this device is allowed to auto connect.
		 * If link is an ACL type, create a connection class
		 * automatically.
		 *
		 * Auto-connect will only occur if the event filter is
		 * programmed with a given address. Right now, event filter is
		 * only used during suspend.
		 */
		if (ev->link_type == ACL_LINK &&
		    hci_bdaddr_list_lookup_with_flags(&hdev->whitelist,
						      &ev->bdaddr,
						      BDADDR_BREDR)) {
			conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
					    HCI_ROLE_SLAVE);
			if (!conn) {
				bt_dev_err(hdev, "no memory for new conn");
				goto unlock;
			}
		} else {
			if (ev->link_type != SCO_LINK)
				goto unlock;

			/* An eSCO attempt may complete as plain SCO; look up
			 * the pending eSCO connection and adjust its type to
			 * match the event.
			 */
			conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK,
						       &ev->bdaddr);
			if (!conn)
				goto unlock;

			conn->type = SCO_LINK;
		}
	}

	if (!ev->status) {
		conn->handle = __le16_to_cpu(ev->handle);

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			/* Incoming legacy (non-SSP) connections without a
			 * stored link key get a longer timeout to allow
			 * pairing to finish.
			 */
			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);

		if (test_bit(HCI_AUTH, &hdev->flags))
			set_bit(HCI_CONN_AUTH, &conn->flags);

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);

			hci_req_update_scan(hdev);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}
	} else {
		conn->state = BT_CLOSED;
		if (conn->type == ACL_LINK)
			mgmt_connect_failed(hdev, &conn->dst, conn->type,
					    conn->dst_type, ev->status);
	}

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

	if (ev->status) {
		/* Failed connections are reported upwards and removed */
		hci_connect_cfm(conn, ev->status);
		hci_conn_del(conn);
	} else if (ev->link_type == SCO_LINK) {
		switch (conn->setting & SCO_AIRMODE_MASK) {
		case SCO_AIRMODE_CVSD:
			/* Let the driver know so it can set up its SCO
			 * transport for CVSD air mode.
			 */
			if (hdev->notify)
				hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
			break;
		}

		hci_connect_cfm(conn, ev->status);
	}

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
2698 
2699 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
2700 {
2701 	struct hci_cp_reject_conn_req cp;
2702 
2703 	bacpy(&cp.bdaddr, bdaddr);
2704 	cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2705 	hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
2706 }
2707 
/* Handle the HCI Connection Request event.
 *
 * Applies the acceptance policy (protocol indication, blacklist,
 * connectable/whitelist state when mgmt is in use) and either rejects
 * the request or accepts it — immediately for ACL and non-deferred
 * SCO/eSCO, or deferred to upper layers via hci_connect_cfm.
 */
static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;
	struct inquiry_entry *ie;
	struct hci_conn *conn;
	__u8 flags = 0;

	BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
	       ev->link_type);

	/* Let the protocol layers veto or defer the connection */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
				      &flags);

	if (!(mask & HCI_LM_ACCEPT)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	if (hci_bdaddr_list_lookup(&hdev->blacklist, &ev->bdaddr,
				   BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* Require HCI_CONNECTABLE or a whitelist entry to accept the
	 * connection. These features are only touched through mgmt so
	 * only do the checks if HCI_MGMT is set.
	 */
	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
	    !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
	    !hci_bdaddr_list_lookup_with_flags(&hdev->whitelist, &ev->bdaddr,
					       BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* Connection accepted */

	hci_dev_lock(hdev);

	/* Refresh the cached device class from the event */
	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		memcpy(ie->data.dev_class, ev->dev_class, 3);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
			&ev->bdaddr);
	if (!conn) {
		conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
				    HCI_ROLE_SLAVE);
		if (!conn) {
			bt_dev_err(hdev, "no memory for new connection");
			hci_dev_unlock(hdev);
			return;
		}
	}

	memcpy(conn->dev_class, ev->dev_class, 3);

	hci_dev_unlock(hdev);

	if (ev->link_type == ACL_LINK ||
	    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
		struct hci_cp_accept_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);

		/* Request the master role if we support role switching
		 * and the link policy allows it.
		 */
		if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
			cp.role = 0x00; /* Become master */
		else
			cp.role = 0x01; /* Remain slave */

		hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
	} else if (!(flags & HCI_PROTO_DEFER)) {
		/* Accept the synchronous connection with default
		 * parameters (0x1f40 byte/s bandwidth in each direction,
		 * no maximum latency limit, no retransmission effort).
		 */
		struct hci_cp_accept_sync_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.pkt_type = cpu_to_le16(conn->pkt_type);

		cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.max_latency    = cpu_to_le16(0xffff);
		cp.content_format = cpu_to_le16(hdev->voice_setting);
		cp.retrans_effort = 0xff;

		hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
			     &cp);
	} else {
		/* Deferred setup: let the upper layer decide whether to
		 * accept via a later call into the connection handling.
		 */
		conn->state = BT_CONNECT2;
		hci_connect_cfm(conn, 0);
	}
}
2802 
2803 static u8 hci_to_mgmt_reason(u8 err)
2804 {
2805 	switch (err) {
2806 	case HCI_ERROR_CONNECTION_TIMEOUT:
2807 		return MGMT_DEV_DISCONN_TIMEOUT;
2808 	case HCI_ERROR_REMOTE_USER_TERM:
2809 	case HCI_ERROR_REMOTE_LOW_RESOURCES:
2810 	case HCI_ERROR_REMOTE_POWER_OFF:
2811 		return MGMT_DEV_DISCONN_REMOTE;
2812 	case HCI_ERROR_LOCAL_HOST_TERM:
2813 		return MGMT_DEV_DISCONN_LOCAL_HOST;
2814 	default:
2815 		return MGMT_DEV_DISCONN_UNKNOWN;
2816 	}
2817 }
2818 
/* Handle the HCI Disconnection Complete event.
 *
 * Tears down the hci_conn, informs mgmt and upper layers, re-queues
 * LE auto-connect devices for background scanning, wakes the suspend
 * path once all connections are gone and re-enables advertising when
 * an LE link goes away.
 */
static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = (void *) skb->data;
	u8 reason;
	struct hci_conn_params *params;
	struct hci_conn *conn;
	bool mgmt_connected;
	u8 type;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->status) {
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, ev->status);
		goto unlock;
	}

	conn->state = BT_CLOSED;

	mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);

	/* An authentication failure takes precedence over the HCI
	 * reason when reporting to user space.
	 */
	if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
		reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
	else
		reason = hci_to_mgmt_reason(ev->reason);

	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
				reason, mgmt_connected);

	if (conn->type == ACL_LINK) {
		if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
			hci_remove_link_key(hdev, &conn->dst);

		hci_req_update_scan(hdev);
	}

	/* Re-queue LE auto-connect devices so they get reconnected
	 * when they become available again.
	 */
	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_LINK_LOSS:
			/* Only reconnect after an actual link loss */
			if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
				break;
			fallthrough;

		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			list_del_init(&params->action);
			list_add(&params->action, &hdev->pend_le_conns);
			hci_update_background_scan(hdev);
			break;

		default:
			break;
		}
	}

	/* Remember the link type; conn is freed by hci_conn_del() */
	type = conn->type;

	hci_disconn_cfm(conn, ev->reason);
	hci_conn_del(conn);

	/* The suspend notifier is waiting for all devices to disconnect so
	 * clear the bit from pending tasks and inform the wait queue.
	 */
	if (list_empty(&hdev->conn_hash.list) &&
	    test_and_clear_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks)) {
		wake_up(&hdev->suspend_wait_q);
	}

	/* Re-enable advertising if necessary, since it might
	 * have been disabled by the connection. From the
	 * HCI_LE_Set_Advertise_Enable command description in
	 * the core specification (v4.0):
	 * "The Controller shall continue advertising until the Host
	 * issues an LE_Set_Advertise_Enable command with
	 * Advertising_Enable set to 0x00 (Advertising is disabled)
	 * or until a connection is created or until the Advertising
	 * is timed out due to Directed Advertising."
	 */
	if (type == LE_LINK)
		hci_req_reenable_advertising(hdev);

unlock:
	hci_dev_unlock(hdev);
}
2910 
/* Handle the HCI Authentication Complete event.
 *
 * Updates the authentication state of the connection, reports
 * failures to mgmt and, depending on the connection state, either
 * continues with encryption (SSP setup or pending encrypt request)
 * or completes the connection setup.
 */
static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		/* A re-authentication of a legacy (non-SSP) link does not
		 * raise the security level; only note it.
		 */
		if (!hci_conn_ssp_enabled(conn) &&
		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
			bt_dev_info(hdev, "re-auth of legacy device is not possible.");
		} else {
			set_bit(HCI_CONN_AUTH, &conn->flags);
			conn->sec_level = conn->pending_sec_level;
		}
	} else {
		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		mgmt_auth_failed(conn, ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		/* During setup, SSP links continue with encryption;
		 * otherwise the connection is complete at this point.
		 */
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

	/* Serve a pending encryption request now that authentication
	 * has finished (successfully or not).
	 */
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
2980 
/* Handle the HCI Remote Name Request Complete event.
 *
 * Forwards the resolved name (if any) to the mgmt name-resolution
 * handling and, when required, starts outgoing authentication on the
 * associated ACL connection.
 */
static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	/* Name-resolution bookkeeping only matters for mgmt discovery */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto check_auth;

	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3022 
/* Completion callback for HCI_OP_READ_ENC_KEY_SIZE.
 *
 * Records the negotiated encryption key size on the connection (zero
 * on failure, which forces a disconnect through the security checks)
 * and then delivers the pending encryption-change notification.
 */
static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status,
				       u16 opcode, struct sk_buff *skb)
{
	const struct hci_rp_read_enc_key_size *rp;
	struct hci_conn *conn;
	u16 handle;

	BT_DBG("%s status 0x%02x", hdev->name, status);

	if (!skb || skb->len < sizeof(*rp)) {
		bt_dev_err(hdev, "invalid read key size response");
		return;
	}

	rp = (void *)skb->data;
	handle = le16_to_cpu(rp->handle);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn)
		goto unlock;

	/* While unexpected, the read_enc_key_size command may fail. The most
	 * secure approach is to then assume the key size is 0 to force a
	 * disconnection.
	 */
	if (rp->status) {
		bt_dev_err(hdev, "failed to read key size for handle %u",
			   handle);
		conn->enc_key_size = 0;
	} else {
		conn->enc_key_size = rp->key_size;
	}

	hci_encrypt_cfm(conn, 0);

unlock:
	hci_dev_unlock(hdev);
}
3063 
/* Handle the HCI Encryption Change event.
 *
 * Updates the connection security flags, enforces the link security
 * requirements, reads the encryption key size for encrypted ACL
 * links and configures the authenticated payload timeout where
 * supported, before notifying upper layers via hci_encrypt_cfm.
 */
static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (ev->encrypt) {
			/* Encryption implies authentication */
			set_bit(HCI_CONN_AUTH, &conn->flags);
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
			conn->sec_level = conn->pending_sec_level;

			/* P-256 authentication key implies FIPS */
			if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
				set_bit(HCI_CONN_FIPS, &conn->flags);

			/* encrypt == 0x02 on BR/EDR and any LE encryption
			 * means AES-CCM is in use.
			 */
			if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
			    conn->type == LE_LINK)
				set_bit(HCI_CONN_AES_CCM, &conn->flags);
		} else {
			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}
	}

	/* We should disregard the current RPA and generate a new one
	 * whenever the encryption procedure fails.
	 */
	if (ev->status && conn->type == LE_LINK) {
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
	}

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* Check link security requirements are met */
	if (!hci_conn_check_link_mode(conn))
		ev->status = HCI_ERROR_AUTH_FAILURE;

	if (ev->status && conn->state == BT_CONNECTED) {
		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		/* Notify upper layers so they can cleanup before
		 * disconnecting.
		 */
		hci_encrypt_cfm(conn, ev->status);
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	/* Try reading the encryption key size for encrypted ACL links */
	if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
		struct hci_cp_read_enc_key_size cp;
		struct hci_request req;

		/* Only send HCI_Read_Encryption_Key_Size if the
		 * controller really supports it. If it doesn't, assume
		 * the default size (16).
		 */
		if (!(hdev->commands[20] & 0x10)) {
			conn->enc_key_size = HCI_LINK_KEY_SIZE;
			goto notify;
		}

		hci_req_init(&req, hdev);

		cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp);

		if (hci_req_run_skb(&req, read_enc_key_size_complete)) {
			bt_dev_err(hdev, "sending read key size failed");
			conn->enc_key_size = HCI_LINK_KEY_SIZE;
			goto notify;
		}

		/* Notification happens in read_enc_key_size_complete() */
		goto unlock;
	}

	/* Set the default Authenticated Payload Timeout after
	 * an LE Link is established. As per Core Spec v5.0, Vol 2, Part B
	 * Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be
	 * sent when the link is active and Encryption is enabled, the conn
	 * type can be either LE or ACL and controller must support LMP Ping.
	 * Ensure for AES-CCM encryption as well.
	 */
	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
	    test_bit(HCI_CONN_AES_CCM, &conn->flags) &&
	    ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) ||
	     (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) {
		struct hci_cp_write_auth_payload_to cp;

		cp.handle = cpu_to_le16(conn->handle);
		cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
		hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,
			     sizeof(cp), &cp);
	}

notify:
	hci_encrypt_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);
}
3177 
3178 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
3179 					     struct sk_buff *skb)
3180 {
3181 	struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
3182 	struct hci_conn *conn;
3183 
3184 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3185 
3186 	hci_dev_lock(hdev);
3187 
3188 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3189 	if (conn) {
3190 		if (!ev->status)
3191 			set_bit(HCI_CONN_SECURE, &conn->flags);
3192 
3193 		clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3194 
3195 		hci_key_change_cfm(conn, ev->status);
3196 	}
3197 
3198 	hci_dev_unlock(hdev);
3199 }
3200 
/* Handle the HCI Read Remote Supported Features Complete event.
 *
 * Stores the remote feature page 0, requests extended features when
 * both sides support them, and otherwise continues connection setup
 * with a remote name request or mgmt connected notification.
 */
static void hci_remote_features_evt(struct hci_dev *hdev,
				    struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		memcpy(conn->features[0], ev->features, 8);

	/* The remaining steps only apply during connection setup */
	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status && lmp_ext_feat_capable(hdev) &&
	    lmp_ext_feat_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
			     sizeof(cp), &cp);
		goto unlock;
	}

	/* Resolve the remote name before announcing the device to mgmt */
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3249 
3250 static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
3251 				 u16 *opcode, u8 *status,
3252 				 hci_req_complete_t *req_complete,
3253 				 hci_req_complete_skb_t *req_complete_skb)
3254 {
3255 	struct hci_ev_cmd_complete *ev = (void *) skb->data;
3256 
3257 	*opcode = __le16_to_cpu(ev->opcode);
3258 	*status = skb->data[sizeof(*ev)];
3259 
3260 	skb_pull(skb, sizeof(*ev));
3261 
3262 	switch (*opcode) {
3263 	case HCI_OP_INQUIRY_CANCEL:
3264 		hci_cc_inquiry_cancel(hdev, skb, status);
3265 		break;
3266 
3267 	case HCI_OP_PERIODIC_INQ:
3268 		hci_cc_periodic_inq(hdev, skb);
3269 		break;
3270 
3271 	case HCI_OP_EXIT_PERIODIC_INQ:
3272 		hci_cc_exit_periodic_inq(hdev, skb);
3273 		break;
3274 
3275 	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
3276 		hci_cc_remote_name_req_cancel(hdev, skb);
3277 		break;
3278 
3279 	case HCI_OP_ROLE_DISCOVERY:
3280 		hci_cc_role_discovery(hdev, skb);
3281 		break;
3282 
3283 	case HCI_OP_READ_LINK_POLICY:
3284 		hci_cc_read_link_policy(hdev, skb);
3285 		break;
3286 
3287 	case HCI_OP_WRITE_LINK_POLICY:
3288 		hci_cc_write_link_policy(hdev, skb);
3289 		break;
3290 
3291 	case HCI_OP_READ_DEF_LINK_POLICY:
3292 		hci_cc_read_def_link_policy(hdev, skb);
3293 		break;
3294 
3295 	case HCI_OP_WRITE_DEF_LINK_POLICY:
3296 		hci_cc_write_def_link_policy(hdev, skb);
3297 		break;
3298 
3299 	case HCI_OP_RESET:
3300 		hci_cc_reset(hdev, skb);
3301 		break;
3302 
3303 	case HCI_OP_READ_STORED_LINK_KEY:
3304 		hci_cc_read_stored_link_key(hdev, skb);
3305 		break;
3306 
3307 	case HCI_OP_DELETE_STORED_LINK_KEY:
3308 		hci_cc_delete_stored_link_key(hdev, skb);
3309 		break;
3310 
3311 	case HCI_OP_WRITE_LOCAL_NAME:
3312 		hci_cc_write_local_name(hdev, skb);
3313 		break;
3314 
3315 	case HCI_OP_READ_LOCAL_NAME:
3316 		hci_cc_read_local_name(hdev, skb);
3317 		break;
3318 
3319 	case HCI_OP_WRITE_AUTH_ENABLE:
3320 		hci_cc_write_auth_enable(hdev, skb);
3321 		break;
3322 
3323 	case HCI_OP_WRITE_ENCRYPT_MODE:
3324 		hci_cc_write_encrypt_mode(hdev, skb);
3325 		break;
3326 
3327 	case HCI_OP_WRITE_SCAN_ENABLE:
3328 		hci_cc_write_scan_enable(hdev, skb);
3329 		break;
3330 
3331 	case HCI_OP_READ_CLASS_OF_DEV:
3332 		hci_cc_read_class_of_dev(hdev, skb);
3333 		break;
3334 
3335 	case HCI_OP_WRITE_CLASS_OF_DEV:
3336 		hci_cc_write_class_of_dev(hdev, skb);
3337 		break;
3338 
3339 	case HCI_OP_READ_VOICE_SETTING:
3340 		hci_cc_read_voice_setting(hdev, skb);
3341 		break;
3342 
3343 	case HCI_OP_WRITE_VOICE_SETTING:
3344 		hci_cc_write_voice_setting(hdev, skb);
3345 		break;
3346 
3347 	case HCI_OP_READ_NUM_SUPPORTED_IAC:
3348 		hci_cc_read_num_supported_iac(hdev, skb);
3349 		break;
3350 
3351 	case HCI_OP_WRITE_SSP_MODE:
3352 		hci_cc_write_ssp_mode(hdev, skb);
3353 		break;
3354 
3355 	case HCI_OP_WRITE_SC_SUPPORT:
3356 		hci_cc_write_sc_support(hdev, skb);
3357 		break;
3358 
3359 	case HCI_OP_READ_AUTH_PAYLOAD_TO:
3360 		hci_cc_read_auth_payload_timeout(hdev, skb);
3361 		break;
3362 
3363 	case HCI_OP_WRITE_AUTH_PAYLOAD_TO:
3364 		hci_cc_write_auth_payload_timeout(hdev, skb);
3365 		break;
3366 
3367 	case HCI_OP_READ_LOCAL_VERSION:
3368 		hci_cc_read_local_version(hdev, skb);
3369 		break;
3370 
3371 	case HCI_OP_READ_LOCAL_COMMANDS:
3372 		hci_cc_read_local_commands(hdev, skb);
3373 		break;
3374 
3375 	case HCI_OP_READ_LOCAL_FEATURES:
3376 		hci_cc_read_local_features(hdev, skb);
3377 		break;
3378 
3379 	case HCI_OP_READ_LOCAL_EXT_FEATURES:
3380 		hci_cc_read_local_ext_features(hdev, skb);
3381 		break;
3382 
3383 	case HCI_OP_READ_BUFFER_SIZE:
3384 		hci_cc_read_buffer_size(hdev, skb);
3385 		break;
3386 
3387 	case HCI_OP_READ_BD_ADDR:
3388 		hci_cc_read_bd_addr(hdev, skb);
3389 		break;
3390 
3391 	case HCI_OP_READ_LOCAL_PAIRING_OPTS:
3392 		hci_cc_read_local_pairing_opts(hdev, skb);
3393 		break;
3394 
3395 	case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
3396 		hci_cc_read_page_scan_activity(hdev, skb);
3397 		break;
3398 
3399 	case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
3400 		hci_cc_write_page_scan_activity(hdev, skb);
3401 		break;
3402 
3403 	case HCI_OP_READ_PAGE_SCAN_TYPE:
3404 		hci_cc_read_page_scan_type(hdev, skb);
3405 		break;
3406 
3407 	case HCI_OP_WRITE_PAGE_SCAN_TYPE:
3408 		hci_cc_write_page_scan_type(hdev, skb);
3409 		break;
3410 
3411 	case HCI_OP_READ_DATA_BLOCK_SIZE:
3412 		hci_cc_read_data_block_size(hdev, skb);
3413 		break;
3414 
3415 	case HCI_OP_READ_FLOW_CONTROL_MODE:
3416 		hci_cc_read_flow_control_mode(hdev, skb);
3417 		break;
3418 
3419 	case HCI_OP_READ_LOCAL_AMP_INFO:
3420 		hci_cc_read_local_amp_info(hdev, skb);
3421 		break;
3422 
3423 	case HCI_OP_READ_CLOCK:
3424 		hci_cc_read_clock(hdev, skb);
3425 		break;
3426 
3427 	case HCI_OP_READ_INQ_RSP_TX_POWER:
3428 		hci_cc_read_inq_rsp_tx_power(hdev, skb);
3429 		break;
3430 
3431 	case HCI_OP_READ_DEF_ERR_DATA_REPORTING:
3432 		hci_cc_read_def_err_data_reporting(hdev, skb);
3433 		break;
3434 
3435 	case HCI_OP_WRITE_DEF_ERR_DATA_REPORTING:
3436 		hci_cc_write_def_err_data_reporting(hdev, skb);
3437 		break;
3438 
3439 	case HCI_OP_PIN_CODE_REPLY:
3440 		hci_cc_pin_code_reply(hdev, skb);
3441 		break;
3442 
3443 	case HCI_OP_PIN_CODE_NEG_REPLY:
3444 		hci_cc_pin_code_neg_reply(hdev, skb);
3445 		break;
3446 
3447 	case HCI_OP_READ_LOCAL_OOB_DATA:
3448 		hci_cc_read_local_oob_data(hdev, skb);
3449 		break;
3450 
3451 	case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
3452 		hci_cc_read_local_oob_ext_data(hdev, skb);
3453 		break;
3454 
3455 	case HCI_OP_LE_READ_BUFFER_SIZE:
3456 		hci_cc_le_read_buffer_size(hdev, skb);
3457 		break;
3458 
3459 	case HCI_OP_LE_READ_LOCAL_FEATURES:
3460 		hci_cc_le_read_local_features(hdev, skb);
3461 		break;
3462 
3463 	case HCI_OP_LE_READ_ADV_TX_POWER:
3464 		hci_cc_le_read_adv_tx_power(hdev, skb);
3465 		break;
3466 
3467 	case HCI_OP_USER_CONFIRM_REPLY:
3468 		hci_cc_user_confirm_reply(hdev, skb);
3469 		break;
3470 
3471 	case HCI_OP_USER_CONFIRM_NEG_REPLY:
3472 		hci_cc_user_confirm_neg_reply(hdev, skb);
3473 		break;
3474 
3475 	case HCI_OP_USER_PASSKEY_REPLY:
3476 		hci_cc_user_passkey_reply(hdev, skb);
3477 		break;
3478 
3479 	case HCI_OP_USER_PASSKEY_NEG_REPLY:
3480 		hci_cc_user_passkey_neg_reply(hdev, skb);
3481 		break;
3482 
3483 	case HCI_OP_LE_SET_RANDOM_ADDR:
3484 		hci_cc_le_set_random_addr(hdev, skb);
3485 		break;
3486 
3487 	case HCI_OP_LE_SET_ADV_ENABLE:
3488 		hci_cc_le_set_adv_enable(hdev, skb);
3489 		break;
3490 
3491 	case HCI_OP_LE_SET_SCAN_PARAM:
3492 		hci_cc_le_set_scan_param(hdev, skb);
3493 		break;
3494 
3495 	case HCI_OP_LE_SET_SCAN_ENABLE:
3496 		hci_cc_le_set_scan_enable(hdev, skb);
3497 		break;
3498 
3499 	case HCI_OP_LE_READ_WHITE_LIST_SIZE:
3500 		hci_cc_le_read_white_list_size(hdev, skb);
3501 		break;
3502 
3503 	case HCI_OP_LE_CLEAR_WHITE_LIST:
3504 		hci_cc_le_clear_white_list(hdev, skb);
3505 		break;
3506 
3507 	case HCI_OP_LE_ADD_TO_WHITE_LIST:
3508 		hci_cc_le_add_to_white_list(hdev, skb);
3509 		break;
3510 
3511 	case HCI_OP_LE_DEL_FROM_WHITE_LIST:
3512 		hci_cc_le_del_from_white_list(hdev, skb);
3513 		break;
3514 
3515 	case HCI_OP_LE_READ_SUPPORTED_STATES:
3516 		hci_cc_le_read_supported_states(hdev, skb);
3517 		break;
3518 
3519 	case HCI_OP_LE_READ_DEF_DATA_LEN:
3520 		hci_cc_le_read_def_data_len(hdev, skb);
3521 		break;
3522 
3523 	case HCI_OP_LE_WRITE_DEF_DATA_LEN:
3524 		hci_cc_le_write_def_data_len(hdev, skb);
3525 		break;
3526 
3527 	case HCI_OP_LE_ADD_TO_RESOLV_LIST:
3528 		hci_cc_le_add_to_resolv_list(hdev, skb);
3529 		break;
3530 
3531 	case HCI_OP_LE_DEL_FROM_RESOLV_LIST:
3532 		hci_cc_le_del_from_resolv_list(hdev, skb);
3533 		break;
3534 
3535 	case HCI_OP_LE_CLEAR_RESOLV_LIST:
3536 		hci_cc_le_clear_resolv_list(hdev, skb);
3537 		break;
3538 
3539 	case HCI_OP_LE_READ_RESOLV_LIST_SIZE:
3540 		hci_cc_le_read_resolv_list_size(hdev, skb);
3541 		break;
3542 
3543 	case HCI_OP_LE_SET_ADDR_RESOLV_ENABLE:
3544 		hci_cc_le_set_addr_resolution_enable(hdev, skb);
3545 		break;
3546 
3547 	case HCI_OP_LE_READ_MAX_DATA_LEN:
3548 		hci_cc_le_read_max_data_len(hdev, skb);
3549 		break;
3550 
3551 	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
3552 		hci_cc_write_le_host_supported(hdev, skb);
3553 		break;
3554 
3555 	case HCI_OP_LE_SET_ADV_PARAM:
3556 		hci_cc_set_adv_param(hdev, skb);
3557 		break;
3558 
3559 	case HCI_OP_READ_RSSI:
3560 		hci_cc_read_rssi(hdev, skb);
3561 		break;
3562 
3563 	case HCI_OP_READ_TX_POWER:
3564 		hci_cc_read_tx_power(hdev, skb);
3565 		break;
3566 
3567 	case HCI_OP_WRITE_SSP_DEBUG_MODE:
3568 		hci_cc_write_ssp_debug_mode(hdev, skb);
3569 		break;
3570 
3571 	case HCI_OP_LE_SET_EXT_SCAN_PARAMS:
3572 		hci_cc_le_set_ext_scan_param(hdev, skb);
3573 		break;
3574 
3575 	case HCI_OP_LE_SET_EXT_SCAN_ENABLE:
3576 		hci_cc_le_set_ext_scan_enable(hdev, skb);
3577 		break;
3578 
3579 	case HCI_OP_LE_SET_DEFAULT_PHY:
3580 		hci_cc_le_set_default_phy(hdev, skb);
3581 		break;
3582 
3583 	case HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS:
3584 		hci_cc_le_read_num_adv_sets(hdev, skb);
3585 		break;
3586 
3587 	case HCI_OP_LE_SET_EXT_ADV_PARAMS:
3588 		hci_cc_set_ext_adv_param(hdev, skb);
3589 		break;
3590 
3591 	case HCI_OP_LE_SET_EXT_ADV_ENABLE:
3592 		hci_cc_le_set_ext_adv_enable(hdev, skb);
3593 		break;
3594 
3595 	case HCI_OP_LE_SET_ADV_SET_RAND_ADDR:
3596 		hci_cc_le_set_adv_set_random_addr(hdev, skb);
3597 		break;
3598 
3599 	case HCI_OP_LE_READ_TRANSMIT_POWER:
3600 		hci_cc_le_read_transmit_power(hdev, skb);
3601 		break;
3602 
3603 	default:
3604 		BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
3605 		break;
3606 	}
3607 
3608 	if (*opcode != HCI_OP_NOP)
3609 		cancel_delayed_work(&hdev->cmd_timer);
3610 
3611 	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
3612 		atomic_set(&hdev->cmd_cnt, 1);
3613 
3614 	hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
3615 			     req_complete_skb);
3616 
3617 	if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
3618 		bt_dev_err(hdev,
3619 			   "unexpected event for opcode 0x%4.4x", *opcode);
3620 		return;
3621 	}
3622 
3623 	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3624 		queue_work(hdev->workqueue, &hdev->cmd_work);
3625 }
3626 
/* Handle an HCI Command Status event.
 *
 * Dispatches to the per-opcode command-status handler, updates the
 * command-queue credit accounting and completes any request waiting on
 * this opcode.  @opcode and @status are out-parameters reported back to
 * the caller for request tracking.
 */
static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
			       u16 *opcode, u8 *status,
			       hci_req_complete_t *req_complete,
			       hci_req_complete_skb_t *req_complete_skb)
{
	struct hci_ev_cmd_status *ev = (void *) skb->data;

	skb_pull(skb, sizeof(*ev));

	*opcode = __le16_to_cpu(ev->opcode);
	*status = ev->status;

	switch (*opcode) {
	case HCI_OP_INQUIRY:
		hci_cs_inquiry(hdev, ev->status);
		break;

	case HCI_OP_CREATE_CONN:
		hci_cs_create_conn(hdev, ev->status);
		break;

	case HCI_OP_DISCONNECT:
		hci_cs_disconnect(hdev, ev->status);
		break;

	case HCI_OP_ADD_SCO:
		hci_cs_add_sco(hdev, ev->status);
		break;

	case HCI_OP_AUTH_REQUESTED:
		hci_cs_auth_requested(hdev, ev->status);
		break;

	case HCI_OP_SET_CONN_ENCRYPT:
		hci_cs_set_conn_encrypt(hdev, ev->status);
		break;

	case HCI_OP_REMOTE_NAME_REQ:
		hci_cs_remote_name_req(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_FEATURES:
		hci_cs_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_EXT_FEATURES:
		hci_cs_read_remote_ext_features(hdev, ev->status);
		break;

	case HCI_OP_SETUP_SYNC_CONN:
		hci_cs_setup_sync_conn(hdev, ev->status);
		break;

	case HCI_OP_SNIFF_MODE:
		hci_cs_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_EXIT_SNIFF_MODE:
		hci_cs_exit_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_SWITCH_ROLE:
		hci_cs_switch_role(hdev, ev->status);
		break;

	case HCI_OP_LE_CREATE_CONN:
		hci_cs_le_create_conn(hdev, ev->status);
		break;

	case HCI_OP_LE_READ_REMOTE_FEATURES:
		hci_cs_le_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_LE_START_ENC:
		hci_cs_le_start_enc(hdev, ev->status);
		break;

	case HCI_OP_LE_EXT_CREATE_CONN:
		hci_cs_le_ext_create_conn(hdev, ev->status);
		break;

	default:
		/* No dedicated command-status handler for this opcode */
		BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
		break;
	}

	/* Any response to a real command stops the command timeout */
	if (*opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	/* ncmd indicates the controller can accept commands again; do not
	 * restart the queue while a reset is in flight. */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
		atomic_set(&hdev->cmd_cnt, 1);

	/* Indicate request completion if the command failed. Also, if
	 * we're not waiting for a special event and we get a success
	 * command status we should try to flag the request as completed
	 * (since for this kind of commands there will not be a command
	 * complete event).
	 */
	if (ev->status ||
	    (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->hci.req_event))
		hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
				     req_complete_skb);

	if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
		bt_dev_err(hdev,
			   "unexpected event for opcode 0x%4.4x", *opcode);
		return;
	}

	/* Kick the command work if credits and queued commands remain */
	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
		queue_work(hdev->workqueue, &hdev->cmd_work);
}
3739 
3740 static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
3741 {
3742 	struct hci_ev_hardware_error *ev = (void *) skb->data;
3743 
3744 	hdev->hw_error_code = ev->code;
3745 
3746 	queue_work(hdev->req_workqueue, &hdev->error_reset);
3747 }
3748 
3749 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3750 {
3751 	struct hci_ev_role_change *ev = (void *) skb->data;
3752 	struct hci_conn *conn;
3753 
3754 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3755 
3756 	hci_dev_lock(hdev);
3757 
3758 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3759 	if (conn) {
3760 		if (!ev->status)
3761 			conn->role = ev->role;
3762 
3763 		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3764 
3765 		hci_role_switch_cfm(conn, ev->status, ev->role);
3766 	}
3767 
3768 	hci_dev_unlock(hdev);
3769 }
3770 
3771 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
3772 {
3773 	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
3774 	int i;
3775 
3776 	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
3777 		bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
3778 		return;
3779 	}
3780 
3781 	if (skb->len < sizeof(*ev) ||
3782 	    skb->len < struct_size(ev, handles, ev->num_hndl)) {
3783 		BT_DBG("%s bad parameters", hdev->name);
3784 		return;
3785 	}
3786 
3787 	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
3788 
3789 	for (i = 0; i < ev->num_hndl; i++) {
3790 		struct hci_comp_pkts_info *info = &ev->handles[i];
3791 		struct hci_conn *conn;
3792 		__u16  handle, count;
3793 
3794 		handle = __le16_to_cpu(info->handle);
3795 		count  = __le16_to_cpu(info->count);
3796 
3797 		conn = hci_conn_hash_lookup_handle(hdev, handle);
3798 		if (!conn)
3799 			continue;
3800 
3801 		conn->sent -= count;
3802 
3803 		switch (conn->type) {
3804 		case ACL_LINK:
3805 			hdev->acl_cnt += count;
3806 			if (hdev->acl_cnt > hdev->acl_pkts)
3807 				hdev->acl_cnt = hdev->acl_pkts;
3808 			break;
3809 
3810 		case LE_LINK:
3811 			if (hdev->le_pkts) {
3812 				hdev->le_cnt += count;
3813 				if (hdev->le_cnt > hdev->le_pkts)
3814 					hdev->le_cnt = hdev->le_pkts;
3815 			} else {
3816 				hdev->acl_cnt += count;
3817 				if (hdev->acl_cnt > hdev->acl_pkts)
3818 					hdev->acl_cnt = hdev->acl_pkts;
3819 			}
3820 			break;
3821 
3822 		case SCO_LINK:
3823 			hdev->sco_cnt += count;
3824 			if (hdev->sco_cnt > hdev->sco_pkts)
3825 				hdev->sco_cnt = hdev->sco_pkts;
3826 			break;
3827 
3828 		default:
3829 			bt_dev_err(hdev, "unknown type %d conn %p",
3830 				   conn->type, conn);
3831 			break;
3832 		}
3833 	}
3834 
3835 	queue_work(hdev->workqueue, &hdev->tx_work);
3836 }
3837 
3838 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
3839 						 __u16 handle)
3840 {
3841 	struct hci_chan *chan;
3842 
3843 	switch (hdev->dev_type) {
3844 	case HCI_PRIMARY:
3845 		return hci_conn_hash_lookup_handle(hdev, handle);
3846 	case HCI_AMP:
3847 		chan = hci_chan_lookup_handle(hdev, handle);
3848 		if (chan)
3849 			return chan->conn;
3850 		break;
3851 	default:
3852 		bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
3853 		break;
3854 	}
3855 
3856 	return NULL;
3857 }
3858 
3859 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
3860 {
3861 	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
3862 	int i;
3863 
3864 	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
3865 		bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
3866 		return;
3867 	}
3868 
3869 	if (skb->len < sizeof(*ev) ||
3870 	    skb->len < struct_size(ev, handles, ev->num_hndl)) {
3871 		BT_DBG("%s bad parameters", hdev->name);
3872 		return;
3873 	}
3874 
3875 	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
3876 	       ev->num_hndl);
3877 
3878 	for (i = 0; i < ev->num_hndl; i++) {
3879 		struct hci_comp_blocks_info *info = &ev->handles[i];
3880 		struct hci_conn *conn = NULL;
3881 		__u16  handle, block_count;
3882 
3883 		handle = __le16_to_cpu(info->handle);
3884 		block_count = __le16_to_cpu(info->blocks);
3885 
3886 		conn = __hci_conn_lookup_handle(hdev, handle);
3887 		if (!conn)
3888 			continue;
3889 
3890 		conn->sent -= block_count;
3891 
3892 		switch (conn->type) {
3893 		case ACL_LINK:
3894 		case AMP_LINK:
3895 			hdev->block_cnt += block_count;
3896 			if (hdev->block_cnt > hdev->num_blocks)
3897 				hdev->block_cnt = hdev->num_blocks;
3898 			break;
3899 
3900 		default:
3901 			bt_dev_err(hdev, "unknown type %d conn %p",
3902 				   conn->type, conn);
3903 			break;
3904 		}
3905 	}
3906 
3907 	queue_work(hdev->workqueue, &hdev->tx_work);
3908 }
3909 
3910 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3911 {
3912 	struct hci_ev_mode_change *ev = (void *) skb->data;
3913 	struct hci_conn *conn;
3914 
3915 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3916 
3917 	hci_dev_lock(hdev);
3918 
3919 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3920 	if (conn) {
3921 		conn->mode = ev->mode;
3922 
3923 		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
3924 					&conn->flags)) {
3925 			if (conn->mode == HCI_CM_ACTIVE)
3926 				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3927 			else
3928 				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3929 		}
3930 
3931 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
3932 			hci_sco_setup(conn, ev->status);
3933 	}
3934 
3935 	hci_dev_unlock(hdev);
3936 }
3937 
3938 static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3939 {
3940 	struct hci_ev_pin_code_req *ev = (void *) skb->data;
3941 	struct hci_conn *conn;
3942 
3943 	BT_DBG("%s", hdev->name);
3944 
3945 	hci_dev_lock(hdev);
3946 
3947 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3948 	if (!conn)
3949 		goto unlock;
3950 
3951 	if (conn->state == BT_CONNECTED) {
3952 		hci_conn_hold(conn);
3953 		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
3954 		hci_conn_drop(conn);
3955 	}
3956 
3957 	if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
3958 	    !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
3959 		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3960 			     sizeof(ev->bdaddr), &ev->bdaddr);
3961 	} else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
3962 		u8 secure;
3963 
3964 		if (conn->pending_sec_level == BT_SECURITY_HIGH)
3965 			secure = 1;
3966 		else
3967 			secure = 0;
3968 
3969 		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
3970 	}
3971 
3972 unlock:
3973 	hci_dev_unlock(hdev);
3974 }
3975 
3976 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
3977 {
3978 	if (key_type == HCI_LK_CHANGED_COMBINATION)
3979 		return;
3980 
3981 	conn->pin_length = pin_len;
3982 	conn->key_type = key_type;
3983 
3984 	switch (key_type) {
3985 	case HCI_LK_LOCAL_UNIT:
3986 	case HCI_LK_REMOTE_UNIT:
3987 	case HCI_LK_DEBUG_COMBINATION:
3988 		return;
3989 	case HCI_LK_COMBINATION:
3990 		if (pin_len == 16)
3991 			conn->pending_sec_level = BT_SECURITY_HIGH;
3992 		else
3993 			conn->pending_sec_level = BT_SECURITY_MEDIUM;
3994 		break;
3995 	case HCI_LK_UNAUTH_COMBINATION_P192:
3996 	case HCI_LK_UNAUTH_COMBINATION_P256:
3997 		conn->pending_sec_level = BT_SECURITY_MEDIUM;
3998 		break;
3999 	case HCI_LK_AUTH_COMBINATION_P192:
4000 		conn->pending_sec_level = BT_SECURITY_HIGH;
4001 		break;
4002 	case HCI_LK_AUTH_COMBINATION_P256:
4003 		conn->pending_sec_level = BT_SECURITY_FIPS;
4004 		break;
4005 	}
4006 }
4007 
/* Handle a Link Key Request: look up a stored link key for the peer
 * and either reply with it or send a negative reply.
 *
 * A stored key is rejected (negative reply) when:
 *  - no key is known for the address,
 *  - the key is unauthenticated but the connection's auth_type has the
 *    MITM bit set (and auth_type is not the 0xff "unknown" marker),
 *  - the key is a combination key from a short (<16 digit) PIN while
 *    high or FIPS security is pending.
 */
static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = (void *) skb->data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	BT_DBG("%s", hdev->name);

	/* Stored keys are only consulted when mgmt controls the device */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		BT_DBG("%s link key not found for %pMR", hdev->name,
		       &ev->bdaddr);
		goto not_found;
	}

	BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
	       &ev->bdaddr);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);

		/* An unauthenticated key must not satisfy a request that
		 * demands MITM protection (auth_type bit 0). */
		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
			BT_DBG("%s ignoring unauthenticated key", hdev->name);
			goto not_found;
		}

		/* Combination keys from short PINs are too weak for
		 * high/FIPS security levels. */
		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
		    (conn->pending_sec_level == BT_SECURITY_HIGH ||
		     conn->pending_sec_level == BT_SECURITY_FIPS)) {
			BT_DBG("%s ignoring key unauthenticated for high security",
			       hdev->name);
			goto not_found;
		}

		conn_set_key(conn, key->type, key->pin_len);
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}
4067 
/* Handle a Link Key Notification: the controller created or changed a
 * link key for a connection.  Updates the connection's key state and,
 * when mgmt is active, stores the key and notifies user space.
 */
static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = (void *) skb->data;
	struct hci_conn *conn;
	struct link_key *key;
	bool persistent;
	/* NOTE(review): pin_len is always 0 here, so the key is stored
	 * with pin_len 0 while conn_set_key() above receives the
	 * connection's own pin_length — confirm this is intended. */
	u8 pin_len = 0;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Refresh the disconnect timeout */
	hci_conn_hold(conn);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(conn);

	set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
	conn_set_key(conn, ev->key_type, conn->pin_length);

	/* Without mgmt there is no kernel-side key storage */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
			        ev->key_type, pin_len, &persistent);
	if (!key)
		goto unlock;

	/* Update connection information since adding the key will have
	 * fixed up the type in the case of changed combination keys.
	 */
	if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
		conn_set_key(conn, key->type, key->pin_len);

	mgmt_new_link_key(hdev, key, persistent);

	/* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
	 * is set. If it's not set simply remove the key from the kernel
	 * list (we've still notified user space about it but with
	 * store_hint being 0).
	 */
	if (key->type == HCI_LK_DEBUG_COMBINATION &&
	    !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
		goto unlock;
	}

	/* Non-persistent keys are flushed when the connection goes down */
	if (persistent)
		clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
	else
		set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);

unlock:
	hci_dev_unlock(hdev);
}
4127 
4128 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
4129 {
4130 	struct hci_ev_clock_offset *ev = (void *) skb->data;
4131 	struct hci_conn *conn;
4132 
4133 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4134 
4135 	hci_dev_lock(hdev);
4136 
4137 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4138 	if (conn && !ev->status) {
4139 		struct inquiry_entry *ie;
4140 
4141 		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4142 		if (ie) {
4143 			ie->data.clock_offset = ev->clock_offset;
4144 			ie->timestamp = jiffies;
4145 		}
4146 	}
4147 
4148 	hci_dev_unlock(hdev);
4149 }
4150 
4151 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
4152 {
4153 	struct hci_ev_pkt_type_change *ev = (void *) skb->data;
4154 	struct hci_conn *conn;
4155 
4156 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4157 
4158 	hci_dev_lock(hdev);
4159 
4160 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4161 	if (conn && !ev->status)
4162 		conn->pkt_type = __le16_to_cpu(ev->pkt_type);
4163 
4164 	hci_dev_unlock(hdev);
4165 }
4166 
4167 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
4168 {
4169 	struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
4170 	struct inquiry_entry *ie;
4171 
4172 	BT_DBG("%s", hdev->name);
4173 
4174 	hci_dev_lock(hdev);
4175 
4176 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4177 	if (ie) {
4178 		ie->data.pscan_rep_mode = ev->pscan_rep_mode;
4179 		ie->timestamp = jiffies;
4180 	}
4181 
4182 	hci_dev_unlock(hdev);
4183 }
4184 
/* Handle Inquiry Result with RSSI.  The event exists in two layouts —
 * with and without a pscan_mode octet per record — distinguished by
 * the per-record size.  Each record updates the inquiry cache and is
 * forwarded to mgmt as a found device.
 */
static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct inquiry_data data;
	int num_rsp = *((__u8 *) skb->data);

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	/* Results from a periodic inquiry are deliberately ignored */
	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
		return;

	hci_dev_lock(hdev);

	/* Distinguish the two record layouts by the per-record size */
	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
		struct inquiry_info_with_rssi_and_pscan_mode *info;
		info = (void *) (skb->data + 1);

		/* Reject truncated events before reading any record */
		if (skb->len < num_rsp * sizeof(*info) + 1)
			goto unlock;

		for (; num_rsp; num_rsp--, info++) {
			u32 flags;

			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode	= info->pscan_rep_mode;
			data.pscan_period_mode	= info->pscan_period_mode;
			data.pscan_mode		= info->pscan_mode;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset	= info->clock_offset;
			data.rssi		= info->rssi;
			data.ssp_mode		= 0x00;

			flags = hci_inquiry_cache_update(hdev, &data, false);

			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  flags, NULL, 0, NULL, 0);
		}
	} else {
		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);

		/* Reject truncated events before reading any record */
		if (skb->len < num_rsp * sizeof(*info) + 1)
			goto unlock;

		for (; num_rsp; num_rsp--, info++) {
			u32 flags;

			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode	= info->pscan_rep_mode;
			data.pscan_period_mode	= info->pscan_period_mode;
			data.pscan_mode		= 0x00;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset	= info->clock_offset;
			data.rssi		= info->rssi;
			data.ssp_mode		= 0x00;

			flags = hci_inquiry_cache_update(hdev, &data, false);

			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  flags, NULL, 0, NULL, 0);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
4255 
/* Handle Read Remote Extended Features Complete: cache the reported
 * feature page on the connection and, for page 1 (host features),
 * synchronize the SSP/SC connection flags.  If the connection is still
 * in BT_CONFIG, continue setup with a remote name request or by
 * declaring the connection established.
 */
static void hci_remote_ext_features_evt(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* Cache the page; pages beyond HCI_MAX_PAGES are ignored */
	if (ev->page < HCI_MAX_PAGES)
		memcpy(conn->features[ev->page], ev->features, 8);

	/* Page 1 carries the remote host features (SSP/SC support) */
	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

		if (ev->features[0] & LMP_HOST_SSP) {
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		} else {
			/* It is mandatory by the Bluetooth specification that
			 * Extended Inquiry Results are only used when Secure
			 * Simple Pairing is enabled, but some devices violate
			 * this.
			 *
			 * To make these devices work, the internal SSP
			 * enabled flag needs to be cleared if the remote host
			 * features do not indicate SSP support */
			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		}

		if (ev->features[0] & LMP_HOST_SC)
			set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
	}

	if (conn->state != BT_CONFIG)
		goto unlock;

	/* On success, resolve the remote name before telling mgmt;
	 * otherwise report the device as connected right away. */
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	/* If no outgoing authentication is required the connection is
	 * usable now; confirm it and drop the setup reference. */
	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
4319 
/* Handle Synchronous Connection Complete (SCO/eSCO).  On success the
 * connection is registered; on a set of retryable failures an outgoing
 * eSCO attempt is retried with a downgraded packet type; any other
 * failure closes the connection.
 */
static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		/* When the link type in the event indicates SCO connection
		 * and lookup of the connection object fails, then check
		 * if an eSCO connection object exists.
		 *
		 * The core limits the synchronous connections to either
		 * SCO or eSCO. The eSCO connection is preferred and tried
		 * to be setup first and until successfully established,
		 * the link type will be hinted as eSCO.
		 */
		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;
	}

	switch (ev->status) {
	case 0x00:
		conn->handle = __le16_to_cpu(ev->handle);
		conn->state  = BT_CONNECTED;
		conn->type   = ev->link_type;

		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);
		break;

	case 0x10:	/* Connection Accept Timeout */
	case 0x0d:	/* Connection Rejected due to Limited Resources */
	case 0x11:	/* Unsupported Feature or Parameter Value */
	case 0x1c:	/* SCO interval rejected */
	case 0x1a:	/* Unsupported Remote Feature */
	case 0x1e:	/* Invalid LMP Parameters */
	case 0x1f:	/* Unspecified error */
	case 0x20:	/* Unsupported LMP Parameter value */
		/* Retry an outgoing connection with a downgraded packet
		 * type; a successful resubmission keeps the connection
		 * alive, so skip the failure handling below.
		 *
		 * NOTE(review): conn->link (the parent ACL) is
		 * dereferenced without a NULL check here — confirm it
		 * cannot be NULL for an outgoing SCO at this point.
		 */
		if (conn->out) {
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
			if (hci_setup_sync(conn, conn->link->handle))
				goto unlock;
		}
		fallthrough;

	default:
		conn->state = BT_CLOSED;
		break;
	}

	/* NOTE(review): the air-mode notification below also runs for
	 * failed connections (non-zero status) — confirm intended. */
	bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode);

	switch (conn->setting & SCO_AIRMODE_MASK) {
	case SCO_AIRMODE_CVSD:
		if (hdev->notify)
			hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
		break;
	case SCO_AIRMODE_TRANSP:
		if (hdev->notify)
			hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP);
		break;
	}

	hci_connect_cfm(conn, ev->status);
	if (ev->status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
4400 
4401 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
4402 {
4403 	size_t parsed = 0;
4404 
4405 	while (parsed < eir_len) {
4406 		u8 field_len = eir[0];
4407 
4408 		if (field_len == 0)
4409 			return parsed;
4410 
4411 		parsed += field_len + 1;
4412 		eir += field_len + 1;
4413 	}
4414 
4415 	return eir_len;
4416 }
4417 
4418 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
4419 					    struct sk_buff *skb)
4420 {
4421 	struct inquiry_data data;
4422 	struct extended_inquiry_info *info = (void *) (skb->data + 1);
4423 	int num_rsp = *((__u8 *) skb->data);
4424 	size_t eir_len;
4425 
4426 	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
4427 
4428 	if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
4429 		return;
4430 
4431 	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
4432 		return;
4433 
4434 	hci_dev_lock(hdev);
4435 
4436 	for (; num_rsp; num_rsp--, info++) {
4437 		u32 flags;
4438 		bool name_known;
4439 
4440 		bacpy(&data.bdaddr, &info->bdaddr);
4441 		data.pscan_rep_mode	= info->pscan_rep_mode;
4442 		data.pscan_period_mode	= info->pscan_period_mode;
4443 		data.pscan_mode		= 0x00;
4444 		memcpy(data.dev_class, info->dev_class, 3);
4445 		data.clock_offset	= info->clock_offset;
4446 		data.rssi		= info->rssi;
4447 		data.ssp_mode		= 0x01;
4448 
4449 		if (hci_dev_test_flag(hdev, HCI_MGMT))
4450 			name_known = eir_get_data(info->data,
4451 						  sizeof(info->data),
4452 						  EIR_NAME_COMPLETE, NULL);
4453 		else
4454 			name_known = true;
4455 
4456 		flags = hci_inquiry_cache_update(hdev, &data, name_known);
4457 
4458 		eir_len = eir_get_length(info->data, sizeof(info->data));
4459 
4460 		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4461 				  info->dev_class, info->rssi,
4462 				  flags, info->data, eir_len, NULL, 0);
4463 	}
4464 
4465 	hci_dev_unlock(hdev);
4466 }
4467 
/* Handle HCI Encryption Key Refresh Complete event.
 *
 * For LE links this marks the end of an encryption (re)start: on
 * success the pending security level is promoted and the upper layers
 * are informed via connect/auth confirmation callbacks. BR/EDR links
 * are intentionally ignored here.
 */
static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
	       __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* For BR/EDR the necessary steps are taken through the
	 * auth_complete event.
	 */
	if (conn->type != LE_LINK)
		goto unlock;

	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	/* Encryption attempt has concluded either way */
	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* A failed refresh on an established link is treated as fatal:
	 * disconnect with an authentication-failure reason.
	 */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		/* Keep the link alive for the disconnect timeout period */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
4517 
4518 static u8 hci_get_auth_req(struct hci_conn *conn)
4519 {
4520 	/* If remote requests no-bonding follow that lead */
4521 	if (conn->remote_auth == HCI_AT_NO_BONDING ||
4522 	    conn->remote_auth == HCI_AT_NO_BONDING_MITM)
4523 		return conn->remote_auth | (conn->auth_type & 0x01);
4524 
4525 	/* If both remote and local have enough IO capabilities, require
4526 	 * MITM protection
4527 	 */
4528 	if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
4529 	    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
4530 		return conn->remote_auth | 0x01;
4531 
4532 	/* No MITM protection possible so ignore remote requirement */
4533 	return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
4534 }
4535 
4536 static u8 bredr_oob_data_present(struct hci_conn *conn)
4537 {
4538 	struct hci_dev *hdev = conn->hdev;
4539 	struct oob_data *data;
4540 
4541 	data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
4542 	if (!data)
4543 		return 0x00;
4544 
4545 	if (bredr_sc_enabled(hdev)) {
4546 		/* When Secure Connections is enabled, then just
4547 		 * return the present value stored with the OOB
4548 		 * data. The stored value contains the right present
4549 		 * information. However it can only be trusted when
4550 		 * not in Secure Connection Only mode.
4551 		 */
4552 		if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
4553 			return data->present;
4554 
4555 		/* When Secure Connections Only mode is enabled, then
4556 		 * the P-256 values are required. If they are not
4557 		 * available, then do not declare that OOB data is
4558 		 * present.
4559 		 */
4560 		if (!memcmp(data->rand256, ZERO_KEY, 16) ||
4561 		    !memcmp(data->hash256, ZERO_KEY, 16))
4562 			return 0x00;
4563 
4564 		return 0x02;
4565 	}
4566 
4567 	/* When Secure Connections is not enabled or actually
4568 	 * not supported by the hardware, then check that if
4569 	 * P-192 data values are present.
4570 	 */
4571 	if (!memcmp(data->rand192, ZERO_KEY, 16) ||
4572 	    !memcmp(data->hash192, ZERO_KEY, 16))
4573 		return 0x00;
4574 
4575 	return 0x01;
4576 }
4577 
/* Handle HCI IO Capability Request event (Secure Simple Pairing).
 *
 * Decides whether pairing is allowed and answers with either an IO
 * Capability Reply (carrying our IO capability, computed auth
 * requirement and OOB-data-present value) or a negative reply with
 * HCI_ERROR_PAIRING_NOT_ALLOWED.
 */
static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Keep a reference for the duration of the pairing procedure;
	 * dropped again e.g. in hci_simple_pair_complete_evt().
	 */
	hci_conn_hold(conn);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	/* Allow pairing if we're pairable, the initiators of the
	 * pairing or if the remote is not requesting bonding.
	 */
	if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
	    test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
				HCI_IO_DISPLAY_YESNO : conn->io_capability;

		/* If we are initiators, there is no remote information yet */
		if (conn->remote_auth == 0xff) {
			/* Request MITM protection if our IO caps allow it
			 * except for the no-bonding case.
			 */
			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
			    conn->auth_type != HCI_AT_NO_BONDING)
				conn->auth_type |= 0x01;
		} else {
			conn->auth_type = hci_get_auth_req(conn);
		}

		/* If we're not bondable, force one of the non-bondable
		 * authentication requirement values.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
			conn->auth_type &= HCI_AT_NO_BONDING_MITM;

		cp.authentication = conn->auth_type;
		cp.oob_data = bredr_oob_data_present(conn);

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
4646 
4647 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
4648 {
4649 	struct hci_ev_io_capa_reply *ev = (void *) skb->data;
4650 	struct hci_conn *conn;
4651 
4652 	BT_DBG("%s", hdev->name);
4653 
4654 	hci_dev_lock(hdev);
4655 
4656 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4657 	if (!conn)
4658 		goto unlock;
4659 
4660 	conn->remote_cap = ev->capability;
4661 	conn->remote_auth = ev->authentication;
4662 
4663 unlock:
4664 	hci_dev_unlock(hdev);
4665 }
4666 
/* Handle HCI User Confirmation Request event (SSP numeric comparison).
 *
 * Either rejects (remote can't satisfy a required MITM level),
 * auto-accepts (no MITM needed on either side, possibly after a
 * configured delay), or forwards the request to user space via mgmt
 * with a hint on whether a plain confirmation is sufficient.
 */
static void hci_user_confirm_request_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Bit 0 of the auth requirement encodes the MITM demand */
	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. We check the security level here since it doesn't
	 * necessarily match conn->auth_type.
	 */
	if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
	    conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). The exception is if neither
		 * side had MITM or if the local IO capability is
		 * NoInputNoOutput, in which case we do auto-accept
		 */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
		    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
		    (loc_mitm || rem_mitm)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		/* If there already exists link key in local host, leave the
		 * decision to user space since the remote device could be
		 * legitimate or malicious.
		 */
		if (hci_find_link_key(hdev, &ev->bdaddr)) {
			bt_dev_dbg(hdev, "Local host already has link key");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		/* Optional delay lets a user-facing notification be seen
		 * before the controller accepts on its own.
		 */
		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			queue_delayed_work(conn->hdev->workqueue,
					   &conn->auto_accept_work, delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
				  le32_to_cpu(ev->passkey), confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
4751 
4752 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
4753 					 struct sk_buff *skb)
4754 {
4755 	struct hci_ev_user_passkey_req *ev = (void *) skb->data;
4756 
4757 	BT_DBG("%s", hdev->name);
4758 
4759 	if (hci_dev_test_flag(hdev, HCI_MGMT))
4760 		mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
4761 }
4762 
4763 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
4764 					struct sk_buff *skb)
4765 {
4766 	struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
4767 	struct hci_conn *conn;
4768 
4769 	BT_DBG("%s", hdev->name);
4770 
4771 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4772 	if (!conn)
4773 		return;
4774 
4775 	conn->passkey_notify = __le32_to_cpu(ev->passkey);
4776 	conn->passkey_entered = 0;
4777 
4778 	if (hci_dev_test_flag(hdev, HCI_MGMT))
4779 		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4780 					 conn->dst_type, conn->passkey_notify,
4781 					 conn->passkey_entered);
4782 }
4783 
/* Handle HCI Keypress Notification event (SSP passkey entry).
 *
 * Tracks how many passkey digits the remote user has entered so far
 * and reports the updated count to user space via mgmt so that a UI
 * can show pairing progress.
 */
static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_keypress_notify *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		return;

	switch (ev->type) {
	case HCI_KEYPRESS_STARTED:
		conn->passkey_entered = 0;
		/* Entry just started; no count update to report yet */
		return;

	case HCI_KEYPRESS_ENTERED:
		conn->passkey_entered++;
		break;

	case HCI_KEYPRESS_ERASED:
		conn->passkey_entered--;
		break;

	case HCI_KEYPRESS_CLEARED:
		conn->passkey_entered = 0;
		break;

	case HCI_KEYPRESS_COMPLETED:
		/* Entry finished; nothing further to report from here */
		return;
	}

	/* Note: an unrecognized ev->type falls through to the notify
	 * below without modifying the counter.
	 */
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
					 conn->dst_type, conn->passkey_notify,
					 conn->passkey_entered);
}
4821 
4822 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
4823 					 struct sk_buff *skb)
4824 {
4825 	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
4826 	struct hci_conn *conn;
4827 
4828 	BT_DBG("%s", hdev->name);
4829 
4830 	hci_dev_lock(hdev);
4831 
4832 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4833 	if (!conn)
4834 		goto unlock;
4835 
4836 	/* Reset the authentication requirement to unknown */
4837 	conn->remote_auth = 0xff;
4838 
4839 	/* To avoid duplicate auth_failed events to user space we check
4840 	 * the HCI_CONN_AUTH_PEND flag which will be set if we
4841 	 * initiated the authentication. A traditional auth_complete
4842 	 * event gets always produced as initiator and is also mapped to
4843 	 * the mgmt_auth_failed event */
4844 	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
4845 		mgmt_auth_failed(conn, ev->status);
4846 
4847 	hci_conn_drop(conn);
4848 
4849 unlock:
4850 	hci_dev_unlock(hdev);
4851 }
4852 
4853 static void hci_remote_host_features_evt(struct hci_dev *hdev,
4854 					 struct sk_buff *skb)
4855 {
4856 	struct hci_ev_remote_host_features *ev = (void *) skb->data;
4857 	struct inquiry_entry *ie;
4858 	struct hci_conn *conn;
4859 
4860 	BT_DBG("%s", hdev->name);
4861 
4862 	hci_dev_lock(hdev);
4863 
4864 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4865 	if (conn)
4866 		memcpy(conn->features[1], ev->features, 8);
4867 
4868 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4869 	if (ie)
4870 		ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4871 
4872 	hci_dev_unlock(hdev);
4873 }
4874 
/* Handle HCI Remote OOB Data Request event.
 *
 * Answers with the stored out-of-band pairing data for the peer: a
 * negative reply when none is stored, the extended (P-192 + P-256)
 * reply when Secure Connections is enabled, or the legacy P-192-only
 * reply otherwise.
 */
static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
	struct oob_data *data;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
	if (!data) {
		struct hci_cp_remote_oob_data_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
			     sizeof(cp), &cp);
		goto unlock;
	}

	if (bredr_sc_enabled(hdev)) {
		struct hci_cp_remote_oob_ext_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* In Secure Connections Only mode the P-192 values must
		 * not be used, so send them zeroed out.
		 */
		if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
			memset(cp.hash192, 0, sizeof(cp.hash192));
			memset(cp.rand192, 0, sizeof(cp.rand192));
		} else {
			memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
			memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
		}
		memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
		memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_remote_oob_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		memcpy(cp.hash, data->hash192, sizeof(cp.hash));
		memcpy(cp.rand, data->rand192, sizeof(cp.rand));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
4928 
4929 #if IS_ENABLED(CONFIG_BT_HS)
4930 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4931 {
4932 	struct hci_ev_channel_selected *ev = (void *)skb->data;
4933 	struct hci_conn *hcon;
4934 
4935 	BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4936 
4937 	skb_pull(skb, sizeof(*ev));
4938 
4939 	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4940 	if (!hcon)
4941 		return;
4942 
4943 	amp_read_loc_assoc_final_data(hdev, hcon);
4944 }
4945 
/* Handle AMP Physical Link Complete event.
 *
 * On success, marks the AMP connection as connected, copies the peer
 * address from the underlying BR/EDR link, registers debugfs/sysfs
 * entries and confirms the physical link to the AMP manager. On error
 * the half-created connection is deleted.
 */
static void hci_phy_link_complete_evt(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_ev_phy_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon, *bredr_hcon;

	BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
	       ev->status);

	hci_dev_lock(hdev);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		goto unlock;

	/* Only connections managed by an AMP manager are relevant here */
	if (!hcon->amp_mgr)
		goto unlock;

	if (ev->status) {
		hci_conn_del(hcon);
		goto unlock;
	}

	bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;

	hcon->state = BT_CONNECTED;
	/* The AMP link shares its destination with the BR/EDR link */
	bacpy(&hcon->dst, &bredr_hcon->dst);

	hci_conn_hold(hcon);
	hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(hcon);

	hci_debugfs_create_conn(hcon);
	hci_conn_add_sysfs(hcon);

	amp_physical_cfm(bredr_hcon, hcon);

unlock:
	hci_dev_unlock(hdev);
}
4986 
/* Handle AMP Logical Link Complete event.
 *
 * Creates an hci_chan for the new logical link and, when an AMP
 * manager with a BR/EDR channel is attached, confirms the logical
 * link to L2CAP using the controller's block MTU.
 *
 * NOTE(review): unlike most handlers here this one does not take
 * hci_dev_lock around the connection lookup — confirm this is
 * intentional and safe in this context.
 */
static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_logical_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon;
	struct hci_chan *hchan;
	struct amp_mgr *mgr;

	BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
	       hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
	       ev->status);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	/* Create AMP hchan */
	hchan = hci_chan_create(hcon);
	if (!hchan)
		return;

	hchan->handle = le16_to_cpu(ev->handle);

	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);

	mgr = hcon->amp_mgr;
	if (mgr && mgr->bredr_chan) {
		struct l2cap_chan *bredr_chan = mgr->bredr_chan;

		l2cap_chan_lock(bredr_chan);

		bredr_chan->conn->mtu = hdev->block_mtu;
		l2cap_logical_cfm(bredr_chan, hchan, 0);
		hci_conn_hold(hcon);

		l2cap_chan_unlock(bredr_chan);
	}
}
5024 
5025 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
5026 					     struct sk_buff *skb)
5027 {
5028 	struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
5029 	struct hci_chan *hchan;
5030 
5031 	BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
5032 	       le16_to_cpu(ev->handle), ev->status);
5033 
5034 	if (ev->status)
5035 		return;
5036 
5037 	hci_dev_lock(hdev);
5038 
5039 	hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
5040 	if (!hchan)
5041 		goto unlock;
5042 
5043 	amp_destroy_logical_link(hchan, ev->reason);
5044 
5045 unlock:
5046 	hci_dev_unlock(hdev);
5047 }
5048 
5049 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
5050 					     struct sk_buff *skb)
5051 {
5052 	struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
5053 	struct hci_conn *hcon;
5054 
5055 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5056 
5057 	if (ev->status)
5058 		return;
5059 
5060 	hci_dev_lock(hdev);
5061 
5062 	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5063 	if (hcon) {
5064 		hcon->state = BT_CLOSED;
5065 		hci_conn_del(hcon);
5066 	}
5067 
5068 	hci_dev_unlock(hdev);
5069 }
5070 #endif
5071 
/* Common handler for LE Connection Complete and LE Enhanced
 * Connection Complete events (called from both wrappers below).
 *
 * Creates or completes the hci_conn object, fills in initiator and
 * responder address bookkeeping, resolves the peer's identity address
 * via stored IRKs, and either fails the connection (non-zero status)
 * or transitions it towards BT_CONNECTED, optionally reading remote
 * features first. Also cleans up any pending connection parameters
 * that triggered this connection.
 */
static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
			bdaddr_t *bdaddr, u8 bdaddr_type, u8 role, u16 handle,
			u16 interval, u16 latency, u16 supervision_timeout)
{
	struct hci_conn_params *params;
	struct hci_conn *conn;
	struct smp_irk *irk;
	u8 addr_type;

	hci_dev_lock(hdev);

	/* All controllers implicitly stop advertising in the event of a
	 * connection, so ensure that the state bit is cleared.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	conn = hci_lookup_le_connect(hdev);
	if (!conn) {
		conn = hci_conn_add(hdev, LE_LINK, bdaddr, role);
		if (!conn) {
			bt_dev_err(hdev, "no memory for new connection");
			goto unlock;
		}

		conn->dst_type = bdaddr_type;

		/* If we didn't have a hci_conn object previously
		 * but we're in master role this must be something
		 * initiated using a white list. Since white list based
		 * connections are not "first class citizens" we don't
		 * have full tracking of them. Therefore, we go ahead
		 * with a "best effort" approach of determining the
		 * initiator address based on the HCI_PRIVACY flag.
		 */
		if (conn->out) {
			conn->resp_addr_type = bdaddr_type;
			bacpy(&conn->resp_addr, bdaddr);
			if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
				bacpy(&conn->init_addr, &hdev->rpa);
			} else {
				hci_copy_identity_address(hdev,
							  &conn->init_addr,
							  &conn->init_addr_type);
			}
		}
	} else {
		/* An outstanding connection attempt succeeded or failed;
		 * either way its timeout is no longer needed.
		 */
		cancel_delayed_work(&conn->le_conn_timeout);
	}

	if (!conn->out) {
		/* Set the responder (our side) address type based on
		 * the advertising address type.
		 */
		conn->resp_addr_type = hdev->adv_addr_type;
		if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
			/* In case of ext adv, resp_addr will be updated in
			 * Adv Terminated event.
			 */
			if (!ext_adv_capable(hdev))
				bacpy(&conn->resp_addr, &hdev->random_addr);
		} else {
			bacpy(&conn->resp_addr, &hdev->bdaddr);
		}

		conn->init_addr_type = bdaddr_type;
		bacpy(&conn->init_addr, bdaddr);

		/* For incoming connections, set the default minimum
		 * and maximum connection interval. They will be used
		 * to check if the parameters are in range and if not
		 * trigger the connection update procedure.
		 */
		conn->le_conn_min_interval = hdev->le_conn_min_interval;
		conn->le_conn_max_interval = hdev->le_conn_max_interval;
	}

	/* Lookup the identity address from the stored connection
	 * address and address type.
	 *
	 * When establishing connections to an identity address, the
	 * connection procedure will store the resolvable random
	 * address first. Now if it can be converted back into the
	 * identity address, start using the identity address from
	 * now on.
	 */
	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
	if (irk) {
		bacpy(&conn->dst, &irk->bdaddr);
		conn->dst_type = irk->addr_type;
	}

	if (status) {
		hci_le_conn_failed(conn, status);
		goto unlock;
	}

	if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
		addr_type = BDADDR_LE_PUBLIC;
	else
		addr_type = BDADDR_LE_RANDOM;

	/* Drop the connection if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
		hci_conn_drop(conn);
		goto unlock;
	}

	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = handle;
	conn->state = BT_CONFIG;

	/* Record the negotiated connection parameters */
	conn->le_conn_interval = interval;
	conn->le_conn_latency = latency;
	conn->le_supv_timeout = supervision_timeout;

	hci_debugfs_create_conn(conn);
	hci_conn_add_sysfs(conn);

	/* The remote features procedure is defined for master
	 * role only. So only in case of an initiated connection
	 * request the remote features.
	 *
	 * If the local controller supports slave-initiated features
	 * exchange, then requesting the remote features in slave
	 * role is possible. Otherwise just transition into the
	 * connected state without requesting the remote features.
	 */
	if (conn->out ||
	    (hdev->le_features[0] & HCI_LE_SLAVE_FEATURES)) {
		struct hci_cp_le_read_remote_features cp;

		cp.handle = __cpu_to_le16(conn->handle);

		hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
			     sizeof(cp), &cp);

		hci_conn_hold(conn);
	} else {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, status);
	}

	/* Connection established: drop the pending-connection reference
	 * that was taken when the attempt was triggered.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
					   conn->dst_type);
	if (params) {
		list_del_init(&params->action);
		if (params->conn) {
			hci_conn_drop(params->conn);
			hci_conn_put(params->conn);
			params->conn = NULL;
		}
	}

unlock:
	hci_update_background_scan(hdev);
	hci_dev_unlock(hdev);
}
5233 
5234 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
5235 {
5236 	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
5237 
5238 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5239 
5240 	le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5241 			     ev->role, le16_to_cpu(ev->handle),
5242 			     le16_to_cpu(ev->interval),
5243 			     le16_to_cpu(ev->latency),
5244 			     le16_to_cpu(ev->supervision_timeout));
5245 }
5246 
/* Handle HCI LE Enhanced Connection Complete event.
 *
 * Delegates to the common completion logic and, when controller-based
 * address resolution (LL privacy) is active, disables it afterwards.
 */
static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_le_enh_conn_complete *ev = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
			     ev->role, le16_to_cpu(ev->handle),
			     le16_to_cpu(ev->interval),
			     le16_to_cpu(ev->latency),
			     le16_to_cpu(ev->supervision_timeout));

	/* Only act when LL privacy is supported, enabled and the
	 * controller currently has RPA resolution switched on.
	 */
	if (use_ll_privacy(hdev) &&
	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
	    hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
		hci_req_disable_address_resolution(hdev);
}
5265 
/* Handle HCI LE Advertising Set Terminated event.
 *
 * When the advertising set was terminated because a connection was
 * created, fill in the connection's responder address, which for
 * extended advertising is only known once this event arrives (see the
 * ext_adv comment in le_conn_complete_evt).
 */
static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_evt_le_ext_adv_set_term *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	if (ev->status)
		return;

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
	if (conn) {
		struct adv_info *adv_instance;

		/* A public advertising address needs no fixup here */
		if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM)
			return;

		/* Instance 0 uses the controller-wide random address */
		if (!hdev->cur_adv_instance) {
			bacpy(&conn->resp_addr, &hdev->random_addr);
			return;
		}

		adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
		if (adv_instance)
			bacpy(&conn->resp_addr, &adv_instance->random_addr);
	}
}
5293 
5294 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
5295 					    struct sk_buff *skb)
5296 {
5297 	struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
5298 	struct hci_conn *conn;
5299 
5300 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5301 
5302 	if (ev->status)
5303 		return;
5304 
5305 	hci_dev_lock(hdev);
5306 
5307 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5308 	if (conn) {
5309 		conn->le_conn_interval = le16_to_cpu(ev->interval);
5310 		conn->le_conn_latency = le16_to_cpu(ev->latency);
5311 		conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
5312 	}
5313 
5314 	hci_dev_unlock(hdev);
5315 }
5316 
/* This function requires the caller holds hdev->lock.
 *
 * Given an incoming advertising report, decide whether we should
 * attempt an LE connection to the advertiser and, if so, start one.
 * Returns the connection object on success or NULL when no attempt is
 * made (non-connectable advertising, blocked device, no matching
 * pending connection parameters, controller busy, etc.).
 */
static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
					      bdaddr_t *addr,
					      u8 addr_type, u8 adv_type,
					      bdaddr_t *direct_rpa)
{
	struct hci_conn *conn;
	struct hci_conn_params *params;

	/* If the event is not connectable don't proceed further */
	if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
		return NULL;

	/* Ignore if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type))
		return NULL;

	/* Most controller will fail if we try to create new connections
	 * while we have an existing one in slave role.
	 */
	if (hdev->conn_hash.le_num_slave > 0 &&
	    (!test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) ||
	     !(hdev->le_states[3] & 0x10)))
		return NULL;

	/* If we're not connectable only connect devices that we have in
	 * our pend_le_conns list.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
					   addr_type);
	if (!params)
		return NULL;

	if (!params->explicit_connect) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
			/* Only devices advertising with ADV_DIRECT_IND are
			 * triggering a connection attempt. This is allowing
			 * incoming connections from slave devices.
			 */
			if (adv_type != LE_ADV_DIRECT_IND)
				return NULL;
			break;
		case HCI_AUTO_CONN_ALWAYS:
			/* Devices advertising with ADV_IND or ADV_DIRECT_IND
			 * are triggering a connection attempt. This means
			 * that incoming connections from slave device are
			 * accepted and also outgoing connections to slave
			 * devices are established when found.
			 */
			break;
		default:
			return NULL;
		}
	}

	conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
			      hdev->def_le_autoconnect_timeout, HCI_ROLE_MASTER,
			      direct_rpa);
	if (!IS_ERR(conn)) {
		/* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
		 * by higher layer that tried to connect, if no then
		 * store the pointer since we don't really have any
		 * other owner of the object besides the params that
		 * triggered it. This way we can abort the connection if
		 * the parameters get removed and keep the reference
		 * count consistent once the connection is established.
		 */

		if (!params->explicit_connect)
			params->conn = hci_conn_get(conn);

		return conn;
	}

	switch (PTR_ERR(conn)) {
	case -EBUSY:
		/* If hci_connect() returns -EBUSY it means there is already
		 * an LE connection attempt going on. Since controllers don't
		 * support more than one connection attempt at the time, we
		 * don't consider this an error case.
		 */
		break;
	default:
		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
		return NULL;
	}

	return NULL;
}
5407 
5408 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
5409 			       u8 bdaddr_type, bdaddr_t *direct_addr,
5410 			       u8 direct_addr_type, s8 rssi, u8 *data, u8 len,
5411 			       bool ext_adv)
5412 {
5413 	struct discovery_state *d = &hdev->discovery;
5414 	struct smp_irk *irk;
5415 	struct hci_conn *conn;
5416 	bool match;
5417 	u32 flags;
5418 	u8 *ptr, real_len;
5419 
5420 	switch (type) {
5421 	case LE_ADV_IND:
5422 	case LE_ADV_DIRECT_IND:
5423 	case LE_ADV_SCAN_IND:
5424 	case LE_ADV_NONCONN_IND:
5425 	case LE_ADV_SCAN_RSP:
5426 		break;
5427 	default:
5428 		bt_dev_err_ratelimited(hdev, "unknown advertising packet "
5429 				       "type: 0x%02x", type);
5430 		return;
5431 	}
5432 
5433 	if (!ext_adv && len > HCI_MAX_AD_LENGTH) {
5434 		bt_dev_err_ratelimited(hdev, "legacy adv larger than 31 bytes");
5435 		return;
5436 	}
5437 
5438 	/* Find the end of the data in case the report contains padded zero
5439 	 * bytes at the end causing an invalid length value.
5440 	 *
5441 	 * When data is NULL, len is 0 so there is no need for extra ptr
5442 	 * check as 'ptr < data + 0' is already false in such case.
5443 	 */
5444 	for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
5445 		if (ptr + 1 + *ptr > data + len)
5446 			break;
5447 	}
5448 
5449 	real_len = ptr - data;
5450 
5451 	/* Adjust for actual length */
5452 	if (len != real_len) {
5453 		bt_dev_err_ratelimited(hdev, "advertising data len corrected %u -> %u",
5454 				       len, real_len);
5455 		len = real_len;
5456 	}
5457 
5458 	/* If the direct address is present, then this report is from
5459 	 * a LE Direct Advertising Report event. In that case it is
5460 	 * important to see if the address is matching the local
5461 	 * controller address.
5462 	 */
5463 	if (direct_addr) {
5464 		/* Only resolvable random addresses are valid for these
5465 		 * kind of reports and others can be ignored.
5466 		 */
5467 		if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
5468 			return;
5469 
5470 		/* If the controller is not using resolvable random
5471 		 * addresses, then this report can be ignored.
5472 		 */
5473 		if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
5474 			return;
5475 
5476 		/* If the local IRK of the controller does not match
5477 		 * with the resolvable random address provided, then
5478 		 * this report can be ignored.
5479 		 */
5480 		if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
5481 			return;
5482 	}
5483 
5484 	/* Check if we need to convert to identity address */
5485 	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
5486 	if (irk) {
5487 		bdaddr = &irk->bdaddr;
5488 		bdaddr_type = irk->addr_type;
5489 	}
5490 
5491 	/* Check if we have been requested to connect to this device.
5492 	 *
5493 	 * direct_addr is set only for directed advertising reports (it is NULL
5494 	 * for advertising reports) and is already verified to be RPA above.
5495 	 */
5496 	conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type,
5497 								direct_addr);
5498 	if (!ext_adv && conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) {
5499 		/* Store report for later inclusion by
5500 		 * mgmt_device_connected
5501 		 */
5502 		memcpy(conn->le_adv_data, data, len);
5503 		conn->le_adv_data_len = len;
5504 	}
5505 
5506 	/* Passive scanning shouldn't trigger any device found events,
5507 	 * except for devices marked as CONN_REPORT for which we do send
5508 	 * device found events, or advertisement monitoring requested.
5509 	 */
5510 	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
5511 		if (type == LE_ADV_DIRECT_IND)
5512 			return;
5513 
5514 		if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
5515 					       bdaddr, bdaddr_type) &&
5516 		    idr_is_empty(&hdev->adv_monitors_idr))
5517 			return;
5518 
5519 		if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
5520 			flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
5521 		else
5522 			flags = 0;
5523 		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5524 				  rssi, flags, data, len, NULL, 0);
5525 		return;
5526 	}
5527 
5528 	/* When receiving non-connectable or scannable undirected
5529 	 * advertising reports, this means that the remote device is
5530 	 * not connectable and then clearly indicate this in the
5531 	 * device found event.
5532 	 *
5533 	 * When receiving a scan response, then there is no way to
5534 	 * know if the remote device is connectable or not. However
5535 	 * since scan responses are merged with a previously seen
5536 	 * advertising report, the flags field from that report
5537 	 * will be used.
5538 	 *
5539 	 * In the really unlikely case that a controller get confused
5540 	 * and just sends a scan response event, then it is marked as
5541 	 * not connectable as well.
5542 	 */
5543 	if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
5544 	    type == LE_ADV_SCAN_RSP)
5545 		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
5546 	else
5547 		flags = 0;
5548 
5549 	/* If there's nothing pending either store the data from this
5550 	 * event or send an immediate device found event if the data
5551 	 * should not be stored for later.
5552 	 */
5553 	if (!ext_adv &&	!has_pending_adv_report(hdev)) {
5554 		/* If the report will trigger a SCAN_REQ store it for
5555 		 * later merging.
5556 		 */
5557 		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
5558 			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
5559 						 rssi, flags, data, len);
5560 			return;
5561 		}
5562 
5563 		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5564 				  rssi, flags, data, len, NULL, 0);
5565 		return;
5566 	}
5567 
5568 	/* Check if the pending report is for the same device as the new one */
5569 	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
5570 		 bdaddr_type == d->last_adv_addr_type);
5571 
5572 	/* If the pending data doesn't match this report or this isn't a
5573 	 * scan response (e.g. we got a duplicate ADV_IND) then force
5574 	 * sending of the pending data.
5575 	 */
5576 	if (type != LE_ADV_SCAN_RSP || !match) {
5577 		/* Send out whatever is in the cache, but skip duplicates */
5578 		if (!match)
5579 			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
5580 					  d->last_adv_addr_type, NULL,
5581 					  d->last_adv_rssi, d->last_adv_flags,
5582 					  d->last_adv_data,
5583 					  d->last_adv_data_len, NULL, 0);
5584 
5585 		/* If the new report will trigger a SCAN_REQ store it for
5586 		 * later merging.
5587 		 */
5588 		if (!ext_adv && (type == LE_ADV_IND ||
5589 				 type == LE_ADV_SCAN_IND)) {
5590 			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
5591 						 rssi, flags, data, len);
5592 			return;
5593 		}
5594 
5595 		/* The advertising reports cannot be merged, so clear
5596 		 * the pending report and send out a device found event.
5597 		 */
5598 		clear_pending_adv_report(hdev);
5599 		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5600 				  rssi, flags, data, len, NULL, 0);
5601 		return;
5602 	}
5603 
5604 	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
5605 	 * the new event is a SCAN_RSP. We can therefore proceed with
5606 	 * sending a merged device found event.
5607 	 */
5608 	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
5609 			  d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
5610 			  d->last_adv_data, d->last_adv_data_len, data, len);
5611 	clear_pending_adv_report(hdev);
5612 }
5613 
5614 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
5615 {
5616 	u8 num_reports = skb->data[0];
5617 	void *ptr = &skb->data[1];
5618 
5619 	hci_dev_lock(hdev);
5620 
5621 	while (num_reports--) {
5622 		struct hci_ev_le_advertising_info *ev = ptr;
5623 		s8 rssi;
5624 
5625 		if (ev->length <= HCI_MAX_AD_LENGTH) {
5626 			rssi = ev->data[ev->length];
5627 			process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
5628 					   ev->bdaddr_type, NULL, 0, rssi,
5629 					   ev->data, ev->length, false);
5630 		} else {
5631 			bt_dev_err(hdev, "Dropping invalid advertising data");
5632 		}
5633 
5634 		ptr += sizeof(*ev) + ev->length + 1;
5635 	}
5636 
5637 	hci_dev_unlock(hdev);
5638 }
5639 
5640 static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
5641 {
5642 	if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
5643 		switch (evt_type) {
5644 		case LE_LEGACY_ADV_IND:
5645 			return LE_ADV_IND;
5646 		case LE_LEGACY_ADV_DIRECT_IND:
5647 			return LE_ADV_DIRECT_IND;
5648 		case LE_LEGACY_ADV_SCAN_IND:
5649 			return LE_ADV_SCAN_IND;
5650 		case LE_LEGACY_NONCONN_IND:
5651 			return LE_ADV_NONCONN_IND;
5652 		case LE_LEGACY_SCAN_RSP_ADV:
5653 		case LE_LEGACY_SCAN_RSP_ADV_SCAN:
5654 			return LE_ADV_SCAN_RSP;
5655 		}
5656 
5657 		goto invalid;
5658 	}
5659 
5660 	if (evt_type & LE_EXT_ADV_CONN_IND) {
5661 		if (evt_type & LE_EXT_ADV_DIRECT_IND)
5662 			return LE_ADV_DIRECT_IND;
5663 
5664 		return LE_ADV_IND;
5665 	}
5666 
5667 	if (evt_type & LE_EXT_ADV_SCAN_RSP)
5668 		return LE_ADV_SCAN_RSP;
5669 
5670 	if (evt_type & LE_EXT_ADV_SCAN_IND)
5671 		return LE_ADV_SCAN_IND;
5672 
5673 	if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
5674 	    evt_type & LE_EXT_ADV_DIRECT_IND)
5675 		return LE_ADV_NONCONN_IND;
5676 
5677 invalid:
5678 	bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x",
5679 			       evt_type);
5680 
5681 	return LE_ADV_INVALID;
5682 }
5683 
5684 static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
5685 {
5686 	u8 num_reports = skb->data[0];
5687 	void *ptr = &skb->data[1];
5688 
5689 	hci_dev_lock(hdev);
5690 
5691 	while (num_reports--) {
5692 		struct hci_ev_le_ext_adv_report *ev = ptr;
5693 		u8 legacy_evt_type;
5694 		u16 evt_type;
5695 
5696 		evt_type = __le16_to_cpu(ev->evt_type);
5697 		legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);
5698 		if (legacy_evt_type != LE_ADV_INVALID) {
5699 			process_adv_report(hdev, legacy_evt_type, &ev->bdaddr,
5700 					   ev->bdaddr_type, NULL, 0, ev->rssi,
5701 					   ev->data, ev->length,
5702 					   !(evt_type & LE_EXT_ADV_LEGACY_PDU));
5703 		}
5704 
5705 		ptr += sizeof(*ev) + ev->length;
5706 	}
5707 
5708 	hci_dev_unlock(hdev);
5709 }
5710 
/* Handle the LE Read Remote Features Complete meta event: cache the
 * remote LE feature bits on the matching connection and, if the
 * connection was still in the configuration stage, complete its setup.
 */
static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_ev_le_remote_feat_complete *ev = (void *)skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		/* Only cache the feature page on success. */
		if (!ev->status)
			memcpy(conn->features[0], ev->features, 8);

		if (conn->state == BT_CONFIG) {
			__u8 status;

			/* If the local controller supports slave-initiated
			 * features exchange, but the remote controller does
			 * not, then it is possible that the error code 0x1a
			 * for unsupported remote feature gets returned.
			 *
			 * In this specific case, allow the connection to
			 * transition into connected state and mark it as
			 * successful.
			 */
			if ((hdev->le_features[0] & HCI_LE_SLAVE_FEATURES) &&
			    !conn->out && ev->status == 0x1a)
				status = 0x00;
			else
				status = ev->status;

			/* Finish connection setup and drop the reference
			 * taken for the feature exchange.
			 */
			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
5752 
/* Handle the LE Long Term Key Request meta event: look up a stored LTK
 * for the connection and reply with it, or send a negative reply when
 * no matching key exists.
 */
static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
	if (!ltk)
		goto not_found;

	if (smp_ltk_is_sc(ltk)) {
		/* With SC both EDiv and Rand are set to zero */
		if (ev->ediv || ev->rand)
			goto not_found;
	} else {
		/* For non-SC keys check that EDiv and Rand match */
		if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
			goto not_found;
	}

	/* Copy the key and zero-pad the remainder in case the stored key
	 * is shorter than the reply's LTK field.
	 */
	memcpy(cp.ltk, ltk->val, ltk->enc_size);
	memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
	cp.handle = cpu_to_le16(conn->handle);

	conn->pending_sec_level = smp_ltk_sec_level(ltk);

	conn->enc_key_size = ltk->enc_size;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
	 * temporary key used to encrypt a connection following
	 * pairing. It is used during the Encrypted Session Setup to
	 * distribute the keys. Later, security can be re-established
	 * using a distributed LTK.
	 */
	if (ltk->type == SMP_STK) {
		/* STKs are one-shot: mark the connection and remove the
		 * key from the list (RCU-safe deletion).
		 */
		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
		list_del_rcu(&ltk->list);
		kfree_rcu(ltk, rcu);
	} else {
		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}
5816 
5817 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
5818 				      u8 reason)
5819 {
5820 	struct hci_cp_le_conn_param_req_neg_reply cp;
5821 
5822 	cp.handle = cpu_to_le16(handle);
5823 	cp.reason = reason;
5824 
5825 	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
5826 		     &cp);
5827 }
5828 
5829 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
5830 					     struct sk_buff *skb)
5831 {
5832 	struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
5833 	struct hci_cp_le_conn_param_req_reply cp;
5834 	struct hci_conn *hcon;
5835 	u16 handle, min, max, latency, timeout;
5836 
5837 	handle = le16_to_cpu(ev->handle);
5838 	min = le16_to_cpu(ev->interval_min);
5839 	max = le16_to_cpu(ev->interval_max);
5840 	latency = le16_to_cpu(ev->latency);
5841 	timeout = le16_to_cpu(ev->timeout);
5842 
5843 	hcon = hci_conn_hash_lookup_handle(hdev, handle);
5844 	if (!hcon || hcon->state != BT_CONNECTED)
5845 		return send_conn_param_neg_reply(hdev, handle,
5846 						 HCI_ERROR_UNKNOWN_CONN_ID);
5847 
5848 	if (hci_check_conn_params(min, max, latency, timeout))
5849 		return send_conn_param_neg_reply(hdev, handle,
5850 						 HCI_ERROR_INVALID_LL_PARAMS);
5851 
5852 	if (hcon->role == HCI_ROLE_MASTER) {
5853 		struct hci_conn_params *params;
5854 		u8 store_hint;
5855 
5856 		hci_dev_lock(hdev);
5857 
5858 		params = hci_conn_params_lookup(hdev, &hcon->dst,
5859 						hcon->dst_type);
5860 		if (params) {
5861 			params->conn_min_interval = min;
5862 			params->conn_max_interval = max;
5863 			params->conn_latency = latency;
5864 			params->supervision_timeout = timeout;
5865 			store_hint = 0x01;
5866 		} else{
5867 			store_hint = 0x00;
5868 		}
5869 
5870 		hci_dev_unlock(hdev);
5871 
5872 		mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
5873 				    store_hint, min, max, latency, timeout);
5874 	}
5875 
5876 	cp.handle = ev->handle;
5877 	cp.interval_min = ev->interval_min;
5878 	cp.interval_max = ev->interval_max;
5879 	cp.latency = ev->latency;
5880 	cp.timeout = ev->timeout;
5881 	cp.min_ce_len = 0;
5882 	cp.max_ce_len = 0;
5883 
5884 	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
5885 }
5886 
5887 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
5888 					 struct sk_buff *skb)
5889 {
5890 	u8 num_reports = skb->data[0];
5891 	struct hci_ev_le_direct_adv_info *ev = (void *)&skb->data[1];
5892 
5893 	if (!num_reports || skb->len < num_reports * sizeof(*ev) + 1)
5894 		return;
5895 
5896 	hci_dev_lock(hdev);
5897 
5898 	for (; num_reports; num_reports--, ev++)
5899 		process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
5900 				   ev->bdaddr_type, &ev->direct_addr,
5901 				   ev->direct_addr_type, ev->rssi, NULL, 0,
5902 				   false);
5903 
5904 	hci_dev_unlock(hdev);
5905 }
5906 
5907 static void hci_le_phy_update_evt(struct hci_dev *hdev, struct sk_buff *skb)
5908 {
5909 	struct hci_ev_le_phy_update_complete *ev = (void *) skb->data;
5910 	struct hci_conn *conn;
5911 
5912 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5913 
5914 	if (!ev->status)
5915 		return;
5916 
5917 	hci_dev_lock(hdev);
5918 
5919 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5920 	if (!conn)
5921 		goto unlock;
5922 
5923 	conn->le_tx_phy = ev->tx_phy;
5924 	conn->le_rx_phy = ev->rx_phy;
5925 
5926 unlock:
5927 	hci_dev_unlock(hdev);
5928 }
5929 
/* Dispatch an LE Meta event to its subevent handler. The meta header is
 * stripped first, so every handler sees skb->data pointing at its own
 * subevent parameters. Unknown subevents are silently ignored.
 */
static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_meta *le_ev = (void *) skb->data;

	skb_pull(skb, sizeof(*le_ev));

	switch (le_ev->subevent) {
	case HCI_EV_LE_CONN_COMPLETE:
		hci_le_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_CONN_UPDATE_COMPLETE:
		hci_le_conn_update_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_ADVERTISING_REPORT:
		hci_le_adv_report_evt(hdev, skb);
		break;

	case HCI_EV_LE_REMOTE_FEAT_COMPLETE:
		hci_le_remote_feat_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_LTK_REQ:
		hci_le_ltk_request_evt(hdev, skb);
		break;

	case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
		hci_le_remote_conn_param_req_evt(hdev, skb);
		break;

	case HCI_EV_LE_DIRECT_ADV_REPORT:
		hci_le_direct_adv_report_evt(hdev, skb);
		break;

	case HCI_EV_LE_PHY_UPDATE_COMPLETE:
		hci_le_phy_update_evt(hdev, skb);
		break;

	case HCI_EV_LE_EXT_ADV_REPORT:
		hci_le_ext_adv_report_evt(hdev, skb);
		break;

	case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
		hci_le_enh_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_EXT_ADV_SET_TERM:
		hci_le_ext_adv_term_evt(hdev, skb);
		break;

	default:
		break;
	}
}
5985 
/* Check whether @skb is the Command Complete (or, for request chains
 * terminated by @event, the matching) event for @opcode. On success the
 * event and cmd_complete headers have been pulled from @skb, leaving
 * only the return parameters; returns false when the skb does not match
 * or carries no usable parameters.
 */
static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
				 u8 event, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;

	if (!skb)
		return false;

	if (skb->len < sizeof(*hdr)) {
		bt_dev_err(hdev, "too short HCI event");
		return false;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* When the request was terminated by a specific event, matching
	 * the event type is all that is needed.
	 */
	if (event) {
		if (hdr->evt != event)
			return false;
		return true;
	}

	/* Check if request ended in Command Status - no way to retrieve
	 * any extra parameters in this case.
	 */
	if (hdr->evt == HCI_EV_CMD_STATUS)
		return false;

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
			   hdr->evt);
		return false;
	}

	if (skb->len < sizeof(*ev)) {
		bt_dev_err(hdev, "too short cmd_complete event");
		return false;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode != __le16_to_cpu(ev->opcode)) {
		BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
		       __le16_to_cpu(ev->opcode));
		return false;
	}

	return true;
}
6037 
6038 static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
6039 				  struct sk_buff *skb)
6040 {
6041 	struct hci_ev_le_advertising_info *adv;
6042 	struct hci_ev_le_direct_adv_info *direct_adv;
6043 	struct hci_ev_le_ext_adv_report *ext_adv;
6044 	const struct hci_ev_conn_complete *conn_complete = (void *)skb->data;
6045 	const struct hci_ev_conn_request *conn_request = (void *)skb->data;
6046 
6047 	hci_dev_lock(hdev);
6048 
6049 	/* If we are currently suspended and this is the first BT event seen,
6050 	 * save the wake reason associated with the event.
6051 	 */
6052 	if (!hdev->suspended || hdev->wake_reason)
6053 		goto unlock;
6054 
6055 	/* Default to remote wake. Values for wake_reason are documented in the
6056 	 * Bluez mgmt api docs.
6057 	 */
6058 	hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE;
6059 
6060 	/* Once configured for remote wakeup, we should only wake up for
6061 	 * reconnections. It's useful to see which device is waking us up so
6062 	 * keep track of the bdaddr of the connection event that woke us up.
6063 	 */
6064 	if (event == HCI_EV_CONN_REQUEST) {
6065 		bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
6066 		hdev->wake_addr_type = BDADDR_BREDR;
6067 	} else if (event == HCI_EV_CONN_COMPLETE) {
6068 		bacpy(&hdev->wake_addr, &conn_request->bdaddr);
6069 		hdev->wake_addr_type = BDADDR_BREDR;
6070 	} else if (event == HCI_EV_LE_META) {
6071 		struct hci_ev_le_meta *le_ev = (void *)skb->data;
6072 		u8 subevent = le_ev->subevent;
6073 		u8 *ptr = &skb->data[sizeof(*le_ev)];
6074 		u8 num_reports = *ptr;
6075 
6076 		if ((subevent == HCI_EV_LE_ADVERTISING_REPORT ||
6077 		     subevent == HCI_EV_LE_DIRECT_ADV_REPORT ||
6078 		     subevent == HCI_EV_LE_EXT_ADV_REPORT) &&
6079 		    num_reports) {
6080 			adv = (void *)(ptr + 1);
6081 			direct_adv = (void *)(ptr + 1);
6082 			ext_adv = (void *)(ptr + 1);
6083 
6084 			switch (subevent) {
6085 			case HCI_EV_LE_ADVERTISING_REPORT:
6086 				bacpy(&hdev->wake_addr, &adv->bdaddr);
6087 				hdev->wake_addr_type = adv->bdaddr_type;
6088 				break;
6089 			case HCI_EV_LE_DIRECT_ADV_REPORT:
6090 				bacpy(&hdev->wake_addr, &direct_adv->bdaddr);
6091 				hdev->wake_addr_type = direct_adv->bdaddr_type;
6092 				break;
6093 			case HCI_EV_LE_EXT_ADV_REPORT:
6094 				bacpy(&hdev->wake_addr, &ext_adv->bdaddr);
6095 				hdev->wake_addr_type = ext_adv->bdaddr_type;
6096 				break;
6097 			}
6098 		}
6099 	} else {
6100 		hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED;
6101 	}
6102 
6103 unlock:
6104 	hci_dev_unlock(hdev);
6105 }
6106 
/* Top-level HCI event demultiplexer. Parses the event header, records
 * suspend wake reasons, dispatches the event to its handler, and drives
 * completion of any pending HCI request that was waiting on this event.
 * Consumes @skb.
 */
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	hci_req_complete_t req_complete = NULL;
	hci_req_complete_skb_t req_complete_skb = NULL;
	struct sk_buff *orig_skb = NULL;
	u8 status = 0, event = hdr->evt, req_evt = 0;
	u16 opcode = HCI_OP_NOP;

	/* Event code 0x00 is not defined by the spec; drop it. */
	if (!event) {
		bt_dev_warn(hdev, "Received unexpected HCI Event 00000000");
		goto done;
	}

	/* If the outstanding command's request was waiting for exactly
	 * this event, mark the request complete now.
	 */
	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) {
		struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
		opcode = __le16_to_cpu(cmd_hdr->opcode);
		hci_req_cmd_complete(hdev, opcode, status, &req_complete,
				     &req_complete_skb);
		req_evt = event;
	}

	/* If it looks like we might end up having to call
	 * req_complete_skb, store a pristine copy of the skb since the
	 * various handlers may modify the original one through
	 * skb_pull() calls, etc.
	 */
	if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
	    event == HCI_EV_CMD_COMPLETE)
		orig_skb = skb_clone(skb, GFP_KERNEL);

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* Store wake reason if we're suspended */
	hci_store_wake_reason(hdev, event, skb);

	/* Dispatch to the per-event handler; each handler sees skb->data
	 * pointing at the event parameters (header already pulled).
	 */
	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb, &opcode, &status,
				     &req_complete, &req_complete_skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb, &opcode, &status, &req_complete,
				   &req_complete_skb);
		break;

	case HCI_EV_HARDWARE_ERROR:
		hci_hardware_error_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_KEY_REFRESH_COMPLETE:
		hci_key_refresh_complete_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_REQUEST:
		hci_user_passkey_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_NOTIFY:
		hci_user_passkey_notify_evt(hdev, skb);
		break;

	case HCI_EV_KEYPRESS_NOTIFY:
		hci_keypress_notify_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

#if IS_ENABLED(CONFIG_BT_HS)
	case HCI_EV_CHANNEL_SELECTED:
		hci_chan_selected_evt(hdev, skb);
		break;

	case HCI_EV_PHY_LINK_COMPLETE:
		hci_phy_link_complete_evt(hdev, skb);
		break;

	case HCI_EV_LOGICAL_LINK_COMPLETE:
		hci_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
		hci_disconn_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
		hci_disconn_phylink_complete_evt(hdev, skb);
		break;
#endif

	case HCI_EV_NUM_COMP_BLOCKS:
		hci_num_comp_blocks_evt(hdev, skb);
		break;

	case HCI_EV_VENDOR:
		msft_vendor_evt(hdev, skb);
		break;

	default:
		BT_DBG("%s event 0x%2.2x", hdev->name, event);
		break;
	}

	/* Complete any pending request, passing along the pristine copy
	 * of the event when the completion wants the skb. The copy is
	 * dropped if it turns out not to match the expected event.
	 */
	if (req_complete) {
		req_complete(hdev, status, opcode);
	} else if (req_complete_skb) {
		if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
			kfree_skb(orig_skb);
			orig_skb = NULL;
		}
		req_complete_skb(hdev, status, opcode, orig_skb);
	}

done:
	kfree_skb(orig_skb);
	kfree_skb(skb);
	hdev->stat.evt_rx++;
}
6344