xref: /openbmc/linux/net/bluetooth/hci_event.c (revision 15e3ae36)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4 
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI event handling. */
26 
27 #include <asm/unaligned.h>
28 
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
32 
33 #include "hci_request.h"
34 #include "hci_debugfs.h"
35 #include "a2mp.h"
36 #include "amp.h"
37 #include "smp.h"
38 
39 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
40 		 "\x00\x00\x00\x00\x00\x00\x00\x00"
41 
42 /* Handle HCI Event packets */
43 
44 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
45 {
46 	__u8 status = *((__u8 *) skb->data);
47 
48 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
49 
50 	if (status)
51 		return;
52 
53 	clear_bit(HCI_INQUIRY, &hdev->flags);
54 	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
55 	wake_up_bit(&hdev->flags, HCI_INQUIRY);
56 
57 	hci_dev_lock(hdev);
58 	/* Set discovery state to stopped if we're not doing LE active
59 	 * scanning.
60 	 */
61 	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
62 	    hdev->le_scan_type != LE_SCAN_ACTIVE)
63 		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
64 	hci_dev_unlock(hdev);
65 
66 	hci_conn_check_pending(hdev);
67 }
68 
69 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
70 {
71 	__u8 status = *((__u8 *) skb->data);
72 
73 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
74 
75 	if (status)
76 		return;
77 
78 	hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
79 }
80 
81 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
82 {
83 	__u8 status = *((__u8 *) skb->data);
84 
85 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
86 
87 	if (status)
88 		return;
89 
90 	hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
91 
92 	hci_conn_check_pending(hdev);
93 }
94 
/* Command Complete handler for HCI_Remote_Name_Request_Cancel.
 * Intentionally a no-op apart from the debug trace: no device state
 * depends on the outcome of the cancel itself.
 */
static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
100 
/* Command Complete handler for HCI_Role_Discovery.
 * On success, store the reported role (central/peripheral) on the
 * connection identified by the returned handle.
 */
static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_role_discovery *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	/* Connection may already be gone; silently ignore in that case */
	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->role = rp->role;

	hci_dev_unlock(hdev);
}
119 
/* Command Complete handler for HCI_Read_Link_Policy_Settings.
 * On success, cache the per-connection link policy on the matching
 * connection object.
 */
static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = __le16_to_cpu(rp->policy);

	hci_dev_unlock(hdev);
}
138 
/* Command Complete handler for HCI_Write_Link_Policy_Settings.
 * The reply only carries the handle; the policy value that was written
 * is recovered from the originally-sent command parameters.
 */
static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		/* sent + 2 skips the 2-byte connection handle that leads
		 * the command parameters, leaving the policy field.
		 */
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}
162 
/* Command Complete handler for HCI_Read_Default_Link_Policy_Settings.
 * On success, cache the controller-wide default link policy.
 */
static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_def_link_policy *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->link_policy = __le16_to_cpu(rp->policy);
}
175 
/* Command Complete handler for HCI_Write_Default_Link_Policy_Settings.
 * The reply carries only a status, so the value written is recovered
 * from the sent command parameters.
 */
static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
	if (!sent)
		return;

	/* Command parameters start with the 2-byte policy value */
	hdev->link_policy = get_unaligned_le16(sent);
}
193 
/* Command Complete handler for HCI_Reset.
 * Clears HCI_RESET unconditionally, then (on success) returns all
 * volatile controller-mirrored state to its post-reset defaults:
 * discovery stopped, TX powers invalidated, advertising/scan-response
 * data wiped, scan type passive, and the LE white/resolving lists
 * emptied.
 */
static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* Cleared even on failure: the reset attempt is over either way */
	clear_bit(HCI_RESET, &hdev->flags);

	if (status)
		return;

	/* Reset all non-persistent flags */
	hci_dev_clear_volatile_flags(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;

	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
	hdev->scan_rsp_data_len = 0;

	hdev->le_scan_type = LE_SCAN_PASSIVE;

	hdev->ssp_debug_mode = 0;

	hci_bdaddr_list_clear(&hdev->le_white_list);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);
}
226 
227 static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
228 					struct sk_buff *skb)
229 {
230 	struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
231 	struct hci_cp_read_stored_link_key *sent;
232 
233 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
234 
235 	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
236 	if (!sent)
237 		return;
238 
239 	if (!rp->status && sent->read_all == 0x01) {
240 		hdev->stored_max_keys = rp->max_keys;
241 		hdev->stored_num_keys = rp->num_keys;
242 	}
243 }
244 
245 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
246 					  struct sk_buff *skb)
247 {
248 	struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;
249 
250 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
251 
252 	if (rp->status)
253 		return;
254 
255 	if (rp->num_keys <= hdev->stored_num_keys)
256 		hdev->stored_num_keys -= rp->num_keys;
257 	else
258 		hdev->stored_num_keys = 0;
259 }
260 
/* Command Complete handler for HCI_Write_Local_Name.
 * Notifies mgmt when it is in control; otherwise mirrors the name that
 * was sent into hdev->dev_name on success.
 */
static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	/* mgmt is told about both success and failure; the local copy is
	 * only updated on success when mgmt is not involved.
	 */
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_local_name_complete(hdev, sent, status);
	else if (!status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);
}
281 
/* Command Complete handler for HCI_Read_Local_Name.
 * The name is only mirrored during initial setup/config; afterwards
 * the host (mgmt) is the authority for the local name.
 */
static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_name *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG))
		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
}
295 
/* Command Complete handler for HCI_Write_Authentication_Enable.
 * On success, syncs the HCI_AUTH flag with the value that was written;
 * mgmt is always told the outcome.
 */
static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		/* First (and only) command parameter is the auth mode */
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_auth_enable_complete(hdev, status);

	hci_dev_unlock(hdev);
}
323 
/* Command Complete handler for HCI_Write_Encryption_Mode.
 * On success, syncs the HCI_ENCRYPT flag with the mode that was sent.
 */
static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u8 param;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
	if (!sent)
		return;

	/* Single-byte command parameter: 0 = off, non-zero = on */
	param = *((__u8 *) sent);

	if (param)
		set_bit(HCI_ENCRYPT, &hdev->flags);
	else
		clear_bit(HCI_ENCRYPT, &hdev->flags);
}
346 
/* Command Complete handler for HCI_Write_Scan_Enable.
 * On success, syncs HCI_ISCAN/HCI_PSCAN with the scan mode that was
 * sent. On failure, the discoverable timeout is cancelled since the
 * requested scan state never took effect.
 */
static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u8 param;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return;

	/* Single-byte parameter: bitmask of SCAN_INQUIRY / SCAN_PAGE */
	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	if (status) {
		hdev->discov_timeout = 0;
		goto done;
	}

	if (param & SCAN_INQUIRY)
		set_bit(HCI_ISCAN, &hdev->flags);
	else
		clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_PAGE)
		set_bit(HCI_PSCAN, &hdev->flags);
	else
		clear_bit(HCI_PSCAN, &hdev->flags);

done:
	hci_dev_unlock(hdev);
}
381 
/* Command Complete handler for HCI_Read_Class_of_Device.
 * On success, caches the 3-byte class-of-device value.
 */
static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_class_of_dev *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->dev_class, rp->dev_class, 3);

	/* Class of device is printed most-significant byte first */
	BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
	       hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
}
396 
/* Command Complete handler for HCI_Write_Class_of_Device.
 * On success, mirrors the 3-byte class that was sent; mgmt is always
 * told the outcome.
 */
static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (status == 0)
		memcpy(hdev->dev_class, sent, 3);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_class_of_dev_complete(hdev, sent, status);

	hci_dev_unlock(hdev);
}
418 
/* Command Complete handler for HCI_Read_Voice_Setting.
 * Caches the voice setting and notifies the driver only when the
 * value actually changed.
 */
static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_voice_setting *rp = (void *) skb->data;
	__u16 setting;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	setting = __le16_to_cpu(rp->voice_setting);

	/* Avoid redundant driver notifications for an unchanged value */
	if (hdev->voice_setting == setting)
		return;

	hdev->voice_setting = setting;

	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
}
441 
/* Command Complete handler for HCI_Write_Voice_Setting.
 * The written value is recovered from the sent command parameters;
 * the driver is notified only when the setting actually changed.
 */
static void hci_cc_write_voice_setting(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u16 setting;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
	if (!sent)
		return;

	/* Command parameters are just the 2-byte voice setting */
	setting = get_unaligned_le16(sent);

	if (hdev->voice_setting == setting)
		return;

	hdev->voice_setting = setting;

	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
}
470 
/* Command Complete handler for HCI_Read_Number_Of_Supported_IAC.
 * Caches how many Inquiry Access Codes the controller supports.
 */
static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->num_iac = rp->num_iac;

	BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
}
485 
/* Command Complete handler for HCI_Write_Simple_Pairing_Mode.
 * On success, syncs the host-SSP feature bit with the mode written;
 * HCI_SSP_ENABLED is kept in sync locally only when mgmt is not in
 * control (mgmt tracks it itself via the completion callback).
 */
static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_write_ssp_mode *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		/* Mirror the host feature bit reported in extended features
		 * page 1.
		 */
		if (sent->mode)
			hdev->features[1][0] |= LMP_HOST_SSP;
		else
			hdev->features[1][0] &= ~LMP_HOST_SSP;
	}

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_ssp_enable_complete(hdev, sent->mode, status);
	else if (!status) {
		if (sent->mode)
			hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
	}

	hci_dev_unlock(hdev);
}
517 
/* Command Complete handler for HCI_Write_Secure_Connections_Host_Support.
 * On success, syncs the host-SC feature bit with the value written;
 * HCI_SC_ENABLED is kept in sync locally only when mgmt is not in
 * control.
 */
static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	struct hci_cp_write_sc_support *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		/* Mirror the host feature bit reported in extended features
		 * page 1.
		 */
		if (sent->support)
			hdev->features[1][0] |= LMP_HOST_SC;
		else
			hdev->features[1][0] &= ~LMP_HOST_SC;
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT) && !status) {
		if (sent->support)
			hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
	}

	hci_dev_unlock(hdev);
}
547 
/* Command Complete handler for HCI_Read_Local_Version_Information.
 * Version data is only captured during setup/config; it is immutable
 * afterwards.
 */
static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_version *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hdev->hci_ver = rp->hci_ver;
		hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
		hdev->lmp_ver = rp->lmp_ver;
		hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
	}
}
566 
/* Command Complete handler for HCI_Read_Local_Supported_Commands.
 * The supported-commands bitmap is only captured during setup/config.
 */
static void hci_cc_read_local_commands(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_commands *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG))
		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
}
581 
/* Command Complete handler for HCI_Read_Authenticated_Payload_Timeout.
 * On success, caches the timeout on the matching connection.
 */
static void hci_cc_read_auth_payload_timeout(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct hci_rp_read_auth_payload_to *rp = (void *)skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);

	hci_dev_unlock(hdev);
}
601 
/* Command Complete handler for HCI_Write_Authenticated_Payload_Timeout.
 * The reply only carries the handle; the timeout value is recovered
 * from the originally-sent command parameters.
 */
static void hci_cc_write_auth_payload_timeout(struct hci_dev *hdev,
					      struct sk_buff *skb)
{
	struct hci_rp_write_auth_payload_to *rp = (void *)skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		/* sent + 2 skips the 2-byte connection handle that leads
		 * the command parameters, leaving the timeout field.
		 */
		conn->auth_payload_timeout = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}
626 
/* Command Complete handler for HCI_Read_Local_Supported_Features.
 * Caches LMP features page 0 and derives the default ACL packet types
 * and (e)SCO packet types from the advertised feature bits.
 */
static void hci_cc_read_local_features(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type  |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type  |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
}
676 
/* Command Complete handler for HCI_Read_Local_Extended_Features.
 * Records the highest available feature page and caches the returned
 * page (if it fits in the local feature table).
 */
static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hdev->max_page < rp->max_page)
		hdev->max_page = rp->max_page;

	/* Bounds check: only HCI_MAX_PAGES pages are stored locally */
	if (rp->page < HCI_MAX_PAGES)
		memcpy(hdev->features[rp->page], rp->features, 8);
}
693 
/* Command Complete handler for HCI_Read_Flow_Control_Mode.
 * Caches whether the controller uses packet-based or block-based
 * flow control.
 */
static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->flow_ctl_mode = rp->mode;
}
706 
/* Command Complete handler for HCI_Read_Buffer_Size.
 * Caches ACL/SCO MTUs and packet counts, applies the fixup quirk for
 * controllers that misreport SCO buffers, and initialises the
 * available-credit counters.
 */
static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu  = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	/* Some controllers report bogus SCO buffer sizes; override with
	 * known-good values when the quirk is set.
	 */
	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
		hdev->sco_mtu  = 64;
		hdev->sco_pkts = 8;
	}

	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
}
732 
/* Command Complete handler for HCI_Read_BD_ADDR.
 * Captures the controller address during init, and additionally saves
 * the original setup-time address while in the setup phase.
 */
static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_bd_addr *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_INIT, &hdev->flags))
		bacpy(&hdev->bdaddr, &rp->bdaddr);

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		bacpy(&hdev->setup_addr, &rp->bdaddr);
}
748 
/* Command Complete handler for HCI_Read_Page_Scan_Activity.
 * Interval/window are only captured during initialisation.
 */
static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_INIT, &hdev->flags)) {
		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
		hdev->page_scan_window = __le16_to_cpu(rp->window);
	}
}
764 
/* Command Complete handler for HCI_Write_Page_Scan_Activity.
 * On success, mirrors the interval/window that were written.
 */
static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	struct hci_cp_write_page_scan_activity *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
	if (!sent)
		return;

	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
	hdev->page_scan_window = __le16_to_cpu(sent->window);
}
783 
/* Command Complete handler for HCI_Read_Page_Scan_Type.
 * The type is only captured during initialisation.
 */
static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_type *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->page_scan_type = rp->type;
}
797 
/* Command Complete handler for HCI_Write_Page_Scan_Type.
 * On success, mirrors the page scan type recovered from the sent
 * command parameters.
 */
static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	u8 *type;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	/* Command parameters are a single type byte */
	type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
	if (type)
		hdev->page_scan_type = *type;
}
813 
/* Command Complete handler for HCI_Read_Data_Block_Size.
 * Caches block-based flow control parameters and initialises the
 * available-block counter.
 */
static void hci_cc_read_data_block_size(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_data_block_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
	hdev->block_len = __le16_to_cpu(rp->block_len);
	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);

	hdev->block_cnt = hdev->num_blocks;

	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
	       hdev->block_cnt, hdev->block_len);
}
833 
/* Command Complete handler for HCI_Read_Clock.
 * Depending on the "which" parameter of the sent command, stores the
 * local (native) clock on the device or the piconet clock plus its
 * accuracy on the matching connection.
 */
static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_clock *rp = (void *) skb->data;
	struct hci_cp_read_clock *cp;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	/* Guard against a truncated reply before touching rp fields */
	if (skb->len < sizeof(*rp))
		return;

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!cp)
		goto unlock;

	/* which == 0x00 requests the local clock */
	if (cp->which == 0x00) {
		hdev->clock = le32_to_cpu(rp->clock);
		goto unlock;
	}

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		conn->clock = le32_to_cpu(rp->clock);
		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
	}

unlock:
	hci_dev_unlock(hdev);
}
868 
/* Command Complete handler for HCI_Read_Local_AMP_Info.
 * Caches the full set of AMP controller capabilities on the device.
 */
static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->amp_status = rp->amp_status;
	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
	hdev->amp_type = rp->amp_type;
	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
}
890 
/* Command Complete handler for HCI_Read_Inquiry_Response_TX_Power.
 * Caches the inquiry-response transmit power level.
 */
static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->inq_tx_power = rp->tx_power;
}
903 
/* Command Complete handler for HCI_Read_Default_Erroneous_Data_Reporting.
 * Caches whether erroneous data reporting is enabled.
 */
static void hci_cc_read_def_err_data_reporting(struct hci_dev *hdev,
					       struct sk_buff *skb)
{
	struct hci_rp_read_def_err_data_reporting *rp = (void *)skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->err_data_reporting = rp->err_data_reporting;
}
916 
/* Command Complete handler for HCI_Write_Default_Erroneous_Data_Reporting.
 * On success, mirrors the value recovered from the sent command.
 */
static void hci_cc_write_def_err_data_reporting(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	__u8 status = *((__u8 *)skb->data);
	struct hci_cp_write_def_err_data_reporting *cp;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
	if (!cp)
		return;

	hdev->err_data_reporting = cp->err_data_reporting;
}
934 
/* Command Complete handler for HCI_PIN_Code_Request_Reply.
 * Informs mgmt of the outcome and, on success, records the PIN length
 * (recovered from the sent command) on the matching ACL connection.
 */
static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	/* mgmt is told about both success and failure */
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
}
962 
/* Command Complete handler for HCI_PIN_Code_Request_Negative_Reply.
 * Just forwards the outcome to mgmt when it is in control.
 */
static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
						 rp->status);

	hci_dev_unlock(hdev);
}
977 
/* Command Complete handler for HCI_LE_Read_Buffer_Size.
 * Caches the LE ACL MTU and packet count and initialises the LE
 * credit counter.
 */
static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
	hdev->le_pkts = rp->le_max_pkt;

	hdev->le_cnt = hdev->le_pkts;

	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
}
995 
/* Command Complete handler for HCI_LE_Read_Local_Supported_Features.
 * Caches the 8-byte LE feature mask.
 */
static void hci_cc_le_read_local_features(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_le_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->le_features, rp->features, 8);
}
1008 
/* Command Complete handler for HCI_LE_Read_Advertising_Channel_TX_Power.
 * Caches the advertising transmit power level.
 */
static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->adv_tx_power = rp->tx_power;
}
1021 
/* Command Complete handler for HCI_User_Confirmation_Request_Reply.
 * Forwards the outcome to mgmt when it is in control.
 */
static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
						 rp->status);

	hci_dev_unlock(hdev);
}
1036 
/* Command Complete handler for HCI_User_Confirmation_Request_Negative_Reply.
 * Forwards the outcome to mgmt when it is in control.
 */
static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}
1052 
/* Command Complete handler for HCI_User_Passkey_Request_Reply.
 * Forwards the outcome to mgmt when it is in control. The reply shares
 * the user-confirm reply layout (bdaddr + status).
 */
static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
						 0, rp->status);

	hci_dev_unlock(hdev);
}
1067 
/* Command Complete handler for HCI_User_Passkey_Request_Negative_Reply.
 * Forwards the outcome to mgmt when it is in control.
 */
static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}
1083 
/* Command Complete handler for HCI_Read_Local_OOB_Data.
 * Currently trace-only; the OOB data itself is consumed elsewhere
 * (via the command's request-complete callback).
 */
static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
}
1091 
/* Command Complete handler for HCI_Read_Local_OOB_Extended_Data.
 * Currently trace-only; the OOB data itself is consumed elsewhere
 * (via the command's request-complete callback).
 */
static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
}
1099 
/* Command Complete handler for HCI_LE_Set_Random_Address.
 * On success, mirrors the random address that was written.
 */
static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	bdaddr_t *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	/* Command parameters are just the 6-byte address */
	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	bacpy(&hdev->random_addr, sent);

	hci_dev_unlock(hdev);
}
1120 
/* Command Complete handler for HCI_LE_Set_Default_PHY.
 * On success, mirrors the default TX/RX PHY preferences that were
 * written.
 */
static void hci_cc_le_set_default_phy(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_le_set_default_phy *cp;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	hdev->le_tx_def_phys = cp->tx_phys;
	hdev->le_rx_def_phys = cp->rx_phys;

	hci_dev_unlock(hdev);
}
1142 
1143 static void hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev,
1144                                               struct sk_buff *skb)
1145 {
1146 	__u8 status = *((__u8 *) skb->data);
1147 	struct hci_cp_le_set_adv_set_rand_addr *cp;
1148 	struct adv_info *adv_instance;
1149 
1150 	if (status)
1151 		return;
1152 
1153 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
1154 	if (!cp)
1155 		return;
1156 
1157 	hci_dev_lock(hdev);
1158 
1159 	if (!hdev->cur_adv_instance) {
1160 		/* Store in hdev for instance 0 (Set adv and Directed advs) */
1161 		bacpy(&hdev->random_addr, &cp->bdaddr);
1162 	} else {
1163 		adv_instance = hci_find_adv_instance(hdev,
1164 						     hdev->cur_adv_instance);
1165 		if (adv_instance)
1166 			bacpy(&adv_instance->random_addr, &cp->bdaddr);
1167 	}
1168 
1169 	hci_dev_unlock(hdev);
1170 }
1171 
/* Handle command complete for HCI_LE_Set_Advertising_Enable: keep the
 * HCI_LE_ADV flag in sync with the controller's advertising state.
 */
static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* Nothing to update if the controller rejected the command. */
	if (status)
		return;

	/* Recover the enable parameter we sent with the command. */
	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	/* If we're doing connection initiation as peripheral. Set a
	 * timeout in case something goes wrong.
	 */
	if (*sent) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

	hci_dev_unlock(hdev);
}
1206 
/* Handle command complete for HCI_LE_Set_Extended_Advertising_Enable.
 * Same bookkeeping as hci_cc_le_set_adv_enable, but the enable value
 * comes from the extended command's parameter block.
 */
static void hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_adv_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	if (cp->enable) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		/* Arm a connect timeout for a pending LE connection so a
		 * failed initiation does not stick around indefinitely.
		 */
		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

	hci_dev_unlock(hdev);
}
1240 
1241 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1242 {
1243 	struct hci_cp_le_set_scan_param *cp;
1244 	__u8 status = *((__u8 *) skb->data);
1245 
1246 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1247 
1248 	if (status)
1249 		return;
1250 
1251 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1252 	if (!cp)
1253 		return;
1254 
1255 	hci_dev_lock(hdev);
1256 
1257 	hdev->le_scan_type = cp->type;
1258 
1259 	hci_dev_unlock(hdev);
1260 }
1261 
/* Handle command complete for HCI_LE_Set_Extended_Scan_Parameters:
 * cache the scan type from the per-PHY parameter block.
 */
static void hci_cc_le_set_ext_scan_param(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_scan_params *cp;
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_le_scan_phy_params *phy_param;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
	if (!cp)
		return;

	/* cp->data carries the per-PHY parameter entries; only the first
	 * entry's scan type is recorded here. NOTE(review): assumes at
	 * least one PHY entry was sent -- confirm all senders do so.
	 */
	phy_param = (void *)cp->data;

	hci_dev_lock(hdev);

	hdev->le_scan_type = phy_param->type;

	hci_dev_unlock(hdev);
}
1286 
1287 static bool has_pending_adv_report(struct hci_dev *hdev)
1288 {
1289 	struct discovery_state *d = &hdev->discovery;
1290 
1291 	return bacmp(&d->last_adv_addr, BDADDR_ANY);
1292 }
1293 
1294 static void clear_pending_adv_report(struct hci_dev *hdev)
1295 {
1296 	struct discovery_state *d = &hdev->discovery;
1297 
1298 	bacpy(&d->last_adv_addr, BDADDR_ANY);
1299 	d->last_adv_data_len = 0;
1300 }
1301 
/* Cache the most recent advertising report in the discovery state so it
 * can be reported later (see le_set_scan_enable_complete, which flushes
 * it when scanning is disabled).
 */
static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 bdaddr_type, s8 rssi, u32 flags,
				     u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;

	bacpy(&d->last_adv_addr, bdaddr);
	d->last_adv_addr_type = bdaddr_type;
	d->last_adv_rssi = rssi;
	d->last_adv_flags = flags;
	/* NOTE(review): assumes len fits within d->last_adv_data --
	 * callers are expected to bound it; confirm.
	 */
	memcpy(d->last_adv_data, data, len);
	d->last_adv_data_len = len;
}
1315 
/* Common completion handling for the legacy and extended LE scan enable
 * commands: keep the HCI_LE_SCAN flag, any pending advertising report
 * and the discovery state in sync with the controller.
 */
static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
{
	hci_dev_lock(hdev);

	switch (enable) {
	case LE_SCAN_ENABLE:
		hci_dev_set_flag(hdev, HCI_LE_SCAN);
		/* Forget any report left over from a previous active scan. */
		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
			clear_pending_adv_report(hdev);
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		hci_dev_clear_flag(hdev, HCI_LE_SCAN);

		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request. Mark
		 * therefore discovery as stopped. If this was not
		 * because of a connect request advertising might have
		 * been disabled because of active scanning, so
		 * re-enable it again if necessary.
		 */
		if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
			 hdev->discovery.state == DISCOVERY_FINDING)
			hci_req_reenable_advertising(hdev);

		break;

	default:
		bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
			   enable);
		break;
	}

	hci_dev_unlock(hdev);
}
1372 
1373 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1374 				      struct sk_buff *skb)
1375 {
1376 	struct hci_cp_le_set_scan_enable *cp;
1377 	__u8 status = *((__u8 *) skb->data);
1378 
1379 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1380 
1381 	if (status)
1382 		return;
1383 
1384 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1385 	if (!cp)
1386 		return;
1387 
1388 	le_set_scan_enable_complete(hdev, cp->enable);
1389 }
1390 
1391 static void hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev,
1392 				      struct sk_buff *skb)
1393 {
1394 	struct hci_cp_le_set_ext_scan_enable *cp;
1395 	__u8 status = *((__u8 *) skb->data);
1396 
1397 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1398 
1399 	if (status)
1400 		return;
1401 
1402 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
1403 	if (!cp)
1404 		return;
1405 
1406 	le_set_scan_enable_complete(hdev, cp->enable);
1407 }
1408 
1409 static void hci_cc_le_read_num_adv_sets(struct hci_dev *hdev,
1410 				      struct sk_buff *skb)
1411 {
1412 	struct hci_rp_le_read_num_supported_adv_sets *rp = (void *) skb->data;
1413 
1414 	BT_DBG("%s status 0x%2.2x No of Adv sets %u", hdev->name, rp->status,
1415 	       rp->num_of_sets);
1416 
1417 	if (rp->status)
1418 		return;
1419 
1420 	hdev->le_num_of_adv_sets = rp->num_of_sets;
1421 }
1422 
1423 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1424 					   struct sk_buff *skb)
1425 {
1426 	struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1427 
1428 	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1429 
1430 	if (rp->status)
1431 		return;
1432 
1433 	hdev->le_white_list_size = rp->size;
1434 }
1435 
1436 static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
1437 				       struct sk_buff *skb)
1438 {
1439 	__u8 status = *((__u8 *) skb->data);
1440 
1441 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1442 
1443 	if (status)
1444 		return;
1445 
1446 	hci_bdaddr_list_clear(&hdev->le_white_list);
1447 }
1448 
1449 static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
1450 					struct sk_buff *skb)
1451 {
1452 	struct hci_cp_le_add_to_white_list *sent;
1453 	__u8 status = *((__u8 *) skb->data);
1454 
1455 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1456 
1457 	if (status)
1458 		return;
1459 
1460 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
1461 	if (!sent)
1462 		return;
1463 
1464 	hci_bdaddr_list_add(&hdev->le_white_list, &sent->bdaddr,
1465 			   sent->bdaddr_type);
1466 }
1467 
1468 static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
1469 					  struct sk_buff *skb)
1470 {
1471 	struct hci_cp_le_del_from_white_list *sent;
1472 	__u8 status = *((__u8 *) skb->data);
1473 
1474 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1475 
1476 	if (status)
1477 		return;
1478 
1479 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
1480 	if (!sent)
1481 		return;
1482 
1483 	hci_bdaddr_list_del(&hdev->le_white_list, &sent->bdaddr,
1484 			    sent->bdaddr_type);
1485 }
1486 
1487 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1488 					    struct sk_buff *skb)
1489 {
1490 	struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1491 
1492 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1493 
1494 	if (rp->status)
1495 		return;
1496 
1497 	memcpy(hdev->le_states, rp->le_states, 8);
1498 }
1499 
1500 static void hci_cc_le_read_def_data_len(struct hci_dev *hdev,
1501 					struct sk_buff *skb)
1502 {
1503 	struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;
1504 
1505 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1506 
1507 	if (rp->status)
1508 		return;
1509 
1510 	hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
1511 	hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
1512 }
1513 
1514 static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
1515 					 struct sk_buff *skb)
1516 {
1517 	struct hci_cp_le_write_def_data_len *sent;
1518 	__u8 status = *((__u8 *) skb->data);
1519 
1520 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1521 
1522 	if (status)
1523 		return;
1524 
1525 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
1526 	if (!sent)
1527 		return;
1528 
1529 	hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
1530 	hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
1531 }
1532 
1533 static void hci_cc_le_add_to_resolv_list(struct hci_dev *hdev,
1534 					 struct sk_buff *skb)
1535 {
1536 	struct hci_cp_le_add_to_resolv_list *sent;
1537 	__u8 status = *((__u8 *) skb->data);
1538 
1539 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1540 
1541 	if (status)
1542 		return;
1543 
1544 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
1545 	if (!sent)
1546 		return;
1547 
1548 	hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
1549 				sent->bdaddr_type, sent->peer_irk,
1550 				sent->local_irk);
1551 }
1552 
1553 static void hci_cc_le_del_from_resolv_list(struct hci_dev *hdev,
1554 					  struct sk_buff *skb)
1555 {
1556 	struct hci_cp_le_del_from_resolv_list *sent;
1557 	__u8 status = *((__u8 *) skb->data);
1558 
1559 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1560 
1561 	if (status)
1562 		return;
1563 
1564 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
1565 	if (!sent)
1566 		return;
1567 
1568 	hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
1569 			    sent->bdaddr_type);
1570 }
1571 
1572 static void hci_cc_le_clear_resolv_list(struct hci_dev *hdev,
1573 				       struct sk_buff *skb)
1574 {
1575 	__u8 status = *((__u8 *) skb->data);
1576 
1577 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1578 
1579 	if (status)
1580 		return;
1581 
1582 	hci_bdaddr_list_clear(&hdev->le_resolv_list);
1583 }
1584 
1585 static void hci_cc_le_read_resolv_list_size(struct hci_dev *hdev,
1586 					   struct sk_buff *skb)
1587 {
1588 	struct hci_rp_le_read_resolv_list_size *rp = (void *) skb->data;
1589 
1590 	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1591 
1592 	if (rp->status)
1593 		return;
1594 
1595 	hdev->le_resolv_list_size = rp->size;
1596 }
1597 
/* Handle command complete for HCI_LE_Set_Address_Resolution_Enable:
 * keep the HCI_LL_RPA_RESOLUTION flag in sync with the value sent.
 */
static void hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	/* Recover the enable parameter we sent with the command. */
	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (*sent)
		hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
	else
		hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);

	hci_dev_unlock(hdev);
}
1621 
1622 static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
1623 					struct sk_buff *skb)
1624 {
1625 	struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;
1626 
1627 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1628 
1629 	if (rp->status)
1630 		return;
1631 
1632 	hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
1633 	hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
1634 	hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
1635 	hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
1636 }
1637 
/* Handle command complete for HCI_Write_LE_Host_Supported: mirror the
 * host's LE and simultaneous LE+BR/EDR support bits into the local
 * feature mask and the corresponding device flags.
 */
static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_cp_write_le_host_supported *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (sent->le) {
		hdev->features[1][0] |= LMP_HOST_LE;
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	} else {
		hdev->features[1][0] &= ~LMP_HOST_LE;
		hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
		/* Advertising cannot stay on once LE support is disabled. */
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);
	}

	if (sent->simul)
		hdev->features[1][0] |= LMP_HOST_LE_BREDR;
	else
		hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;

	hci_dev_unlock(hdev);
}
1671 
1672 static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1673 {
1674 	struct hci_cp_le_set_adv_param *cp;
1675 	u8 status = *((u8 *) skb->data);
1676 
1677 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1678 
1679 	if (status)
1680 		return;
1681 
1682 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
1683 	if (!cp)
1684 		return;
1685 
1686 	hci_dev_lock(hdev);
1687 	hdev->adv_addr_type = cp->own_address_type;
1688 	hci_dev_unlock(hdev);
1689 }
1690 
/* Handle command complete for HCI_LE_Set_Extended_Advertising_Parameters:
 * record the own-address type and the selected TX power for the current
 * advertising instance (instance 0 lives directly in hdev), then refresh
 * the advertising data.
 */
static void hci_cc_set_ext_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_le_set_ext_adv_params *rp = (void *) skb->data;
	struct hci_cp_le_set_ext_adv_params *cp;
	struct adv_info *adv_instance;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
	if (!cp)
		return;

	hci_dev_lock(hdev);
	hdev->adv_addr_type = cp->own_addr_type;
	if (!hdev->cur_adv_instance) {
		/* Store in hdev for instance 0 */
		hdev->adv_tx_power = rp->tx_power;
	} else {
		adv_instance = hci_find_adv_instance(hdev,
						     hdev->cur_adv_instance);
		if (adv_instance)
			adv_instance->tx_power = rp->tx_power;
	}
	/* Update adv data as tx power is known now */
	hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
	hci_dev_unlock(hdev);
}
1721 
1722 static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
1723 {
1724 	struct hci_rp_read_rssi *rp = (void *) skb->data;
1725 	struct hci_conn *conn;
1726 
1727 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1728 
1729 	if (rp->status)
1730 		return;
1731 
1732 	hci_dev_lock(hdev);
1733 
1734 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1735 	if (conn)
1736 		conn->rssi = rp->rssi;
1737 
1738 	hci_dev_unlock(hdev);
1739 }
1740 
/* Handle command complete for HCI_Read_Transmit_Power_Level: attach the
 * reported power level to the right connection field based on the type
 * parameter of the original command.
 */
static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_read_tx_power *sent;
	struct hci_rp_read_tx_power *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (!conn)
		goto unlock;

	switch (sent->type) {
	case 0x00:
		/* Type 0x00: current transmit power level */
		conn->tx_power = rp->tx_power;
		break;
	case 0x01:
		/* Type 0x01: maximum transmit power level */
		conn->max_tx_power = rp->tx_power;
		break;
	}

unlock:
	hci_dev_unlock(hdev);
}
1774 
1775 static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, struct sk_buff *skb)
1776 {
1777 	u8 status = *((u8 *) skb->data);
1778 	u8 *mode;
1779 
1780 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1781 
1782 	if (status)
1783 		return;
1784 
1785 	mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
1786 	if (mode)
1787 		hdev->ssp_debug_mode = *mode;
1788 }
1789 
1790 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1791 {
1792 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1793 
1794 	if (status) {
1795 		hci_conn_check_pending(hdev);
1796 		return;
1797 	}
1798 
1799 	set_bit(HCI_INQUIRY, &hdev->flags);
1800 }
1801 
/* Command status handler for HCI_Create_Connection: on failure clean up
 * or park the pending connection object, on success make sure a conn
 * object exists for the upcoming Connection Complete event.
 */
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			/* 0x0c is "Command Disallowed". For any other
			 * status, or after more than two attempts, give up
			 * on the connection; otherwise park it in
			 * BT_CONNECT2 so it can be retried.
			 */
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
					    HCI_ROLE_MASTER);
			if (!conn)
				bt_dev_err(hdev, "no memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}
1839 
/* Command status handler for HCI_Add_SCO. Only failures are handled
 * here: tear down the SCO link that was being set up on top of the
 * given ACL connection and notify its users.
 */
static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_add_sco *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
	if (!cp)
		return;

	/* cp->handle refers to the ACL link the SCO rides on. */
	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}
1874 
1875 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1876 {
1877 	struct hci_cp_auth_requested *cp;
1878 	struct hci_conn *conn;
1879 
1880 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1881 
1882 	if (!status)
1883 		return;
1884 
1885 	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1886 	if (!cp)
1887 		return;
1888 
1889 	hci_dev_lock(hdev);
1890 
1891 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1892 	if (conn) {
1893 		if (conn->state == BT_CONFIG) {
1894 			hci_connect_cfm(conn, status);
1895 			hci_conn_drop(conn);
1896 		}
1897 	}
1898 
1899 	hci_dev_unlock(hdev);
1900 }
1901 
1902 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1903 {
1904 	struct hci_cp_set_conn_encrypt *cp;
1905 	struct hci_conn *conn;
1906 
1907 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1908 
1909 	if (!status)
1910 		return;
1911 
1912 	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1913 	if (!cp)
1914 		return;
1915 
1916 	hci_dev_lock(hdev);
1917 
1918 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1919 	if (conn) {
1920 		if (conn->state == BT_CONFIG) {
1921 			hci_connect_cfm(conn, status);
1922 			hci_conn_drop(conn);
1923 		}
1924 	}
1925 
1926 	hci_dev_unlock(hdev);
1927 }
1928 
/* Decide whether an outgoing connection in BT_CONFIG state still needs
 * an explicit authentication request. Returns 1 if authentication
 * should be requested, 0 otherwise.
 */
static int hci_outgoing_auth_needed(struct hci_dev *hdev,
				    struct hci_conn *conn)
{
	/* Only outgoing connections still in configuration qualify. */
	if (conn->state != BT_CONFIG || !conn->out)
		return 0;

	/* SDP-level security never requires authentication. */
	if (conn->pending_sec_level == BT_SECURITY_SDP)
		return 0;

	/* Only request authentication for SSP connections or non-SSP
	 * devices with sec_level MEDIUM or HIGH or if MITM protection
	 * is requested.
	 */
	if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
	    conn->pending_sec_level != BT_SECURITY_FIPS &&
	    conn->pending_sec_level != BT_SECURITY_HIGH &&
	    conn->pending_sec_level != BT_SECURITY_MEDIUM)
		return 0;

	return 1;
}
1950 
1951 static int hci_resolve_name(struct hci_dev *hdev,
1952 				   struct inquiry_entry *e)
1953 {
1954 	struct hci_cp_remote_name_req cp;
1955 
1956 	memset(&cp, 0, sizeof(cp));
1957 
1958 	bacpy(&cp.bdaddr, &e->data.bdaddr);
1959 	cp.pscan_rep_mode = e->data.pscan_rep_mode;
1960 	cp.pscan_mode = e->data.pscan_mode;
1961 	cp.clock_offset = e->data.clock_offset;
1962 
1963 	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1964 }
1965 
1966 static bool hci_resolve_next_name(struct hci_dev *hdev)
1967 {
1968 	struct discovery_state *discov = &hdev->discovery;
1969 	struct inquiry_entry *e;
1970 
1971 	if (list_empty(&discov->resolve))
1972 		return false;
1973 
1974 	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1975 	if (!e)
1976 		return false;
1977 
1978 	if (hci_resolve_name(hdev, e) == 0) {
1979 		e->name_state = NAME_PENDING;
1980 		return true;
1981 	}
1982 
1983 	return false;
1984 }
1985 
/* Deliver a resolved (or failed) remote name to mgmt and, while
 * discovery is active, continue resolving the next pending name or
 * finish discovery.
 */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	/* Update the mgmt connected state if necessary. Be careful with
	 * conn objects that exist but are not (yet) connected however.
	 * Only those in BT_CONFIG or BT_CONNECTED states can be
	 * considered connected.
	 */
	if (conn &&
	    (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, name, name_len);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in a list of found devices names of which
	 * are pending. there is no need to continue resolving a next name as it
	 * will be done upon receiving another Remote Name Request Complete
	 * Event */
	if (!e)
		return;

	list_del(&e->list);
	/* A NULL name means the request failed for this entry. */
	if (name) {
		e->name_state = NAME_KNOWN;
		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
				 e->data.rssi, name, name_len);
	} else {
		e->name_state = NAME_NOT_KNOWN;
	}

	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
2034 
/* Command status handler for HCI_Remote_Name_Request. On failure,
 * finish any mgmt-driven name resolution for this address and, if the
 * request was part of outgoing connection setup, start authentication
 * directly instead.
 */
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* If successful wait for the name req complete event before
	 * checking for the need to do authentication */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	/* NULL name tells the pending-name machinery the lookup failed. */
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested auth_cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		auth_cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(auth_cp), &auth_cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
2077 
2078 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
2079 {
2080 	struct hci_cp_read_remote_features *cp;
2081 	struct hci_conn *conn;
2082 
2083 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
2084 
2085 	if (!status)
2086 		return;
2087 
2088 	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
2089 	if (!cp)
2090 		return;
2091 
2092 	hci_dev_lock(hdev);
2093 
2094 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2095 	if (conn) {
2096 		if (conn->state == BT_CONFIG) {
2097 			hci_connect_cfm(conn, status);
2098 			hci_conn_drop(conn);
2099 		}
2100 	}
2101 
2102 	hci_dev_unlock(hdev);
2103 }
2104 
2105 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
2106 {
2107 	struct hci_cp_read_remote_ext_features *cp;
2108 	struct hci_conn *conn;
2109 
2110 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
2111 
2112 	if (!status)
2113 		return;
2114 
2115 	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
2116 	if (!cp)
2117 		return;
2118 
2119 	hci_dev_lock(hdev);
2120 
2121 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2122 	if (conn) {
2123 		if (conn->state == BT_CONFIG) {
2124 			hci_connect_cfm(conn, status);
2125 			hci_conn_drop(conn);
2126 		}
2127 	}
2128 
2129 	hci_dev_unlock(hdev);
2130 }
2131 
/* Command status handler for HCI_Setup_Synchronous_Connection. Only
 * failures are handled here: tear down the SCO/eSCO link that was being
 * set up on top of the given ACL connection and notify its users.
 */
static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_setup_sync_conn *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
	if (!cp)
		return;

	/* cp->handle refers to the ACL link the sync connection rides on. */
	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}
2166 
2167 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
2168 {
2169 	struct hci_cp_sniff_mode *cp;
2170 	struct hci_conn *conn;
2171 
2172 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
2173 
2174 	if (!status)
2175 		return;
2176 
2177 	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
2178 	if (!cp)
2179 		return;
2180 
2181 	hci_dev_lock(hdev);
2182 
2183 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2184 	if (conn) {
2185 		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2186 
2187 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2188 			hci_sco_setup(conn, status);
2189 	}
2190 
2191 	hci_dev_unlock(hdev);
2192 }
2193 
2194 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
2195 {
2196 	struct hci_cp_exit_sniff_mode *cp;
2197 	struct hci_conn *conn;
2198 
2199 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
2200 
2201 	if (!status)
2202 		return;
2203 
2204 	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
2205 	if (!cp)
2206 		return;
2207 
2208 	hci_dev_lock(hdev);
2209 
2210 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2211 	if (conn) {
2212 		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2213 
2214 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2215 			hci_sco_setup(conn, status);
2216 	}
2217 
2218 	hci_dev_unlock(hdev);
2219 }
2220 
/* Command status handler for HCI_Disconnect. Only failures are handled
 * here: report them to mgmt and clean up the connection object.
 */
static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_disconnect *cp;
	struct hci_conn *conn;

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		/* Remember the link type; conn is gone after hci_conn_del. */
		u8 type = conn->type;

		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, status);

		/* If the disconnection failed for any reason, the upper layer
		 * does not retry to disconnect in current implementation.
		 * Hence, we need to do some basic cleanup here and re-enable
		 * advertising if necessary.
		 */
		hci_conn_del(conn);
		if (type == LE_LINK)
			hci_req_reenable_advertising(hdev);
	}

	hci_dev_unlock(hdev);
}
2254 
/* Common command-status bookkeeping for the LE (Extended) Create
 * Connection commands: record the initiator/responder address pair for
 * SMP and arm a connect timeout when not connecting via the white list.
 */
static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
			      u8 peer_addr_type, u8 own_address_type,
			      u8 filter_policy)
{
	struct hci_conn *conn;

	conn = hci_conn_hash_lookup_le(hdev, peer_addr,
				       peer_addr_type);
	if (!conn)
		return;

	/* Store the initiator and responder address information which
	 * is needed for SMP. These values will not change during the
	 * lifetime of the connection.
	 */
	conn->init_addr_type = own_address_type;
	if (own_address_type == ADDR_LE_DEV_RANDOM)
		bacpy(&conn->init_addr, &hdev->random_addr);
	else
		bacpy(&conn->init_addr, &hdev->bdaddr);

	conn->resp_addr_type = peer_addr_type;
	bacpy(&conn->resp_addr, peer_addr);

	/* We don't want the connection attempt to stick around
	 * indefinitely since LE doesn't have a page timeout concept
	 * like BR/EDR. Set a timer for any connection that doesn't use
	 * the white list for connecting.
	 */
	if (filter_policy == HCI_LE_USE_PEER_ADDR)
		queue_delayed_work(conn->hdev->workqueue,
				   &conn->le_conn_timeout,
				   conn->conn_timeout);
}
2289 
2290 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
2291 {
2292 	struct hci_cp_le_create_conn *cp;
2293 
2294 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
2295 
2296 	/* All connection failure handling is taken care of by the
2297 	 * hci_le_conn_failed function which is triggered by the HCI
2298 	 * request completion callbacks used for connecting.
2299 	 */
2300 	if (status)
2301 		return;
2302 
2303 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
2304 	if (!cp)
2305 		return;
2306 
2307 	hci_dev_lock(hdev);
2308 
2309 	cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2310 			  cp->own_address_type, cp->filter_policy);
2311 
2312 	hci_dev_unlock(hdev);
2313 }
2314 
2315 static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
2316 {
2317 	struct hci_cp_le_ext_create_conn *cp;
2318 
2319 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
2320 
2321 	/* All connection failure handling is taken care of by the
2322 	 * hci_le_conn_failed function which is triggered by the HCI
2323 	 * request completion callbacks used for connecting.
2324 	 */
2325 	if (status)
2326 		return;
2327 
2328 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
2329 	if (!cp)
2330 		return;
2331 
2332 	hci_dev_lock(hdev);
2333 
2334 	cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2335 			  cp->own_addr_type, cp->filter_policy);
2336 
2337 	hci_dev_unlock(hdev);
2338 }
2339 
2340 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
2341 {
2342 	struct hci_cp_le_read_remote_features *cp;
2343 	struct hci_conn *conn;
2344 
2345 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
2346 
2347 	if (!status)
2348 		return;
2349 
2350 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
2351 	if (!cp)
2352 		return;
2353 
2354 	hci_dev_lock(hdev);
2355 
2356 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2357 	if (conn) {
2358 		if (conn->state == BT_CONFIG) {
2359 			hci_connect_cfm(conn, status);
2360 			hci_conn_drop(conn);
2361 		}
2362 	}
2363 
2364 	hci_dev_unlock(hdev);
2365 }
2366 
2367 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
2368 {
2369 	struct hci_cp_le_start_enc *cp;
2370 	struct hci_conn *conn;
2371 
2372 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
2373 
2374 	if (!status)
2375 		return;
2376 
2377 	hci_dev_lock(hdev);
2378 
2379 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
2380 	if (!cp)
2381 		goto unlock;
2382 
2383 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2384 	if (!conn)
2385 		goto unlock;
2386 
2387 	if (conn->state != BT_CONNECTED)
2388 		goto unlock;
2389 
2390 	hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2391 	hci_conn_drop(conn);
2392 
2393 unlock:
2394 	hci_dev_unlock(hdev);
2395 }
2396 
2397 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
2398 {
2399 	struct hci_cp_switch_role *cp;
2400 	struct hci_conn *conn;
2401 
2402 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
2403 
2404 	if (!status)
2405 		return;
2406 
2407 	cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
2408 	if (!cp)
2409 		return;
2410 
2411 	hci_dev_lock(hdev);
2412 
2413 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2414 	if (conn)
2415 		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2416 
2417 	hci_dev_unlock(hdev);
2418 }
2419 
2420 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2421 {
2422 	__u8 status = *((__u8 *) skb->data);
2423 	struct discovery_state *discov = &hdev->discovery;
2424 	struct inquiry_entry *e;
2425 
2426 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
2427 
2428 	hci_conn_check_pending(hdev);
2429 
2430 	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
2431 		return;
2432 
2433 	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
2434 	wake_up_bit(&hdev->flags, HCI_INQUIRY);
2435 
2436 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
2437 		return;
2438 
2439 	hci_dev_lock(hdev);
2440 
2441 	if (discov->state != DISCOVERY_FINDING)
2442 		goto unlock;
2443 
2444 	if (list_empty(&discov->resolve)) {
2445 		/* When BR/EDR inquiry is active and no LE scanning is in
2446 		 * progress, then change discovery state to indicate completion.
2447 		 *
2448 		 * When running LE scanning and BR/EDR inquiry simultaneously
2449 		 * and the LE scan already finished, then change the discovery
2450 		 * state to indicate completion.
2451 		 */
2452 		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2453 		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2454 			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2455 		goto unlock;
2456 	}
2457 
2458 	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2459 	if (e && hci_resolve_name(hdev, e) == 0) {
2460 		e->name_state = NAME_PENDING;
2461 		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
2462 	} else {
2463 		/* When BR/EDR inquiry is active and no LE scanning is in
2464 		 * progress, then change discovery state to indicate completion.
2465 		 *
2466 		 * When running LE scanning and BR/EDR inquiry simultaneously
2467 		 * and the LE scan already finished, then change the discovery
2468 		 * state to indicate completion.
2469 		 */
2470 		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2471 		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2472 			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2473 	}
2474 
2475 unlock:
2476 	hci_dev_unlock(hdev);
2477 }
2478 
2479 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2480 {
2481 	struct inquiry_data data;
2482 	struct inquiry_info *info = (void *) (skb->data + 1);
2483 	int num_rsp = *((__u8 *) skb->data);
2484 
2485 	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2486 
2487 	if (!num_rsp)
2488 		return;
2489 
2490 	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
2491 		return;
2492 
2493 	hci_dev_lock(hdev);
2494 
2495 	for (; num_rsp; num_rsp--, info++) {
2496 		u32 flags;
2497 
2498 		bacpy(&data.bdaddr, &info->bdaddr);
2499 		data.pscan_rep_mode	= info->pscan_rep_mode;
2500 		data.pscan_period_mode	= info->pscan_period_mode;
2501 		data.pscan_mode		= info->pscan_mode;
2502 		memcpy(data.dev_class, info->dev_class, 3);
2503 		data.clock_offset	= info->clock_offset;
2504 		data.rssi		= HCI_RSSI_INVALID;
2505 		data.ssp_mode		= 0x00;
2506 
2507 		flags = hci_inquiry_cache_update(hdev, &data, false);
2508 
2509 		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2510 				  info->dev_class, HCI_RSSI_INVALID,
2511 				  flags, NULL, 0, NULL, 0);
2512 	}
2513 
2514 	hci_dev_unlock(hdev);
2515 }
2516 
/* Handle HCI_EV_CONN_COMPLETE: finalize setup of an ACL/SCO connection
 * or clean up after a failed connection attempt.
 */
static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct inquiry_entry *ie;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* Connection may not exist if auto-connected. Check the inquiry
		 * cache to see if we've already discovered this bdaddr before.
		 * If found and link is an ACL type, create a connection class
		 * automatically.
		 */
		ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
		if (ie && ev->link_type == ACL_LINK) {
			conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
					    HCI_ROLE_SLAVE);
			if (!conn) {
				bt_dev_err(hdev, "no memory for new conn");
				goto unlock;
			}
		} else {
			if (ev->link_type != SCO_LINK)
				goto unlock;

			/* NOTE(review): an SCO completion can answer a request
			 * tracked under ESCO_LINK, so retry the lookup and
			 * downgrade the link type on a match.
			 */
			conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK,
						       &ev->bdaddr);
			if (!conn)
				goto unlock;

			conn->type = SCO_LINK;
		}
	}

	if (!ev->status) {
		conn->handle = __le16_to_cpu(ev->handle);

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			/* Give an incoming legacy (pre-SSP) link without a
			 * stored key the longer pairing timeout.
			 */
			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);

		/* Mirror the adapter-wide auth/encrypt settings onto the
		 * new connection's flags.
		 */
		if (test_bit(HCI_AUTH, &hdev->flags))
			set_bit(HCI_CONN_AUTH, &conn->flags);

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);

			hci_req_update_scan(hdev);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}
	} else {
		conn->state = BT_CLOSED;
		if (conn->type == ACL_LINK)
			mgmt_connect_failed(hdev, &conn->dst, conn->type,
					    conn->dst_type, ev->status);
	}

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

	/* On failure, notify and free the connection. On success, only
	 * non-ACL links are confirmed here; ACL links are confirmed
	 * later, e.g. from the remote features handler.
	 */
	if (ev->status) {
		hci_connect_cfm(conn, ev->status);
		hci_conn_del(conn);
	} else if (ev->link_type != ACL_LINK)
		hci_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
2618 
2619 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
2620 {
2621 	struct hci_cp_reject_conn_req cp;
2622 
2623 	bacpy(&cp.bdaddr, bdaddr);
2624 	cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2625 	hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
2626 }
2627 
/* Handle HCI_EV_CONN_REQUEST: decide whether to accept, reject or
 * defer an incoming ACL/SCO/eSCO connection request.
 */
static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;
	struct inquiry_entry *ie;
	struct hci_conn *conn;
	__u8 flags = 0;

	BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
	       ev->link_type);

	/* Let the registered protocols veto or defer the incoming
	 * connection; they report their decision through mask/flags.
	 */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
				      &flags);

	if (!(mask & HCI_LM_ACCEPT)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* Reject peers on the blacklist. */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &ev->bdaddr,
				   BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* Require HCI_CONNECTABLE or a whitelist entry to accept the
	 * connection. These features are only touched through mgmt so
	 * only do the checks if HCI_MGMT is set.
	 */
	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
	    !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
	    !hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
				    BDADDR_BREDR)) {
		    hci_reject_conn(hdev, &ev->bdaddr);
		    return;
	}

	/* Connection accepted */

	hci_dev_lock(hdev);

	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		/* Refresh the cached device class from the event. */
		memcpy(ie->data.dev_class, ev->dev_class, 3);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
			&ev->bdaddr);
	if (!conn) {
		conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
				    HCI_ROLE_SLAVE);
		if (!conn) {
			bt_dev_err(hdev, "no memory for new connection");
			hci_dev_unlock(hdev);
			return;
		}
	}

	memcpy(conn->dev_class, ev->dev_class, 3);

	hci_dev_unlock(hdev);

	/* ACL requests (and sync requests on non-eSCO controllers that
	 * no protocol deferred) are accepted immediately.
	 */
	if (ev->link_type == ACL_LINK ||
	    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
		struct hci_cp_accept_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);

		if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
			cp.role = 0x00; /* Become master */
		else
			cp.role = 0x01; /* Remain slave */

		hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
	} else if (!(flags & HCI_PROTO_DEFER)) {
		struct hci_cp_accept_sync_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.pkt_type = cpu_to_le16(conn->pkt_type);

		/* NOTE(review): fixed 0x1f40 (8000 bytes/s) bandwidth,
		 * unconstrained latency and "don't care" retransmission
		 * effort - presumably the defaults for voice links;
		 * confirm against the core spec synchronous parameters.
		 */
		cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.max_latency    = cpu_to_le16(0xffff);
		cp.content_format = cpu_to_le16(hdev->voice_setting);
		cp.retrans_effort = 0xff;

		hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
			     &cp);
	} else {
		/* A protocol deferred the decision; it will accept or
		 * reject the request later.
		 */
		conn->state = BT_CONNECT2;
		hci_connect_cfm(conn, 0);
	}
}
2722 
2723 static u8 hci_to_mgmt_reason(u8 err)
2724 {
2725 	switch (err) {
2726 	case HCI_ERROR_CONNECTION_TIMEOUT:
2727 		return MGMT_DEV_DISCONN_TIMEOUT;
2728 	case HCI_ERROR_REMOTE_USER_TERM:
2729 	case HCI_ERROR_REMOTE_LOW_RESOURCES:
2730 	case HCI_ERROR_REMOTE_POWER_OFF:
2731 		return MGMT_DEV_DISCONN_REMOTE;
2732 	case HCI_ERROR_LOCAL_HOST_TERM:
2733 		return MGMT_DEV_DISCONN_LOCAL_HOST;
2734 	default:
2735 		return MGMT_DEV_DISCONN_UNKNOWN;
2736 	}
2737 }
2738 
/* Handle HCI_EV_DISCONN_COMPLETE: notify mgmt, re-arm auto-connection
 * where configured, and release the connection object.
 */
static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = (void *) skb->data;
	u8 reason;
	struct hci_conn_params *params;
	struct hci_conn *conn;
	bool mgmt_connected;
	u8 type;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->status) {
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, ev->status);
		goto unlock;
	}

	conn->state = BT_CLOSED;

	mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);

	/* A pending authentication failure takes precedence over the
	 * reason the controller reported.
	 */
	if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
		reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
	else
		reason = hci_to_mgmt_reason(ev->reason);

	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
				reason, mgmt_connected);

	if (conn->type == ACL_LINK) {
		if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
			hci_remove_link_key(hdev, &conn->dst);

		hci_req_update_scan(hdev);
	}

	/* Re-queue devices configured for automatic reconnection. */
	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_LINK_LOSS:
			/* Only reconnect after an actual link loss. */
			if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
				break;
			/* Fall through */

		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			list_del_init(&params->action);
			list_add(&params->action, &hdev->pend_le_conns);
			hci_update_background_scan(hdev);
			break;

		default:
			break;
		}
	}

	/* Cache the type; conn must not be touched after hci_conn_del(). */
	type = conn->type;

	hci_disconn_cfm(conn, ev->reason);
	hci_conn_del(conn);

	/* The suspend notifier is waiting for all devices to disconnect so
	 * clear the bit from pending tasks and inform the wait queue.
	 */
	if (list_empty(&hdev->conn_hash.list) &&
	    test_and_clear_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks)) {
		wake_up(&hdev->suspend_wait_q);
	}

	/* Re-enable advertising if necessary, since it might
	 * have been disabled by the connection. From the
	 * HCI_LE_Set_Advertise_Enable command description in
	 * the core specification (v4.0):
	 * "The Controller shall continue advertising until the Host
	 * issues an LE_Set_Advertise_Enable command with
	 * Advertising_Enable set to 0x00 (Advertising is disabled)
	 * or until a connection is created or until the Advertising
	 * is timed out due to Directed Advertising."
	 */
	if (type == LE_LINK)
		hci_req_reenable_advertising(hdev);

unlock:
	hci_dev_unlock(hdev);
}
2830 
/* Handle HCI_EV_AUTH_COMPLETE: update the connection's authentication
 * state and, where needed, start encryption or finish setup.
 */
static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		if (!hci_conn_ssp_enabled(conn) &&
		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
			bt_dev_info(hdev, "re-auth of legacy device is not possible.");
		} else {
			set_bit(HCI_CONN_AUTH, &conn->flags);
			conn->sec_level = conn->pending_sec_level;
		}
	} else {
		/* Record a missing PIN/key as an auth failure so the
		 * eventual disconnect reason reflects it.
		 */
		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		mgmt_auth_failed(conn, ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			/* SSP links get encrypted right after auth; setup
			 * then completes in the encrypt change handler.
			 */
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

	/* An encryption request was queued behind this authentication;
	 * issue it now, or report the failure to the waiters.
	 */
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status, 0x00);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
2900 
/* Handle HCI_EV_REMOTE_NAME: deliver the resolved name to the pending
 * name lookup and trigger outgoing authentication if still needed.
 */
static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	/* Pending name handling is only done when mgmt is in use. */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto check_auth;

	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		/* Lookup failed: report with an empty name. */
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	/* Request authentication unless one is already pending. */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
2942 
/* Completion callback for HCI_OP_READ_ENC_KEY_SIZE (queued from the
 * encrypt change handler): record the negotiated encryption key size
 * and deliver the deferred connect/encrypt notification.
 */
static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status,
				       u16 opcode, struct sk_buff *skb)
{
	const struct hci_rp_read_enc_key_size *rp;
	struct hci_conn *conn;
	u16 handle;

	BT_DBG("%s status 0x%02x", hdev->name, status);

	if (!skb || skb->len < sizeof(*rp)) {
		bt_dev_err(hdev, "invalid read key size response");
		return;
	}

	rp = (void *)skb->data;
	handle = le16_to_cpu(rp->handle);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn)
		goto unlock;

	/* While unexpected, the read_enc_key_size command may fail. The most
	 * secure approach is to then assume the key size is 0 to force a
	 * disconnection.
	 */
	if (rp->status) {
		bt_dev_err(hdev, "failed to read key size for handle %u",
			   handle);
		conn->enc_key_size = 0;
	} else {
		conn->enc_key_size = rp->key_size;
	}

	if (conn->state == BT_CONFIG) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, 0);
		hci_conn_drop(conn);
	} else {
		u8 encrypt;

		/* Reconstruct the encrypt value for hci_encrypt_cfm():
		 * 0x00 = off, 0x02 = on with AES-CCM (per the
		 * HCI_CONN_AES_CCM flag), 0x01 = on otherwise.
		 */
		if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags))
			encrypt = 0x00;
		else if (test_bit(HCI_CONN_AES_CCM, &conn->flags))
			encrypt = 0x02;
		else
			encrypt = 0x01;

		hci_encrypt_cfm(conn, 0, encrypt);
	}

unlock:
	hci_dev_unlock(hdev);
}
2998 
/* Handle HCI_EV_ENCRYPT_CHANGE: update encryption-related connection
 * flags, enforce security policy, and complete/notify connection setup.
 */
static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (ev->encrypt) {
			/* Encryption implies authentication */
			set_bit(HCI_CONN_AUTH, &conn->flags);
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
			conn->sec_level = conn->pending_sec_level;

			/* P-256 authentication key implies FIPS */
			if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
				set_bit(HCI_CONN_FIPS, &conn->flags);

			/* encrypt == 0x02 on ACL marks AES-CCM; LE links
			 * are treated as AES-CCM unconditionally.
			 */
			if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
			    conn->type == LE_LINK)
				set_bit(HCI_CONN_AES_CCM, &conn->flags);
		} else {
			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}
	}

	/* We should disregard the current RPA and generate a new one
	 * whenever the encryption procedure fails.
	 */
	if (ev->status && conn->type == LE_LINK) {
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
	}

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* A failed encryption change on a live link is fatal: tear the
	 * connection down with an authentication failure.
	 */
	if (ev->status && conn->state == BT_CONNECTED) {
		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	/* In Secure Connections Only mode, do not allow any connections
	 * that are not encrypted with AES-CCM using a P-256 authenticated
	 * combination key.
	 */
	if (hci_dev_test_flag(hdev, HCI_SC_ONLY) &&
	    (!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
	     conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
		hci_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	/* Try reading the encryption key size for encrypted ACL links */
	if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
		struct hci_cp_read_enc_key_size cp;
		struct hci_request req;

		/* Only send HCI_Read_Encryption_Key_Size if the
		 * controller really supports it. If it doesn't, assume
		 * the default size (16).
		 */
		if (!(hdev->commands[20] & 0x10)) {
			conn->enc_key_size = HCI_LINK_KEY_SIZE;
			goto notify;
		}

		hci_req_init(&req, hdev);

		cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp);

		/* On failure to submit, fall back to the default key
		 * size and notify immediately instead.
		 */
		if (hci_req_run_skb(&req, read_enc_key_size_complete)) {
			bt_dev_err(hdev, "sending read key size failed");
			conn->enc_key_size = HCI_LINK_KEY_SIZE;
			goto notify;
		}

		/* Notification is deferred to read_enc_key_size_complete(). */
		goto unlock;
	}

	/* Set the default Authenticated Payload Timeout after
	 * an LE Link is established. As per Core Spec v5.0, Vol 2, Part B
	 * Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be
	 * sent when the link is active and Encryption is enabled, the conn
	 * type can be either LE or ACL and controller must support LMP Ping.
	 * Ensure for AES-CCM encryption as well.
	 */
	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
	    test_bit(HCI_CONN_AES_CCM, &conn->flags) &&
	    ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) ||
	     (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) {
		struct hci_cp_write_auth_payload_to cp;

		cp.handle = cpu_to_le16(conn->handle);
		cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
		hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,
			     sizeof(cp), &cp);
	}

notify:
	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else
		hci_encrypt_cfm(conn, ev->status, ev->encrypt);

unlock:
	hci_dev_unlock(hdev);
}
3123 
3124 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
3125 					     struct sk_buff *skb)
3126 {
3127 	struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
3128 	struct hci_conn *conn;
3129 
3130 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3131 
3132 	hci_dev_lock(hdev);
3133 
3134 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3135 	if (conn) {
3136 		if (!ev->status)
3137 			set_bit(HCI_CONN_SECURE, &conn->flags);
3138 
3139 		clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3140 
3141 		hci_key_change_cfm(conn, ev->status);
3142 	}
3143 
3144 	hci_dev_unlock(hdev);
3145 }
3146 
/* Handle HCI_EV_REMOTE_FEATURES: store the remote LMP feature page and
 * continue connection setup (extended features, remote name, auth).
 */
static void hci_remote_features_evt(struct hci_dev *hdev,
				    struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		memcpy(conn->features[0], ev->features, 8);

	/* Only connections still being configured are processed below. */
	if (conn->state != BT_CONFIG)
		goto unlock;

	/* If both sides support extended features, fetch page 0x01 of
	 * the remote's extended features before continuing.
	 */
	if (!ev->status && lmp_ext_feat_capable(hdev) &&
	    lmp_ext_feat_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
			     sizeof(cp), &cp);
		goto unlock;
	}

	/* Resolve the remote name before announcing the device to mgmt;
	 * if it is already announced, just mark it connected.
	 */
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3195 
3196 static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
3197 				 u16 *opcode, u8 *status,
3198 				 hci_req_complete_t *req_complete,
3199 				 hci_req_complete_skb_t *req_complete_skb)
3200 {
3201 	struct hci_ev_cmd_complete *ev = (void *) skb->data;
3202 
3203 	*opcode = __le16_to_cpu(ev->opcode);
3204 	*status = skb->data[sizeof(*ev)];
3205 
3206 	skb_pull(skb, sizeof(*ev));
3207 
3208 	switch (*opcode) {
3209 	case HCI_OP_INQUIRY_CANCEL:
3210 		hci_cc_inquiry_cancel(hdev, skb);
3211 		break;
3212 
3213 	case HCI_OP_PERIODIC_INQ:
3214 		hci_cc_periodic_inq(hdev, skb);
3215 		break;
3216 
3217 	case HCI_OP_EXIT_PERIODIC_INQ:
3218 		hci_cc_exit_periodic_inq(hdev, skb);
3219 		break;
3220 
3221 	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
3222 		hci_cc_remote_name_req_cancel(hdev, skb);
3223 		break;
3224 
3225 	case HCI_OP_ROLE_DISCOVERY:
3226 		hci_cc_role_discovery(hdev, skb);
3227 		break;
3228 
3229 	case HCI_OP_READ_LINK_POLICY:
3230 		hci_cc_read_link_policy(hdev, skb);
3231 		break;
3232 
3233 	case HCI_OP_WRITE_LINK_POLICY:
3234 		hci_cc_write_link_policy(hdev, skb);
3235 		break;
3236 
3237 	case HCI_OP_READ_DEF_LINK_POLICY:
3238 		hci_cc_read_def_link_policy(hdev, skb);
3239 		break;
3240 
3241 	case HCI_OP_WRITE_DEF_LINK_POLICY:
3242 		hci_cc_write_def_link_policy(hdev, skb);
3243 		break;
3244 
3245 	case HCI_OP_RESET:
3246 		hci_cc_reset(hdev, skb);
3247 		break;
3248 
3249 	case HCI_OP_READ_STORED_LINK_KEY:
3250 		hci_cc_read_stored_link_key(hdev, skb);
3251 		break;
3252 
3253 	case HCI_OP_DELETE_STORED_LINK_KEY:
3254 		hci_cc_delete_stored_link_key(hdev, skb);
3255 		break;
3256 
3257 	case HCI_OP_WRITE_LOCAL_NAME:
3258 		hci_cc_write_local_name(hdev, skb);
3259 		break;
3260 
3261 	case HCI_OP_READ_LOCAL_NAME:
3262 		hci_cc_read_local_name(hdev, skb);
3263 		break;
3264 
3265 	case HCI_OP_WRITE_AUTH_ENABLE:
3266 		hci_cc_write_auth_enable(hdev, skb);
3267 		break;
3268 
3269 	case HCI_OP_WRITE_ENCRYPT_MODE:
3270 		hci_cc_write_encrypt_mode(hdev, skb);
3271 		break;
3272 
3273 	case HCI_OP_WRITE_SCAN_ENABLE:
3274 		hci_cc_write_scan_enable(hdev, skb);
3275 		break;
3276 
3277 	case HCI_OP_READ_CLASS_OF_DEV:
3278 		hci_cc_read_class_of_dev(hdev, skb);
3279 		break;
3280 
3281 	case HCI_OP_WRITE_CLASS_OF_DEV:
3282 		hci_cc_write_class_of_dev(hdev, skb);
3283 		break;
3284 
3285 	case HCI_OP_READ_VOICE_SETTING:
3286 		hci_cc_read_voice_setting(hdev, skb);
3287 		break;
3288 
3289 	case HCI_OP_WRITE_VOICE_SETTING:
3290 		hci_cc_write_voice_setting(hdev, skb);
3291 		break;
3292 
3293 	case HCI_OP_READ_NUM_SUPPORTED_IAC:
3294 		hci_cc_read_num_supported_iac(hdev, skb);
3295 		break;
3296 
3297 	case HCI_OP_WRITE_SSP_MODE:
3298 		hci_cc_write_ssp_mode(hdev, skb);
3299 		break;
3300 
3301 	case HCI_OP_WRITE_SC_SUPPORT:
3302 		hci_cc_write_sc_support(hdev, skb);
3303 		break;
3304 
3305 	case HCI_OP_READ_AUTH_PAYLOAD_TO:
3306 		hci_cc_read_auth_payload_timeout(hdev, skb);
3307 		break;
3308 
3309 	case HCI_OP_WRITE_AUTH_PAYLOAD_TO:
3310 		hci_cc_write_auth_payload_timeout(hdev, skb);
3311 		break;
3312 
3313 	case HCI_OP_READ_LOCAL_VERSION:
3314 		hci_cc_read_local_version(hdev, skb);
3315 		break;
3316 
3317 	case HCI_OP_READ_LOCAL_COMMANDS:
3318 		hci_cc_read_local_commands(hdev, skb);
3319 		break;
3320 
3321 	case HCI_OP_READ_LOCAL_FEATURES:
3322 		hci_cc_read_local_features(hdev, skb);
3323 		break;
3324 
3325 	case HCI_OP_READ_LOCAL_EXT_FEATURES:
3326 		hci_cc_read_local_ext_features(hdev, skb);
3327 		break;
3328 
3329 	case HCI_OP_READ_BUFFER_SIZE:
3330 		hci_cc_read_buffer_size(hdev, skb);
3331 		break;
3332 
3333 	case HCI_OP_READ_BD_ADDR:
3334 		hci_cc_read_bd_addr(hdev, skb);
3335 		break;
3336 
3337 	case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
3338 		hci_cc_read_page_scan_activity(hdev, skb);
3339 		break;
3340 
3341 	case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
3342 		hci_cc_write_page_scan_activity(hdev, skb);
3343 		break;
3344 
3345 	case HCI_OP_READ_PAGE_SCAN_TYPE:
3346 		hci_cc_read_page_scan_type(hdev, skb);
3347 		break;
3348 
3349 	case HCI_OP_WRITE_PAGE_SCAN_TYPE:
3350 		hci_cc_write_page_scan_type(hdev, skb);
3351 		break;
3352 
3353 	case HCI_OP_READ_DATA_BLOCK_SIZE:
3354 		hci_cc_read_data_block_size(hdev, skb);
3355 		break;
3356 
3357 	case HCI_OP_READ_FLOW_CONTROL_MODE:
3358 		hci_cc_read_flow_control_mode(hdev, skb);
3359 		break;
3360 
3361 	case HCI_OP_READ_LOCAL_AMP_INFO:
3362 		hci_cc_read_local_amp_info(hdev, skb);
3363 		break;
3364 
3365 	case HCI_OP_READ_CLOCK:
3366 		hci_cc_read_clock(hdev, skb);
3367 		break;
3368 
3369 	case HCI_OP_READ_INQ_RSP_TX_POWER:
3370 		hci_cc_read_inq_rsp_tx_power(hdev, skb);
3371 		break;
3372 
3373 	case HCI_OP_READ_DEF_ERR_DATA_REPORTING:
3374 		hci_cc_read_def_err_data_reporting(hdev, skb);
3375 		break;
3376 
3377 	case HCI_OP_WRITE_DEF_ERR_DATA_REPORTING:
3378 		hci_cc_write_def_err_data_reporting(hdev, skb);
3379 		break;
3380 
3381 	case HCI_OP_PIN_CODE_REPLY:
3382 		hci_cc_pin_code_reply(hdev, skb);
3383 		break;
3384 
3385 	case HCI_OP_PIN_CODE_NEG_REPLY:
3386 		hci_cc_pin_code_neg_reply(hdev, skb);
3387 		break;
3388 
3389 	case HCI_OP_READ_LOCAL_OOB_DATA:
3390 		hci_cc_read_local_oob_data(hdev, skb);
3391 		break;
3392 
3393 	case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
3394 		hci_cc_read_local_oob_ext_data(hdev, skb);
3395 		break;
3396 
3397 	case HCI_OP_LE_READ_BUFFER_SIZE:
3398 		hci_cc_le_read_buffer_size(hdev, skb);
3399 		break;
3400 
3401 	case HCI_OP_LE_READ_LOCAL_FEATURES:
3402 		hci_cc_le_read_local_features(hdev, skb);
3403 		break;
3404 
3405 	case HCI_OP_LE_READ_ADV_TX_POWER:
3406 		hci_cc_le_read_adv_tx_power(hdev, skb);
3407 		break;
3408 
3409 	case HCI_OP_USER_CONFIRM_REPLY:
3410 		hci_cc_user_confirm_reply(hdev, skb);
3411 		break;
3412 
3413 	case HCI_OP_USER_CONFIRM_NEG_REPLY:
3414 		hci_cc_user_confirm_neg_reply(hdev, skb);
3415 		break;
3416 
3417 	case HCI_OP_USER_PASSKEY_REPLY:
3418 		hci_cc_user_passkey_reply(hdev, skb);
3419 		break;
3420 
3421 	case HCI_OP_USER_PASSKEY_NEG_REPLY:
3422 		hci_cc_user_passkey_neg_reply(hdev, skb);
3423 		break;
3424 
3425 	case HCI_OP_LE_SET_RANDOM_ADDR:
3426 		hci_cc_le_set_random_addr(hdev, skb);
3427 		break;
3428 
3429 	case HCI_OP_LE_SET_ADV_ENABLE:
3430 		hci_cc_le_set_adv_enable(hdev, skb);
3431 		break;
3432 
3433 	case HCI_OP_LE_SET_SCAN_PARAM:
3434 		hci_cc_le_set_scan_param(hdev, skb);
3435 		break;
3436 
3437 	case HCI_OP_LE_SET_SCAN_ENABLE:
3438 		hci_cc_le_set_scan_enable(hdev, skb);
3439 		break;
3440 
3441 	case HCI_OP_LE_READ_WHITE_LIST_SIZE:
3442 		hci_cc_le_read_white_list_size(hdev, skb);
3443 		break;
3444 
3445 	case HCI_OP_LE_CLEAR_WHITE_LIST:
3446 		hci_cc_le_clear_white_list(hdev, skb);
3447 		break;
3448 
3449 	case HCI_OP_LE_ADD_TO_WHITE_LIST:
3450 		hci_cc_le_add_to_white_list(hdev, skb);
3451 		break;
3452 
3453 	case HCI_OP_LE_DEL_FROM_WHITE_LIST:
3454 		hci_cc_le_del_from_white_list(hdev, skb);
3455 		break;
3456 
3457 	case HCI_OP_LE_READ_SUPPORTED_STATES:
3458 		hci_cc_le_read_supported_states(hdev, skb);
3459 		break;
3460 
3461 	case HCI_OP_LE_READ_DEF_DATA_LEN:
3462 		hci_cc_le_read_def_data_len(hdev, skb);
3463 		break;
3464 
3465 	case HCI_OP_LE_WRITE_DEF_DATA_LEN:
3466 		hci_cc_le_write_def_data_len(hdev, skb);
3467 		break;
3468 
3469 	case HCI_OP_LE_ADD_TO_RESOLV_LIST:
3470 		hci_cc_le_add_to_resolv_list(hdev, skb);
3471 		break;
3472 
3473 	case HCI_OP_LE_DEL_FROM_RESOLV_LIST:
3474 		hci_cc_le_del_from_resolv_list(hdev, skb);
3475 		break;
3476 
3477 	case HCI_OP_LE_CLEAR_RESOLV_LIST:
3478 		hci_cc_le_clear_resolv_list(hdev, skb);
3479 		break;
3480 
3481 	case HCI_OP_LE_READ_RESOLV_LIST_SIZE:
3482 		hci_cc_le_read_resolv_list_size(hdev, skb);
3483 		break;
3484 
3485 	case HCI_OP_LE_SET_ADDR_RESOLV_ENABLE:
3486 		hci_cc_le_set_addr_resolution_enable(hdev, skb);
3487 		break;
3488 
3489 	case HCI_OP_LE_READ_MAX_DATA_LEN:
3490 		hci_cc_le_read_max_data_len(hdev, skb);
3491 		break;
3492 
3493 	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
3494 		hci_cc_write_le_host_supported(hdev, skb);
3495 		break;
3496 
3497 	case HCI_OP_LE_SET_ADV_PARAM:
3498 		hci_cc_set_adv_param(hdev, skb);
3499 		break;
3500 
3501 	case HCI_OP_READ_RSSI:
3502 		hci_cc_read_rssi(hdev, skb);
3503 		break;
3504 
3505 	case HCI_OP_READ_TX_POWER:
3506 		hci_cc_read_tx_power(hdev, skb);
3507 		break;
3508 
3509 	case HCI_OP_WRITE_SSP_DEBUG_MODE:
3510 		hci_cc_write_ssp_debug_mode(hdev, skb);
3511 		break;
3512 
3513 	case HCI_OP_LE_SET_EXT_SCAN_PARAMS:
3514 		hci_cc_le_set_ext_scan_param(hdev, skb);
3515 		break;
3516 
3517 	case HCI_OP_LE_SET_EXT_SCAN_ENABLE:
3518 		hci_cc_le_set_ext_scan_enable(hdev, skb);
3519 		break;
3520 
3521 	case HCI_OP_LE_SET_DEFAULT_PHY:
3522 		hci_cc_le_set_default_phy(hdev, skb);
3523 		break;
3524 
3525 	case HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS:
3526 		hci_cc_le_read_num_adv_sets(hdev, skb);
3527 		break;
3528 
3529 	case HCI_OP_LE_SET_EXT_ADV_PARAMS:
3530 		hci_cc_set_ext_adv_param(hdev, skb);
3531 		break;
3532 
3533 	case HCI_OP_LE_SET_EXT_ADV_ENABLE:
3534 		hci_cc_le_set_ext_adv_enable(hdev, skb);
3535 		break;
3536 
3537 	case HCI_OP_LE_SET_ADV_SET_RAND_ADDR:
3538 		hci_cc_le_set_adv_set_random_addr(hdev, skb);
3539 		break;
3540 
3541 	default:
3542 		BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
3543 		break;
3544 	}
3545 
3546 	if (*opcode != HCI_OP_NOP)
3547 		cancel_delayed_work(&hdev->cmd_timer);
3548 
3549 	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
3550 		atomic_set(&hdev->cmd_cnt, 1);
3551 
3552 	hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
3553 			     req_complete_skb);
3554 
3555 	if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
3556 		bt_dev_err(hdev,
3557 			   "unexpected event for opcode 0x%4.4x", *opcode);
3558 		return;
3559 	}
3560 
3561 	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3562 		queue_work(hdev->workqueue, &hdev->cmd_work);
3563 }
3564 
/* Handle an HCI Command Status event.
 *
 * Decodes the opcode and status of the command the controller is
 * reporting on, dispatches to the matching hci_cs_* handler and then
 * performs the command-queue bookkeeping shared with the Command
 * Complete path: cancel the command timeout, refill the command credit
 * and kick the command work if more commands are queued.  The decoded
 * opcode and status are returned to the caller through @opcode and
 * @status for request tracking.
 */
static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
			       u16 *opcode, u8 *status,
			       hci_req_complete_t *req_complete,
			       hci_req_complete_skb_t *req_complete_skb)
{
	struct hci_ev_cmd_status *ev = (void *) skb->data;

	/* Strip the Command Status header from the skb */
	skb_pull(skb, sizeof(*ev));

	*opcode = __le16_to_cpu(ev->opcode);
	*status = ev->status;

	/* Dispatch to the per-command status handler */
	switch (*opcode) {
	case HCI_OP_INQUIRY:
		hci_cs_inquiry(hdev, ev->status);
		break;

	case HCI_OP_CREATE_CONN:
		hci_cs_create_conn(hdev, ev->status);
		break;

	case HCI_OP_DISCONNECT:
		hci_cs_disconnect(hdev, ev->status);
		break;

	case HCI_OP_ADD_SCO:
		hci_cs_add_sco(hdev, ev->status);
		break;

	case HCI_OP_AUTH_REQUESTED:
		hci_cs_auth_requested(hdev, ev->status);
		break;

	case HCI_OP_SET_CONN_ENCRYPT:
		hci_cs_set_conn_encrypt(hdev, ev->status);
		break;

	case HCI_OP_REMOTE_NAME_REQ:
		hci_cs_remote_name_req(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_FEATURES:
		hci_cs_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_EXT_FEATURES:
		hci_cs_read_remote_ext_features(hdev, ev->status);
		break;

	case HCI_OP_SETUP_SYNC_CONN:
		hci_cs_setup_sync_conn(hdev, ev->status);
		break;

	case HCI_OP_SNIFF_MODE:
		hci_cs_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_EXIT_SNIFF_MODE:
		hci_cs_exit_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_SWITCH_ROLE:
		hci_cs_switch_role(hdev, ev->status);
		break;

	case HCI_OP_LE_CREATE_CONN:
		hci_cs_le_create_conn(hdev, ev->status);
		break;

	case HCI_OP_LE_READ_REMOTE_FEATURES:
		hci_cs_le_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_LE_START_ENC:
		hci_cs_le_start_enc(hdev, ev->status);
		break;

	case HCI_OP_LE_EXT_CREATE_CONN:
		hci_cs_le_ext_create_conn(hdev, ev->status);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
		break;
	}

	/* A response to a real command resets the command timeout */
	if (*opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	/* Re-arm the command credit unless a reset is in progress */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
		atomic_set(&hdev->cmd_cnt, 1);

	/* Indicate request completion if the command failed. Also, if
	 * we're not waiting for a special event and we get a success
	 * command status we should try to flag the request as completed
	 * (since for this kind of commands there will not be a command
	 * complete event).
	 */
	if (ev->status ||
	    (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->hci.req_event))
		hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
				     req_complete_skb);

	/* An event while HCI_CMD_PENDING is set is unexpected; log it
	 * and leave the command queue untouched.
	 */
	if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
		bt_dev_err(hdev,
			   "unexpected event for opcode 0x%4.4x", *opcode);
		return;
	}

	/* Send the next queued command if the controller has credit */
	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
		queue_work(hdev->workqueue, &hdev->cmd_work);
}
3677 
/* Handle an HCI Hardware Error event: record the controller's error
 * code and schedule the error-reset work to recover the device.
 */
static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_hardware_error *ev = (void *) skb->data;

	/* Store the code before queueing the work; presumably the
	 * error_reset handler reads it — confirm against hci_core.
	 */
	hdev->hw_error_code = ev->code;

	queue_work(hdev->req_workqueue, &hdev->error_reset);
}
3686 
3687 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3688 {
3689 	struct hci_ev_role_change *ev = (void *) skb->data;
3690 	struct hci_conn *conn;
3691 
3692 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3693 
3694 	hci_dev_lock(hdev);
3695 
3696 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3697 	if (conn) {
3698 		if (!ev->status)
3699 			conn->role = ev->role;
3700 
3701 		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3702 
3703 		hci_role_switch_cfm(conn, ev->status, ev->role);
3704 	}
3705 
3706 	hci_dev_unlock(hdev);
3707 }
3708 
/* Handle a Number of Completed Packets event.
 *
 * For every connection handle listed in the event, credit the number
 * of packets the controller finished sending back to the matching
 * per-link-type buffer counter (clamped to the controller's advertised
 * maximum) and decrement the connection's in-flight count.  Finally
 * kick the TX work so queued traffic can use the freed buffers.
 */
static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
	int i;

	/* This event is only valid in packet-based flow control mode */
	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
		bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Reject truncated events: the header and all num_hndl entries
	 * must fit within the received data.
	 */
	if (skb->len < sizeof(*ev) ||
	    skb->len < struct_size(ev, handles, ev->num_hndl)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_pkts_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16  handle, count;

		handle = __le16_to_cpu(info->handle);
		count  = __le16_to_cpu(info->count);

		/* Unknown handles are silently skipped */
		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= count;

		switch (conn->type) {
		case ACL_LINK:
			hdev->acl_cnt += count;
			if (hdev->acl_cnt > hdev->acl_pkts)
				hdev->acl_cnt = hdev->acl_pkts;
			break;

		case LE_LINK:
			if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				/* No dedicated LE buffer pool: LE
				 * traffic is accounted against the ACL
				 * budget.
				 */
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		case SCO_LINK:
			hdev->sco_cnt += count;
			if (hdev->sco_cnt > hdev->sco_pkts)
				hdev->sco_cnt = hdev->sco_pkts;
			break;

		default:
			bt_dev_err(hdev, "unknown type %d conn %p",
				   conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
3775 
3776 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
3777 						 __u16 handle)
3778 {
3779 	struct hci_chan *chan;
3780 
3781 	switch (hdev->dev_type) {
3782 	case HCI_PRIMARY:
3783 		return hci_conn_hash_lookup_handle(hdev, handle);
3784 	case HCI_AMP:
3785 		chan = hci_chan_lookup_handle(hdev, handle);
3786 		if (chan)
3787 			return chan->conn;
3788 		break;
3789 	default:
3790 		bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
3791 		break;
3792 	}
3793 
3794 	return NULL;
3795 }
3796 
/* Handle a Number of Completed Data Blocks event.
 *
 * Block-based counterpart of hci_num_comp_pkts_evt(): return the
 * completed block budget (clamped to the controller maximum) for each
 * listed handle, adjust the connection's in-flight count and kick the
 * TX work.
 */
static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
	int i;

	/* This event is only valid in block-based flow control mode */
	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
		bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Reject truncated events: the header and all num_hndl entries
	 * must fit within the received data.
	 */
	if (skb->len < sizeof(*ev) ||
	    skb->len < struct_size(ev, handles, ev->num_hndl)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
	       ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_blocks_info *info = &ev->handles[i];
		struct hci_conn *conn = NULL;
		__u16  handle, block_count;

		handle = __le16_to_cpu(info->handle);
		block_count = __le16_to_cpu(info->blocks);

		/* Handle may belong to an AMP logical channel, hence
		 * the type-aware lookup; unknown handles are skipped.
		 */
		conn = __hci_conn_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= block_count;

		switch (conn->type) {
		case ACL_LINK:
		case AMP_LINK:
			hdev->block_cnt += block_count;
			if (hdev->block_cnt > hdev->num_blocks)
				hdev->block_cnt = hdev->num_blocks;
			break;

		default:
			bt_dev_err(hdev, "unknown type %d conn %p",
				   conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
3847 
3848 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3849 {
3850 	struct hci_ev_mode_change *ev = (void *) skb->data;
3851 	struct hci_conn *conn;
3852 
3853 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3854 
3855 	hci_dev_lock(hdev);
3856 
3857 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3858 	if (conn) {
3859 		conn->mode = ev->mode;
3860 
3861 		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
3862 					&conn->flags)) {
3863 			if (conn->mode == HCI_CM_ACTIVE)
3864 				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3865 			else
3866 				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3867 		}
3868 
3869 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
3870 			hci_sco_setup(conn, ev->status);
3871 	}
3872 
3873 	hci_dev_unlock(hdev);
3874 }
3875 
/* Handle a PIN Code Request event.
 *
 * Pairing we did not initiate is rejected outright while the adapter
 * is not bondable; otherwise, when the management interface is in use,
 * userspace is asked for the PIN (flagging whether a secure 16-digit
 * PIN is required).
 */
static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pin_code_req *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	if (conn->state == BT_CONNECTED) {
		/* Switch to the pairing disconnect timeout while the
		 * PIN exchange is in progress.
		 */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
		hci_conn_drop(conn);
	}

	/* Refuse pairing we did not initiate while not bondable */
	if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
	    !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
	} else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
		u8 secure;

		/* High security requires a full-length (secure) PIN */
		if (conn->pending_sec_level == BT_SECURITY_HIGH)
			secure = 1;
		else
			secure = 0;

		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
	}

unlock:
	hci_dev_unlock(hdev);
}
3913 
3914 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
3915 {
3916 	if (key_type == HCI_LK_CHANGED_COMBINATION)
3917 		return;
3918 
3919 	conn->pin_length = pin_len;
3920 	conn->key_type = key_type;
3921 
3922 	switch (key_type) {
3923 	case HCI_LK_LOCAL_UNIT:
3924 	case HCI_LK_REMOTE_UNIT:
3925 	case HCI_LK_DEBUG_COMBINATION:
3926 		return;
3927 	case HCI_LK_COMBINATION:
3928 		if (pin_len == 16)
3929 			conn->pending_sec_level = BT_SECURITY_HIGH;
3930 		else
3931 			conn->pending_sec_level = BT_SECURITY_MEDIUM;
3932 		break;
3933 	case HCI_LK_UNAUTH_COMBINATION_P192:
3934 	case HCI_LK_UNAUTH_COMBINATION_P256:
3935 		conn->pending_sec_level = BT_SECURITY_MEDIUM;
3936 		break;
3937 	case HCI_LK_AUTH_COMBINATION_P192:
3938 		conn->pending_sec_level = BT_SECURITY_HIGH;
3939 		break;
3940 	case HCI_LK_AUTH_COMBINATION_P256:
3941 		conn->pending_sec_level = BT_SECURITY_FIPS;
3942 		break;
3943 	}
3944 }
3945 
/* Handle a Link Key Request event.
 *
 * Look up a stored link key for the requesting remote address and
 * reply with it, unless the key is too weak for the security level the
 * connection is trying to reach, in which case a negative reply is
 * sent so pairing happens again.
 */
static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = (void *) skb->data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	BT_DBG("%s", hdev->name);

	/* Key storage is only handled when mgmt is in use */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		BT_DBG("%s link key not found for %pMR", hdev->name,
		       &ev->bdaddr);
		goto not_found;
	}

	BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
	       &ev->bdaddr);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);

		/* An unauthenticated key cannot satisfy an auth
		 * requirement with MITM (bit 0 of auth_type) unless
		 * the requirement is still unknown (0xff).
		 */
		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
			BT_DBG("%s ignoring unauthenticated key", hdev->name);
			goto not_found;
		}

		/* A combination key from a short PIN is not strong
		 * enough for high/FIPS security.
		 */
		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
		    (conn->pending_sec_level == BT_SECURITY_HIGH ||
		     conn->pending_sec_level == BT_SECURITY_FIPS)) {
			BT_DBG("%s ignoring key unauthenticated for high security",
			       hdev->name);
			goto not_found;
		}

		conn_set_key(conn, key->type, key->pin_len);
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}
4005 
/* Handle a Link Key Notification event.
 *
 * Record the new key on the connection, store it in the kernel key
 * list, notify userspace via mgmt and decide whether the key is kept
 * (persistent) or flushed on disconnect.
 */
static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = (void *) skb->data;
	struct hci_conn *conn;
	struct link_key *key;
	bool persistent;
	u8 pin_len = 0;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Pairing finished: fall back to the normal disconnect timeout */
	hci_conn_hold(conn);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(conn);

	set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
	conn_set_key(conn, ev->key_type, conn->pin_length);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	/* NOTE(review): pin_len is always 0 here while the
	 * conn_set_key() call above uses conn->pin_length — confirm
	 * this asymmetry is intentional.
	 */
	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
			        ev->key_type, pin_len, &persistent);
	if (!key)
		goto unlock;

	/* Update connection information since adding the key will have
	 * fixed up the type in the case of changed combination keys.
	 */
	if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
		conn_set_key(conn, key->type, key->pin_len);

	mgmt_new_link_key(hdev, key, persistent);

	/* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
	 * is set. If it's not set simply remove the key from the kernel
	 * list (we've still notified user space about it but with
	 * store_hint being 0).
	 */
	if (key->type == HCI_LK_DEBUG_COMBINATION &&
	    !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
		goto unlock;
	}

	if (persistent)
		clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
	else
		set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);

unlock:
	hci_dev_unlock(hdev);
}
4065 
4066 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
4067 {
4068 	struct hci_ev_clock_offset *ev = (void *) skb->data;
4069 	struct hci_conn *conn;
4070 
4071 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4072 
4073 	hci_dev_lock(hdev);
4074 
4075 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4076 	if (conn && !ev->status) {
4077 		struct inquiry_entry *ie;
4078 
4079 		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4080 		if (ie) {
4081 			ie->data.clock_offset = ev->clock_offset;
4082 			ie->timestamp = jiffies;
4083 		}
4084 	}
4085 
4086 	hci_dev_unlock(hdev);
4087 }
4088 
4089 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
4090 {
4091 	struct hci_ev_pkt_type_change *ev = (void *) skb->data;
4092 	struct hci_conn *conn;
4093 
4094 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4095 
4096 	hci_dev_lock(hdev);
4097 
4098 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4099 	if (conn && !ev->status)
4100 		conn->pkt_type = __le16_to_cpu(ev->pkt_type);
4101 
4102 	hci_dev_unlock(hdev);
4103 }
4104 
4105 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
4106 {
4107 	struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
4108 	struct inquiry_entry *ie;
4109 
4110 	BT_DBG("%s", hdev->name);
4111 
4112 	hci_dev_lock(hdev);
4113 
4114 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4115 	if (ie) {
4116 		ie->data.pscan_rep_mode = ev->pscan_rep_mode;
4117 		ie->timestamp = jiffies;
4118 	}
4119 
4120 	hci_dev_unlock(hdev);
4121 }
4122 
4123 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
4124 					     struct sk_buff *skb)
4125 {
4126 	struct inquiry_data data;
4127 	int num_rsp = *((__u8 *) skb->data);
4128 
4129 	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
4130 
4131 	if (!num_rsp)
4132 		return;
4133 
4134 	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
4135 		return;
4136 
4137 	hci_dev_lock(hdev);
4138 
4139 	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
4140 		struct inquiry_info_with_rssi_and_pscan_mode *info;
4141 		info = (void *) (skb->data + 1);
4142 
4143 		for (; num_rsp; num_rsp--, info++) {
4144 			u32 flags;
4145 
4146 			bacpy(&data.bdaddr, &info->bdaddr);
4147 			data.pscan_rep_mode	= info->pscan_rep_mode;
4148 			data.pscan_period_mode	= info->pscan_period_mode;
4149 			data.pscan_mode		= info->pscan_mode;
4150 			memcpy(data.dev_class, info->dev_class, 3);
4151 			data.clock_offset	= info->clock_offset;
4152 			data.rssi		= info->rssi;
4153 			data.ssp_mode		= 0x00;
4154 
4155 			flags = hci_inquiry_cache_update(hdev, &data, false);
4156 
4157 			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4158 					  info->dev_class, info->rssi,
4159 					  flags, NULL, 0, NULL, 0);
4160 		}
4161 	} else {
4162 		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
4163 
4164 		for (; num_rsp; num_rsp--, info++) {
4165 			u32 flags;
4166 
4167 			bacpy(&data.bdaddr, &info->bdaddr);
4168 			data.pscan_rep_mode	= info->pscan_rep_mode;
4169 			data.pscan_period_mode	= info->pscan_period_mode;
4170 			data.pscan_mode		= 0x00;
4171 			memcpy(data.dev_class, info->dev_class, 3);
4172 			data.clock_offset	= info->clock_offset;
4173 			data.rssi		= info->rssi;
4174 			data.ssp_mode		= 0x00;
4175 
4176 			flags = hci_inquiry_cache_update(hdev, &data, false);
4177 
4178 			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4179 					  info->dev_class, info->rssi,
4180 					  flags, NULL, 0, NULL, 0);
4181 		}
4182 	}
4183 
4184 	hci_dev_unlock(hdev);
4185 }
4186 
/* Handle a Read Remote Extended Features Complete event.
 *
 * Cache the reported feature page on the connection and mirror the
 * remote host's SSP/SC support flags from page 0x01.  While the link
 * is still in BT_CONFIG, continue connection setup: resolve the remote
 * name if mgmt has not seen the device yet, otherwise report it as
 * connected, and finish with BT_CONNECTED when no outgoing
 * authentication is needed.
 */
static void hci_remote_ext_features_evt(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* Only cache pages that fit the per-connection feature array */
	if (ev->page < HCI_MAX_PAGES)
		memcpy(conn->features[ev->page], ev->features, 8);

	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

		if (ev->features[0] & LMP_HOST_SSP) {
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		} else {
			/* It is mandatory by the Bluetooth specification that
			 * Extended Inquiry Results are only used when Secure
			 * Simple Pairing is enabled, but some devices violate
			 * this.
			 *
			 * To make these devices work, the internal SSP
			 * enabled flag needs to be cleared if the remote host
			 * features do not indicate SSP support */
			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		}

		if (ev->features[0] & LMP_HOST_SC)
			set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
	}

	/* Setup continuation below only applies while still configuring */
	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		/* Resolve the remote name before it is reported */
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
4250 
/* Handle a Synchronous Connection Complete event (SCO/eSCO).
 *
 * On success the connection is marked connected and registered in
 * debugfs/sysfs.  A set of parameter-negotiation failures triggers a
 * retry of an outgoing setup with a reduced packet type mask; all
 * other failures close and delete the connection.
 */
static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		/* When the link type in the event indicates SCO connection
		 * and lookup of the connection object fails, then check
		 * if an eSCO connection object exists.
		 *
		 * The core limits the synchronous connections to either
		 * SCO or eSCO. The eSCO connection is preferred and tried
		 * to be setup first and until successfully established,
		 * the link type will be hinted as eSCO.
		 */
		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;
	}

	switch (ev->status) {
	case 0x00:
		conn->handle = __le16_to_cpu(ev->handle);
		conn->state  = BT_CONNECTED;
		conn->type   = ev->link_type;

		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);
		break;

	case 0x10:	/* Connection Accept Timeout */
	case 0x0d:	/* Connection Rejected due to Limited Resources */
	case 0x11:	/* Unsupported Feature or Parameter Value */
	case 0x1c:	/* SCO interval rejected */
	case 0x1a:	/* Unsupported Remote Feature */
	case 0x1f:	/* Unspecified error */
	case 0x20:	/* Unsupported LMP Parameter value */
		if (conn->out) {
			/* Retry outgoing setup with a reduced packet
			 * type mask.
			 * NOTE(review): conn->link is assumed non-NULL
			 * for an outgoing SCO/eSCO connection — confirm.
			 */
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
			if (hci_setup_sync(conn, conn->link->handle))
				goto unlock;
		}
		/* fall through */

	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_connect_cfm(conn, ev->status);
	if (ev->status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
4317 
4318 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
4319 {
4320 	size_t parsed = 0;
4321 
4322 	while (parsed < eir_len) {
4323 		u8 field_len = eir[0];
4324 
4325 		if (field_len == 0)
4326 			return parsed;
4327 
4328 		parsed += field_len + 1;
4329 		eir += field_len + 1;
4330 	}
4331 
4332 	return eir_len;
4333 }
4334 
4335 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
4336 					    struct sk_buff *skb)
4337 {
4338 	struct inquiry_data data;
4339 	struct extended_inquiry_info *info = (void *) (skb->data + 1);
4340 	int num_rsp = *((__u8 *) skb->data);
4341 	size_t eir_len;
4342 
4343 	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
4344 
4345 	if (!num_rsp)
4346 		return;
4347 
4348 	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
4349 		return;
4350 
4351 	hci_dev_lock(hdev);
4352 
4353 	for (; num_rsp; num_rsp--, info++) {
4354 		u32 flags;
4355 		bool name_known;
4356 
4357 		bacpy(&data.bdaddr, &info->bdaddr);
4358 		data.pscan_rep_mode	= info->pscan_rep_mode;
4359 		data.pscan_period_mode	= info->pscan_period_mode;
4360 		data.pscan_mode		= 0x00;
4361 		memcpy(data.dev_class, info->dev_class, 3);
4362 		data.clock_offset	= info->clock_offset;
4363 		data.rssi		= info->rssi;
4364 		data.ssp_mode		= 0x01;
4365 
4366 		if (hci_dev_test_flag(hdev, HCI_MGMT))
4367 			name_known = eir_get_data(info->data,
4368 						  sizeof(info->data),
4369 						  EIR_NAME_COMPLETE, NULL);
4370 		else
4371 			name_known = true;
4372 
4373 		flags = hci_inquiry_cache_update(hdev, &data, name_known);
4374 
4375 		eir_len = eir_get_length(info->data, sizeof(info->data));
4376 
4377 		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4378 				  info->dev_class, info->rssi,
4379 				  flags, info->data, eir_len, NULL, 0);
4380 	}
4381 
4382 	hci_dev_unlock(hdev);
4383 }
4384 
/* Handle an Encryption Key Refresh Complete event.
 *
 * Only acted upon for LE links (BR/EDR is handled via auth_complete as
 * noted below): on success the pending security level becomes active,
 * failures on an established link trigger a disconnect, and links
 * still in BT_CONFIG are transitioned/confirmed accordingly.
 */
static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
	       __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* For BR/EDR the necessary steps are taken through the
	 * auth_complete event.
	 */
	if (conn->type != LE_LINK)
		goto unlock;

	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* Failure on a live link: tear it down as an auth failure */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		/* Keep the connection around for the disconnect
		 * timeout after authentication concludes.
		 */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
4434 
4435 static u8 hci_get_auth_req(struct hci_conn *conn)
4436 {
4437 	/* If remote requests no-bonding follow that lead */
4438 	if (conn->remote_auth == HCI_AT_NO_BONDING ||
4439 	    conn->remote_auth == HCI_AT_NO_BONDING_MITM)
4440 		return conn->remote_auth | (conn->auth_type & 0x01);
4441 
4442 	/* If both remote and local have enough IO capabilities, require
4443 	 * MITM protection
4444 	 */
4445 	if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
4446 	    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
4447 		return conn->remote_auth | 0x01;
4448 
4449 	/* No MITM protection possible so ignore remote requirement */
4450 	return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
4451 }
4452 
4453 static u8 bredr_oob_data_present(struct hci_conn *conn)
4454 {
4455 	struct hci_dev *hdev = conn->hdev;
4456 	struct oob_data *data;
4457 
4458 	data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
4459 	if (!data)
4460 		return 0x00;
4461 
4462 	if (bredr_sc_enabled(hdev)) {
4463 		/* When Secure Connections is enabled, then just
4464 		 * return the present value stored with the OOB
4465 		 * data. The stored value contains the right present
4466 		 * information. However it can only be trusted when
4467 		 * not in Secure Connection Only mode.
4468 		 */
4469 		if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
4470 			return data->present;
4471 
4472 		/* When Secure Connections Only mode is enabled, then
4473 		 * the P-256 values are required. If they are not
4474 		 * available, then do not declare that OOB data is
4475 		 * present.
4476 		 */
4477 		if (!memcmp(data->rand256, ZERO_KEY, 16) ||
4478 		    !memcmp(data->hash256, ZERO_KEY, 16))
4479 			return 0x00;
4480 
4481 		return 0x02;
4482 	}
4483 
4484 	/* When Secure Connections is not enabled or actually
4485 	 * not supported by the hardware, then check that if
4486 	 * P-192 data values are present.
4487 	 */
4488 	if (!memcmp(data->rand192, ZERO_KEY, 16) ||
4489 	    !memcmp(data->hash192, ZERO_KEY, 16))
4490 		return 0x00;
4491 
4492 	return 0x01;
4493 }
4494 
/* Handle the HCI IO Capability Request event: decide whether pairing
 * is allowed and reply with either an IO Capability Reply (including
 * our auth requirements and OOB data presence) or a negative reply.
 */
static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Keep a reference for the duration of the pairing procedure.
	 * NOTE(review): presumably balanced by the hci_conn_drop() in
	 * hci_simple_pair_complete_evt() — confirm against the full
	 * event flow.
	 */
	hci_conn_hold(conn);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	/* Allow pairing if we're pairable, the initiators of the
	 * pairing or if the remote is not requesting bonding.
	 */
	if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
	    test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
				HCI_IO_DISPLAY_YESNO : conn->io_capability;

		/* If we are initiators, there is no remote information yet */
		if (conn->remote_auth == 0xff) {
			/* Request MITM protection if our IO caps allow it
			 * except for the no-bonding case.
			 */
			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
			    conn->auth_type != HCI_AT_NO_BONDING)
				conn->auth_type |= 0x01;
		} else {
			/* Remote info is available: derive the combined
			 * auth requirement from both sides' preferences.
			 */
			conn->auth_type = hci_get_auth_req(conn);
		}

		/* If we're not bondable, force one of the non-bondable
		 * authentication requirement values.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
			conn->auth_type &= HCI_AT_NO_BONDING_MITM;

		cp.authentication = conn->auth_type;
		cp.oob_data = bredr_oob_data_present(conn);

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		/* Pairing not permitted: send a negative reply with the
		 * standard "pairing not allowed" error code.
		 */
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
4563 
4564 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
4565 {
4566 	struct hci_ev_io_capa_reply *ev = (void *) skb->data;
4567 	struct hci_conn *conn;
4568 
4569 	BT_DBG("%s", hdev->name);
4570 
4571 	hci_dev_lock(hdev);
4572 
4573 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4574 	if (!conn)
4575 		goto unlock;
4576 
4577 	conn->remote_cap = ev->capability;
4578 	conn->remote_auth = ev->authentication;
4579 
4580 unlock:
4581 	hci_dev_unlock(hdev);
4582 }
4583 
/* Handle the HCI User Confirmation Request event: either auto-accept,
 * auto-reject, or forward the numeric-comparison request to user space
 * via mgmt (with confirm_hint indicating whether user confirmation of
 * the passkey value is actually required).
 */
static void hci_user_confirm_request_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Bit 0 of the auth requirement encodes the MITM flag */
	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. We check the security level here since it doesn't
	 * necessarily match conn->auth_type.
	 */
	if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
	    conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). The exception is if neither
		 * side had MITM or if the local IO capability is
		 * NoInputNoOutput, in which case we do auto-accept
		 */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
		    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
		    (loc_mitm || rem_mitm)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		/* If there already exists link key in local host, leave the
		 * decision to user space since the remote device could be
		 * legitimate or malicious.
		 */
		if (hci_find_link_key(hdev, &ev->bdaddr)) {
			bt_dev_dbg(hdev, "Local host already has link key");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		/* A configured delay defers the accept to a delayed work
		 * item instead of replying immediately.
		 */
		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			queue_delayed_work(conn->hdev->workqueue,
					   &conn->auto_accept_work, delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
				  le32_to_cpu(ev->passkey), confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
4668 
4669 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
4670 					 struct sk_buff *skb)
4671 {
4672 	struct hci_ev_user_passkey_req *ev = (void *) skb->data;
4673 
4674 	BT_DBG("%s", hdev->name);
4675 
4676 	if (hci_dev_test_flag(hdev, HCI_MGMT))
4677 		mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
4678 }
4679 
4680 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
4681 					struct sk_buff *skb)
4682 {
4683 	struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
4684 	struct hci_conn *conn;
4685 
4686 	BT_DBG("%s", hdev->name);
4687 
4688 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4689 	if (!conn)
4690 		return;
4691 
4692 	conn->passkey_notify = __le32_to_cpu(ev->passkey);
4693 	conn->passkey_entered = 0;
4694 
4695 	if (hci_dev_test_flag(hdev, HCI_MGMT))
4696 		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4697 					 conn->dst_type, conn->passkey_notify,
4698 					 conn->passkey_entered);
4699 }
4700 
4701 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
4702 {
4703 	struct hci_ev_keypress_notify *ev = (void *) skb->data;
4704 	struct hci_conn *conn;
4705 
4706 	BT_DBG("%s", hdev->name);
4707 
4708 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4709 	if (!conn)
4710 		return;
4711 
4712 	switch (ev->type) {
4713 	case HCI_KEYPRESS_STARTED:
4714 		conn->passkey_entered = 0;
4715 		return;
4716 
4717 	case HCI_KEYPRESS_ENTERED:
4718 		conn->passkey_entered++;
4719 		break;
4720 
4721 	case HCI_KEYPRESS_ERASED:
4722 		conn->passkey_entered--;
4723 		break;
4724 
4725 	case HCI_KEYPRESS_CLEARED:
4726 		conn->passkey_entered = 0;
4727 		break;
4728 
4729 	case HCI_KEYPRESS_COMPLETED:
4730 		return;
4731 	}
4732 
4733 	if (hci_dev_test_flag(hdev, HCI_MGMT))
4734 		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4735 					 conn->dst_type, conn->passkey_notify,
4736 					 conn->passkey_entered);
4737 }
4738 
4739 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
4740 					 struct sk_buff *skb)
4741 {
4742 	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
4743 	struct hci_conn *conn;
4744 
4745 	BT_DBG("%s", hdev->name);
4746 
4747 	hci_dev_lock(hdev);
4748 
4749 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4750 	if (!conn)
4751 		goto unlock;
4752 
4753 	/* Reset the authentication requirement to unknown */
4754 	conn->remote_auth = 0xff;
4755 
4756 	/* To avoid duplicate auth_failed events to user space we check
4757 	 * the HCI_CONN_AUTH_PEND flag which will be set if we
4758 	 * initiated the authentication. A traditional auth_complete
4759 	 * event gets always produced as initiator and is also mapped to
4760 	 * the mgmt_auth_failed event */
4761 	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
4762 		mgmt_auth_failed(conn, ev->status);
4763 
4764 	hci_conn_drop(conn);
4765 
4766 unlock:
4767 	hci_dev_unlock(hdev);
4768 }
4769 
4770 static void hci_remote_host_features_evt(struct hci_dev *hdev,
4771 					 struct sk_buff *skb)
4772 {
4773 	struct hci_ev_remote_host_features *ev = (void *) skb->data;
4774 	struct inquiry_entry *ie;
4775 	struct hci_conn *conn;
4776 
4777 	BT_DBG("%s", hdev->name);
4778 
4779 	hci_dev_lock(hdev);
4780 
4781 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4782 	if (conn)
4783 		memcpy(conn->features[1], ev->features, 8);
4784 
4785 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4786 	if (ie)
4787 		ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4788 
4789 	hci_dev_unlock(hdev);
4790 }
4791 
/* Handle the HCI Remote OOB Data Request event: look up locally stored
 * OOB data for the remote address and reply with it (extended reply
 * carrying P-192 and P-256 values when Secure Connections is enabled,
 * legacy reply with P-192 only otherwise), or send a negative reply if
 * no data is available.
 */
static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
	struct oob_data *data;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
	if (!data) {
		/* No stored OOB data for this peer: tell the controller
		 * so with a negative reply.
		 */
		struct hci_cp_remote_oob_data_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
			     sizeof(cp), &cp);
		goto unlock;
	}

	if (bredr_sc_enabled(hdev)) {
		struct hci_cp_remote_oob_ext_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* In Secure Connections Only mode the P-192 values must
		 * not be used, so send them zeroed out.
		 */
		if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
			memset(cp.hash192, 0, sizeof(cp.hash192));
			memset(cp.rand192, 0, sizeof(cp.rand192));
		} else {
			memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
			memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
		}
		memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
		memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
			     sizeof(cp), &cp);
	} else {
		/* Legacy reply carries only the P-192 hash/randomizer */
		struct hci_cp_remote_oob_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		memcpy(cp.hash, data->hash192, sizeof(cp.hash));
		memcpy(cp.rand, data->rand192, sizeof(cp.rand));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
4845 
4846 #if IS_ENABLED(CONFIG_BT_HS)
4847 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4848 {
4849 	struct hci_ev_channel_selected *ev = (void *)skb->data;
4850 	struct hci_conn *hcon;
4851 
4852 	BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4853 
4854 	skb_pull(skb, sizeof(*ev));
4855 
4856 	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4857 	if (!hcon)
4858 		return;
4859 
4860 	amp_read_loc_assoc_final_data(hdev, hcon);
4861 }
4862 
4863 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
4864 				      struct sk_buff *skb)
4865 {
4866 	struct hci_ev_phy_link_complete *ev = (void *) skb->data;
4867 	struct hci_conn *hcon, *bredr_hcon;
4868 
4869 	BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
4870 	       ev->status);
4871 
4872 	hci_dev_lock(hdev);
4873 
4874 	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4875 	if (!hcon) {
4876 		hci_dev_unlock(hdev);
4877 		return;
4878 	}
4879 
4880 	if (ev->status) {
4881 		hci_conn_del(hcon);
4882 		hci_dev_unlock(hdev);
4883 		return;
4884 	}
4885 
4886 	bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
4887 
4888 	hcon->state = BT_CONNECTED;
4889 	bacpy(&hcon->dst, &bredr_hcon->dst);
4890 
4891 	hci_conn_hold(hcon);
4892 	hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
4893 	hci_conn_drop(hcon);
4894 
4895 	hci_debugfs_create_conn(hcon);
4896 	hci_conn_add_sysfs(hcon);
4897 
4898 	amp_physical_cfm(bredr_hcon, hcon);
4899 
4900 	hci_dev_unlock(hdev);
4901 }
4902 
/* Handle the HCI Logical Link Complete event: create an hci_chan for
 * the new AMP logical link and confirm the logical link to L2CAP via
 * the associated BR/EDR channel.
 *
 * NOTE(review): unlike the other handlers here, this one performs the
 * connection-hash lookup without taking hci_dev_lock — verify whether
 * that is safe in this call context.
 */
static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_logical_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon;
	struct hci_chan *hchan;
	struct amp_mgr *mgr;

	BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
	       hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
	       ev->status);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	/* Create AMP hchan */
	hchan = hci_chan_create(hcon);
	if (!hchan)
		return;

	hchan->handle = le16_to_cpu(ev->handle);

	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);

	mgr = hcon->amp_mgr;
	if (mgr && mgr->bredr_chan) {
		struct l2cap_chan *bredr_chan = mgr->bredr_chan;

		l2cap_chan_lock(bredr_chan);

		/* Switch the L2CAP MTU to the AMP controller's block
		 * MTU and confirm the logical link.
		 */
		bredr_chan->conn->mtu = hdev->block_mtu;
		l2cap_logical_cfm(bredr_chan, hchan, 0);
		hci_conn_hold(hcon);

		l2cap_chan_unlock(bredr_chan);
	}
}
4940 
4941 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
4942 					     struct sk_buff *skb)
4943 {
4944 	struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
4945 	struct hci_chan *hchan;
4946 
4947 	BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
4948 	       le16_to_cpu(ev->handle), ev->status);
4949 
4950 	if (ev->status)
4951 		return;
4952 
4953 	hci_dev_lock(hdev);
4954 
4955 	hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
4956 	if (!hchan)
4957 		goto unlock;
4958 
4959 	amp_destroy_logical_link(hchan, ev->reason);
4960 
4961 unlock:
4962 	hci_dev_unlock(hdev);
4963 }
4964 
4965 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
4966 					     struct sk_buff *skb)
4967 {
4968 	struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
4969 	struct hci_conn *hcon;
4970 
4971 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4972 
4973 	if (ev->status)
4974 		return;
4975 
4976 	hci_dev_lock(hdev);
4977 
4978 	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4979 	if (hcon) {
4980 		hcon->state = BT_CLOSED;
4981 		hci_conn_del(hcon);
4982 	}
4983 
4984 	hci_dev_unlock(hdev);
4985 }
4986 #endif
4987 
/* Common handler for LE Connection Complete and LE Enhanced Connection
 * Complete events: find or create the hci_conn, fix up the initiator/
 * responder addresses, resolve RPAs to identity addresses, and either
 * fail the connection (non-zero status) or transition it towards the
 * connected state, kicking off the remote features read when the
 * procedure is applicable for our role.
 */
static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
			bdaddr_t *bdaddr, u8 bdaddr_type, u8 role, u16 handle,
			u16 interval, u16 latency, u16 supervision_timeout)
{
	struct hci_conn_params *params;
	struct hci_conn *conn;
	struct smp_irk *irk;
	u8 addr_type;

	hci_dev_lock(hdev);

	/* All controllers implicitly stop advertising in the event of a
	 * connection, so ensure that the state bit is cleared.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	conn = hci_lookup_le_connect(hdev);
	if (!conn) {
		conn = hci_conn_add(hdev, LE_LINK, bdaddr, role);
		if (!conn) {
			bt_dev_err(hdev, "no memory for new connection");
			goto unlock;
		}

		conn->dst_type = bdaddr_type;

		/* If we didn't have a hci_conn object previously
		 * but we're in master role this must be something
		 * initiated using a white list. Since white list based
		 * connections are not "first class citizens" we don't
		 * have full tracking of them. Therefore, we go ahead
		 * with a "best effort" approach of determining the
		 * initiator address based on the HCI_PRIVACY flag.
		 */
		if (conn->out) {
			conn->resp_addr_type = bdaddr_type;
			bacpy(&conn->resp_addr, bdaddr);
			if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
				bacpy(&conn->init_addr, &hdev->rpa);
			} else {
				hci_copy_identity_address(hdev,
							  &conn->init_addr,
							  &conn->init_addr_type);
			}
		}
	} else {
		/* An existing pending connection matched; stop its
		 * connection-attempt timeout.
		 */
		cancel_delayed_work(&conn->le_conn_timeout);
	}

	if (!conn->out) {
		/* Set the responder (our side) address type based on
		 * the advertising address type.
		 */
		conn->resp_addr_type = hdev->adv_addr_type;
		if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
			/* In case of ext adv, resp_addr will be updated in
			 * Adv Terminated event.
			 */
			if (!ext_adv_capable(hdev))
				bacpy(&conn->resp_addr, &hdev->random_addr);
		} else {
			bacpy(&conn->resp_addr, &hdev->bdaddr);
		}

		conn->init_addr_type = bdaddr_type;
		bacpy(&conn->init_addr, bdaddr);

		/* For incoming connections, set the default minimum
		 * and maximum connection interval. They will be used
		 * to check if the parameters are in range and if not
		 * trigger the connection update procedure.
		 */
		conn->le_conn_min_interval = hdev->le_conn_min_interval;
		conn->le_conn_max_interval = hdev->le_conn_max_interval;
	}

	/* Lookup the identity address from the stored connection
	 * address and address type.
	 *
	 * When establishing connections to an identity address, the
	 * connection procedure will store the resolvable random
	 * address first. Now if it can be converted back into the
	 * identity address, start using the identity address from
	 * now on.
	 */
	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
	if (irk) {
		bacpy(&conn->dst, &irk->bdaddr);
		conn->dst_type = irk->addr_type;
	}

	if (status) {
		hci_le_conn_failed(conn, status);
		goto unlock;
	}

	if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
		addr_type = BDADDR_LE_PUBLIC;
	else
		addr_type = BDADDR_LE_RANDOM;

	/* Drop the connection if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
		hci_conn_drop(conn);
		goto unlock;
	}

	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = handle;
	conn->state = BT_CONFIG;

	conn->le_conn_interval = interval;
	conn->le_conn_latency = latency;
	conn->le_supv_timeout = supervision_timeout;

	hci_debugfs_create_conn(conn);
	hci_conn_add_sysfs(conn);

	/* The remote features procedure is defined for master
	 * role only. So only in case of an initiated connection
	 * request the remote features.
	 *
	 * If the local controller supports slave-initiated features
	 * exchange, then requesting the remote features in slave
	 * role is possible. Otherwise just transition into the
	 * connected state without requesting the remote features.
	 */
	if (conn->out ||
	    (hdev->le_features[0] & HCI_LE_SLAVE_FEATURES)) {
		struct hci_cp_le_read_remote_features cp;

		cp.handle = __cpu_to_le16(conn->handle);

		hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
			     sizeof(cp), &cp);

		/* Hold a reference until the features exchange finishes */
		hci_conn_hold(conn);
	} else {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, status);
	}

	/* The pending connection attempt succeeded: remove it from the
	 * pending-action list and release the reference the params
	 * entry held on the connection.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
					   conn->dst_type);
	if (params) {
		list_del_init(&params->action);
		if (params->conn) {
			hci_conn_drop(params->conn);
			hci_conn_put(params->conn);
			params->conn = NULL;
		}
	}

unlock:
	hci_update_background_scan(hdev);
	hci_dev_unlock(hdev);
}
5149 
5150 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
5151 {
5152 	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
5153 
5154 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5155 
5156 	le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5157 			     ev->role, le16_to_cpu(ev->handle),
5158 			     le16_to_cpu(ev->interval),
5159 			     le16_to_cpu(ev->latency),
5160 			     le16_to_cpu(ev->supervision_timeout));
5161 }
5162 
5163 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev,
5164 					 struct sk_buff *skb)
5165 {
5166 	struct hci_ev_le_enh_conn_complete *ev = (void *) skb->data;
5167 
5168 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5169 
5170 	le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5171 			     ev->role, le16_to_cpu(ev->handle),
5172 			     le16_to_cpu(ev->interval),
5173 			     le16_to_cpu(ev->latency),
5174 			     le16_to_cpu(ev->supervision_timeout));
5175 }
5176 
/* Handle the LE Extended Advertising Set Terminated event: when the
 * set terminated because a connection was created, fill in the
 * responder address that was deferred by le_conn_complete_evt() for
 * ext-adv capable controllers using a random advertising address.
 *
 * NOTE(review): the connection lookup and resp_addr update are done
 * without taking hci_dev_lock, unlike most handlers in this file —
 * verify whether that is safe in this call context.
 */
static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_evt_le_ext_adv_set_term *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	if (ev->status)
		return;

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
	if (conn) {
		struct adv_info *adv_instance;

		/* Only a random advertising address needs fixing up */
		if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM)
			return;

		/* Instance 0 uses the controller-wide random address */
		if (!hdev->cur_adv_instance) {
			bacpy(&conn->resp_addr, &hdev->random_addr);
			return;
		}

		/* Otherwise use the per-instance random address */
		adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
		if (adv_instance)
			bacpy(&conn->resp_addr, &adv_instance->random_addr);
	}
}
5204 
5205 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
5206 					    struct sk_buff *skb)
5207 {
5208 	struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
5209 	struct hci_conn *conn;
5210 
5211 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5212 
5213 	if (ev->status)
5214 		return;
5215 
5216 	hci_dev_lock(hdev);
5217 
5218 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5219 	if (conn) {
5220 		conn->le_conn_interval = le16_to_cpu(ev->interval);
5221 		conn->le_conn_latency = le16_to_cpu(ev->latency);
5222 		conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
5223 	}
5224 
5225 	hci_dev_unlock(hdev);
5226 }
5227 
/* This function requires the caller holds hdev->lock.
 *
 * Check whether a connectable advertising event should trigger an
 * outgoing LE connection attempt to the advertiser. Returns the new
 * hci_conn on success, or NULL when no connection should be (or could
 * be) created.
 */
static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
					      bdaddr_t *addr,
					      u8 addr_type, u8 adv_type,
					      bdaddr_t *direct_rpa)
{
	struct hci_conn *conn;
	struct hci_conn_params *params;

	/* If the event is not connectable don't proceed further */
	if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
		return NULL;

	/* Ignore if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type))
		return NULL;

	/* Most controller will fail if we try to create new connections
	 * while we have an existing one in slave role.
	 */
	if (hdev->conn_hash.le_num_slave > 0)
		return NULL;

	/* If we're not connectable only connect devices that we have in
	 * our pend_le_conns list.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
					   addr_type);
	if (!params)
		return NULL;

	/* Explicit connects bypass the auto-connect policy check below */
	if (!params->explicit_connect) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
			/* Only devices advertising with ADV_DIRECT_IND are
			 * triggering a connection attempt. This is allowing
			 * incoming connections from slave devices.
			 */
			if (adv_type != LE_ADV_DIRECT_IND)
				return NULL;
			break;
		case HCI_AUTO_CONN_ALWAYS:
			/* Devices advertising with ADV_IND or ADV_DIRECT_IND
			 * are triggering a connection attempt. This means
			 * that incoming connectioms from slave device are
			 * accepted and also outgoing connections to slave
			 * devices are established when found.
			 */
			break;
		default:
			return NULL;
		}
	}

	conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
			      HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER,
			      direct_rpa);
	if (!IS_ERR(conn)) {
		/* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
		 * by higher layer that tried to connect, if no then
		 * store the pointer since we don't really have any
		 * other owner of the object besides the params that
		 * triggered it. This way we can abort the connection if
		 * the parameters get removed and keep the reference
		 * count consistent once the connection is established.
		 */

		if (!params->explicit_connect)
			params->conn = hci_conn_get(conn);

		return conn;
	}

	switch (PTR_ERR(conn)) {
	case -EBUSY:
		/* If hci_connect() returns -EBUSY it means there is already
		 * an LE connection attempt going on. Since controllers don't
		 * support more than one connection attempt at the time, we
		 * don't consider this an error case.
		 */
		break;
	default:
		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
		return NULL;
	}

	return NULL;
}
5316 
/* Process a single LE advertising (or scan response / directed
 * advertising) report: validate and trim the AD data, resolve RPAs,
 * possibly trigger a pending connection, and generate mgmt device
 * found events — merging an ADV_IND/ADV_SCAN_IND with its following
 * SCAN_RSP into one event during active scanning.
 */
static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
			       u8 bdaddr_type, bdaddr_t *direct_addr,
			       u8 direct_addr_type, s8 rssi, u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;
	struct smp_irk *irk;
	struct hci_conn *conn;
	bool match;
	u32 flags;
	u8 *ptr, real_len;

	/* Only the five legacy advertising PDU types are valid here */
	switch (type) {
	case LE_ADV_IND:
	case LE_ADV_DIRECT_IND:
	case LE_ADV_SCAN_IND:
	case LE_ADV_NONCONN_IND:
	case LE_ADV_SCAN_RSP:
		break;
	default:
		bt_dev_err_ratelimited(hdev, "unknown advertising packet "
				       "type: 0x%02x", type);
		return;
	}

	/* Find the end of the data in case the report contains padded zero
	 * bytes at the end causing an invalid length value.
	 *
	 * When data is NULL, len is 0 so there is no need for extra ptr
	 * check as 'ptr < data + 0' is already false in such case.
	 */
	for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
		if (ptr + 1 + *ptr > data + len)
			break;
	}

	real_len = ptr - data;

	/* Adjust for actual length */
	if (len != real_len) {
		bt_dev_err_ratelimited(hdev, "advertising data len corrected");
		len = real_len;
	}

	/* If the direct address is present, then this report is from
	 * a LE Direct Advertising Report event. In that case it is
	 * important to see if the address is matching the local
	 * controller address.
	 */
	if (direct_addr) {
		/* Only resolvable random addresses are valid for these
		 * kind of reports and others can be ignored.
		 */
		if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
			return;

		/* If the controller is not using resolvable random
		 * addresses, then this report can be ignored.
		 */
		if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
			return;

		/* If the local IRK of the controller does not match
		 * with the resolvable random address provided, then
		 * this report can be ignored.
		 */
		if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
			return;
	}

	/* Check if we need to convert to identity address */
	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		bdaddr_type = irk->addr_type;
	}

	/* Check if we have been requested to connect to this device.
	 *
	 * direct_addr is set only for directed advertising reports (it is NULL
	 * for advertising reports) and is already verified to be RPA above.
	 */
	conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type,
								direct_addr);
	if (conn && type == LE_ADV_IND) {
		/* Store report for later inclusion by
		 * mgmt_device_connected
		 */
		memcpy(conn->le_adv_data, data, len);
		conn->le_adv_data_len = len;
	}

	/* Passive scanning shouldn't trigger any device found events,
	 * except for devices marked as CONN_REPORT for which we do send
	 * device found events.
	 */
	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
		if (type == LE_ADV_DIRECT_IND)
			return;

		if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
					       bdaddr, bdaddr_type))
			return;

		if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
			flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
		else
			flags = 0;
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* When receiving non-connectable or scannable undirected
	 * advertising reports, this means that the remote device is
	 * not connectable and then clearly indicate this in the
	 * device found event.
	 *
	 * When receiving a scan response, then there is no way to
	 * know if the remote device is connectable or not. However
	 * since scan responses are merged with a previously seen
	 * advertising report, the flags field from that report
	 * will be used.
	 *
	 * In the really unlikely case that a controller get confused
	 * and just sends a scan response event, then it is marked as
	 * not connectable as well.
	 */
	if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
	    type == LE_ADV_SCAN_RSP)
		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
	else
		flags = 0;

	/* If there's nothing pending either store the data from this
	 * event or send an immediate device found event if the data
	 * should not be stored for later.
	 */
	if (!has_pending_adv_report(hdev)) {
		/* If the report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* Check if the pending report is for the same device as the new one */
	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
		 bdaddr_type == d->last_adv_addr_type);

	/* If the pending data doesn't match this report or this isn't a
	 * scan response (e.g. we got a duplicate ADV_IND) then force
	 * sending of the pending data.
	 */
	if (type != LE_ADV_SCAN_RSP || !match) {
		/* Send out whatever is in the cache, but skip duplicates */
		if (!match)
			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);

		/* If the new report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		/* The advertising reports cannot be merged, so clear
		 * the pending report and send out a device found event.
		 */
		clear_pending_adv_report(hdev);
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
	 * the new event is a SCAN_RSP. We can therefore proceed with
	 * sending a merged device found event.
	 */
	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
			  d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
			  d->last_adv_data, d->last_adv_data_len, data, len);
	clear_pending_adv_report(hdev);
}
5513 
5514 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
5515 {
5516 	u8 num_reports = skb->data[0];
5517 	void *ptr = &skb->data[1];
5518 
5519 	hci_dev_lock(hdev);
5520 
5521 	while (num_reports--) {
5522 		struct hci_ev_le_advertising_info *ev = ptr;
5523 		s8 rssi;
5524 
5525 		if (ev->length <= HCI_MAX_AD_LENGTH) {
5526 			rssi = ev->data[ev->length];
5527 			process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
5528 					   ev->bdaddr_type, NULL, 0, rssi,
5529 					   ev->data, ev->length);
5530 		} else {
5531 			bt_dev_err(hdev, "Dropping invalid advertising data");
5532 		}
5533 
5534 		ptr += sizeof(*ev) + ev->length + 1;
5535 	}
5536 
5537 	hci_dev_unlock(hdev);
5538 }
5539 
5540 static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
5541 {
5542 	if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
5543 		switch (evt_type) {
5544 		case LE_LEGACY_ADV_IND:
5545 			return LE_ADV_IND;
5546 		case LE_LEGACY_ADV_DIRECT_IND:
5547 			return LE_ADV_DIRECT_IND;
5548 		case LE_LEGACY_ADV_SCAN_IND:
5549 			return LE_ADV_SCAN_IND;
5550 		case LE_LEGACY_NONCONN_IND:
5551 			return LE_ADV_NONCONN_IND;
5552 		case LE_LEGACY_SCAN_RSP_ADV:
5553 		case LE_LEGACY_SCAN_RSP_ADV_SCAN:
5554 			return LE_ADV_SCAN_RSP;
5555 		}
5556 
5557 		goto invalid;
5558 	}
5559 
5560 	if (evt_type & LE_EXT_ADV_CONN_IND) {
5561 		if (evt_type & LE_EXT_ADV_DIRECT_IND)
5562 			return LE_ADV_DIRECT_IND;
5563 
5564 		return LE_ADV_IND;
5565 	}
5566 
5567 	if (evt_type & LE_EXT_ADV_SCAN_RSP)
5568 		return LE_ADV_SCAN_RSP;
5569 
5570 	if (evt_type & LE_EXT_ADV_SCAN_IND)
5571 		return LE_ADV_SCAN_IND;
5572 
5573 	if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
5574 	    evt_type & LE_EXT_ADV_DIRECT_IND)
5575 		return LE_ADV_NONCONN_IND;
5576 
5577 invalid:
5578 	bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x",
5579 			       evt_type);
5580 
5581 	return LE_ADV_INVALID;
5582 }
5583 
5584 static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
5585 {
5586 	u8 num_reports = skb->data[0];
5587 	void *ptr = &skb->data[1];
5588 
5589 	hci_dev_lock(hdev);
5590 
5591 	while (num_reports--) {
5592 		struct hci_ev_le_ext_adv_report *ev = ptr;
5593 		u8 legacy_evt_type;
5594 		u16 evt_type;
5595 
5596 		evt_type = __le16_to_cpu(ev->evt_type);
5597 		legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);
5598 		if (legacy_evt_type != LE_ADV_INVALID) {
5599 			process_adv_report(hdev, legacy_evt_type, &ev->bdaddr,
5600 					   ev->bdaddr_type, NULL, 0, ev->rssi,
5601 					   ev->data, ev->length);
5602 		}
5603 
5604 		ptr += sizeof(*ev) + ev->length;
5605 	}
5606 
5607 	hci_dev_unlock(hdev);
5608 }
5609 
5610 static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev,
5611 					    struct sk_buff *skb)
5612 {
5613 	struct hci_ev_le_remote_feat_complete *ev = (void *)skb->data;
5614 	struct hci_conn *conn;
5615 
5616 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5617 
5618 	hci_dev_lock(hdev);
5619 
5620 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5621 	if (conn) {
5622 		if (!ev->status)
5623 			memcpy(conn->features[0], ev->features, 8);
5624 
5625 		if (conn->state == BT_CONFIG) {
5626 			__u8 status;
5627 
5628 			/* If the local controller supports slave-initiated
5629 			 * features exchange, but the remote controller does
5630 			 * not, then it is possible that the error code 0x1a
5631 			 * for unsupported remote feature gets returned.
5632 			 *
5633 			 * In this specific case, allow the connection to
5634 			 * transition into connected state and mark it as
5635 			 * successful.
5636 			 */
5637 			if ((hdev->le_features[0] & HCI_LE_SLAVE_FEATURES) &&
5638 			    !conn->out && ev->status == 0x1a)
5639 				status = 0x00;
5640 			else
5641 				status = ev->status;
5642 
5643 			conn->state = BT_CONNECTED;
5644 			hci_connect_cfm(conn, status);
5645 			hci_conn_drop(conn);
5646 		}
5647 	}
5648 
5649 	hci_dev_unlock(hdev);
5650 }
5651 
/* Handle the HCI LE Long Term Key Request event.
 *
 * The controller asks the host for the LTK to use on the given
 * connection. Look the key up in the SMP key store, verify it matches
 * the EDiv/Rand values from the event, and reply with the key
 * material; if no suitable key is found, send a negative reply.
 */
static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
	if (!ltk)
		goto not_found;

	if (smp_ltk_is_sc(ltk)) {
		/* With SC both EDiv and Rand are set to zero */
		if (ev->ediv || ev->rand)
			goto not_found;
	} else {
		/* For non-SC keys check that EDiv and Rand match */
		if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
			goto not_found;
	}

	/* Copy the key and zero-pad the remainder, since the stored
	 * enc_size may be shorter than the full cp.ltk buffer.
	 */
	memcpy(cp.ltk, ltk->val, ltk->enc_size);
	memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
	cp.handle = cpu_to_le16(conn->handle);

	conn->pending_sec_level = smp_ltk_sec_level(ltk);

	conn->enc_key_size = ltk->enc_size;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
	 * temporary key used to encrypt a connection following
	 * pairing. It is used during the Encrypted Session Setup to
	 * distribute the keys. Later, security can be re-established
	 * using a distributed LTK.
	 */
	if (ltk->type == SMP_STK) {
		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
		/* STKs are single-use: drop the key from the store now
		 * that it has been handed to the controller.
		 */
		list_del_rcu(&ltk->list);
		kfree_rcu(ltk, rcu);
	} else {
		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}
5715 
5716 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
5717 				      u8 reason)
5718 {
5719 	struct hci_cp_le_conn_param_req_neg_reply cp;
5720 
5721 	cp.handle = cpu_to_le16(handle);
5722 	cp.reason = reason;
5723 
5724 	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
5725 		     &cp);
5726 }
5727 
5728 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
5729 					     struct sk_buff *skb)
5730 {
5731 	struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
5732 	struct hci_cp_le_conn_param_req_reply cp;
5733 	struct hci_conn *hcon;
5734 	u16 handle, min, max, latency, timeout;
5735 
5736 	handle = le16_to_cpu(ev->handle);
5737 	min = le16_to_cpu(ev->interval_min);
5738 	max = le16_to_cpu(ev->interval_max);
5739 	latency = le16_to_cpu(ev->latency);
5740 	timeout = le16_to_cpu(ev->timeout);
5741 
5742 	hcon = hci_conn_hash_lookup_handle(hdev, handle);
5743 	if (!hcon || hcon->state != BT_CONNECTED)
5744 		return send_conn_param_neg_reply(hdev, handle,
5745 						 HCI_ERROR_UNKNOWN_CONN_ID);
5746 
5747 	if (hci_check_conn_params(min, max, latency, timeout))
5748 		return send_conn_param_neg_reply(hdev, handle,
5749 						 HCI_ERROR_INVALID_LL_PARAMS);
5750 
5751 	if (hcon->role == HCI_ROLE_MASTER) {
5752 		struct hci_conn_params *params;
5753 		u8 store_hint;
5754 
5755 		hci_dev_lock(hdev);
5756 
5757 		params = hci_conn_params_lookup(hdev, &hcon->dst,
5758 						hcon->dst_type);
5759 		if (params) {
5760 			params->conn_min_interval = min;
5761 			params->conn_max_interval = max;
5762 			params->conn_latency = latency;
5763 			params->supervision_timeout = timeout;
5764 			store_hint = 0x01;
5765 		} else{
5766 			store_hint = 0x00;
5767 		}
5768 
5769 		hci_dev_unlock(hdev);
5770 
5771 		mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
5772 				    store_hint, min, max, latency, timeout);
5773 	}
5774 
5775 	cp.handle = ev->handle;
5776 	cp.interval_min = ev->interval_min;
5777 	cp.interval_max = ev->interval_max;
5778 	cp.latency = ev->latency;
5779 	cp.timeout = ev->timeout;
5780 	cp.min_ce_len = 0;
5781 	cp.max_ce_len = 0;
5782 
5783 	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
5784 }
5785 
5786 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
5787 					 struct sk_buff *skb)
5788 {
5789 	u8 num_reports = skb->data[0];
5790 	void *ptr = &skb->data[1];
5791 
5792 	hci_dev_lock(hdev);
5793 
5794 	while (num_reports--) {
5795 		struct hci_ev_le_direct_adv_info *ev = ptr;
5796 
5797 		process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
5798 				   ev->bdaddr_type, &ev->direct_addr,
5799 				   ev->direct_addr_type, ev->rssi, NULL, 0);
5800 
5801 		ptr += sizeof(*ev);
5802 	}
5803 
5804 	hci_dev_unlock(hdev);
5805 }
5806 
5807 static void hci_le_phy_update_evt(struct hci_dev *hdev, struct sk_buff *skb)
5808 {
5809 	struct hci_ev_le_phy_update_complete *ev = (void *) skb->data;
5810 	struct hci_conn *conn;
5811 
5812 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5813 
5814 	if (!ev->status)
5815 		return;
5816 
5817 	hci_dev_lock(hdev);
5818 
5819 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5820 	if (!conn)
5821 		goto unlock;
5822 
5823 	conn->le_tx_phy = ev->tx_phy;
5824 	conn->le_rx_phy = ev->rx_phy;
5825 
5826 unlock:
5827 	hci_dev_unlock(hdev);
5828 }
5829 
5830 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
5831 {
5832 	struct hci_ev_le_meta *le_ev = (void *) skb->data;
5833 
5834 	skb_pull(skb, sizeof(*le_ev));
5835 
5836 	switch (le_ev->subevent) {
5837 	case HCI_EV_LE_CONN_COMPLETE:
5838 		hci_le_conn_complete_evt(hdev, skb);
5839 		break;
5840 
5841 	case HCI_EV_LE_CONN_UPDATE_COMPLETE:
5842 		hci_le_conn_update_complete_evt(hdev, skb);
5843 		break;
5844 
5845 	case HCI_EV_LE_ADVERTISING_REPORT:
5846 		hci_le_adv_report_evt(hdev, skb);
5847 		break;
5848 
5849 	case HCI_EV_LE_REMOTE_FEAT_COMPLETE:
5850 		hci_le_remote_feat_complete_evt(hdev, skb);
5851 		break;
5852 
5853 	case HCI_EV_LE_LTK_REQ:
5854 		hci_le_ltk_request_evt(hdev, skb);
5855 		break;
5856 
5857 	case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
5858 		hci_le_remote_conn_param_req_evt(hdev, skb);
5859 		break;
5860 
5861 	case HCI_EV_LE_DIRECT_ADV_REPORT:
5862 		hci_le_direct_adv_report_evt(hdev, skb);
5863 		break;
5864 
5865 	case HCI_EV_LE_PHY_UPDATE_COMPLETE:
5866 		hci_le_phy_update_evt(hdev, skb);
5867 		break;
5868 
5869 	case HCI_EV_LE_EXT_ADV_REPORT:
5870 		hci_le_ext_adv_report_evt(hdev, skb);
5871 		break;
5872 
5873 	case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
5874 		hci_le_enh_conn_complete_evt(hdev, skb);
5875 		break;
5876 
5877 	case HCI_EV_LE_EXT_ADV_SET_TERM:
5878 		hci_le_ext_adv_term_evt(hdev, skb);
5879 		break;
5880 
5881 	default:
5882 		break;
5883 	}
5884 }
5885 
/* Check whether skb (a pristine clone of the received event) carries
 * the completion data for the current request. On success the skb is
 * advanced past the event (and command complete) headers so it can be
 * handed to the request's skb-complete callback; returns false when
 * the event does not provide usable completion parameters.
 */
static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
				 u8 event, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;

	if (!skb)
		return false;

	if (skb->len < sizeof(*hdr)) {
		bt_dev_err(hdev, "too short HCI event");
		return false;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		/* The request waits for a specific event; only the event
		 * type needs to match, no opcode check is possible.
		 */
		if (hdr->evt != event)
			return false;
		return true;
	}

	/* Check if request ended in Command Status - no way to retrieve
	 * any extra parameters in this case.
	 */
	if (hdr->evt == HCI_EV_CMD_STATUS)
		return false;

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
			   hdr->evt);
		return false;
	}

	if (skb->len < sizeof(*ev)) {
		bt_dev_err(hdev, "too short cmd_complete event");
		return false;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode != __le16_to_cpu(ev->opcode)) {
		BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
		       __le16_to_cpu(ev->opcode));
		return false;
	}

	return true;
}
5937 
/* Main HCI event dispatcher. Decodes the event header, routes the
 * packet to the matching per-event handler and finally invokes any
 * pending request completion callback. Consumes skb.
 */
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	hci_req_complete_t req_complete = NULL;
	hci_req_complete_skb_t req_complete_skb = NULL;
	struct sk_buff *orig_skb = NULL;
	u8 status = 0, event = hdr->evt, req_evt = 0;
	u16 opcode = HCI_OP_NOP;

	/* Event code 0x00 is not defined; drop such packets early */
	if (!event) {
		bt_dev_warn(hdev, "Received unexpected HCI Event 00000000");
		goto done;
	}

	/* If the outstanding command waits for this specific event,
	 * resolve its completion callbacks now.
	 */
	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) {
		struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
		opcode = __le16_to_cpu(cmd_hdr->opcode);
		hci_req_cmd_complete(hdev, opcode, status, &req_complete,
				     &req_complete_skb);
		req_evt = event;
	}

	/* If it looks like we might end up having to call
	 * req_complete_skb, store a pristine copy of the skb since the
	 * various handlers may modify the original one through
	 * skb_pull() calls, etc.
	 */
	if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
	    event == HCI_EV_CMD_COMPLETE)
		orig_skb = skb_clone(skb, GFP_KERNEL);

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* Route the event payload to its handler */
	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	/* Command Complete/Status may update opcode, status and the
	 * request completion callbacks used below.
	 */
	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb, &opcode, &status,
				     &req_complete, &req_complete_skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb, &opcode, &status, &req_complete,
				   &req_complete_skb);
		break;

	case HCI_EV_HARDWARE_ERROR:
		hci_hardware_error_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_KEY_REFRESH_COMPLETE:
		hci_key_refresh_complete_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_REQUEST:
		hci_user_passkey_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_NOTIFY:
		hci_user_passkey_notify_evt(hdev, skb);
		break;

	case HCI_EV_KEYPRESS_NOTIFY:
		hci_keypress_notify_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

	/* AMP/High-Speed events are only handled when BT_HS is built */
#if IS_ENABLED(CONFIG_BT_HS)
	case HCI_EV_CHANNEL_SELECTED:
		hci_chan_selected_evt(hdev, skb);
		break;

	case HCI_EV_PHY_LINK_COMPLETE:
		hci_phy_link_complete_evt(hdev, skb);
		break;

	case HCI_EV_LOGICAL_LINK_COMPLETE:
		hci_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
		hci_disconn_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
		hci_disconn_phylink_complete_evt(hdev, skb);
		break;
#endif

	case HCI_EV_NUM_COMP_BLOCKS:
		hci_num_comp_blocks_evt(hdev, skb);
		break;

	default:
		BT_DBG("%s event 0x%2.2x", hdev->name, event);
		break;
	}

	/* Complete the pending request, preferring the plain callback;
	 * the skb variant gets the pristine clone (or NULL if it does
	 * not carry usable completion data).
	 */
	if (req_complete) {
		req_complete(hdev, status, opcode);
	} else if (req_complete_skb) {
		if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
			kfree_skb(orig_skb);
			orig_skb = NULL;
		}
		req_complete_skb(hdev, status, opcode, orig_skb);
	}

done:
	kfree_skb(orig_skb);
	kfree_skb(skb);
	hdev->stat.evt_rx++;
}
6168