xref: /openbmc/linux/net/bluetooth/hci_event.c (revision 1f012283)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4 
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI event handling. */
26 
27 #include <asm/unaligned.h>
28 
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
32 
33 #include "hci_request.h"
34 #include "hci_debugfs.h"
35 #include "a2mp.h"
36 #include "amp.h"
37 #include "smp.h"
38 #include "msft.h"
39 #include "eir.h"
40 
/* 16 octets of zeroes, i.e. a link-key-sized all-zero value */
#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"

/* Convert whole seconds to jiffies via the milliseconds helper */
#define secs_to_jiffies(_secs) msecs_to_jiffies((_secs) * 1000)
46 /* Handle HCI Event packets */
47 
/* Handle Command Complete for HCI_Inquiry_Cancel.
 *
 * Clears HCI_INQUIRY, wakes any waiters on that bit and, unless an LE
 * active scan is still running, moves discovery to DISCOVERY_STOPPED.
 * The (possibly rewritten) status is handed back via @new_status so the
 * caller reports the same value this function acted on.
 */
static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb,
				  u8 *new_status)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* It is possible that we receive Inquiry Complete event right
	 * before we receive Inquiry Cancel Command Complete event, in
	 * which case the latter event should have status of Command
	 * Disallowed (0x0c). This should not be treated as error, since
	 * we actually achieve what Inquiry Cancel wants to achieve,
	 * which is to end the last Inquiry session.
	 */
	if (status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
		bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
		status = 0x00;
	}

	*new_status = status;

	if (status)
		return;

	clear_bit(HCI_INQUIRY, &hdev->flags);
	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	hci_dev_lock(hdev);
	/* Set discovery state to stopped if we're not doing LE active
	 * scanning.
	 */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
	    hdev->le_scan_type != LE_SCAN_ACTIVE)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
87 
88 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
89 {
90 	__u8 status = *((__u8 *) skb->data);
91 
92 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
93 
94 	if (status)
95 		return;
96 
97 	hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
98 }
99 
100 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
101 {
102 	__u8 status = *((__u8 *) skb->data);
103 
104 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
105 
106 	if (status)
107 		return;
108 
109 	hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
110 
111 	hci_conn_check_pending(hdev);
112 }
113 
/* Handle Command Complete for HCI_Remote_Name_Request_Cancel.
 *
 * Nothing is cached here; this only logs that the reply arrived.
 */
static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
119 
120 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
121 {
122 	struct hci_rp_role_discovery *rp = (void *) skb->data;
123 	struct hci_conn *conn;
124 
125 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
126 
127 	if (rp->status)
128 		return;
129 
130 	hci_dev_lock(hdev);
131 
132 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
133 	if (conn)
134 		conn->role = rp->role;
135 
136 	hci_dev_unlock(hdev);
137 }
138 
139 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
140 {
141 	struct hci_rp_read_link_policy *rp = (void *) skb->data;
142 	struct hci_conn *conn;
143 
144 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
145 
146 	if (rp->status)
147 		return;
148 
149 	hci_dev_lock(hdev);
150 
151 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
152 	if (conn)
153 		conn->link_policy = __le16_to_cpu(rp->policy);
154 
155 	hci_dev_unlock(hdev);
156 }
157 
158 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
159 {
160 	struct hci_rp_write_link_policy *rp = (void *) skb->data;
161 	struct hci_conn *conn;
162 	void *sent;
163 
164 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
165 
166 	if (rp->status)
167 		return;
168 
169 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
170 	if (!sent)
171 		return;
172 
173 	hci_dev_lock(hdev);
174 
175 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
176 	if (conn)
177 		conn->link_policy = get_unaligned_le16(sent + 2);
178 
179 	hci_dev_unlock(hdev);
180 }
181 
182 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
183 					struct sk_buff *skb)
184 {
185 	struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
186 
187 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
188 
189 	if (rp->status)
190 		return;
191 
192 	hdev->link_policy = __le16_to_cpu(rp->policy);
193 }
194 
195 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
196 					 struct sk_buff *skb)
197 {
198 	__u8 status = *((__u8 *) skb->data);
199 	void *sent;
200 
201 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
202 
203 	if (status)
204 		return;
205 
206 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
207 	if (!sent)
208 		return;
209 
210 	hdev->link_policy = get_unaligned_le16(sent);
211 }
212 
/* Handle Command Complete for HCI_Reset.
 *
 * HCI_RESET is cleared unconditionally so a failed reset does not leave
 * the flag stuck.  On success all volatile state that a controller
 * reset invalidates is dropped: non-persistent dev flags, discovery
 * state, cached TX power levels, advertising/scan-response data, LE
 * scan type, SSP debug mode and the LE accept/resolving lists.
 */
static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	clear_bit(HCI_RESET, &hdev->flags);

	if (status)
		return;

	/* Reset all non-persistent flags */
	hci_dev_clear_volatile_flags(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;

	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
	hdev->scan_rsp_data_len = 0;

	hdev->le_scan_type = LE_SCAN_PASSIVE;

	hdev->ssp_debug_mode = 0;

	hci_bdaddr_list_clear(&hdev->le_accept_list);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);
}
245 
246 static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
247 					struct sk_buff *skb)
248 {
249 	struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
250 	struct hci_cp_read_stored_link_key *sent;
251 
252 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
253 
254 	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
255 	if (!sent)
256 		return;
257 
258 	if (!rp->status && sent->read_all == 0x01) {
259 		hdev->stored_max_keys = rp->max_keys;
260 		hdev->stored_num_keys = rp->num_keys;
261 	}
262 }
263 
264 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
265 					  struct sk_buff *skb)
266 {
267 	struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;
268 
269 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
270 
271 	if (rp->status)
272 		return;
273 
274 	if (rp->num_keys <= hdev->stored_num_keys)
275 		hdev->stored_num_keys -= rp->num_keys;
276 	else
277 		hdev->stored_num_keys = 0;
278 }
279 
/* Handle Command Complete for HCI_Write_Local_Name.
 *
 * With mgmt in control the result is forwarded there (including
 * failures, so userspace sees them); otherwise the name we sent is
 * cached on success.
 */
static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_local_name_complete(hdev, sent, status);
	else if (!status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);
}
300 
301 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
302 {
303 	struct hci_rp_read_local_name *rp = (void *) skb->data;
304 
305 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
306 
307 	if (rp->status)
308 		return;
309 
310 	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
311 	    hci_dev_test_flag(hdev, HCI_CONFIG))
312 		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
313 }
314 
/* Handle Command Complete for HCI_Write_Authentication_Enable.
 *
 * On success the HCI_AUTH flag is synced to the parameter we sent; the
 * result (success or failure) is then reported to mgmt when it is in
 * control.
 */
static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_auth_enable_complete(hdev, status);

	hci_dev_unlock(hdev);
}
342 
343 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
344 {
345 	__u8 status = *((__u8 *) skb->data);
346 	__u8 param;
347 	void *sent;
348 
349 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
350 
351 	if (status)
352 		return;
353 
354 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
355 	if (!sent)
356 		return;
357 
358 	param = *((__u8 *) sent);
359 
360 	if (param)
361 		set_bit(HCI_ENCRYPT, &hdev->flags);
362 	else
363 		clear_bit(HCI_ENCRYPT, &hdev->flags);
364 }
365 
/* Handle Command Complete for HCI_Write_Scan_Enable.
 *
 * On success the ISCAN/PSCAN flags are synced to the scan-enable
 * parameter we sent; on failure the discoverable timeout is reset
 * since the requested scan mode did not take effect.
 */
static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u8 param;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return;

	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	if (status) {
		hdev->discov_timeout = 0;
		goto done;
	}

	if (param & SCAN_INQUIRY)
		set_bit(HCI_ISCAN, &hdev->flags);
	else
		clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_PAGE)
		set_bit(HCI_PSCAN, &hdev->flags);
	else
		clear_bit(HCI_PSCAN, &hdev->flags);

done:
	hci_dev_unlock(hdev);
}
400 
401 static void hci_cc_set_event_filter(struct hci_dev *hdev, struct sk_buff *skb)
402 {
403 	__u8 status = *((__u8 *)skb->data);
404 	struct hci_cp_set_event_filter *cp;
405 	void *sent;
406 
407 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
408 
409 	if (status)
410 		return;
411 
412 	sent = hci_sent_cmd_data(hdev, HCI_OP_SET_EVENT_FLT);
413 	if (!sent)
414 		return;
415 
416 	cp = (struct hci_cp_set_event_filter *)sent;
417 
418 	if (cp->flt_type == HCI_FLT_CLEAR_ALL)
419 		hci_dev_clear_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
420 	else
421 		hci_dev_set_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
422 }
423 
424 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
425 {
426 	struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
427 
428 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
429 
430 	if (rp->status)
431 		return;
432 
433 	memcpy(hdev->dev_class, rp->dev_class, 3);
434 
435 	BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
436 	       hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
437 }
438 
/* Handle Command Complete for HCI_Write_Class_of_Device.
 *
 * On success the class we sent is cached; the result is always
 * forwarded to mgmt (even on error) when it is in control.
 */
static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (status == 0)
		memcpy(hdev->dev_class, sent, 3);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_class_of_dev_complete(hdev, sent, status);

	hci_dev_unlock(hdev);
}
460 
461 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
462 {
463 	struct hci_rp_read_voice_setting *rp = (void *) skb->data;
464 	__u16 setting;
465 
466 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
467 
468 	if (rp->status)
469 		return;
470 
471 	setting = __le16_to_cpu(rp->voice_setting);
472 
473 	if (hdev->voice_setting == setting)
474 		return;
475 
476 	hdev->voice_setting = setting;
477 
478 	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
479 
480 	if (hdev->notify)
481 		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
482 }
483 
484 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
485 				       struct sk_buff *skb)
486 {
487 	__u8 status = *((__u8 *) skb->data);
488 	__u16 setting;
489 	void *sent;
490 
491 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
492 
493 	if (status)
494 		return;
495 
496 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
497 	if (!sent)
498 		return;
499 
500 	setting = get_unaligned_le16(sent);
501 
502 	if (hdev->voice_setting == setting)
503 		return;
504 
505 	hdev->voice_setting = setting;
506 
507 	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
508 
509 	if (hdev->notify)
510 		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
511 }
512 
513 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
514 					  struct sk_buff *skb)
515 {
516 	struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
517 
518 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
519 
520 	if (rp->status)
521 		return;
522 
523 	hdev->num_iac = rp->num_iac;
524 
525 	BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
526 }
527 
/* Handle Command Complete for HCI_Write_Simple_Pairing_Mode.
 *
 * On success the host-SSP feature bit is synced to the mode we sent.
 * The result then goes to mgmt when it is in control; otherwise the
 * HCI_SSP_ENABLED flag is updated directly.
 */
static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_write_ssp_mode *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		if (sent->mode)
			hdev->features[1][0] |= LMP_HOST_SSP;
		else
			hdev->features[1][0] &= ~LMP_HOST_SSP;
	}

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_ssp_enable_complete(hdev, sent->mode, status);
	else if (!status) {
		if (sent->mode)
			hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
	}

	hci_dev_unlock(hdev);
}
559 
/* Handle Command Complete for HCI_Write_Secure_Connections_Host_Support.
 *
 * On success the host-SC feature bit is synced to the support value we
 * sent; the HCI_SC_ENABLED flag is only updated here when mgmt is not
 * in control (mgmt manages that flag itself).
 */
static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	struct hci_cp_write_sc_support *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		if (sent->support)
			hdev->features[1][0] |= LMP_HOST_SC;
		else
			hdev->features[1][0] &= ~LMP_HOST_SC;
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT) && !status) {
		if (sent->support)
			hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
	}

	hci_dev_unlock(hdev);
}
589 
590 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
591 {
592 	struct hci_rp_read_local_version *rp = (void *) skb->data;
593 
594 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
595 
596 	if (rp->status)
597 		return;
598 
599 	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
600 	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
601 		hdev->hci_ver = rp->hci_ver;
602 		hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
603 		hdev->lmp_ver = rp->lmp_ver;
604 		hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
605 		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
606 	}
607 }
608 
609 static void hci_cc_read_local_commands(struct hci_dev *hdev,
610 				       struct sk_buff *skb)
611 {
612 	struct hci_rp_read_local_commands *rp = (void *) skb->data;
613 
614 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
615 
616 	if (rp->status)
617 		return;
618 
619 	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
620 	    hci_dev_test_flag(hdev, HCI_CONFIG))
621 		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
622 }
623 
624 static void hci_cc_read_auth_payload_timeout(struct hci_dev *hdev,
625 					     struct sk_buff *skb)
626 {
627 	struct hci_rp_read_auth_payload_to *rp = (void *)skb->data;
628 	struct hci_conn *conn;
629 
630 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
631 
632 	if (rp->status)
633 		return;
634 
635 	hci_dev_lock(hdev);
636 
637 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
638 	if (conn)
639 		conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);
640 
641 	hci_dev_unlock(hdev);
642 }
643 
/* Handle Command Complete for HCI_Write_Authenticated_Payload_Timeout.
 *
 * On success, mirror the timeout we sent into the connection that the
 * reply's handle refers to.
 */
static void hci_cc_write_auth_payload_timeout(struct hci_dev *hdev,
					      struct sk_buff *skb)
{
	struct hci_rp_write_auth_payload_to *rp = (void *)skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		/* sent + 2: skip the leading 2-octet connection handle of
		 * the command to reach the timeout field — assumes the
		 * {handle, timeout} command layout; verify against
		 * struct hci_cp_write_auth_payload_to.
		 */
		conn->auth_payload_timeout = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}
668 
/* Handle Command Complete for HCI_Read_Local_Supported_Features.
 *
 * Caches feature page 0 and derives the ACL packet types and (e)SCO
 * link types the controller can use from the individual feature bits.
 */
static void hci_cc_read_local_features(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	/* Multi-slot ACL packet types */
	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	/* SCO HV2/HV3 packet types */
	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type  |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type  |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	/* eSCO packet types */
	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	/* EDR eSCO packet types */
	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
}
718 
719 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
720 					   struct sk_buff *skb)
721 {
722 	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
723 
724 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
725 
726 	if (rp->status)
727 		return;
728 
729 	if (hdev->max_page < rp->max_page)
730 		hdev->max_page = rp->max_page;
731 
732 	if (rp->page < HCI_MAX_PAGES)
733 		memcpy(hdev->features[rp->page], rp->features, 8);
734 }
735 
736 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
737 					  struct sk_buff *skb)
738 {
739 	struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
740 
741 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
742 
743 	if (rp->status)
744 		return;
745 
746 	hdev->flow_ctl_mode = rp->mode;
747 }
748 
/* Handle Command Complete for HCI_Read_Buffer_Size.
 *
 * Caches the ACL/SCO MTUs and packet counts and initializes the
 * available-buffer counters to the full quota.
 */
static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu  = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	/* Some controllers report bogus SCO values; override them */
	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
		hdev->sco_mtu  = 64;
		hdev->sco_pkts = 8;
	}

	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
}
774 
775 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
776 {
777 	struct hci_rp_read_bd_addr *rp = (void *) skb->data;
778 
779 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
780 
781 	if (rp->status)
782 		return;
783 
784 	if (test_bit(HCI_INIT, &hdev->flags))
785 		bacpy(&hdev->bdaddr, &rp->bdaddr);
786 
787 	if (hci_dev_test_flag(hdev, HCI_SETUP))
788 		bacpy(&hdev->setup_addr, &rp->bdaddr);
789 }
790 
791 static void hci_cc_read_local_pairing_opts(struct hci_dev *hdev,
792 					   struct sk_buff *skb)
793 {
794 	struct hci_rp_read_local_pairing_opts *rp = (void *) skb->data;
795 
796 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
797 
798 	if (rp->status)
799 		return;
800 
801 	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
802 	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
803 		hdev->pairing_opts = rp->pairing_opts;
804 		hdev->max_enc_key_size = rp->max_key_size;
805 	}
806 }
807 
808 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
809 					   struct sk_buff *skb)
810 {
811 	struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
812 
813 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
814 
815 	if (rp->status)
816 		return;
817 
818 	if (test_bit(HCI_INIT, &hdev->flags)) {
819 		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
820 		hdev->page_scan_window = __le16_to_cpu(rp->window);
821 	}
822 }
823 
824 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
825 					    struct sk_buff *skb)
826 {
827 	u8 status = *((u8 *) skb->data);
828 	struct hci_cp_write_page_scan_activity *sent;
829 
830 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
831 
832 	if (status)
833 		return;
834 
835 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
836 	if (!sent)
837 		return;
838 
839 	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
840 	hdev->page_scan_window = __le16_to_cpu(sent->window);
841 }
842 
843 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
844 					   struct sk_buff *skb)
845 {
846 	struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
847 
848 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
849 
850 	if (rp->status)
851 		return;
852 
853 	if (test_bit(HCI_INIT, &hdev->flags))
854 		hdev->page_scan_type = rp->type;
855 }
856 
857 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
858 					struct sk_buff *skb)
859 {
860 	u8 status = *((u8 *) skb->data);
861 	u8 *type;
862 
863 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
864 
865 	if (status)
866 		return;
867 
868 	type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
869 	if (type)
870 		hdev->page_scan_type = *type;
871 }
872 
873 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
874 					struct sk_buff *skb)
875 {
876 	struct hci_rp_read_data_block_size *rp = (void *) skb->data;
877 
878 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
879 
880 	if (rp->status)
881 		return;
882 
883 	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
884 	hdev->block_len = __le16_to_cpu(rp->block_len);
885 	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
886 
887 	hdev->block_cnt = hdev->num_blocks;
888 
889 	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
890 	       hdev->block_cnt, hdev->block_len);
891 }
892 
/* Handle Command Complete for HCI_Read_Clock.
 *
 * Depending on the "which" parameter of the command we sent, the clock
 * value is stored either on the device (local clock, which == 0x00) or
 * on the connection matching the reply's handle (piconet clock).
 */
static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_clock *rp = (void *) skb->data;
	struct hci_cp_read_clock *cp;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	/* Guard against truncated replies before touching rp fields */
	if (skb->len < sizeof(*rp))
		return;

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!cp)
		goto unlock;

	if (cp->which == 0x00) {
		hdev->clock = le32_to_cpu(rp->clock);
		goto unlock;
	}

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		conn->clock = le32_to_cpu(rp->clock);
		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
	}

unlock:
	hci_dev_unlock(hdev);
}
927 
/* Handle Command Complete for HCI_Read_Local_AMP_Info.
 *
 * Straight copy of the AMP controller capabilities into hdev, with
 * multi-octet fields converted from little-endian wire order.
 */
static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->amp_status = rp->amp_status;
	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
	hdev->amp_type = rp->amp_type;
	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
}
949 
950 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
951 					 struct sk_buff *skb)
952 {
953 	struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
954 
955 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
956 
957 	if (rp->status)
958 		return;
959 
960 	hdev->inq_tx_power = rp->tx_power;
961 }
962 
963 static void hci_cc_read_def_err_data_reporting(struct hci_dev *hdev,
964 					       struct sk_buff *skb)
965 {
966 	struct hci_rp_read_def_err_data_reporting *rp = (void *)skb->data;
967 
968 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
969 
970 	if (rp->status)
971 		return;
972 
973 	hdev->err_data_reporting = rp->err_data_reporting;
974 }
975 
976 static void hci_cc_write_def_err_data_reporting(struct hci_dev *hdev,
977 						struct sk_buff *skb)
978 {
979 	__u8 status = *((__u8 *)skb->data);
980 	struct hci_cp_write_def_err_data_reporting *cp;
981 
982 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
983 
984 	if (status)
985 		return;
986 
987 	cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
988 	if (!cp)
989 		return;
990 
991 	hdev->err_data_reporting = cp->err_data_reporting;
992 }
993 
/* Handle Command Complete for HCI_PIN_Code_Request_Reply.
 *
 * The result is reported to mgmt (including failures) when it is in
 * control; on success the PIN length we sent is remembered on the
 * matching ACL connection.
 */
static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
}
1021 
1022 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
1023 {
1024 	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
1025 
1026 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1027 
1028 	hci_dev_lock(hdev);
1029 
1030 	if (hci_dev_test_flag(hdev, HCI_MGMT))
1031 		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
1032 						 rp->status);
1033 
1034 	hci_dev_unlock(hdev);
1035 }
1036 
1037 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
1038 				       struct sk_buff *skb)
1039 {
1040 	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
1041 
1042 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1043 
1044 	if (rp->status)
1045 		return;
1046 
1047 	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
1048 	hdev->le_pkts = rp->le_max_pkt;
1049 
1050 	hdev->le_cnt = hdev->le_pkts;
1051 
1052 	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
1053 }
1054 
1055 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
1056 					  struct sk_buff *skb)
1057 {
1058 	struct hci_rp_le_read_local_features *rp = (void *) skb->data;
1059 
1060 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1061 
1062 	if (rp->status)
1063 		return;
1064 
1065 	memcpy(hdev->le_features, rp->features, 8);
1066 }
1067 
1068 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
1069 					struct sk_buff *skb)
1070 {
1071 	struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
1072 
1073 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1074 
1075 	if (rp->status)
1076 		return;
1077 
1078 	hdev->adv_tx_power = rp->tx_power;
1079 }
1080 
/* Handle Command Complete for HCI_User_Confirmation_Request_Reply.
 *
 * Only forwards the result to mgmt when it is in control; no local
 * state is updated.
 */
static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
						 rp->status);

	hci_dev_unlock(hdev);
}
1095 
/* Handle Command Complete for HCI_User_Confirmation_Request_Negative_Reply.
 *
 * Only forwards the result to mgmt when it is in control; no local
 * state is updated.
 */
static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}
1111 
/* Handle Command Complete for HCI_User_Passkey_Request_Reply.
 *
 * Only forwards the result to mgmt when it is in control; no local
 * state is updated.
 */
static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
						 0, rp->status);

	hci_dev_unlock(hdev);
}
1126 
/* Handle Command Complete for HCI_User_Passkey_Request_Negative_Reply.
 *
 * Only forwards the result to mgmt when it is in control; no local
 * state is updated.
 */
static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}
1142 
/* Handle Command Complete for HCI_Read_Local_OOB_Data.
 *
 * Nothing is cached here; only the status is logged.
 */
static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
}
1150 
/* Handle Command Complete for HCI_Read_Local_OOB_Extended_Data.
 *
 * Nothing is cached here; only the status is logged.
 */
static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
}
1158 
1159 static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
1160 {
1161 	__u8 status = *((__u8 *) skb->data);
1162 	bdaddr_t *sent;
1163 
1164 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1165 
1166 	if (status)
1167 		return;
1168 
1169 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1170 	if (!sent)
1171 		return;
1172 
1173 	hci_dev_lock(hdev);
1174 
1175 	bacpy(&hdev->random_addr, sent);
1176 
1177 	if (!bacmp(&hdev->rpa, sent)) {
1178 		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
1179 		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired,
1180 				   secs_to_jiffies(hdev->rpa_timeout));
1181 	}
1182 
1183 	hci_dev_unlock(hdev);
1184 }
1185 
1186 static void hci_cc_le_set_default_phy(struct hci_dev *hdev, struct sk_buff *skb)
1187 {
1188 	__u8 status = *((__u8 *) skb->data);
1189 	struct hci_cp_le_set_default_phy *cp;
1190 
1191 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1192 
1193 	if (status)
1194 		return;
1195 
1196 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
1197 	if (!cp)
1198 		return;
1199 
1200 	hci_dev_lock(hdev);
1201 
1202 	hdev->le_tx_def_phys = cp->tx_phys;
1203 	hdev->le_rx_def_phys = cp->rx_phys;
1204 
1205 	hci_dev_unlock(hdev);
1206 }
1207 
1208 static void hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev,
1209                                               struct sk_buff *skb)
1210 {
1211 	__u8 status = *((__u8 *) skb->data);
1212 	struct hci_cp_le_set_adv_set_rand_addr *cp;
1213 	struct adv_info *adv;
1214 
1215 	if (status)
1216 		return;
1217 
1218 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
1219 	/* Update only in case the adv instance since handle 0x00 shall be using
1220 	 * HCI_OP_LE_SET_RANDOM_ADDR since that allows both extended and
1221 	 * non-extended adverting.
1222 	 */
1223 	if (!cp || !cp->handle)
1224 		return;
1225 
1226 	hci_dev_lock(hdev);
1227 
1228 	adv = hci_find_adv_instance(hdev, cp->handle);
1229 	if (adv) {
1230 		bacpy(&adv->random_addr, &cp->bdaddr);
1231 		if (!bacmp(&hdev->rpa, &cp->bdaddr)) {
1232 			adv->rpa_expired = false;
1233 			queue_delayed_work(hdev->workqueue,
1234 					   &adv->rpa_expired_cb,
1235 					   secs_to_jiffies(hdev->rpa_timeout));
1236 		}
1237 	}
1238 
1239 	hci_dev_unlock(hdev);
1240 }
1241 
1242 static void hci_cc_le_read_transmit_power(struct hci_dev *hdev,
1243 					  struct sk_buff *skb)
1244 {
1245 	struct hci_rp_le_read_transmit_power *rp = (void *)skb->data;
1246 
1247 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1248 
1249 	if (rp->status)
1250 		return;
1251 
1252 	hdev->min_le_tx_power = rp->min_le_tx_power;
1253 	hdev->max_le_tx_power = rp->max_le_tx_power;
1254 }
1255 
1256 static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
1257 {
1258 	__u8 *sent, status = *((__u8 *) skb->data);
1259 
1260 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1261 
1262 	if (status)
1263 		return;
1264 
1265 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
1266 	if (!sent)
1267 		return;
1268 
1269 	hci_dev_lock(hdev);
1270 
1271 	/* If we're doing connection initiation as peripheral. Set a
1272 	 * timeout in case something goes wrong.
1273 	 */
1274 	if (*sent) {
1275 		struct hci_conn *conn;
1276 
1277 		hci_dev_set_flag(hdev, HCI_LE_ADV);
1278 
1279 		conn = hci_lookup_le_connect(hdev);
1280 		if (conn)
1281 			queue_delayed_work(hdev->workqueue,
1282 					   &conn->le_conn_timeout,
1283 					   conn->conn_timeout);
1284 	} else {
1285 		hci_dev_clear_flag(hdev, HCI_LE_ADV);
1286 	}
1287 
1288 	hci_dev_unlock(hdev);
1289 }
1290 
/* Command Complete for HCI_OP_LE_SET_EXT_ADV_ENABLE.
 *
 * Mirrors the requested enable state into the HCI_LE_ADV flag and the
 * affected adv_info instance(s).  The command may carry several sets;
 * only the first set block (cp->data) is looked up here.
 * NOTE(review): the list walks below reuse 'adv' as the iterator, so it
 * no longer points at the originally looked-up instance afterwards.
 */
static void hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_adv_enable *cp;
	struct hci_cp_ext_adv_set *set;
	__u8 status = *((__u8 *) skb->data);
	struct adv_info *adv = NULL, *n;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
	if (!cp)
		return;

	/* First advertising-set parameter block follows the header */
	set = (void *)cp->data;

	hci_dev_lock(hdev);

	if (cp->num_of_sets)
		adv = hci_find_adv_instance(hdev, set->handle);

	if (cp->enable) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		if (adv)
			adv->enabled = true;

		/* If advertising was enabled for connection initiation as
		 * peripheral, arm a timeout in case the connect never
		 * completes.
		 */
		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		if (adv) {
			adv->enabled = false;
			/* If just one instance was disabled check if there are
			 * any other instance enabled before clearing HCI_LE_ADV
			 */
			list_for_each_entry_safe(adv, n, &hdev->adv_instances,
						 list) {
				if (adv->enabled)
					goto unlock;
			}
		} else {
			/* All instances shall be considered disabled */
			list_for_each_entry_safe(adv, n, &hdev->adv_instances,
						 list)
				adv->enabled = false;
		}

		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

unlock:
	hci_dev_unlock(hdev);
}
1352 
1353 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1354 {
1355 	struct hci_cp_le_set_scan_param *cp;
1356 	__u8 status = *((__u8 *) skb->data);
1357 
1358 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1359 
1360 	if (status)
1361 		return;
1362 
1363 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1364 	if (!cp)
1365 		return;
1366 
1367 	hci_dev_lock(hdev);
1368 
1369 	hdev->le_scan_type = cp->type;
1370 
1371 	hci_dev_unlock(hdev);
1372 }
1373 
1374 static void hci_cc_le_set_ext_scan_param(struct hci_dev *hdev,
1375 					 struct sk_buff *skb)
1376 {
1377 	struct hci_cp_le_set_ext_scan_params *cp;
1378 	__u8 status = *((__u8 *) skb->data);
1379 	struct hci_cp_le_scan_phy_params *phy_param;
1380 
1381 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1382 
1383 	if (status)
1384 		return;
1385 
1386 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
1387 	if (!cp)
1388 		return;
1389 
1390 	phy_param = (void *)cp->data;
1391 
1392 	hci_dev_lock(hdev);
1393 
1394 	hdev->le_scan_type = phy_param->type;
1395 
1396 	hci_dev_unlock(hdev);
1397 }
1398 
1399 static bool has_pending_adv_report(struct hci_dev *hdev)
1400 {
1401 	struct discovery_state *d = &hdev->discovery;
1402 
1403 	return bacmp(&d->last_adv_addr, BDADDR_ANY);
1404 }
1405 
1406 static void clear_pending_adv_report(struct hci_dev *hdev)
1407 {
1408 	struct discovery_state *d = &hdev->discovery;
1409 
1410 	bacpy(&d->last_adv_addr, BDADDR_ANY);
1411 	d->last_adv_data_len = 0;
1412 }
1413 
1414 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1415 				     u8 bdaddr_type, s8 rssi, u32 flags,
1416 				     u8 *data, u8 len)
1417 {
1418 	struct discovery_state *d = &hdev->discovery;
1419 
1420 	if (len > HCI_MAX_AD_LENGTH)
1421 		return;
1422 
1423 	bacpy(&d->last_adv_addr, bdaddr);
1424 	d->last_adv_addr_type = bdaddr_type;
1425 	d->last_adv_rssi = rssi;
1426 	d->last_adv_flags = flags;
1427 	memcpy(d->last_adv_data, data, len);
1428 	d->last_adv_data_len = len;
1429 }
1430 
/* Common completion handling for both the legacy and extended LE scan
 * enable commands.  Updates the HCI_LE_SCAN flag and, on disable,
 * flushes any buffered advertising report and reconciles the
 * discovery state.
 */
static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
{
	hci_dev_lock(hdev);

	switch (enable) {
	case LE_SCAN_ENABLE:
		hci_dev_set_flag(hdev, HCI_LE_SCAN);
		/* Active scans merge ADV_IND with SCAN_RSP, so start with an
		 * empty pending-report buffer.
		 */
		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
			clear_pending_adv_report(hdev);
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		hci_dev_clear_flag(hdev, HCI_LE_SCAN);

		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request. Mark
		 * therefore discovery as stopped. If this was not
		 * because of a connect request advertising might have
		 * been disabled because of active scanning, so
		 * re-enable it again if necessary.
		 */
		if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
			 hdev->discovery.state == DISCOVERY_FINDING)
			hci_req_reenable_advertising(hdev);

		break;

	default:
		bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
			   enable);
		break;
	}

	hci_dev_unlock(hdev);
}
1487 
1488 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1489 				      struct sk_buff *skb)
1490 {
1491 	struct hci_cp_le_set_scan_enable *cp;
1492 	__u8 status = *((__u8 *) skb->data);
1493 
1494 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1495 
1496 	if (status)
1497 		return;
1498 
1499 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1500 	if (!cp)
1501 		return;
1502 
1503 	le_set_scan_enable_complete(hdev, cp->enable);
1504 }
1505 
1506 static void hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev,
1507 				      struct sk_buff *skb)
1508 {
1509 	struct hci_cp_le_set_ext_scan_enable *cp;
1510 	__u8 status = *((__u8 *) skb->data);
1511 
1512 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1513 
1514 	if (status)
1515 		return;
1516 
1517 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
1518 	if (!cp)
1519 		return;
1520 
1521 	le_set_scan_enable_complete(hdev, cp->enable);
1522 }
1523 
1524 static void hci_cc_le_read_num_adv_sets(struct hci_dev *hdev,
1525 				      struct sk_buff *skb)
1526 {
1527 	struct hci_rp_le_read_num_supported_adv_sets *rp = (void *) skb->data;
1528 
1529 	BT_DBG("%s status 0x%2.2x No of Adv sets %u", hdev->name, rp->status,
1530 	       rp->num_of_sets);
1531 
1532 	if (rp->status)
1533 		return;
1534 
1535 	hdev->le_num_of_adv_sets = rp->num_of_sets;
1536 }
1537 
1538 static void hci_cc_le_read_accept_list_size(struct hci_dev *hdev,
1539 					    struct sk_buff *skb)
1540 {
1541 	struct hci_rp_le_read_accept_list_size *rp = (void *)skb->data;
1542 
1543 	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1544 
1545 	if (rp->status)
1546 		return;
1547 
1548 	hdev->le_accept_list_size = rp->size;
1549 }
1550 
1551 static void hci_cc_le_clear_accept_list(struct hci_dev *hdev,
1552 					struct sk_buff *skb)
1553 {
1554 	__u8 status = *((__u8 *) skb->data);
1555 
1556 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1557 
1558 	if (status)
1559 		return;
1560 
1561 	hci_bdaddr_list_clear(&hdev->le_accept_list);
1562 }
1563 
1564 static void hci_cc_le_add_to_accept_list(struct hci_dev *hdev,
1565 					 struct sk_buff *skb)
1566 {
1567 	struct hci_cp_le_add_to_accept_list *sent;
1568 	__u8 status = *((__u8 *) skb->data);
1569 
1570 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1571 
1572 	if (status)
1573 		return;
1574 
1575 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
1576 	if (!sent)
1577 		return;
1578 
1579 	hci_bdaddr_list_add(&hdev->le_accept_list, &sent->bdaddr,
1580 			    sent->bdaddr_type);
1581 }
1582 
1583 static void hci_cc_le_del_from_accept_list(struct hci_dev *hdev,
1584 					   struct sk_buff *skb)
1585 {
1586 	struct hci_cp_le_del_from_accept_list *sent;
1587 	__u8 status = *((__u8 *) skb->data);
1588 
1589 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1590 
1591 	if (status)
1592 		return;
1593 
1594 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST);
1595 	if (!sent)
1596 		return;
1597 
1598 	hci_bdaddr_list_del(&hdev->le_accept_list, &sent->bdaddr,
1599 			    sent->bdaddr_type);
1600 }
1601 
1602 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1603 					    struct sk_buff *skb)
1604 {
1605 	struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1606 
1607 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1608 
1609 	if (rp->status)
1610 		return;
1611 
1612 	memcpy(hdev->le_states, rp->le_states, 8);
1613 }
1614 
1615 static void hci_cc_le_read_def_data_len(struct hci_dev *hdev,
1616 					struct sk_buff *skb)
1617 {
1618 	struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;
1619 
1620 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1621 
1622 	if (rp->status)
1623 		return;
1624 
1625 	hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
1626 	hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
1627 }
1628 
1629 static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
1630 					 struct sk_buff *skb)
1631 {
1632 	struct hci_cp_le_write_def_data_len *sent;
1633 	__u8 status = *((__u8 *) skb->data);
1634 
1635 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1636 
1637 	if (status)
1638 		return;
1639 
1640 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
1641 	if (!sent)
1642 		return;
1643 
1644 	hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
1645 	hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
1646 }
1647 
1648 static void hci_cc_le_add_to_resolv_list(struct hci_dev *hdev,
1649 					 struct sk_buff *skb)
1650 {
1651 	struct hci_cp_le_add_to_resolv_list *sent;
1652 	__u8 status = *((__u8 *) skb->data);
1653 
1654 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1655 
1656 	if (status)
1657 		return;
1658 
1659 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
1660 	if (!sent)
1661 		return;
1662 
1663 	hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
1664 				sent->bdaddr_type, sent->peer_irk,
1665 				sent->local_irk);
1666 }
1667 
1668 static void hci_cc_le_del_from_resolv_list(struct hci_dev *hdev,
1669 					  struct sk_buff *skb)
1670 {
1671 	struct hci_cp_le_del_from_resolv_list *sent;
1672 	__u8 status = *((__u8 *) skb->data);
1673 
1674 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1675 
1676 	if (status)
1677 		return;
1678 
1679 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
1680 	if (!sent)
1681 		return;
1682 
1683 	hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
1684 			    sent->bdaddr_type);
1685 }
1686 
1687 static void hci_cc_le_clear_resolv_list(struct hci_dev *hdev,
1688 				       struct sk_buff *skb)
1689 {
1690 	__u8 status = *((__u8 *) skb->data);
1691 
1692 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1693 
1694 	if (status)
1695 		return;
1696 
1697 	hci_bdaddr_list_clear(&hdev->le_resolv_list);
1698 }
1699 
1700 static void hci_cc_le_read_resolv_list_size(struct hci_dev *hdev,
1701 					   struct sk_buff *skb)
1702 {
1703 	struct hci_rp_le_read_resolv_list_size *rp = (void *) skb->data;
1704 
1705 	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1706 
1707 	if (rp->status)
1708 		return;
1709 
1710 	hdev->le_resolv_list_size = rp->size;
1711 }
1712 
1713 static void hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev,
1714 						struct sk_buff *skb)
1715 {
1716 	__u8 *sent, status = *((__u8 *) skb->data);
1717 
1718 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1719 
1720 	if (status)
1721 		return;
1722 
1723 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
1724 	if (!sent)
1725 		return;
1726 
1727 	hci_dev_lock(hdev);
1728 
1729 	if (*sent)
1730 		hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
1731 	else
1732 		hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);
1733 
1734 	hci_dev_unlock(hdev);
1735 }
1736 
1737 static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
1738 					struct sk_buff *skb)
1739 {
1740 	struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;
1741 
1742 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1743 
1744 	if (rp->status)
1745 		return;
1746 
1747 	hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
1748 	hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
1749 	hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
1750 	hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
1751 }
1752 
1753 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1754 					   struct sk_buff *skb)
1755 {
1756 	struct hci_cp_write_le_host_supported *sent;
1757 	__u8 status = *((__u8 *) skb->data);
1758 
1759 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1760 
1761 	if (status)
1762 		return;
1763 
1764 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1765 	if (!sent)
1766 		return;
1767 
1768 	hci_dev_lock(hdev);
1769 
1770 	if (sent->le) {
1771 		hdev->features[1][0] |= LMP_HOST_LE;
1772 		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
1773 	} else {
1774 		hdev->features[1][0] &= ~LMP_HOST_LE;
1775 		hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
1776 		hci_dev_clear_flag(hdev, HCI_ADVERTISING);
1777 	}
1778 
1779 	if (sent->simul)
1780 		hdev->features[1][0] |= LMP_HOST_LE_BREDR;
1781 	else
1782 		hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
1783 
1784 	hci_dev_unlock(hdev);
1785 }
1786 
1787 static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1788 {
1789 	struct hci_cp_le_set_adv_param *cp;
1790 	u8 status = *((u8 *) skb->data);
1791 
1792 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1793 
1794 	if (status)
1795 		return;
1796 
1797 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
1798 	if (!cp)
1799 		return;
1800 
1801 	hci_dev_lock(hdev);
1802 	hdev->adv_addr_type = cp->own_address_type;
1803 	hci_dev_unlock(hdev);
1804 }
1805 
/* Command Complete for HCI_OP_LE_SET_EXT_ADV_PARAMS.
 *
 * Records the own-address type and the TX power selected by the
 * controller: instance 0 keeps its power in hdev itself, other
 * instances in their adv_info.  The advertising data is refreshed
 * afterwards since it may include the now-known TX power.
 */
static void hci_cc_set_ext_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_le_set_ext_adv_params *rp = (void *) skb->data;
	struct hci_cp_le_set_ext_adv_params *cp;
	struct adv_info *adv_instance;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
	if (!cp)
		return;

	hci_dev_lock(hdev);
	hdev->adv_addr_type = cp->own_addr_type;
	if (!cp->handle) {
		/* Store in hdev for instance 0 */
		hdev->adv_tx_power = rp->tx_power;
	} else {
		adv_instance = hci_find_adv_instance(hdev, cp->handle);
		if (adv_instance)
			adv_instance->tx_power = rp->tx_power;
	}
	/* Update adv data as tx power is known now */
	hci_req_update_adv_data(hdev, cp->handle);

	hci_dev_unlock(hdev);
}
1836 
1837 static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
1838 {
1839 	struct hci_rp_read_rssi *rp = (void *) skb->data;
1840 	struct hci_conn *conn;
1841 
1842 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1843 
1844 	if (rp->status)
1845 		return;
1846 
1847 	hci_dev_lock(hdev);
1848 
1849 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1850 	if (conn)
1851 		conn->rssi = rp->rssi;
1852 
1853 	hci_dev_unlock(hdev);
1854 }
1855 
1856 static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
1857 {
1858 	struct hci_cp_read_tx_power *sent;
1859 	struct hci_rp_read_tx_power *rp = (void *) skb->data;
1860 	struct hci_conn *conn;
1861 
1862 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1863 
1864 	if (rp->status)
1865 		return;
1866 
1867 	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
1868 	if (!sent)
1869 		return;
1870 
1871 	hci_dev_lock(hdev);
1872 
1873 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1874 	if (!conn)
1875 		goto unlock;
1876 
1877 	switch (sent->type) {
1878 	case 0x00:
1879 		conn->tx_power = rp->tx_power;
1880 		break;
1881 	case 0x01:
1882 		conn->max_tx_power = rp->tx_power;
1883 		break;
1884 	}
1885 
1886 unlock:
1887 	hci_dev_unlock(hdev);
1888 }
1889 
1890 static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, struct sk_buff *skb)
1891 {
1892 	u8 status = *((u8 *) skb->data);
1893 	u8 *mode;
1894 
1895 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1896 
1897 	if (status)
1898 		return;
1899 
1900 	mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
1901 	if (mode)
1902 		hdev->ssp_debug_mode = *mode;
1903 }
1904 
1905 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1906 {
1907 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1908 
1909 	if (status) {
1910 		hci_conn_check_pending(hdev);
1911 		return;
1912 	}
1913 
1914 	set_bit(HCI_INQUIRY, &hdev->flags);
1915 }
1916 
/* Command Status for HCI_OP_CREATE_CONN.
 *
 * On failure, tear down or park the pending connection object; on
 * success make sure a connection object exists for the address so the
 * later Connection Complete event has something to attach to.
 */
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			/* 0x0c is HCI error "Command Disallowed"; keep the
			 * connection around for a retry (up to 2 attempts),
			 * otherwise give up and report the failure.
			 */
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			/* Incoming-initiated case: no object exists yet */
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
					    HCI_ROLE_MASTER);
			if (!conn)
				bt_dev_err(hdev, "no memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}
1954 
/* Command Status for HCI_OP_ADD_SCO.
 *
 * Only failures matter here: if adding the SCO channel failed, close
 * and delete the SCO connection hanging off the ACL link identified by
 * the handle from the command we sent.
 */
static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_add_sco *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		/* The SCO connection is linked off the ACL connection */
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}
1989 
1990 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1991 {
1992 	struct hci_cp_auth_requested *cp;
1993 	struct hci_conn *conn;
1994 
1995 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1996 
1997 	if (!status)
1998 		return;
1999 
2000 	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
2001 	if (!cp)
2002 		return;
2003 
2004 	hci_dev_lock(hdev);
2005 
2006 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2007 	if (conn) {
2008 		if (conn->state == BT_CONFIG) {
2009 			hci_connect_cfm(conn, status);
2010 			hci_conn_drop(conn);
2011 		}
2012 	}
2013 
2014 	hci_dev_unlock(hdev);
2015 }
2016 
2017 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
2018 {
2019 	struct hci_cp_set_conn_encrypt *cp;
2020 	struct hci_conn *conn;
2021 
2022 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
2023 
2024 	if (!status)
2025 		return;
2026 
2027 	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
2028 	if (!cp)
2029 		return;
2030 
2031 	hci_dev_lock(hdev);
2032 
2033 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2034 	if (conn) {
2035 		if (conn->state == BT_CONFIG) {
2036 			hci_connect_cfm(conn, status);
2037 			hci_conn_drop(conn);
2038 		}
2039 	}
2040 
2041 	hci_dev_unlock(hdev);
2042 }
2043 
2044 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
2045 				    struct hci_conn *conn)
2046 {
2047 	if (conn->state != BT_CONFIG || !conn->out)
2048 		return 0;
2049 
2050 	if (conn->pending_sec_level == BT_SECURITY_SDP)
2051 		return 0;
2052 
2053 	/* Only request authentication for SSP connections or non-SSP
2054 	 * devices with sec_level MEDIUM or HIGH or if MITM protection
2055 	 * is requested.
2056 	 */
2057 	if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
2058 	    conn->pending_sec_level != BT_SECURITY_FIPS &&
2059 	    conn->pending_sec_level != BT_SECURITY_HIGH &&
2060 	    conn->pending_sec_level != BT_SECURITY_MEDIUM)
2061 		return 0;
2062 
2063 	return 1;
2064 }
2065 
2066 static int hci_resolve_name(struct hci_dev *hdev,
2067 				   struct inquiry_entry *e)
2068 {
2069 	struct hci_cp_remote_name_req cp;
2070 
2071 	memset(&cp, 0, sizeof(cp));
2072 
2073 	bacpy(&cp.bdaddr, &e->data.bdaddr);
2074 	cp.pscan_rep_mode = e->data.pscan_rep_mode;
2075 	cp.pscan_mode = e->data.pscan_mode;
2076 	cp.clock_offset = e->data.clock_offset;
2077 
2078 	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2079 }
2080 
2081 static bool hci_resolve_next_name(struct hci_dev *hdev)
2082 {
2083 	struct discovery_state *discov = &hdev->discovery;
2084 	struct inquiry_entry *e;
2085 
2086 	if (list_empty(&discov->resolve))
2087 		return false;
2088 
2089 	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2090 	if (!e)
2091 		return false;
2092 
2093 	if (hci_resolve_name(hdev, e) == 0) {
2094 		e->name_state = NAME_PENDING;
2095 		return true;
2096 	}
2097 
2098 	return false;
2099 }
2100 
/* Process an incoming (or failed) remote name for @bdaddr: notify mgmt
 * of the connection/name, update the matching inquiry cache entry, and
 * keep the name-resolution phase of discovery moving until no pending
 * names remain.
 */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	/* Update the mgmt connected state if necessary. Be careful with
	 * conn objects that exist but are not (yet) connected however.
	 * Only those in BT_CONFIG or BT_CONNECTED states can be
	 * considered connected.
	 */
	if (conn &&
	    (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, name, name_len);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in a list of found devices names of which
	 * are pending. there is no need to continue resolving a next name as it
	 * will be done upon receiving another Remote Name Request Complete
	 * Event */
	if (!e)
		return;

	list_del(&e->list);
	if (name) {
		e->name_state = NAME_KNOWN;
		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
				 e->data.rssi, name, name_len);
	} else {
		/* Name request failed; remember so we don't retry */
		e->name_state = NAME_NOT_KNOWN;
	}

	/* Keep resolving while more names are pending */
	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
2149 
/* Command Status for HCI_OP_REMOTE_NAME_REQ.
 *
 * Only failures are handled here: feed the failure into the pending
 * name-resolution logic and, when the name request was piggy-backed on
 * an outgoing connection setup, start the authentication step that
 * would otherwise have followed the name response.
 */
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* If successful wait for the name req complete event before
	 * checking for the need to do authentication */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	/* Report the failed name lookup (NULL name) to mgmt */
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	/* Start authentication now unless one is already pending */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested auth_cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		auth_cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(auth_cp), &auth_cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
2192 
2193 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
2194 {
2195 	struct hci_cp_read_remote_features *cp;
2196 	struct hci_conn *conn;
2197 
2198 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
2199 
2200 	if (!status)
2201 		return;
2202 
2203 	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
2204 	if (!cp)
2205 		return;
2206 
2207 	hci_dev_lock(hdev);
2208 
2209 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2210 	if (conn) {
2211 		if (conn->state == BT_CONFIG) {
2212 			hci_connect_cfm(conn, status);
2213 			hci_conn_drop(conn);
2214 		}
2215 	}
2216 
2217 	hci_dev_unlock(hdev);
2218 }
2219 
2220 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
2221 {
2222 	struct hci_cp_read_remote_ext_features *cp;
2223 	struct hci_conn *conn;
2224 
2225 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
2226 
2227 	if (!status)
2228 		return;
2229 
2230 	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
2231 	if (!cp)
2232 		return;
2233 
2234 	hci_dev_lock(hdev);
2235 
2236 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2237 	if (conn) {
2238 		if (conn->state == BT_CONFIG) {
2239 			hci_connect_cfm(conn, status);
2240 			hci_conn_drop(conn);
2241 		}
2242 	}
2243 
2244 	hci_dev_unlock(hdev);
2245 }
2246 
/* Command Status for HCI_OP_SETUP_SYNC_CONN.
 *
 * Only failures matter here: if the synchronous (SCO/eSCO) connection
 * setup failed, close and delete the sync connection hanging off the
 * ACL link identified by the handle from the command we sent.
 */
static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_setup_sync_conn *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		/* The sync connection is linked off the ACL connection */
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}
2281 
/* Command Status for HCI_OP_ENHANCED_SETUP_SYNC_CONN.
 *
 * Only failures matter here: if the enhanced synchronous connection
 * setup failed, close and delete the sync connection hanging off the
 * ACL link identified by the handle from the command we sent.
 */
static void hci_cs_enhanced_setup_sync_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_enhanced_setup_sync_conn *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

	bt_dev_dbg(hdev, "handle 0x%4.4x", handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		/* The sync connection is linked off the ACL connection */
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}
2316 
2317 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
2318 {
2319 	struct hci_cp_sniff_mode *cp;
2320 	struct hci_conn *conn;
2321 
2322 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
2323 
2324 	if (!status)
2325 		return;
2326 
2327 	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
2328 	if (!cp)
2329 		return;
2330 
2331 	hci_dev_lock(hdev);
2332 
2333 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2334 	if (conn) {
2335 		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2336 
2337 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2338 			hci_sco_setup(conn, status);
2339 	}
2340 
2341 	hci_dev_unlock(hdev);
2342 }
2343 
2344 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
2345 {
2346 	struct hci_cp_exit_sniff_mode *cp;
2347 	struct hci_conn *conn;
2348 
2349 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
2350 
2351 	if (!status)
2352 		return;
2353 
2354 	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
2355 	if (!cp)
2356 		return;
2357 
2358 	hci_dev_lock(hdev);
2359 
2360 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2361 	if (conn) {
2362 		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2363 
2364 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2365 			hci_sco_setup(conn, status);
2366 	}
2367 
2368 	hci_dev_unlock(hdev);
2369 }
2370 
/* Handle HCI Command Status for HCI_OP_DISCONNECT.
 *
 * A zero status means the controller accepted the command and the result
 * will arrive as a Disconnection Complete event, so nothing is done here.
 * A non-zero status means the disconnect request itself failed.
 */
static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_disconnect *cp;
	struct hci_conn *conn;

	if (!status)
		return;

	/* Recover the parameters of the command this status refers to */
	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		/* Tell mgmt the disconnect attempt failed */
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, status);

		/* For an LE peripheral link, restore the advertising
		 * instance that was active on this connection.
		 */
		if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
			hdev->cur_adv_instance = conn->adv_instance;
			hci_req_reenable_advertising(hdev);
		}

		/* If the disconnection failed for any reason, the upper layer
		 * does not retry to disconnect in current implementation.
		 * Hence, we need to do some basic cleanup here and re-enable
		 * advertising if necessary.
		 */
		hci_conn_del(conn);
	}

	hci_dev_unlock(hdev);
}
2405 
2406 static u8 ev_bdaddr_type(struct hci_dev *hdev, u8 type, bool *resolved)
2407 {
2408 	/* When using controller based address resolution, then the new
2409 	 * address types 0x02 and 0x03 are used. These types need to be
2410 	 * converted back into either public address or random address type
2411 	 */
2412 	switch (type) {
2413 	case ADDR_LE_DEV_PUBLIC_RESOLVED:
2414 		if (resolved)
2415 			*resolved = true;
2416 		return ADDR_LE_DEV_PUBLIC;
2417 	case ADDR_LE_DEV_RANDOM_RESOLVED:
2418 		if (resolved)
2419 			*resolved = true;
2420 		return ADDR_LE_DEV_RANDOM;
2421 	}
2422 
2423 	if (resolved)
2424 		*resolved = false;
2425 	return type;
2426 }
2427 
/* Common Command Status bookkeeping for LE create-connection commands.
 *
 * Records the initiator/responder address pair used for this connection
 * attempt (needed later by SMP) and arms a connection timeout for
 * attempts that target a specific peer rather than the accept list.
 * Called with hdev locked by the hci_cs_le_*create_conn handlers.
 */
static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
			      u8 peer_addr_type, u8 own_address_type,
			      u8 filter_policy)
{
	struct hci_conn *conn;

	conn = hci_conn_hash_lookup_le(hdev, peer_addr,
				       peer_addr_type);
	if (!conn)
		return;

	/* Normalize resolved address types (0x02/0x03) back to plain ones */
	own_address_type = ev_bdaddr_type(hdev, own_address_type, NULL);

	/* Store the initiator and responder address information which
	 * is needed for SMP. These values will not change during the
	 * lifetime of the connection.
	 */
	conn->init_addr_type = own_address_type;
	if (own_address_type == ADDR_LE_DEV_RANDOM)
		bacpy(&conn->init_addr, &hdev->random_addr);
	else
		bacpy(&conn->init_addr, &hdev->bdaddr);

	conn->resp_addr_type = peer_addr_type;
	bacpy(&conn->resp_addr, peer_addr);

	/* We don't want the connection attempt to stick around
	 * indefinitely since LE doesn't have a page timeout concept
	 * like BR/EDR. Set a timer for any connection that doesn't use
	 * the accept list for connecting.
	 */
	if (filter_policy == HCI_LE_USE_PEER_ADDR)
		queue_delayed_work(conn->hdev->workqueue,
				   &conn->le_conn_timeout,
				   conn->conn_timeout);
}
2464 
2465 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
2466 {
2467 	struct hci_cp_le_create_conn *cp;
2468 
2469 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
2470 
2471 	/* All connection failure handling is taken care of by the
2472 	 * hci_le_conn_failed function which is triggered by the HCI
2473 	 * request completion callbacks used for connecting.
2474 	 */
2475 	if (status)
2476 		return;
2477 
2478 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
2479 	if (!cp)
2480 		return;
2481 
2482 	hci_dev_lock(hdev);
2483 
2484 	cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2485 			  cp->own_address_type, cp->filter_policy);
2486 
2487 	hci_dev_unlock(hdev);
2488 }
2489 
2490 static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
2491 {
2492 	struct hci_cp_le_ext_create_conn *cp;
2493 
2494 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
2495 
2496 	/* All connection failure handling is taken care of by the
2497 	 * hci_le_conn_failed function which is triggered by the HCI
2498 	 * request completion callbacks used for connecting.
2499 	 */
2500 	if (status)
2501 		return;
2502 
2503 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
2504 	if (!cp)
2505 		return;
2506 
2507 	hci_dev_lock(hdev);
2508 
2509 	cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2510 			  cp->own_addr_type, cp->filter_policy);
2511 
2512 	hci_dev_unlock(hdev);
2513 }
2514 
2515 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
2516 {
2517 	struct hci_cp_le_read_remote_features *cp;
2518 	struct hci_conn *conn;
2519 
2520 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
2521 
2522 	if (!status)
2523 		return;
2524 
2525 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
2526 	if (!cp)
2527 		return;
2528 
2529 	hci_dev_lock(hdev);
2530 
2531 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2532 	if (conn) {
2533 		if (conn->state == BT_CONFIG) {
2534 			hci_connect_cfm(conn, status);
2535 			hci_conn_drop(conn);
2536 		}
2537 	}
2538 
2539 	hci_dev_unlock(hdev);
2540 }
2541 
/* Handle HCI Command Status for HCI_OP_LE_START_ENC.
 *
 * Only failures are of interest: if starting encryption on an LE link
 * fails while the connection is established, the link is disconnected
 * with an authentication-failure reason and the reference is dropped.
 */
static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_start_enc *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (!conn)
		goto unlock;

	/* Only act if the link is still up; otherwise there is nothing
	 * left to tear down.
	 */
	if (conn->state != BT_CONNECTED)
		goto unlock;

	hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
	hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}
2571 
2572 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
2573 {
2574 	struct hci_cp_switch_role *cp;
2575 	struct hci_conn *conn;
2576 
2577 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
2578 
2579 	if (!status)
2580 		return;
2581 
2582 	cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
2583 	if (!cp)
2584 		return;
2585 
2586 	hci_dev_lock(hdev);
2587 
2588 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2589 	if (conn)
2590 		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2591 
2592 	hci_dev_unlock(hdev);
2593 }
2594 
/* Handle the HCI Inquiry Complete event.
 *
 * Clears the HCI_INQUIRY flag, wakes any waiters on it, and then - when
 * mgmt-driven discovery is active - either moves on to resolving names
 * of discovered devices or marks discovery as stopped.
 */
static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* Kick any connection attempt that was deferred behind inquiry */
	hci_conn_check_pending(hdev);

	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		return;

	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	/* The discovery state machine below only applies to mgmt users */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	hci_dev_lock(hdev);

	if (discov->state != DISCOVERY_FINDING)
		goto unlock;

	if (list_empty(&discov->resolve)) {
		/* When BR/EDR inquiry is active and no LE scanning is in
		 * progress, then change discovery state to indicate completion.
		 *
		 * When running LE scanning and BR/EDR inquiry simultaneously
		 * and the LE scan already finished, then change the discovery
		 * state to indicate completion.
		 */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	/* Entries still need their names resolved; start with the first
	 * one and switch the discovery state to RESOLVING.
	 */
	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (e && hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
	} else {
		/* When BR/EDR inquiry is active and no LE scanning is in
		 * progress, then change discovery state to indicate completion.
		 *
		 * When running LE scanning and BR/EDR inquiry simultaneously
		 * and the LE scan already finished, then change the discovery
		 * state to indicate completion.
		 */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
}
2653 
2654 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2655 {
2656 	struct inquiry_data data;
2657 	struct inquiry_info *info = (void *) (skb->data + 1);
2658 	int num_rsp = *((__u8 *) skb->data);
2659 
2660 	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2661 
2662 	if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
2663 		return;
2664 
2665 	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
2666 		return;
2667 
2668 	hci_dev_lock(hdev);
2669 
2670 	for (; num_rsp; num_rsp--, info++) {
2671 		u32 flags;
2672 
2673 		bacpy(&data.bdaddr, &info->bdaddr);
2674 		data.pscan_rep_mode	= info->pscan_rep_mode;
2675 		data.pscan_period_mode	= info->pscan_period_mode;
2676 		data.pscan_mode		= info->pscan_mode;
2677 		memcpy(data.dev_class, info->dev_class, 3);
2678 		data.clock_offset	= info->clock_offset;
2679 		data.rssi		= HCI_RSSI_INVALID;
2680 		data.ssp_mode		= 0x00;
2681 
2682 		flags = hci_inquiry_cache_update(hdev, &data, false);
2683 
2684 		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2685 				  info->dev_class, HCI_RSSI_INVALID,
2686 				  flags, NULL, 0, NULL, 0);
2687 	}
2688 
2689 	hci_dev_unlock(hdev);
2690 }
2691 
/* Handle the HCI Connection Complete event.
 *
 * Finds (or, for auto-connected/deferred cases, creates) the hci_conn
 * matching the event, then either finalizes the connection setup on
 * success or tears the connection down on failure.
 */
static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* Connection may not exist if auto-connected. Check the bredr
		 * allowlist to see if this device is allowed to auto connect.
		 * If link is an ACL type, create a connection class
		 * automatically.
		 *
		 * Auto-connect will only occur if the event filter is
		 * programmed with a given address. Right now, event filter is
		 * only used during suspend.
		 */
		if (ev->link_type == ACL_LINK &&
		    hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
						      &ev->bdaddr,
						      BDADDR_BREDR)) {
			conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
					    HCI_ROLE_SLAVE);
			if (!conn) {
				bt_dev_err(hdev, "no memory for new conn");
				goto unlock;
			}
		} else {
			if (ev->link_type != SCO_LINK)
				goto unlock;

			/* A SCO completion may resolve an eSCO attempt that
			 * fell back to SCO; retarget that connection object.
			 */
			conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK,
						       &ev->bdaddr);
			if (!conn)
				goto unlock;

			conn->type = SCO_LINK;
		}
	}

	if (!ev->status) {
		conn->handle = __le16_to_cpu(ev->handle);

		if (conn->type == ACL_LINK) {
			/* ACL links stay in BT_CONFIG until the remote
			 * feature/name exchange below finishes.
			 */
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			/* Allow extra time for pairing on incoming legacy
			 * (non-SSP) links without a stored link key.
			 */
			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);

		/* Inherit device-wide auth/encrypt settings */
		if (test_bit(HCI_AUTH, &hdev->flags))
			set_bit(HCI_CONN_AUTH, &conn->flags);

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);

			hci_req_update_scan(hdev);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}
	} else {
		conn->state = BT_CLOSED;
		if (conn->type == ACL_LINK)
			mgmt_connect_failed(hdev, &conn->dst, conn->type,
					    conn->dst_type, ev->status);
	}

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

	if (ev->status) {
		/* Failed connections are reported and removed */
		hci_connect_cfm(conn, ev->status);
		hci_conn_del(conn);
	} else if (ev->link_type == SCO_LINK) {
		switch (conn->setting & SCO_AIRMODE_MASK) {
		case SCO_AIRMODE_CVSD:
			if (hdev->notify)
				hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
			break;
		}

		hci_connect_cfm(conn, ev->status);
	}

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
2806 
2807 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
2808 {
2809 	struct hci_cp_reject_conn_req cp;
2810 
2811 	bacpy(&cp.bdaddr, bdaddr);
2812 	cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2813 	hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
2814 }
2815 
/* Handle the HCI Connection Request event.
 *
 * Decides whether to reject, accept immediately, or defer the incoming
 * connection based on the link-mode mask, the reject list and, for mgmt
 * users, the connectable state / accept list.
 */
static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;
	struct inquiry_entry *ie;
	struct hci_conn *conn;
	__u8 flags = 0;

	BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
	       ev->link_type);

	/* Let the protocol layers veto or defer the connection */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
				      &flags);

	if (!(mask & HCI_LM_ACCEPT)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* Explicitly blocked devices are always rejected */
	if (hci_bdaddr_list_lookup(&hdev->reject_list, &ev->bdaddr,
				   BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* Require HCI_CONNECTABLE or an accept list entry to accept the
	 * connection. These features are only touched through mgmt so
	 * only do the checks if HCI_MGMT is set.
	 */
	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
	    !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
	    !hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr,
					       BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* Connection accepted */

	hci_dev_lock(hdev);

	/* Refresh the cached device class from the event */
	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		memcpy(ie->data.dev_class, ev->dev_class, 3);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
			&ev->bdaddr);
	if (!conn) {
		conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
				    HCI_ROLE_SLAVE);
		if (!conn) {
			bt_dev_err(hdev, "no memory for new connection");
			hci_dev_unlock(hdev);
			return;
		}
	}

	memcpy(conn->dev_class, ev->dev_class, 3);

	hci_dev_unlock(hdev);

	/* ACL requests (and sync requests on non-eSCO controllers that are
	 * not deferred) are accepted with Accept Connection Request.
	 */
	if (ev->link_type == ACL_LINK ||
	    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
		struct hci_cp_accept_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);

		if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
			cp.role = 0x00; /* Become central */
		else
			cp.role = 0x01; /* Remain peripheral */

		hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
	} else if (!(flags & HCI_PROTO_DEFER)) {
		/* Synchronous connections use Accept Synchronous Connection
		 * Request with default bandwidth/latency parameters.
		 */
		struct hci_cp_accept_sync_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.pkt_type = cpu_to_le16(conn->pkt_type);

		cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.max_latency    = cpu_to_le16(0xffff);
		cp.content_format = cpu_to_le16(hdev->voice_setting);
		cp.retrans_effort = 0xff;

		hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
			     &cp);
	} else {
		/* Deferred: let the upper layer decide later */
		conn->state = BT_CONNECT2;
		hci_connect_cfm(conn, 0);
	}
}
2910 
2911 static u8 hci_to_mgmt_reason(u8 err)
2912 {
2913 	switch (err) {
2914 	case HCI_ERROR_CONNECTION_TIMEOUT:
2915 		return MGMT_DEV_DISCONN_TIMEOUT;
2916 	case HCI_ERROR_REMOTE_USER_TERM:
2917 	case HCI_ERROR_REMOTE_LOW_RESOURCES:
2918 	case HCI_ERROR_REMOTE_POWER_OFF:
2919 		return MGMT_DEV_DISCONN_REMOTE;
2920 	case HCI_ERROR_LOCAL_HOST_TERM:
2921 		return MGMT_DEV_DISCONN_LOCAL_HOST;
2922 	default:
2923 		return MGMT_DEV_DISCONN_UNKNOWN;
2924 	}
2925 }
2926 
/* Handle the HCI Disconnection Complete event.
 *
 * Reports the disconnection to mgmt, updates auto-connect parameters,
 * notifies the protocol layers, wakes the suspend path if it was waiting
 * for all links to drop, re-enables advertising where applicable, and
 * finally deletes the connection object.
 */
static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = (void *) skb->data;
	u8 reason;
	struct hci_conn_params *params;
	struct hci_conn *conn;
	bool mgmt_connected;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->status) {
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, ev->status);
		goto unlock;
	}

	conn->state = BT_CLOSED;

	/* Only report the disconnect to mgmt if it knew about the link */
	mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);

	/* Prefer the auth-failure reason recorded earlier on the conn */
	if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
		reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
	else
		reason = hci_to_mgmt_reason(ev->reason);

	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
				reason, mgmt_connected);

	if (conn->type == ACL_LINK) {
		if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
			hci_remove_link_key(hdev, &conn->dst);

		hci_req_update_scan(hdev);
	}

	/* Re-queue auto-connect entries so the device reconnects when
	 * seen again (always, or after a link loss depending on policy).
	 */
	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_LINK_LOSS:
			if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
				break;
			fallthrough;

		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			list_del_init(&params->action);
			list_add(&params->action, &hdev->pend_le_conns);
			hci_update_background_scan(hdev);
			break;

		default:
			break;
		}
	}

	hci_disconn_cfm(conn, ev->reason);

	/* The suspend notifier is waiting for all devices to disconnect so
	 * clear the bit from pending tasks and inform the wait queue.
	 */
	if (list_empty(&hdev->conn_hash.list) &&
	    test_and_clear_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks)) {
		wake_up(&hdev->suspend_wait_q);
	}

	/* Re-enable advertising if necessary, since it might
	 * have been disabled by the connection. From the
	 * HCI_LE_Set_Advertise_Enable command description in
	 * the core specification (v4.0):
	 * "The Controller shall continue advertising until the Host
	 * issues an LE_Set_Advertise_Enable command with
	 * Advertising_Enable set to 0x00 (Advertising is disabled)
	 * or until a connection is created or until the Advertising
	 * is timed out due to Directed Advertising."
	 */
	if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
		hdev->cur_adv_instance = conn->adv_instance;
		hci_req_reenable_advertising(hdev);
	}

	hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
3018 
/* Handle the HCI Authentication Complete event.
 *
 * Updates the connection's auth state, informs mgmt on failure, and
 * continues the setup sequence (typically by requesting encryption)
 * depending on whether the link is still being configured.
 */
static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		/* Re-authentication of a legacy (non-SSP) link does not
		 * actually re-run pairing; don't upgrade the state.
		 */
		if (!hci_conn_ssp_enabled(conn) &&
		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
			bt_dev_info(hdev, "re-auth of legacy device is not possible.");
		} else {
			set_bit(HCI_CONN_AUTH, &conn->flags);
			conn->sec_level = conn->pending_sec_level;
		}
	} else {
		/* Remember key-missing failures for the disconnect report */
		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		mgmt_auth_failed(conn, ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		/* During setup, successful auth on an SSP link is followed
		 * by enabling encryption; otherwise setup is finished here.
		 */
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

	/* If encryption was requested while auth was pending, issue it now
	 * (on success) or report the failure to the encryption waiters.
	 */
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
3088 
/* Handle the HCI Remote Name Request Complete event.
 *
 * Feeds the resolved name (or the failure) into the mgmt pending-name
 * machinery, then starts authentication on the connection if outgoing
 * auth is still needed for it.
 */
static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	/* Name resolution bookkeeping only applies to mgmt users */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto check_auth;

	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3130 
/* Completion callback for HCI_OP_READ_ENC_KEY_SIZE.
 *
 * Stores the encryption key size on the connection (0 on failure, which
 * forces a disconnect by the link-policy checks) and then delivers the
 * deferred encrypt-change notification to the upper layers.
 */
static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status,
				       u16 opcode, struct sk_buff *skb)
{
	const struct hci_rp_read_enc_key_size *rp;
	struct hci_conn *conn;
	u16 handle;

	BT_DBG("%s status 0x%02x", hdev->name, status);

	if (!skb || skb->len < sizeof(*rp)) {
		bt_dev_err(hdev, "invalid read key size response");
		return;
	}

	rp = (void *)skb->data;
	handle = le16_to_cpu(rp->handle);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn)
		goto unlock;

	/* While unexpected, the read_enc_key_size command may fail. The most
	 * secure approach is to then assume the key size is 0 to force a
	 * disconnection.
	 */
	if (rp->status) {
		bt_dev_err(hdev, "failed to read key size for handle %u",
			   handle);
		conn->enc_key_size = 0;
	} else {
		conn->enc_key_size = rp->key_size;
	}

	hci_encrypt_cfm(conn, 0);

unlock:
	hci_dev_unlock(hdev);
}
3171 
/* Handle the HCI Encryption Change event.
 *
 * Updates the connection's auth/encrypt/FIPS/AES-CCM flags, enforces the
 * link security requirements (disconnecting on violation), reads the
 * encryption key size for encrypted ACL links, optionally programs the
 * authenticated payload timeout, and notifies the upper layers.
 */
static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (ev->encrypt) {
			/* Encryption implies authentication */
			set_bit(HCI_CONN_AUTH, &conn->flags);
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
			conn->sec_level = conn->pending_sec_level;

			/* P-256 authentication key implies FIPS */
			if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
				set_bit(HCI_CONN_FIPS, &conn->flags);

			/* encrypt == 0x02 means AES-CCM on BR/EDR; LE links
			 * always use AES-CCM.
			 */
			if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
			    conn->type == LE_LINK)
				set_bit(HCI_CONN_AES_CCM, &conn->flags);
		} else {
			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}
	}

	/* We should disregard the current RPA and generate a new one
	 * whenever the encryption procedure fails.
	 */
	if (ev->status && conn->type == LE_LINK) {
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
	}

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* Check link security requirements are met */
	if (!hci_conn_check_link_mode(conn))
		ev->status = HCI_ERROR_AUTH_FAILURE;

	if (ev->status && conn->state == BT_CONNECTED) {
		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		/* Notify upper layers so they can cleanup before
		 * disconnecting.
		 */
		hci_encrypt_cfm(conn, ev->status);
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	/* Try reading the encryption key size for encrypted ACL links */
	if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
		struct hci_cp_read_enc_key_size cp;
		struct hci_request req;

		/* Only send HCI_Read_Encryption_Key_Size if the
		 * controller really supports it. If it doesn't, assume
		 * the default size (16).
		 */
		if (!(hdev->commands[20] & 0x10)) {
			conn->enc_key_size = HCI_LINK_KEY_SIZE;
			goto notify;
		}

		hci_req_init(&req, hdev);

		cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp);

		if (hci_req_run_skb(&req, read_enc_key_size_complete)) {
			bt_dev_err(hdev, "sending read key size failed");
			conn->enc_key_size = HCI_LINK_KEY_SIZE;
			goto notify;
		}

		/* Notification is deferred to read_enc_key_size_complete() */
		goto unlock;
	}

	/* Set the default Authenticated Payload Timeout after
	 * an LE Link is established. As per Core Spec v5.0, Vol 2, Part B
	 * Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be
	 * sent when the link is active and Encryption is enabled, the conn
	 * type can be either LE or ACL and controller must support LMP Ping.
	 * Ensure for AES-CCM encryption as well.
	 */
	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
	    test_bit(HCI_CONN_AES_CCM, &conn->flags) &&
	    ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) ||
	     (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) {
		struct hci_cp_write_auth_payload_to cp;

		cp.handle = cpu_to_le16(conn->handle);
		cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
		hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,
			     sizeof(cp), &cp);
	}

notify:
	hci_encrypt_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);
}
3285 
3286 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
3287 					     struct sk_buff *skb)
3288 {
3289 	struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
3290 	struct hci_conn *conn;
3291 
3292 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3293 
3294 	hci_dev_lock(hdev);
3295 
3296 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3297 	if (conn) {
3298 		if (!ev->status)
3299 			set_bit(HCI_CONN_SECURE, &conn->flags);
3300 
3301 		clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3302 
3303 		hci_key_change_cfm(conn, ev->status);
3304 	}
3305 
3306 	hci_dev_unlock(hdev);
3307 }
3308 
/* Handle the HCI Read Remote Supported Features Complete event.
 *
 * Stores page 0 of the remote features and continues connection setup:
 * either fetches the extended feature page, requests the remote name,
 * or finishes setup if no outgoing authentication is needed.
 */
static void hci_remote_features_evt(struct hci_dev *hdev,
				    struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		memcpy(conn->features[0], ev->features, 8);

	/* Everything below is part of the connection setup sequence */
	if (conn->state != BT_CONFIG)
		goto unlock;

	/* If both sides support extended features, fetch page 1 next */
	if (!ev->status && lmp_ext_feat_capable(hdev) &&
	    lmp_ext_feat_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
			     sizeof(cp), &cp);
		goto unlock;
	}

	/* Resolve the remote name before telling mgmt about the device */
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, NULL, 0);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3357 
3358 static inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev, u8 ncmd)
3359 {
3360 	cancel_delayed_work(&hdev->cmd_timer);
3361 
3362 	if (!test_bit(HCI_RESET, &hdev->flags)) {
3363 		if (ncmd) {
3364 			cancel_delayed_work(&hdev->ncmd_timer);
3365 			atomic_set(&hdev->cmd_cnt, 1);
3366 		} else {
3367 			schedule_delayed_work(&hdev->ncmd_timer,
3368 					      HCI_NCMD_TIMEOUT);
3369 		}
3370 	}
3371 }
3372 
/* HCI Command Complete event handler.
 *
 * Extracts the opcode and the status byte of the completed command,
 * dispatches the remaining payload to the matching hci_cc_* handler,
 * updates the command credit accounting and finally wakes the command
 * queue worker if more commands are waiting.
 *
 * On return, @opcode and @status hold the completed command's opcode and
 * status; @req_complete/@req_complete_skb are filled in by
 * hci_req_cmd_complete() when a request waits on this command.
 */
static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
				 u16 *opcode, u8 *status,
				 hci_req_complete_t *req_complete,
				 hci_req_complete_skb_t *req_complete_skb)
{
	struct hci_ev_cmd_complete *ev = (void *) skb->data;

	*opcode = __le16_to_cpu(ev->opcode);
	/* The first byte of the return parameters is the status byte */
	*status = skb->data[sizeof(*ev)];

	/* Strip the event header so the hci_cc_* handlers see only the
	 * command's return parameters.
	 */
	skb_pull(skb, sizeof(*ev));

	switch (*opcode) {
	case HCI_OP_INQUIRY_CANCEL:
		hci_cc_inquiry_cancel(hdev, skb, status);
		break;

	case HCI_OP_PERIODIC_INQ:
		hci_cc_periodic_inq(hdev, skb);
		break;

	case HCI_OP_EXIT_PERIODIC_INQ:
		hci_cc_exit_periodic_inq(hdev, skb);
		break;

	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
		hci_cc_remote_name_req_cancel(hdev, skb);
		break;

	case HCI_OP_ROLE_DISCOVERY:
		hci_cc_role_discovery(hdev, skb);
		break;

	case HCI_OP_READ_LINK_POLICY:
		hci_cc_read_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_LINK_POLICY:
		hci_cc_write_link_policy(hdev, skb);
		break;

	case HCI_OP_READ_DEF_LINK_POLICY:
		hci_cc_read_def_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_DEF_LINK_POLICY:
		hci_cc_write_def_link_policy(hdev, skb);
		break;

	case HCI_OP_RESET:
		hci_cc_reset(hdev, skb);
		break;

	case HCI_OP_READ_STORED_LINK_KEY:
		hci_cc_read_stored_link_key(hdev, skb);
		break;

	case HCI_OP_DELETE_STORED_LINK_KEY:
		hci_cc_delete_stored_link_key(hdev, skb);
		break;

	case HCI_OP_WRITE_LOCAL_NAME:
		hci_cc_write_local_name(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_NAME:
		hci_cc_read_local_name(hdev, skb);
		break;

	case HCI_OP_WRITE_AUTH_ENABLE:
		hci_cc_write_auth_enable(hdev, skb);
		break;

	case HCI_OP_WRITE_ENCRYPT_MODE:
		hci_cc_write_encrypt_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SCAN_ENABLE:
		hci_cc_write_scan_enable(hdev, skb);
		break;

	case HCI_OP_SET_EVENT_FLT:
		hci_cc_set_event_filter(hdev, skb);
		break;

	case HCI_OP_READ_CLASS_OF_DEV:
		hci_cc_read_class_of_dev(hdev, skb);
		break;

	case HCI_OP_WRITE_CLASS_OF_DEV:
		hci_cc_write_class_of_dev(hdev, skb);
		break;

	case HCI_OP_READ_VOICE_SETTING:
		hci_cc_read_voice_setting(hdev, skb);
		break;

	case HCI_OP_WRITE_VOICE_SETTING:
		hci_cc_write_voice_setting(hdev, skb);
		break;

	case HCI_OP_READ_NUM_SUPPORTED_IAC:
		hci_cc_read_num_supported_iac(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_MODE:
		hci_cc_write_ssp_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SC_SUPPORT:
		hci_cc_write_sc_support(hdev, skb);
		break;

	case HCI_OP_READ_AUTH_PAYLOAD_TO:
		hci_cc_read_auth_payload_timeout(hdev, skb);
		break;

	case HCI_OP_WRITE_AUTH_PAYLOAD_TO:
		hci_cc_write_auth_payload_timeout(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_VERSION:
		hci_cc_read_local_version(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_COMMANDS:
		hci_cc_read_local_commands(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_FEATURES:
		hci_cc_read_local_features(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_EXT_FEATURES:
		hci_cc_read_local_ext_features(hdev, skb);
		break;

	case HCI_OP_READ_BUFFER_SIZE:
		hci_cc_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_READ_BD_ADDR:
		hci_cc_read_bd_addr(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_PAIRING_OPTS:
		hci_cc_read_local_pairing_opts(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
		hci_cc_read_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
		hci_cc_write_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_TYPE:
		hci_cc_read_page_scan_type(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_TYPE:
		hci_cc_write_page_scan_type(hdev, skb);
		break;

	case HCI_OP_READ_DATA_BLOCK_SIZE:
		hci_cc_read_data_block_size(hdev, skb);
		break;

	case HCI_OP_READ_FLOW_CONTROL_MODE:
		hci_cc_read_flow_control_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_INFO:
		hci_cc_read_local_amp_info(hdev, skb);
		break;

	case HCI_OP_READ_CLOCK:
		hci_cc_read_clock(hdev, skb);
		break;

	case HCI_OP_READ_INQ_RSP_TX_POWER:
		hci_cc_read_inq_rsp_tx_power(hdev, skb);
		break;

	case HCI_OP_READ_DEF_ERR_DATA_REPORTING:
		hci_cc_read_def_err_data_reporting(hdev, skb);
		break;

	case HCI_OP_WRITE_DEF_ERR_DATA_REPORTING:
		hci_cc_write_def_err_data_reporting(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_REPLY:
		hci_cc_pin_code_reply(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_NEG_REPLY:
		hci_cc_pin_code_neg_reply(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_DATA:
		hci_cc_read_local_oob_data(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
		hci_cc_read_local_oob_ext_data(hdev, skb);
		break;

	case HCI_OP_LE_READ_BUFFER_SIZE:
		hci_cc_le_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_LE_READ_LOCAL_FEATURES:
		hci_cc_le_read_local_features(hdev, skb);
		break;

	case HCI_OP_LE_READ_ADV_TX_POWER:
		hci_cc_le_read_adv_tx_power(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_REPLY:
		hci_cc_user_confirm_reply(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_NEG_REPLY:
		hci_cc_user_confirm_neg_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_REPLY:
		hci_cc_user_passkey_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_NEG_REPLY:
		hci_cc_user_passkey_neg_reply(hdev, skb);
		break;

	case HCI_OP_LE_SET_RANDOM_ADDR:
		hci_cc_le_set_random_addr(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_ENABLE:
		hci_cc_le_set_adv_enable(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_PARAM:
		hci_cc_le_set_scan_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_ENABLE:
		hci_cc_le_set_scan_enable(hdev, skb);
		break;

	case HCI_OP_LE_READ_ACCEPT_LIST_SIZE:
		hci_cc_le_read_accept_list_size(hdev, skb);
		break;

	case HCI_OP_LE_CLEAR_ACCEPT_LIST:
		hci_cc_le_clear_accept_list(hdev, skb);
		break;

	case HCI_OP_LE_ADD_TO_ACCEPT_LIST:
		hci_cc_le_add_to_accept_list(hdev, skb);
		break;

	case HCI_OP_LE_DEL_FROM_ACCEPT_LIST:
		hci_cc_le_del_from_accept_list(hdev, skb);
		break;

	case HCI_OP_LE_READ_SUPPORTED_STATES:
		hci_cc_le_read_supported_states(hdev, skb);
		break;

	case HCI_OP_LE_READ_DEF_DATA_LEN:
		hci_cc_le_read_def_data_len(hdev, skb);
		break;

	case HCI_OP_LE_WRITE_DEF_DATA_LEN:
		hci_cc_le_write_def_data_len(hdev, skb);
		break;

	case HCI_OP_LE_ADD_TO_RESOLV_LIST:
		hci_cc_le_add_to_resolv_list(hdev, skb);
		break;

	case HCI_OP_LE_DEL_FROM_RESOLV_LIST:
		hci_cc_le_del_from_resolv_list(hdev, skb);
		break;

	case HCI_OP_LE_CLEAR_RESOLV_LIST:
		hci_cc_le_clear_resolv_list(hdev, skb);
		break;

	case HCI_OP_LE_READ_RESOLV_LIST_SIZE:
		hci_cc_le_read_resolv_list_size(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADDR_RESOLV_ENABLE:
		hci_cc_le_set_addr_resolution_enable(hdev, skb);
		break;

	case HCI_OP_LE_READ_MAX_DATA_LEN:
		hci_cc_le_read_max_data_len(hdev, skb);
		break;

	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
		hci_cc_write_le_host_supported(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_PARAM:
		hci_cc_set_adv_param(hdev, skb);
		break;

	case HCI_OP_READ_RSSI:
		hci_cc_read_rssi(hdev, skb);
		break;

	case HCI_OP_READ_TX_POWER:
		hci_cc_read_tx_power(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_DEBUG_MODE:
		hci_cc_write_ssp_debug_mode(hdev, skb);
		break;

	case HCI_OP_LE_SET_EXT_SCAN_PARAMS:
		hci_cc_le_set_ext_scan_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_EXT_SCAN_ENABLE:
		hci_cc_le_set_ext_scan_enable(hdev, skb);
		break;

	case HCI_OP_LE_SET_DEFAULT_PHY:
		hci_cc_le_set_default_phy(hdev, skb);
		break;

	case HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS:
		hci_cc_le_read_num_adv_sets(hdev, skb);
		break;

	case HCI_OP_LE_SET_EXT_ADV_PARAMS:
		hci_cc_set_ext_adv_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_EXT_ADV_ENABLE:
		hci_cc_le_set_ext_adv_enable(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_SET_RAND_ADDR:
		hci_cc_le_set_adv_set_random_addr(hdev, skb);
		break;

	case HCI_OP_LE_READ_TRANSMIT_POWER:
		hci_cc_le_read_transmit_power(hdev, skb);
		break;

	default:
		/* Unknown or unhandled opcode: only log it */
		BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
		break;
	}

	/* Update command credits from the Num_HCI_Command_Packets field */
	handle_cmd_cnt_and_timer(hdev, ev->ncmd);

	hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
			     req_complete_skb);

	/* NOTE(review): with a command still pending, further queue
	 * processing is skipped for this event - confirm against the
	 * HCI_CMD_PENDING users elsewhere in the file.
	 */
	if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
		bt_dev_err(hdev,
			   "unexpected event for opcode 0x%4.4x", *opcode);
		return;
	}

	/* Kick the command worker if credits and queued commands remain */
	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
		queue_work(hdev->workqueue, &hdev->cmd_work);
}
3749 
/* HCI Command Status event handler.
 *
 * Extracts the opcode and status of the command the event refers to,
 * dispatches to the matching hci_cs_* handler, updates command credit
 * accounting, optionally flags the owning request as complete and wakes
 * the command queue worker if more commands are pending.
 */
static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
			       u16 *opcode, u8 *status,
			       hci_req_complete_t *req_complete,
			       hci_req_complete_skb_t *req_complete_skb)
{
	struct hci_ev_cmd_status *ev = (void *) skb->data;

	skb_pull(skb, sizeof(*ev));

	*opcode = __le16_to_cpu(ev->opcode);
	*status = ev->status;

	switch (*opcode) {
	case HCI_OP_INQUIRY:
		hci_cs_inquiry(hdev, ev->status);
		break;

	case HCI_OP_CREATE_CONN:
		hci_cs_create_conn(hdev, ev->status);
		break;

	case HCI_OP_DISCONNECT:
		hci_cs_disconnect(hdev, ev->status);
		break;

	case HCI_OP_ADD_SCO:
		hci_cs_add_sco(hdev, ev->status);
		break;

	case HCI_OP_AUTH_REQUESTED:
		hci_cs_auth_requested(hdev, ev->status);
		break;

	case HCI_OP_SET_CONN_ENCRYPT:
		hci_cs_set_conn_encrypt(hdev, ev->status);
		break;

	case HCI_OP_REMOTE_NAME_REQ:
		hci_cs_remote_name_req(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_FEATURES:
		hci_cs_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_EXT_FEATURES:
		hci_cs_read_remote_ext_features(hdev, ev->status);
		break;

	case HCI_OP_SETUP_SYNC_CONN:
		hci_cs_setup_sync_conn(hdev, ev->status);
		break;

	case HCI_OP_ENHANCED_SETUP_SYNC_CONN:
		hci_cs_enhanced_setup_sync_conn(hdev, ev->status);
		break;

	case HCI_OP_SNIFF_MODE:
		hci_cs_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_EXIT_SNIFF_MODE:
		hci_cs_exit_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_SWITCH_ROLE:
		hci_cs_switch_role(hdev, ev->status);
		break;

	case HCI_OP_LE_CREATE_CONN:
		hci_cs_le_create_conn(hdev, ev->status);
		break;

	case HCI_OP_LE_READ_REMOTE_FEATURES:
		hci_cs_le_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_LE_START_ENC:
		hci_cs_le_start_enc(hdev, ev->status);
		break;

	case HCI_OP_LE_EXT_CREATE_CONN:
		hci_cs_le_ext_create_conn(hdev, ev->status);
		break;

	default:
		/* Unknown or unhandled opcode: only log it */
		BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
		break;
	}

	/* Update command credits from the Num_HCI_Command_Packets field */
	handle_cmd_cnt_and_timer(hdev, ev->ncmd);

	/* Indicate request completion if the command failed. Also, if
	 * we're not waiting for a special event and we get a success
	 * command status we should try to flag the request as completed
	 * (since for this kind of commands there will not be a command
	 * complete event).
	 */
	if (ev->status ||
	    (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->hci.req_event))
		hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
				     req_complete_skb);

	if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
		bt_dev_err(hdev,
			   "unexpected event for opcode 0x%4.4x", *opcode);
		return;
	}

	/* Kick the command worker if credits and queued commands remain */
	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
		queue_work(hdev->workqueue, &hdev->cmd_work);
}
3862 
3863 static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
3864 {
3865 	struct hci_ev_hardware_error *ev = (void *) skb->data;
3866 
3867 	hdev->hw_error_code = ev->code;
3868 
3869 	queue_work(hdev->req_workqueue, &hdev->error_reset);
3870 }
3871 
3872 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3873 {
3874 	struct hci_ev_role_change *ev = (void *) skb->data;
3875 	struct hci_conn *conn;
3876 
3877 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3878 
3879 	hci_dev_lock(hdev);
3880 
3881 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3882 	if (conn) {
3883 		if (!ev->status)
3884 			conn->role = ev->role;
3885 
3886 		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3887 
3888 		hci_role_switch_cfm(conn, ev->status, ev->role);
3889 	}
3890 
3891 	hci_dev_unlock(hdev);
3892 }
3893 
/* HCI Number Of Completed Packets event handler.
 *
 * Returns transmit credits to the per-link-type counters (ACL, LE, SCO)
 * for each handle listed in the event and wakes the TX worker. Only
 * valid in packet-based flow control mode.
 */
static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
	int i;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
		bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Reject events too short for the advertised handle count */
	if (skb->len < sizeof(*ev) ||
	    skb->len < struct_size(ev, handles, ev->num_hndl)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_pkts_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16  handle, count;

		handle = __le16_to_cpu(info->handle);
		count  = __le16_to_cpu(info->count);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= count;

		/* Return the credits, clamped to the controller's buffer
		 * count for that link type.
		 */
		switch (conn->type) {
		case ACL_LINK:
			hdev->acl_cnt += count;
			if (hdev->acl_cnt > hdev->acl_pkts)
				hdev->acl_cnt = hdev->acl_pkts;
			break;

		case LE_LINK:
			/* Without dedicated LE buffers, LE traffic shares
			 * the ACL buffer pool.
			 */
			if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		case SCO_LINK:
			hdev->sco_cnt += count;
			if (hdev->sco_cnt > hdev->sco_pkts)
				hdev->sco_cnt = hdev->sco_pkts;
			break;

		default:
			bt_dev_err(hdev, "unknown type %d conn %p",
				   conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
3960 
3961 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
3962 						 __u16 handle)
3963 {
3964 	struct hci_chan *chan;
3965 
3966 	switch (hdev->dev_type) {
3967 	case HCI_PRIMARY:
3968 		return hci_conn_hash_lookup_handle(hdev, handle);
3969 	case HCI_AMP:
3970 		chan = hci_chan_lookup_handle(hdev, handle);
3971 		if (chan)
3972 			return chan->conn;
3973 		break;
3974 	default:
3975 		bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
3976 		break;
3977 	}
3978 
3979 	return NULL;
3980 }
3981 
/* HCI Number Of Completed Data Blocks event handler.
 *
 * Returns transmit block credits to the shared block counter for each
 * handle listed in the event and wakes the TX worker. Only valid in
 * block-based flow control mode.
 */
static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
	int i;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
		bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Reject events too short for the advertised handle count */
	if (skb->len < sizeof(*ev) ||
	    skb->len < struct_size(ev, handles, ev->num_hndl)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
	       ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_blocks_info *info = &ev->handles[i];
		struct hci_conn *conn = NULL;
		__u16  handle, block_count;

		handle = __le16_to_cpu(info->handle);
		block_count = __le16_to_cpu(info->blocks);

		/* Handles may refer to AMP logical channels, hence the
		 * dev_type-aware lookup.
		 */
		conn = __hci_conn_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= block_count;

		/* Return the blocks, clamped to the controller total */
		switch (conn->type) {
		case ACL_LINK:
		case AMP_LINK:
			hdev->block_cnt += block_count;
			if (hdev->block_cnt > hdev->num_blocks)
				hdev->block_cnt = hdev->num_blocks;
			break;

		default:
			bt_dev_err(hdev, "unknown type %d conn %p",
				   conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
4032 
4033 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
4034 {
4035 	struct hci_ev_mode_change *ev = (void *) skb->data;
4036 	struct hci_conn *conn;
4037 
4038 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4039 
4040 	hci_dev_lock(hdev);
4041 
4042 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4043 	if (conn) {
4044 		conn->mode = ev->mode;
4045 
4046 		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
4047 					&conn->flags)) {
4048 			if (conn->mode == HCI_CM_ACTIVE)
4049 				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4050 			else
4051 				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4052 		}
4053 
4054 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
4055 			hci_sco_setup(conn, ev->status);
4056 	}
4057 
4058 	hci_dev_unlock(hdev);
4059 }
4060 
/* HCI PIN Code Request event handler.
 *
 * Either rejects the pairing (device not bondable and not the
 * authentication initiator) or forwards the PIN request to user space
 * via mgmt, flagging whether a 16-digit secure PIN is required.
 */
static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pin_code_req *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Pairing started on an established link: switch the disconnect
	 * timeout to the (longer) pairing timeout. The hold/drop pair
	 * re-arms the disconnect timer with the new value.
	 */
	if (conn->state == BT_CONNECTED) {
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
		hci_conn_drop(conn);
	}

	if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
	    !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
	} else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
		u8 secure;

		/* High security requires a 16-digit PIN */
		if (conn->pending_sec_level == BT_SECURITY_HIGH)
			secure = 1;
		else
			secure = 0;

		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
	}

unlock:
	hci_dev_unlock(hdev);
}
4098 
4099 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
4100 {
4101 	if (key_type == HCI_LK_CHANGED_COMBINATION)
4102 		return;
4103 
4104 	conn->pin_length = pin_len;
4105 	conn->key_type = key_type;
4106 
4107 	switch (key_type) {
4108 	case HCI_LK_LOCAL_UNIT:
4109 	case HCI_LK_REMOTE_UNIT:
4110 	case HCI_LK_DEBUG_COMBINATION:
4111 		return;
4112 	case HCI_LK_COMBINATION:
4113 		if (pin_len == 16)
4114 			conn->pending_sec_level = BT_SECURITY_HIGH;
4115 		else
4116 			conn->pending_sec_level = BT_SECURITY_MEDIUM;
4117 		break;
4118 	case HCI_LK_UNAUTH_COMBINATION_P192:
4119 	case HCI_LK_UNAUTH_COMBINATION_P256:
4120 		conn->pending_sec_level = BT_SECURITY_MEDIUM;
4121 		break;
4122 	case HCI_LK_AUTH_COMBINATION_P192:
4123 		conn->pending_sec_level = BT_SECURITY_HIGH;
4124 		break;
4125 	case HCI_LK_AUTH_COMBINATION_P256:
4126 		conn->pending_sec_level = BT_SECURITY_FIPS;
4127 		break;
4128 	}
4129 }
4130 
/* HCI Link Key Request event handler.
 *
 * Looks up a stored link key for the peer and replies with it, unless
 * the key's strength is insufficient for the connection's requested
 * security level, in which case a negative reply is sent instead.
 */
static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = (void *) skb->data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	BT_DBG("%s", hdev->name);

	/* Without mgmt the kernel keeps no key store; let the event pass */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		BT_DBG("%s link key not found for %pMR", hdev->name,
		       &ev->bdaddr);
		goto not_found;
	}

	BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
	       &ev->bdaddr);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);

		/* Unauthenticated keys must not satisfy an auth_type that
		 * requires MITM protection (low bit set).
		 */
		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
			BT_DBG("%s ignoring unauthenticated key", hdev->name);
			goto not_found;
		}

		/* Combination keys from short PINs are too weak for high
		 * or FIPS security levels.
		 */
		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
		    (conn->pending_sec_level == BT_SECURITY_HIGH ||
		     conn->pending_sec_level == BT_SECURITY_FIPS)) {
			BT_DBG("%s ignoring key unauthenticated for high security",
			       hdev->name);
			goto not_found;
		}

		conn_set_key(conn, key->type, key->pin_len);
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}
4190 
/* HCI Link Key Notification event handler.
 *
 * Records the newly created link key on the connection, stores it in the
 * key list, notifies user space via mgmt and decides whether the key is
 * kept (persistent) or flushed when the connection closes.
 */
static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = (void *) skb->data;
	struct hci_conn *conn;
	struct link_key *key;
	bool persistent;
	u8 pin_len = 0;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Pairing finished: re-arm the disconnect timer with the normal
	 * timeout via the hold/drop pair.
	 */
	hci_conn_hold(conn);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(conn);

	set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
	conn_set_key(conn, ev->key_type, conn->pin_length);

	/* Without mgmt there is no key store to update */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
			        ev->key_type, pin_len, &persistent);
	if (!key)
		goto unlock;

	/* Update connection information since adding the key will have
	 * fixed up the type in the case of changed combination keys.
	 */
	if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
		conn_set_key(conn, key->type, key->pin_len);

	mgmt_new_link_key(hdev, key, persistent);

	/* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
	 * is set. If it's not set simply remove the key from the kernel
	 * list (we've still notified user space about it but with
	 * store_hint being 0).
	 */
	if (key->type == HCI_LK_DEBUG_COMBINATION &&
	    !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
		goto unlock;
	}

	/* Non-persistent keys are flushed when the connection goes down */
	if (persistent)
		clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
	else
		set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);

unlock:
	hci_dev_unlock(hdev);
}
4250 
4251 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
4252 {
4253 	struct hci_ev_clock_offset *ev = (void *) skb->data;
4254 	struct hci_conn *conn;
4255 
4256 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4257 
4258 	hci_dev_lock(hdev);
4259 
4260 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4261 	if (conn && !ev->status) {
4262 		struct inquiry_entry *ie;
4263 
4264 		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4265 		if (ie) {
4266 			ie->data.clock_offset = ev->clock_offset;
4267 			ie->timestamp = jiffies;
4268 		}
4269 	}
4270 
4271 	hci_dev_unlock(hdev);
4272 }
4273 
4274 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
4275 {
4276 	struct hci_ev_pkt_type_change *ev = (void *) skb->data;
4277 	struct hci_conn *conn;
4278 
4279 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4280 
4281 	hci_dev_lock(hdev);
4282 
4283 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4284 	if (conn && !ev->status)
4285 		conn->pkt_type = __le16_to_cpu(ev->pkt_type);
4286 
4287 	hci_dev_unlock(hdev);
4288 }
4289 
4290 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
4291 {
4292 	struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
4293 	struct inquiry_entry *ie;
4294 
4295 	BT_DBG("%s", hdev->name);
4296 
4297 	hci_dev_lock(hdev);
4298 
4299 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4300 	if (ie) {
4301 		ie->data.pscan_rep_mode = ev->pscan_rep_mode;
4302 		ie->timestamp = jiffies;
4303 	}
4304 
4305 	hci_dev_unlock(hdev);
4306 }
4307 
/* HCI Inquiry Result with RSSI event handler.
 *
 * Parses the variable-length result list - which comes in two on-the-wire
 * formats, with or without a pscan_mode byte - updates the inquiry cache
 * and reports each found device to mgmt.
 */
static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct inquiry_data data;
	int num_rsp = *((__u8 *) skb->data);

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	/* Results during a periodic inquiry are not reported */
	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
		return;

	hci_dev_lock(hdev);

	/* Distinguish the two result formats by the per-entry size the
	 * payload length implies.
	 */
	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
		struct inquiry_info_with_rssi_and_pscan_mode *info;
		info = (void *) (skb->data + 1);

		/* Bounds check before walking the entries */
		if (skb->len < num_rsp * sizeof(*info) + 1)
			goto unlock;

		for (; num_rsp; num_rsp--, info++) {
			u32 flags;

			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode	= info->pscan_rep_mode;
			data.pscan_period_mode	= info->pscan_period_mode;
			data.pscan_mode		= info->pscan_mode;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset	= info->clock_offset;
			data.rssi		= info->rssi;
			data.ssp_mode		= 0x00;

			flags = hci_inquiry_cache_update(hdev, &data, false);

			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  flags, NULL, 0, NULL, 0);
		}
	} else {
		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);

		/* Bounds check before walking the entries */
		if (skb->len < num_rsp * sizeof(*info) + 1)
			goto unlock;

		for (; num_rsp; num_rsp--, info++) {
			u32 flags;

			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode	= info->pscan_rep_mode;
			data.pscan_period_mode	= info->pscan_period_mode;
			data.pscan_mode		= 0x00;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset	= info->clock_offset;
			data.rssi		= info->rssi;
			data.ssp_mode		= 0x00;

			flags = hci_inquiry_cache_update(hdev, &data, false);

			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  flags, NULL, 0, NULL, 0);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
4378 
/* HCI Remote Extended Features event handler.
 *
 * Caches the received remote feature page, derives the SSP/SC enabled
 * flags from the remote host features (page 1) and then continues the
 * connection setup like the base remote-features handler.
 */
static void hci_remote_ext_features_evt(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* Cache only pages that fit the per-connection feature table */
	if (ev->page < HCI_MAX_PAGES)
		memcpy(conn->features[ev->page], ev->features, 8);

	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

		if (ev->features[0] & LMP_HOST_SSP) {
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		} else {
			/* It is mandatory by the Bluetooth specification that
			 * Extended Inquiry Results are only used when Secure
			 * Simple Pairing is enabled, but some devices violate
			 * this.
			 *
			 * To make these devices work, the internal SSP
			 * enabled flag needs to be cleared if the remote host
			 * features do not indicate SSP support */
			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		}

		if (ev->features[0] & LMP_HOST_SC)
			set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
	}

	if (conn->state != BT_CONFIG)
		goto unlock;

	/* Resolve the remote name before reporting the device to mgmt;
	 * otherwise flag the connection as mgmt-connected right away.
	 */
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, NULL, 0);

	/* No outgoing authentication required: the link is usable now */
	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
4442 
4443 static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
4444 				       struct sk_buff *skb)
4445 {
4446 	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
4447 	struct hci_conn *conn;
4448 	unsigned int notify_evt;
4449 
4450 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4451 
4452 	hci_dev_lock(hdev);
4453 
4454 	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
4455 	if (!conn) {
4456 		if (ev->link_type == ESCO_LINK)
4457 			goto unlock;
4458 
4459 		/* When the link type in the event indicates SCO connection
4460 		 * and lookup of the connection object fails, then check
4461 		 * if an eSCO connection object exists.
4462 		 *
4463 		 * The core limits the synchronous connections to either
4464 		 * SCO or eSCO. The eSCO connection is preferred and tried
4465 		 * to be setup first and until successfully established,
4466 		 * the link type will be hinted as eSCO.
4467 		 */
4468 		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
4469 		if (!conn)
4470 			goto unlock;
4471 	}
4472 
4473 	switch (ev->status) {
4474 	case 0x00:
4475 		/* The synchronous connection complete event should only be
4476 		 * sent once per new connection. Receiving a successful
4477 		 * complete event when the connection status is already
4478 		 * BT_CONNECTED means that the device is misbehaving and sent
4479 		 * multiple complete event packets for the same new connection.
4480 		 *
4481 		 * Registering the device more than once can corrupt kernel
4482 		 * memory, hence upon detecting this invalid event, we report
4483 		 * an error and ignore the packet.
4484 		 */
4485 		if (conn->state == BT_CONNECTED) {
4486 			bt_dev_err(hdev, "Ignoring connect complete event for existing connection");
4487 			goto unlock;
4488 		}
4489 
4490 		conn->handle = __le16_to_cpu(ev->handle);
4491 		conn->state  = BT_CONNECTED;
4492 		conn->type   = ev->link_type;
4493 
4494 		hci_debugfs_create_conn(conn);
4495 		hci_conn_add_sysfs(conn);
4496 		break;
4497 
4498 	case 0x10:	/* Connection Accept Timeout */
4499 	case 0x0d:	/* Connection Rejected due to Limited Resources */
4500 	case 0x11:	/* Unsupported Feature or Parameter Value */
4501 	case 0x1c:	/* SCO interval rejected */
4502 	case 0x1a:	/* Unsupported Remote Feature */
4503 	case 0x1e:	/* Invalid LMP Parameters */
4504 	case 0x1f:	/* Unspecified error */
4505 	case 0x20:	/* Unsupported LMP Parameter value */
4506 		if (conn->out) {
4507 			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
4508 					(hdev->esco_type & EDR_ESCO_MASK);
4509 			if (hci_setup_sync(conn, conn->link->handle))
4510 				goto unlock;
4511 		}
4512 		fallthrough;
4513 
4514 	default:
4515 		conn->state = BT_CLOSED;
4516 		break;
4517 	}
4518 
4519 	bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode);
4520 
4521 	switch (ev->air_mode) {
4522 	case 0x02:
4523 		notify_evt = HCI_NOTIFY_ENABLE_SCO_CVSD;
4524 		break;
4525 	case 0x03:
4526 		notify_evt = HCI_NOTIFY_ENABLE_SCO_TRANSP;
4527 		break;
4528 	}
4529 
4530 	/* Notify only in case of SCO over HCI transport data path which
4531 	 * is zero and non-zero value shall be non-HCI transport data path
4532 	 */
4533 	if (conn->codec.data_path == 0) {
4534 		if (hdev->notify)
4535 			hdev->notify(hdev, notify_evt);
4536 	}
4537 
4538 	hci_connect_cfm(conn, ev->status);
4539 	if (ev->status)
4540 		hci_conn_del(conn);
4541 
4542 unlock:
4543 	hci_dev_unlock(hdev);
4544 }
4545 
4546 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
4547 {
4548 	size_t parsed = 0;
4549 
4550 	while (parsed < eir_len) {
4551 		u8 field_len = eir[0];
4552 
4553 		if (field_len == 0)
4554 			return parsed;
4555 
4556 		parsed += field_len + 1;
4557 		eir += field_len + 1;
4558 	}
4559 
4560 	return eir_len;
4561 }
4562 
/* Handle HCI Extended Inquiry Result event.
 *
 * Each response entry carries the usual inquiry data plus an Extended
 * Inquiry Response (EIR) blob. Every entry is folded into the inquiry
 * cache and forwarded to the management interface as a device-found
 * event.
 */
static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct inquiry_data data;
	/* Event layout: one response-count octet followed by the entries */
	struct extended_inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp = *((__u8 *) skb->data);
	size_t eir_len;

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	/* Drop empty or truncated events */
	if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
		return;

	/* Results of an ongoing periodic inquiry are not reported */
	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
		return;

	hci_dev_lock(hdev);

	for (; num_rsp; num_rsp--, info++) {
		u32 flags;
		bool name_known;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode	= info->pscan_rep_mode;
		data.pscan_period_mode	= info->pscan_period_mode;
		data.pscan_mode		= 0x00;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset	= info->clock_offset;
		data.rssi		= info->rssi;
		data.ssp_mode		= 0x01;

		/* With mgmt in use, the name counts as known only when the
		 * EIR data contains a complete name field.
		 */
		if (hci_dev_test_flag(hdev, HCI_MGMT))
			name_known = eir_get_data(info->data,
						  sizeof(info->data),
						  EIR_NAME_COMPLETE, NULL);
		else
			name_known = true;

		flags = hci_inquiry_cache_update(hdev, &data, name_known);

		/* Forward only the significant part of the EIR data */
		eir_len = eir_get_length(info->data, sizeof(info->data));

		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, info->rssi,
				  flags, info->data, eir_len, NULL, 0);
	}

	hci_dev_unlock(hdev);
}
4612 
/* Handle HCI Encryption Key Refresh Complete event.
 *
 * Only acted upon for LE links: on success the pending security level
 * is committed; on failure while connected the link is torn down with
 * an authentication failure. Connections still in BT_CONFIG are
 * promoted to BT_CONNECTED and confirmed to the upper layers.
 */
static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
	       __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* For BR/EDR the necessary steps are taken through the
	 * auth_complete event.
	 */
	if (conn->type != LE_LINK)
		goto unlock;

	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	/* The encryption procedure has concluded either way */
	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		/* Keep the connection around for the disconnect timeout */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
4662 
4663 static u8 hci_get_auth_req(struct hci_conn *conn)
4664 {
4665 	/* If remote requests no-bonding follow that lead */
4666 	if (conn->remote_auth == HCI_AT_NO_BONDING ||
4667 	    conn->remote_auth == HCI_AT_NO_BONDING_MITM)
4668 		return conn->remote_auth | (conn->auth_type & 0x01);
4669 
4670 	/* If both remote and local have enough IO capabilities, require
4671 	 * MITM protection
4672 	 */
4673 	if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
4674 	    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
4675 		return conn->remote_auth | 0x01;
4676 
4677 	/* No MITM protection possible so ignore remote requirement */
4678 	return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
4679 }
4680 
4681 static u8 bredr_oob_data_present(struct hci_conn *conn)
4682 {
4683 	struct hci_dev *hdev = conn->hdev;
4684 	struct oob_data *data;
4685 
4686 	data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
4687 	if (!data)
4688 		return 0x00;
4689 
4690 	if (bredr_sc_enabled(hdev)) {
4691 		/* When Secure Connections is enabled, then just
4692 		 * return the present value stored with the OOB
4693 		 * data. The stored value contains the right present
4694 		 * information. However it can only be trusted when
4695 		 * not in Secure Connection Only mode.
4696 		 */
4697 		if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
4698 			return data->present;
4699 
4700 		/* When Secure Connections Only mode is enabled, then
4701 		 * the P-256 values are required. If they are not
4702 		 * available, then do not declare that OOB data is
4703 		 * present.
4704 		 */
4705 		if (!memcmp(data->rand256, ZERO_KEY, 16) ||
4706 		    !memcmp(data->hash256, ZERO_KEY, 16))
4707 			return 0x00;
4708 
4709 		return 0x02;
4710 	}
4711 
4712 	/* When Secure Connections is not enabled or actually
4713 	 * not supported by the hardware, then check that if
4714 	 * P-192 data values are present.
4715 	 */
4716 	if (!memcmp(data->rand192, ZERO_KEY, 16) ||
4717 	    !memcmp(data->hash192, ZERO_KEY, 16))
4718 		return 0x00;
4719 
4720 	return 0x01;
4721 }
4722 
/* Handle HCI IO Capability Request event.
 *
 * Replies with our IO capability, authentication requirement and OOB
 * data presence when pairing is permitted, or sends a negative reply
 * with HCI_ERROR_PAIRING_NOT_ALLOWED otherwise.
 */
static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Keep a reference for the duration of the pairing procedure */
	hci_conn_hold(conn);

	/* Without mgmt in control of pairing, send no reply at all */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	/* Allow pairing if we're pairable, the initiators of the
	 * pairing or if the remote is not requesting bonding.
	 */
	if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
	    test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
				HCI_IO_DISPLAY_YESNO : conn->io_capability;

		/* If we are initiators, there is no remote information yet */
		if (conn->remote_auth == 0xff) {
			/* Request MITM protection if our IO caps allow it
			 * except for the no-bonding case.
			 */
			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
			    conn->auth_type != HCI_AT_NO_BONDING)
				conn->auth_type |= 0x01;
		} else {
			conn->auth_type = hci_get_auth_req(conn);
		}

		/* If we're not bondable, force one of the non-bondable
		 * authentication requirement values.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
			conn->auth_type &= HCI_AT_NO_BONDING_MITM;

		cp.authentication = conn->auth_type;
		cp.oob_data = bredr_oob_data_present(conn);

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
4791 
4792 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
4793 {
4794 	struct hci_ev_io_capa_reply *ev = (void *) skb->data;
4795 	struct hci_conn *conn;
4796 
4797 	BT_DBG("%s", hdev->name);
4798 
4799 	hci_dev_lock(hdev);
4800 
4801 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4802 	if (!conn)
4803 		goto unlock;
4804 
4805 	conn->remote_cap = ev->capability;
4806 	conn->remote_auth = ev->authentication;
4807 
4808 unlock:
4809 	hci_dev_unlock(hdev);
4810 }
4811 
/* Handle HCI User Confirmation Request event.
 *
 * Decides between rejecting the confirmation, auto-accepting it
 * (optionally after a configured delay), or forwarding the request to
 * user space via the management interface, based on the MITM
 * requirements and IO capabilities of both sides.
 */
static void hci_user_confirm_request_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* Only handled when mgmt is in control of pairing */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Bit 0 of the authentication requirement is the MITM flag */
	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. We check the security level here since it doesn't
	 * necessarily match conn->auth_type.
	 */
	if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
	    conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). The exception is if neither
		 * side had MITM or if the local IO capability is
		 * NoInputNoOutput, in which case we do auto-accept
		 */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
		    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
		    (loc_mitm || rem_mitm)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		/* If there already exists link key in local host, leave the
		 * decision to user space since the remote device could be
		 * legitimate or malicious.
		 */
		if (hci_find_link_key(hdev, &ev->bdaddr)) {
			bt_dev_dbg(hdev, "Local host already has link key");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		/* A configured delay defers the accept to a delayed work */
		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			queue_delayed_work(conn->hdev->workqueue,
					   &conn->auto_accept_work, delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
				  le32_to_cpu(ev->passkey), confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
4896 
4897 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
4898 					 struct sk_buff *skb)
4899 {
4900 	struct hci_ev_user_passkey_req *ev = (void *) skb->data;
4901 
4902 	BT_DBG("%s", hdev->name);
4903 
4904 	if (hci_dev_test_flag(hdev, HCI_MGMT))
4905 		mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
4906 }
4907 
4908 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
4909 					struct sk_buff *skb)
4910 {
4911 	struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
4912 	struct hci_conn *conn;
4913 
4914 	BT_DBG("%s", hdev->name);
4915 
4916 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4917 	if (!conn)
4918 		return;
4919 
4920 	conn->passkey_notify = __le32_to_cpu(ev->passkey);
4921 	conn->passkey_entered = 0;
4922 
4923 	if (hci_dev_test_flag(hdev, HCI_MGMT))
4924 		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4925 					 conn->dst_type, conn->passkey_notify,
4926 					 conn->passkey_entered);
4927 }
4928 
4929 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
4930 {
4931 	struct hci_ev_keypress_notify *ev = (void *) skb->data;
4932 	struct hci_conn *conn;
4933 
4934 	BT_DBG("%s", hdev->name);
4935 
4936 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4937 	if (!conn)
4938 		return;
4939 
4940 	switch (ev->type) {
4941 	case HCI_KEYPRESS_STARTED:
4942 		conn->passkey_entered = 0;
4943 		return;
4944 
4945 	case HCI_KEYPRESS_ENTERED:
4946 		conn->passkey_entered++;
4947 		break;
4948 
4949 	case HCI_KEYPRESS_ERASED:
4950 		conn->passkey_entered--;
4951 		break;
4952 
4953 	case HCI_KEYPRESS_CLEARED:
4954 		conn->passkey_entered = 0;
4955 		break;
4956 
4957 	case HCI_KEYPRESS_COMPLETED:
4958 		return;
4959 	}
4960 
4961 	if (hci_dev_test_flag(hdev, HCI_MGMT))
4962 		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4963 					 conn->dst_type, conn->passkey_notify,
4964 					 conn->passkey_entered);
4965 }
4966 
4967 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
4968 					 struct sk_buff *skb)
4969 {
4970 	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
4971 	struct hci_conn *conn;
4972 
4973 	BT_DBG("%s", hdev->name);
4974 
4975 	hci_dev_lock(hdev);
4976 
4977 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4978 	if (!conn)
4979 		goto unlock;
4980 
4981 	/* Reset the authentication requirement to unknown */
4982 	conn->remote_auth = 0xff;
4983 
4984 	/* To avoid duplicate auth_failed events to user space we check
4985 	 * the HCI_CONN_AUTH_PEND flag which will be set if we
4986 	 * initiated the authentication. A traditional auth_complete
4987 	 * event gets always produced as initiator and is also mapped to
4988 	 * the mgmt_auth_failed event */
4989 	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
4990 		mgmt_auth_failed(conn, ev->status);
4991 
4992 	hci_conn_drop(conn);
4993 
4994 unlock:
4995 	hci_dev_unlock(hdev);
4996 }
4997 
4998 static void hci_remote_host_features_evt(struct hci_dev *hdev,
4999 					 struct sk_buff *skb)
5000 {
5001 	struct hci_ev_remote_host_features *ev = (void *) skb->data;
5002 	struct inquiry_entry *ie;
5003 	struct hci_conn *conn;
5004 
5005 	BT_DBG("%s", hdev->name);
5006 
5007 	hci_dev_lock(hdev);
5008 
5009 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5010 	if (conn)
5011 		memcpy(conn->features[1], ev->features, 8);
5012 
5013 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
5014 	if (ie)
5015 		ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
5016 
5017 	hci_dev_unlock(hdev);
5018 }
5019 
/* Handle HCI Remote OOB Data Request event.
 *
 * Answers with the stored out-of-band pairing data for the peer: an
 * extended reply (P-192 + P-256) when Secure Connections is enabled,
 * a legacy reply (P-192 only) otherwise, or a negative reply when no
 * data is stored or mgmt is not in control of pairing.
 */
static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
	struct oob_data *data;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
	if (!data) {
		struct hci_cp_remote_oob_data_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
			     sizeof(cp), &cp);
		goto unlock;
	}

	if (bredr_sc_enabled(hdev)) {
		struct hci_cp_remote_oob_ext_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* In Secure Connections Only mode the P-192 values must
		 * not be used, so send them zeroed out.
		 */
		if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
			memset(cp.hash192, 0, sizeof(cp.hash192));
			memset(cp.rand192, 0, sizeof(cp.rand192));
		} else {
			memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
			memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
		}
		memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
		memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_remote_oob_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		memcpy(cp.hash, data->hash192, sizeof(cp.hash));
		memcpy(cp.rand, data->rand192, sizeof(cp.rand));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
5073 
5074 #if IS_ENABLED(CONFIG_BT_HS)
5075 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
5076 {
5077 	struct hci_ev_channel_selected *ev = (void *)skb->data;
5078 	struct hci_conn *hcon;
5079 
5080 	BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
5081 
5082 	skb_pull(skb, sizeof(*ev));
5083 
5084 	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5085 	if (!hcon)
5086 		return;
5087 
5088 	amp_read_loc_assoc_final_data(hdev, hcon);
5089 }
5090 
/* Handle AMP Physical Link Complete event (High Speed support).
 *
 * On success the AMP physical link inherits the destination address
 * of the underlying BR/EDR connection and is registered with debugfs
 * and sysfs; on failure the connection object is deleted.
 */
static void hci_phy_link_complete_evt(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_ev_phy_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon, *bredr_hcon;

	BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
	       ev->status);

	hci_dev_lock(hdev);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		goto unlock;

	/* Only links managed by an AMP manager are of interest here */
	if (!hcon->amp_mgr)
		goto unlock;

	if (ev->status) {
		hci_conn_del(hcon);
		goto unlock;
	}

	bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;

	hcon->state = BT_CONNECTED;
	bacpy(&hcon->dst, &bredr_hcon->dst);

	/* Arm the disconnect timeout for the new link */
	hci_conn_hold(hcon);
	hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(hcon);

	hci_debugfs_create_conn(hcon);
	hci_conn_add_sysfs(hcon);

	amp_physical_cfm(bredr_hcon, hcon);

unlock:
	hci_dev_unlock(hdev);
}
5131 
/* Handle AMP Logical Link Complete event (High Speed support).
 *
 * Creates an AMP HCI channel for the logical link and, when an AMP
 * manager with a BR/EDR L2CAP channel exists, confirms the logical
 * link to L2CAP so traffic can move to the AMP controller.
 */
static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_logical_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon;
	struct hci_chan *hchan;
	struct amp_mgr *mgr;

	BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
	       hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
	       ev->status);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	/* Create AMP hchan */
	hchan = hci_chan_create(hcon);
	if (!hchan)
		return;

	hchan->handle = le16_to_cpu(ev->handle);
	hchan->amp = true;

	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);

	mgr = hcon->amp_mgr;
	if (mgr && mgr->bredr_chan) {
		struct l2cap_chan *bredr_chan = mgr->bredr_chan;

		l2cap_chan_lock(bredr_chan);

		/* The L2CAP connection now uses the AMP controller's MTU */
		bredr_chan->conn->mtu = hdev->block_mtu;
		l2cap_logical_cfm(bredr_chan, hchan, 0);
		hci_conn_hold(hcon);

		l2cap_chan_unlock(bredr_chan);
	}
}
5170 
5171 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
5172 					     struct sk_buff *skb)
5173 {
5174 	struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
5175 	struct hci_chan *hchan;
5176 
5177 	BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
5178 	       le16_to_cpu(ev->handle), ev->status);
5179 
5180 	if (ev->status)
5181 		return;
5182 
5183 	hci_dev_lock(hdev);
5184 
5185 	hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
5186 	if (!hchan || !hchan->amp)
5187 		goto unlock;
5188 
5189 	amp_destroy_logical_link(hchan, ev->reason);
5190 
5191 unlock:
5192 	hci_dev_unlock(hdev);
5193 }
5194 
5195 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
5196 					     struct sk_buff *skb)
5197 {
5198 	struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
5199 	struct hci_conn *hcon;
5200 
5201 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5202 
5203 	if (ev->status)
5204 		return;
5205 
5206 	hci_dev_lock(hdev);
5207 
5208 	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5209 	if (hcon) {
5210 		hcon->state = BT_CLOSED;
5211 		hci_conn_del(hcon);
5212 	}
5213 
5214 	hci_dev_unlock(hdev);
5215 }
5216 #endif
5217 
/* Fill in the initiator and responder address information of an LE
 * connection based on our role.
 *
 * For outgoing connections the peer is the responder; for incoming
 * connections we are. In both cases a controller-provided Local RPA
 * takes precedence over the host's idea of its own address.
 */
static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr,
				u8 bdaddr_type, bdaddr_t *local_rpa)
{
	if (conn->out) {
		conn->dst_type = bdaddr_type;
		conn->resp_addr_type = bdaddr_type;
		bacpy(&conn->resp_addr, bdaddr);

		/* Check if the controller has set a Local RPA then it must be
		 * used instead of hdev->rpa.
		 */
		if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
			conn->init_addr_type = ADDR_LE_DEV_RANDOM;
			bacpy(&conn->init_addr, local_rpa);
		} else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) {
			conn->init_addr_type = ADDR_LE_DEV_RANDOM;
			bacpy(&conn->init_addr, &conn->hdev->rpa);
		} else {
			hci_copy_identity_address(conn->hdev, &conn->init_addr,
						  &conn->init_addr_type);
		}
	} else {
		conn->resp_addr_type = conn->hdev->adv_addr_type;
		/* Check if the controller has set a Local RPA then it must be
		 * used instead of hdev->rpa.
		 */
		if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
			conn->resp_addr_type = ADDR_LE_DEV_RANDOM;
			bacpy(&conn->resp_addr, local_rpa);
		} else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
			/* In case of ext adv, resp_addr will be updated in
			 * Adv Terminated event.
			 */
			if (!ext_adv_capable(conn->hdev))
				bacpy(&conn->resp_addr,
				      &conn->hdev->random_addr);
		} else {
			bacpy(&conn->resp_addr, &conn->hdev->bdaddr);
		}

		conn->init_addr_type = bdaddr_type;
		bacpy(&conn->init_addr, bdaddr);

		/* For incoming connections, set the default minimum
		 * and maximum connection interval. They will be used
		 * to check if the parameters are in range and if not
		 * trigger the connection update procedure.
		 */
		conn->le_conn_min_interval = conn->hdev->le_conn_min_interval;
		conn->le_conn_max_interval = conn->hdev->le_conn_max_interval;
	}
}
5270 
/* Common handler for LE Connection Complete and LE Enhanced
 * Connection Complete events.
 *
 * Finds or creates the connection object, resolves the peer address
 * via stored IRKs, transitions the connection into BT_CONFIG and
 * either requests the remote features or confirms the connection
 * directly, depending on role and controller features.
 */
static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
				 bdaddr_t *bdaddr, u8 bdaddr_type,
				 bdaddr_t *local_rpa, u8 role, u16 handle,
				 u16 interval, u16 latency,
				 u16 supervision_timeout)
{
	struct hci_conn_params *params;
	struct hci_conn *conn;
	struct smp_irk *irk;
	u8 addr_type;

	hci_dev_lock(hdev);

	/* All controllers implicitly stop advertising in the event of a
	 * connection, so ensure that the state bit is cleared.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	conn = hci_lookup_le_connect(hdev);
	if (!conn) {
		conn = hci_conn_add(hdev, LE_LINK, bdaddr, role);
		if (!conn) {
			bt_dev_err(hdev, "no memory for new connection");
			goto unlock;
		}

		conn->dst_type = bdaddr_type;

		/* If we didn't have a hci_conn object previously
		 * but we're in central role this must be something
		 * initiated using an accept list. Since accept list based
		 * connections are not "first class citizens" we don't
		 * have full tracking of them. Therefore, we go ahead
		 * with a "best effort" approach of determining the
		 * initiator address based on the HCI_PRIVACY flag.
		 */
		if (conn->out) {
			conn->resp_addr_type = bdaddr_type;
			bacpy(&conn->resp_addr, bdaddr);
			if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
				bacpy(&conn->init_addr, &hdev->rpa);
			} else {
				hci_copy_identity_address(hdev,
							  &conn->init_addr,
							  &conn->init_addr_type);
			}
		}
	} else {
		/* The connection attempt finished; stop its timeout */
		cancel_delayed_work(&conn->le_conn_timeout);
	}

	le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa);

	/* Lookup the identity address from the stored connection
	 * address and address type.
	 *
	 * When establishing connections to an identity address, the
	 * connection procedure will store the resolvable random
	 * address first. Now if it can be converted back into the
	 * identity address, start using the identity address from
	 * now on.
	 */
	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
	if (irk) {
		bacpy(&conn->dst, &irk->bdaddr);
		conn->dst_type = irk->addr_type;
	}

	conn->dst_type = ev_bdaddr_type(hdev, conn->dst_type, NULL);

	if (status) {
		hci_le_conn_failed(conn, status);
		goto unlock;
	}

	if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
		addr_type = BDADDR_LE_PUBLIC;
	else
		addr_type = BDADDR_LE_RANDOM;

	/* Drop the connection if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->reject_list, &conn->dst, addr_type)) {
		hci_conn_drop(conn);
		goto unlock;
	}

	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, NULL, 0);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = handle;
	conn->state = BT_CONFIG;

	/* Store current advertising instance as connection advertising instance
	 * when software rotation is in use so it can be re-enabled when
	 * disconnected.
	 */
	if (!ext_adv_capable(hdev))
		conn->adv_instance = hdev->cur_adv_instance;

	conn->le_conn_interval = interval;
	conn->le_conn_latency = latency;
	conn->le_supv_timeout = supervision_timeout;

	hci_debugfs_create_conn(conn);
	hci_conn_add_sysfs(conn);

	/* The remote features procedure is defined for central
	 * role only. So only in case of an initiated connection
	 * request the remote features.
	 *
	 * If the local controller supports peripheral-initiated features
	 * exchange, then requesting the remote features in peripheral
	 * role is possible. Otherwise just transition into the
	 * connected state without requesting the remote features.
	 */
	if (conn->out ||
	    (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) {
		struct hci_cp_le_read_remote_features cp;

		cp.handle = __cpu_to_le16(conn->handle);

		hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
			     sizeof(cp), &cp);

		hci_conn_hold(conn);
	} else {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, status);
	}

	/* The pending connection attempt for this device has succeeded,
	 * so remove it from the pending list and release its reference.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
					   conn->dst_type);
	if (params) {
		list_del_init(&params->action);
		if (params->conn) {
			hci_conn_drop(params->conn);
			hci_conn_put(params->conn);
			params->conn = NULL;
		}
	}

unlock:
	hci_update_background_scan(hdev);
	hci_dev_unlock(hdev);
}
5418 
5419 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
5420 {
5421 	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
5422 
5423 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5424 
5425 	le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5426 			     NULL, ev->role, le16_to_cpu(ev->handle),
5427 			     le16_to_cpu(ev->interval),
5428 			     le16_to_cpu(ev->latency),
5429 			     le16_to_cpu(ev->supervision_timeout));
5430 }
5431 
5432 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev,
5433 					 struct sk_buff *skb)
5434 {
5435 	struct hci_ev_le_enh_conn_complete *ev = (void *) skb->data;
5436 
5437 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5438 
5439 	le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5440 			     &ev->local_rpa, ev->role, le16_to_cpu(ev->handle),
5441 			     le16_to_cpu(ev->interval),
5442 			     le16_to_cpu(ev->latency),
5443 			     le16_to_cpu(ev->supervision_timeout));
5444 
5445 	if (use_ll_privacy(hdev) &&
5446 	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
5447 	    hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
5448 		hci_req_disable_address_resolution(hdev);
5449 }
5450 
/* Handle HCI LE Advertising Set Terminated event.
 *
 * On error the advertising instance is removed; on success the
 * instance is marked disabled and, when the termination was caused by
 * a connection, the connection remembers the instance and may pick up
 * the random address used by the set as its responder address.
 */
static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_evt_le_ext_adv_set_term *ev = (void *) skb->data;
	struct hci_conn *conn;
	struct adv_info *adv;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	adv = hci_find_adv_instance(hdev, ev->handle);

	if (ev->status) {
		if (!adv)
			return;

		/* Remove advertising as it has been terminated */
		hci_remove_adv_instance(hdev, ev->handle);
		mgmt_advertising_removed(NULL, hdev, ev->handle);

		return;
	}

	if (adv)
		adv->enabled = false;

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
	if (conn) {
		/* Store handle in the connection so the correct advertising
		 * instance can be re-enabled when disconnected.
		 */
		conn->adv_instance = ev->handle;

		/* The responder address only needs fixing up when a random
		 * advertising address was used and none is set yet.
		 */
		if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM ||
		    bacmp(&conn->resp_addr, BDADDR_ANY))
			return;

		/* Instance 0 uses the device's global random address */
		if (!ev->handle) {
			bacpy(&conn->resp_addr, &hdev->random_addr);
			return;
		}

		if (adv)
			bacpy(&conn->resp_addr, &adv->random_addr);
	}
}
5495 
5496 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
5497 					    struct sk_buff *skb)
5498 {
5499 	struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
5500 	struct hci_conn *conn;
5501 
5502 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5503 
5504 	if (ev->status)
5505 		return;
5506 
5507 	hci_dev_lock(hdev);
5508 
5509 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5510 	if (conn) {
5511 		conn->le_conn_interval = le16_to_cpu(ev->interval);
5512 		conn->le_conn_latency = le16_to_cpu(ev->latency);
5513 		conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
5514 	}
5515 
5516 	hci_dev_unlock(hdev);
5517 }
5518 
/* Decide whether an incoming advertising report should trigger an LE
 * connection attempt to @addr, and start it if so.
 *
 * Returns the hci_conn for a newly started (or already pending) attempt,
 * or NULL when no connection should be made. For auto-connections a
 * reference to the conn is stored in params->conn so the attempt can be
 * aborted if the parameters are removed.
 *
 * This function requires the caller holds hdev->lock
 */
static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
					      bdaddr_t *addr,
					      u8 addr_type, bool addr_resolved,
					      u8 adv_type, bdaddr_t *direct_rpa)
{
	struct hci_conn *conn;
	struct hci_conn_params *params;

	/* If the event is not connectable don't proceed further */
	if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
		return NULL;

	/* Ignore if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type))
		return NULL;

	/* Most controller will fail if we try to create new connections
	 * while we have an existing one in peripheral role.
	 */
	if (hdev->conn_hash.le_num_peripheral > 0 &&
	    (!test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) ||
	     !(hdev->le_states[3] & 0x10)))
		return NULL;

	/* If we're not connectable only connect devices that we have in
	 * our pend_le_conns list.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
					   addr_type);
	if (!params)
		return NULL;

	if (!params->explicit_connect) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
			/* Only devices advertising with ADV_DIRECT_IND are
			 * triggering a connection attempt. This is allowing
			 * incoming connections from peripheral devices.
			 */
			if (adv_type != LE_ADV_DIRECT_IND)
				return NULL;
			break;
		case HCI_AUTO_CONN_ALWAYS:
			/* Devices advertising with ADV_IND or ADV_DIRECT_IND
			 * are triggering a connection attempt. This means
			 * that incoming connections from peripheral device are
			 * accepted and also outgoing connections to peripheral
			 * devices are established when found.
			 */
			break;
		default:
			return NULL;
		}
	}

	conn = hci_connect_le(hdev, addr, addr_type, addr_resolved,
			      BT_SECURITY_LOW, hdev->def_le_autoconnect_timeout,
			      HCI_ROLE_MASTER, direct_rpa);
	if (!IS_ERR(conn)) {
		/* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
		 * by higher layer that tried to connect, if no then
		 * store the pointer since we don't really have any
		 * other owner of the object besides the params that
		 * triggered it. This way we can abort the connection if
		 * the parameters get removed and keep the reference
		 * count consistent once the connection is established.
		 */

		if (!params->explicit_connect)
			params->conn = hci_conn_get(conn);

		return conn;
	}

	switch (PTR_ERR(conn)) {
	case -EBUSY:
		/* If hci_connect() returns -EBUSY it means there is already
		 * an LE connection attempt going on. Since controllers don't
		 * support more than one connection attempt at the time, we
		 * don't consider this an error case.
		 */
		break;
	default:
		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
		return NULL;
	}

	return NULL;
}
5609 
/* Process a single legacy or extended advertising report.
 *
 * @type:        legacy PDU type (LE_ADV_*), already translated for
 *               extended reports
 * @bdaddr:      advertiser address as reported
 * @direct_addr: target address from an LE Direct Advertising Report,
 *               NULL for all other report types
 * @rssi:        reported signal strength
 * @data, @len:  advertising data payload
 * @ext_adv:     true when the report came in via the extended
 *               advertising report event
 *
 * Called with hdev->lock held by all callers in this file.
 */
static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
			       u8 bdaddr_type, bdaddr_t *direct_addr,
			       u8 direct_addr_type, s8 rssi, u8 *data, u8 len,
			       bool ext_adv)
{
	struct discovery_state *d = &hdev->discovery;
	struct smp_irk *irk;
	struct hci_conn *conn;
	bool match, bdaddr_resolved;
	u32 flags;
	u8 *ptr;

	switch (type) {
	case LE_ADV_IND:
	case LE_ADV_DIRECT_IND:
	case LE_ADV_SCAN_IND:
	case LE_ADV_NONCONN_IND:
	case LE_ADV_SCAN_RSP:
		break;
	default:
		bt_dev_err_ratelimited(hdev, "unknown advertising packet "
				       "type: 0x%02x", type);
		return;
	}

	if (!ext_adv && len > HCI_MAX_AD_LENGTH) {
		bt_dev_err_ratelimited(hdev, "legacy adv larger than 31 bytes");
		return;
	}

	/* Find the end of the data in case the report contains padded zero
	 * bytes at the end causing an invalid length value.
	 *
	 * When data is NULL, len is 0 so there is no need for extra ptr
	 * check as 'ptr < data + 0' is already false in such case.
	 */
	for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
		if (ptr + 1 + *ptr > data + len)
			break;
	}

	/* Adjust for actual length. This handles the case when remote
	 * device is advertising with incorrect data length.
	 */
	len = ptr - data;

	/* If the direct address is present, then this report is from
	 * a LE Direct Advertising Report event. In that case it is
	 * important to see if the address is matching the local
	 * controller address.
	 */
	if (direct_addr) {
		direct_addr_type = ev_bdaddr_type(hdev, direct_addr_type,
						  &bdaddr_resolved);

		/* Only resolvable random addresses are valid for these
		 * kind of reports and others can be ignored.
		 */
		if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
			return;

		/* If the controller is not using resolvable random
		 * addresses, then this report can be ignored.
		 */
		if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
			return;

		/* If the local IRK of the controller does not match
		 * with the resolvable random address provided, then
		 * this report can be ignored.
		 */
		if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
			return;
	}

	/* Check if we need to convert to identity address */
	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		bdaddr_type = irk->addr_type;
	}

	bdaddr_type = ev_bdaddr_type(hdev, bdaddr_type, &bdaddr_resolved);

	/* Check if we have been requested to connect to this device.
	 *
	 * direct_addr is set only for directed advertising reports (it is NULL
	 * for advertising reports) and is already verified to be RPA above.
	 */
	conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, bdaddr_resolved,
				     type, direct_addr);
	if (!ext_adv && conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) {
		/* Store report for later inclusion by
		 * mgmt_device_connected
		 */
		memcpy(conn->le_adv_data, data, len);
		conn->le_adv_data_len = len;
	}

	/* Passive scanning shouldn't trigger any device found events,
	 * except for devices marked as CONN_REPORT for which we do send
	 * device found events, or advertisement monitoring requested.
	 */
	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
		if (type == LE_ADV_DIRECT_IND)
			return;

		if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
					       bdaddr, bdaddr_type) &&
		    idr_is_empty(&hdev->adv_monitors_idr))
			return;

		if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
			flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
		else
			flags = 0;
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* When receiving non-connectable or scannable undirected
	 * advertising reports, this means that the remote device is
	 * not connectable and then clearly indicate this in the
	 * device found event.
	 *
	 * When receiving a scan response, then there is no way to
	 * know if the remote device is connectable or not. However
	 * since scan responses are merged with a previously seen
	 * advertising report, the flags field from that report
	 * will be used.
	 *
	 * In the really unlikely case that a controller get confused
	 * and just sends a scan response event, then it is marked as
	 * not connectable as well.
	 */
	if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
	    type == LE_ADV_SCAN_RSP)
		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
	else
		flags = 0;

	/* If there's nothing pending either store the data from this
	 * event or send an immediate device found event if the data
	 * should not be stored for later.
	 */
	if (!ext_adv &&	!has_pending_adv_report(hdev)) {
		/* If the report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* Check if the pending report is for the same device as the new one */
	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
		 bdaddr_type == d->last_adv_addr_type);

	/* If the pending data doesn't match this report or this isn't a
	 * scan response (e.g. we got a duplicate ADV_IND) then force
	 * sending of the pending data.
	 */
	if (type != LE_ADV_SCAN_RSP || !match) {
		/* Send out whatever is in the cache, but skip duplicates */
		if (!match)
			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);

		/* If the new report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (!ext_adv && (type == LE_ADV_IND ||
				 type == LE_ADV_SCAN_IND)) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		/* The advertising reports cannot be merged, so clear
		 * the pending report and send out a device found event.
		 */
		clear_pending_adv_report(hdev);
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
	 * the new event is a SCAN_RSP. We can therefore proceed with
	 * sending a merged device found event.
	 */
	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
			  d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
			  d->last_adv_data, d->last_adv_data_len, data, len);
	clear_pending_adv_report(hdev);
}
5816 
5817 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
5818 {
5819 	u8 num_reports = skb->data[0];
5820 	void *ptr = &skb->data[1];
5821 
5822 	hci_dev_lock(hdev);
5823 
5824 	while (num_reports--) {
5825 		struct hci_ev_le_advertising_info *ev = ptr;
5826 		s8 rssi;
5827 
5828 		if (ev->length <= HCI_MAX_AD_LENGTH) {
5829 			rssi = ev->data[ev->length];
5830 			process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
5831 					   ev->bdaddr_type, NULL, 0, rssi,
5832 					   ev->data, ev->length, false);
5833 		} else {
5834 			bt_dev_err(hdev, "Dropping invalid advertising data");
5835 		}
5836 
5837 		ptr += sizeof(*ev) + ev->length + 1;
5838 	}
5839 
5840 	hci_dev_unlock(hdev);
5841 }
5842 
5843 static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
5844 {
5845 	if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
5846 		switch (evt_type) {
5847 		case LE_LEGACY_ADV_IND:
5848 			return LE_ADV_IND;
5849 		case LE_LEGACY_ADV_DIRECT_IND:
5850 			return LE_ADV_DIRECT_IND;
5851 		case LE_LEGACY_ADV_SCAN_IND:
5852 			return LE_ADV_SCAN_IND;
5853 		case LE_LEGACY_NONCONN_IND:
5854 			return LE_ADV_NONCONN_IND;
5855 		case LE_LEGACY_SCAN_RSP_ADV:
5856 		case LE_LEGACY_SCAN_RSP_ADV_SCAN:
5857 			return LE_ADV_SCAN_RSP;
5858 		}
5859 
5860 		goto invalid;
5861 	}
5862 
5863 	if (evt_type & LE_EXT_ADV_CONN_IND) {
5864 		if (evt_type & LE_EXT_ADV_DIRECT_IND)
5865 			return LE_ADV_DIRECT_IND;
5866 
5867 		return LE_ADV_IND;
5868 	}
5869 
5870 	if (evt_type & LE_EXT_ADV_SCAN_RSP)
5871 		return LE_ADV_SCAN_RSP;
5872 
5873 	if (evt_type & LE_EXT_ADV_SCAN_IND)
5874 		return LE_ADV_SCAN_IND;
5875 
5876 	if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
5877 	    evt_type & LE_EXT_ADV_DIRECT_IND)
5878 		return LE_ADV_NONCONN_IND;
5879 
5880 invalid:
5881 	bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x",
5882 			       evt_type);
5883 
5884 	return LE_ADV_INVALID;
5885 }
5886 
5887 static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
5888 {
5889 	u8 num_reports = skb->data[0];
5890 	void *ptr = &skb->data[1];
5891 
5892 	hci_dev_lock(hdev);
5893 
5894 	while (num_reports--) {
5895 		struct hci_ev_le_ext_adv_report *ev = ptr;
5896 		u8 legacy_evt_type;
5897 		u16 evt_type;
5898 
5899 		evt_type = __le16_to_cpu(ev->evt_type);
5900 		legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);
5901 		if (legacy_evt_type != LE_ADV_INVALID) {
5902 			process_adv_report(hdev, legacy_evt_type, &ev->bdaddr,
5903 					   ev->bdaddr_type, NULL, 0, ev->rssi,
5904 					   ev->data, ev->length,
5905 					   !(evt_type & LE_EXT_ADV_LEGACY_PDU));
5906 		}
5907 
5908 		ptr += sizeof(*ev) + ev->length;
5909 	}
5910 
5911 	hci_dev_unlock(hdev);
5912 }
5913 
/* Handle LE Read Remote Features Complete: store the remote LE feature
 * mask on the connection and, for connections still in BT_CONFIG,
 * finish connection setup by transitioning to BT_CONNECTED.
 */
static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_ev_le_remote_feat_complete *ev = (void *)skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		/* The feature bits are only valid on success */
		if (!ev->status)
			memcpy(conn->features[0], ev->features, 8);

		if (conn->state == BT_CONFIG) {
			__u8 status;

			/* If the local controller supports peripheral-initiated
			 * features exchange, but the remote controller does
			 * not, then it is possible that the error code 0x1a
			 * for unsupported remote feature gets returned.
			 *
			 * In this specific case, allow the connection to
			 * transition into connected state and mark it as
			 * successful.
			 */
			if (!conn->out && ev->status == 0x1a &&
			    (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES))
				status = 0x00;
			else
				status = ev->status;

			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
5955 
/* Handle LE Long Term Key Request: look up the stored LTK for this
 * connection and reply with it, or send a negative reply when no
 * matching key exists or the request does not match the stored key's
 * EDiv/Rand values.
 */
static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
	if (!ltk)
		goto not_found;

	if (smp_ltk_is_sc(ltk)) {
		/* With SC both EDiv and Rand are set to zero */
		if (ev->ediv || ev->rand)
			goto not_found;
	} else {
		/* For non-SC keys check that EDiv and Rand match */
		if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
			goto not_found;
	}

	/* Copy the key and zero-pad it up to the full reply size in case
	 * the stored key is shorter than 16 bytes.
	 */
	memcpy(cp.ltk, ltk->val, ltk->enc_size);
	memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
	cp.handle = cpu_to_le16(conn->handle);

	conn->pending_sec_level = smp_ltk_sec_level(ltk);

	conn->enc_key_size = ltk->enc_size;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
	 * temporary key used to encrypt a connection following
	 * pairing. It is used during the Encrypted Session Setup to
	 * distribute the keys. Later, security can be re-established
	 * using a distributed LTK.
	 */
	if (ltk->type == SMP_STK) {
		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
		list_del_rcu(&ltk->list);
		kfree_rcu(ltk, rcu);
	} else {
		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}
6019 
6020 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
6021 				      u8 reason)
6022 {
6023 	struct hci_cp_le_conn_param_req_neg_reply cp;
6024 
6025 	cp.handle = cpu_to_le16(handle);
6026 	cp.reason = reason;
6027 
6028 	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
6029 		     &cp);
6030 }
6031 
/* Handle LE Remote Connection Parameter Request: validate the proposed
 * parameters and either accept them (storing them and notifying
 * userspace when acting as central) or send a negative reply.
 */
static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
	struct hci_cp_le_conn_param_req_reply cp;
	struct hci_conn *hcon;
	u16 handle, min, max, latency, timeout;

	handle = le16_to_cpu(ev->handle);
	min = le16_to_cpu(ev->interval_min);
	max = le16_to_cpu(ev->interval_max);
	latency = le16_to_cpu(ev->latency);
	timeout = le16_to_cpu(ev->timeout);

	/* Reject requests for unknown or not-yet-established connections */
	hcon = hci_conn_hash_lookup_handle(hdev, handle);
	if (!hcon || hcon->state != BT_CONNECTED)
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_UNKNOWN_CONN_ID);

	/* Reject parameters outside the spec-allowed ranges */
	if (hci_check_conn_params(min, max, latency, timeout))
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_INVALID_LL_PARAMS);

	if (hcon->role == HCI_ROLE_MASTER) {
		struct hci_conn_params *params;
		u8 store_hint;

		hci_dev_lock(hdev);

		/* Update any stored parameters for this peer and tell
		 * userspace whether they should be persisted.
		 */
		params = hci_conn_params_lookup(hdev, &hcon->dst,
						hcon->dst_type);
		if (params) {
			params->conn_min_interval = min;
			params->conn_max_interval = max;
			params->conn_latency = latency;
			params->supervision_timeout = timeout;
			store_hint = 0x01;
		} else {
			store_hint = 0x00;
		}

		hci_dev_unlock(hdev);

		mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency, timeout);
	}

	/* Accept the request, echoing the proposed parameters back */
	cp.handle = ev->handle;
	cp.interval_min = ev->interval_min;
	cp.interval_max = ev->interval_max;
	cp.latency = ev->latency;
	cp.timeout = ev->timeout;
	cp.min_ce_len = 0;
	cp.max_ce_len = 0;

	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
}
6089 
6090 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
6091 					 struct sk_buff *skb)
6092 {
6093 	u8 num_reports = skb->data[0];
6094 	struct hci_ev_le_direct_adv_info *ev = (void *)&skb->data[1];
6095 
6096 	if (!num_reports || skb->len < num_reports * sizeof(*ev) + 1)
6097 		return;
6098 
6099 	hci_dev_lock(hdev);
6100 
6101 	for (; num_reports; num_reports--, ev++)
6102 		process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
6103 				   ev->bdaddr_type, &ev->direct_addr,
6104 				   ev->direct_addr_type, ev->rssi, NULL, 0,
6105 				   false);
6106 
6107 	hci_dev_unlock(hdev);
6108 }
6109 
6110 static void hci_le_phy_update_evt(struct hci_dev *hdev, struct sk_buff *skb)
6111 {
6112 	struct hci_ev_le_phy_update_complete *ev = (void *) skb->data;
6113 	struct hci_conn *conn;
6114 
6115 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
6116 
6117 	if (ev->status)
6118 		return;
6119 
6120 	hci_dev_lock(hdev);
6121 
6122 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6123 	if (!conn)
6124 		goto unlock;
6125 
6126 	conn->le_tx_phy = ev->tx_phy;
6127 	conn->le_rx_phy = ev->rx_phy;
6128 
6129 unlock:
6130 	hci_dev_unlock(hdev);
6131 }
6132 
/* Dispatch an LE meta event to its subevent-specific handler. The meta
 * header is pulled off the skb first, so every handler sees the
 * subevent payload at skb->data. Unknown subevents are silently
 * ignored.
 */
static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_meta *le_ev = (void *) skb->data;

	skb_pull(skb, sizeof(*le_ev));

	switch (le_ev->subevent) {
	case HCI_EV_LE_CONN_COMPLETE:
		hci_le_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_CONN_UPDATE_COMPLETE:
		hci_le_conn_update_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_ADVERTISING_REPORT:
		hci_le_adv_report_evt(hdev, skb);
		break;

	case HCI_EV_LE_REMOTE_FEAT_COMPLETE:
		hci_le_remote_feat_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_LTK_REQ:
		hci_le_ltk_request_evt(hdev, skb);
		break;

	case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
		hci_le_remote_conn_param_req_evt(hdev, skb);
		break;

	case HCI_EV_LE_DIRECT_ADV_REPORT:
		hci_le_direct_adv_report_evt(hdev, skb);
		break;

	case HCI_EV_LE_PHY_UPDATE_COMPLETE:
		hci_le_phy_update_evt(hdev, skb);
		break;

	case HCI_EV_LE_EXT_ADV_REPORT:
		hci_le_ext_adv_report_evt(hdev, skb);
		break;

	case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
		hci_le_enh_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_EXT_ADV_SET_TERM:
		hci_le_ext_adv_term_evt(hdev, skb);
		break;

	default:
		break;
	}
}
6188 
/* Check whether @skb (the last event received for a request) is the
 * Command Complete event for @opcode, or matches an explicitly
 * expected @event. On success the event and command-complete headers
 * are pulled off the skb, so the caller's skb-based completion
 * callback sees just the return parameters.
 */
static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
				 u8 event, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;

	if (!skb)
		return false;

	if (skb->len < sizeof(*hdr)) {
		bt_dev_err(hdev, "too short HCI event");
		return false;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* When the request waits on a specific event, only the event
	 * type needs to match.
	 */
	if (event) {
		if (hdr->evt != event)
			return false;
		return true;
	}

	/* Check if request ended in Command Status - no way to retrieve
	 * any extra parameters in this case.
	 */
	if (hdr->evt == HCI_EV_CMD_STATUS)
		return false;

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
			   hdr->evt);
		return false;
	}

	if (skb->len < sizeof(*ev)) {
		bt_dev_err(hdev, "too short cmd_complete event");
		return false;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode != __le16_to_cpu(ev->opcode)) {
		BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
		       __le16_to_cpu(ev->opcode));
		return false;
	}

	return true;
}
6240 
6241 static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
6242 				  struct sk_buff *skb)
6243 {
6244 	struct hci_ev_le_advertising_info *adv;
6245 	struct hci_ev_le_direct_adv_info *direct_adv;
6246 	struct hci_ev_le_ext_adv_report *ext_adv;
6247 	const struct hci_ev_conn_complete *conn_complete = (void *)skb->data;
6248 	const struct hci_ev_conn_request *conn_request = (void *)skb->data;
6249 
6250 	hci_dev_lock(hdev);
6251 
6252 	/* If we are currently suspended and this is the first BT event seen,
6253 	 * save the wake reason associated with the event.
6254 	 */
6255 	if (!hdev->suspended || hdev->wake_reason)
6256 		goto unlock;
6257 
6258 	/* Default to remote wake. Values for wake_reason are documented in the
6259 	 * Bluez mgmt api docs.
6260 	 */
6261 	hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE;
6262 
6263 	/* Once configured for remote wakeup, we should only wake up for
6264 	 * reconnections. It's useful to see which device is waking us up so
6265 	 * keep track of the bdaddr of the connection event that woke us up.
6266 	 */
6267 	if (event == HCI_EV_CONN_REQUEST) {
6268 		bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
6269 		hdev->wake_addr_type = BDADDR_BREDR;
6270 	} else if (event == HCI_EV_CONN_COMPLETE) {
6271 		bacpy(&hdev->wake_addr, &conn_request->bdaddr);
6272 		hdev->wake_addr_type = BDADDR_BREDR;
6273 	} else if (event == HCI_EV_LE_META) {
6274 		struct hci_ev_le_meta *le_ev = (void *)skb->data;
6275 		u8 subevent = le_ev->subevent;
6276 		u8 *ptr = &skb->data[sizeof(*le_ev)];
6277 		u8 num_reports = *ptr;
6278 
6279 		if ((subevent == HCI_EV_LE_ADVERTISING_REPORT ||
6280 		     subevent == HCI_EV_LE_DIRECT_ADV_REPORT ||
6281 		     subevent == HCI_EV_LE_EXT_ADV_REPORT) &&
6282 		    num_reports) {
6283 			adv = (void *)(ptr + 1);
6284 			direct_adv = (void *)(ptr + 1);
6285 			ext_adv = (void *)(ptr + 1);
6286 
6287 			switch (subevent) {
6288 			case HCI_EV_LE_ADVERTISING_REPORT:
6289 				bacpy(&hdev->wake_addr, &adv->bdaddr);
6290 				hdev->wake_addr_type = adv->bdaddr_type;
6291 				break;
6292 			case HCI_EV_LE_DIRECT_ADV_REPORT:
6293 				bacpy(&hdev->wake_addr, &direct_adv->bdaddr);
6294 				hdev->wake_addr_type = direct_adv->bdaddr_type;
6295 				break;
6296 			case HCI_EV_LE_EXT_ADV_REPORT:
6297 				bacpy(&hdev->wake_addr, &ext_adv->bdaddr);
6298 				hdev->wake_addr_type = ext_adv->bdaddr_type;
6299 				break;
6300 			}
6301 		}
6302 	} else {
6303 		hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED;
6304 	}
6305 
6306 unlock:
6307 	hci_dev_unlock(hdev);
6308 }
6309 
/* Main HCI event dispatcher: decodes the event header, fans the event
 * out to the handler functions above and, when the event completes a
 * pending command/request, invokes the registered completion callback.
 * Consumes @skb.
 */
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	hci_req_complete_t req_complete = NULL;
	hci_req_complete_skb_t req_complete_skb = NULL;
	struct sk_buff *orig_skb = NULL;
	u8 status = 0, event = hdr->evt, req_evt = 0;
	u16 opcode = HCI_OP_NOP;

	if (!event) {
		bt_dev_warn(hdev, "Received unexpected HCI Event 00000000");
		goto done;
	}

	/* If the pending command explicitly waits for this event, look
	 * up its completion callbacks now.
	 */
	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) {
		struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
		opcode = __le16_to_cpu(cmd_hdr->opcode);
		hci_req_cmd_complete(hdev, opcode, status, &req_complete,
				     &req_complete_skb);
		req_evt = event;
	}

	/* If it looks like we might end up having to call
	 * req_complete_skb, store a pristine copy of the skb since the
	 * various handlers may modify the original one through
	 * skb_pull() calls, etc.
	 */
	if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
	    event == HCI_EV_CMD_COMPLETE)
		orig_skb = skb_clone(skb, GFP_KERNEL);

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* Store wake reason if we're suspended */
	hci_store_wake_reason(hdev, event, skb);

	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb, &opcode, &status,
				     &req_complete, &req_complete_skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb, &opcode, &status, &req_complete,
				   &req_complete_skb);
		break;

	case HCI_EV_HARDWARE_ERROR:
		hci_hardware_error_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_KEY_REFRESH_COMPLETE:
		hci_key_refresh_complete_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_REQUEST:
		hci_user_passkey_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_NOTIFY:
		hci_user_passkey_notify_evt(hdev, skb);
		break;

	case HCI_EV_KEYPRESS_NOTIFY:
		hci_keypress_notify_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

#if IS_ENABLED(CONFIG_BT_HS)
	case HCI_EV_CHANNEL_SELECTED:
		hci_chan_selected_evt(hdev, skb);
		break;

	case HCI_EV_PHY_LINK_COMPLETE:
		hci_phy_link_complete_evt(hdev, skb);
		break;

	case HCI_EV_LOGICAL_LINK_COMPLETE:
		hci_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
		hci_disconn_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
		hci_disconn_phylink_complete_evt(hdev, skb);
		break;
#endif

	case HCI_EV_NUM_COMP_BLOCKS:
		hci_num_comp_blocks_evt(hdev, skb);
		break;

	case HCI_EV_VENDOR:
		msft_vendor_evt(hdev, skb);
		break;

	default:
		BT_DBG("%s event 0x%2.2x", hdev->name, event);
		break;
	}

	if (req_complete) {
		req_complete(hdev, status, opcode);
	} else if (req_complete_skb) {
		/* The skb-based callback expects Command Complete return
		 * parameters; drop the clone when this event isn't one.
		 */
		if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
			kfree_skb(orig_skb);
			orig_skb = NULL;
		}
		req_complete_skb(hdev, status, opcode, orig_skb);
	}

done:
	kfree_skb(orig_skb);
	kfree_skb(skb);
	hdev->stat.evt_rx++;
}
6547