xref: /openbmc/linux/net/bluetooth/hci_event.c (revision eb3fcf00)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4 
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI event handling. */
26 
27 #include <asm/unaligned.h>
28 
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
32 
33 #include "hci_request.h"
34 #include "hci_debugfs.h"
35 #include "a2mp.h"
36 #include "amp.h"
37 #include "smp.h"
38 
39 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
40 		 "\x00\x00\x00\x00\x00\x00\x00\x00"
41 
42 /* Handle HCI Event packets */
43 
44 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
45 {
46 	__u8 status = *((__u8 *) skb->data);
47 
48 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
49 
50 	if (status)
51 		return;
52 
53 	clear_bit(HCI_INQUIRY, &hdev->flags);
54 	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
55 	wake_up_bit(&hdev->flags, HCI_INQUIRY);
56 
57 	hci_dev_lock(hdev);
58 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
59 	hci_dev_unlock(hdev);
60 
61 	hci_conn_check_pending(hdev);
62 }
63 
64 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
65 {
66 	__u8 status = *((__u8 *) skb->data);
67 
68 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
69 
70 	if (status)
71 		return;
72 
73 	hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
74 }
75 
76 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
77 {
78 	__u8 status = *((__u8 *) skb->data);
79 
80 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
81 
82 	if (status)
83 		return;
84 
85 	hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
86 
87 	hci_conn_check_pending(hdev);
88 }
89 
/* Command Complete handler for HCI_OP_REMOTE_NAME_REQ_CANCEL.
 * Nothing to update here; the completion is only logged.
 */
static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
95 
/* Command Complete handler for HCI_OP_ROLE_DISCOVERY: cache the role
 * reported by the controller on the connection with the given handle.
 */
static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_role_discovery *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	/* The connection may already be gone; update only if the handle
	 * still resolves.
	 */
	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->role = rp->role;

	hci_dev_unlock(hdev);
}
114 
/* Command Complete handler for HCI_OP_READ_LINK_POLICY: cache the
 * reported link policy on the matching connection.
 */
static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = __le16_to_cpu(rp->policy);

	hci_dev_unlock(hdev);
}
133 
/* Command Complete handler for HCI_OP_WRITE_LINK_POLICY: on success,
 * mirror the policy we sent into the matching connection.
 */
static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	/* Recover the parameters of the command this event completes. */
	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	/* sent + 2 skips the 2-byte connection handle at the start of
	 * the command parameters, leaving the policy field — assumes
	 * hci_cp_write_link_policy layout (handle, policy).
	 */
	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}
157 
158 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
159 					struct sk_buff *skb)
160 {
161 	struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
162 
163 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
164 
165 	if (rp->status)
166 		return;
167 
168 	hdev->link_policy = __le16_to_cpu(rp->policy);
169 }
170 
171 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
172 					 struct sk_buff *skb)
173 {
174 	__u8 status = *((__u8 *) skb->data);
175 	void *sent;
176 
177 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
178 
179 	if (status)
180 		return;
181 
182 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
183 	if (!sent)
184 		return;
185 
186 	hdev->link_policy = get_unaligned_le16(sent);
187 }
188 
/* Command Complete handler for HCI_OP_RESET: bring cached local state
 * back to post-reset defaults so it matches the controller.
 */
static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* The reset request is no longer outstanding, even on failure. */
	clear_bit(HCI_RESET, &hdev->flags);

	if (status)
		return;

	/* Reset all non-persistent flags */
	hci_dev_clear_volatile_flags(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	/* TX power values are unknown until read again. */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	/* Forget cached advertising and scan response data. */
	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;

	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
	hdev->scan_rsp_data_len = 0;

	hdev->le_scan_type = LE_SCAN_PASSIVE;

	hdev->ssp_debug_mode = 0;

	/* Drop our shadow copy of the controller's LE white list. */
	hci_bdaddr_list_clear(&hdev->le_white_list);
}
220 
221 static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
222 					struct sk_buff *skb)
223 {
224 	struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
225 	struct hci_cp_read_stored_link_key *sent;
226 
227 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
228 
229 	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
230 	if (!sent)
231 		return;
232 
233 	if (!rp->status && sent->read_all == 0x01) {
234 		hdev->stored_max_keys = rp->max_keys;
235 		hdev->stored_num_keys = rp->num_keys;
236 	}
237 }
238 
239 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
240 					  struct sk_buff *skb)
241 {
242 	struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;
243 
244 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
245 
246 	if (rp->status)
247 		return;
248 
249 	if (rp->num_keys <= hdev->stored_num_keys)
250 		hdev->stored_num_keys -= rp->num_keys;
251 	else
252 		hdev->stored_num_keys = 0;
253 }
254 
/* Command Complete handler for HCI_OP_WRITE_LOCAL_NAME. */
static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	/* With mgmt in use, report the completion (success or failure)
	 * there; otherwise just cache the name we wrote on success.
	 */
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_local_name_complete(hdev, sent, status);
	else if (!status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);
}
275 
276 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
277 {
278 	struct hci_rp_read_local_name *rp = (void *) skb->data;
279 
280 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
281 
282 	if (rp->status)
283 		return;
284 
285 	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
286 	    hci_dev_test_flag(hdev, HCI_CONFIG))
287 		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
288 }
289 
/* Command Complete handler for HCI_OP_WRITE_AUTH_ENABLE: track the
 * HCI_AUTH flag and report the result to mgmt when it is in use.
 */
static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		__u8 param = *((__u8 *) sent);

		/* Mirror the parameter we sent into the device flags. */
		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	/* mgmt is told about both success and failure. */
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_auth_enable_complete(hdev, status);

	hci_dev_unlock(hdev);
}
317 
318 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
319 {
320 	__u8 status = *((__u8 *) skb->data);
321 	__u8 param;
322 	void *sent;
323 
324 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
325 
326 	if (status)
327 		return;
328 
329 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
330 	if (!sent)
331 		return;
332 
333 	param = *((__u8 *) sent);
334 
335 	if (param)
336 		set_bit(HCI_ENCRYPT, &hdev->flags);
337 	else
338 		clear_bit(HCI_ENCRYPT, &hdev->flags);
339 }
340 
/* Command Complete handler for HCI_OP_WRITE_SCAN_ENABLE: keep the
 * HCI_ISCAN/HCI_PSCAN flags in sync with the scan mode we requested.
 */
static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u8 param;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return;

	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	/* On failure, forget any cached discoverable timeout. */
	if (status) {
		hdev->discov_timeout = 0;
		goto done;
	}

	/* Inquiry scan bit ... */
	if (param & SCAN_INQUIRY)
		set_bit(HCI_ISCAN, &hdev->flags);
	else
		clear_bit(HCI_ISCAN, &hdev->flags);

	/* ... and page scan bit tracked separately. */
	if (param & SCAN_PAGE)
		set_bit(HCI_PSCAN, &hdev->flags);
	else
		clear_bit(HCI_PSCAN, &hdev->flags);

done:
	hci_dev_unlock(hdev);
}
375 
/* Command Complete handler for HCI_OP_READ_CLASS_OF_DEV: cache the
 * 3-byte class of device value.
 */
static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_class_of_dev *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->dev_class, rp->dev_class, 3);

	/* Print with byte 2 first so the value reads most-significant
	 * byte first.
	 */
	BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
	       hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
}
390 
/* Command Complete handler for HCI_OP_WRITE_CLASS_OF_DEV: cache the
 * class we wrote on success and notify mgmt of the outcome.
 */
static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (status == 0)
		memcpy(hdev->dev_class, sent, 3);

	/* mgmt is informed of both success and failure. */
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_class_of_dev_complete(hdev, sent, status);

	hci_dev_unlock(hdev);
}
412 
413 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
414 {
415 	struct hci_rp_read_voice_setting *rp = (void *) skb->data;
416 	__u16 setting;
417 
418 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
419 
420 	if (rp->status)
421 		return;
422 
423 	setting = __le16_to_cpu(rp->voice_setting);
424 
425 	if (hdev->voice_setting == setting)
426 		return;
427 
428 	hdev->voice_setting = setting;
429 
430 	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
431 
432 	if (hdev->notify)
433 		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
434 }
435 
436 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
437 				       struct sk_buff *skb)
438 {
439 	__u8 status = *((__u8 *) skb->data);
440 	__u16 setting;
441 	void *sent;
442 
443 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
444 
445 	if (status)
446 		return;
447 
448 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
449 	if (!sent)
450 		return;
451 
452 	setting = get_unaligned_le16(sent);
453 
454 	if (hdev->voice_setting == setting)
455 		return;
456 
457 	hdev->voice_setting = setting;
458 
459 	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
460 
461 	if (hdev->notify)
462 		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
463 }
464 
465 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
466 					  struct sk_buff *skb)
467 {
468 	struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
469 
470 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
471 
472 	if (rp->status)
473 		return;
474 
475 	hdev->num_iac = rp->num_iac;
476 
477 	BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
478 }
479 
/* Command Complete handler for HCI_OP_WRITE_SSP_MODE: keep the host SSP
 * feature bit and the HCI_SSP_ENABLED flag consistent with the mode we
 * requested, and report the result to mgmt when it is in use.
 */
static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_write_ssp_mode *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	/* Host feature page bit tracks the controller-side setting. */
	if (!status) {
		if (sent->mode)
			hdev->features[1][0] |= LMP_HOST_SSP;
		else
			hdev->features[1][0] &= ~LMP_HOST_SSP;
	}

	/* With mgmt in use, it owns the HCI_SSP_ENABLED flag updates. */
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_ssp_enable_complete(hdev, sent->mode, status);
	else if (!status) {
		if (sent->mode)
			hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
	}

	hci_dev_unlock(hdev);
}
511 
/* Command Complete handler for HCI_OP_WRITE_SC_SUPPORT: keep the host
 * Secure Connections feature bit and the HCI_SC_ENABLED flag in sync
 * with the support value we requested.
 */
static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	struct hci_cp_write_sc_support *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	/* Host feature page bit tracks the controller-side setting. */
	if (!status) {
		if (sent->support)
			hdev->features[1][0] |= LMP_HOST_SC;
		else
			hdev->features[1][0] &= ~LMP_HOST_SC;
	}

	/* Unlike SSP above, the flag is only touched when mgmt is NOT
	 * in use; mgmt manages HCI_SC_ENABLED itself.
	 */
	if (!hci_dev_test_flag(hdev, HCI_MGMT) && !status) {
		if (sent->support)
			hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
	}

	hci_dev_unlock(hdev);
}
541 
542 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
543 {
544 	struct hci_rp_read_local_version *rp = (void *) skb->data;
545 
546 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
547 
548 	if (rp->status)
549 		return;
550 
551 	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
552 	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
553 		hdev->hci_ver = rp->hci_ver;
554 		hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
555 		hdev->lmp_ver = rp->lmp_ver;
556 		hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
557 		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
558 	}
559 }
560 
561 static void hci_cc_read_local_commands(struct hci_dev *hdev,
562 				       struct sk_buff *skb)
563 {
564 	struct hci_rp_read_local_commands *rp = (void *) skb->data;
565 
566 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
567 
568 	if (rp->status)
569 		return;
570 
571 	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
572 	    hci_dev_test_flag(hdev, HCI_CONFIG))
573 		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
574 }
575 
/* Command Complete handler for HCI_OP_READ_LOCAL_FEATURES: cache page 0
 * of the LMP feature mask and derive the ACL/SCO/eSCO packet types the
 * controller can use.
 */
static void hci_cc_read_local_features(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	/* Multi-slot ACL packet types. */
	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	/* Legacy SCO packet types. */
	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type  |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type  |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	/* eSCO packet types, gated on basic eSCO capability. */
	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	/* EDR eSCO rates. */
	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
}
625 
626 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
627 					   struct sk_buff *skb)
628 {
629 	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
630 
631 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
632 
633 	if (rp->status)
634 		return;
635 
636 	if (hdev->max_page < rp->max_page)
637 		hdev->max_page = rp->max_page;
638 
639 	if (rp->page < HCI_MAX_PAGES)
640 		memcpy(hdev->features[rp->page], rp->features, 8);
641 }
642 
643 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
644 					  struct sk_buff *skb)
645 {
646 	struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
647 
648 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
649 
650 	if (rp->status)
651 		return;
652 
653 	hdev->flow_ctl_mode = rp->mode;
654 }
655 
/* Command Complete handler for HCI_OP_READ_BUFFER_SIZE: record the
 * ACL/SCO MTUs and packet counts and initialize the flow-control
 * quotas to the full packet counts.
 */
static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu  = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	/* Quirk: override the reported SCO values with fixed ones for
	 * controllers known to report unusable numbers.
	 */
	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
		hdev->sco_mtu  = 64;
		hdev->sco_pkts = 8;
	}

	/* Start with a full quota of outstanding packets. */
	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
}
681 
/* Command Complete handler for HCI_OP_READ_BD_ADDR. */
static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_bd_addr *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	/* Only adopt the public address during initialization. */
	if (test_bit(HCI_INIT, &hdev->flags))
		bacpy(&hdev->bdaddr, &rp->bdaddr);

	/* During setup, additionally keep a copy as the setup address. */
	if (hci_dev_test_flag(hdev, HCI_SETUP))
		bacpy(&hdev->setup_addr, &rp->bdaddr);
}
697 
698 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
699 					   struct sk_buff *skb)
700 {
701 	struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
702 
703 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
704 
705 	if (rp->status)
706 		return;
707 
708 	if (test_bit(HCI_INIT, &hdev->flags)) {
709 		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
710 		hdev->page_scan_window = __le16_to_cpu(rp->window);
711 	}
712 }
713 
714 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
715 					    struct sk_buff *skb)
716 {
717 	u8 status = *((u8 *) skb->data);
718 	struct hci_cp_write_page_scan_activity *sent;
719 
720 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
721 
722 	if (status)
723 		return;
724 
725 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
726 	if (!sent)
727 		return;
728 
729 	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
730 	hdev->page_scan_window = __le16_to_cpu(sent->window);
731 }
732 
733 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
734 					   struct sk_buff *skb)
735 {
736 	struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
737 
738 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
739 
740 	if (rp->status)
741 		return;
742 
743 	if (test_bit(HCI_INIT, &hdev->flags))
744 		hdev->page_scan_type = rp->type;
745 }
746 
747 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
748 					struct sk_buff *skb)
749 {
750 	u8 status = *((u8 *) skb->data);
751 	u8 *type;
752 
753 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
754 
755 	if (status)
756 		return;
757 
758 	type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
759 	if (type)
760 		hdev->page_scan_type = *type;
761 }
762 
/* Command Complete handler for HCI_OP_READ_DATA_BLOCK_SIZE: record the
 * block-based flow-control parameters and initialize the block quota.
 */
static void hci_cc_read_data_block_size(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_data_block_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
	hdev->block_len = __le16_to_cpu(rp->block_len);
	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);

	/* Start with the full block quota available. */
	hdev->block_cnt = hdev->num_blocks;

	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
	       hdev->block_cnt, hdev->block_len);
}
782 
/* Command Complete handler for HCI_OP_READ_CLOCK: store either the
 * local clock (cp->which == 0x00) or the clock/accuracy of a specific
 * connection, depending on what the original command asked for.
 */
static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_clock *rp = (void *) skb->data;
	struct hci_cp_read_clock *cp;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	/* Validate the event payload length before touching rp. */
	if (skb->len < sizeof(*rp))
		return;

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!cp)
		goto unlock;

	/* which == 0x00 requested the local clock ... */
	if (cp->which == 0x00) {
		hdev->clock = le32_to_cpu(rp->clock);
		goto unlock;
	}

	/* ... otherwise the clock belongs to a specific connection. */
	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		conn->clock = le32_to_cpu(rp->clock);
		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
	}

unlock:
	hci_dev_unlock(hdev);
}
817 
/* Command Complete handler for HCI_OP_READ_LOCAL_AMP_INFO: cache all
 * AMP controller capability fields.
 */
static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->amp_status = rp->amp_status;
	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
	hdev->amp_type = rp->amp_type;
	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
}
839 
840 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
841 					 struct sk_buff *skb)
842 {
843 	struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
844 
845 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
846 
847 	if (rp->status)
848 		return;
849 
850 	hdev->inq_tx_power = rp->tx_power;
851 }
852 
/* Command Complete handler for HCI_OP_PIN_CODE_REPLY: notify mgmt of
 * the outcome and remember the PIN length on the matching connection.
 */
static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	/* mgmt is told about both success and failure. */
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
}
880 
/* Command Complete handler for HCI_OP_PIN_CODE_NEG_REPLY: relay the
 * outcome to mgmt when it is in use.
 */
static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
						 rp->status);

	hci_dev_unlock(hdev);
}
895 
896 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
897 				       struct sk_buff *skb)
898 {
899 	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
900 
901 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
902 
903 	if (rp->status)
904 		return;
905 
906 	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
907 	hdev->le_pkts = rp->le_max_pkt;
908 
909 	hdev->le_cnt = hdev->le_pkts;
910 
911 	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
912 }
913 
914 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
915 					  struct sk_buff *skb)
916 {
917 	struct hci_rp_le_read_local_features *rp = (void *) skb->data;
918 
919 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
920 
921 	if (rp->status)
922 		return;
923 
924 	memcpy(hdev->le_features, rp->features, 8);
925 }
926 
927 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
928 					struct sk_buff *skb)
929 {
930 	struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
931 
932 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
933 
934 	if (rp->status)
935 		return;
936 
937 	hdev->adv_tx_power = rp->tx_power;
938 }
939 
/* Command Complete handler for HCI_OP_USER_CONFIRM_REPLY: relay the
 * outcome to mgmt when it is in use.
 */
static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
						 rp->status);

	hci_dev_unlock(hdev);
}
954 
/* Command Complete handler for HCI_OP_USER_CONFIRM_NEG_REPLY: relay the
 * outcome to mgmt when it is in use.
 */
static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}
970 
/* Command Complete handler for HCI_OP_USER_PASSKEY_REPLY: relay the
 * outcome to mgmt when it is in use.
 */
static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
						 0, rp->status);

	hci_dev_unlock(hdev);
}
985 
/* Command Complete handler for HCI_OP_USER_PASSKEY_NEG_REPLY: relay the
 * outcome to mgmt when it is in use.
 */
static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}
1001 
/* Command Complete handler for HCI_OP_READ_LOCAL_OOB_DATA: only the
 * status byte is consumed here; the OOB payload is not processed.
 */
static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
}
1009 
/* Command Complete handler for HCI_OP_READ_LOCAL_OOB_EXT_DATA: only the
 * status byte is consumed here; the OOB payload is not processed.
 */
static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
}
1017 
/* Command Complete handler for HCI_OP_LE_SET_RANDOM_ADDR: cache the
 * random address we just programmed into the controller.
 */
static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	bdaddr_t *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	bacpy(&hdev->random_addr, sent);

	hci_dev_unlock(hdev);
}
1038 
/* Command Complete handler for HCI_OP_LE_SET_ADV_ENABLE: track the
 * HCI_LE_ADV flag and, when advertising was enabled, arm a timeout for
 * any in-progress LE connection.
 */
static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	/* If we're doing connection initiation as peripheral. Set a
	 * timeout in case something goes wrong.
	 */
	if (*sent) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

	hci_dev_unlock(hdev);
}
1073 
/* Command Complete handler for HCI_OP_LE_SET_SCAN_PARAM: remember the
 * scan type (active/passive) we configured.
 */
static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_param *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	hdev->le_scan_type = cp->type;

	hci_dev_unlock(hdev);
}
1094 
1095 static bool has_pending_adv_report(struct hci_dev *hdev)
1096 {
1097 	struct discovery_state *d = &hdev->discovery;
1098 
1099 	return bacmp(&d->last_adv_addr, BDADDR_ANY);
1100 }
1101 
1102 static void clear_pending_adv_report(struct hci_dev *hdev)
1103 {
1104 	struct discovery_state *d = &hdev->discovery;
1105 
1106 	bacpy(&d->last_adv_addr, BDADDR_ANY);
1107 	d->last_adv_data_len = 0;
1108 }
1109 
1110 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1111 				     u8 bdaddr_type, s8 rssi, u32 flags,
1112 				     u8 *data, u8 len)
1113 {
1114 	struct discovery_state *d = &hdev->discovery;
1115 
1116 	bacpy(&d->last_adv_addr, bdaddr);
1117 	d->last_adv_addr_type = bdaddr_type;
1118 	d->last_adv_rssi = rssi;
1119 	d->last_adv_flags = flags;
1120 	memcpy(d->last_adv_data, data, len);
1121 	d->last_adv_data_len = len;
1122 }
1123 
/* Command Complete handler for HCI_OP_LE_SET_SCAN_ENABLE: update the
 * HCI_LE_SCAN flag and, on scan disable, flush any pending advertising
 * report and adjust discovery/advertising state.
 */
static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	switch (cp->enable) {
	case LE_SCAN_ENABLE:
		hci_dev_set_flag(hdev, HCI_LE_SCAN);
		/* Active scans merge adv reports with scan responses, so
		 * start from a clean pending-report slot.
		 */
		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
			clear_pending_adv_report(hdev);
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		hci_dev_clear_flag(hdev, HCI_LE_SCAN);

		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request. Mark
		 * therefore discovery as stopped. If this was not
		 * because of a connect request advertising might have
		 * been disabled because of active scanning, so
		 * re-enable it again if necessary.
		 */
		if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
			 hdev->discovery.state == DISCOVERY_FINDING)
			mgmt_reenable_advertising(hdev);

		break;

	default:
		BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
		break;
	}

	hci_dev_unlock(hdev);
}
1192 
1193 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1194 					   struct sk_buff *skb)
1195 {
1196 	struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1197 
1198 	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1199 
1200 	if (rp->status)
1201 		return;
1202 
1203 	hdev->le_white_list_size = rp->size;
1204 }
1205 
1206 static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
1207 				       struct sk_buff *skb)
1208 {
1209 	__u8 status = *((__u8 *) skb->data);
1210 
1211 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1212 
1213 	if (status)
1214 		return;
1215 
1216 	hci_bdaddr_list_clear(&hdev->le_white_list);
1217 }
1218 
1219 static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
1220 					struct sk_buff *skb)
1221 {
1222 	struct hci_cp_le_add_to_white_list *sent;
1223 	__u8 status = *((__u8 *) skb->data);
1224 
1225 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1226 
1227 	if (status)
1228 		return;
1229 
1230 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
1231 	if (!sent)
1232 		return;
1233 
1234 	hci_bdaddr_list_add(&hdev->le_white_list, &sent->bdaddr,
1235 			   sent->bdaddr_type);
1236 }
1237 
1238 static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
1239 					  struct sk_buff *skb)
1240 {
1241 	struct hci_cp_le_del_from_white_list *sent;
1242 	__u8 status = *((__u8 *) skb->data);
1243 
1244 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1245 
1246 	if (status)
1247 		return;
1248 
1249 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
1250 	if (!sent)
1251 		return;
1252 
1253 	hci_bdaddr_list_del(&hdev->le_white_list, &sent->bdaddr,
1254 			    sent->bdaddr_type);
1255 }
1256 
1257 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1258 					    struct sk_buff *skb)
1259 {
1260 	struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1261 
1262 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1263 
1264 	if (rp->status)
1265 		return;
1266 
1267 	memcpy(hdev->le_states, rp->le_states, 8);
1268 }
1269 
1270 static void hci_cc_le_read_def_data_len(struct hci_dev *hdev,
1271 					struct sk_buff *skb)
1272 {
1273 	struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;
1274 
1275 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1276 
1277 	if (rp->status)
1278 		return;
1279 
1280 	hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
1281 	hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
1282 }
1283 
1284 static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
1285 					 struct sk_buff *skb)
1286 {
1287 	struct hci_cp_le_write_def_data_len *sent;
1288 	__u8 status = *((__u8 *) skb->data);
1289 
1290 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1291 
1292 	if (status)
1293 		return;
1294 
1295 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
1296 	if (!sent)
1297 		return;
1298 
1299 	hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
1300 	hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
1301 }
1302 
1303 static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
1304 					struct sk_buff *skb)
1305 {
1306 	struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;
1307 
1308 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1309 
1310 	if (rp->status)
1311 		return;
1312 
1313 	hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
1314 	hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
1315 	hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
1316 	hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
1317 }
1318 
/* Handle Command Complete for Write LE Host Supported.
 *
 * On success, mirror the host-feature bits from the original command
 * into hdev->features[1][0] and keep the HCI_LE_ENABLED flag in sync.
 * Disabling LE also clears HCI_ADVERTISING since advertising cannot be
 * active without LE support.
 */
static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_cp_write_le_host_supported *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	/* The event has no payload beyond status; use the sent command */
	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (sent->le) {
		hdev->features[1][0] |= LMP_HOST_LE;
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	} else {
		hdev->features[1][0] &= ~LMP_HOST_LE;
		hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);
	}

	if (sent->simul)
		hdev->features[1][0] |= LMP_HOST_LE_BREDR;
	else
		hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;

	hci_dev_unlock(hdev);
}
1352 
1353 static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1354 {
1355 	struct hci_cp_le_set_adv_param *cp;
1356 	u8 status = *((u8 *) skb->data);
1357 
1358 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1359 
1360 	if (status)
1361 		return;
1362 
1363 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
1364 	if (!cp)
1365 		return;
1366 
1367 	hci_dev_lock(hdev);
1368 	hdev->adv_addr_type = cp->own_address_type;
1369 	hci_dev_unlock(hdev);
1370 }
1371 
1372 static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
1373 {
1374 	struct hci_rp_read_rssi *rp = (void *) skb->data;
1375 	struct hci_conn *conn;
1376 
1377 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1378 
1379 	if (rp->status)
1380 		return;
1381 
1382 	hci_dev_lock(hdev);
1383 
1384 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1385 	if (conn)
1386 		conn->rssi = rp->rssi;
1387 
1388 	hci_dev_unlock(hdev);
1389 }
1390 
/* Handle Command Complete for Read Transmit Power Level.
 *
 * The reply only identifies the connection handle, so the type field of
 * the original command is used to decide whether the returned value is
 * the current (0x00) or maximum (0x01) transmit power for that link.
 */
static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_read_tx_power *sent;
	struct hci_rp_read_tx_power *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (!conn)
		goto unlock;

	switch (sent->type) {
	case 0x00:
		/* Current transmit power level */
		conn->tx_power = rp->tx_power;
		break;
	case 0x01:
		/* Maximum transmit power level */
		conn->max_tx_power = rp->tx_power;
		break;
	}

unlock:
	hci_dev_unlock(hdev);
}
1424 
1425 static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, struct sk_buff *skb)
1426 {
1427 	u8 status = *((u8 *) skb->data);
1428 	u8 *mode;
1429 
1430 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1431 
1432 	if (status)
1433 		return;
1434 
1435 	mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
1436 	if (mode)
1437 		hdev->ssp_debug_mode = *mode;
1438 }
1439 
1440 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1441 {
1442 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1443 
1444 	if (status) {
1445 		hci_conn_check_pending(hdev);
1446 		return;
1447 	}
1448 
1449 	set_bit(HCI_INQUIRY, &hdev->flags);
1450 }
1451 
/* Handle Command Status for Create Connection.
 *
 * On failure, tear down the matching connection object — except for a
 * 0x0c status on the first attempts, where the connection is parked in
 * BT_CONNECT2 for a later retry. On success, make sure a connection
 * object exists so the upcoming Connection Complete event has state to
 * attach to.
 */
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			/* Give up after a non-0x0c error or too many
			 * attempts; otherwise keep the object around for
			 * a retry.
			 */
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
					    HCI_ROLE_MASTER);
			if (!conn)
				BT_ERR("No memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}
1489 
/* Handle Command Status for Add SCO Connection.
 *
 * Only failures need handling here (a successful command is followed by
 * a connection complete event). The SCO link hangs off the ACL link
 * identified by the handle in the original command; close and delete it
 * and notify the upper layer of the failure.
 */
static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_add_sco *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}
1524 
1525 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1526 {
1527 	struct hci_cp_auth_requested *cp;
1528 	struct hci_conn *conn;
1529 
1530 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1531 
1532 	if (!status)
1533 		return;
1534 
1535 	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1536 	if (!cp)
1537 		return;
1538 
1539 	hci_dev_lock(hdev);
1540 
1541 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1542 	if (conn) {
1543 		if (conn->state == BT_CONFIG) {
1544 			hci_connect_cfm(conn, status);
1545 			hci_conn_drop(conn);
1546 		}
1547 	}
1548 
1549 	hci_dev_unlock(hdev);
1550 }
1551 
1552 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1553 {
1554 	struct hci_cp_set_conn_encrypt *cp;
1555 	struct hci_conn *conn;
1556 
1557 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1558 
1559 	if (!status)
1560 		return;
1561 
1562 	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1563 	if (!cp)
1564 		return;
1565 
1566 	hci_dev_lock(hdev);
1567 
1568 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1569 	if (conn) {
1570 		if (conn->state == BT_CONFIG) {
1571 			hci_connect_cfm(conn, status);
1572 			hci_conn_drop(conn);
1573 		}
1574 	}
1575 
1576 	hci_dev_unlock(hdev);
1577 }
1578 
1579 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1580 				    struct hci_conn *conn)
1581 {
1582 	if (conn->state != BT_CONFIG || !conn->out)
1583 		return 0;
1584 
1585 	if (conn->pending_sec_level == BT_SECURITY_SDP)
1586 		return 0;
1587 
1588 	/* Only request authentication for SSP connections or non-SSP
1589 	 * devices with sec_level MEDIUM or HIGH or if MITM protection
1590 	 * is requested.
1591 	 */
1592 	if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1593 	    conn->pending_sec_level != BT_SECURITY_FIPS &&
1594 	    conn->pending_sec_level != BT_SECURITY_HIGH &&
1595 	    conn->pending_sec_level != BT_SECURITY_MEDIUM)
1596 		return 0;
1597 
1598 	return 1;
1599 }
1600 
1601 static int hci_resolve_name(struct hci_dev *hdev,
1602 				   struct inquiry_entry *e)
1603 {
1604 	struct hci_cp_remote_name_req cp;
1605 
1606 	memset(&cp, 0, sizeof(cp));
1607 
1608 	bacpy(&cp.bdaddr, &e->data.bdaddr);
1609 	cp.pscan_rep_mode = e->data.pscan_rep_mode;
1610 	cp.pscan_mode = e->data.pscan_mode;
1611 	cp.clock_offset = e->data.clock_offset;
1612 
1613 	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1614 }
1615 
1616 static bool hci_resolve_next_name(struct hci_dev *hdev)
1617 {
1618 	struct discovery_state *discov = &hdev->discovery;
1619 	struct inquiry_entry *e;
1620 
1621 	if (list_empty(&discov->resolve))
1622 		return false;
1623 
1624 	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1625 	if (!e)
1626 		return false;
1627 
1628 	if (hci_resolve_name(hdev, e) == 0) {
1629 		e->name_state = NAME_PENDING;
1630 		return true;
1631 	}
1632 
1633 	return false;
1634 }
1635 
/* Process the outcome of a remote name request during discovery.
 *
 * Updates the mgmt "connected" state when a real connection exists,
 * records the resolved name (or its absence) in the inquiry cache, and
 * either moves on to the next pending name or ends discovery.
 *
 * @conn:     connection to the device, may be NULL
 * @bdaddr:   address the name request was sent to
 * @name:     resolved name, or NULL if resolution failed
 * @name_len: length of @name
 */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	/* Update the mgmt connected state if necessary. Be careful with
	 * conn objects that exist but are not (yet) connected however.
	 * Only those in BT_CONFIG or BT_CONNECTED states can be
	 * considered connected.
	 */
	if (conn &&
	    (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, name, name_len);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in a list of found devices names of which
	 * are pending. there is no need to continue resolving a next name as it
	 * will be done upon receiving another Remote Name Request Complete
	 * Event */
	if (!e)
		return;

	list_del(&e->list);
	if (name) {
		e->name_state = NAME_KNOWN;
		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
				 e->data.rssi, name, name_len);
	} else {
		e->name_state = NAME_NOT_KNOWN;
	}

	/* More names pending: keep discovery in the resolving state */
	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
1684 
/* Handle Command Status for Remote Name Request.
 *
 * A failure means no Remote Name Request Complete event will follow, so
 * the pending-name bookkeeping is finished here and, for an outgoing
 * connection that still needs it, authentication is requested now
 * instead of after name resolution.
 */
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* If successful wait for the name req complete event before
	 * checking for the need to do authentication */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	/* Let mgmt-driven discovery account for the failed resolution */
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested auth_cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		auth_cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(auth_cp), &auth_cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
1727 
1728 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1729 {
1730 	struct hci_cp_read_remote_features *cp;
1731 	struct hci_conn *conn;
1732 
1733 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1734 
1735 	if (!status)
1736 		return;
1737 
1738 	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1739 	if (!cp)
1740 		return;
1741 
1742 	hci_dev_lock(hdev);
1743 
1744 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1745 	if (conn) {
1746 		if (conn->state == BT_CONFIG) {
1747 			hci_connect_cfm(conn, status);
1748 			hci_conn_drop(conn);
1749 		}
1750 	}
1751 
1752 	hci_dev_unlock(hdev);
1753 }
1754 
1755 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1756 {
1757 	struct hci_cp_read_remote_ext_features *cp;
1758 	struct hci_conn *conn;
1759 
1760 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1761 
1762 	if (!status)
1763 		return;
1764 
1765 	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1766 	if (!cp)
1767 		return;
1768 
1769 	hci_dev_lock(hdev);
1770 
1771 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1772 	if (conn) {
1773 		if (conn->state == BT_CONFIG) {
1774 			hci_connect_cfm(conn, status);
1775 			hci_conn_drop(conn);
1776 		}
1777 	}
1778 
1779 	hci_dev_unlock(hdev);
1780 }
1781 
/* Handle Command Status for Setup Synchronous Connection.
 *
 * Only failures need handling here. The (e)SCO link hangs off the ACL
 * link identified by the handle in the original command; close and
 * delete it and notify the upper layer of the failure.
 */
static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_setup_sync_conn *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}
1816 
1817 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1818 {
1819 	struct hci_cp_sniff_mode *cp;
1820 	struct hci_conn *conn;
1821 
1822 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1823 
1824 	if (!status)
1825 		return;
1826 
1827 	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1828 	if (!cp)
1829 		return;
1830 
1831 	hci_dev_lock(hdev);
1832 
1833 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1834 	if (conn) {
1835 		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1836 
1837 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1838 			hci_sco_setup(conn, status);
1839 	}
1840 
1841 	hci_dev_unlock(hdev);
1842 }
1843 
1844 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1845 {
1846 	struct hci_cp_exit_sniff_mode *cp;
1847 	struct hci_conn *conn;
1848 
1849 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1850 
1851 	if (!status)
1852 		return;
1853 
1854 	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1855 	if (!cp)
1856 		return;
1857 
1858 	hci_dev_lock(hdev);
1859 
1860 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1861 	if (conn) {
1862 		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1863 
1864 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1865 			hci_sco_setup(conn, status);
1866 	}
1867 
1868 	hci_dev_unlock(hdev);
1869 }
1870 
1871 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1872 {
1873 	struct hci_cp_disconnect *cp;
1874 	struct hci_conn *conn;
1875 
1876 	if (!status)
1877 		return;
1878 
1879 	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1880 	if (!cp)
1881 		return;
1882 
1883 	hci_dev_lock(hdev);
1884 
1885 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1886 	if (conn)
1887 		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1888 				       conn->dst_type, status);
1889 
1890 	hci_dev_unlock(hdev);
1891 }
1892 
/* Handle Command Status for LE Create Connection.
 *
 * On success, record the initiator/responder address information needed
 * by SMP on the connection object and, when the controller is connecting
 * to an explicit peer address, arm a timeout so the attempt cannot hang
 * forever.
 */
static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* All connection failure handling is taken care of by the
	 * hci_le_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */
	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
	if (!conn)
		goto unlock;

	/* Store the initiator and responder address information which
	 * is needed for SMP. These values will not change during the
	 * lifetime of the connection.
	 */
	conn->init_addr_type = cp->own_address_type;
	if (cp->own_address_type == ADDR_LE_DEV_RANDOM)
		bacpy(&conn->init_addr, &hdev->random_addr);
	else
		bacpy(&conn->init_addr, &hdev->bdaddr);

	conn->resp_addr_type = cp->peer_addr_type;
	bacpy(&conn->resp_addr, &cp->peer_addr);

	/* We don't want the connection attempt to stick around
	 * indefinitely since LE doesn't have a page timeout concept
	 * like BR/EDR. Set a timer for any connection that doesn't use
	 * the white list for connecting.
	 */
	if (cp->filter_policy == HCI_LE_USE_PEER_ADDR)
		queue_delayed_work(conn->hdev->workqueue,
				   &conn->le_conn_timeout,
				   conn->conn_timeout);

unlock:
	hci_dev_unlock(hdev);
}
1943 
1944 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
1945 {
1946 	struct hci_cp_le_read_remote_features *cp;
1947 	struct hci_conn *conn;
1948 
1949 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1950 
1951 	if (!status)
1952 		return;
1953 
1954 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
1955 	if (!cp)
1956 		return;
1957 
1958 	hci_dev_lock(hdev);
1959 
1960 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1961 	if (conn) {
1962 		if (conn->state == BT_CONFIG) {
1963 			hci_connect_cfm(conn, status);
1964 			hci_conn_drop(conn);
1965 		}
1966 	}
1967 
1968 	hci_dev_unlock(hdev);
1969 }
1970 
/* Handle Command Status for LE Start Encryption.
 *
 * Only failures matter here. If the link is still connected, tear it
 * down with an authentication-failure reason since encryption could not
 * be started, and drop the reference held for the request.
 */
static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_start_enc *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (!conn)
		goto unlock;

	/* Nothing to do if the link already went away in the meantime */
	if (conn->state != BT_CONNECTED)
		goto unlock;

	hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
	hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}
2000 
2001 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
2002 {
2003 	struct hci_cp_switch_role *cp;
2004 	struct hci_conn *conn;
2005 
2006 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
2007 
2008 	if (!status)
2009 		return;
2010 
2011 	cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
2012 	if (!cp)
2013 		return;
2014 
2015 	hci_dev_lock(hdev);
2016 
2017 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2018 	if (conn)
2019 		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2020 
2021 	hci_dev_unlock(hdev);
2022 }
2023 
/* Handle the Inquiry Complete event.
 *
 * Clears the HCI_INQUIRY flag (waking any waiters), kicks pending
 * connection attempts, and — for mgmt-driven discovery — either moves
 * on to resolving names of found devices or marks discovery stopped.
 */
static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	hci_conn_check_pending(hdev);

	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		return;

	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	/* Discovery state below is only maintained for mgmt users */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	hci_dev_lock(hdev);

	if (discov->state != DISCOVERY_FINDING)
		goto unlock;

	if (list_empty(&discov->resolve)) {
		/* When BR/EDR inquiry is active and no LE scanning is in
		 * progress, then change discovery state to indicate completion.
		 *
		 * When running LE scanning and BR/EDR inquiry simultaneously
		 * and the LE scan already finished, then change the discovery
		 * state to indicate completion.
		 */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	/* Names still need resolving: start with the first candidate */
	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (e && hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
	} else {
		/* When BR/EDR inquiry is active and no LE scanning is in
		 * progress, then change discovery state to indicate completion.
		 *
		 * When running LE scanning and BR/EDR inquiry simultaneously
		 * and the LE scan already finished, then change the discovery
		 * state to indicate completion.
		 */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
}
2082 
/* Handle the Inquiry Result event.
 *
 * The event payload is a response count byte followed by that many
 * inquiry_info records. Each response is fed into the inquiry cache and
 * reported to mgmt as a found device. Basic inquiry results carry no
 * RSSI, hence HCI_RSSI_INVALID. Results are ignored entirely while
 * periodic inquiry is active.
 */
static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct inquiry_data data;
	struct inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp = *((__u8 *) skb->data);

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
		return;

	hci_dev_lock(hdev);

	for (; num_rsp; num_rsp--, info++) {
		u32 flags;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode	= info->pscan_rep_mode;
		data.pscan_period_mode	= info->pscan_period_mode;
		data.pscan_mode		= info->pscan_mode;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset	= info->clock_offset;
		data.rssi		= HCI_RSSI_INVALID;
		data.ssp_mode		= 0x00;

		flags = hci_inquiry_cache_update(hdev, &data, false);

		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, HCI_RSSI_INVALID,
				  flags, NULL, 0, NULL, 0);
	}

	hci_dev_unlock(hdev);
}
2120 
/* Handle the Connection Complete event.
 *
 * Finds (or, for SCO, re-types) the matching connection object. On
 * success the handle is recorded, ACL links go to BT_CONFIG and kick
 * off remote-feature discovery, others go straight to BT_CONNECTED. On
 * failure the connection is closed, reported and deleted.
 */
static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type != SCO_LINK)
			goto unlock;

		/* A SCO request may have been tracked as eSCO; retag the
		 * object to match what the controller actually set up.
		 */
		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	if (!ev->status) {
		conn->handle = __le16_to_cpu(ev->handle);

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			/* Give incoming non-SSP links without a stored
			 * link key a longer timeout to allow pairing.
			 */
			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);

		if (test_bit(HCI_AUTH, &hdev->flags))
			set_bit(HCI_CONN_AUTH, &conn->flags);

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);

			hci_update_page_scan(hdev);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}
	} else {
		conn->state = BT_CLOSED;
		if (conn->type == ACL_LINK)
			mgmt_connect_failed(hdev, &conn->dst, conn->type,
					    conn->dst_type, ev->status);
	}

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

	if (ev->status) {
		hci_connect_cfm(conn, ev->status);
		hci_conn_del(conn);
	} else if (ev->link_type != ACL_LINK)
		/* ACL links are confirmed later, once remote features and
		 * the name (if needed) have been retrieved.
		 */
		hci_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
2205 
2206 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
2207 {
2208 	struct hci_cp_reject_conn_req cp;
2209 
2210 	bacpy(&cp.bdaddr, bdaddr);
2211 	cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2212 	hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
2213 }
2214 
/* Handle the Connection Request event.
 *
 * The request is rejected when no protocol accepts it, when the peer is
 * blacklisted, or when mgmt policy (not connectable and not on the
 * whitelist) forbids it. Otherwise a connection object is created (or
 * reused) and the request is accepted immediately — as ACL or sync
 * connection accept — or deferred to the upper layer when the protocol
 * asked for HCI_PROTO_DEFER.
 */
static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;
	struct inquiry_entry *ie;
	struct hci_conn *conn;
	__u8 flags = 0;

	BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
	       ev->link_type);

	/* Let the protocols veto or defer the connection */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
				      &flags);

	if (!(mask & HCI_LM_ACCEPT)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	if (hci_bdaddr_list_lookup(&hdev->blacklist, &ev->bdaddr,
				   BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* Require HCI_CONNECTABLE or a whitelist entry to accept the
	 * connection. These features are only touched through mgmt so
	 * only do the checks if HCI_MGMT is set.
	 */
	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
	    !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
	    !hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
				    BDADDR_BREDR)) {
		    hci_reject_conn(hdev, &ev->bdaddr);
		    return;
	}

	/* Connection accepted */

	hci_dev_lock(hdev);

	/* Refresh the cached class of device for this peer */
	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		memcpy(ie->data.dev_class, ev->dev_class, 3);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
			&ev->bdaddr);
	if (!conn) {
		conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
				    HCI_ROLE_SLAVE);
		if (!conn) {
			BT_ERR("No memory for new connection");
			hci_dev_unlock(hdev);
			return;
		}
	}

	memcpy(conn->dev_class, ev->dev_class, 3);

	hci_dev_unlock(hdev);

	if (ev->link_type == ACL_LINK ||
	    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
		struct hci_cp_accept_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);

		if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
			cp.role = 0x00; /* Become master */
		else
			cp.role = 0x01; /* Remain slave */

		hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
	} else if (!(flags & HCI_PROTO_DEFER)) {
		struct hci_cp_accept_sync_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.pkt_type = cpu_to_le16(conn->pkt_type);

		cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.max_latency    = cpu_to_le16(0xffff);
		cp.content_format = cpu_to_le16(hdev->voice_setting);
		cp.retrans_effort = 0xff;

		hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
			     &cp);
	} else {
		/* Deferred: let the upper layer decide when to accept */
		conn->state = BT_CONNECT2;
		hci_connect_cfm(conn, 0);
	}
}
2309 
2310 static u8 hci_to_mgmt_reason(u8 err)
2311 {
2312 	switch (err) {
2313 	case HCI_ERROR_CONNECTION_TIMEOUT:
2314 		return MGMT_DEV_DISCONN_TIMEOUT;
2315 	case HCI_ERROR_REMOTE_USER_TERM:
2316 	case HCI_ERROR_REMOTE_LOW_RESOURCES:
2317 	case HCI_ERROR_REMOTE_POWER_OFF:
2318 		return MGMT_DEV_DISCONN_REMOTE;
2319 	case HCI_ERROR_LOCAL_HOST_TERM:
2320 		return MGMT_DEV_DISCONN_LOCAL_HOST;
2321 	default:
2322 		return MGMT_DEV_DISCONN_UNKNOWN;
2323 	}
2324 }
2325 
/* Handle HCI Disconnection Complete event.
 *
 * Tears down the matching hci_conn, notifies mgmt and the upper
 * layers, re-arms pending LE auto-connections for the peer, and
 * re-enables LE advertising when an LE link went away.
 */
static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = (void *) skb->data;
	u8 reason = hci_to_mgmt_reason(ev->reason);
	struct hci_conn_params *params;
	struct hci_conn *conn;
	bool mgmt_connected;
	u8 type;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->status) {
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, ev->status);
		goto unlock;
	}

	conn->state = BT_CLOSED;

	/* Remember whether mgmt had been told about this connection and
	 * pass that on so it can decide how to report the disconnect.
	 */
	mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
				reason, mgmt_connected);

	if (conn->type == ACL_LINK) {
		/* Drop a link key that was only valid for this connection */
		if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
			hci_remove_link_key(hdev, &conn->dst);

		hci_update_page_scan(hdev);
	}

	/* Re-queue the peer for LE auto-connection if its parameters
	 * ask for it.
	 */
	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_LINK_LOSS:
			/* Reconnect on link loss only when the remote
			 * actually timed out.
			 */
			if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
				break;
			/* Fall through */

		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			list_del_init(&params->action);
			list_add(&params->action, &hdev->pend_le_conns);
			hci_update_background_scan(hdev);
			break;

		default:
			break;
		}
	}

	/* Cache the link type: conn is freed by hci_conn_del() below */
	type = conn->type;

	hci_disconn_cfm(conn, ev->reason);
	hci_conn_del(conn);

	/* Re-enable advertising if necessary, since it might
	 * have been disabled by the connection. From the
	 * HCI_LE_Set_Advertise_Enable command description in
	 * the core specification (v4.0):
	 * "The Controller shall continue advertising until the Host
	 * issues an LE_Set_Advertise_Enable command with
	 * Advertising_Enable set to 0x00 (Advertising is disabled)
	 * or until a connection is created or until the Advertising
	 * is timed out due to Directed Advertising."
	 */
	if (type == LE_LINK)
		mgmt_reenable_advertising(hdev);

unlock:
	hci_dev_unlock(hdev);
}
2403 
/* Handle HCI Authentication Complete event.
 *
 * On success the connection's security level is raised; depending on
 * the connection phase this either triggers link encryption (SSP
 * links still in BT_CONFIG) or confirms the result to upper layers.
 * A queued encryption request (HCI_CONN_ENCRYPT_PEND) is issued or
 * failed here as well.
 */
static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		/* Legacy (non-SSP) devices cannot be re-authenticated;
		 * keep the current security level in that case.
		 */
		if (!hci_conn_ssp_enabled(conn) &&
		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
			BT_INFO("re-auth of legacy device is not possible.");
		} else {
			set_bit(HCI_CONN_AUTH, &conn->flags);
			conn->sec_level = conn->pending_sec_level;
		}
	} else {
		mgmt_auth_failed(conn, ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		/* During setup, successfully authenticated SSP links get
		 * encrypted before the connection is reported up.
		 */
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

	/* An encryption change was waiting on this authentication */
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status, 0x00);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
2468 
/* Handle HCI Remote Name Request Complete event.
 *
 * Forwards the resolved name to mgmt (when HCI_MGMT is set) and, if
 * the connection still requires it, initiates outgoing
 * authentication on the ACL link.
 */
static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto check_auth;

	/* On failure report a zero-length name so any pending lookup
	 * still completes.
	 */
	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	/* Start authentication unless one is already pending */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
2510 
/* Completion callback for HCI_Read_Encryption_Key_Size.
 *
 * Records the negotiated key size on the connection (falling back to
 * the maximum on error) and then delivers the deferred connect or
 * encrypt confirmation to the upper layers.
 */
static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status,
				       u16 opcode, struct sk_buff *skb)
{
	const struct hci_rp_read_enc_key_size *rp;
	struct hci_conn *conn;
	u16 handle;

	BT_DBG("%s status 0x%02x", hdev->name, status);

	if (!skb || skb->len < sizeof(*rp)) {
		BT_ERR("%s invalid HCI Read Encryption Key Size response",
		       hdev->name);
		return;
	}

	rp = (void *)skb->data;
	handle = le16_to_cpu(rp->handle);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn)
		goto unlock;

	/* If we fail to read the encryption key size, assume maximum
	 * (which is the same we do also when this HCI command isn't
	 * supported).
	 */
	if (rp->status) {
		BT_ERR("%s failed to read key size for handle %u", hdev->name,
		       handle);
		conn->enc_key_size = HCI_LINK_KEY_SIZE;
	} else {
		conn->enc_key_size = rp->key_size;
	}

	if (conn->state == BT_CONFIG) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, 0);
		hci_conn_drop(conn);
	} else {
		/* Reconstruct the encrypt value for the deferred
		 * encryption-change notification: 0x02 for AES-CCM
		 * (Secure Connections), 0x01 for E0, 0x00 when not
		 * encrypted.
		 */
		u8 encrypt;

		if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags))
			encrypt = 0x00;
		else if (test_bit(HCI_CONN_AES_CCM, &conn->flags))
			encrypt = 0x02;
		else
			encrypt = 0x01;

		hci_encrypt_cfm(conn, 0, encrypt);
	}

unlock:
	hci_dev_unlock(hdev);
}
2567 
/* Handle HCI Encryption Change event.
 *
 * Updates the per-connection encryption/authentication flags,
 * enforces the Secure Connections Only policy and, for newly
 * encrypted ACL links, reads the negotiated encryption key size
 * before notifying the upper layers.
 */
static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (ev->encrypt) {
			/* Encryption implies authentication */
			set_bit(HCI_CONN_AUTH, &conn->flags);
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
			conn->sec_level = conn->pending_sec_level;

			/* P-256 authentication key implies FIPS */
			if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
				set_bit(HCI_CONN_FIPS, &conn->flags);

			/* encrypt == 0x02 means AES-CCM on BR/EDR; LE
			 * links are always AES-CCM encrypted.
			 */
			if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
			    conn->type == LE_LINK)
				set_bit(HCI_CONN_AES_CCM, &conn->flags);
		} else {
			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}
	}

	/* We should disregard the current RPA and generate a new one
	 * whenever the encryption procedure fails.
	 */
	if (ev->status && conn->type == LE_LINK)
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* A failed encryption change on an established link tears the
	 * link down.
	 */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	/* In Secure Connections Only mode, do not allow any connections
	 * that are not encrypted with AES-CCM using a P-256 authenticated
	 * combination key.
	 */
	if (hci_dev_test_flag(hdev, HCI_SC_ONLY) &&
	    (!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
	     conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
		hci_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	/* Try reading the encryption key size for encrypted ACL links */
	if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
		struct hci_cp_read_enc_key_size cp;
		struct hci_request req;

		/* Only send HCI_Read_Encryption_Key_Size if the
		 * controller really supports it. If it doesn't, assume
		 * the default size (16).
		 */
		if (!(hdev->commands[20] & 0x10)) {
			conn->enc_key_size = HCI_LINK_KEY_SIZE;
			goto notify;
		}

		hci_req_init(&req, hdev);

		cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp);

		/* Notification is deferred to read_enc_key_size_complete()
		 * once the key size is known.
		 */
		if (hci_req_run_skb(&req, read_enc_key_size_complete)) {
			BT_ERR("Sending HCI Read Encryption Key Size failed");
			conn->enc_key_size = HCI_LINK_KEY_SIZE;
			goto notify;
		}

		goto unlock;
	}

notify:
	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else
		hci_encrypt_cfm(conn, ev->status, ev->encrypt);

unlock:
	hci_dev_unlock(hdev);
}
2668 
2669 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2670 					     struct sk_buff *skb)
2671 {
2672 	struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2673 	struct hci_conn *conn;
2674 
2675 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2676 
2677 	hci_dev_lock(hdev);
2678 
2679 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2680 	if (conn) {
2681 		if (!ev->status)
2682 			set_bit(HCI_CONN_SECURE, &conn->flags);
2683 
2684 		clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2685 
2686 		hci_key_change_cfm(conn, ev->status);
2687 	}
2688 
2689 	hci_dev_unlock(hdev);
2690 }
2691 
/* Handle HCI Read Remote Supported Features Complete event.
 *
 * Stores the remote feature page 0 and continues connection setup:
 * request extended features when both sides support them, otherwise
 * resolve the remote name (or report the device to mgmt) and finish
 * the connection if no outgoing authentication is needed.
 */
static void hci_remote_features_evt(struct hci_dev *hdev,
				    struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		memcpy(conn->features[0], ev->features, 8);

	if (conn->state != BT_CONFIG)
		goto unlock;

	/* Fetch extended feature page 1 when both controller and
	 * remote advertise extended feature support.
	 */
	if (!ev->status && lmp_ext_feat_capable(hdev) &&
	    lmp_ext_feat_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
			     sizeof(cp), &cp);
		goto unlock;
	}

	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
2740 
/* Handle HCI Command Complete event.
 *
 * Dispatches the embedded command response to the per-opcode handler,
 * updates command-queue flow control from the event's ncmd field and
 * completes any request waiting on this opcode. The opcode and status
 * are returned to the caller via *opcode and *status.
 */
static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
				 u16 *opcode, u8 *status,
				 hci_req_complete_t *req_complete,
				 hci_req_complete_skb_t *req_complete_skb)
{
	struct hci_ev_cmd_complete *ev = (void *) skb->data;

	*opcode = __le16_to_cpu(ev->opcode);
	/* First byte of the return parameters is the command status */
	*status = skb->data[sizeof(*ev)];

	/* Leave only the command's return parameters in the skb */
	skb_pull(skb, sizeof(*ev));

	switch (*opcode) {
	case HCI_OP_INQUIRY_CANCEL:
		hci_cc_inquiry_cancel(hdev, skb);
		break;

	case HCI_OP_PERIODIC_INQ:
		hci_cc_periodic_inq(hdev, skb);
		break;

	case HCI_OP_EXIT_PERIODIC_INQ:
		hci_cc_exit_periodic_inq(hdev, skb);
		break;

	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
		hci_cc_remote_name_req_cancel(hdev, skb);
		break;

	case HCI_OP_ROLE_DISCOVERY:
		hci_cc_role_discovery(hdev, skb);
		break;

	case HCI_OP_READ_LINK_POLICY:
		hci_cc_read_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_LINK_POLICY:
		hci_cc_write_link_policy(hdev, skb);
		break;

	case HCI_OP_READ_DEF_LINK_POLICY:
		hci_cc_read_def_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_DEF_LINK_POLICY:
		hci_cc_write_def_link_policy(hdev, skb);
		break;

	case HCI_OP_RESET:
		hci_cc_reset(hdev, skb);
		break;

	case HCI_OP_READ_STORED_LINK_KEY:
		hci_cc_read_stored_link_key(hdev, skb);
		break;

	case HCI_OP_DELETE_STORED_LINK_KEY:
		hci_cc_delete_stored_link_key(hdev, skb);
		break;

	case HCI_OP_WRITE_LOCAL_NAME:
		hci_cc_write_local_name(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_NAME:
		hci_cc_read_local_name(hdev, skb);
		break;

	case HCI_OP_WRITE_AUTH_ENABLE:
		hci_cc_write_auth_enable(hdev, skb);
		break;

	case HCI_OP_WRITE_ENCRYPT_MODE:
		hci_cc_write_encrypt_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SCAN_ENABLE:
		hci_cc_write_scan_enable(hdev, skb);
		break;

	case HCI_OP_READ_CLASS_OF_DEV:
		hci_cc_read_class_of_dev(hdev, skb);
		break;

	case HCI_OP_WRITE_CLASS_OF_DEV:
		hci_cc_write_class_of_dev(hdev, skb);
		break;

	case HCI_OP_READ_VOICE_SETTING:
		hci_cc_read_voice_setting(hdev, skb);
		break;

	case HCI_OP_WRITE_VOICE_SETTING:
		hci_cc_write_voice_setting(hdev, skb);
		break;

	case HCI_OP_READ_NUM_SUPPORTED_IAC:
		hci_cc_read_num_supported_iac(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_MODE:
		hci_cc_write_ssp_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SC_SUPPORT:
		hci_cc_write_sc_support(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_VERSION:
		hci_cc_read_local_version(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_COMMANDS:
		hci_cc_read_local_commands(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_FEATURES:
		hci_cc_read_local_features(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_EXT_FEATURES:
		hci_cc_read_local_ext_features(hdev, skb);
		break;

	case HCI_OP_READ_BUFFER_SIZE:
		hci_cc_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_READ_BD_ADDR:
		hci_cc_read_bd_addr(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
		hci_cc_read_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
		hci_cc_write_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_TYPE:
		hci_cc_read_page_scan_type(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_TYPE:
		hci_cc_write_page_scan_type(hdev, skb);
		break;

	case HCI_OP_READ_DATA_BLOCK_SIZE:
		hci_cc_read_data_block_size(hdev, skb);
		break;

	case HCI_OP_READ_FLOW_CONTROL_MODE:
		hci_cc_read_flow_control_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_INFO:
		hci_cc_read_local_amp_info(hdev, skb);
		break;

	case HCI_OP_READ_CLOCK:
		hci_cc_read_clock(hdev, skb);
		break;

	case HCI_OP_READ_INQ_RSP_TX_POWER:
		hci_cc_read_inq_rsp_tx_power(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_REPLY:
		hci_cc_pin_code_reply(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_NEG_REPLY:
		hci_cc_pin_code_neg_reply(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_DATA:
		hci_cc_read_local_oob_data(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
		hci_cc_read_local_oob_ext_data(hdev, skb);
		break;

	case HCI_OP_LE_READ_BUFFER_SIZE:
		hci_cc_le_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_LE_READ_LOCAL_FEATURES:
		hci_cc_le_read_local_features(hdev, skb);
		break;

	case HCI_OP_LE_READ_ADV_TX_POWER:
		hci_cc_le_read_adv_tx_power(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_REPLY:
		hci_cc_user_confirm_reply(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_NEG_REPLY:
		hci_cc_user_confirm_neg_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_REPLY:
		hci_cc_user_passkey_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_NEG_REPLY:
		hci_cc_user_passkey_neg_reply(hdev, skb);
		break;

	case HCI_OP_LE_SET_RANDOM_ADDR:
		hci_cc_le_set_random_addr(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_ENABLE:
		hci_cc_le_set_adv_enable(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_PARAM:
		hci_cc_le_set_scan_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_ENABLE:
		hci_cc_le_set_scan_enable(hdev, skb);
		break;

	case HCI_OP_LE_READ_WHITE_LIST_SIZE:
		hci_cc_le_read_white_list_size(hdev, skb);
		break;

	case HCI_OP_LE_CLEAR_WHITE_LIST:
		hci_cc_le_clear_white_list(hdev, skb);
		break;

	case HCI_OP_LE_ADD_TO_WHITE_LIST:
		hci_cc_le_add_to_white_list(hdev, skb);
		break;

	case HCI_OP_LE_DEL_FROM_WHITE_LIST:
		hci_cc_le_del_from_white_list(hdev, skb);
		break;

	case HCI_OP_LE_READ_SUPPORTED_STATES:
		hci_cc_le_read_supported_states(hdev, skb);
		break;

	case HCI_OP_LE_READ_DEF_DATA_LEN:
		hci_cc_le_read_def_data_len(hdev, skb);
		break;

	case HCI_OP_LE_WRITE_DEF_DATA_LEN:
		hci_cc_le_write_def_data_len(hdev, skb);
		break;

	case HCI_OP_LE_READ_MAX_DATA_LEN:
		hci_cc_le_read_max_data_len(hdev, skb);
		break;

	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
		hci_cc_write_le_host_supported(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_PARAM:
		hci_cc_set_adv_param(hdev, skb);
		break;

	case HCI_OP_READ_RSSI:
		hci_cc_read_rssi(hdev, skb);
		break;

	case HCI_OP_READ_TX_POWER:
		hci_cc_read_tx_power(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_DEBUG_MODE:
		hci_cc_write_ssp_debug_mode(hdev, skb);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
		break;
	}

	/* A completed command other than NOP resolves the command timeout */
	if (*opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	/* Re-credit the command queue unless a reset is in progress */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
		atomic_set(&hdev->cmd_cnt, 1);

	hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
			     req_complete_skb);

	/* Kick the command work if more commands are waiting */
	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
		queue_work(hdev->workqueue, &hdev->cmd_work);
}
3039 
/* Handle HCI Command Status event.
 *
 * Dispatches the status to the per-opcode handler, updates command
 * queue flow control and completes a pending request either on
 * failure or when the request is not waiting for a later event.
 * Opcode and status are returned via *opcode and *status.
 */
static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
			       u16 *opcode, u8 *status,
			       hci_req_complete_t *req_complete,
			       hci_req_complete_skb_t *req_complete_skb)
{
	struct hci_ev_cmd_status *ev = (void *) skb->data;

	skb_pull(skb, sizeof(*ev));

	*opcode = __le16_to_cpu(ev->opcode);
	*status = ev->status;

	switch (*opcode) {
	case HCI_OP_INQUIRY:
		hci_cs_inquiry(hdev, ev->status);
		break;

	case HCI_OP_CREATE_CONN:
		hci_cs_create_conn(hdev, ev->status);
		break;

	case HCI_OP_DISCONNECT:
		hci_cs_disconnect(hdev, ev->status);
		break;

	case HCI_OP_ADD_SCO:
		hci_cs_add_sco(hdev, ev->status);
		break;

	case HCI_OP_AUTH_REQUESTED:
		hci_cs_auth_requested(hdev, ev->status);
		break;

	case HCI_OP_SET_CONN_ENCRYPT:
		hci_cs_set_conn_encrypt(hdev, ev->status);
		break;

	case HCI_OP_REMOTE_NAME_REQ:
		hci_cs_remote_name_req(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_FEATURES:
		hci_cs_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_EXT_FEATURES:
		hci_cs_read_remote_ext_features(hdev, ev->status);
		break;

	case HCI_OP_SETUP_SYNC_CONN:
		hci_cs_setup_sync_conn(hdev, ev->status);
		break;

	case HCI_OP_SNIFF_MODE:
		hci_cs_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_EXIT_SNIFF_MODE:
		hci_cs_exit_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_SWITCH_ROLE:
		hci_cs_switch_role(hdev, ev->status);
		break;

	case HCI_OP_LE_CREATE_CONN:
		hci_cs_le_create_conn(hdev, ev->status);
		break;

	case HCI_OP_LE_READ_REMOTE_FEATURES:
		hci_cs_le_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_LE_START_ENC:
		hci_cs_le_start_enc(hdev, ev->status);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
		break;
	}

	/* A status for anything other than NOP resolves the command timeout */
	if (*opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	/* Re-credit the command queue unless a reset is in progress */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
		atomic_set(&hdev->cmd_cnt, 1);

	/* Indicate request completion if the command failed. Also, if
	 * we're not waiting for a special event and we get a success
	 * command status we should try to flag the request as completed
	 * (since for this kind of commands there will not be a command
	 * complete event).
	 */
	if (ev->status ||
	    (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
		hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
				     req_complete_skb);

	/* Kick the command work if more commands are waiting */
	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
		queue_work(hdev->workqueue, &hdev->cmd_work);
}
3142 
3143 static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
3144 {
3145 	struct hci_ev_hardware_error *ev = (void *) skb->data;
3146 
3147 	hdev->hw_error_code = ev->code;
3148 
3149 	queue_work(hdev->req_workqueue, &hdev->error_reset);
3150 }
3151 
3152 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3153 {
3154 	struct hci_ev_role_change *ev = (void *) skb->data;
3155 	struct hci_conn *conn;
3156 
3157 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3158 
3159 	hci_dev_lock(hdev);
3160 
3161 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3162 	if (conn) {
3163 		if (!ev->status)
3164 			conn->role = ev->role;
3165 
3166 		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3167 
3168 		hci_role_switch_cfm(conn, ev->status, ev->role);
3169 	}
3170 
3171 	hci_dev_unlock(hdev);
3172 }
3173 
/* Handle HCI Number Of Completed Packets event (packet-based flow
 * control): return transmit credits to the per-link-type counters
 * and kick the TX work so queued data can be sent.
 */
static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
	int i;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* The header check guards the num_hndl read in the second test */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_pkts_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16  handle, count;

		handle = __le16_to_cpu(info->handle);
		count  = __le16_to_cpu(info->count);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= count;

		/* Credit the matching buffer pool, never exceeding the
		 * controller's advertised packet counts.
		 */
		switch (conn->type) {
		case ACL_LINK:
			hdev->acl_cnt += count;
			if (hdev->acl_cnt > hdev->acl_pkts)
				hdev->acl_cnt = hdev->acl_pkts;
			break;

		case LE_LINK:
			/* Controllers without a dedicated LE buffer pool
			 * share the ACL pool.
			 */
			if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		case SCO_LINK:
			hdev->sco_cnt += count;
			if (hdev->sco_cnt > hdev->sco_pkts)
				hdev->sco_cnt = hdev->sco_pkts;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
3239 
3240 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
3241 						 __u16 handle)
3242 {
3243 	struct hci_chan *chan;
3244 
3245 	switch (hdev->dev_type) {
3246 	case HCI_BREDR:
3247 		return hci_conn_hash_lookup_handle(hdev, handle);
3248 	case HCI_AMP:
3249 		chan = hci_chan_lookup_handle(hdev, handle);
3250 		if (chan)
3251 			return chan->conn;
3252 		break;
3253 	default:
3254 		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3255 		break;
3256 	}
3257 
3258 	return NULL;
3259 }
3260 
/* Handle HCI Number Of Completed Data Blocks event (block-based flow
 * control): return transmit block credits to the shared pool and kick
 * the TX work so queued data can be sent.
 */
static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
	int i;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* The header check guards the num_hndl read in the second test */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
	       ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_blocks_info *info = &ev->handles[i];
		struct hci_conn *conn = NULL;
		__u16  handle, block_count;

		handle = __le16_to_cpu(info->handle);
		block_count = __le16_to_cpu(info->blocks);

		conn = __hci_conn_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= block_count;

		/* Credit the shared block pool, never exceeding the
		 * controller's advertised block count.
		 */
		switch (conn->type) {
		case ACL_LINK:
		case AMP_LINK:
			hdev->block_cnt += block_count;
			if (hdev->block_cnt > hdev->num_blocks)
				hdev->block_cnt = hdev->num_blocks;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
3310 
/* Handle HCI Mode Change event (active/sniff/hold).
 *
 * Records the new mode, tracks the power-save flag for remotely
 * initiated changes, and completes any SCO setup that was deferred
 * until the link left sniff mode.
 */
static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_mode_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		conn->mode = ev->mode;

		/* Only track power-save state for mode changes we did
		 * not request ourselves.
		 */
		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
					&conn->flags)) {
			if (conn->mode == HCI_CM_ACTIVE)
				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
			else
				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
		}

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, ev->status);
	}

	hci_dev_unlock(hdev);
}
3338 
/* Handle HCI PIN Code Request event.
 *
 * Rejects the request when the device is not bondable and we did not
 * initiate the authentication; otherwise forwards the request to
 * userspace via mgmt (when HCI_MGMT is set).
 */
static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pin_code_req *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Extend the disconnect timeout while pairing is in progress */
	if (conn->state == BT_CONNECTED) {
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
		hci_conn_drop(conn);
	}

	if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
	    !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
	} else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
		u8 secure;

		/* Tell userspace whether a 16-digit PIN is required */
		if (conn->pending_sec_level == BT_SECURITY_HIGH)
			secure = 1;
		else
			secure = 0;

		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
	}

unlock:
	hci_dev_unlock(hdev);
}
3376 
3377 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
3378 {
3379 	if (key_type == HCI_LK_CHANGED_COMBINATION)
3380 		return;
3381 
3382 	conn->pin_length = pin_len;
3383 	conn->key_type = key_type;
3384 
3385 	switch (key_type) {
3386 	case HCI_LK_LOCAL_UNIT:
3387 	case HCI_LK_REMOTE_UNIT:
3388 	case HCI_LK_DEBUG_COMBINATION:
3389 		return;
3390 	case HCI_LK_COMBINATION:
3391 		if (pin_len == 16)
3392 			conn->pending_sec_level = BT_SECURITY_HIGH;
3393 		else
3394 			conn->pending_sec_level = BT_SECURITY_MEDIUM;
3395 		break;
3396 	case HCI_LK_UNAUTH_COMBINATION_P192:
3397 	case HCI_LK_UNAUTH_COMBINATION_P256:
3398 		conn->pending_sec_level = BT_SECURITY_MEDIUM;
3399 		break;
3400 	case HCI_LK_AUTH_COMBINATION_P192:
3401 		conn->pending_sec_level = BT_SECURITY_HIGH;
3402 		break;
3403 	case HCI_LK_AUTH_COMBINATION_P256:
3404 		conn->pending_sec_level = BT_SECURITY_FIPS;
3405 		break;
3406 	}
3407 }
3408 
/* Handle HCI Link Key Request event.
 *
 * Looks up the stored link key for the peer and replies with it,
 * unless the key's strength is insufficient for the security level
 * the connection is pending on, in which case (or when no key is
 * stored) a negative reply is sent.
 */
static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = (void *) skb->data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	BT_DBG("%s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		BT_DBG("%s link key not found for %pMR", hdev->name,
		       &ev->bdaddr);
		goto not_found;
	}

	BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
	       &ev->bdaddr);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);

		/* Refuse an unauthenticated key when MITM protection
		 * was requested for this connection.
		 */
		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
			BT_DBG("%s ignoring unauthenticated key", hdev->name);
			goto not_found;
		}

		/* A combination key from a short PIN is too weak for
		 * high or FIPS security.
		 */
		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
		    (conn->pending_sec_level == BT_SECURITY_HIGH ||
		     conn->pending_sec_level == BT_SECURITY_FIPS)) {
			BT_DBG("%s ignoring key unauthenticated for high security",
			       hdev->name);
			goto not_found;
		}

		conn_set_key(conn, key->type, key->pin_len);
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}
3468 
3469 static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3470 {
3471 	struct hci_ev_link_key_notify *ev = (void *) skb->data;
3472 	struct hci_conn *conn;
3473 	struct link_key *key;
3474 	bool persistent;
3475 	u8 pin_len = 0;
3476 
3477 	BT_DBG("%s", hdev->name);
3478 
3479 	hci_dev_lock(hdev);
3480 
3481 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3482 	if (!conn)
3483 		goto unlock;
3484 
3485 	hci_conn_hold(conn);
3486 	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3487 	hci_conn_drop(conn);
3488 
3489 	set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
3490 	conn_set_key(conn, ev->key_type, conn->pin_length);
3491 
3492 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
3493 		goto unlock;
3494 
3495 	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
3496 			        ev->key_type, pin_len, &persistent);
3497 	if (!key)
3498 		goto unlock;
3499 
3500 	/* Update connection information since adding the key will have
3501 	 * fixed up the type in the case of changed combination keys.
3502 	 */
3503 	if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
3504 		conn_set_key(conn, key->type, key->pin_len);
3505 
3506 	mgmt_new_link_key(hdev, key, persistent);
3507 
3508 	/* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
3509 	 * is set. If it's not set simply remove the key from the kernel
3510 	 * list (we've still notified user space about it but with
3511 	 * store_hint being 0).
3512 	 */
3513 	if (key->type == HCI_LK_DEBUG_COMBINATION &&
3514 	    !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
3515 		list_del_rcu(&key->list);
3516 		kfree_rcu(key, rcu);
3517 		goto unlock;
3518 	}
3519 
3520 	if (persistent)
3521 		clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
3522 	else
3523 		set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
3524 
3525 unlock:
3526 	hci_dev_unlock(hdev);
3527 }
3528 
3529 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
3530 {
3531 	struct hci_ev_clock_offset *ev = (void *) skb->data;
3532 	struct hci_conn *conn;
3533 
3534 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3535 
3536 	hci_dev_lock(hdev);
3537 
3538 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3539 	if (conn && !ev->status) {
3540 		struct inquiry_entry *ie;
3541 
3542 		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3543 		if (ie) {
3544 			ie->data.clock_offset = ev->clock_offset;
3545 			ie->timestamp = jiffies;
3546 		}
3547 	}
3548 
3549 	hci_dev_unlock(hdev);
3550 }
3551 
3552 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3553 {
3554 	struct hci_ev_pkt_type_change *ev = (void *) skb->data;
3555 	struct hci_conn *conn;
3556 
3557 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3558 
3559 	hci_dev_lock(hdev);
3560 
3561 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3562 	if (conn && !ev->status)
3563 		conn->pkt_type = __le16_to_cpu(ev->pkt_type);
3564 
3565 	hci_dev_unlock(hdev);
3566 }
3567 
3568 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
3569 {
3570 	struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
3571 	struct inquiry_entry *ie;
3572 
3573 	BT_DBG("%s", hdev->name);
3574 
3575 	hci_dev_lock(hdev);
3576 
3577 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3578 	if (ie) {
3579 		ie->data.pscan_rep_mode = ev->pscan_rep_mode;
3580 		ie->timestamp = jiffies;
3581 	}
3582 
3583 	hci_dev_unlock(hdev);
3584 }
3585 
3586 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
3587 					     struct sk_buff *skb)
3588 {
3589 	struct inquiry_data data;
3590 	int num_rsp = *((__u8 *) skb->data);
3591 
3592 	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3593 
3594 	if (!num_rsp)
3595 		return;
3596 
3597 	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
3598 		return;
3599 
3600 	hci_dev_lock(hdev);
3601 
3602 	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
3603 		struct inquiry_info_with_rssi_and_pscan_mode *info;
3604 		info = (void *) (skb->data + 1);
3605 
3606 		for (; num_rsp; num_rsp--, info++) {
3607 			u32 flags;
3608 
3609 			bacpy(&data.bdaddr, &info->bdaddr);
3610 			data.pscan_rep_mode	= info->pscan_rep_mode;
3611 			data.pscan_period_mode	= info->pscan_period_mode;
3612 			data.pscan_mode		= info->pscan_mode;
3613 			memcpy(data.dev_class, info->dev_class, 3);
3614 			data.clock_offset	= info->clock_offset;
3615 			data.rssi		= info->rssi;
3616 			data.ssp_mode		= 0x00;
3617 
3618 			flags = hci_inquiry_cache_update(hdev, &data, false);
3619 
3620 			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3621 					  info->dev_class, info->rssi,
3622 					  flags, NULL, 0, NULL, 0);
3623 		}
3624 	} else {
3625 		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
3626 
3627 		for (; num_rsp; num_rsp--, info++) {
3628 			u32 flags;
3629 
3630 			bacpy(&data.bdaddr, &info->bdaddr);
3631 			data.pscan_rep_mode	= info->pscan_rep_mode;
3632 			data.pscan_period_mode	= info->pscan_period_mode;
3633 			data.pscan_mode		= 0x00;
3634 			memcpy(data.dev_class, info->dev_class, 3);
3635 			data.clock_offset	= info->clock_offset;
3636 			data.rssi		= info->rssi;
3637 			data.ssp_mode		= 0x00;
3638 
3639 			flags = hci_inquiry_cache_update(hdev, &data, false);
3640 
3641 			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3642 					  info->dev_class, info->rssi,
3643 					  flags, NULL, 0, NULL, 0);
3644 		}
3645 	}
3646 
3647 	hci_dev_unlock(hdev);
3648 }
3649 
/* HCI Read Remote Extended Features Complete event: store the reported
 * feature page and, for page 0x01, update the SSP/SC state of the
 * connection. While the connection is still in BT_CONFIG this also
 * drives setup forward (remote name lookup or mgmt notification).
 */
static void hci_remote_ext_features_evt(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->page < HCI_MAX_PAGES)
		memcpy(conn->features[ev->page], ev->features, 8);

	/* Page 0x01 carries the remote host features (SSP/SC support) */
	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

		if (ev->features[0] & LMP_HOST_SSP) {
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		} else {
			/* It is mandatory by the Bluetooth specification that
			 * Extended Inquiry Results are only used when Secure
			 * Simple Pairing is enabled, but some devices violate
			 * this.
			 *
			 * To make these devices work, the internal SSP
			 * enabled flag needs to be cleared if the remote host
			 * features do not indicate SSP support */
			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		}

		if (ev->features[0] & LMP_HOST_SC)
			set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
	}

	if (conn->state != BT_CONFIG)
		goto unlock;

	/* Before announcing the device to user space, resolve the remote
	 * name first; once it is known (or on failure) the connection is
	 * reported via mgmt_device_connected.
	 */
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	/* If no outgoing authentication is required, setup is complete */
	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3713 
/* HCI Synchronous Connection Complete event: finalizes SCO/eSCO setup.
 * On specific failure codes for outgoing attempts the connection is
 * retried with a downgraded packet type before being given up on.
 */
static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		/* When the link type in the event indicates SCO connection
		 * and lookup of the connection object fails, then check
		 * if an eSCO connection object exists.
		 *
		 * The core limits the synchronous connections to either
		 * SCO or eSCO. The eSCO connection is preferred and tried
		 * to be setup first and until successfully established,
		 * the link type will be hinted as eSCO.
		 */
		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;
	}

	switch (ev->status) {
	case 0x00:
		conn->handle = __le16_to_cpu(ev->handle);
		conn->state  = BT_CONNECTED;
		conn->type   = ev->link_type;

		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);
		break;

	case 0x10:	/* Connection Accept Timeout */
	case 0x0d:	/* Connection Rejected due to Limited Resources */
	case 0x11:	/* Unsupported Feature or Parameter Value */
	case 0x1c:	/* SCO interval rejected */
	case 0x1a:	/* Unsupported Remote Feature */
	case 0x1f:	/* Unspecified error */
	case 0x20:	/* Unsupported LMP Parameter value */
		/* For outgoing attempts, retry with a packet type the
		 * remote is more likely to accept; if the retry command
		 * was issued, wait for its completion event instead of
		 * failing the connection now.
		 */
		if (conn->out) {
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
			if (hci_setup_sync(conn, conn->link->handle))
				goto unlock;
		}
		/* fall through */

	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_connect_cfm(conn, ev->status);
	if (ev->status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
3780 
3781 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
3782 {
3783 	size_t parsed = 0;
3784 
3785 	while (parsed < eir_len) {
3786 		u8 field_len = eir[0];
3787 
3788 		if (field_len == 0)
3789 			return parsed;
3790 
3791 		parsed += field_len + 1;
3792 		eir += field_len + 1;
3793 	}
3794 
3795 	return eir_len;
3796 }
3797 
3798 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
3799 					    struct sk_buff *skb)
3800 {
3801 	struct inquiry_data data;
3802 	struct extended_inquiry_info *info = (void *) (skb->data + 1);
3803 	int num_rsp = *((__u8 *) skb->data);
3804 	size_t eir_len;
3805 
3806 	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3807 
3808 	if (!num_rsp)
3809 		return;
3810 
3811 	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
3812 		return;
3813 
3814 	hci_dev_lock(hdev);
3815 
3816 	for (; num_rsp; num_rsp--, info++) {
3817 		u32 flags;
3818 		bool name_known;
3819 
3820 		bacpy(&data.bdaddr, &info->bdaddr);
3821 		data.pscan_rep_mode	= info->pscan_rep_mode;
3822 		data.pscan_period_mode	= info->pscan_period_mode;
3823 		data.pscan_mode		= 0x00;
3824 		memcpy(data.dev_class, info->dev_class, 3);
3825 		data.clock_offset	= info->clock_offset;
3826 		data.rssi		= info->rssi;
3827 		data.ssp_mode		= 0x01;
3828 
3829 		if (hci_dev_test_flag(hdev, HCI_MGMT))
3830 			name_known = eir_has_data_type(info->data,
3831 						       sizeof(info->data),
3832 						       EIR_NAME_COMPLETE);
3833 		else
3834 			name_known = true;
3835 
3836 		flags = hci_inquiry_cache_update(hdev, &data, name_known);
3837 
3838 		eir_len = eir_get_length(info->data, sizeof(info->data));
3839 
3840 		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3841 				  info->dev_class, info->rssi,
3842 				  flags, info->data, eir_len, NULL, 0);
3843 	}
3844 
3845 	hci_dev_unlock(hdev);
3846 }
3847 
/* HCI Encryption Key Refresh Complete event: on LE links this marks the
 * end of the (re-)encryption procedure and promotes the connection's
 * security level. BR/EDR links are handled via auth_complete instead.
 */
static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
	       __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* For BR/EDR the necessary steps are taken through the
	 * auth_complete event.
	 */
	if (conn->type != LE_LINK)
		goto unlock;

	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* A failed encryption on an established link is fatal; tear the
	 * connection down with an authentication failure reason.
	 */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		/* Keep the connection around briefly after authentication
		 * completes, using the standard disconnect timeout.
		 */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3897 
3898 static u8 hci_get_auth_req(struct hci_conn *conn)
3899 {
3900 	/* If remote requests no-bonding follow that lead */
3901 	if (conn->remote_auth == HCI_AT_NO_BONDING ||
3902 	    conn->remote_auth == HCI_AT_NO_BONDING_MITM)
3903 		return conn->remote_auth | (conn->auth_type & 0x01);
3904 
3905 	/* If both remote and local have enough IO capabilities, require
3906 	 * MITM protection
3907 	 */
3908 	if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
3909 	    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
3910 		return conn->remote_auth | 0x01;
3911 
3912 	/* No MITM protection possible so ignore remote requirement */
3913 	return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
3914 }
3915 
3916 static u8 bredr_oob_data_present(struct hci_conn *conn)
3917 {
3918 	struct hci_dev *hdev = conn->hdev;
3919 	struct oob_data *data;
3920 
3921 	data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
3922 	if (!data)
3923 		return 0x00;
3924 
3925 	if (bredr_sc_enabled(hdev)) {
3926 		/* When Secure Connections is enabled, then just
3927 		 * return the present value stored with the OOB
3928 		 * data. The stored value contains the right present
3929 		 * information. However it can only be trusted when
3930 		 * not in Secure Connection Only mode.
3931 		 */
3932 		if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
3933 			return data->present;
3934 
3935 		/* When Secure Connections Only mode is enabled, then
3936 		 * the P-256 values are required. If they are not
3937 		 * available, then do not declare that OOB data is
3938 		 * present.
3939 		 */
3940 		if (!memcmp(data->rand256, ZERO_KEY, 16) ||
3941 		    !memcmp(data->hash256, ZERO_KEY, 16))
3942 			return 0x00;
3943 
3944 		return 0x02;
3945 	}
3946 
3947 	/* When Secure Connections is not enabled or actually
3948 	 * not supported by the hardware, then check that if
3949 	 * P-192 data values are present.
3950 	 */
3951 	if (!memcmp(data->rand192, ZERO_KEY, 16) ||
3952 	    !memcmp(data->hash192, ZERO_KEY, 16))
3953 		return 0x00;
3954 
3955 	return 0x01;
3956 }
3957 
/* HCI IO Capability Request event: the controller asks for our IO
 * capabilities to continue Secure Simple Pairing. Reply with our
 * capability and authentication requirement, or reject pairing when
 * our bonding policy does not allow it.
 */
static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Keep the connection alive for the duration of the pairing;
	 * the matching drop happens in simple_pair_complete.
	 */
	hci_conn_hold(conn);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	/* Allow pairing if we're pairable, the initiators of the
	 * pairing or if the remote is not requesting bonding.
	 */
	if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
	    test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
				HCI_IO_DISPLAY_YESNO : conn->io_capability;

		/* If we are initiators, there is no remote information yet */
		if (conn->remote_auth == 0xff) {
			/* Request MITM protection if our IO caps allow it
			 * except for the no-bonding case.
			 */
			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
			    conn->auth_type != HCI_AT_NO_BONDING)
				conn->auth_type |= 0x01;
		} else {
			conn->auth_type = hci_get_auth_req(conn);
		}

		/* If we're not bondable, force one of the non-bondable
		 * authentication requirement values.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
			conn->auth_type &= HCI_AT_NO_BONDING_MITM;

		cp.authentication = conn->auth_type;
		cp.oob_data = bredr_oob_data_present(conn);

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
4026 
4027 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
4028 {
4029 	struct hci_ev_io_capa_reply *ev = (void *) skb->data;
4030 	struct hci_conn *conn;
4031 
4032 	BT_DBG("%s", hdev->name);
4033 
4034 	hci_dev_lock(hdev);
4035 
4036 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4037 	if (!conn)
4038 		goto unlock;
4039 
4040 	conn->remote_cap = ev->capability;
4041 	conn->remote_auth = ev->authentication;
4042 
4043 unlock:
4044 	hci_dev_unlock(hdev);
4045 }
4046 
/* HCI User Confirmation Request event: decide whether the numeric
 * comparison can be auto-accepted in the kernel or must be forwarded
 * to user space for explicit confirmation.
 */
static void hci_user_confirm_request_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. We check the security level here since it doesn't
	 * necessarily match conn->auth_type.
	 */
	if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
	    conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). The exception is if neither
		 * side had MITM or if the local IO capability is
		 * NoInputNoOutput, in which case we do auto-accept
		 */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
		    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
		    (loc_mitm || rem_mitm)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		/* An optional debugfs-configurable delay before accepting,
		 * handled from the connection's delayed work item.
		 */
		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			queue_delayed_work(conn->hdev->workqueue,
					   &conn->auto_accept_work, delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
				  le32_to_cpu(ev->passkey), confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
4121 
4122 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
4123 					 struct sk_buff *skb)
4124 {
4125 	struct hci_ev_user_passkey_req *ev = (void *) skb->data;
4126 
4127 	BT_DBG("%s", hdev->name);
4128 
4129 	if (hci_dev_test_flag(hdev, HCI_MGMT))
4130 		mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
4131 }
4132 
4133 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
4134 					struct sk_buff *skb)
4135 {
4136 	struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
4137 	struct hci_conn *conn;
4138 
4139 	BT_DBG("%s", hdev->name);
4140 
4141 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4142 	if (!conn)
4143 		return;
4144 
4145 	conn->passkey_notify = __le32_to_cpu(ev->passkey);
4146 	conn->passkey_entered = 0;
4147 
4148 	if (hci_dev_test_flag(hdev, HCI_MGMT))
4149 		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4150 					 conn->dst_type, conn->passkey_notify,
4151 					 conn->passkey_entered);
4152 }
4153 
4154 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
4155 {
4156 	struct hci_ev_keypress_notify *ev = (void *) skb->data;
4157 	struct hci_conn *conn;
4158 
4159 	BT_DBG("%s", hdev->name);
4160 
4161 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4162 	if (!conn)
4163 		return;
4164 
4165 	switch (ev->type) {
4166 	case HCI_KEYPRESS_STARTED:
4167 		conn->passkey_entered = 0;
4168 		return;
4169 
4170 	case HCI_KEYPRESS_ENTERED:
4171 		conn->passkey_entered++;
4172 		break;
4173 
4174 	case HCI_KEYPRESS_ERASED:
4175 		conn->passkey_entered--;
4176 		break;
4177 
4178 	case HCI_KEYPRESS_CLEARED:
4179 		conn->passkey_entered = 0;
4180 		break;
4181 
4182 	case HCI_KEYPRESS_COMPLETED:
4183 		return;
4184 	}
4185 
4186 	if (hci_dev_test_flag(hdev, HCI_MGMT))
4187 		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4188 					 conn->dst_type, conn->passkey_notify,
4189 					 conn->passkey_entered);
4190 }
4191 
4192 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
4193 					 struct sk_buff *skb)
4194 {
4195 	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
4196 	struct hci_conn *conn;
4197 
4198 	BT_DBG("%s", hdev->name);
4199 
4200 	hci_dev_lock(hdev);
4201 
4202 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4203 	if (!conn)
4204 		goto unlock;
4205 
4206 	/* Reset the authentication requirement to unknown */
4207 	conn->remote_auth = 0xff;
4208 
4209 	/* To avoid duplicate auth_failed events to user space we check
4210 	 * the HCI_CONN_AUTH_PEND flag which will be set if we
4211 	 * initiated the authentication. A traditional auth_complete
4212 	 * event gets always produced as initiator and is also mapped to
4213 	 * the mgmt_auth_failed event */
4214 	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
4215 		mgmt_auth_failed(conn, ev->status);
4216 
4217 	hci_conn_drop(conn);
4218 
4219 unlock:
4220 	hci_dev_unlock(hdev);
4221 }
4222 
/* HCI Remote Host Supported Features Notification event: cache the
 * remote host features on the connection (page 1) and refresh the
 * inquiry cache's SSP capability for the device.
 */
static void hci_remote_host_features_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_remote_host_features *ev = (void *) skb->data;
	struct inquiry_entry *ie;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* Host features are stored as feature page 1 */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn)
		memcpy(conn->features[1], ev->features, 8);

	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

	hci_dev_unlock(hdev);
}
4244 
/* HCI Remote OOB Data Request event: the controller asks for the OOB
 * hash/randomizer values previously received from the remote device
 * out of band. Reply with the stored values (extended form when Secure
 * Connections is enabled) or send a negative reply if none are known.
 */
static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
	struct oob_data *data;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* OOB data is only stored when mgmt is in use */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
	if (!data) {
		struct hci_cp_remote_oob_data_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
			     sizeof(cp), &cp);
		goto unlock;
	}

	if (bredr_sc_enabled(hdev)) {
		struct hci_cp_remote_oob_ext_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* In Secure Connections Only mode the legacy P-192
		 * values must not be used; send them zeroed out.
		 */
		if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
			memset(cp.hash192, 0, sizeof(cp.hash192));
			memset(cp.rand192, 0, sizeof(cp.rand192));
		} else {
			memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
			memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
		}
		memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
		memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_remote_oob_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		memcpy(cp.hash, data->hash192, sizeof(cp.hash));
		memcpy(cp.rand, data->rand192, sizeof(cp.rand));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
4298 
4299 #if IS_ENABLED(CONFIG_BT_HS)
4300 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4301 {
4302 	struct hci_ev_channel_selected *ev = (void *)skb->data;
4303 	struct hci_conn *hcon;
4304 
4305 	BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4306 
4307 	skb_pull(skb, sizeof(*ev));
4308 
4309 	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4310 	if (!hcon)
4311 		return;
4312 
4313 	amp_read_loc_assoc_final_data(hdev, hcon);
4314 }
4315 
4316 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
4317 				      struct sk_buff *skb)
4318 {
4319 	struct hci_ev_phy_link_complete *ev = (void *) skb->data;
4320 	struct hci_conn *hcon, *bredr_hcon;
4321 
4322 	BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
4323 	       ev->status);
4324 
4325 	hci_dev_lock(hdev);
4326 
4327 	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4328 	if (!hcon) {
4329 		hci_dev_unlock(hdev);
4330 		return;
4331 	}
4332 
4333 	if (ev->status) {
4334 		hci_conn_del(hcon);
4335 		hci_dev_unlock(hdev);
4336 		return;
4337 	}
4338 
4339 	bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
4340 
4341 	hcon->state = BT_CONNECTED;
4342 	bacpy(&hcon->dst, &bredr_hcon->dst);
4343 
4344 	hci_conn_hold(hcon);
4345 	hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
4346 	hci_conn_drop(hcon);
4347 
4348 	hci_debugfs_create_conn(hcon);
4349 	hci_conn_add_sysfs(hcon);
4350 
4351 	amp_physical_cfm(bredr_hcon, hcon);
4352 
4353 	hci_dev_unlock(hdev);
4354 }
4355 
/* HCI Logical Link Complete event: create the hci_chan representing the
 * new AMP logical link and, when an L2CAP channel is waiting on it,
 * confirm the logical link so the channel can move to the AMP.
 */
static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_logical_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon;
	struct hci_chan *hchan;
	struct amp_mgr *mgr;

	BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
	       hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
	       ev->status);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	/* Create AMP hchan */
	hchan = hci_chan_create(hcon);
	if (!hchan)
		return;

	hchan->handle = le16_to_cpu(ev->handle);

	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);

	mgr = hcon->amp_mgr;
	if (mgr && mgr->bredr_chan) {
		struct l2cap_chan *bredr_chan = mgr->bredr_chan;

		l2cap_chan_lock(bredr_chan);

		/* AMP links use the controller's block MTU */
		bredr_chan->conn->mtu = hdev->block_mtu;
		l2cap_logical_cfm(bredr_chan, hchan, 0);
		hci_conn_hold(hcon);

		l2cap_chan_unlock(bredr_chan);
	}
}
4393 
4394 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
4395 					     struct sk_buff *skb)
4396 {
4397 	struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
4398 	struct hci_chan *hchan;
4399 
4400 	BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
4401 	       le16_to_cpu(ev->handle), ev->status);
4402 
4403 	if (ev->status)
4404 		return;
4405 
4406 	hci_dev_lock(hdev);
4407 
4408 	hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
4409 	if (!hchan)
4410 		goto unlock;
4411 
4412 	amp_destroy_logical_link(hchan, ev->reason);
4413 
4414 unlock:
4415 	hci_dev_unlock(hdev);
4416 }
4417 
4418 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
4419 					     struct sk_buff *skb)
4420 {
4421 	struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
4422 	struct hci_conn *hcon;
4423 
4424 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4425 
4426 	if (ev->status)
4427 		return;
4428 
4429 	hci_dev_lock(hdev);
4430 
4431 	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4432 	if (hcon) {
4433 		hcon->state = BT_CLOSED;
4434 		hci_conn_del(hcon);
4435 	}
4436 
4437 	hci_dev_unlock(hdev);
4438 }
4439 #endif
4440 
/* Handle the LE Connection Complete event: set up (or finish setting up)
 * the matching hci_conn object, record initiator/responder addresses,
 * resolve a resolvable private address back to its identity address,
 * notify mgmt and start the remote feature exchange where applicable.
 *
 * NOTE(review): ev->status is deliberately checked only after the
 * hci_conn and address bookkeeping, so that a failed connection attempt
 * is still cleaned up via hci_le_conn_failed() on the right object.
 */
static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
	struct hci_conn_params *params;
	struct hci_conn *conn;
	struct smp_irk *irk;
	u8 addr_type;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	/* All controllers implicitly stop advertising in the event of a
	 * connection, so ensure that the state bit is cleared.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	/* An outgoing attempt will already have a hci_conn in BT_CONNECT
	 * state; otherwise create one now.
	 */
	conn = hci_lookup_le_connect(hdev);
	if (!conn) {
		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr, ev->role);
		if (!conn) {
			BT_ERR("No memory for new connection");
			goto unlock;
		}

		conn->dst_type = ev->bdaddr_type;

		/* If we didn't have a hci_conn object previously
		 * but we're in master role this must be something
		 * initiated using a white list. Since white list based
		 * connections are not "first class citizens" we don't
		 * have full tracking of them. Therefore, we go ahead
		 * with a "best effort" approach of determining the
		 * initiator address based on the HCI_PRIVACY flag.
		 */
		if (conn->out) {
			conn->resp_addr_type = ev->bdaddr_type;
			bacpy(&conn->resp_addr, &ev->bdaddr);
			if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
				bacpy(&conn->init_addr, &hdev->rpa);
			} else {
				hci_copy_identity_address(hdev,
							  &conn->init_addr,
							  &conn->init_addr_type);
			}
		}
	} else {
		/* Connection established, so the timeout guard is no
		 * longer needed.
		 */
		cancel_delayed_work(&conn->le_conn_timeout);
	}

	if (!conn->out) {
		/* Set the responder (our side) address type based on
		 * the advertising address type.
		 */
		conn->resp_addr_type = hdev->adv_addr_type;
		if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM)
			bacpy(&conn->resp_addr, &hdev->random_addr);
		else
			bacpy(&conn->resp_addr, &hdev->bdaddr);

		conn->init_addr_type = ev->bdaddr_type;
		bacpy(&conn->init_addr, &ev->bdaddr);

		/* For incoming connections, set the default minimum
		 * and maximum connection interval. They will be used
		 * to check if the parameters are in range and if not
		 * trigger the connection update procedure.
		 */
		conn->le_conn_min_interval = hdev->le_conn_min_interval;
		conn->le_conn_max_interval = hdev->le_conn_max_interval;
	}

	/* Lookup the identity address from the stored connection
	 * address and address type.
	 *
	 * When establishing connections to an identity address, the
	 * connection procedure will store the resolvable random
	 * address first. Now if it can be converted back into the
	 * identity address, start using the identity address from
	 * now on.
	 */
	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
	if (irk) {
		bacpy(&conn->dst, &irk->bdaddr);
		conn->dst_type = irk->addr_type;
	}

	if (ev->status) {
		hci_le_conn_failed(conn, ev->status);
		goto unlock;
	}

	if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
		addr_type = BDADDR_LE_PUBLIC;
	else
		addr_type = BDADDR_LE_RANDOM;

	/* Drop the connection if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
		hci_conn_drop(conn);
		goto unlock;
	}

	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = __le16_to_cpu(ev->handle);
	conn->state = BT_CONFIG;

	conn->le_conn_interval = le16_to_cpu(ev->interval);
	conn->le_conn_latency = le16_to_cpu(ev->latency);
	conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);

	hci_debugfs_create_conn(conn);
	hci_conn_add_sysfs(conn);

	if (!ev->status) {
		/* The remote features procedure is defined for master
		 * role only. So only in case of an initiated connection
		 * request the remote features.
		 *
		 * If the local controller supports slave-initiated features
		 * exchange, then requesting the remote features in slave
		 * role is possible. Otherwise just transition into the
		 * connected state without requesting the remote features.
		 */
		if (conn->out ||
		    (hdev->le_features[0] & HCI_LE_SLAVE_FEATURES)) {
			struct hci_cp_le_read_remote_features cp;

			cp.handle = __cpu_to_le16(conn->handle);

			hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);

			/* Balanced by hci_conn_drop() when the feature
			 * exchange completes.
			 */
			hci_conn_hold(conn);
		} else {
			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, ev->status);
		}
	} else {
		hci_connect_cfm(conn, ev->status);
	}

	/* The pending auto-connect action has fired; release the
	 * reference stored by check_pending_le_conn(), if any.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
					   conn->dst_type);
	if (params) {
		list_del_init(&params->action);
		if (params->conn) {
			hci_conn_drop(params->conn);
			hci_conn_put(params->conn);
			params->conn = NULL;
		}
	}

unlock:
	hci_update_background_scan(hdev);
	hci_dev_unlock(hdev);
}
4602 
4603 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
4604 					    struct sk_buff *skb)
4605 {
4606 	struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
4607 	struct hci_conn *conn;
4608 
4609 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4610 
4611 	if (ev->status)
4612 		return;
4613 
4614 	hci_dev_lock(hdev);
4615 
4616 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4617 	if (conn) {
4618 		conn->le_conn_interval = le16_to_cpu(ev->interval);
4619 		conn->le_conn_latency = le16_to_cpu(ev->latency);
4620 		conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
4621 	}
4622 
4623 	hci_dev_unlock(hdev);
4624 }
4625 
/* Decide whether an advertising report should trigger a connection
 * attempt to the advertiser, and create the connection if so.
 *
 * Returns the new hci_conn on success; NULL when no attempt was made
 * or when one is already in flight (the -EBUSY case below).
 *
 * This function requires the caller holds hdev->lock.
 */
static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
					      bdaddr_t *addr,
					      u8 addr_type, u8 adv_type)
{
	struct hci_conn *conn;
	struct hci_conn_params *params;

	/* If the event is not connectable don't proceed further */
	if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
		return NULL;

	/* Ignore if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type))
		return NULL;

	/* Most controllers will fail if we try to create new connections
	 * while we have an existing one in slave role.
	 */
	if (hdev->conn_hash.le_num_slave > 0)
		return NULL;

	/* If we're not connectable only connect devices that we have in
	 * our pend_le_conns list.
	 */
	params = hci_explicit_connect_lookup(hdev, addr, addr_type);

	if (!params)
		return NULL;

	if (!params->explicit_connect) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
			/* Only devices advertising with ADV_DIRECT_IND are
			 * triggering a connection attempt. This is allowing
			 * incoming connections from slave devices.
			 */
			if (adv_type != LE_ADV_DIRECT_IND)
				return NULL;
			break;
		case HCI_AUTO_CONN_ALWAYS:
			/* Devices advertising with ADV_IND or ADV_DIRECT_IND
			 * are triggering a connection attempt. This means
			 * that incoming connections from slave devices are
			 * accepted and also outgoing connections to slave
			 * devices are established when found.
			 */
			break;
		default:
			return NULL;
		}
	}

	conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
			      HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER);
	if (!IS_ERR(conn)) {
		/* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
		 * by higher layer that tried to connect, if no then
		 * store the pointer since we don't really have any
		 * other owner of the object besides the params that
		 * triggered it. This way we can abort the connection if
		 * the parameters get removed and keep the reference
		 * count consistent once the connection is established.
		 */

		if (!params->explicit_connect)
			params->conn = hci_conn_get(conn);

		return conn;
	}

	switch (PTR_ERR(conn)) {
	case -EBUSY:
		/* If hci_connect() returns -EBUSY it means there is already
		 * an LE connection attempt going on. Since controllers don't
		 * support more than one connection attempt at the time, we
		 * don't consider this an error case.
		 */
		break;
	default:
		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
		return NULL;
	}

	return NULL;
}
4712 
4713 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
4714 			       u8 bdaddr_type, bdaddr_t *direct_addr,
4715 			       u8 direct_addr_type, s8 rssi, u8 *data, u8 len)
4716 {
4717 	struct discovery_state *d = &hdev->discovery;
4718 	struct smp_irk *irk;
4719 	struct hci_conn *conn;
4720 	bool match;
4721 	u32 flags;
4722 
4723 	/* If the direct address is present, then this report is from
4724 	 * a LE Direct Advertising Report event. In that case it is
4725 	 * important to see if the address is matching the local
4726 	 * controller address.
4727 	 */
4728 	if (direct_addr) {
4729 		/* Only resolvable random addresses are valid for these
4730 		 * kind of reports and others can be ignored.
4731 		 */
4732 		if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
4733 			return;
4734 
4735 		/* If the controller is not using resolvable random
4736 		 * addresses, then this report can be ignored.
4737 		 */
4738 		if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
4739 			return;
4740 
4741 		/* If the local IRK of the controller does not match
4742 		 * with the resolvable random address provided, then
4743 		 * this report can be ignored.
4744 		 */
4745 		if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
4746 			return;
4747 	}
4748 
4749 	/* Check if we need to convert to identity address */
4750 	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
4751 	if (irk) {
4752 		bdaddr = &irk->bdaddr;
4753 		bdaddr_type = irk->addr_type;
4754 	}
4755 
4756 	/* Check if we have been requested to connect to this device */
4757 	conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type);
4758 	if (conn && type == LE_ADV_IND) {
4759 		/* Store report for later inclusion by
4760 		 * mgmt_device_connected
4761 		 */
4762 		memcpy(conn->le_adv_data, data, len);
4763 		conn->le_adv_data_len = len;
4764 	}
4765 
4766 	/* Passive scanning shouldn't trigger any device found events,
4767 	 * except for devices marked as CONN_REPORT for which we do send
4768 	 * device found events.
4769 	 */
4770 	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
4771 		if (type == LE_ADV_DIRECT_IND)
4772 			return;
4773 
4774 		if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
4775 					       bdaddr, bdaddr_type))
4776 			return;
4777 
4778 		if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
4779 			flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
4780 		else
4781 			flags = 0;
4782 		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4783 				  rssi, flags, data, len, NULL, 0);
4784 		return;
4785 	}
4786 
4787 	/* When receiving non-connectable or scannable undirected
4788 	 * advertising reports, this means that the remote device is
4789 	 * not connectable and then clearly indicate this in the
4790 	 * device found event.
4791 	 *
4792 	 * When receiving a scan response, then there is no way to
4793 	 * know if the remote device is connectable or not. However
4794 	 * since scan responses are merged with a previously seen
4795 	 * advertising report, the flags field from that report
4796 	 * will be used.
4797 	 *
4798 	 * In the really unlikely case that a controller get confused
4799 	 * and just sends a scan response event, then it is marked as
4800 	 * not connectable as well.
4801 	 */
4802 	if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
4803 	    type == LE_ADV_SCAN_RSP)
4804 		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
4805 	else
4806 		flags = 0;
4807 
4808 	/* If there's nothing pending either store the data from this
4809 	 * event or send an immediate device found event if the data
4810 	 * should not be stored for later.
4811 	 */
4812 	if (!has_pending_adv_report(hdev)) {
4813 		/* If the report will trigger a SCAN_REQ store it for
4814 		 * later merging.
4815 		 */
4816 		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
4817 			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
4818 						 rssi, flags, data, len);
4819 			return;
4820 		}
4821 
4822 		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4823 				  rssi, flags, data, len, NULL, 0);
4824 		return;
4825 	}
4826 
4827 	/* Check if the pending report is for the same device as the new one */
4828 	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
4829 		 bdaddr_type == d->last_adv_addr_type);
4830 
4831 	/* If the pending data doesn't match this report or this isn't a
4832 	 * scan response (e.g. we got a duplicate ADV_IND) then force
4833 	 * sending of the pending data.
4834 	 */
4835 	if (type != LE_ADV_SCAN_RSP || !match) {
4836 		/* Send out whatever is in the cache, but skip duplicates */
4837 		if (!match)
4838 			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
4839 					  d->last_adv_addr_type, NULL,
4840 					  d->last_adv_rssi, d->last_adv_flags,
4841 					  d->last_adv_data,
4842 					  d->last_adv_data_len, NULL, 0);
4843 
4844 		/* If the new report will trigger a SCAN_REQ store it for
4845 		 * later merging.
4846 		 */
4847 		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
4848 			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
4849 						 rssi, flags, data, len);
4850 			return;
4851 		}
4852 
4853 		/* The advertising reports cannot be merged, so clear
4854 		 * the pending report and send out a device found event.
4855 		 */
4856 		clear_pending_adv_report(hdev);
4857 		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4858 				  rssi, flags, data, len, NULL, 0);
4859 		return;
4860 	}
4861 
4862 	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
4863 	 * the new event is a SCAN_RSP. We can therefore proceed with
4864 	 * sending a merged device found event.
4865 	 */
4866 	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
4867 			  d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
4868 			  d->last_adv_data, d->last_adv_data_len, data, len);
4869 	clear_pending_adv_report(hdev);
4870 }
4871 
4872 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
4873 {
4874 	u8 num_reports = skb->data[0];
4875 	void *ptr = &skb->data[1];
4876 
4877 	hci_dev_lock(hdev);
4878 
4879 	while (num_reports--) {
4880 		struct hci_ev_le_advertising_info *ev = ptr;
4881 		s8 rssi;
4882 
4883 		rssi = ev->data[ev->length];
4884 		process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
4885 				   ev->bdaddr_type, NULL, 0, rssi,
4886 				   ev->data, ev->length);
4887 
4888 		ptr += sizeof(*ev) + ev->length + 1;
4889 	}
4890 
4891 	hci_dev_unlock(hdev);
4892 }
4893 
4894 static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev,
4895 					    struct sk_buff *skb)
4896 {
4897 	struct hci_ev_le_remote_feat_complete *ev = (void *)skb->data;
4898 	struct hci_conn *conn;
4899 
4900 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4901 
4902 	hci_dev_lock(hdev);
4903 
4904 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4905 	if (conn) {
4906 		if (!ev->status)
4907 			memcpy(conn->features[0], ev->features, 8);
4908 
4909 		if (conn->state == BT_CONFIG) {
4910 			__u8 status;
4911 
4912 			/* If the local controller supports slave-initiated
4913 			 * features exchange, but the remote controller does
4914 			 * not, then it is possible that the error code 0x1a
4915 			 * for unsupported remote feature gets returned.
4916 			 *
4917 			 * In this specific case, allow the connection to
4918 			 * transition into connected state and mark it as
4919 			 * successful.
4920 			 */
4921 			if ((hdev->le_features[0] & HCI_LE_SLAVE_FEATURES) &&
4922 			    !conn->out && ev->status == 0x1a)
4923 				status = 0x00;
4924 			else
4925 				status = ev->status;
4926 
4927 			conn->state = BT_CONNECTED;
4928 			hci_connect_cfm(conn, status);
4929 			hci_conn_drop(conn);
4930 		}
4931 	}
4932 
4933 	hci_dev_unlock(hdev);
4934 }
4935 
/* Handle the LE Long Term Key Request event: look up an LTK that
 * matches the connection and the EDiv/Rand values in the request and
 * reply with it, or send a negative reply when no usable key exists.
 */
static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
	if (!ltk)
		goto not_found;

	if (smp_ltk_is_sc(ltk)) {
		/* With SC both EDiv and Rand are set to zero */
		if (ev->ediv || ev->rand)
			goto not_found;
	} else {
		/* For non-SC keys check that EDiv and Rand match */
		if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
			goto not_found;
	}

	/* Keys may be shorter than the 16-byte reply field; zero-pad */
	memcpy(cp.ltk, ltk->val, ltk->enc_size);
	memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
	cp.handle = cpu_to_le16(conn->handle);

	conn->pending_sec_level = smp_ltk_sec_level(ltk);

	conn->enc_key_size = ltk->enc_size;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
	 * temporary key used to encrypt a connection following
	 * pairing. It is used during the Encrypted Session Setup to
	 * distribute the keys. Later, security can be re-established
	 * using a distributed LTK.
	 */
	if (ltk->type == SMP_STK) {
		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
		/* One-shot key: remove it once it has been used */
		list_del_rcu(&ltk->list);
		kfree_rcu(ltk, rcu);
	} else {
		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	/* ev->handle is already little-endian, copy it through as-is */
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}
4999 
5000 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
5001 				      u8 reason)
5002 {
5003 	struct hci_cp_le_conn_param_req_neg_reply cp;
5004 
5005 	cp.handle = cpu_to_le16(handle);
5006 	cp.reason = reason;
5007 
5008 	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
5009 		     &cp);
5010 }
5011 
5012 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
5013 					     struct sk_buff *skb)
5014 {
5015 	struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
5016 	struct hci_cp_le_conn_param_req_reply cp;
5017 	struct hci_conn *hcon;
5018 	u16 handle, min, max, latency, timeout;
5019 
5020 	handle = le16_to_cpu(ev->handle);
5021 	min = le16_to_cpu(ev->interval_min);
5022 	max = le16_to_cpu(ev->interval_max);
5023 	latency = le16_to_cpu(ev->latency);
5024 	timeout = le16_to_cpu(ev->timeout);
5025 
5026 	hcon = hci_conn_hash_lookup_handle(hdev, handle);
5027 	if (!hcon || hcon->state != BT_CONNECTED)
5028 		return send_conn_param_neg_reply(hdev, handle,
5029 						 HCI_ERROR_UNKNOWN_CONN_ID);
5030 
5031 	if (hci_check_conn_params(min, max, latency, timeout))
5032 		return send_conn_param_neg_reply(hdev, handle,
5033 						 HCI_ERROR_INVALID_LL_PARAMS);
5034 
5035 	if (hcon->role == HCI_ROLE_MASTER) {
5036 		struct hci_conn_params *params;
5037 		u8 store_hint;
5038 
5039 		hci_dev_lock(hdev);
5040 
5041 		params = hci_conn_params_lookup(hdev, &hcon->dst,
5042 						hcon->dst_type);
5043 		if (params) {
5044 			params->conn_min_interval = min;
5045 			params->conn_max_interval = max;
5046 			params->conn_latency = latency;
5047 			params->supervision_timeout = timeout;
5048 			store_hint = 0x01;
5049 		} else{
5050 			store_hint = 0x00;
5051 		}
5052 
5053 		hci_dev_unlock(hdev);
5054 
5055 		mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
5056 				    store_hint, min, max, latency, timeout);
5057 	}
5058 
5059 	cp.handle = ev->handle;
5060 	cp.interval_min = ev->interval_min;
5061 	cp.interval_max = ev->interval_max;
5062 	cp.latency = ev->latency;
5063 	cp.timeout = ev->timeout;
5064 	cp.min_ce_len = 0;
5065 	cp.max_ce_len = 0;
5066 
5067 	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
5068 }
5069 
5070 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
5071 					 struct sk_buff *skb)
5072 {
5073 	u8 num_reports = skb->data[0];
5074 	void *ptr = &skb->data[1];
5075 
5076 	hci_dev_lock(hdev);
5077 
5078 	while (num_reports--) {
5079 		struct hci_ev_le_direct_adv_info *ev = ptr;
5080 
5081 		process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
5082 				   ev->bdaddr_type, &ev->direct_addr,
5083 				   ev->direct_addr_type, ev->rssi, NULL, 0);
5084 
5085 		ptr += sizeof(*ev);
5086 	}
5087 
5088 	hci_dev_unlock(hdev);
5089 }
5090 
/* Demultiplex an LE Meta event to the subevent-specific handler.
 * Unknown subevents are silently ignored.
 */
static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_meta *le_ev = (void *) skb->data;

	/* Strip the meta header so handlers see only subevent data */
	skb_pull(skb, sizeof(*le_ev));

	switch (le_ev->subevent) {
	case HCI_EV_LE_CONN_COMPLETE:
		hci_le_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_CONN_UPDATE_COMPLETE:
		hci_le_conn_update_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_ADVERTISING_REPORT:
		hci_le_adv_report_evt(hdev, skb);
		break;

	case HCI_EV_LE_REMOTE_FEAT_COMPLETE:
		hci_le_remote_feat_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_LTK_REQ:
		hci_le_ltk_request_evt(hdev, skb);
		break;

	case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
		hci_le_remote_conn_param_req_evt(hdev, skb);
		break;

	case HCI_EV_LE_DIRECT_ADV_REPORT:
		hci_le_direct_adv_report_evt(hdev, skb);
		break;

	default:
		break;
	}
}
5130 
5131 static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
5132 				 u8 event, struct sk_buff *skb)
5133 {
5134 	struct hci_ev_cmd_complete *ev;
5135 	struct hci_event_hdr *hdr;
5136 
5137 	if (!skb)
5138 		return false;
5139 
5140 	if (skb->len < sizeof(*hdr)) {
5141 		BT_ERR("Too short HCI event");
5142 		return false;
5143 	}
5144 
5145 	hdr = (void *) skb->data;
5146 	skb_pull(skb, HCI_EVENT_HDR_SIZE);
5147 
5148 	if (event) {
5149 		if (hdr->evt != event)
5150 			return false;
5151 		return true;
5152 	}
5153 
5154 	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
5155 		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
5156 		return false;
5157 	}
5158 
5159 	if (skb->len < sizeof(*ev)) {
5160 		BT_ERR("Too short cmd_complete event");
5161 		return false;
5162 	}
5163 
5164 	ev = (void *) skb->data;
5165 	skb_pull(skb, sizeof(*ev));
5166 
5167 	if (opcode != __le16_to_cpu(ev->opcode)) {
5168 		BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
5169 		       __le16_to_cpu(ev->opcode));
5170 		return false;
5171 	}
5172 
5173 	return true;
5174 }
5175 
/* Main HCI event dispatcher, called for every event packet received
 * from the controller. Matches the event against any pending command,
 * dispatches to the event-specific handler and finally invokes the
 * request completion callback, if one was resolved. Consumes skb.
 */
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	hci_req_complete_t req_complete = NULL;
	hci_req_complete_skb_t req_complete_skb = NULL;
	struct sk_buff *orig_skb = NULL;
	u8 status = 0, event = hdr->evt, req_evt = 0;
	u16 opcode = HCI_OP_NOP;

	/* If the sent command was explicitly waiting for this event,
	 * resolve its completion callbacks up front.
	 */
	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
		struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
		opcode = __le16_to_cpu(cmd_hdr->opcode);
		hci_req_cmd_complete(hdev, opcode, status, &req_complete,
				     &req_complete_skb);
		req_evt = event;
	}

	/* If it looks like we might end up having to call
	 * req_complete_skb, store a pristine copy of the skb since the
	 * various handlers may modify the original one through
	 * skb_pull() calls, etc.
	 */
	if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
	    event == HCI_EV_CMD_COMPLETE)
		orig_skb = skb_clone(skb, GFP_KERNEL);

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	/* Command Complete/Status events can themselves resolve the
	 * completion callbacks and the final opcode/status.
	 */
	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb, &opcode, &status,
				     &req_complete, &req_complete_skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb, &opcode, &status, &req_complete,
				   &req_complete_skb);
		break;

	case HCI_EV_HARDWARE_ERROR:
		hci_hardware_error_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_KEY_REFRESH_COMPLETE:
		hci_key_refresh_complete_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_REQUEST:
		hci_user_passkey_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_NOTIFY:
		hci_user_passkey_notify_evt(hdev, skb);
		break;

	case HCI_EV_KEYPRESS_NOTIFY:
		hci_keypress_notify_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

	/* AMP/High Speed events are only handled when HS is enabled */
#if IS_ENABLED(CONFIG_BT_HS)
	case HCI_EV_CHANNEL_SELECTED:
		hci_chan_selected_evt(hdev, skb);
		break;

	case HCI_EV_PHY_LINK_COMPLETE:
		hci_phy_link_complete_evt(hdev, skb);
		break;

	case HCI_EV_LOGICAL_LINK_COMPLETE:
		hci_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
		hci_disconn_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
		hci_disconn_phylink_complete_evt(hdev, skb);
		break;
#endif

	case HCI_EV_NUM_COMP_BLOCKS:
		hci_num_comp_blocks_evt(hdev, skb);
		break;

	default:
		BT_DBG("%s event 0x%2.2x", hdev->name, event);
		break;
	}

	if (req_complete) {
		req_complete(hdev, status, opcode);
	} else if (req_complete_skb) {
		/* Only pass the pristine copy along if it really is the
		 * expected completion; otherwise hand over NULL.
		 */
		if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
			kfree_skb(orig_skb);
			orig_skb = NULL;
		}
		req_complete_skb(hdev, status, opcode, orig_skb);
	}

	kfree_skb(orig_skb);
	kfree_skb(skb);
	hdev->stat.evt_rx++;
}
5400