xref: /openbmc/linux/net/bluetooth/hci_event.c (revision cfdb0c2d095ac5d7f09cac1317b7d0a9e8178134)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4 
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI event handling. */
26 
27 #include <asm/unaligned.h>
28 
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
32 
33 #include "hci_request.h"
34 #include "hci_debugfs.h"
35 #include "a2mp.h"
36 #include "amp.h"
37 #include "smp.h"
38 
39 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
40 		 "\x00\x00\x00\x00\x00\x00\x00\x00"
41 
42 /* Handle HCI Event packets */
43 
44 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
45 {
46 	__u8 status = *((__u8 *) skb->data);
47 
48 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
49 
50 	if (status)
51 		return;
52 
53 	clear_bit(HCI_INQUIRY, &hdev->flags);
54 	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
55 	wake_up_bit(&hdev->flags, HCI_INQUIRY);
56 
57 	hci_dev_lock(hdev);
58 	/* Set discovery state to stopped if we're not doing LE active
59 	 * scanning.
60 	 */
61 	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
62 	    hdev->le_scan_type != LE_SCAN_ACTIVE)
63 		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
64 	hci_dev_unlock(hdev);
65 
66 	hci_conn_check_pending(hdev);
67 }
68 
69 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
70 {
71 	__u8 status = *((__u8 *) skb->data);
72 
73 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
74 
75 	if (status)
76 		return;
77 
78 	hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
79 }
80 
81 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
82 {
83 	__u8 status = *((__u8 *) skb->data);
84 
85 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
86 
87 	if (status)
88 		return;
89 
90 	hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
91 
92 	hci_conn_check_pending(hdev);
93 }
94 
95 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
96 					  struct sk_buff *skb)
97 {
98 	BT_DBG("%s", hdev->name);
99 }
100 
101 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
102 {
103 	struct hci_rp_role_discovery *rp = (void *) skb->data;
104 	struct hci_conn *conn;
105 
106 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
107 
108 	if (rp->status)
109 		return;
110 
111 	hci_dev_lock(hdev);
112 
113 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
114 	if (conn)
115 		conn->role = rp->role;
116 
117 	hci_dev_unlock(hdev);
118 }
119 
120 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
121 {
122 	struct hci_rp_read_link_policy *rp = (void *) skb->data;
123 	struct hci_conn *conn;
124 
125 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
126 
127 	if (rp->status)
128 		return;
129 
130 	hci_dev_lock(hdev);
131 
132 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
133 	if (conn)
134 		conn->link_policy = __le16_to_cpu(rp->policy);
135 
136 	hci_dev_unlock(hdev);
137 }
138 
139 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
140 {
141 	struct hci_rp_write_link_policy *rp = (void *) skb->data;
142 	struct hci_conn *conn;
143 	void *sent;
144 
145 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
146 
147 	if (rp->status)
148 		return;
149 
150 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
151 	if (!sent)
152 		return;
153 
154 	hci_dev_lock(hdev);
155 
156 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
157 	if (conn)
158 		conn->link_policy = get_unaligned_le16(sent + 2);
159 
160 	hci_dev_unlock(hdev);
161 }
162 
163 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
164 					struct sk_buff *skb)
165 {
166 	struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
167 
168 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
169 
170 	if (rp->status)
171 		return;
172 
173 	hdev->link_policy = __le16_to_cpu(rp->policy);
174 }
175 
176 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
177 					 struct sk_buff *skb)
178 {
179 	__u8 status = *((__u8 *) skb->data);
180 	void *sent;
181 
182 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
183 
184 	if (status)
185 		return;
186 
187 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
188 	if (!sent)
189 		return;
190 
191 	hdev->link_policy = get_unaligned_le16(sent);
192 }
193 
/* Command Complete handler for HCI_Reset.
 *
 * HCI_RESET is cleared unconditionally (the command is no longer in
 * flight either way). On success, every piece of state a controller
 * reset invalidates is dropped: volatile dev flags, discovery state,
 * cached TX power values, advertising/scan-response data, the LE scan
 * type, SSP debug mode and the LE white/resolving lists.
 */
static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	clear_bit(HCI_RESET, &hdev->flags);

	if (status)
		return;

	/* Reset all non-persistent flags */
	hci_dev_clear_volatile_flags(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	/* TX power levels must be re-read from the controller */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;

	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
	hdev->scan_rsp_data_len = 0;

	hdev->le_scan_type = LE_SCAN_PASSIVE;

	hdev->ssp_debug_mode = 0;

	hci_bdaddr_list_clear(&hdev->le_white_list);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);
}
226 
227 static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
228 					struct sk_buff *skb)
229 {
230 	struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
231 	struct hci_cp_read_stored_link_key *sent;
232 
233 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
234 
235 	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
236 	if (!sent)
237 		return;
238 
239 	if (!rp->status && sent->read_all == 0x01) {
240 		hdev->stored_max_keys = rp->max_keys;
241 		hdev->stored_num_keys = rp->num_keys;
242 	}
243 }
244 
245 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
246 					  struct sk_buff *skb)
247 {
248 	struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;
249 
250 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
251 
252 	if (rp->status)
253 		return;
254 
255 	if (rp->num_keys <= hdev->stored_num_keys)
256 		hdev->stored_num_keys -= rp->num_keys;
257 	else
258 		hdev->stored_num_keys = 0;
259 }
260 
261 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
262 {
263 	__u8 status = *((__u8 *) skb->data);
264 	void *sent;
265 
266 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
267 
268 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
269 	if (!sent)
270 		return;
271 
272 	hci_dev_lock(hdev);
273 
274 	if (hci_dev_test_flag(hdev, HCI_MGMT))
275 		mgmt_set_local_name_complete(hdev, sent, status);
276 	else if (!status)
277 		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
278 
279 	hci_dev_unlock(hdev);
280 }
281 
282 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
283 {
284 	struct hci_rp_read_local_name *rp = (void *) skb->data;
285 
286 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
287 
288 	if (rp->status)
289 		return;
290 
291 	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
292 	    hci_dev_test_flag(hdev, HCI_CONFIG))
293 		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
294 }
295 
296 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
297 {
298 	__u8 status = *((__u8 *) skb->data);
299 	void *sent;
300 
301 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
302 
303 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
304 	if (!sent)
305 		return;
306 
307 	hci_dev_lock(hdev);
308 
309 	if (!status) {
310 		__u8 param = *((__u8 *) sent);
311 
312 		if (param == AUTH_ENABLED)
313 			set_bit(HCI_AUTH, &hdev->flags);
314 		else
315 			clear_bit(HCI_AUTH, &hdev->flags);
316 	}
317 
318 	if (hci_dev_test_flag(hdev, HCI_MGMT))
319 		mgmt_auth_enable_complete(hdev, status);
320 
321 	hci_dev_unlock(hdev);
322 }
323 
324 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
325 {
326 	__u8 status = *((__u8 *) skb->data);
327 	__u8 param;
328 	void *sent;
329 
330 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
331 
332 	if (status)
333 		return;
334 
335 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
336 	if (!sent)
337 		return;
338 
339 	param = *((__u8 *) sent);
340 
341 	if (param)
342 		set_bit(HCI_ENCRYPT, &hdev->flags);
343 	else
344 		clear_bit(HCI_ENCRYPT, &hdev->flags);
345 }
346 
347 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
348 {
349 	__u8 status = *((__u8 *) skb->data);
350 	__u8 param;
351 	void *sent;
352 
353 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
354 
355 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
356 	if (!sent)
357 		return;
358 
359 	param = *((__u8 *) sent);
360 
361 	hci_dev_lock(hdev);
362 
363 	if (status) {
364 		hdev->discov_timeout = 0;
365 		goto done;
366 	}
367 
368 	if (param & SCAN_INQUIRY)
369 		set_bit(HCI_ISCAN, &hdev->flags);
370 	else
371 		clear_bit(HCI_ISCAN, &hdev->flags);
372 
373 	if (param & SCAN_PAGE)
374 		set_bit(HCI_PSCAN, &hdev->flags);
375 	else
376 		clear_bit(HCI_PSCAN, &hdev->flags);
377 
378 done:
379 	hci_dev_unlock(hdev);
380 }
381 
382 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
383 {
384 	struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
385 
386 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
387 
388 	if (rp->status)
389 		return;
390 
391 	memcpy(hdev->dev_class, rp->dev_class, 3);
392 
393 	BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
394 	       hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
395 }
396 
397 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
398 {
399 	__u8 status = *((__u8 *) skb->data);
400 	void *sent;
401 
402 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
403 
404 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
405 	if (!sent)
406 		return;
407 
408 	hci_dev_lock(hdev);
409 
410 	if (status == 0)
411 		memcpy(hdev->dev_class, sent, 3);
412 
413 	if (hci_dev_test_flag(hdev, HCI_MGMT))
414 		mgmt_set_class_of_dev_complete(hdev, sent, status);
415 
416 	hci_dev_unlock(hdev);
417 }
418 
419 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
420 {
421 	struct hci_rp_read_voice_setting *rp = (void *) skb->data;
422 	__u16 setting;
423 
424 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
425 
426 	if (rp->status)
427 		return;
428 
429 	setting = __le16_to_cpu(rp->voice_setting);
430 
431 	if (hdev->voice_setting == setting)
432 		return;
433 
434 	hdev->voice_setting = setting;
435 
436 	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
437 
438 	if (hdev->notify)
439 		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
440 }
441 
442 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
443 				       struct sk_buff *skb)
444 {
445 	__u8 status = *((__u8 *) skb->data);
446 	__u16 setting;
447 	void *sent;
448 
449 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
450 
451 	if (status)
452 		return;
453 
454 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
455 	if (!sent)
456 		return;
457 
458 	setting = get_unaligned_le16(sent);
459 
460 	if (hdev->voice_setting == setting)
461 		return;
462 
463 	hdev->voice_setting = setting;
464 
465 	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
466 
467 	if (hdev->notify)
468 		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
469 }
470 
471 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
472 					  struct sk_buff *skb)
473 {
474 	struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
475 
476 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
477 
478 	if (rp->status)
479 		return;
480 
481 	hdev->num_iac = rp->num_iac;
482 
483 	BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
484 }
485 
/* Command Complete handler for HCI_Write_Simple_Pairing_Mode.
 *
 * On success the host SSP feature bit in features[1][0] is mirrored to
 * the requested mode. mgmt (when managing this adapter) is informed of
 * the result regardless of status; otherwise the HCI_SSP_ENABLED dev
 * flag is updated directly on success.
 */
static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_write_ssp_mode *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		if (sent->mode)
			hdev->features[1][0] |= LMP_HOST_SSP;
		else
			hdev->features[1][0] &= ~LMP_HOST_SSP;
	}

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_ssp_enable_complete(hdev, sent->mode, status);
	else if (!status) {
		if (sent->mode)
			hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
	}

	hci_dev_unlock(hdev);
}
517 
/* Command Complete handler for HCI_Write_Secure_Connections_Host_Support.
 *
 * On success the host SC feature bit in features[1][0] is mirrored to
 * the requested support value. When mgmt is NOT managing the adapter,
 * the HCI_SC_ENABLED dev flag is also updated on success (with mgmt in
 * control the flag is handled elsewhere).
 */
static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	struct hci_cp_write_sc_support *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		if (sent->support)
			hdev->features[1][0] |= LMP_HOST_SC;
		else
			hdev->features[1][0] &= ~LMP_HOST_SC;
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT) && !status) {
		if (sent->support)
			hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
	}

	hci_dev_unlock(hdev);
}
547 
548 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
549 {
550 	struct hci_rp_read_local_version *rp = (void *) skb->data;
551 
552 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
553 
554 	if (rp->status)
555 		return;
556 
557 	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
558 	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
559 		hdev->hci_ver = rp->hci_ver;
560 		hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
561 		hdev->lmp_ver = rp->lmp_ver;
562 		hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
563 		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
564 	}
565 }
566 
567 static void hci_cc_read_local_commands(struct hci_dev *hdev,
568 				       struct sk_buff *skb)
569 {
570 	struct hci_rp_read_local_commands *rp = (void *) skb->data;
571 
572 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
573 
574 	if (rp->status)
575 		return;
576 
577 	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
578 	    hci_dev_test_flag(hdev, HCI_CONFIG))
579 		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
580 }
581 
/* Command Complete handler for HCI_Read_Local_Supported_Features.
 *
 * Caches feature page 0 and widens the default ACL packet types and
 * (e)SCO link types according to what the controller advertises.
 */
static void hci_cc_read_local_features(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	/* Multi-slot ACL packets */
	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	/* SCO packet types */
	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type  |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type  |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	/* eSCO packet types */
	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	/* EDR eSCO packet types */
	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
}
631 
632 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
633 					   struct sk_buff *skb)
634 {
635 	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
636 
637 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
638 
639 	if (rp->status)
640 		return;
641 
642 	if (hdev->max_page < rp->max_page)
643 		hdev->max_page = rp->max_page;
644 
645 	if (rp->page < HCI_MAX_PAGES)
646 		memcpy(hdev->features[rp->page], rp->features, 8);
647 }
648 
649 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
650 					  struct sk_buff *skb)
651 {
652 	struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
653 
654 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
655 
656 	if (rp->status)
657 		return;
658 
659 	hdev->flow_ctl_mode = rp->mode;
660 }
661 
/* Command Complete handler for HCI_Read_Buffer_Size.
 *
 * Caches the controller's ACL/SCO MTUs and buffer counts and starts the
 * free-buffer counters at full capacity. Controllers with the
 * HCI_QUIRK_FIXUP_BUFFER_SIZE quirk get their SCO values overridden.
 */
static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu  = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
		hdev->sco_mtu  = 64;
		hdev->sco_pkts = 8;
	}

	/* All buffers start out free */
	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
}
687 
688 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
689 {
690 	struct hci_rp_read_bd_addr *rp = (void *) skb->data;
691 
692 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
693 
694 	if (rp->status)
695 		return;
696 
697 	if (test_bit(HCI_INIT, &hdev->flags))
698 		bacpy(&hdev->bdaddr, &rp->bdaddr);
699 
700 	if (hci_dev_test_flag(hdev, HCI_SETUP))
701 		bacpy(&hdev->setup_addr, &rp->bdaddr);
702 }
703 
704 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
705 					   struct sk_buff *skb)
706 {
707 	struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
708 
709 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
710 
711 	if (rp->status)
712 		return;
713 
714 	if (test_bit(HCI_INIT, &hdev->flags)) {
715 		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
716 		hdev->page_scan_window = __le16_to_cpu(rp->window);
717 	}
718 }
719 
720 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
721 					    struct sk_buff *skb)
722 {
723 	u8 status = *((u8 *) skb->data);
724 	struct hci_cp_write_page_scan_activity *sent;
725 
726 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
727 
728 	if (status)
729 		return;
730 
731 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
732 	if (!sent)
733 		return;
734 
735 	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
736 	hdev->page_scan_window = __le16_to_cpu(sent->window);
737 }
738 
739 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
740 					   struct sk_buff *skb)
741 {
742 	struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
743 
744 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
745 
746 	if (rp->status)
747 		return;
748 
749 	if (test_bit(HCI_INIT, &hdev->flags))
750 		hdev->page_scan_type = rp->type;
751 }
752 
753 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
754 					struct sk_buff *skb)
755 {
756 	u8 status = *((u8 *) skb->data);
757 	u8 *type;
758 
759 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
760 
761 	if (status)
762 		return;
763 
764 	type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
765 	if (type)
766 		hdev->page_scan_type = *type;
767 }
768 
/* Command Complete handler for HCI_Read_Data_Block_Size (block-based
 * flow control): caches the maximum ACL payload, block length and block
 * count, and starts the free-block counter at full capacity.
 */
static void hci_cc_read_data_block_size(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_data_block_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
	hdev->block_len = __le16_to_cpu(rp->block_len);
	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);

	/* All blocks start out free */
	hdev->block_cnt = hdev->num_blocks;

	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
	       hdev->block_cnt, hdev->block_len);
}
788 
/* Command Complete handler for HCI_Read_Clock.
 *
 * The reply length is validated first since the full reply struct is
 * accessed. Depending on the "which" field of the sent command, either
 * the local clock is cached on hdev (which == 0x00) or the piconet
 * clock and accuracy are cached on the matching connection.
 */
static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_clock *rp = (void *) skb->data;
	struct hci_cp_read_clock *cp;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	/* Guard against a truncated reply before touching rp fields */
	if (skb->len < sizeof(*rp))
		return;

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!cp)
		goto unlock;

	if (cp->which == 0x00) {
		hdev->clock = le32_to_cpu(rp->clock);
		goto unlock;
	}

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		conn->clock = le32_to_cpu(rp->clock);
		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
	}

unlock:
	hci_dev_unlock(hdev);
}
823 
/* Command Complete handler for HCI_Read_Local_AMP_Info: caches the AMP
 * controller's status, bandwidth limits, latency, PDU/assoc sizes and
 * flush timeouts on success.
 */
static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->amp_status = rp->amp_status;
	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
	hdev->amp_type = rp->amp_type;
	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
}
845 
846 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
847 					 struct sk_buff *skb)
848 {
849 	struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
850 
851 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
852 
853 	if (rp->status)
854 		return;
855 
856 	hdev->inq_tx_power = rp->tx_power;
857 }
858 
859 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
860 {
861 	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
862 	struct hci_cp_pin_code_reply *cp;
863 	struct hci_conn *conn;
864 
865 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
866 
867 	hci_dev_lock(hdev);
868 
869 	if (hci_dev_test_flag(hdev, HCI_MGMT))
870 		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
871 
872 	if (rp->status)
873 		goto unlock;
874 
875 	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
876 	if (!cp)
877 		goto unlock;
878 
879 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
880 	if (conn)
881 		conn->pin_length = cp->pin_len;
882 
883 unlock:
884 	hci_dev_unlock(hdev);
885 }
886 
887 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
888 {
889 	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
890 
891 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
892 
893 	hci_dev_lock(hdev);
894 
895 	if (hci_dev_test_flag(hdev, HCI_MGMT))
896 		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
897 						 rp->status);
898 
899 	hci_dev_unlock(hdev);
900 }
901 
902 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
903 				       struct sk_buff *skb)
904 {
905 	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
906 
907 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
908 
909 	if (rp->status)
910 		return;
911 
912 	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
913 	hdev->le_pkts = rp->le_max_pkt;
914 
915 	hdev->le_cnt = hdev->le_pkts;
916 
917 	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
918 }
919 
920 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
921 					  struct sk_buff *skb)
922 {
923 	struct hci_rp_le_read_local_features *rp = (void *) skb->data;
924 
925 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
926 
927 	if (rp->status)
928 		return;
929 
930 	memcpy(hdev->le_features, rp->features, 8);
931 }
932 
933 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
934 					struct sk_buff *skb)
935 {
936 	struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
937 
938 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
939 
940 	if (rp->status)
941 		return;
942 
943 	hdev->adv_tx_power = rp->tx_power;
944 }
945 
946 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
947 {
948 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
949 
950 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
951 
952 	hci_dev_lock(hdev);
953 
954 	if (hci_dev_test_flag(hdev, HCI_MGMT))
955 		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
956 						 rp->status);
957 
958 	hci_dev_unlock(hdev);
959 }
960 
961 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
962 					  struct sk_buff *skb)
963 {
964 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
965 
966 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
967 
968 	hci_dev_lock(hdev);
969 
970 	if (hci_dev_test_flag(hdev, HCI_MGMT))
971 		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
972 						     ACL_LINK, 0, rp->status);
973 
974 	hci_dev_unlock(hdev);
975 }
976 
977 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
978 {
979 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
980 
981 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
982 
983 	hci_dev_lock(hdev);
984 
985 	if (hci_dev_test_flag(hdev, HCI_MGMT))
986 		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
987 						 0, rp->status);
988 
989 	hci_dev_unlock(hdev);
990 }
991 
992 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
993 					  struct sk_buff *skb)
994 {
995 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
996 
997 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
998 
999 	hci_dev_lock(hdev);
1000 
1001 	if (hci_dev_test_flag(hdev, HCI_MGMT))
1002 		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1003 						     ACL_LINK, 0, rp->status);
1004 
1005 	hci_dev_unlock(hdev);
1006 }
1007 
1008 static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
1009 				       struct sk_buff *skb)
1010 {
1011 	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1012 
1013 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1014 }
1015 
1016 static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
1017 					   struct sk_buff *skb)
1018 {
1019 	struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
1020 
1021 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1022 }
1023 
1024 static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
1025 {
1026 	__u8 status = *((__u8 *) skb->data);
1027 	bdaddr_t *sent;
1028 
1029 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1030 
1031 	if (status)
1032 		return;
1033 
1034 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1035 	if (!sent)
1036 		return;
1037 
1038 	hci_dev_lock(hdev);
1039 
1040 	bacpy(&hdev->random_addr, sent);
1041 
1042 	hci_dev_unlock(hdev);
1043 }
1044 
/* Command Complete handler for HCI_LE_Set_Advertising_Enable.
 *
 * On success the HCI_LE_ADV dev flag is updated to mirror the value
 * that was sent. When advertising was enabled while an LE connection
 * attempt is in progress, a connection timeout is armed so a stalled
 * attempt cannot hang forever.
 */
static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	/* If we're doing connection initiation as peripheral. Set a
	 * timeout in case something goes wrong.
	 */
	if (*sent) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

	hci_dev_unlock(hdev);
}
1079 
1080 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1081 {
1082 	struct hci_cp_le_set_scan_param *cp;
1083 	__u8 status = *((__u8 *) skb->data);
1084 
1085 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1086 
1087 	if (status)
1088 		return;
1089 
1090 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1091 	if (!cp)
1092 		return;
1093 
1094 	hci_dev_lock(hdev);
1095 
1096 	hdev->le_scan_type = cp->type;
1097 
1098 	hci_dev_unlock(hdev);
1099 }
1100 
1101 static bool has_pending_adv_report(struct hci_dev *hdev)
1102 {
1103 	struct discovery_state *d = &hdev->discovery;
1104 
1105 	return bacmp(&d->last_adv_addr, BDADDR_ANY);
1106 }
1107 
1108 static void clear_pending_adv_report(struct hci_dev *hdev)
1109 {
1110 	struct discovery_state *d = &hdev->discovery;
1111 
1112 	bacpy(&d->last_adv_addr, BDADDR_ANY);
1113 	d->last_adv_data_len = 0;
1114 }
1115 
1116 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1117 				     u8 bdaddr_type, s8 rssi, u32 flags,
1118 				     u8 *data, u8 len)
1119 {
1120 	struct discovery_state *d = &hdev->discovery;
1121 
1122 	bacpy(&d->last_adv_addr, bdaddr);
1123 	d->last_adv_addr_type = bdaddr_type;
1124 	d->last_adv_rssi = rssi;
1125 	d->last_adv_flags = flags;
1126 	memcpy(d->last_adv_data, data, len);
1127 	d->last_adv_data_len = len;
1128 }
1129 
/* Command Complete handler for HCI_LE_Set_Scan_Enable.
 *
 * Mirrors the requested enable value into the HCI_LE_SCAN dev flag. On
 * enable of an active scan, any stale deferred advertising report is
 * cleared; on disable, a still-pending report is flushed to mgmt, the
 * scan-disable timer is cancelled, and discovery/advertising state is
 * reconciled depending on why scanning stopped.
 */
static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	switch (cp->enable) {
	case LE_SCAN_ENABLE:
		hci_dev_set_flag(hdev, HCI_LE_SCAN);
		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
			clear_pending_adv_report(hdev);
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		hci_dev_clear_flag(hdev, HCI_LE_SCAN);

		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request. Mark
		 * therefore discovery as stopped. If this was not
		 * because of a connect request advertising might have
		 * been disabled because of active scanning, so
		 * re-enable it again if necessary.
		 */
		if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
			 hdev->discovery.state == DISCOVERY_FINDING)
			hci_req_reenable_advertising(hdev);

		break;

	default:
		bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
			   cp->enable);
		break;
	}

	hci_dev_unlock(hdev);
}
1199 
1200 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1201 					   struct sk_buff *skb)
1202 {
1203 	struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1204 
1205 	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1206 
1207 	if (rp->status)
1208 		return;
1209 
1210 	hdev->le_white_list_size = rp->size;
1211 }
1212 
1213 static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
1214 				       struct sk_buff *skb)
1215 {
1216 	__u8 status = *((__u8 *) skb->data);
1217 
1218 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1219 
1220 	if (status)
1221 		return;
1222 
1223 	hci_bdaddr_list_clear(&hdev->le_white_list);
1224 }
1225 
1226 static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
1227 					struct sk_buff *skb)
1228 {
1229 	struct hci_cp_le_add_to_white_list *sent;
1230 	__u8 status = *((__u8 *) skb->data);
1231 
1232 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1233 
1234 	if (status)
1235 		return;
1236 
1237 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
1238 	if (!sent)
1239 		return;
1240 
1241 	hci_bdaddr_list_add(&hdev->le_white_list, &sent->bdaddr,
1242 			   sent->bdaddr_type);
1243 }
1244 
1245 static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
1246 					  struct sk_buff *skb)
1247 {
1248 	struct hci_cp_le_del_from_white_list *sent;
1249 	__u8 status = *((__u8 *) skb->data);
1250 
1251 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1252 
1253 	if (status)
1254 		return;
1255 
1256 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
1257 	if (!sent)
1258 		return;
1259 
1260 	hci_bdaddr_list_del(&hdev->le_white_list, &sent->bdaddr,
1261 			    sent->bdaddr_type);
1262 }
1263 
1264 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1265 					    struct sk_buff *skb)
1266 {
1267 	struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1268 
1269 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1270 
1271 	if (rp->status)
1272 		return;
1273 
1274 	memcpy(hdev->le_states, rp->le_states, 8);
1275 }
1276 
1277 static void hci_cc_le_read_def_data_len(struct hci_dev *hdev,
1278 					struct sk_buff *skb)
1279 {
1280 	struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;
1281 
1282 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1283 
1284 	if (rp->status)
1285 		return;
1286 
1287 	hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
1288 	hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
1289 }
1290 
1291 static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
1292 					 struct sk_buff *skb)
1293 {
1294 	struct hci_cp_le_write_def_data_len *sent;
1295 	__u8 status = *((__u8 *) skb->data);
1296 
1297 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1298 
1299 	if (status)
1300 		return;
1301 
1302 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
1303 	if (!sent)
1304 		return;
1305 
1306 	hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
1307 	hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
1308 }
1309 
1310 static void hci_cc_le_read_resolv_list_size(struct hci_dev *hdev,
1311 					   struct sk_buff *skb)
1312 {
1313 	struct hci_rp_le_read_resolv_list_size *rp = (void *) skb->data;
1314 
1315 	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1316 
1317 	if (rp->status)
1318 		return;
1319 
1320 	hdev->le_resolv_list_size = rp->size;
1321 }
1322 
1323 static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
1324 					struct sk_buff *skb)
1325 {
1326 	struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;
1327 
1328 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1329 
1330 	if (rp->status)
1331 		return;
1332 
1333 	hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
1334 	hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
1335 	hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
1336 	hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
1337 }
1338 
/* Handle Command Complete for HCI_OP_WRITE_LE_HOST_SUPPORTED.
 *
 * Keeps the page-1 host feature bits (LMP_HOST_LE, LMP_HOST_LE_BREDR)
 * and the HCI_LE_ENABLED / HCI_ADVERTISING flags in sync with the
 * values we just wrote to the controller.
 */
static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_cp_write_le_host_supported *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (sent->le) {
		hdev->features[1][0] |= LMP_HOST_LE;
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	} else {
		/* With LE disabled, advertising cannot stay enabled */
		hdev->features[1][0] &= ~LMP_HOST_LE;
		hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);
	}

	if (sent->simul)
		hdev->features[1][0] |= LMP_HOST_LE_BREDR;
	else
		hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;

	hci_dev_unlock(hdev);
}
1372 
/* Handle Command Complete for HCI_OP_LE_SET_ADV_PARAM: cache the
 * own-address type that advertising was configured with.
 */
static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_le_set_adv_param *cp;
	u8 status = *((u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
	if (!cp)
		return;

	hci_dev_lock(hdev);
	hdev->adv_addr_type = cp->own_address_type;
	hci_dev_unlock(hdev);
}
1391 
/* Handle Command Complete for HCI_OP_READ_RSSI: store the reported
 * RSSI value on the matching connection, if it still exists.
 */
static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_rssi *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->rssi = rp->rssi;

	hci_dev_unlock(hdev);
}
1410 
/* Handle Command Complete for HCI_OP_READ_TX_POWER.
 *
 * The reply carries only a handle and one power value; whether that is
 * the current or the maximum TX power depends on the "type" field of
 * the command we originally sent, so the sent command data is
 * consulted to know where to store the result.
 */
static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_read_tx_power *sent;
	struct hci_rp_read_tx_power *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (!conn)
		goto unlock;

	switch (sent->type) {
	case 0x00:	/* current transmit power level */
		conn->tx_power = rp->tx_power;
		break;
	case 0x01:	/* maximum transmit power level */
		conn->max_tx_power = rp->tx_power;
		break;
	}

unlock:
	hci_dev_unlock(hdev);
}
1444 
1445 static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, struct sk_buff *skb)
1446 {
1447 	u8 status = *((u8 *) skb->data);
1448 	u8 *mode;
1449 
1450 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1451 
1452 	if (status)
1453 		return;
1454 
1455 	mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
1456 	if (mode)
1457 		hdev->ssp_debug_mode = *mode;
1458 }
1459 
1460 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1461 {
1462 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1463 
1464 	if (status) {
1465 		hci_conn_check_pending(hdev);
1466 		return;
1467 	}
1468 
1469 	set_bit(HCI_INQUIRY, &hdev->flags);
1470 }
1471 
/* Handle Command Status for HCI_OP_CREATE_CONN.
 *
 * On failure the pending connection object is torn down, except for
 * status 0x0c ("Command Disallowed" per the HCI spec) on the first
 * two attempts, where the connection is parked in BT_CONNECT2 for a
 * retry. On success, ensure a connection object exists for the
 * in-progress link.
 */
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
					    HCI_ROLE_MASTER);
			if (!conn)
				bt_dev_err(hdev, "no memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}
1509 
/* Handle Command Status for HCI_OP_ADD_SCO.
 *
 * Only failures are handled here: look up the ACL connection the SCO
 * link was to be added on and tear down its pending SCO object.
 */
static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_add_sco *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
	if (!cp)
		return;

	/* The command carried the handle of the underlying ACL link */
	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}
1544 
/* Handle Command Status for HCI_OP_AUTH_REQUESTED.
 *
 * Only failures are handled: a connection still in BT_CONFIG is
 * notified of the failure and its reference dropped.
 */
static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_auth_requested *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
1571 
/* Handle Command Status for HCI_OP_SET_CONN_ENCRYPT.
 *
 * Only failures are handled: a connection still in BT_CONFIG is
 * notified of the failure and its reference dropped.
 */
static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_set_conn_encrypt *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
1598 
1599 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1600 				    struct hci_conn *conn)
1601 {
1602 	if (conn->state != BT_CONFIG || !conn->out)
1603 		return 0;
1604 
1605 	if (conn->pending_sec_level == BT_SECURITY_SDP)
1606 		return 0;
1607 
1608 	/* Only request authentication for SSP connections or non-SSP
1609 	 * devices with sec_level MEDIUM or HIGH or if MITM protection
1610 	 * is requested.
1611 	 */
1612 	if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1613 	    conn->pending_sec_level != BT_SECURITY_FIPS &&
1614 	    conn->pending_sec_level != BT_SECURITY_HIGH &&
1615 	    conn->pending_sec_level != BT_SECURITY_MEDIUM)
1616 		return 0;
1617 
1618 	return 1;
1619 }
1620 
1621 static int hci_resolve_name(struct hci_dev *hdev,
1622 				   struct inquiry_entry *e)
1623 {
1624 	struct hci_cp_remote_name_req cp;
1625 
1626 	memset(&cp, 0, sizeof(cp));
1627 
1628 	bacpy(&cp.bdaddr, &e->data.bdaddr);
1629 	cp.pscan_rep_mode = e->data.pscan_rep_mode;
1630 	cp.pscan_mode = e->data.pscan_mode;
1631 	cp.clock_offset = e->data.clock_offset;
1632 
1633 	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1634 }
1635 
1636 static bool hci_resolve_next_name(struct hci_dev *hdev)
1637 {
1638 	struct discovery_state *discov = &hdev->discovery;
1639 	struct inquiry_entry *e;
1640 
1641 	if (list_empty(&discov->resolve))
1642 		return false;
1643 
1644 	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1645 	if (!e)
1646 		return false;
1647 
1648 	if (hci_resolve_name(hdev, e) == 0) {
1649 		e->name_state = NAME_PENDING;
1650 		return true;
1651 	}
1652 
1653 	return false;
1654 }
1655 
/* Handle the outcome of a remote name lookup for @bdaddr.
 *
 * @name is NULL when resolution failed. Reports the device as
 * connected to mgmt if necessary, forwards the resolved name, and
 * drives the name-resolution phase of discovery: either resolve the
 * next pending name or mark discovery as stopped.
 */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	/* Update the mgmt connected state if necessary. Be careful with
	 * conn objects that exist but are not (yet) connected however.
	 * Only those in BT_CONFIG or BT_CONNECTED states can be
	 * considered connected.
	 */
	if (conn &&
	    (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, name, name_len);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in the list of devices whose names
	 * are still pending, there is no need to continue resolving the
	 * next name; that will be done when another Remote Name Request
	 * Complete event arrives.
	 */
	if (!e)
		return;

	list_del(&e->list);
	if (name) {
		e->name_state = NAME_KNOWN;
		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
				 e->data.rssi, name, name_len);
	} else {
		e->name_state = NAME_NOT_KNOWN;
	}

	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
1704 
/* Handle Command Status for HCI_OP_REMOTE_NAME_REQ.
 *
 * A successful status means a Remote Name Request Complete event will
 * follow, so nothing is done here. On failure, update any pending
 * mgmt name lookup and, for outgoing connections that still need it,
 * kick off authentication immediately.
 */
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* If successful wait for the name req complete event before
	 * checking for the need to do authentication */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested auth_cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		auth_cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(auth_cp), &auth_cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
1747 
/* Handle Command Status for HCI_OP_READ_REMOTE_FEATURES.
 *
 * Only failures are handled: a connection still in BT_CONFIG is
 * notified of the failure and its reference dropped.
 */
static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_read_remote_features *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
1774 
/* Handle Command Status for HCI_OP_READ_REMOTE_EXT_FEATURES.
 *
 * Only failures are handled: a connection still in BT_CONFIG is
 * notified of the failure and its reference dropped.
 */
static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_read_remote_ext_features *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
1801 
/* Handle Command Status for HCI_OP_SETUP_SYNC_CONN.
 *
 * Only failures are handled here: look up the ACL connection the
 * synchronous link was requested on and tear down its pending SCO
 * object.
 */
static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_setup_sync_conn *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
	if (!cp)
		return;

	/* The command carried the handle of the underlying ACL link */
	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}
1836 
/* Handle Command Status for HCI_OP_SNIFF_MODE.
 *
 * On failure, clear the pending mode-change flag and complete any SCO
 * setup that was waiting for the mode change.
 */
static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_sniff_mode *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, status);
	}

	hci_dev_unlock(hdev);
}
1863 
/* Handle Command Status for HCI_OP_EXIT_SNIFF_MODE.
 *
 * On failure, clear the pending mode-change flag and complete any SCO
 * setup that was waiting for the mode change.
 */
static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_exit_sniff_mode *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, status);
	}

	hci_dev_unlock(hdev);
}
1890 
/* Handle Command Status for HCI_OP_DISCONNECT. Only failures are
 * handled: mgmt is told that the disconnect attempt failed.
 */
static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_disconnect *cp;
	struct hci_conn *conn;

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn)
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, status);

	hci_dev_unlock(hdev);
}
1912 
/* Handle Command Status for HCI_OP_LE_CREATE_CONN.
 *
 * Failures are fully handled via the request completion callbacks (see
 * the comment below), so only the success path is processed here:
 * record the SMP-relevant initiator/responder addresses and arm the
 * connection-attempt timeout when the white list is not used.
 */
static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* All connection failure handling is taken care of by the
	 * hci_le_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */
	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_le(hdev, &cp->peer_addr,
				       cp->peer_addr_type);
	if (!conn)
		goto unlock;

	/* Store the initiator and responder address information which
	 * is needed for SMP. These values will not change during the
	 * lifetime of the connection.
	 */
	conn->init_addr_type = cp->own_address_type;
	if (cp->own_address_type == ADDR_LE_DEV_RANDOM)
		bacpy(&conn->init_addr, &hdev->random_addr);
	else
		bacpy(&conn->init_addr, &hdev->bdaddr);

	conn->resp_addr_type = cp->peer_addr_type;
	bacpy(&conn->resp_addr, &cp->peer_addr);

	/* We don't want the connection attempt to stick around
	 * indefinitely since LE doesn't have a page timeout concept
	 * like BR/EDR. Set a timer for any connection that doesn't use
	 * the white list for connecting.
	 */
	if (cp->filter_policy == HCI_LE_USE_PEER_ADDR)
		queue_delayed_work(conn->hdev->workqueue,
				   &conn->le_conn_timeout,
				   conn->conn_timeout);

unlock:
	hci_dev_unlock(hdev);
}
1964 
/* Handle Command Status for HCI_OP_LE_READ_REMOTE_FEATURES.
 *
 * Only failures are handled: a connection still in BT_CONFIG is
 * notified of the failure and its reference dropped.
 */
static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_read_remote_features *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
1991 
/* Handle Command Status for HCI_OP_LE_START_ENC.
 *
 * If the controller rejected the request to start encryption on a
 * connected link, the link is disconnected with an authentication
 * failure since encryption could not be established.
 */
static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_start_enc *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (!conn)
		goto unlock;

	if (conn->state != BT_CONNECTED)
		goto unlock;

	hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
	hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}
2021 
/* Handle Command Status for HCI_OP_SWITCH_ROLE. On failure, clear the
 * pending role-switch flag of the affected connection.
 */
static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_switch_role *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);

	hci_dev_unlock(hdev);
}
2044 
2045 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2046 {
2047 	__u8 status = *((__u8 *) skb->data);
2048 	struct discovery_state *discov = &hdev->discovery;
2049 	struct inquiry_entry *e;
2050 
2051 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
2052 
2053 	hci_conn_check_pending(hdev);
2054 
2055 	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
2056 		return;
2057 
2058 	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
2059 	wake_up_bit(&hdev->flags, HCI_INQUIRY);
2060 
2061 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
2062 		return;
2063 
2064 	hci_dev_lock(hdev);
2065 
2066 	if (discov->state != DISCOVERY_FINDING)
2067 		goto unlock;
2068 
2069 	if (list_empty(&discov->resolve)) {
2070 		/* When BR/EDR inquiry is active and no LE scanning is in
2071 		 * progress, then change discovery state to indicate completion.
2072 		 *
2073 		 * When running LE scanning and BR/EDR inquiry simultaneously
2074 		 * and the LE scan already finished, then change the discovery
2075 		 * state to indicate completion.
2076 		 */
2077 		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2078 		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2079 			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2080 		goto unlock;
2081 	}
2082 
2083 	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2084 	if (e && hci_resolve_name(hdev, e) == 0) {
2085 		e->name_state = NAME_PENDING;
2086 		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
2087 	} else {
2088 		/* When BR/EDR inquiry is active and no LE scanning is in
2089 		 * progress, then change discovery state to indicate completion.
2090 		 *
2091 		 * When running LE scanning and BR/EDR inquiry simultaneously
2092 		 * and the LE scan already finished, then change the discovery
2093 		 * state to indicate completion.
2094 		 */
2095 		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2096 		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2097 			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2098 	}
2099 
2100 unlock:
2101 	hci_dev_unlock(hdev);
2102 }
2103 
2104 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2105 {
2106 	struct inquiry_data data;
2107 	struct inquiry_info *info = (void *) (skb->data + 1);
2108 	int num_rsp = *((__u8 *) skb->data);
2109 
2110 	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2111 
2112 	if (!num_rsp)
2113 		return;
2114 
2115 	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
2116 		return;
2117 
2118 	hci_dev_lock(hdev);
2119 
2120 	for (; num_rsp; num_rsp--, info++) {
2121 		u32 flags;
2122 
2123 		bacpy(&data.bdaddr, &info->bdaddr);
2124 		data.pscan_rep_mode	= info->pscan_rep_mode;
2125 		data.pscan_period_mode	= info->pscan_period_mode;
2126 		data.pscan_mode		= info->pscan_mode;
2127 		memcpy(data.dev_class, info->dev_class, 3);
2128 		data.clock_offset	= info->clock_offset;
2129 		data.rssi		= HCI_RSSI_INVALID;
2130 		data.ssp_mode		= 0x00;
2131 
2132 		flags = hci_inquiry_cache_update(hdev, &data, false);
2133 
2134 		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2135 				  info->dev_class, HCI_RSSI_INVALID,
2136 				  flags, NULL, 0, NULL, 0);
2137 	}
2138 
2139 	hci_dev_unlock(hdev);
2140 }
2141 
/* Handle the HCI Connection Complete event.
 *
 * Completes a pending BR/EDR connection: on success the handle is
 * recorded, ACL links move to BT_CONFIG and get a remote-features
 * read, other link types become BT_CONNECTED. On failure the
 * connection object is torn down and, for ACL links, mgmt is
 * informed.
 */
static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* A SCO completion may answer a pending eSCO setup;
		 * retry the lookup with the eSCO link type and retype
		 * the connection to the link type actually created.
		 */
		if (ev->link_type != SCO_LINK)
			goto unlock;

		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	if (!ev->status) {
		conn->handle = __le16_to_cpu(ev->handle);

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);

		if (test_bit(HCI_AUTH, &hdev->flags))
			set_bit(HCI_CONN_AUTH, &conn->flags);

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);

			hci_req_update_scan(hdev);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}
	} else {
		conn->state = BT_CLOSED;
		if (conn->type == ACL_LINK)
			mgmt_connect_failed(hdev, &conn->dst, conn->type,
					    conn->dst_type, ev->status);
	}

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

	if (ev->status) {
		hci_connect_cfm(conn, ev->status);
		hci_conn_del(conn);
	} else if (ev->link_type != ACL_LINK)
		hci_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
2226 
2227 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
2228 {
2229 	struct hci_cp_reject_conn_req cp;
2230 
2231 	bacpy(&cp.bdaddr, bdaddr);
2232 	cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2233 	hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
2234 }
2235 
/* Handle the HCI Connection Request event.
 *
 * Applies the accept/reject policy (per-protocol accept mask,
 * blacklist and - under mgmt - the connectable setting or whitelist),
 * then either accepts the ACL or synchronous connection immediately
 * or leaves the decision to the profile when HCI_PROTO_DEFER is set.
 */
static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;
	struct inquiry_entry *ie;
	struct hci_conn *conn;
	__u8 flags = 0;

	BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
	       ev->link_type);

	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
				      &flags);

	if (!(mask & HCI_LM_ACCEPT)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	if (hci_bdaddr_list_lookup(&hdev->blacklist, &ev->bdaddr,
				   BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* Require HCI_CONNECTABLE or a whitelist entry to accept the
	 * connection. These features are only touched through mgmt so
	 * only do the checks if HCI_MGMT is set.
	 */
	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
	    !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
	    !hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
				    BDADDR_BREDR)) {
		    hci_reject_conn(hdev, &ev->bdaddr);
		    return;
	}

	/* Connection accepted */

	hci_dev_lock(hdev);

	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		memcpy(ie->data.dev_class, ev->dev_class, 3);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
			&ev->bdaddr);
	if (!conn) {
		conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
				    HCI_ROLE_SLAVE);
		if (!conn) {
			bt_dev_err(hdev, "no memory for new connection");
			hci_dev_unlock(hdev);
			return;
		}
	}

	memcpy(conn->dev_class, ev->dev_class, 3);

	hci_dev_unlock(hdev);

	if (ev->link_type == ACL_LINK ||
	    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
		struct hci_cp_accept_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);

		if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
			cp.role = 0x00; /* Become master */
		else
			cp.role = 0x01; /* Remain slave */

		hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
	} else if (!(flags & HCI_PROTO_DEFER)) {
		struct hci_cp_accept_sync_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.pkt_type = cpu_to_le16(conn->pkt_type);

		cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.max_latency    = cpu_to_le16(0xffff);
		cp.content_format = cpu_to_le16(hdev->voice_setting);
		cp.retrans_effort = 0xff;

		hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
			     &cp);
	} else {
		/* Defer the final accept decision to the profile */
		conn->state = BT_CONNECT2;
		hci_connect_cfm(conn, 0);
	}
}
2330 
2331 static u8 hci_to_mgmt_reason(u8 err)
2332 {
2333 	switch (err) {
2334 	case HCI_ERROR_CONNECTION_TIMEOUT:
2335 		return MGMT_DEV_DISCONN_TIMEOUT;
2336 	case HCI_ERROR_REMOTE_USER_TERM:
2337 	case HCI_ERROR_REMOTE_LOW_RESOURCES:
2338 	case HCI_ERROR_REMOTE_POWER_OFF:
2339 		return MGMT_DEV_DISCONN_REMOTE;
2340 	case HCI_ERROR_LOCAL_HOST_TERM:
2341 		return MGMT_DEV_DISCONN_LOCAL_HOST;
2342 	default:
2343 		return MGMT_DEV_DISCONN_UNKNOWN;
2344 	}
2345 }
2346 
/* Handle the HCI Disconnection Complete event.
 *
 * Looks up the connection by handle, reports the disconnect (or a
 * failed disconnect attempt) to the management interface, updates
 * link-key and scan state for ACL links, re-queues devices configured
 * for automatic LE reconnection, and finally tears down the
 * connection object.
 */
static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = (void *) skb->data;
	u8 reason;
	struct hci_conn_params *params;
	struct hci_conn *conn;
	bool mgmt_connected;
	u8 type;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* Non-zero status means the disconnect request failed; the
	 * link stays up, so only notify mgmt of the failure.
	 */
	if (ev->status) {
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, ev->status);
		goto unlock;
	}

	conn->state = BT_CLOSED;

	mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);

	/* A pending authentication failure overrides the HCI reason
	 * code when reporting the disconnect reason to userspace.
	 */
	if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
		reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
	else
		reason = hci_to_mgmt_reason(ev->reason);

	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
				reason, mgmt_connected);

	if (conn->type == ACL_LINK) {
		/* Drop a link key that was only valid for the lifetime
		 * of this connection.
		 */
		if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
			hci_remove_link_key(hdev, &conn->dst);

		hci_req_update_scan(hdev);
	}

	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_LINK_LOSS:
			/* Only auto-reconnect when the link was actually
			 * lost, not when it was terminated deliberately.
			 */
			if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
				break;
			/* Fall through */

		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			/* Re-queue the device for a background connect. */
			list_del_init(&params->action);
			list_add(&params->action, &hdev->pend_le_conns);
			hci_update_background_scan(hdev);
			break;

		default:
			break;
		}
	}

	/* conn is freed by hci_conn_del() below; remember the link
	 * type for the advertising check that follows.
	 */
	type = conn->type;

	hci_disconn_cfm(conn, ev->reason);
	hci_conn_del(conn);

	/* Re-enable advertising if necessary, since it might
	 * have been disabled by the connection. From the
	 * HCI_LE_Set_Advertise_Enable command description in
	 * the core specification (v4.0):
	 * "The Controller shall continue advertising until the Host
	 * issues an LE_Set_Advertise_Enable command with
	 * Advertising_Enable set to 0x00 (Advertising is disabled)
	 * or until a connection is created or until the Advertising
	 * is timed out due to Directed Advertising."
	 */
	if (type == LE_LINK)
		hci_req_reenable_advertising(hdev);

unlock:
	hci_dev_unlock(hdev);
}
2430 
/* Handle the HCI Authentication Complete event.
 *
 * Updates the connection's authentication state, reports failures to
 * mgmt, and drives the next step of connection setup: for SSP links
 * still in BT_CONFIG, authentication success triggers encryption;
 * otherwise the auth result is confirmed to the upper layers. Any
 * encryption request that was deferred behind authentication
 * (HCI_CONN_ENCRYPT_PEND) is issued or failed at the end.
 */
static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		/* Legacy (non-SSP) devices cannot be re-authenticated,
		 * so do not raise the security level in that case.
		 */
		if (!hci_conn_ssp_enabled(conn) &&
		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
			bt_dev_info(hdev, "re-auth of legacy device is not possible.");
		} else {
			set_bit(HCI_CONN_AUTH, &conn->flags);
			conn->sec_level = conn->pending_sec_level;
		}
	} else {
		/* Remember key-missing failures so the disconnect
		 * reason can be reported as an auth failure later.
		 */
		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		mgmt_auth_failed(conn, ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			/* SSP requires encryption before the link is
			 * considered fully set up.
			 */
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		/* Reset the disconnect timeout now that authentication
		 * has finished (hold+drop re-arms the timer).
		 */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

	/* Service an encryption request that was waiting for the
	 * authentication to complete.
	 */
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status, 0x00);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
2500 
/* Handle the HCI Remote Name Request Complete event.
 *
 * Resolves any pending name-request state (only when mgmt is in use),
 * then, if the connection still needs outgoing authentication, issues
 * the HCI Authentication Requested command.
 */
static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	/* Name tracking is an mgmt feature; without it skip straight
	 * to the authentication check.
	 */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto check_auth;

	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	/* Kick off authentication unless one is already in flight. */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
2542 
/* Completion callback for the HCI Read Encryption Key Size command
 * issued from hci_encrypt_change_evt(). Stores the negotiated key
 * size on the connection and then delivers the deferred connect or
 * encrypt confirmation to the upper layers.
 */
static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status,
				       u16 opcode, struct sk_buff *skb)
{
	const struct hci_rp_read_enc_key_size *rp;
	struct hci_conn *conn;
	u16 handle;

	BT_DBG("%s status 0x%02x", hdev->name, status);

	if (!skb || skb->len < sizeof(*rp)) {
		bt_dev_err(hdev, "invalid read key size response");
		return;
	}

	rp = (void *)skb->data;
	handle = le16_to_cpu(rp->handle);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn)
		goto unlock;

	/* If we fail to read the encryption key size, assume maximum
	 * (which is the same we do also when this HCI command isn't
	 * supported).
	 */
	if (rp->status) {
		bt_dev_err(hdev, "failed to read key size for handle %u",
			   handle);
		conn->enc_key_size = HCI_LINK_KEY_SIZE;
	} else {
		conn->enc_key_size = rp->key_size;
	}

	if (conn->state == BT_CONFIG) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, 0);
		hci_conn_drop(conn);
	} else {
		/* Reconstruct the encryption mode for the confirmation
		 * from the connection flags: 0x00 = off, 0x01 = E0,
		 * 0x02 = AES-CCM.
		 */
		u8 encrypt;

		if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags))
			encrypt = 0x00;
		else if (test_bit(HCI_CONN_AES_CCM, &conn->flags))
			encrypt = 0x02;
		else
			encrypt = 0x01;

		hci_encrypt_cfm(conn, 0, encrypt);
	}

unlock:
	hci_dev_unlock(hdev);
}
2598 
/* Handle the HCI Encryption Change event.
 *
 * Updates the connection's encryption/authentication flags, enforces
 * Secure Connections Only policy, and for freshly encrypted ACL links
 * reads the encryption key size before confirming the change to the
 * upper layers (the confirmation is then delivered from
 * read_enc_key_size_complete()).
 */
static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (ev->encrypt) {
			/* Encryption implies authentication */
			set_bit(HCI_CONN_AUTH, &conn->flags);
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
			conn->sec_level = conn->pending_sec_level;

			/* P-256 authentication key implies FIPS */
			if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
				set_bit(HCI_CONN_FIPS, &conn->flags);

			/* encrypt == 0x02 on BR/EDR means AES-CCM; LE
			 * links always use AES-CCM.
			 */
			if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
			    conn->type == LE_LINK)
				set_bit(HCI_CONN_AES_CCM, &conn->flags);
		} else {
			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}
	}

	/* We should disregard the current RPA and generate a new one
	 * whenever the encryption procedure fails.
	 */
	if (ev->status && conn->type == LE_LINK)
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* A failed encryption change on an established link is treated
	 * as an authentication failure and the link is torn down.
	 */
	if (ev->status && conn->state == BT_CONNECTED) {
		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	/* In Secure Connections Only mode, do not allow any connections
	 * that are not encrypted with AES-CCM using a P-256 authenticated
	 * combination key.
	 */
	if (hci_dev_test_flag(hdev, HCI_SC_ONLY) &&
	    (!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
	     conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
		hci_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	/* Try reading the encryption key size for encrypted ACL links */
	if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
		struct hci_cp_read_enc_key_size cp;
		struct hci_request req;

		/* Only send HCI_Read_Encryption_Key_Size if the
		 * controller really supports it. If it doesn't, assume
		 * the default size (16).
		 */
		if (!(hdev->commands[20] & 0x10)) {
			conn->enc_key_size = HCI_LINK_KEY_SIZE;
			goto notify;
		}

		hci_req_init(&req, hdev);

		cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp);

		if (hci_req_run_skb(&req, read_enc_key_size_complete)) {
			bt_dev_err(hdev, "sending read key size failed");
			conn->enc_key_size = HCI_LINK_KEY_SIZE;
			goto notify;
		}

		/* The confirmation is deferred until the key size read
		 * completes.
		 */
		goto unlock;
	}

notify:
	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else
		hci_encrypt_cfm(conn, ev->status, ev->encrypt);

unlock:
	hci_dev_unlock(hdev);
}
2702 
2703 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2704 					     struct sk_buff *skb)
2705 {
2706 	struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2707 	struct hci_conn *conn;
2708 
2709 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2710 
2711 	hci_dev_lock(hdev);
2712 
2713 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2714 	if (conn) {
2715 		if (!ev->status)
2716 			set_bit(HCI_CONN_SECURE, &conn->flags);
2717 
2718 		clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2719 
2720 		hci_key_change_cfm(conn, ev->status);
2721 	}
2722 
2723 	hci_dev_unlock(hdev);
2724 }
2725 
/* Handle the HCI Read Remote Supported Features Complete event.
 *
 * Stores page 0 of the remote feature mask, then continues connection
 * setup: request extended features if both sides support them,
 * otherwise resolve the remote name / notify mgmt, and finally move
 * the link to BT_CONNECTED when no outgoing authentication is needed.
 */
static void hci_remote_features_evt(struct hci_dev *hdev,
				    struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		memcpy(conn->features[0], ev->features, 8);

	/* The setup steps below only apply while the connection is
	 * still being configured.
	 */
	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status && lmp_ext_feat_capable(hdev) &&
	    lmp_ext_feat_capable(conn)) {
		/* Continue with extended features page 1; the setup
		 * resumes in the ext-features completion handler.
		 */
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
			     sizeof(cp), &cp);
		goto unlock;
	}

	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
2774 
/* Handle the HCI Command Complete event.
 *
 * Extracts the opcode and status, dispatches the response payload to
 * the matching hci_cc_* handler, re-arms command flow control based
 * on the event's Num_HCI_Command_Packets field, and flags any pending
 * request as complete. The opcode/status and request-completion
 * callbacks are returned to the caller through the out parameters.
 */
static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
				 u16 *opcode, u8 *status,
				 hci_req_complete_t *req_complete,
				 hci_req_complete_skb_t *req_complete_skb)
{
	struct hci_ev_cmd_complete *ev = (void *) skb->data;

	*opcode = __le16_to_cpu(ev->opcode);
	/* The first byte of every command-complete payload is the
	 * command status.
	 */
	*status = skb->data[sizeof(*ev)];

	skb_pull(skb, sizeof(*ev));

	switch (*opcode) {
	case HCI_OP_INQUIRY_CANCEL:
		hci_cc_inquiry_cancel(hdev, skb);
		break;

	case HCI_OP_PERIODIC_INQ:
		hci_cc_periodic_inq(hdev, skb);
		break;

	case HCI_OP_EXIT_PERIODIC_INQ:
		hci_cc_exit_periodic_inq(hdev, skb);
		break;

	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
		hci_cc_remote_name_req_cancel(hdev, skb);
		break;

	case HCI_OP_ROLE_DISCOVERY:
		hci_cc_role_discovery(hdev, skb);
		break;

	case HCI_OP_READ_LINK_POLICY:
		hci_cc_read_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_LINK_POLICY:
		hci_cc_write_link_policy(hdev, skb);
		break;

	case HCI_OP_READ_DEF_LINK_POLICY:
		hci_cc_read_def_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_DEF_LINK_POLICY:
		hci_cc_write_def_link_policy(hdev, skb);
		break;

	case HCI_OP_RESET:
		hci_cc_reset(hdev, skb);
		break;

	case HCI_OP_READ_STORED_LINK_KEY:
		hci_cc_read_stored_link_key(hdev, skb);
		break;

	case HCI_OP_DELETE_STORED_LINK_KEY:
		hci_cc_delete_stored_link_key(hdev, skb);
		break;

	case HCI_OP_WRITE_LOCAL_NAME:
		hci_cc_write_local_name(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_NAME:
		hci_cc_read_local_name(hdev, skb);
		break;

	case HCI_OP_WRITE_AUTH_ENABLE:
		hci_cc_write_auth_enable(hdev, skb);
		break;

	case HCI_OP_WRITE_ENCRYPT_MODE:
		hci_cc_write_encrypt_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SCAN_ENABLE:
		hci_cc_write_scan_enable(hdev, skb);
		break;

	case HCI_OP_READ_CLASS_OF_DEV:
		hci_cc_read_class_of_dev(hdev, skb);
		break;

	case HCI_OP_WRITE_CLASS_OF_DEV:
		hci_cc_write_class_of_dev(hdev, skb);
		break;

	case HCI_OP_READ_VOICE_SETTING:
		hci_cc_read_voice_setting(hdev, skb);
		break;

	case HCI_OP_WRITE_VOICE_SETTING:
		hci_cc_write_voice_setting(hdev, skb);
		break;

	case HCI_OP_READ_NUM_SUPPORTED_IAC:
		hci_cc_read_num_supported_iac(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_MODE:
		hci_cc_write_ssp_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SC_SUPPORT:
		hci_cc_write_sc_support(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_VERSION:
		hci_cc_read_local_version(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_COMMANDS:
		hci_cc_read_local_commands(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_FEATURES:
		hci_cc_read_local_features(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_EXT_FEATURES:
		hci_cc_read_local_ext_features(hdev, skb);
		break;

	case HCI_OP_READ_BUFFER_SIZE:
		hci_cc_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_READ_BD_ADDR:
		hci_cc_read_bd_addr(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
		hci_cc_read_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
		hci_cc_write_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_TYPE:
		hci_cc_read_page_scan_type(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_TYPE:
		hci_cc_write_page_scan_type(hdev, skb);
		break;

	case HCI_OP_READ_DATA_BLOCK_SIZE:
		hci_cc_read_data_block_size(hdev, skb);
		break;

	case HCI_OP_READ_FLOW_CONTROL_MODE:
		hci_cc_read_flow_control_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_INFO:
		hci_cc_read_local_amp_info(hdev, skb);
		break;

	case HCI_OP_READ_CLOCK:
		hci_cc_read_clock(hdev, skb);
		break;

	case HCI_OP_READ_INQ_RSP_TX_POWER:
		hci_cc_read_inq_rsp_tx_power(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_REPLY:
		hci_cc_pin_code_reply(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_NEG_REPLY:
		hci_cc_pin_code_neg_reply(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_DATA:
		hci_cc_read_local_oob_data(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
		hci_cc_read_local_oob_ext_data(hdev, skb);
		break;

	case HCI_OP_LE_READ_BUFFER_SIZE:
		hci_cc_le_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_LE_READ_LOCAL_FEATURES:
		hci_cc_le_read_local_features(hdev, skb);
		break;

	case HCI_OP_LE_READ_ADV_TX_POWER:
		hci_cc_le_read_adv_tx_power(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_REPLY:
		hci_cc_user_confirm_reply(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_NEG_REPLY:
		hci_cc_user_confirm_neg_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_REPLY:
		hci_cc_user_passkey_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_NEG_REPLY:
		hci_cc_user_passkey_neg_reply(hdev, skb);
		break;

	case HCI_OP_LE_SET_RANDOM_ADDR:
		hci_cc_le_set_random_addr(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_ENABLE:
		hci_cc_le_set_adv_enable(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_PARAM:
		hci_cc_le_set_scan_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_ENABLE:
		hci_cc_le_set_scan_enable(hdev, skb);
		break;

	case HCI_OP_LE_READ_WHITE_LIST_SIZE:
		hci_cc_le_read_white_list_size(hdev, skb);
		break;

	case HCI_OP_LE_CLEAR_WHITE_LIST:
		hci_cc_le_clear_white_list(hdev, skb);
		break;

	case HCI_OP_LE_ADD_TO_WHITE_LIST:
		hci_cc_le_add_to_white_list(hdev, skb);
		break;

	case HCI_OP_LE_DEL_FROM_WHITE_LIST:
		hci_cc_le_del_from_white_list(hdev, skb);
		break;

	case HCI_OP_LE_READ_SUPPORTED_STATES:
		hci_cc_le_read_supported_states(hdev, skb);
		break;

	case HCI_OP_LE_READ_DEF_DATA_LEN:
		hci_cc_le_read_def_data_len(hdev, skb);
		break;

	case HCI_OP_LE_WRITE_DEF_DATA_LEN:
		hci_cc_le_write_def_data_len(hdev, skb);
		break;

	case HCI_OP_LE_READ_RESOLV_LIST_SIZE:
		hci_cc_le_read_resolv_list_size(hdev, skb);
		break;

	case HCI_OP_LE_READ_MAX_DATA_LEN:
		hci_cc_le_read_max_data_len(hdev, skb);
		break;

	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
		hci_cc_write_le_host_supported(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_PARAM:
		hci_cc_set_adv_param(hdev, skb);
		break;

	case HCI_OP_READ_RSSI:
		hci_cc_read_rssi(hdev, skb);
		break;

	case HCI_OP_READ_TX_POWER:
		hci_cc_read_tx_power(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_DEBUG_MODE:
		hci_cc_write_ssp_debug_mode(hdev, skb);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
		break;
	}

	/* A real command completed; stop the command timeout timer. */
	if (*opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	/* Allow the next queued command to be sent, except while a
	 * reset is in progress.
	 */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
		atomic_set(&hdev->cmd_cnt, 1);

	hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
			     req_complete_skb);

	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
		queue_work(hdev->workqueue, &hdev->cmd_work);
}
3077 
/* Handle the HCI Command Status event.
 *
 * Dispatches the status to the matching hci_cs_* handler, re-arms
 * command flow control, and — for failed commands or commands that
 * produce no later Command Complete event — marks any pending request
 * as finished.
 */
static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
			       u16 *opcode, u8 *status,
			       hci_req_complete_t *req_complete,
			       hci_req_complete_skb_t *req_complete_skb)
{
	struct hci_ev_cmd_status *ev = (void *) skb->data;

	skb_pull(skb, sizeof(*ev));

	*opcode = __le16_to_cpu(ev->opcode);
	*status = ev->status;

	switch (*opcode) {
	case HCI_OP_INQUIRY:
		hci_cs_inquiry(hdev, ev->status);
		break;

	case HCI_OP_CREATE_CONN:
		hci_cs_create_conn(hdev, ev->status);
		break;

	case HCI_OP_DISCONNECT:
		hci_cs_disconnect(hdev, ev->status);
		break;

	case HCI_OP_ADD_SCO:
		hci_cs_add_sco(hdev, ev->status);
		break;

	case HCI_OP_AUTH_REQUESTED:
		hci_cs_auth_requested(hdev, ev->status);
		break;

	case HCI_OP_SET_CONN_ENCRYPT:
		hci_cs_set_conn_encrypt(hdev, ev->status);
		break;

	case HCI_OP_REMOTE_NAME_REQ:
		hci_cs_remote_name_req(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_FEATURES:
		hci_cs_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_EXT_FEATURES:
		hci_cs_read_remote_ext_features(hdev, ev->status);
		break;

	case HCI_OP_SETUP_SYNC_CONN:
		hci_cs_setup_sync_conn(hdev, ev->status);
		break;

	case HCI_OP_SNIFF_MODE:
		hci_cs_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_EXIT_SNIFF_MODE:
		hci_cs_exit_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_SWITCH_ROLE:
		hci_cs_switch_role(hdev, ev->status);
		break;

	case HCI_OP_LE_CREATE_CONN:
		hci_cs_le_create_conn(hdev, ev->status);
		break;

	case HCI_OP_LE_READ_REMOTE_FEATURES:
		hci_cs_le_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_LE_START_ENC:
		hci_cs_le_start_enc(hdev, ev->status);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
		break;
	}

	/* A real command was acknowledged; stop the command timeout. */
	if (*opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	/* Allow the next queued command to be sent, except while a
	 * reset is in progress.
	 */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
		atomic_set(&hdev->cmd_cnt, 1);

	/* Indicate request completion if the command failed. Also, if
	 * we're not waiting for a special event and we get a success
	 * command status we should try to flag the request as completed
	 * (since for this kind of commands there will not be a command
	 * complete event).
	 */
	if (ev->status ||
	    (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->hci.req_event))
		hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
				     req_complete_skb);

	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
		queue_work(hdev->workqueue, &hdev->cmd_work);
}
3180 
3181 static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
3182 {
3183 	struct hci_ev_hardware_error *ev = (void *) skb->data;
3184 
3185 	hdev->hw_error_code = ev->code;
3186 
3187 	queue_work(hdev->req_workqueue, &hdev->error_reset);
3188 }
3189 
3190 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3191 {
3192 	struct hci_ev_role_change *ev = (void *) skb->data;
3193 	struct hci_conn *conn;
3194 
3195 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3196 
3197 	hci_dev_lock(hdev);
3198 
3199 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3200 	if (conn) {
3201 		if (!ev->status)
3202 			conn->role = ev->role;
3203 
3204 		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3205 
3206 		hci_role_switch_cfm(conn, ev->status, ev->role);
3207 	}
3208 
3209 	hci_dev_unlock(hdev);
3210 }
3211 
/* Handle the HCI Number Of Completed Packets event.
 *
 * Credits back transmit quota for each listed connection handle
 * (packet-based flow control only) and kicks the TX work so queued
 * frames can be sent.
 */
static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
	int i;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
		bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* The first check also guards the read of ev->num_hndl in the
	 * second via short-circuit evaluation.
	 */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_pkts_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16  handle, count;

		handle = __le16_to_cpu(info->handle);
		count  = __le16_to_cpu(info->count);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= count;

		/* Return the credits to the right per-type pool,
		 * clamping at the controller-advertised maximum.
		 */
		switch (conn->type) {
		case ACL_LINK:
			hdev->acl_cnt += count;
			if (hdev->acl_cnt > hdev->acl_pkts)
				hdev->acl_cnt = hdev->acl_pkts;
			break;

		case LE_LINK:
			/* Controllers without a dedicated LE buffer pool
			 * share the ACL pool.
			 */
			if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		case SCO_LINK:
			hdev->sco_cnt += count;
			if (hdev->sco_cnt > hdev->sco_pkts)
				hdev->sco_cnt = hdev->sco_pkts;
			break;

		default:
			bt_dev_err(hdev, "unknown type %d conn %p",
				   conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
3278 
3279 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
3280 						 __u16 handle)
3281 {
3282 	struct hci_chan *chan;
3283 
3284 	switch (hdev->dev_type) {
3285 	case HCI_PRIMARY:
3286 		return hci_conn_hash_lookup_handle(hdev, handle);
3287 	case HCI_AMP:
3288 		chan = hci_chan_lookup_handle(hdev, handle);
3289 		if (chan)
3290 			return chan->conn;
3291 		break;
3292 	default:
3293 		bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
3294 		break;
3295 	}
3296 
3297 	return NULL;
3298 }
3299 
/* Handle the HCI Number Of Completed Data Blocks event.
 *
 * Block-based flow control counterpart of hci_num_comp_pkts_evt():
 * credits back transmit blocks for each listed handle and kicks the
 * TX work.
 */
static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
	int i;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
		bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* The first check also guards the read of ev->num_hndl in the
	 * second via short-circuit evaluation.
	 */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
	       ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_blocks_info *info = &ev->handles[i];
		struct hci_conn *conn = NULL;
		__u16  handle, block_count;

		handle = __le16_to_cpu(info->handle);
		block_count = __le16_to_cpu(info->blocks);

		conn = __hci_conn_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= block_count;

		switch (conn->type) {
		case ACL_LINK:
		case AMP_LINK:
			/* Return the blocks, clamped at the advertised
			 * maximum.
			 */
			hdev->block_cnt += block_count;
			if (hdev->block_cnt > hdev->num_blocks)
				hdev->block_cnt = hdev->num_blocks;
			break;

		default:
			bt_dev_err(hdev, "unknown type %d conn %p",
				   conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
3350 
3351 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3352 {
3353 	struct hci_ev_mode_change *ev = (void *) skb->data;
3354 	struct hci_conn *conn;
3355 
3356 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3357 
3358 	hci_dev_lock(hdev);
3359 
3360 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3361 	if (conn) {
3362 		conn->mode = ev->mode;
3363 
3364 		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
3365 					&conn->flags)) {
3366 			if (conn->mode == HCI_CM_ACTIVE)
3367 				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3368 			else
3369 				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3370 		}
3371 
3372 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
3373 			hci_sco_setup(conn, ev->status);
3374 	}
3375 
3376 	hci_dev_unlock(hdev);
3377 }
3378 
/* Handle the HCI PIN Code Request event.
 *
 * Extends the disconnect timeout for the pairing, rejects the request
 * when the device is not bondable and we did not initiate the
 * authentication, and otherwise forwards the PIN request to userspace
 * via mgmt.
 */
static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pin_code_req *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Give the pairing procedure more time before the idle
	 * disconnect kicks in (hold+drop re-arms the timer).
	 */
	if (conn->state == BT_CONNECTED) {
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
		hci_conn_drop(conn);
	}

	if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
	    !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
	} else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
		u8 secure;

		/* A 16-digit PIN is required for high security. */
		if (conn->pending_sec_level == BT_SECURITY_HIGH)
			secure = 1;
		else
			secure = 0;

		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
	}

unlock:
	hci_dev_unlock(hdev);
}
3416 
3417 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
3418 {
3419 	if (key_type == HCI_LK_CHANGED_COMBINATION)
3420 		return;
3421 
3422 	conn->pin_length = pin_len;
3423 	conn->key_type = key_type;
3424 
3425 	switch (key_type) {
3426 	case HCI_LK_LOCAL_UNIT:
3427 	case HCI_LK_REMOTE_UNIT:
3428 	case HCI_LK_DEBUG_COMBINATION:
3429 		return;
3430 	case HCI_LK_COMBINATION:
3431 		if (pin_len == 16)
3432 			conn->pending_sec_level = BT_SECURITY_HIGH;
3433 		else
3434 			conn->pending_sec_level = BT_SECURITY_MEDIUM;
3435 		break;
3436 	case HCI_LK_UNAUTH_COMBINATION_P192:
3437 	case HCI_LK_UNAUTH_COMBINATION_P256:
3438 		conn->pending_sec_level = BT_SECURITY_MEDIUM;
3439 		break;
3440 	case HCI_LK_AUTH_COMBINATION_P192:
3441 		conn->pending_sec_level = BT_SECURITY_HIGH;
3442 		break;
3443 	case HCI_LK_AUTH_COMBINATION_P256:
3444 		conn->pending_sec_level = BT_SECURITY_FIPS;
3445 		break;
3446 	}
3447 }
3448 
/* HCI Link Key Request event: the controller asks for a stored link
 * key for the given remote address. Reply with the key if we have one
 * that satisfies the connection's security requirements, otherwise
 * send a negative reply so pairing is triggered instead.
 */
static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = (void *) skb->data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	BT_DBG("%s", hdev->name);

	/* Without mgmt the kernel keeps no key store; let the event time
	 * out / be handled elsewhere.
	 */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		BT_DBG("%s link key not found for %pMR", hdev->name,
		       &ev->bdaddr);
		goto not_found;
	}

	BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
	       &ev->bdaddr);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);

		/* Refuse an unauthenticated key when this connection
		 * requires MITM protection (auth_type bit 0), unless the
		 * auth requirement is still unknown (0xff).
		 */
		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
			BT_DBG("%s ignoring unauthenticated key", hdev->name);
			goto not_found;
		}

		/* A combination key from a short PIN is too weak for high
		 * or FIPS security levels.
		 */
		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
		    (conn->pending_sec_level == BT_SECURITY_HIGH ||
		     conn->pending_sec_level == BT_SECURITY_FIPS)) {
			BT_DBG("%s ignoring key unauthenticated for high security",
			       hdev->name);
			goto not_found;
		}

		conn_set_key(conn, key->type, key->pin_len);
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}
3508 
/* HCI Link Key Notification event: pairing produced a new link key.
 * Store it, notify user space and decide whether the key should stay
 * in the kernel list and survive the connection (persistent vs flush).
 */
static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = (void *) skb->data;
	struct hci_conn *conn;
	struct link_key *key;
	bool persistent;
	/* NOTE(review): pin_len stays 0 here, so hci_add_link_key always
	 * gets a zero PIN length even though conn->pin_length is used for
	 * conn_set_key() below — confirm this asymmetry is intended.
	 */
	u8 pin_len = 0;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Re-arm the disconnect timer with the normal timeout */
	hci_conn_hold(conn);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(conn);

	set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
	conn_set_key(conn, ev->key_type, conn->pin_length);

	/* Only store and report keys when mgmt manages the device */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
			        ev->key_type, pin_len, &persistent);
	if (!key)
		goto unlock;

	/* Update connection information since adding the key will have
	 * fixed up the type in the case of changed combination keys.
	 */
	if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
		conn_set_key(conn, key->type, key->pin_len);

	mgmt_new_link_key(hdev, key, persistent);

	/* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
	 * is set. If it's not set simply remove the key from the kernel
	 * list (we've still notified user space about it but with
	 * store_hint being 0).
	 */
	if (key->type == HCI_LK_DEBUG_COMBINATION &&
	    !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
		goto unlock;
	}

	/* Non-persistent keys get flushed when the connection goes down */
	if (persistent)
		clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
	else
		set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);

unlock:
	hci_dev_unlock(hdev);
}
3568 
3569 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
3570 {
3571 	struct hci_ev_clock_offset *ev = (void *) skb->data;
3572 	struct hci_conn *conn;
3573 
3574 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3575 
3576 	hci_dev_lock(hdev);
3577 
3578 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3579 	if (conn && !ev->status) {
3580 		struct inquiry_entry *ie;
3581 
3582 		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3583 		if (ie) {
3584 			ie->data.clock_offset = ev->clock_offset;
3585 			ie->timestamp = jiffies;
3586 		}
3587 	}
3588 
3589 	hci_dev_unlock(hdev);
3590 }
3591 
3592 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3593 {
3594 	struct hci_ev_pkt_type_change *ev = (void *) skb->data;
3595 	struct hci_conn *conn;
3596 
3597 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3598 
3599 	hci_dev_lock(hdev);
3600 
3601 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3602 	if (conn && !ev->status)
3603 		conn->pkt_type = __le16_to_cpu(ev->pkt_type);
3604 
3605 	hci_dev_unlock(hdev);
3606 }
3607 
3608 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
3609 {
3610 	struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
3611 	struct inquiry_entry *ie;
3612 
3613 	BT_DBG("%s", hdev->name);
3614 
3615 	hci_dev_lock(hdev);
3616 
3617 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3618 	if (ie) {
3619 		ie->data.pscan_rep_mode = ev->pscan_rep_mode;
3620 		ie->timestamp = jiffies;
3621 	}
3622 
3623 	hci_dev_unlock(hdev);
3624 }
3625 
3626 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
3627 					     struct sk_buff *skb)
3628 {
3629 	struct inquiry_data data;
3630 	int num_rsp = *((__u8 *) skb->data);
3631 
3632 	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3633 
3634 	if (!num_rsp)
3635 		return;
3636 
3637 	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
3638 		return;
3639 
3640 	hci_dev_lock(hdev);
3641 
3642 	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
3643 		struct inquiry_info_with_rssi_and_pscan_mode *info;
3644 		info = (void *) (skb->data + 1);
3645 
3646 		for (; num_rsp; num_rsp--, info++) {
3647 			u32 flags;
3648 
3649 			bacpy(&data.bdaddr, &info->bdaddr);
3650 			data.pscan_rep_mode	= info->pscan_rep_mode;
3651 			data.pscan_period_mode	= info->pscan_period_mode;
3652 			data.pscan_mode		= info->pscan_mode;
3653 			memcpy(data.dev_class, info->dev_class, 3);
3654 			data.clock_offset	= info->clock_offset;
3655 			data.rssi		= info->rssi;
3656 			data.ssp_mode		= 0x00;
3657 
3658 			flags = hci_inquiry_cache_update(hdev, &data, false);
3659 
3660 			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3661 					  info->dev_class, info->rssi,
3662 					  flags, NULL, 0, NULL, 0);
3663 		}
3664 	} else {
3665 		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
3666 
3667 		for (; num_rsp; num_rsp--, info++) {
3668 			u32 flags;
3669 
3670 			bacpy(&data.bdaddr, &info->bdaddr);
3671 			data.pscan_rep_mode	= info->pscan_rep_mode;
3672 			data.pscan_period_mode	= info->pscan_period_mode;
3673 			data.pscan_mode		= 0x00;
3674 			memcpy(data.dev_class, info->dev_class, 3);
3675 			data.clock_offset	= info->clock_offset;
3676 			data.rssi		= info->rssi;
3677 			data.ssp_mode		= 0x00;
3678 
3679 			flags = hci_inquiry_cache_update(hdev, &data, false);
3680 
3681 			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3682 					  info->dev_class, info->rssi,
3683 					  flags, NULL, 0, NULL, 0);
3684 		}
3685 	}
3686 
3687 	hci_dev_unlock(hdev);
3688 }
3689 
/* HCI Read Remote Extended Features Complete event: store the remote
 * feature page, derive SSP/SC enablement from host feature page 1 and
 * continue connection setup (remote name request or mgmt connected
 * notification) while the connection is still in BT_CONFIG.
 */
static void hci_remote_ext_features_evt(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->page < HCI_MAX_PAGES)
		memcpy(conn->features[ev->page], ev->features, 8);

	/* Page 1 carries the remote host features (SSP/SC support) */
	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

		if (ev->features[0] & LMP_HOST_SSP) {
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		} else {
			/* It is mandatory by the Bluetooth specification that
			 * Extended Inquiry Results are only used when Secure
			 * Simple Pairing is enabled, but some devices violate
			 * this.
			 *
			 * To make these devices work, the internal SSP
			 * enabled flag needs to be cleared if the remote host
			 * features do not indicate SSP support */
			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		}

		if (ev->features[0] & LMP_HOST_SC)
			set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
	}

	if (conn->state != BT_CONFIG)
		goto unlock;

	/* Ask for the remote name before declaring the device connected
	 * to mgmt; once MGMT_CONNECTED has been set, report directly.
	 */
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	/* If no outgoing authentication is required, setup is complete */
	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3753 
/* HCI Synchronous Connection Complete event: finalize a SCO/eSCO
 * connection attempt, falling back to less demanding packet types and
 * retrying on a set of known rejection status codes.
 */
static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		/* When the link type in the event indicates SCO connection
		 * and lookup of the connection object fails, then check
		 * if an eSCO connection object exists.
		 *
		 * The core limits the synchronous connections to either
		 * SCO or eSCO. The eSCO connection is preferred and tried
		 * to be setup first and until successfully established,
		 * the link type will be hinted as eSCO.
		 */
		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;
	}

	switch (ev->status) {
	case 0x00:
		conn->handle = __le16_to_cpu(ev->handle);
		conn->state  = BT_CONNECTED;
		conn->type   = ev->link_type;

		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);
		break;

	case 0x10:	/* Connection Accept Timeout */
	case 0x0d:	/* Connection Rejected due to Limited Resources */
	case 0x11:	/* Unsupported Feature or Parameter Value */
	case 0x1c:	/* SCO interval rejected */
	case 0x1a:	/* Unsupported Remote Feature */
	case 0x1f:	/* Unspecified error */
	case 0x20:	/* Unsupported LMP Parameter value */
		/* For outgoing attempts, retry with a downgraded packet
		 * type selection; on a successful retry the final result
		 * arrives in a later event, so skip the failure handling.
		 */
		if (conn->out) {
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
			if (hci_setup_sync(conn, conn->link->handle))
				goto unlock;
		}
		/* fall through */

	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_connect_cfm(conn, ev->status);
	if (ev->status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
3820 
3821 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
3822 {
3823 	size_t parsed = 0;
3824 
3825 	while (parsed < eir_len) {
3826 		u8 field_len = eir[0];
3827 
3828 		if (field_len == 0)
3829 			return parsed;
3830 
3831 		parsed += field_len + 1;
3832 		eir += field_len + 1;
3833 	}
3834 
3835 	return eir_len;
3836 }
3837 
3838 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
3839 					    struct sk_buff *skb)
3840 {
3841 	struct inquiry_data data;
3842 	struct extended_inquiry_info *info = (void *) (skb->data + 1);
3843 	int num_rsp = *((__u8 *) skb->data);
3844 	size_t eir_len;
3845 
3846 	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3847 
3848 	if (!num_rsp)
3849 		return;
3850 
3851 	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
3852 		return;
3853 
3854 	hci_dev_lock(hdev);
3855 
3856 	for (; num_rsp; num_rsp--, info++) {
3857 		u32 flags;
3858 		bool name_known;
3859 
3860 		bacpy(&data.bdaddr, &info->bdaddr);
3861 		data.pscan_rep_mode	= info->pscan_rep_mode;
3862 		data.pscan_period_mode	= info->pscan_period_mode;
3863 		data.pscan_mode		= 0x00;
3864 		memcpy(data.dev_class, info->dev_class, 3);
3865 		data.clock_offset	= info->clock_offset;
3866 		data.rssi		= info->rssi;
3867 		data.ssp_mode		= 0x01;
3868 
3869 		if (hci_dev_test_flag(hdev, HCI_MGMT))
3870 			name_known = eir_get_data(info->data,
3871 						  sizeof(info->data),
3872 						  EIR_NAME_COMPLETE, NULL);
3873 		else
3874 			name_known = true;
3875 
3876 		flags = hci_inquiry_cache_update(hdev, &data, name_known);
3877 
3878 		eir_len = eir_get_length(info->data, sizeof(info->data));
3879 
3880 		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3881 				  info->dev_class, info->rssi,
3882 				  flags, info->data, eir_len, NULL, 0);
3883 	}
3884 
3885 	hci_dev_unlock(hdev);
3886 }
3887 
/* HCI Encryption Key Refresh Complete event: for LE links, promote the
 * pending security level on success and notify the appropriate
 * confirmation callbacks depending on the connection state.
 */
static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
	       __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* For BR/EDR the necessary steps are taken through the
	 * auth_complete event.
	 */
	if (conn->type != LE_LINK)
		goto unlock;

	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* A refresh failure on an established link means the encryption
	 * is no longer trustworthy; tear the link down.
	 */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		/* Re-arm the disconnect timer with the normal timeout */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3937 
3938 static u8 hci_get_auth_req(struct hci_conn *conn)
3939 {
3940 	/* If remote requests no-bonding follow that lead */
3941 	if (conn->remote_auth == HCI_AT_NO_BONDING ||
3942 	    conn->remote_auth == HCI_AT_NO_BONDING_MITM)
3943 		return conn->remote_auth | (conn->auth_type & 0x01);
3944 
3945 	/* If both remote and local have enough IO capabilities, require
3946 	 * MITM protection
3947 	 */
3948 	if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
3949 	    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
3950 		return conn->remote_auth | 0x01;
3951 
3952 	/* No MITM protection possible so ignore remote requirement */
3953 	return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
3954 }
3955 
3956 static u8 bredr_oob_data_present(struct hci_conn *conn)
3957 {
3958 	struct hci_dev *hdev = conn->hdev;
3959 	struct oob_data *data;
3960 
3961 	data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
3962 	if (!data)
3963 		return 0x00;
3964 
3965 	if (bredr_sc_enabled(hdev)) {
3966 		/* When Secure Connections is enabled, then just
3967 		 * return the present value stored with the OOB
3968 		 * data. The stored value contains the right present
3969 		 * information. However it can only be trusted when
3970 		 * not in Secure Connection Only mode.
3971 		 */
3972 		if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
3973 			return data->present;
3974 
3975 		/* When Secure Connections Only mode is enabled, then
3976 		 * the P-256 values are required. If they are not
3977 		 * available, then do not declare that OOB data is
3978 		 * present.
3979 		 */
3980 		if (!memcmp(data->rand256, ZERO_KEY, 16) ||
3981 		    !memcmp(data->hash256, ZERO_KEY, 16))
3982 			return 0x00;
3983 
3984 		return 0x02;
3985 	}
3986 
3987 	/* When Secure Connections is not enabled or actually
3988 	 * not supported by the hardware, then check that if
3989 	 * P-192 data values are present.
3990 	 */
3991 	if (!memcmp(data->rand192, ZERO_KEY, 16) ||
3992 	    !memcmp(data->hash192, ZERO_KEY, 16))
3993 		return 0x00;
3994 
3995 	return 0x01;
3996 }
3997 
/* HCI IO Capability Request event: reply with our IO capability and
 * negotiated authentication requirements, or reject the pairing when
 * it is not allowed by the current bonding policy.
 */
static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Keep the connection alive through the pairing exchange */
	hci_conn_hold(conn);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	/* Allow pairing if we're pairable, the initiators of the
	 * pairing or if the remote is not requesting bonding.
	 */
	if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
	    test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
				HCI_IO_DISPLAY_YESNO : conn->io_capability;

		/* If we are initiators, there is no remote information yet */
		if (conn->remote_auth == 0xff) {
			/* Request MITM protection if our IO caps allow it
			 * except for the no-bonding case.
			 */
			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
			    conn->auth_type != HCI_AT_NO_BONDING)
				conn->auth_type |= 0x01;
		} else {
			conn->auth_type = hci_get_auth_req(conn);
		}

		/* If we're not bondable, force one of the non-bondable
		 * authentication requirement values.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
			conn->auth_type &= HCI_AT_NO_BONDING_MITM;

		cp.authentication = conn->auth_type;
		cp.oob_data = bredr_oob_data_present(conn);

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
4066 
4067 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
4068 {
4069 	struct hci_ev_io_capa_reply *ev = (void *) skb->data;
4070 	struct hci_conn *conn;
4071 
4072 	BT_DBG("%s", hdev->name);
4073 
4074 	hci_dev_lock(hdev);
4075 
4076 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4077 	if (!conn)
4078 		goto unlock;
4079 
4080 	conn->remote_cap = ev->capability;
4081 	conn->remote_auth = ev->authentication;
4082 
4083 unlock:
4084 	hci_dev_unlock(hdev);
4085 }
4086 
/* HCI User Confirmation Request event: decide whether the numeric
 * comparison can be auto-accepted, must be rejected, or needs to be
 * forwarded to user space for confirmation.
 */
static void hci_user_confirm_request_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Bit 0 of the auth requirements encodes the MITM preference */
	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. We check the security level here since it doesn't
	 * necessarily match conn->auth_type.
	 */
	if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
	    conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). The exception is if neither
		 * side had MITM or if the local IO capability is
		 * NoInputNoOutput, in which case we do auto-accept
		 */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
		    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
		    (loc_mitm || rem_mitm)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		/* A configured delay defers the accept to a delayed work
		 * item instead of replying immediately.
		 */
		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			queue_delayed_work(conn->hdev->workqueue,
					   &conn->auto_accept_work, delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
				  le32_to_cpu(ev->passkey), confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
4161 
4162 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
4163 					 struct sk_buff *skb)
4164 {
4165 	struct hci_ev_user_passkey_req *ev = (void *) skb->data;
4166 
4167 	BT_DBG("%s", hdev->name);
4168 
4169 	if (hci_dev_test_flag(hdev, HCI_MGMT))
4170 		mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
4171 }
4172 
4173 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
4174 					struct sk_buff *skb)
4175 {
4176 	struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
4177 	struct hci_conn *conn;
4178 
4179 	BT_DBG("%s", hdev->name);
4180 
4181 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4182 	if (!conn)
4183 		return;
4184 
4185 	conn->passkey_notify = __le32_to_cpu(ev->passkey);
4186 	conn->passkey_entered = 0;
4187 
4188 	if (hci_dev_test_flag(hdev, HCI_MGMT))
4189 		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4190 					 conn->dst_type, conn->passkey_notify,
4191 					 conn->passkey_entered);
4192 }
4193 
4194 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
4195 {
4196 	struct hci_ev_keypress_notify *ev = (void *) skb->data;
4197 	struct hci_conn *conn;
4198 
4199 	BT_DBG("%s", hdev->name);
4200 
4201 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4202 	if (!conn)
4203 		return;
4204 
4205 	switch (ev->type) {
4206 	case HCI_KEYPRESS_STARTED:
4207 		conn->passkey_entered = 0;
4208 		return;
4209 
4210 	case HCI_KEYPRESS_ENTERED:
4211 		conn->passkey_entered++;
4212 		break;
4213 
4214 	case HCI_KEYPRESS_ERASED:
4215 		conn->passkey_entered--;
4216 		break;
4217 
4218 	case HCI_KEYPRESS_CLEARED:
4219 		conn->passkey_entered = 0;
4220 		break;
4221 
4222 	case HCI_KEYPRESS_COMPLETED:
4223 		return;
4224 	}
4225 
4226 	if (hci_dev_test_flag(hdev, HCI_MGMT))
4227 		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4228 					 conn->dst_type, conn->passkey_notify,
4229 					 conn->passkey_entered);
4230 }
4231 
/* HCI Simple Pairing Complete event: reset the cached remote auth
 * requirement and, for pairings we did not initiate, report failures
 * to user space.
 */
static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Reset the authentication requirement to unknown */
	conn->remote_auth = 0xff;

	/* To avoid duplicate auth_failed events to user space we check
	 * the HCI_CONN_AUTH_PEND flag which will be set if we
	 * initiated the authentication. A traditional auth_complete
	 * event gets always produced as initiator and is also mapped to
	 * the mgmt_auth_failed event */
	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
		mgmt_auth_failed(conn, ev->status);

	/* Drop the reference taken in hci_io_capa_request_evt() */
	hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}
4262 
4263 static void hci_remote_host_features_evt(struct hci_dev *hdev,
4264 					 struct sk_buff *skb)
4265 {
4266 	struct hci_ev_remote_host_features *ev = (void *) skb->data;
4267 	struct inquiry_entry *ie;
4268 	struct hci_conn *conn;
4269 
4270 	BT_DBG("%s", hdev->name);
4271 
4272 	hci_dev_lock(hdev);
4273 
4274 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4275 	if (conn)
4276 		memcpy(conn->features[1], ev->features, 8);
4277 
4278 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4279 	if (ie)
4280 		ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4281 
4282 	hci_dev_unlock(hdev);
4283 }
4284 
/* HCI Remote OOB Data Request event: reply with the stored remote OOB
 * data, in the extended (P-192 + P-256) format when Secure Connections
 * is enabled, or send a negative reply when no data is available.
 */
static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
	struct oob_data *data;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* OOB data is only kept when mgmt manages the device */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
	if (!data) {
		struct hci_cp_remote_oob_data_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
			     sizeof(cp), &cp);
		goto unlock;
	}

	if (bredr_sc_enabled(hdev)) {
		struct hci_cp_remote_oob_ext_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* In SC Only mode the legacy P-192 values must not be
		 * used, so send them zeroed out.
		 */
		if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
			memset(cp.hash192, 0, sizeof(cp.hash192));
			memset(cp.rand192, 0, sizeof(cp.rand192));
		} else {
			memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
			memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
		}
		memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
		memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_remote_oob_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		memcpy(cp.hash, data->hash192, sizeof(cp.hash));
		memcpy(cp.rand, data->rand192, sizeof(cp.rand));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
4338 
4339 #if IS_ENABLED(CONFIG_BT_HS)
4340 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4341 {
4342 	struct hci_ev_channel_selected *ev = (void *)skb->data;
4343 	struct hci_conn *hcon;
4344 
4345 	BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4346 
4347 	skb_pull(skb, sizeof(*ev));
4348 
4349 	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4350 	if (!hcon)
4351 		return;
4352 
4353 	amp_read_loc_assoc_final_data(hdev, hcon);
4354 }
4355 
/* AMP Physical Link Complete event: mark the AMP physical link as
 * connected, copy the peer address from the underlying BR/EDR
 * connection and hand over to the AMP manager; delete the connection
 * object on failure.
 */
static void hci_phy_link_complete_evt(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_ev_phy_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon, *bredr_hcon;

	BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
	       ev->status);

	hci_dev_lock(hdev);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon) {
		hci_dev_unlock(hdev);
		return;
	}

	if (ev->status) {
		hci_conn_del(hcon);
		hci_dev_unlock(hdev);
		return;
	}

	/* The AMP link is controlled through the L2CAP connection of the
	 * associated BR/EDR link.
	 */
	bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;

	hcon->state = BT_CONNECTED;
	bacpy(&hcon->dst, &bredr_hcon->dst);

	/* Re-arm the disconnect timer with the normal timeout */
	hci_conn_hold(hcon);
	hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(hcon);

	hci_debugfs_create_conn(hcon);
	hci_conn_add_sysfs(hcon);

	amp_physical_cfm(bredr_hcon, hcon);

	hci_dev_unlock(hdev);
}
4395 
/* AMP Logical Link Complete event: create an HCI channel for the new
 * logical link and confirm it to the pending L2CAP channel of the AMP
 * manager.
 *
 * NOTE(review): unlike the surrounding handlers this one takes no
 * hci_dev_lock — confirm the hash lookups are safe in this context.
 */
static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_logical_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon;
	struct hci_chan *hchan;
	struct amp_mgr *mgr;

	BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
	       hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
	       ev->status);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	/* Create AMP hchan */
	hchan = hci_chan_create(hcon);
	if (!hchan)
		return;

	hchan->handle = le16_to_cpu(ev->handle);

	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);

	mgr = hcon->amp_mgr;
	if (mgr && mgr->bredr_chan) {
		struct l2cap_chan *bredr_chan = mgr->bredr_chan;

		l2cap_chan_lock(bredr_chan);

		/* AMP links use the block-based MTU of the controller */
		bredr_chan->conn->mtu = hdev->block_mtu;
		l2cap_logical_cfm(bredr_chan, hchan, 0);
		hci_conn_hold(hcon);

		l2cap_chan_unlock(bredr_chan);
	}
}
4433 
4434 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
4435 					     struct sk_buff *skb)
4436 {
4437 	struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
4438 	struct hci_chan *hchan;
4439 
4440 	BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
4441 	       le16_to_cpu(ev->handle), ev->status);
4442 
4443 	if (ev->status)
4444 		return;
4445 
4446 	hci_dev_lock(hdev);
4447 
4448 	hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
4449 	if (!hchan)
4450 		goto unlock;
4451 
4452 	amp_destroy_logical_link(hchan, ev->reason);
4453 
4454 unlock:
4455 	hci_dev_unlock(hdev);
4456 }
4457 
4458 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
4459 					     struct sk_buff *skb)
4460 {
4461 	struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
4462 	struct hci_conn *hcon;
4463 
4464 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4465 
4466 	if (ev->status)
4467 		return;
4468 
4469 	hci_dev_lock(hdev);
4470 
4471 	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4472 	if (hcon) {
4473 		hcon->state = BT_CLOSED;
4474 		hci_conn_del(hcon);
4475 	}
4476 
4477 	hci_dev_unlock(hdev);
4478 }
4479 #endif
4480 
/* HCI LE Connection Complete event: create or look up the hci_conn for
 * the new LE connection, fix up the initiator/responder address records,
 * resolve the peer's identity address via its IRK, and either handle a
 * failed attempt or move the connection towards the connected state.
 */
static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
	struct hci_conn_params *params;
	struct hci_conn *conn;
	struct smp_irk *irk;
	u8 addr_type;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	/* All controllers implicitly stop advertising in the event of a
	 * connection, so ensure that the state bit is cleared.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	/* An outgoing connection attempt already has a hci_conn object;
	 * otherwise one has to be allocated here.
	 */
	conn = hci_lookup_le_connect(hdev);
	if (!conn) {
		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr, ev->role);
		if (!conn) {
			bt_dev_err(hdev, "no memory for new connection");
			goto unlock;
		}

		conn->dst_type = ev->bdaddr_type;

		/* If we didn't have a hci_conn object previously
		 * but we're in master role this must be something
		 * initiated using a white list. Since white list based
		 * connections are not "first class citizens" we don't
		 * have full tracking of them. Therefore, we go ahead
		 * with a "best effort" approach of determining the
		 * initiator address based on the HCI_PRIVACY flag.
		 */
		if (conn->out) {
			conn->resp_addr_type = ev->bdaddr_type;
			bacpy(&conn->resp_addr, &ev->bdaddr);
			if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
				bacpy(&conn->init_addr, &hdev->rpa);
			} else {
				hci_copy_identity_address(hdev,
							  &conn->init_addr,
							  &conn->init_addr_type);
			}
		}
	} else {
		/* The pending attempt completed, so its timeout no longer
		 * needs to fire.
		 */
		cancel_delayed_work(&conn->le_conn_timeout);
	}

	if (!conn->out) {
		/* Set the responder (our side) address type based on
		 * the advertising address type.
		 */
		conn->resp_addr_type = hdev->adv_addr_type;
		if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM)
			bacpy(&conn->resp_addr, &hdev->random_addr);
		else
			bacpy(&conn->resp_addr, &hdev->bdaddr);

		conn->init_addr_type = ev->bdaddr_type;
		bacpy(&conn->init_addr, &ev->bdaddr);

		/* For incoming connections, set the default minimum
		 * and maximum connection interval. They will be used
		 * to check if the parameters are in range and if not
		 * trigger the connection update procedure.
		 */
		conn->le_conn_min_interval = hdev->le_conn_min_interval;
		conn->le_conn_max_interval = hdev->le_conn_max_interval;
	}

	/* Lookup the identity address from the stored connection
	 * address and address type.
	 *
	 * When establishing connections to an identity address, the
	 * connection procedure will store the resolvable random
	 * address first. Now if it can be converted back into the
	 * identity address, start using the identity address from
	 * now on.
	 */
	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
	if (irk) {
		bacpy(&conn->dst, &irk->bdaddr);
		conn->dst_type = irk->addr_type;
	}

	/* A non-zero status means the connection attempt failed;
	 * hci_le_conn_failed() cleans up the conn object.
	 */
	if (ev->status) {
		hci_le_conn_failed(conn, ev->status);
		goto unlock;
	}

	if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
		addr_type = BDADDR_LE_PUBLIC;
	else
		addr_type = BDADDR_LE_RANDOM;

	/* Drop the connection if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
		hci_conn_drop(conn);
		goto unlock;
	}

	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = __le16_to_cpu(ev->handle);
	conn->state = BT_CONFIG;

	conn->le_conn_interval = le16_to_cpu(ev->interval);
	conn->le_conn_latency = le16_to_cpu(ev->latency);
	conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);

	hci_debugfs_create_conn(conn);
	hci_conn_add_sysfs(conn);

	if (!ev->status) {
		/* The remote features procedure is defined for master
		 * role only. So only in case of an initiated connection
		 * request the remote features.
		 *
		 * If the local controller supports slave-initiated features
		 * exchange, then requesting the remote features in slave
		 * role is possible. Otherwise just transition into the
		 * connected state without requesting the remote features.
		 */
		if (conn->out ||
		    (hdev->le_features[0] & HCI_LE_SLAVE_FEATURES)) {
			struct hci_cp_le_read_remote_features cp;

			cp.handle = __cpu_to_le16(conn->handle);

			hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);

			/* Hold the conn until the features exchange
			 * completes; dropped again in the remote features
			 * complete handler.
			 */
			hci_conn_hold(conn);
		} else {
			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, ev->status);
		}
	} else {
		/* NOTE(review): this branch is unreachable - a non-zero
		 * ev->status already jumped to unlock above.
		 */
		hci_connect_cfm(conn, ev->status);
	}

	/* The pending connection action for this device has now been
	 * served: remove it and release the reference that was taken
	 * when the attempt was triggered.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
					   conn->dst_type);
	if (params) {
		list_del_init(&params->action);
		if (params->conn) {
			hci_conn_drop(params->conn);
			hci_conn_put(params->conn);
			params->conn = NULL;
		}
	}

unlock:
	/* Re-evaluate passive scanning now that the set of pending
	 * connections may have changed.
	 */
	hci_update_background_scan(hdev);
	hci_dev_unlock(hdev);
}
4642 
4643 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
4644 					    struct sk_buff *skb)
4645 {
4646 	struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
4647 	struct hci_conn *conn;
4648 
4649 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4650 
4651 	if (ev->status)
4652 		return;
4653 
4654 	hci_dev_lock(hdev);
4655 
4656 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4657 	if (conn) {
4658 		conn->le_conn_interval = le16_to_cpu(ev->interval);
4659 		conn->le_conn_latency = le16_to_cpu(ev->latency);
4660 		conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
4661 	}
4662 
4663 	hci_dev_unlock(hdev);
4664 }
4665 
/* This function requires the caller holds hdev->lock.
 *
 * Decide whether an advertising report of @adv_type from @addr should
 * trigger an LE connection attempt, and start one when appropriate.
 *
 * Returns the hci_conn of the attempt that was started (or one that is
 * already in progress, which is not treated as an error), otherwise
 * NULL.
 */
static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
					      bdaddr_t *addr,
					      u8 addr_type, u8 adv_type,
					      bdaddr_t *direct_rpa)
{
	struct hci_conn *conn;
	struct hci_conn_params *params;

	/* If the event is not connectable don't proceed further */
	if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
		return NULL;

	/* Ignore if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type))
		return NULL;

	/* Most controllers will fail if we try to create new connections
	 * while we have an existing one in slave role.
	 */
	if (hdev->conn_hash.le_num_slave > 0)
		return NULL;

	/* If we're not connectable only connect devices that we have in
	 * our pend_le_conns list.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
					   addr_type);
	if (!params)
		return NULL;

	/* For an explicit connect request the auto-connect policy below
	 * does not apply; a higher layer asked for this connection.
	 */
	if (!params->explicit_connect) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
			/* Only devices advertising with ADV_DIRECT_IND are
			 * triggering a connection attempt. This is allowing
			 * incoming connections from slave devices.
			 */
			if (adv_type != LE_ADV_DIRECT_IND)
				return NULL;
			break;
		case HCI_AUTO_CONN_ALWAYS:
			/* Devices advertising with ADV_IND or ADV_DIRECT_IND
			 * are triggering a connection attempt. This means
			 * that incoming connections from slave devices are
			 * accepted and also outgoing connections to slave
			 * devices are established when found.
			 */
			break;
		default:
			return NULL;
		}
	}

	conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
			      HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER,
			      direct_rpa);
	if (!IS_ERR(conn)) {
		/* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
		 * by higher layer that tried to connect, if no then
		 * store the pointer since we don't really have any
		 * other owner of the object besides the params that
		 * triggered it. This way we can abort the connection if
		 * the parameters get removed and keep the reference
		 * count consistent once the connection is established.
		 */

		if (!params->explicit_connect)
			params->conn = hci_conn_get(conn);

		return conn;
	}

	switch (PTR_ERR(conn)) {
	case -EBUSY:
		/* If hci_connect() returns -EBUSY it means there is already
		 * an LE connection attempt going on. Since controllers don't
		 * support more than one connection attempt at the time, we
		 * don't consider this an error case.
		 */
		break;
	default:
		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
		return NULL;
	}

	return NULL;
}
4754 
/* Process a single advertising (or directed advertising) report:
 * validate the PDU type and data length, resolve the advertiser's
 * identity address, possibly trigger a pending LE connection, and emit
 * mgmt Device Found events - merging an ADV_IND/ADV_SCAN_IND report
 * with its subsequent SCAN_RSP data when possible.
 *
 * @direct_addr is non-NULL only for LE Direct Advertising Reports;
 * those carry no advertising data (data == NULL, len == 0).
 */
static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
			       u8 bdaddr_type, bdaddr_t *direct_addr,
			       u8 direct_addr_type, s8 rssi, u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;
	struct smp_irk *irk;
	struct hci_conn *conn;
	bool match;
	u32 flags;
	u8 *ptr, real_len;

	/* Only the five legacy advertising PDU types are valid here */
	switch (type) {
	case LE_ADV_IND:
	case LE_ADV_DIRECT_IND:
	case LE_ADV_SCAN_IND:
	case LE_ADV_NONCONN_IND:
	case LE_ADV_SCAN_RSP:
		break;
	default:
		bt_dev_err_ratelimited(hdev, "unknown advertising packet "
				       "type: 0x%02x", type);
		return;
	}

	/* Find the end of the data in case the report contains padded zero
	 * bytes at the end causing an invalid length value.
	 *
	 * When data is NULL, len is 0 so there is no need for extra ptr
	 * check as 'ptr < data + 0' is already false in such case.
	 */
	for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
		if (ptr + 1 + *ptr > data + len)
			break;
	}

	real_len = ptr - data;

	/* Adjust for actual length */
	if (len != real_len) {
		bt_dev_err_ratelimited(hdev, "advertising data len corrected");
		len = real_len;
	}

	/* If the direct address is present, then this report is from
	 * a LE Direct Advertising Report event. In that case it is
	 * important to see if the address is matching the local
	 * controller address.
	 */
	if (direct_addr) {
		/* Only resolvable random addresses are valid for these
		 * kind of reports and others can be ignored.
		 */
		if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
			return;

		/* If the controller is not using resolvable random
		 * addresses, then this report can be ignored.
		 */
		if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
			return;

		/* If the local IRK of the controller does not match
		 * with the resolvable random address provided, then
		 * this report can be ignored.
		 */
		if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
			return;
	}

	/* Check if we need to convert to identity address */
	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		bdaddr_type = irk->addr_type;
	}

	/* Check if we have been requested to connect to this device.
	 *
	 * direct_addr is set only for directed advertising reports (it is NULL
	 * for advertising reports) and is already verified to be RPA above.
	 */
	conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type,
								direct_addr);
	if (conn && type == LE_ADV_IND) {
		/* Store report for later inclusion by
		 * mgmt_device_connected
		 */
		memcpy(conn->le_adv_data, data, len);
		conn->le_adv_data_len = len;
	}

	/* Passive scanning shouldn't trigger any device found events,
	 * except for devices marked as CONN_REPORT for which we do send
	 * device found events.
	 */
	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
		if (type == LE_ADV_DIRECT_IND)
			return;

		if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
					       bdaddr, bdaddr_type))
			return;

		if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
			flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
		else
			flags = 0;
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* When receiving non-connectable or scannable undirected
	 * advertising reports, this means that the remote device is
	 * not connectable and then clearly indicate this in the
	 * device found event.
	 *
	 * When receiving a scan response, then there is no way to
	 * know if the remote device is connectable or not. However
	 * since scan responses are merged with a previously seen
	 * advertising report, the flags field from that report
	 * will be used.
	 *
	 * In the really unlikely case that a controller get confused
	 * and just sends a scan response event, then it is marked as
	 * not connectable as well.
	 */
	if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
	    type == LE_ADV_SCAN_RSP)
		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
	else
		flags = 0;

	/* If there's nothing pending either store the data from this
	 * event or send an immediate device found event if the data
	 * should not be stored for later.
	 */
	if (!has_pending_adv_report(hdev)) {
		/* If the report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* Check if the pending report is for the same device as the new one */
	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
		 bdaddr_type == d->last_adv_addr_type);

	/* If the pending data doesn't match this report or this isn't a
	 * scan response (e.g. we got a duplicate ADV_IND) then force
	 * sending of the pending data.
	 */
	if (type != LE_ADV_SCAN_RSP || !match) {
		/* Send out whatever is in the cache, but skip duplicates */
		if (!match)
			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);

		/* If the new report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		/* The advertising reports cannot be merged, so clear
		 * the pending report and send out a device found event.
		 */
		clear_pending_adv_report(hdev);
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
	 * the new event is a SCAN_RSP. We can therefore proceed with
	 * sending a merged device found event.
	 */
	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
			  d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
			  d->last_adv_data, d->last_adv_data_len, data, len);
	clear_pending_adv_report(hdev);
}
4951 
4952 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
4953 {
4954 	u8 num_reports = skb->data[0];
4955 	void *ptr = &skb->data[1];
4956 
4957 	hci_dev_lock(hdev);
4958 
4959 	while (num_reports--) {
4960 		struct hci_ev_le_advertising_info *ev = ptr;
4961 		s8 rssi;
4962 
4963 		if (ev->length <= HCI_MAX_AD_LENGTH) {
4964 			rssi = ev->data[ev->length];
4965 			process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
4966 					   ev->bdaddr_type, NULL, 0, rssi,
4967 					   ev->data, ev->length);
4968 		} else {
4969 			bt_dev_err(hdev, "Dropping invalid advertising data");
4970 		}
4971 
4972 		ptr += sizeof(*ev) + ev->length + 1;
4973 	}
4974 
4975 	hci_dev_unlock(hdev);
4976 }
4977 
4978 static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev,
4979 					    struct sk_buff *skb)
4980 {
4981 	struct hci_ev_le_remote_feat_complete *ev = (void *)skb->data;
4982 	struct hci_conn *conn;
4983 
4984 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4985 
4986 	hci_dev_lock(hdev);
4987 
4988 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4989 	if (conn) {
4990 		if (!ev->status)
4991 			memcpy(conn->features[0], ev->features, 8);
4992 
4993 		if (conn->state == BT_CONFIG) {
4994 			__u8 status;
4995 
4996 			/* If the local controller supports slave-initiated
4997 			 * features exchange, but the remote controller does
4998 			 * not, then it is possible that the error code 0x1a
4999 			 * for unsupported remote feature gets returned.
5000 			 *
5001 			 * In this specific case, allow the connection to
5002 			 * transition into connected state and mark it as
5003 			 * successful.
5004 			 */
5005 			if ((hdev->le_features[0] & HCI_LE_SLAVE_FEATURES) &&
5006 			    !conn->out && ev->status == 0x1a)
5007 				status = 0x00;
5008 			else
5009 				status = ev->status;
5010 
5011 			conn->state = BT_CONNECTED;
5012 			hci_connect_cfm(conn, status);
5013 			hci_conn_drop(conn);
5014 		}
5015 	}
5016 
5017 	hci_dev_unlock(hdev);
5018 }
5019 
/* HCI LE Long Term Key Request event: the controller asks the host for
 * the LTK of a connection that is starting encryption. Look the key up,
 * verify its EDiv/Rand values and reply with it; otherwise send a
 * negative reply so the encryption attempt fails cleanly.
 */
static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
	if (!ltk)
		goto not_found;

	if (smp_ltk_is_sc(ltk)) {
		/* With SC both EDiv and Rand are set to zero */
		if (ev->ediv || ev->rand)
			goto not_found;
	} else {
		/* For non-SC keys check that EDiv and Rand match */
		if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
			goto not_found;
	}

	/* Copy the key and zero-pad up to the full reply size in case a
	 * shorter negotiated encryption key size is in use.
	 */
	memcpy(cp.ltk, ltk->val, ltk->enc_size);
	memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
	cp.handle = cpu_to_le16(conn->handle);

	conn->pending_sec_level = smp_ltk_sec_level(ltk);

	conn->enc_key_size = ltk->enc_size;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
	 * temporary key used to encrypt a connection following
	 * pairing. It is used during the Encrypted Session Setup to
	 * distribute the keys. Later, security can be re-established
	 * using a distributed LTK.
	 */
	if (ltk->type == SMP_STK) {
		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
		list_del_rcu(&ltk->list);
		kfree_rcu(ltk, rcu);
	} else {
		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	/* No usable key: tell the controller so encryption setup fails */
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}
5083 
5084 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
5085 				      u8 reason)
5086 {
5087 	struct hci_cp_le_conn_param_req_neg_reply cp;
5088 
5089 	cp.handle = cpu_to_le16(handle);
5090 	cp.reason = reason;
5091 
5092 	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
5093 		     &cp);
5094 }
5095 
5096 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
5097 					     struct sk_buff *skb)
5098 {
5099 	struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
5100 	struct hci_cp_le_conn_param_req_reply cp;
5101 	struct hci_conn *hcon;
5102 	u16 handle, min, max, latency, timeout;
5103 
5104 	handle = le16_to_cpu(ev->handle);
5105 	min = le16_to_cpu(ev->interval_min);
5106 	max = le16_to_cpu(ev->interval_max);
5107 	latency = le16_to_cpu(ev->latency);
5108 	timeout = le16_to_cpu(ev->timeout);
5109 
5110 	hcon = hci_conn_hash_lookup_handle(hdev, handle);
5111 	if (!hcon || hcon->state != BT_CONNECTED)
5112 		return send_conn_param_neg_reply(hdev, handle,
5113 						 HCI_ERROR_UNKNOWN_CONN_ID);
5114 
5115 	if (hci_check_conn_params(min, max, latency, timeout))
5116 		return send_conn_param_neg_reply(hdev, handle,
5117 						 HCI_ERROR_INVALID_LL_PARAMS);
5118 
5119 	if (hcon->role == HCI_ROLE_MASTER) {
5120 		struct hci_conn_params *params;
5121 		u8 store_hint;
5122 
5123 		hci_dev_lock(hdev);
5124 
5125 		params = hci_conn_params_lookup(hdev, &hcon->dst,
5126 						hcon->dst_type);
5127 		if (params) {
5128 			params->conn_min_interval = min;
5129 			params->conn_max_interval = max;
5130 			params->conn_latency = latency;
5131 			params->supervision_timeout = timeout;
5132 			store_hint = 0x01;
5133 		} else{
5134 			store_hint = 0x00;
5135 		}
5136 
5137 		hci_dev_unlock(hdev);
5138 
5139 		mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
5140 				    store_hint, min, max, latency, timeout);
5141 	}
5142 
5143 	cp.handle = ev->handle;
5144 	cp.interval_min = ev->interval_min;
5145 	cp.interval_max = ev->interval_max;
5146 	cp.latency = ev->latency;
5147 	cp.timeout = ev->timeout;
5148 	cp.min_ce_len = 0;
5149 	cp.max_ce_len = 0;
5150 
5151 	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
5152 }
5153 
5154 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
5155 					 struct sk_buff *skb)
5156 {
5157 	u8 num_reports = skb->data[0];
5158 	void *ptr = &skb->data[1];
5159 
5160 	hci_dev_lock(hdev);
5161 
5162 	while (num_reports--) {
5163 		struct hci_ev_le_direct_adv_info *ev = ptr;
5164 
5165 		process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
5166 				   ev->bdaddr_type, &ev->direct_addr,
5167 				   ev->direct_addr_type, ev->rssi, NULL, 0);
5168 
5169 		ptr += sizeof(*ev);
5170 	}
5171 
5172 	hci_dev_unlock(hdev);
5173 }
5174 
/* HCI LE Meta event: strip the meta-event header and dispatch to the
 * subevent-specific handler. Unknown subevents are silently ignored.
 */
static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_meta *le_ev = (void *) skb->data;

	/* Leave only the subevent parameters in the skb for the handlers */
	skb_pull(skb, sizeof(*le_ev));

	switch (le_ev->subevent) {
	case HCI_EV_LE_CONN_COMPLETE:
		hci_le_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_CONN_UPDATE_COMPLETE:
		hci_le_conn_update_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_ADVERTISING_REPORT:
		hci_le_adv_report_evt(hdev, skb);
		break;

	case HCI_EV_LE_REMOTE_FEAT_COMPLETE:
		hci_le_remote_feat_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_LTK_REQ:
		hci_le_ltk_request_evt(hdev, skb);
		break;

	case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
		hci_le_remote_conn_param_req_evt(hdev, skb);
		break;

	case HCI_EV_LE_DIRECT_ADV_REPORT:
		hci_le_direct_adv_report_evt(hdev, skb);
		break;

	default:
		break;
	}
}
5214 
5215 static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
5216 				 u8 event, struct sk_buff *skb)
5217 {
5218 	struct hci_ev_cmd_complete *ev;
5219 	struct hci_event_hdr *hdr;
5220 
5221 	if (!skb)
5222 		return false;
5223 
5224 	if (skb->len < sizeof(*hdr)) {
5225 		bt_dev_err(hdev, "too short HCI event");
5226 		return false;
5227 	}
5228 
5229 	hdr = (void *) skb->data;
5230 	skb_pull(skb, HCI_EVENT_HDR_SIZE);
5231 
5232 	if (event) {
5233 		if (hdr->evt != event)
5234 			return false;
5235 		return true;
5236 	}
5237 
5238 	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
5239 		bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
5240 			   hdr->evt);
5241 		return false;
5242 	}
5243 
5244 	if (skb->len < sizeof(*ev)) {
5245 		bt_dev_err(hdev, "too short cmd_complete event");
5246 		return false;
5247 	}
5248 
5249 	ev = (void *) skb->data;
5250 	skb_pull(skb, sizeof(*ev));
5251 
5252 	if (opcode != __le16_to_cpu(ev->opcode)) {
5253 		BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
5254 		       __le16_to_cpu(ev->opcode));
5255 		return false;
5256 	}
5257 
5258 	return true;
5259 }
5260 
/* Main HCI event dispatcher, called for every HCI event packet that is
 * received from the controller. The event is matched against any
 * pending command/request, passed to its event-specific handler, and
 * finally any request completion callback is invoked.
 */
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	hci_req_complete_t req_complete = NULL;
	hci_req_complete_skb_t req_complete_skb = NULL;
	struct sk_buff *orig_skb = NULL;
	u8 status = 0, event = hdr->evt, req_evt = 0;
	u16 opcode = HCI_OP_NOP;

	/* The last sent command may be waiting for this specific event
	 * (rather than for Command Complete/Status); if so, resolve its
	 * completion callbacks now.
	 */
	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) {
		struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
		opcode = __le16_to_cpu(cmd_hdr->opcode);
		hci_req_cmd_complete(hdev, opcode, status, &req_complete,
				     &req_complete_skb);
		req_evt = event;
	}

	/* If it looks like we might end up having to call
	 * req_complete_skb, store a pristine copy of the skb since the
	 * various handlers may modify the original one through
	 * skb_pull() calls, etc.
	 */
	if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
	    event == HCI_EV_CMD_COMPLETE)
		orig_skb = skb_clone(skb, GFP_KERNEL);

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* Dispatch to the event-specific handler; unknown events are
	 * only logged.
	 */
	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb, &opcode, &status,
				     &req_complete, &req_complete_skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb, &opcode, &status, &req_complete,
				   &req_complete_skb);
		break;

	case HCI_EV_HARDWARE_ERROR:
		hci_hardware_error_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_KEY_REFRESH_COMPLETE:
		hci_key_refresh_complete_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_REQUEST:
		hci_user_passkey_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_NOTIFY:
		hci_user_passkey_notify_evt(hdev, skb);
		break;

	case HCI_EV_KEYPRESS_NOTIFY:
		hci_keypress_notify_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

#if IS_ENABLED(CONFIG_BT_HS)
	case HCI_EV_CHANNEL_SELECTED:
		hci_chan_selected_evt(hdev, skb);
		break;

	case HCI_EV_PHY_LINK_COMPLETE:
		hci_phy_link_complete_evt(hdev, skb);
		break;

	case HCI_EV_LOGICAL_LINK_COMPLETE:
		hci_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
		hci_disconn_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
		hci_disconn_phylink_complete_evt(hdev, skb);
		break;
#endif

	case HCI_EV_NUM_COMP_BLOCKS:
		hci_num_comp_blocks_evt(hdev, skb);
		break;

	default:
		BT_DBG("%s event 0x%2.2x", hdev->name, event);
		break;
	}

	/* Run the request completion. The skb variant only gets the
	 * pristine copy when it really is the matching completion event;
	 * otherwise the copy is dropped and NULL is passed.
	 */
	if (req_complete) {
		req_complete(hdev, status, opcode);
	} else if (req_complete_skb) {
		if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
			kfree_skb(orig_skb);
			orig_skb = NULL;
		}
		req_complete_skb(hdev, status, opcode, orig_skb);
	}

	/* kfree_skb(NULL) is a no-op, so this is safe in every path */
	kfree_skb(orig_skb);
	kfree_skb(skb);
	hdev->stat.evt_rx++;
}
5485