xref: /openbmc/linux/net/bluetooth/hci_event.c (revision d2999e1b)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4 
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI event handling. */
26 
27 #include <asm/unaligned.h>
28 
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
32 
33 #include "a2mp.h"
34 #include "amp.h"
35 
36 /* Handle HCI Event packets */
37 
38 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
39 {
40 	__u8 status = *((__u8 *) skb->data);
41 
42 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
43 
44 	if (status)
45 		return;
46 
47 	clear_bit(HCI_INQUIRY, &hdev->flags);
48 	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
49 	wake_up_bit(&hdev->flags, HCI_INQUIRY);
50 
51 	hci_dev_lock(hdev);
52 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
53 	hci_dev_unlock(hdev);
54 
55 	hci_conn_check_pending(hdev);
56 }
57 
58 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
59 {
60 	__u8 status = *((__u8 *) skb->data);
61 
62 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
63 
64 	if (status)
65 		return;
66 
67 	set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
68 }
69 
70 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
71 {
72 	__u8 status = *((__u8 *) skb->data);
73 
74 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
75 
76 	if (status)
77 		return;
78 
79 	clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
80 
81 	hci_conn_check_pending(hdev);
82 }
83 
84 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
85 					  struct sk_buff *skb)
86 {
87 	BT_DBG("%s", hdev->name);
88 }
89 
/* Command Complete handler for HCI_Role_Discovery.
 *
 * On success, update the cached link mode of the matching connection:
 * role 0x00 means we are master on that link, anything else means
 * slave, so HCI_LM_MASTER is set or cleared accordingly.
 */
static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_role_discovery *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	/* Handle arrives little-endian on the wire */
	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		if (rp->role)
			conn->link_mode &= ~HCI_LM_MASTER;
		else
			conn->link_mode |= HCI_LM_MASTER;
	}

	hci_dev_unlock(hdev);
}
112 
113 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
114 {
115 	struct hci_rp_read_link_policy *rp = (void *) skb->data;
116 	struct hci_conn *conn;
117 
118 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
119 
120 	if (rp->status)
121 		return;
122 
123 	hci_dev_lock(hdev);
124 
125 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
126 	if (conn)
127 		conn->link_policy = __le16_to_cpu(rp->policy);
128 
129 	hci_dev_unlock(hdev);
130 }
131 
/* Command Complete handler for HCI_Write_Link_Policy_Settings.
 *
 * The response only echoes the handle, so the policy value that was
 * actually written is recovered from the command we sent.
 */
static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		/* sent + 2 skips the 2-byte handle at the start of the
		 * command parameters; the policy field follows it
		 * (assumes hci_cp_write_link_policy layout — handle
		 * then policy).
		 */
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}
155 
156 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
157 					struct sk_buff *skb)
158 {
159 	struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
160 
161 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
162 
163 	if (rp->status)
164 		return;
165 
166 	hdev->link_policy = __le16_to_cpu(rp->policy);
167 }
168 
/* Command Complete handler for HCI_Write_Default_Link_Policy_Settings.
 *
 * The sent command is looked up before checking the status so that the
 * pending command data is consumed either way; the cached value is
 * only updated when the controller accepted the write.
 */
static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
	if (!sent)
		return;

	if (!status)
		/* Command parameters start with the 2-byte policy value */
		hdev->link_policy = get_unaligned_le16(sent);
}
184 
/* Command Complete handler for HCI_Reset.
 *
 * Clears the in-progress reset flag and returns all non-persistent
 * host state (discovery, TX power caches, advertising and scan
 * response data, LE scan type, SSP debug mode) to its defaults.
 * Note: the status byte is only logged — the state is reset
 * unconditionally.
 */
static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	clear_bit(HCI_RESET, &hdev->flags);

	/* Reset all non-persistent flags */
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	hdev->discovery.state = DISCOVERY_STOPPED;
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;

	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
	hdev->scan_rsp_data_len = 0;

	hdev->le_scan_type = LE_SCAN_PASSIVE;

	hdev->ssp_debug_mode = 0;
}
210 
/* Command Complete handler for HCI_Write_Local_Name.
 *
 * When the management interface is active, completion (success or
 * failure) is reported to it and mgmt owns the cached name.  Otherwise
 * the local copy in hdev->dev_name is updated directly, but only on
 * success.  The name written is taken from the sent command since the
 * response carries no payload beyond the status.
 */
static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_set_local_name_complete(hdev, sent, status);
	else if (!status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);
}
231 
232 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
233 {
234 	struct hci_rp_read_local_name *rp = (void *) skb->data;
235 
236 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
237 
238 	if (rp->status)
239 		return;
240 
241 	if (test_bit(HCI_SETUP, &hdev->dev_flags))
242 		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
243 }
244 
/* Command Complete handler for HCI_Write_Authentication_Enable.
 *
 * On success, mirror the requested mode into the HCI_AUTH flag.  The
 * management interface is notified in every case — including failure —
 * so pending mgmt commands can complete with the right status.
 */
static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return;

	if (!status) {
		/* First command parameter is the requested auth mode */
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_auth_enable_complete(hdev, status);
}
268 
269 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
270 {
271 	__u8 status = *((__u8 *) skb->data);
272 	void *sent;
273 
274 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
275 
276 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
277 	if (!sent)
278 		return;
279 
280 	if (!status) {
281 		__u8 param = *((__u8 *) sent);
282 
283 		if (param)
284 			set_bit(HCI_ENCRYPT, &hdev->flags);
285 		else
286 			clear_bit(HCI_ENCRYPT, &hdev->flags);
287 	}
288 }
289 
/* Command Complete handler for HCI_Write_Scan_Enable.
 *
 * Syncs the HCI_ISCAN/HCI_PSCAN flags with the scan mode that was
 * actually written and notifies mgmt about discoverable/connectable
 * transitions — but only on actual changes, which is why the old flag
 * values are captured with test_and_clear_bit() before the new ones
 * are applied.  On failure, mgmt is told and any pending discoverable
 * timeout is cancelled.
 */
static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 param, status = *((__u8 *) skb->data);
	int old_pscan, old_iscan;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return;

	/* The scan mode we asked for (SCAN_INQUIRY/SCAN_PAGE bits) */
	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	if (status) {
		mgmt_write_scan_failed(hdev, param, status);
		hdev->discov_timeout = 0;
		goto done;
	}

	/* We need to ensure that we set this back on if someone changed
	 * the scan mode through a raw HCI socket.
	 */
	set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
	old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_INQUIRY) {
		set_bit(HCI_ISCAN, &hdev->flags);
		if (!old_iscan)
			mgmt_discoverable(hdev, 1);
	} else if (old_iscan)
		mgmt_discoverable(hdev, 0);

	if (param & SCAN_PAGE) {
		set_bit(HCI_PSCAN, &hdev->flags);
		if (!old_pscan)
			mgmt_connectable(hdev, 1);
	} else if (old_pscan)
		mgmt_connectable(hdev, 0);

done:
	hci_dev_unlock(hdev);
}
337 
338 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
339 {
340 	struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
341 
342 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
343 
344 	if (rp->status)
345 		return;
346 
347 	memcpy(hdev->dev_class, rp->dev_class, 3);
348 
349 	BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
350 	       hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
351 }
352 
/* Command Complete handler for HCI_Write_Class_of_Device.
 *
 * On success the local cache is updated from the command we sent; the
 * management interface is notified regardless of the status so its
 * pending command can complete.
 */
static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (status == 0)
		memcpy(hdev->dev_class, sent, 3);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_set_class_of_dev_complete(hdev, sent, status);

	hci_dev_unlock(hdev);
}
374 
375 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
376 {
377 	struct hci_rp_read_voice_setting *rp = (void *) skb->data;
378 	__u16 setting;
379 
380 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
381 
382 	if (rp->status)
383 		return;
384 
385 	setting = __le16_to_cpu(rp->voice_setting);
386 
387 	if (hdev->voice_setting == setting)
388 		return;
389 
390 	hdev->voice_setting = setting;
391 
392 	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
393 
394 	if (hdev->notify)
395 		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
396 }
397 
398 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
399 				       struct sk_buff *skb)
400 {
401 	__u8 status = *((__u8 *) skb->data);
402 	__u16 setting;
403 	void *sent;
404 
405 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
406 
407 	if (status)
408 		return;
409 
410 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
411 	if (!sent)
412 		return;
413 
414 	setting = get_unaligned_le16(sent);
415 
416 	if (hdev->voice_setting == setting)
417 		return;
418 
419 	hdev->voice_setting = setting;
420 
421 	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
422 
423 	if (hdev->notify)
424 		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
425 }
426 
427 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
428 					  struct sk_buff *skb)
429 {
430 	struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
431 
432 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
433 
434 	if (rp->status)
435 		return;
436 
437 	hdev->num_iac = rp->num_iac;
438 
439 	BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
440 }
441 
/* Command Complete handler for HCI_Write_Simple_Pairing_Mode.
 *
 * On success the LMP_HOST_SSP feature bit is kept in sync with the
 * mode we wrote.  mgmt is always told the outcome; without mgmt the
 * HCI_SSP_ENABLED dev_flag is updated directly (success only).
 */
static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_write_ssp_mode *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return;

	if (!status) {
		/* Mirror the new mode into the host features page */
		if (sent->mode)
			hdev->features[1][0] |= LMP_HOST_SSP;
		else
			hdev->features[1][0] &= ~LMP_HOST_SSP;
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_ssp_enable_complete(hdev, sent->mode, status);
	else if (!status) {
		if (sent->mode)
			set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		else
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	}
}
469 
/* Command Complete handler for HCI_Write_Secure_Connections_Host_Support.
 *
 * Mirrors hci_cc_write_ssp_mode: on success the LMP_HOST_SC feature
 * bit tracks the written value; mgmt is always notified, and without
 * mgmt the HCI_SC_ENABLED dev_flag is updated directly (success only).
 */
static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	struct hci_cp_write_sc_support *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
	if (!sent)
		return;

	if (!status) {
		/* Mirror the new support setting into the host features */
		if (sent->support)
			hdev->features[1][0] |= LMP_HOST_SC;
		else
			hdev->features[1][0] &= ~LMP_HOST_SC;
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_sc_enable_complete(hdev, sent->support, status);
	else if (!status) {
		if (sent->support)
			set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
		else
			clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
	}
}
497 
498 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
499 {
500 	struct hci_rp_read_local_version *rp = (void *) skb->data;
501 
502 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
503 
504 	if (rp->status)
505 		return;
506 
507 	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
508 		hdev->hci_ver = rp->hci_ver;
509 		hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
510 		hdev->lmp_ver = rp->lmp_ver;
511 		hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
512 		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
513 	}
514 }
515 
516 static void hci_cc_read_local_commands(struct hci_dev *hdev,
517 				       struct sk_buff *skb)
518 {
519 	struct hci_rp_read_local_commands *rp = (void *) skb->data;
520 
521 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
522 
523 	if (rp->status)
524 		return;
525 
526 	if (test_bit(HCI_SETUP, &hdev->dev_flags))
527 		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
528 }
529 
/* Command Complete handler for HCI_Read_Local_Supported_Features.
 *
 * Caches feature page 0 and then derives the ACL/SCO/eSCO packet
 * types the host may use from the individual LMP feature bits.
 */
static void hci_cc_read_local_features(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	/* Multi-slot ACL packets */
	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	/* SCO packet types beyond the mandatory HV1 */
	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type  |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type  |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	/* eSCO packet types, gated on basic eSCO capability */
	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	/* EDR eSCO packet types */
	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
}
579 
580 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
581 					   struct sk_buff *skb)
582 {
583 	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
584 
585 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
586 
587 	if (rp->status)
588 		return;
589 
590 	if (hdev->max_page < rp->max_page)
591 		hdev->max_page = rp->max_page;
592 
593 	if (rp->page < HCI_MAX_PAGES)
594 		memcpy(hdev->features[rp->page], rp->features, 8);
595 }
596 
597 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
598 					  struct sk_buff *skb)
599 {
600 	struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
601 
602 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
603 
604 	if (!rp->status)
605 		hdev->flow_ctl_mode = rp->mode;
606 }
607 
/* Command Complete handler for HCI_Read_Buffer_Size.
 *
 * Caches the controller's ACL/SCO MTUs and buffer counts and seeds
 * the free-buffer counters from them.  Controllers with the
 * FIXUP_BUFFER_SIZE quirk report bogus SCO values, so those are
 * overridden after the read.
 */
static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu  = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
		hdev->sco_mtu  = 64;
		hdev->sco_pkts = 8;
	}

	/* All buffers start out free */
	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
}
633 
634 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
635 {
636 	struct hci_rp_read_bd_addr *rp = (void *) skb->data;
637 
638 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
639 
640 	if (!rp->status)
641 		bacpy(&hdev->bdaddr, &rp->bdaddr);
642 }
643 
644 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
645 					   struct sk_buff *skb)
646 {
647 	struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
648 
649 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
650 
651 	if (test_bit(HCI_INIT, &hdev->flags) && !rp->status) {
652 		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
653 		hdev->page_scan_window = __le16_to_cpu(rp->window);
654 	}
655 }
656 
657 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
658 					    struct sk_buff *skb)
659 {
660 	u8 status = *((u8 *) skb->data);
661 	struct hci_cp_write_page_scan_activity *sent;
662 
663 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
664 
665 	if (status)
666 		return;
667 
668 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
669 	if (!sent)
670 		return;
671 
672 	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
673 	hdev->page_scan_window = __le16_to_cpu(sent->window);
674 }
675 
676 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
677 					   struct sk_buff *skb)
678 {
679 	struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
680 
681 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
682 
683 	if (test_bit(HCI_INIT, &hdev->flags) && !rp->status)
684 		hdev->page_scan_type = rp->type;
685 }
686 
687 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
688 					struct sk_buff *skb)
689 {
690 	u8 status = *((u8 *) skb->data);
691 	u8 *type;
692 
693 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
694 
695 	if (status)
696 		return;
697 
698 	type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
699 	if (type)
700 		hdev->page_scan_type = *type;
701 }
702 
703 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
704 					struct sk_buff *skb)
705 {
706 	struct hci_rp_read_data_block_size *rp = (void *) skb->data;
707 
708 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
709 
710 	if (rp->status)
711 		return;
712 
713 	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
714 	hdev->block_len = __le16_to_cpu(rp->block_len);
715 	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
716 
717 	hdev->block_cnt = hdev->num_blocks;
718 
719 	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
720 	       hdev->block_cnt, hdev->block_len);
721 }
722 
/* Command Complete handler for HCI_Read_Local_AMP_Info.
 *
 * Caches the local AMP controller capabilities.  Note the A2MP Get
 * Info response is sent in every case — on error the cached values
 * are simply left untouched, hence the goto past the assignments.
 */
static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		goto a2mp_rsp;

	hdev->amp_status = rp->amp_status;
	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
	hdev->amp_type = rp->amp_type;
	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);

a2mp_rsp:
	a2mp_send_getinfo_rsp(hdev);
}
747 
/* Command Complete handler for HCI_Read_Local_AMP_ASSOC.
 *
 * The AMP assoc structure can be larger than one HCI response, so it
 * arrives in fragments.  Each fragment is appended to hdev->loc_assoc
 * at the running offset; while rem_len says more data is outstanding,
 * the next fragment is requested and we return early.  Only once the
 * final fragment lands (or on error) are the A2MP responses sent.
 */
static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
	struct amp_assoc *assoc = &hdev->loc_assoc;
	size_t rem_len, frag_len;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		goto a2mp_rsp;

	/* Payload after the fixed response header is this fragment */
	frag_len = skb->len - sizeof(*rp);
	rem_len = __le16_to_cpu(rp->rem_len);

	if (rem_len > frag_len) {
		/* More fragments outstanding: stash this one and ask
		 * for the next.
		 * NOTE(review): assumes assoc->data is large enough for
		 * offset + frag_len — presumably bounded by
		 * amp_assoc_size; confirm against the allocation site.
		 */
		BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);

		memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
		assoc->offset += frag_len;

		/* Read other fragments */
		amp_read_loc_assoc_frag(hdev, rp->phy_handle);

		return;
	}

	/* Last fragment: finalize length and reset offset for reuse */
	memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
	assoc->len = assoc->offset + rem_len;
	assoc->offset = 0;

a2mp_rsp:
	/* Send A2MP Rsp when all fragments are received */
	a2mp_send_getampassoc_rsp(hdev, rp->status);
	a2mp_send_create_phy_link_req(hdev, rp->status);
}
784 
785 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
786 					 struct sk_buff *skb)
787 {
788 	struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
789 
790 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
791 
792 	if (!rp->status)
793 		hdev->inq_tx_power = rp->tx_power;
794 }
795 
/* Command Complete handler for HCI_PIN_Code_Request_Reply.
 *
 * mgmt is notified first, regardless of status, so its pending command
 * completes.  On success the PIN length from the sent command is
 * recorded on the connection — it is needed later to decide key
 * strength.
 */
static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
}
823 
824 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
825 {
826 	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
827 
828 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
829 
830 	hci_dev_lock(hdev);
831 
832 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
833 		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
834 						 rp->status);
835 
836 	hci_dev_unlock(hdev);
837 }
838 
839 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
840 				       struct sk_buff *skb)
841 {
842 	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
843 
844 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
845 
846 	if (rp->status)
847 		return;
848 
849 	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
850 	hdev->le_pkts = rp->le_max_pkt;
851 
852 	hdev->le_cnt = hdev->le_pkts;
853 
854 	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
855 }
856 
857 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
858 					  struct sk_buff *skb)
859 {
860 	struct hci_rp_le_read_local_features *rp = (void *) skb->data;
861 
862 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
863 
864 	if (!rp->status)
865 		memcpy(hdev->le_features, rp->features, 8);
866 }
867 
868 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
869 					struct sk_buff *skb)
870 {
871 	struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
872 
873 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
874 
875 	if (!rp->status)
876 		hdev->adv_tx_power = rp->tx_power;
877 }
878 
879 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
880 {
881 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
882 
883 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
884 
885 	hci_dev_lock(hdev);
886 
887 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
888 		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
889 						 rp->status);
890 
891 	hci_dev_unlock(hdev);
892 }
893 
894 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
895 					  struct sk_buff *skb)
896 {
897 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
898 
899 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
900 
901 	hci_dev_lock(hdev);
902 
903 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
904 		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
905 						     ACL_LINK, 0, rp->status);
906 
907 	hci_dev_unlock(hdev);
908 }
909 
910 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
911 {
912 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
913 
914 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
915 
916 	hci_dev_lock(hdev);
917 
918 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
919 		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
920 						 0, rp->status);
921 
922 	hci_dev_unlock(hdev);
923 }
924 
925 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
926 					  struct sk_buff *skb)
927 {
928 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
929 
930 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
931 
932 	hci_dev_lock(hdev);
933 
934 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
935 		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
936 						     ACL_LINK, 0, rp->status);
937 
938 	hci_dev_unlock(hdev);
939 }
940 
941 static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
942 				       struct sk_buff *skb)
943 {
944 	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
945 
946 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
947 
948 	hci_dev_lock(hdev);
949 	mgmt_read_local_oob_data_complete(hdev, rp->hash, rp->randomizer,
950 					  NULL, NULL, rp->status);
951 	hci_dev_unlock(hdev);
952 }
953 
954 static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
955 					   struct sk_buff *skb)
956 {
957 	struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
958 
959 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
960 
961 	hci_dev_lock(hdev);
962 	mgmt_read_local_oob_data_complete(hdev, rp->hash192, rp->randomizer192,
963 					  rp->hash256, rp->randomizer256,
964 					  rp->status);
965 	hci_dev_unlock(hdev);
966 }
967 
968 
969 static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
970 {
971 	__u8 status = *((__u8 *) skb->data);
972 	bdaddr_t *sent;
973 
974 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
975 
976 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
977 	if (!sent)
978 		return;
979 
980 	hci_dev_lock(hdev);
981 
982 	if (!status)
983 		bacpy(&hdev->random_addr, sent);
984 
985 	hci_dev_unlock(hdev);
986 }
987 
/* Command Complete handler for HCI_LE_Set_Advertising_Enable.
 *
 * On success, tell mgmt about the new advertising state.  When
 * advertising was just turned on while an LE connection attempt is in
 * BT_CONNECT state (connecting as peripheral), arm the connection
 * timeout so a central that never connects doesn't hang us forever.
 */
static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return;

	if (status)
		return;

	hci_dev_lock(hdev);

	/* If we're doing connection initation as peripheral. Set a
	 * timeout in case something goes wrong.
	 */
	if (*sent) {
		struct hci_conn *conn;

		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   HCI_LE_CONN_TIMEOUT);
	}

	/* *sent is the enable parameter we wrote (0 = off, 1 = on) */
	mgmt_advertising(hdev, *sent);

	hci_dev_unlock(hdev);
}
1020 
1021 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1022 {
1023 	struct hci_cp_le_set_scan_param *cp;
1024 	__u8 status = *((__u8 *) skb->data);
1025 
1026 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1027 
1028 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1029 	if (!cp)
1030 		return;
1031 
1032 	hci_dev_lock(hdev);
1033 
1034 	if (!status)
1035 		hdev->le_scan_type = cp->type;
1036 
1037 	hci_dev_unlock(hdev);
1038 }
1039 
1040 static bool has_pending_adv_report(struct hci_dev *hdev)
1041 {
1042 	struct discovery_state *d = &hdev->discovery;
1043 
1044 	return bacmp(&d->last_adv_addr, BDADDR_ANY);
1045 }
1046 
1047 static void clear_pending_adv_report(struct hci_dev *hdev)
1048 {
1049 	struct discovery_state *d = &hdev->discovery;
1050 
1051 	bacpy(&d->last_adv_addr, BDADDR_ANY);
1052 	d->last_adv_data_len = 0;
1053 }
1054 
/* Stash an advertising report in the discovery state so it can be
 * merged with a later scan response (or flushed when scanning stops)
 * before being reported to mgmt.
 *
 * NOTE(review): len is copied unchecked into last_adv_data —
 * presumably callers guarantee it fits the buffer (LE adv data is at
 * most 31 bytes); confirm against last_adv_data's size.
 */
static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 bdaddr_type, s8 rssi, u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;

	bacpy(&d->last_adv_addr, bdaddr);
	d->last_adv_addr_type = bdaddr_type;
	d->last_adv_rssi = rssi;
	memcpy(d->last_adv_data, data, len);
	d->last_adv_data_len = len;
}
1066 
/* Command Complete handler for HCI_LE_Set_Scan_Enable.
 *
 * On enable: set HCI_LE_SCAN and, for active scans, drop any stale
 * pending advertising report.  On disable: flush a still-pending
 * report to mgmt, cancel the scan-disable timer, clear HCI_LE_SCAN,
 * and — if scanning was interrupted by a connect attempt — mark
 * discovery stopped.
 */
static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	if (status)
		return;

	switch (cp->enable) {
	case LE_SCAN_ENABLE:
		set_bit(HCI_LE_SCAN, &hdev->dev_flags);
		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
			clear_pending_adv_report(hdev);
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, 0, 1,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request. Mark
		 * therefore discovery as stopped.
		 */
		if (test_and_clear_bit(HCI_LE_SCAN_INTERRUPTED,
				       &hdev->dev_flags))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		break;

	default:
		BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
		break;
	}
}
1124 
1125 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1126 					   struct sk_buff *skb)
1127 {
1128 	struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1129 
1130 	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1131 
1132 	if (!rp->status)
1133 		hdev->le_white_list_size = rp->size;
1134 }
1135 
1136 static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
1137 				       struct sk_buff *skb)
1138 {
1139 	__u8 status = *((__u8 *) skb->data);
1140 
1141 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1142 
1143 	if (!status)
1144 		hci_white_list_clear(hdev);
1145 }
1146 
1147 static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
1148 					struct sk_buff *skb)
1149 {
1150 	struct hci_cp_le_add_to_white_list *sent;
1151 	__u8 status = *((__u8 *) skb->data);
1152 
1153 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1154 
1155 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
1156 	if (!sent)
1157 		return;
1158 
1159 	if (!status)
1160 		hci_white_list_add(hdev, &sent->bdaddr, sent->bdaddr_type);
1161 }
1162 
1163 static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
1164 					  struct sk_buff *skb)
1165 {
1166 	struct hci_cp_le_del_from_white_list *sent;
1167 	__u8 status = *((__u8 *) skb->data);
1168 
1169 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1170 
1171 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
1172 	if (!sent)
1173 		return;
1174 
1175 	if (!status)
1176 		hci_white_list_del(hdev, &sent->bdaddr, sent->bdaddr_type);
1177 }
1178 
1179 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1180 					    struct sk_buff *skb)
1181 {
1182 	struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1183 
1184 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1185 
1186 	if (!rp->status)
1187 		memcpy(hdev->le_states, rp->le_states, 8);
1188 }
1189 
1190 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1191 					   struct sk_buff *skb)
1192 {
1193 	struct hci_cp_write_le_host_supported *sent;
1194 	__u8 status = *((__u8 *) skb->data);
1195 
1196 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1197 
1198 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1199 	if (!sent)
1200 		return;
1201 
1202 	if (!status) {
1203 		if (sent->le) {
1204 			hdev->features[1][0] |= LMP_HOST_LE;
1205 			set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1206 		} else {
1207 			hdev->features[1][0] &= ~LMP_HOST_LE;
1208 			clear_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1209 			clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
1210 		}
1211 
1212 		if (sent->simul)
1213 			hdev->features[1][0] |= LMP_HOST_LE_BREDR;
1214 		else
1215 			hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
1216 	}
1217 }
1218 
1219 static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1220 {
1221 	struct hci_cp_le_set_adv_param *cp;
1222 	u8 status = *((u8 *) skb->data);
1223 
1224 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1225 
1226 	if (status)
1227 		return;
1228 
1229 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
1230 	if (!cp)
1231 		return;
1232 
1233 	hci_dev_lock(hdev);
1234 	hdev->adv_addr_type = cp->own_address_type;
1235 	hci_dev_unlock(hdev);
1236 }
1237 
1238 static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
1239 					  struct sk_buff *skb)
1240 {
1241 	struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;
1242 
1243 	BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
1244 	       hdev->name, rp->status, rp->phy_handle);
1245 
1246 	if (rp->status)
1247 		return;
1248 
1249 	amp_write_rem_assoc_continue(hdev, rp->phy_handle);
1250 }
1251 
1252 static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
1253 {
1254 	struct hci_rp_read_rssi *rp = (void *) skb->data;
1255 	struct hci_conn *conn;
1256 
1257 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1258 
1259 	if (rp->status)
1260 		return;
1261 
1262 	hci_dev_lock(hdev);
1263 
1264 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1265 	if (conn)
1266 		conn->rssi = rp->rssi;
1267 
1268 	hci_dev_unlock(hdev);
1269 }
1270 
1271 static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
1272 {
1273 	struct hci_cp_read_tx_power *sent;
1274 	struct hci_rp_read_tx_power *rp = (void *) skb->data;
1275 	struct hci_conn *conn;
1276 
1277 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1278 
1279 	if (rp->status)
1280 		return;
1281 
1282 	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
1283 	if (!sent)
1284 		return;
1285 
1286 	hci_dev_lock(hdev);
1287 
1288 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1289 	if (!conn)
1290 		goto unlock;
1291 
1292 	switch (sent->type) {
1293 	case 0x00:
1294 		conn->tx_power = rp->tx_power;
1295 		break;
1296 	case 0x01:
1297 		conn->max_tx_power = rp->tx_power;
1298 		break;
1299 	}
1300 
1301 unlock:
1302 	hci_dev_unlock(hdev);
1303 }
1304 
1305 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1306 {
1307 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1308 
1309 	if (status) {
1310 		hci_conn_check_pending(hdev);
1311 		return;
1312 	}
1313 
1314 	set_bit(HCI_INQUIRY, &hdev->flags);
1315 }
1316 
/* Handle Command Status for HCI_OP_CREATE_CONN.
 *
 * On failure, tear down (or retry) the pending ACL connection object;
 * on success, make sure a hci_conn exists to track the outgoing
 * connection until the Connection Complete event arrives.
 */
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			/* Status 0x0c (Command Disallowed) is treated as
			 * transient: park the connection in BT_CONNECT2 for
			 * a retry unless too many attempts were made already.
			 */
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_proto_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			/* The command was not initiated through the hci_conn
			 * framework; create a tracking object for it anyway.
			 */
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
			if (conn) {
				conn->out = true;
				conn->link_mode |= HCI_LM_MASTER;
			} else
				BT_ERR("No memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}
1356 
1357 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1358 {
1359 	struct hci_cp_add_sco *cp;
1360 	struct hci_conn *acl, *sco;
1361 	__u16 handle;
1362 
1363 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1364 
1365 	if (!status)
1366 		return;
1367 
1368 	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1369 	if (!cp)
1370 		return;
1371 
1372 	handle = __le16_to_cpu(cp->handle);
1373 
1374 	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1375 
1376 	hci_dev_lock(hdev);
1377 
1378 	acl = hci_conn_hash_lookup_handle(hdev, handle);
1379 	if (acl) {
1380 		sco = acl->link;
1381 		if (sco) {
1382 			sco->state = BT_CLOSED;
1383 
1384 			hci_proto_connect_cfm(sco, status);
1385 			hci_conn_del(sco);
1386 		}
1387 	}
1388 
1389 	hci_dev_unlock(hdev);
1390 }
1391 
1392 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1393 {
1394 	struct hci_cp_auth_requested *cp;
1395 	struct hci_conn *conn;
1396 
1397 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1398 
1399 	if (!status)
1400 		return;
1401 
1402 	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1403 	if (!cp)
1404 		return;
1405 
1406 	hci_dev_lock(hdev);
1407 
1408 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1409 	if (conn) {
1410 		if (conn->state == BT_CONFIG) {
1411 			hci_proto_connect_cfm(conn, status);
1412 			hci_conn_drop(conn);
1413 		}
1414 	}
1415 
1416 	hci_dev_unlock(hdev);
1417 }
1418 
1419 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1420 {
1421 	struct hci_cp_set_conn_encrypt *cp;
1422 	struct hci_conn *conn;
1423 
1424 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1425 
1426 	if (!status)
1427 		return;
1428 
1429 	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1430 	if (!cp)
1431 		return;
1432 
1433 	hci_dev_lock(hdev);
1434 
1435 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1436 	if (conn) {
1437 		if (conn->state == BT_CONFIG) {
1438 			hci_proto_connect_cfm(conn, status);
1439 			hci_conn_drop(conn);
1440 		}
1441 	}
1442 
1443 	hci_dev_unlock(hdev);
1444 }
1445 
1446 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1447 				    struct hci_conn *conn)
1448 {
1449 	if (conn->state != BT_CONFIG || !conn->out)
1450 		return 0;
1451 
1452 	if (conn->pending_sec_level == BT_SECURITY_SDP)
1453 		return 0;
1454 
1455 	/* Only request authentication for SSP connections or non-SSP
1456 	 * devices with sec_level MEDIUM or HIGH or if MITM protection
1457 	 * is requested.
1458 	 */
1459 	if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1460 	    conn->pending_sec_level != BT_SECURITY_FIPS &&
1461 	    conn->pending_sec_level != BT_SECURITY_HIGH &&
1462 	    conn->pending_sec_level != BT_SECURITY_MEDIUM)
1463 		return 0;
1464 
1465 	return 1;
1466 }
1467 
1468 static int hci_resolve_name(struct hci_dev *hdev,
1469 				   struct inquiry_entry *e)
1470 {
1471 	struct hci_cp_remote_name_req cp;
1472 
1473 	memset(&cp, 0, sizeof(cp));
1474 
1475 	bacpy(&cp.bdaddr, &e->data.bdaddr);
1476 	cp.pscan_rep_mode = e->data.pscan_rep_mode;
1477 	cp.pscan_mode = e->data.pscan_mode;
1478 	cp.clock_offset = e->data.clock_offset;
1479 
1480 	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1481 }
1482 
1483 static bool hci_resolve_next_name(struct hci_dev *hdev)
1484 {
1485 	struct discovery_state *discov = &hdev->discovery;
1486 	struct inquiry_entry *e;
1487 
1488 	if (list_empty(&discov->resolve))
1489 		return false;
1490 
1491 	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1492 	if (!e)
1493 		return false;
1494 
1495 	if (hci_resolve_name(hdev, e) == 0) {
1496 		e->name_state = NAME_PENDING;
1497 		return true;
1498 	}
1499 
1500 	return false;
1501 }
1502 
/* Process the outcome of a remote name request during discovery.
 *
 * Notifies mgmt of the (possibly nameless) connection, then advances
 * the name-resolution state machine: record the resolved name, start
 * resolving the next pending entry, or mark discovery as stopped when
 * nothing remains. @name is NULL when resolution failed.
 */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	/* First mgmt notification for this connection carries the name */
	if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
				      name_len, conn->dev_class);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	/* A stop was requested; finish discovery without resolving more */
	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in a list of found devices names of which
	 * are pending. there is no need to continue resolving a next name as it
	 * will be done upon receiving another Remote Name Request Complete
	 * Event */
	if (!e)
		return;

	list_del(&e->list);
	if (name) {
		e->name_state = NAME_KNOWN;
		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
				 e->data.rssi, name, name_len);
	} else {
		e->name_state = NAME_NOT_KNOWN;
	}

	/* More names pending: stay in DISCOVERY_RESOLVING */
	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
1545 
/* Handle Command Status for HCI_OP_REMOTE_NAME_REQ.
 *
 * Only failures are handled here: the discovery state machine is
 * advanced as if the name could not be resolved, and any outgoing
 * authentication that was gated on the name request is started.
 */
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* If successful wait for the name req complete event before
	 * checking for the need to do authentication */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	/* Report the failure (NULL name) to the discovery machinery */
	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	/* Request authentication unless one is already pending */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested auth_cp;

		auth_cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(auth_cp), &auth_cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
1586 
1587 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1588 {
1589 	struct hci_cp_read_remote_features *cp;
1590 	struct hci_conn *conn;
1591 
1592 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1593 
1594 	if (!status)
1595 		return;
1596 
1597 	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1598 	if (!cp)
1599 		return;
1600 
1601 	hci_dev_lock(hdev);
1602 
1603 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1604 	if (conn) {
1605 		if (conn->state == BT_CONFIG) {
1606 			hci_proto_connect_cfm(conn, status);
1607 			hci_conn_drop(conn);
1608 		}
1609 	}
1610 
1611 	hci_dev_unlock(hdev);
1612 }
1613 
1614 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1615 {
1616 	struct hci_cp_read_remote_ext_features *cp;
1617 	struct hci_conn *conn;
1618 
1619 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1620 
1621 	if (!status)
1622 		return;
1623 
1624 	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1625 	if (!cp)
1626 		return;
1627 
1628 	hci_dev_lock(hdev);
1629 
1630 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1631 	if (conn) {
1632 		if (conn->state == BT_CONFIG) {
1633 			hci_proto_connect_cfm(conn, status);
1634 			hci_conn_drop(conn);
1635 		}
1636 	}
1637 
1638 	hci_dev_unlock(hdev);
1639 }
1640 
1641 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1642 {
1643 	struct hci_cp_setup_sync_conn *cp;
1644 	struct hci_conn *acl, *sco;
1645 	__u16 handle;
1646 
1647 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1648 
1649 	if (!status)
1650 		return;
1651 
1652 	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1653 	if (!cp)
1654 		return;
1655 
1656 	handle = __le16_to_cpu(cp->handle);
1657 
1658 	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1659 
1660 	hci_dev_lock(hdev);
1661 
1662 	acl = hci_conn_hash_lookup_handle(hdev, handle);
1663 	if (acl) {
1664 		sco = acl->link;
1665 		if (sco) {
1666 			sco->state = BT_CLOSED;
1667 
1668 			hci_proto_connect_cfm(sco, status);
1669 			hci_conn_del(sco);
1670 		}
1671 	}
1672 
1673 	hci_dev_unlock(hdev);
1674 }
1675 
1676 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1677 {
1678 	struct hci_cp_sniff_mode *cp;
1679 	struct hci_conn *conn;
1680 
1681 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1682 
1683 	if (!status)
1684 		return;
1685 
1686 	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1687 	if (!cp)
1688 		return;
1689 
1690 	hci_dev_lock(hdev);
1691 
1692 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1693 	if (conn) {
1694 		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1695 
1696 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1697 			hci_sco_setup(conn, status);
1698 	}
1699 
1700 	hci_dev_unlock(hdev);
1701 }
1702 
1703 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1704 {
1705 	struct hci_cp_exit_sniff_mode *cp;
1706 	struct hci_conn *conn;
1707 
1708 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1709 
1710 	if (!status)
1711 		return;
1712 
1713 	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1714 	if (!cp)
1715 		return;
1716 
1717 	hci_dev_lock(hdev);
1718 
1719 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1720 	if (conn) {
1721 		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1722 
1723 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1724 			hci_sco_setup(conn, status);
1725 	}
1726 
1727 	hci_dev_unlock(hdev);
1728 }
1729 
1730 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1731 {
1732 	struct hci_cp_disconnect *cp;
1733 	struct hci_conn *conn;
1734 
1735 	if (!status)
1736 		return;
1737 
1738 	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1739 	if (!cp)
1740 		return;
1741 
1742 	hci_dev_lock(hdev);
1743 
1744 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1745 	if (conn)
1746 		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1747 				       conn->dst_type, status);
1748 
1749 	hci_dev_unlock(hdev);
1750 }
1751 
1752 static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
1753 {
1754 	struct hci_cp_create_phy_link *cp;
1755 
1756 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1757 
1758 	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
1759 	if (!cp)
1760 		return;
1761 
1762 	hci_dev_lock(hdev);
1763 
1764 	if (status) {
1765 		struct hci_conn *hcon;
1766 
1767 		hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
1768 		if (hcon)
1769 			hci_conn_del(hcon);
1770 	} else {
1771 		amp_write_remote_assoc(hdev, cp->phy_handle);
1772 	}
1773 
1774 	hci_dev_unlock(hdev);
1775 }
1776 
1777 static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
1778 {
1779 	struct hci_cp_accept_phy_link *cp;
1780 
1781 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1782 
1783 	if (status)
1784 		return;
1785 
1786 	cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
1787 	if (!cp)
1788 		return;
1789 
1790 	amp_write_remote_assoc(hdev, cp->phy_handle);
1791 }
1792 
/* Handle Command Status for HCI_OP_LE_CREATE_CONN.
 *
 * On success, records the initiator/responder address pair needed by
 * SMP and arms a connection-attempt timeout when the white list is
 * not being used for connecting.
 */
static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* All connection failure handling is taken care of by the
	 * hci_le_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */
	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
	if (!conn)
		goto unlock;

	/* Store the initiator and responder address information which
	 * is needed for SMP. These values will not change during the
	 * lifetime of the connection.
	 */
	conn->init_addr_type = cp->own_address_type;
	if (cp->own_address_type == ADDR_LE_DEV_RANDOM)
		bacpy(&conn->init_addr, &hdev->random_addr);
	else
		bacpy(&conn->init_addr, &hdev->bdaddr);

	conn->resp_addr_type = cp->peer_addr_type;
	bacpy(&conn->resp_addr, &cp->peer_addr);

	/* We don't want the connection attempt to stick around
	 * indefinitely since LE doesn't have a page timeout concept
	 * like BR/EDR. Set a timer for any connection that doesn't use
	 * the white list for connecting.
	 */
	if (cp->filter_policy == HCI_LE_USE_PEER_ADDR)
		queue_delayed_work(conn->hdev->workqueue,
				   &conn->le_conn_timeout,
				   HCI_LE_CONN_TIMEOUT);

unlock:
	hci_dev_unlock(hdev);
}
1843 
1844 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
1845 {
1846 	struct hci_cp_le_start_enc *cp;
1847 	struct hci_conn *conn;
1848 
1849 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1850 
1851 	if (!status)
1852 		return;
1853 
1854 	hci_dev_lock(hdev);
1855 
1856 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
1857 	if (!cp)
1858 		goto unlock;
1859 
1860 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1861 	if (!conn)
1862 		goto unlock;
1863 
1864 	if (conn->state != BT_CONNECTED)
1865 		goto unlock;
1866 
1867 	hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
1868 	hci_conn_drop(conn);
1869 
1870 unlock:
1871 	hci_dev_unlock(hdev);
1872 }
1873 
/* Handle the Inquiry Complete event.
 *
 * Clears HCI_INQUIRY, wakes any waiter blocked on the flag, and then
 * either transitions discovery to name resolution (when entries still
 * need their names) or marks discovery as stopped.
 */
static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	hci_conn_check_pending(hdev);

	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		return;

	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	/* Discovery state below is only maintained for mgmt users */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	if (discov->state != DISCOVERY_FINDING)
		goto unlock;

	if (list_empty(&discov->resolve)) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	/* Start resolving the first pending name; if that fails, give up
	 * on resolution and finish discovery.
	 */
	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (e && hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
	} else {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
}
1914 
/* Handle the (basic) Inquiry Result event.
 *
 * The event payload is a response count followed by that many
 * inquiry_info records. Each record is copied into the inquiry cache
 * and reported to mgmt as a found device. Results are ignored during
 * periodic inquiry.
 */
static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct inquiry_data data;
	struct inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp = *((__u8 *) skb->data);

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	for (; num_rsp; num_rsp--, info++) {
		bool name_known, ssp;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode	= info->pscan_rep_mode;
		data.pscan_period_mode	= info->pscan_period_mode;
		data.pscan_mode		= info->pscan_mode;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset	= info->clock_offset;
		/* Basic inquiry results carry no RSSI or SSP information */
		data.rssi		= 0x00;
		data.ssp_mode		= 0x00;

		name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, 0, !name_known, ssp, NULL,
				  0, NULL, 0);
	}

	hci_dev_unlock(hdev);
}
1951 
/* Handle the Connection Complete event.
 *
 * Finds (or, for SCO upgraded from eSCO, re-types) the matching
 * connection object, then on success finishes connection setup:
 * handle assignment, state transition, sysfs registration, link-mode
 * flags and follow-up commands. On failure the connection is torn
 * down and mgmt/protocols are notified.
 */
static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* A SCO completion may answer an eSCO request that the
		 * controller downgraded; retry the lookup as ESCO_LINK.
		 */
		if (ev->link_type != SCO_LINK)
			goto unlock;

		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	if (!ev->status) {
		conn->handle = __le16_to_cpu(ev->handle);

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			/* Incoming legacy (non-SSP) connections without a
			 * stored link key are likely pairing attempts; give
			 * them the longer pairing timeout.
			 */
			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_conn_add_sysfs(conn);

		if (test_bit(HCI_AUTH, &hdev->flags))
			conn->link_mode |= HCI_LM_AUTH;

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			conn->link_mode |= HCI_LM_ENCRYPT;

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}
	} else {
		conn->state = BT_CLOSED;
		if (conn->type == ACL_LINK)
			mgmt_connect_failed(hdev, &conn->dst, conn->type,
					    conn->dst_type, ev->status);
	}

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

	if (ev->status) {
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_del(conn);
	} else if (ev->link_type != ACL_LINK)
		/* ACL links are confirmed later, after remote features */
		hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
2033 
/* Handle the Connection Request event.
 *
 * Consults the link policy and the registered protocols; if the
 * incoming connection is acceptable (and the peer is not
 * blacklisted), accepts it with the appropriate ACL or sync-
 * connection parameters, otherwise rejects it.
 */
static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;
	__u8 flags = 0;

	BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
	       ev->link_type);

	/* Let protocols veto or defer the connection */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
				      &flags);

	if ((mask & HCI_LM_ACCEPT) &&
	    !hci_blacklist_lookup(hdev, &ev->bdaddr, BDADDR_BREDR)) {
		/* Connection accepted */
		struct inquiry_entry *ie;
		struct hci_conn *conn;

		hci_dev_lock(hdev);

		/* Keep the cached device class current */
		ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
		if (ie)
			memcpy(ie->data.dev_class, ev->dev_class, 3);

		conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
					       &ev->bdaddr);
		if (!conn) {
			conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
			if (!conn) {
				BT_ERR("No memory for new connection");
				hci_dev_unlock(hdev);
				return;
			}
		}

		memcpy(conn->dev_class, ev->dev_class, 3);

		hci_dev_unlock(hdev);

		if (ev->link_type == ACL_LINK ||
		    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
			struct hci_cp_accept_conn_req cp;
			conn->state = BT_CONNECT;

			bacpy(&cp.bdaddr, &ev->bdaddr);

			if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
				cp.role = 0x00; /* Become master */
			else
				cp.role = 0x01; /* Remain slave */

			hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp),
				     &cp);
		} else if (!(flags & HCI_PROTO_DEFER)) {
			struct hci_cp_accept_sync_conn_req cp;
			conn->state = BT_CONNECT;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			cp.pkt_type = cpu_to_le16(conn->pkt_type);

			/* Fixed CVSD-compatible sync parameters */
			cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
			cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
			cp.max_latency    = cpu_to_le16(0xffff);
			cp.content_format = cpu_to_le16(hdev->voice_setting);
			cp.retrans_effort = 0xff;

			hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
				     sizeof(cp), &cp);
		} else {
			/* Protocol asked to defer; it will accept later */
			conn->state = BT_CONNECT2;
			hci_proto_connect_cfm(conn, 0);
		}
	} else {
		/* Connection rejected */
		struct hci_cp_reject_conn_req cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_REJ_BAD_ADDR;
		hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
	}
}
2115 
2116 static u8 hci_to_mgmt_reason(u8 err)
2117 {
2118 	switch (err) {
2119 	case HCI_ERROR_CONNECTION_TIMEOUT:
2120 		return MGMT_DEV_DISCONN_TIMEOUT;
2121 	case HCI_ERROR_REMOTE_USER_TERM:
2122 	case HCI_ERROR_REMOTE_LOW_RESOURCES:
2123 	case HCI_ERROR_REMOTE_POWER_OFF:
2124 		return MGMT_DEV_DISCONN_REMOTE;
2125 	case HCI_ERROR_LOCAL_HOST_TERM:
2126 		return MGMT_DEV_DISCONN_LOCAL_HOST;
2127 	default:
2128 		return MGMT_DEV_DISCONN_UNKNOWN;
2129 	}
2130 }
2131 
/* Handle the Disconnection Complete event.
 *
 * Notifies mgmt and the protocols, removes the link key when flagged,
 * re-queues auto-connect peers, deletes the connection object, and
 * re-enables LE advertising if the disconnect freed the LE link.
 */
static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = (void *) skb->data;
	u8 reason = hci_to_mgmt_reason(ev->reason);
	struct hci_conn_params *params;
	struct hci_conn *conn;
	bool mgmt_connected;
	u8 type;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->status) {
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, ev->status);
		goto unlock;
	}

	conn->state = BT_CLOSED;

	/* Only report a mgmt disconnect if a connect was reported before */
	mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
				reason, mgmt_connected);

	if (conn->type == ACL_LINK && conn->flush_key)
		hci_remove_link_key(hdev, &conn->dst);

	/* Re-arm auto-connection for peers configured for it */
	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_LINK_LOSS:
			if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
				break;
			/* Fall through */

		case HCI_AUTO_CONN_ALWAYS:
			hci_pend_le_conn_add(hdev, &conn->dst, conn->dst_type);
			break;

		default:
			break;
		}
	}

	/* conn is freed by hci_conn_del below; remember its type first */
	type = conn->type;

	hci_proto_disconn_cfm(conn, ev->reason);
	hci_conn_del(conn);

	/* Re-enable advertising if necessary, since it might
	 * have been disabled by the connection. From the
	 * HCI_LE_Set_Advertise_Enable command description in
	 * the core specification (v4.0):
	 * "The Controller shall continue advertising until the Host
	 * issues an LE_Set_Advertise_Enable command with
	 * Advertising_Enable set to 0x00 (Advertising is disabled)
	 * or until a connection is created or until the Advertising
	 * is timed out due to Directed Advertising."
	 */
	if (type == LE_LINK)
		mgmt_reenable_advertising(hdev);

unlock:
	hci_dev_unlock(hdev);
}
2202 
/* Handle the Authentication Complete event.
 *
 * Records the new security level on success, reports the failure to
 * mgmt otherwise, and drives the follow-up steps: starting encryption
 * for SSP connections still in setup, or confirming authentication to
 * waiting users. Also kicks off any encryption change that was queued
 * behind the authentication.
 */
static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (!hci_conn_ssp_enabled(conn) &&
		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
			BT_INFO("re-auth of legacy device is not possible.");
		} else {
			conn->link_mode |= HCI_LM_AUTH;
			conn->sec_level = conn->pending_sec_level;
		}
	} else {
		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
				 ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			/* SSP requires encryption before the link is usable */
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_proto_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

	/* An encryption change was waiting for this authentication */
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status, 0x00);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
2268 
/* Handle the HCI Remote Name Request Complete event.
 *
 * Resolves any pending name-request bookkeeping (when the management
 * interface is enabled) and, if the connection still needs outgoing
 * authentication, issues an Authentication Requested command.  Note
 * that conn may be NULL here; hci_check_pending_name() is called with
 * it regardless, and the authentication path bails out on NULL.
 */
static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	/* Without the management interface there is no pending-name state
	 * to update; skip straight to the authentication check.
	 */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto check_auth;

	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	/* Only send one authentication request per connection. */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;
		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
2307 
/* Handle the HCI Encryption Change event.
 *
 * Updates the connection's link mode, security level and AES-CCM flag
 * according to the new encryption state.  An encryption failure on an
 * established connection tears the link down; a connection still in
 * BT_CONFIG is completed (or rejected under Secure Connections Only
 * policy), and otherwise the change is confirmed to the upper layers.
 */
static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (ev->encrypt) {
			/* Encryption implies authentication */
			conn->link_mode |= HCI_LM_AUTH;
			conn->link_mode |= HCI_LM_ENCRYPT;
			conn->sec_level = conn->pending_sec_level;

			/* P-256 authentication key implies FIPS */
			if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
				conn->link_mode |= HCI_LM_FIPS;

			/* encrypt == 0x02 on BR/EDR means AES-CCM; LE links
			 * always use AES-CCM when encrypted.
			 */
			if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
			    conn->type == LE_LINK)
				set_bit(HCI_CONN_AES_CCM, &conn->flags);
		} else {
			conn->link_mode &= ~HCI_LM_ENCRYPT;
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}
	}

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* An encryption failure on an established link is fatal. */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		/* In Secure Connections Only mode, do not allow any
		 * connections that are not encrypted with AES-CCM
		 * using a P-256 authenticated combination key.
		 */
		if (test_bit(HCI_SC_ONLY, &hdev->dev_flags) &&
		    (!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
		     conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
			hci_proto_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
			hci_conn_drop(conn);
			goto unlock;
		}

		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else
		hci_encrypt_cfm(conn, ev->status, ev->encrypt);

unlock:
	hci_dev_unlock(hdev);
}
2373 
2374 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2375 					     struct sk_buff *skb)
2376 {
2377 	struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2378 	struct hci_conn *conn;
2379 
2380 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2381 
2382 	hci_dev_lock(hdev);
2383 
2384 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2385 	if (conn) {
2386 		if (!ev->status)
2387 			conn->link_mode |= HCI_LM_SECURE;
2388 
2389 		clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2390 
2391 		hci_key_change_cfm(conn, ev->status);
2392 	}
2393 
2394 	hci_dev_unlock(hdev);
2395 }
2396 
/* Handle the HCI Read Remote Supported Features Complete event.
 *
 * Stores the remote's page-0 features and continues connection setup:
 * fetch extended features when both sides support SSP, otherwise
 * either request the remote name or report the device as connected to
 * the management layer, and finally complete the connection if no
 * outgoing authentication is required.
 */
static void hci_remote_features_evt(struct hci_dev *hdev,
				    struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		memcpy(conn->features[0], ev->features, 8);

	/* The remaining steps only apply while the connection is still
	 * being configured.
	 */
	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
		/* Both sides are SSP capable; read extended features page 1
		 * to learn the remote host's SSP/SC support.
		 */
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
			     sizeof(cp), &cp);
		goto unlock;
	}

	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0,
				      conn->dev_class);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
2446 
/* Handle the HCI Command Complete event.
 *
 * Dispatches to the per-opcode completion handler, stops the command
 * timeout timer, completes any pending request for this opcode and
 * restores the command credit so queued commands can be sent.
 */
static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev = (void *) skb->data;
	/* First byte of the return parameters; for most commands handled
	 * here this is the status byte.  NOTE(review): commands whose
	 * return parameters do not start with a status would make this
	 * value meaningless - confirm against each opcode's format.
	 */
	u8 status = skb->data[sizeof(*ev)];
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY_CANCEL:
		hci_cc_inquiry_cancel(hdev, skb);
		break;

	case HCI_OP_PERIODIC_INQ:
		hci_cc_periodic_inq(hdev, skb);
		break;

	case HCI_OP_EXIT_PERIODIC_INQ:
		hci_cc_exit_periodic_inq(hdev, skb);
		break;

	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
		hci_cc_remote_name_req_cancel(hdev, skb);
		break;

	case HCI_OP_ROLE_DISCOVERY:
		hci_cc_role_discovery(hdev, skb);
		break;

	case HCI_OP_READ_LINK_POLICY:
		hci_cc_read_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_LINK_POLICY:
		hci_cc_write_link_policy(hdev, skb);
		break;

	case HCI_OP_READ_DEF_LINK_POLICY:
		hci_cc_read_def_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_DEF_LINK_POLICY:
		hci_cc_write_def_link_policy(hdev, skb);
		break;

	case HCI_OP_RESET:
		hci_cc_reset(hdev, skb);
		break;

	case HCI_OP_WRITE_LOCAL_NAME:
		hci_cc_write_local_name(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_NAME:
		hci_cc_read_local_name(hdev, skb);
		break;

	case HCI_OP_WRITE_AUTH_ENABLE:
		hci_cc_write_auth_enable(hdev, skb);
		break;

	case HCI_OP_WRITE_ENCRYPT_MODE:
		hci_cc_write_encrypt_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SCAN_ENABLE:
		hci_cc_write_scan_enable(hdev, skb);
		break;

	case HCI_OP_READ_CLASS_OF_DEV:
		hci_cc_read_class_of_dev(hdev, skb);
		break;

	case HCI_OP_WRITE_CLASS_OF_DEV:
		hci_cc_write_class_of_dev(hdev, skb);
		break;

	case HCI_OP_READ_VOICE_SETTING:
		hci_cc_read_voice_setting(hdev, skb);
		break;

	case HCI_OP_WRITE_VOICE_SETTING:
		hci_cc_write_voice_setting(hdev, skb);
		break;

	case HCI_OP_READ_NUM_SUPPORTED_IAC:
		hci_cc_read_num_supported_iac(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_MODE:
		hci_cc_write_ssp_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SC_SUPPORT:
		hci_cc_write_sc_support(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_VERSION:
		hci_cc_read_local_version(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_COMMANDS:
		hci_cc_read_local_commands(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_FEATURES:
		hci_cc_read_local_features(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_EXT_FEATURES:
		hci_cc_read_local_ext_features(hdev, skb);
		break;

	case HCI_OP_READ_BUFFER_SIZE:
		hci_cc_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_READ_BD_ADDR:
		hci_cc_read_bd_addr(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
		hci_cc_read_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
		hci_cc_write_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_TYPE:
		hci_cc_read_page_scan_type(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_TYPE:
		hci_cc_write_page_scan_type(hdev, skb);
		break;

	case HCI_OP_READ_DATA_BLOCK_SIZE:
		hci_cc_read_data_block_size(hdev, skb);
		break;

	case HCI_OP_READ_FLOW_CONTROL_MODE:
		hci_cc_read_flow_control_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_INFO:
		hci_cc_read_local_amp_info(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_ASSOC:
		hci_cc_read_local_amp_assoc(hdev, skb);
		break;

	case HCI_OP_READ_INQ_RSP_TX_POWER:
		hci_cc_read_inq_rsp_tx_power(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_REPLY:
		hci_cc_pin_code_reply(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_NEG_REPLY:
		hci_cc_pin_code_neg_reply(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_DATA:
		hci_cc_read_local_oob_data(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
		hci_cc_read_local_oob_ext_data(hdev, skb);
		break;

	case HCI_OP_LE_READ_BUFFER_SIZE:
		hci_cc_le_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_LE_READ_LOCAL_FEATURES:
		hci_cc_le_read_local_features(hdev, skb);
		break;

	case HCI_OP_LE_READ_ADV_TX_POWER:
		hci_cc_le_read_adv_tx_power(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_REPLY:
		hci_cc_user_confirm_reply(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_NEG_REPLY:
		hci_cc_user_confirm_neg_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_REPLY:
		hci_cc_user_passkey_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_NEG_REPLY:
		hci_cc_user_passkey_neg_reply(hdev, skb);
		break;

	case HCI_OP_LE_SET_RANDOM_ADDR:
		hci_cc_le_set_random_addr(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_ENABLE:
		hci_cc_le_set_adv_enable(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_PARAM:
		hci_cc_le_set_scan_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_ENABLE:
		hci_cc_le_set_scan_enable(hdev, skb);
		break;

	case HCI_OP_LE_READ_WHITE_LIST_SIZE:
		hci_cc_le_read_white_list_size(hdev, skb);
		break;

	case HCI_OP_LE_CLEAR_WHITE_LIST:
		hci_cc_le_clear_white_list(hdev, skb);
		break;

	case HCI_OP_LE_ADD_TO_WHITE_LIST:
		hci_cc_le_add_to_white_list(hdev, skb);
		break;

	case HCI_OP_LE_DEL_FROM_WHITE_LIST:
		hci_cc_le_del_from_white_list(hdev, skb);
		break;

	case HCI_OP_LE_READ_SUPPORTED_STATES:
		hci_cc_le_read_supported_states(hdev, skb);
		break;

	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
		hci_cc_write_le_host_supported(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_PARAM:
		hci_cc_set_adv_param(hdev, skb);
		break;

	case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
		hci_cc_write_remote_amp_assoc(hdev, skb);
		break;

	case HCI_OP_READ_RSSI:
		hci_cc_read_rssi(hdev, skb);
		break;

	case HCI_OP_READ_TX_POWER:
		hci_cc_read_tx_power(hdev, skb);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	/* A real command completed; stop the command timeout timer. */
	if (opcode != HCI_OP_NOP)
		del_timer(&hdev->cmd_timer);

	hci_req_cmd_complete(hdev, opcode, status);

	/* The controller granted new command credits; kick the command
	 * queue unless a reset is in progress.
	 */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
2722 
/* Handle the HCI Command Status event.
 *
 * Dispatches to the per-opcode status handler, stops the command
 * timeout timer, completes the pending request when appropriate and
 * restores the command credit so queued commands can be sent.
 */
static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_status *ev = (void *) skb->data;
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY:
		hci_cs_inquiry(hdev, ev->status);
		break;

	case HCI_OP_CREATE_CONN:
		hci_cs_create_conn(hdev, ev->status);
		break;

	case HCI_OP_ADD_SCO:
		hci_cs_add_sco(hdev, ev->status);
		break;

	case HCI_OP_AUTH_REQUESTED:
		hci_cs_auth_requested(hdev, ev->status);
		break;

	case HCI_OP_SET_CONN_ENCRYPT:
		hci_cs_set_conn_encrypt(hdev, ev->status);
		break;

	case HCI_OP_REMOTE_NAME_REQ:
		hci_cs_remote_name_req(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_FEATURES:
		hci_cs_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_EXT_FEATURES:
		hci_cs_read_remote_ext_features(hdev, ev->status);
		break;

	case HCI_OP_SETUP_SYNC_CONN:
		hci_cs_setup_sync_conn(hdev, ev->status);
		break;

	case HCI_OP_SNIFF_MODE:
		hci_cs_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_EXIT_SNIFF_MODE:
		hci_cs_exit_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_DISCONNECT:
		hci_cs_disconnect(hdev, ev->status);
		break;

	case HCI_OP_CREATE_PHY_LINK:
		hci_cs_create_phylink(hdev, ev->status);
		break;

	case HCI_OP_ACCEPT_PHY_LINK:
		hci_cs_accept_phylink(hdev, ev->status);
		break;

	case HCI_OP_LE_CREATE_CONN:
		hci_cs_le_create_conn(hdev, ev->status);
		break;

	case HCI_OP_LE_START_ENC:
		hci_cs_le_start_enc(hdev, ev->status);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	/* A real command was acknowledged; stop the timeout timer. */
	if (opcode != HCI_OP_NOP)
		del_timer(&hdev->cmd_timer);

	/* Complete the request now on failure, or when the sent command
	 * is not waiting for a follow-up event.
	 */
	if (ev->status ||
	    (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
		hci_req_cmd_complete(hdev, opcode, ev->status);

	/* New command credits granted; kick the command queue unless a
	 * reset is in progress.
	 */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
2815 
2816 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2817 {
2818 	struct hci_ev_role_change *ev = (void *) skb->data;
2819 	struct hci_conn *conn;
2820 
2821 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2822 
2823 	hci_dev_lock(hdev);
2824 
2825 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2826 	if (conn) {
2827 		if (!ev->status) {
2828 			if (ev->role)
2829 				conn->link_mode &= ~HCI_LM_MASTER;
2830 			else
2831 				conn->link_mode |= HCI_LM_MASTER;
2832 		}
2833 
2834 		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2835 
2836 		hci_role_switch_cfm(conn, ev->status, ev->role);
2837 	}
2838 
2839 	hci_dev_unlock(hdev);
2840 }
2841 
/* Handle the HCI Number of Completed Packets event.
 *
 * Returns transmit credits to the per-link-type counters (capped at
 * the controller-advertised maximums) and schedules the TX work to
 * push out any queued traffic.  Only valid in packet-based flow
 * control mode.
 */
static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
	int i;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Reject truncated events; num_hndl is only read after the fixed
	 * header length has been validated (short-circuit).
	 */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_pkts_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16  handle, count;

		handle = __le16_to_cpu(info->handle);
		count  = __le16_to_cpu(info->count);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= count;

		switch (conn->type) {
		case ACL_LINK:
			hdev->acl_cnt += count;
			if (hdev->acl_cnt > hdev->acl_pkts)
				hdev->acl_cnt = hdev->acl_pkts;
			break;

		case LE_LINK:
			/* LE links share the ACL buffer pool when the
			 * controller has no dedicated LE buffers.
			 */
			if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		case SCO_LINK:
			hdev->sco_cnt += count;
			if (hdev->sco_cnt > hdev->sco_pkts)
				hdev->sco_cnt = hdev->sco_pkts;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
2907 
2908 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
2909 						 __u16 handle)
2910 {
2911 	struct hci_chan *chan;
2912 
2913 	switch (hdev->dev_type) {
2914 	case HCI_BREDR:
2915 		return hci_conn_hash_lookup_handle(hdev, handle);
2916 	case HCI_AMP:
2917 		chan = hci_chan_lookup_handle(hdev, handle);
2918 		if (chan)
2919 			return chan->conn;
2920 		break;
2921 	default:
2922 		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2923 		break;
2924 	}
2925 
2926 	return NULL;
2927 }
2928 
/* Handle the HCI Number of Completed Data Blocks event.
 *
 * Returns data-block credits to the shared block counter (capped at
 * the controller-advertised maximum) and schedules the TX work.  Only
 * valid in block-based flow control mode.
 */
static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
	int i;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Reject truncated events; num_hndl is only read after the fixed
	 * header length has been validated (short-circuit).
	 */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
	       ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_blocks_info *info = &ev->handles[i];
		struct hci_conn *conn = NULL;
		__u16  handle, block_count;

		handle = __le16_to_cpu(info->handle);
		block_count = __le16_to_cpu(info->blocks);

		/* AMP handles identify channels, so use the type-aware
		 * lookup helper.
		 */
		conn = __hci_conn_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= block_count;

		switch (conn->type) {
		case ACL_LINK:
		case AMP_LINK:
			hdev->block_cnt += block_count;
			if (hdev->block_cnt > hdev->num_blocks)
				hdev->block_cnt = hdev->num_blocks;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
2978 
2979 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2980 {
2981 	struct hci_ev_mode_change *ev = (void *) skb->data;
2982 	struct hci_conn *conn;
2983 
2984 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2985 
2986 	hci_dev_lock(hdev);
2987 
2988 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2989 	if (conn) {
2990 		conn->mode = ev->mode;
2991 
2992 		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
2993 					&conn->flags)) {
2994 			if (conn->mode == HCI_CM_ACTIVE)
2995 				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2996 			else
2997 				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2998 		}
2999 
3000 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
3001 			hci_sco_setup(conn, ev->status);
3002 	}
3003 
3004 	hci_dev_unlock(hdev);
3005 }
3006 
/* Handle the HCI PIN Code Request event.
 *
 * Rejects the request outright when the device is not pairable;
 * otherwise forwards it to the management layer (when enabled) so
 * userspace can supply the PIN.  The connection is held over the
 * pairing timeout while the user responds.
 */
static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pin_code_req *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	if (conn->state == BT_CONNECTED) {
		/* Extend the connection's lifetime while pairing is in
		 * progress; the drop only releases the hold taken here.
		 */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
		hci_conn_drop(conn);
	}

	if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
	else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
		u8 secure;

		/* High security requires a 16 digit PIN. */
		if (conn->pending_sec_level == BT_SECURITY_HIGH)
			secure = 1;
		else
			secure = 0;

		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
	}

unlock:
	hci_dev_unlock(hdev);
}
3043 
/* Handle the HCI Link Key Request event.
 *
 * Looks up the stored link key for the remote device and replies with
 * it, unless security policy forbids its use (debug keys disabled,
 * unauthenticated key where authentication was requested, or a short
 * PIN combination key for a high/FIPS security connection), in which
 * case a negative reply is sent.
 */
static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = (void *) skb->data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		BT_DBG("%s link key not found for %pMR", hdev->name,
		       &ev->bdaddr);
		goto not_found;
	}

	BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
	       &ev->bdaddr);

	/* Debug keys are only usable when explicitly enabled. */
	if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
	    key->type == HCI_LK_DEBUG_COMBINATION) {
		BT_DBG("%s ignoring debug key", hdev->name);
		goto not_found;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		/* An unauthenticated key must not satisfy a connection
		 * that requested MITM protection.
		 */
		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
			BT_DBG("%s ignoring unauthenticated key", hdev->name);
			goto not_found;
		}

		/* High/FIPS security requires a 16 digit PIN for legacy
		 * combination keys.
		 */
		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
		    (conn->pending_sec_level == BT_SECURITY_HIGH ||
		     conn->pending_sec_level == BT_SECURITY_FIPS)) {
			BT_DBG("%s ignoring key unauthenticated for high security",
			       hdev->name);
			goto not_found;
		}

		conn->key_type = key->type;
		conn->pin_length = key->pin_len;
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}
3108 
/* Handle the HCI Link Key Notification event.
 *
 * Records the new key type on the connection and, when the management
 * interface is enabled, stores the link key.  Note that conn may be
 * NULL here and is still passed to hci_add_link_key() — presumably
 * the callee tolerates a NULL connection; confirm at its definition.
 */
static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = (void *) skb->data;
	struct hci_conn *conn;
	u8 pin_len = 0;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		pin_len = conn->pin_length;

		/* A changed-combination key keeps the original type. */
		if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
			conn->key_type = ev->key_type;

		hci_conn_drop(conn);
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
				 ev->key_type, pin_len);

	hci_dev_unlock(hdev);
}
3137 
3138 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
3139 {
3140 	struct hci_ev_clock_offset *ev = (void *) skb->data;
3141 	struct hci_conn *conn;
3142 
3143 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3144 
3145 	hci_dev_lock(hdev);
3146 
3147 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3148 	if (conn && !ev->status) {
3149 		struct inquiry_entry *ie;
3150 
3151 		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3152 		if (ie) {
3153 			ie->data.clock_offset = ev->clock_offset;
3154 			ie->timestamp = jiffies;
3155 		}
3156 	}
3157 
3158 	hci_dev_unlock(hdev);
3159 }
3160 
3161 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3162 {
3163 	struct hci_ev_pkt_type_change *ev = (void *) skb->data;
3164 	struct hci_conn *conn;
3165 
3166 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3167 
3168 	hci_dev_lock(hdev);
3169 
3170 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3171 	if (conn && !ev->status)
3172 		conn->pkt_type = __le16_to_cpu(ev->pkt_type);
3173 
3174 	hci_dev_unlock(hdev);
3175 }
3176 
3177 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
3178 {
3179 	struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
3180 	struct inquiry_entry *ie;
3181 
3182 	BT_DBG("%s", hdev->name);
3183 
3184 	hci_dev_lock(hdev);
3185 
3186 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3187 	if (ie) {
3188 		ie->data.pscan_rep_mode = ev->pscan_rep_mode;
3189 		ie->timestamp = jiffies;
3190 	}
3191 
3192 	hci_dev_unlock(hdev);
3193 }
3194 
/* Handle the HCI Inquiry Result with RSSI event.
 *
 * The event comes in two wire formats (with or without a page scan
 * mode field), distinguished by the per-response record size.  Each
 * response updates the inquiry cache and is reported to the
 * management layer.
 */
static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct inquiry_data data;
	int num_rsp = *((__u8 *) skb->data);
	bool name_known, ssp;

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	/* Results from a periodic inquiry are not reported. */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	/* Record size decides which of the two formats was received. */
	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
		struct inquiry_info_with_rssi_and_pscan_mode *info;
		info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode	= info->pscan_rep_mode;
			data.pscan_period_mode	= info->pscan_period_mode;
			data.pscan_mode		= info->pscan_mode;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset	= info->clock_offset;
			data.rssi		= info->rssi;
			data.ssp_mode		= 0x00;

			name_known = hci_inquiry_cache_update(hdev, &data,
							      false, &ssp);
			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  !name_known, ssp, NULL, 0, NULL, 0);
		}
	} else {
		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode	= info->pscan_rep_mode;
			data.pscan_period_mode	= info->pscan_period_mode;
			data.pscan_mode		= 0x00;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset	= info->clock_offset;
			data.rssi		= info->rssi;
			data.ssp_mode		= 0x00;
			name_known = hci_inquiry_cache_update(hdev, &data,
							      false, &ssp);
			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  !name_known, ssp, NULL, 0, NULL, 0);
		}
	}

	hci_dev_unlock(hdev);
}
3254 
/* Handle the HCI Read Remote Extended Features Complete event.
 *
 * Stores the requested feature page and, for page 1, synchronizes the
 * connection's SSP/SC flags with the remote host's capabilities.  If
 * the connection is still in BT_CONFIG, setup continues the same way
 * as in hci_remote_features_evt().
 */
static void hci_remote_ext_features_evt(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->page < HCI_MAX_PAGES)
		memcpy(conn->features[ev->page], ev->features, 8);

	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

		if (ev->features[0] & LMP_HOST_SSP) {
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		} else {
			/* It is mandatory by the Bluetooth specification that
			 * Extended Inquiry Results are only used when Secure
			 * Simple Pairing is enabled, but some devices violate
			 * this.
			 *
			 * To make these devices work, the internal SSP
			 * enabled flag needs to be cleared if the remote host
			 * features do not indicate SSP support */
			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		}

		if (ev->features[0] & LMP_HOST_SC)
			set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
	}

	/* The remaining steps only apply while the connection is still
	 * being configured.
	 */
	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0,
				      conn->dev_class);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3320 
/* Handle the HCI Synchronous Connection Complete event.
 *
 * Matches the event to a pending SCO/eSCO connection (downgrading an
 * eSCO attempt to SCO when the controller reports SCO), finalizes the
 * connection on success, and retries an outgoing setup with a reduced
 * packet type on a set of known negotiation failures.
 */
static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		/* The controller fell back from eSCO to SCO; find the
		 * original eSCO attempt and retype it.
		 */
		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	switch (ev->status) {
	case 0x00:
		conn->handle = __le16_to_cpu(ev->handle);
		conn->state  = BT_CONNECTED;

		hci_conn_add_sysfs(conn);
		break;

	case 0x0d:	/* Connection Rejected due to Limited Resources */
	case 0x11:	/* Unsupported Feature or Parameter Value */
	case 0x1c:	/* SCO interval rejected */
	case 0x1a:	/* Unsupported Remote Feature */
	case 0x1f:	/* Unspecified error */
	case 0x20:	/* Unsupported LMP Parameter value */
		if (conn->out) {
			/* Retry the outgoing setup with the remaining
			 * packet types; on success, wait for the next
			 * completion event instead of closing.
			 */
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
			if (hci_setup_sync(conn, conn->link->handle))
				goto unlock;
		}
		/* fall through */

	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_proto_connect_cfm(conn, ev->status);
	if (ev->status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
3377 
3378 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
3379 {
3380 	size_t parsed = 0;
3381 
3382 	while (parsed < eir_len) {
3383 		u8 field_len = eir[0];
3384 
3385 		if (field_len == 0)
3386 			return parsed;
3387 
3388 		parsed += field_len + 1;
3389 		eir += field_len + 1;
3390 	}
3391 
3392 	return eir_len;
3393 }
3394 
3395 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
3396 					    struct sk_buff *skb)
3397 {
3398 	struct inquiry_data data;
3399 	struct extended_inquiry_info *info = (void *) (skb->data + 1);
3400 	int num_rsp = *((__u8 *) skb->data);
3401 	size_t eir_len;
3402 
3403 	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3404 
3405 	if (!num_rsp)
3406 		return;
3407 
3408 	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
3409 		return;
3410 
3411 	hci_dev_lock(hdev);
3412 
3413 	for (; num_rsp; num_rsp--, info++) {
3414 		bool name_known, ssp;
3415 
3416 		bacpy(&data.bdaddr, &info->bdaddr);
3417 		data.pscan_rep_mode	= info->pscan_rep_mode;
3418 		data.pscan_period_mode	= info->pscan_period_mode;
3419 		data.pscan_mode		= 0x00;
3420 		memcpy(data.dev_class, info->dev_class, 3);
3421 		data.clock_offset	= info->clock_offset;
3422 		data.rssi		= info->rssi;
3423 		data.ssp_mode		= 0x01;
3424 
3425 		if (test_bit(HCI_MGMT, &hdev->dev_flags))
3426 			name_known = eir_has_data_type(info->data,
3427 						       sizeof(info->data),
3428 						       EIR_NAME_COMPLETE);
3429 		else
3430 			name_known = true;
3431 
3432 		name_known = hci_inquiry_cache_update(hdev, &data, name_known,
3433 						      &ssp);
3434 		eir_len = eir_get_length(info->data, sizeof(info->data));
3435 		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3436 				  info->dev_class, info->rssi, !name_known,
3437 				  ssp, info->data, eir_len, NULL, 0);
3438 	}
3439 
3440 	hci_dev_unlock(hdev);
3441 }
3442 
/* Handle the Encryption Key Refresh Complete event. Only LE links are
 * processed here; a failed refresh on an established link tears the
 * link down as an authentication failure.
 */
static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
	       __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* For BR/EDR the necessary steps are taken through the
	 * auth_complete event.
	 */
	if (conn->type != LE_LINK)
		goto unlock;

	/* On success the pending security elevation takes effect */
	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		/* Still in setup: promote to connected on success and
		 * confirm the connection attempt either way.
		 */
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3492 
3493 static u8 hci_get_auth_req(struct hci_conn *conn)
3494 {
3495 	/* If remote requests no-bonding follow that lead */
3496 	if (conn->remote_auth == HCI_AT_NO_BONDING ||
3497 	    conn->remote_auth == HCI_AT_NO_BONDING_MITM)
3498 		return conn->remote_auth | (conn->auth_type & 0x01);
3499 
3500 	/* If both remote and local have enough IO capabilities, require
3501 	 * MITM protection
3502 	 */
3503 	if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
3504 	    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
3505 		return conn->remote_auth | 0x01;
3506 
3507 	/* No MITM protection possible so ignore remote requirement */
3508 	return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
3509 }
3510 
/* Handle the IO Capability Request event by replying with our IO
 * capability and authentication requirement, or with a negative reply
 * when pairing is not allowed.
 */
static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Keep the connection alive for the duration of the pairing */
	hci_conn_hold(conn);

	/* Without mgmt in control, leave the reply to raw HCI users */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	/* Accept when the device is pairable or the remote only asks
	 * for no-bonding (with or without MITM); otherwise send a
	 * negative reply below.
	 */
	if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
				HCI_IO_DISPLAY_YESNO : conn->io_capability;

		/* If we are initiators, there is no remote information yet */
		if (conn->remote_auth == 0xff) {
			cp.authentication = conn->auth_type;

			/* Request MITM protection if our IO caps allow it
			 * except for the no-bonding case.
			 * conn->auth_type is not updated here since
			 * that might cause the user confirmation to be
			 * rejected in case the remote doesn't have the
			 * IO capabilities for MITM.
			 */
			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
			    cp.authentication != HCI_AT_NO_BONDING)
				cp.authentication |= 0x01;
		} else {
			conn->auth_type = hci_get_auth_req(conn);
			cp.authentication = conn->auth_type;
		}

		/* Advertise OOB data only when we have it for this peer
		 * and either we initiated or the remote indicated OOB.
		 */
		if (hci_find_remote_oob_data(hdev, &conn->dst) &&
		    (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
			cp.oob_data = 0x01;
		else
			cp.oob_data = 0x00;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3579 
3580 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3581 {
3582 	struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3583 	struct hci_conn *conn;
3584 
3585 	BT_DBG("%s", hdev->name);
3586 
3587 	hci_dev_lock(hdev);
3588 
3589 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3590 	if (!conn)
3591 		goto unlock;
3592 
3593 	conn->remote_cap = ev->capability;
3594 	conn->remote_auth = ev->authentication;
3595 	if (ev->oob_data)
3596 		set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3597 
3598 unlock:
3599 	hci_dev_unlock(hdev);
3600 }
3601 
/* Handle the User Confirmation Request event, deciding between
 * rejecting, auto-accepting (optionally after a delay) or forwarding
 * the confirmation to user space via mgmt.
 */
static void hci_user_confirm_request_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* Without mgmt in control, leave the reply to raw HCI users */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Bit 0 of the authentication requirement is the MITM flag */
	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation request
	 */
	if (loc_mitm && conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). The exception is if neither
		 * side had MITM in which case we do auto-accept.
		 */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
		    (loc_mitm || rem_mitm)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		/* A configured delay defers the accept to a worker so
		 * the user has a chance to see the passkey first.
		 */
		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			queue_delayed_work(conn->hdev->workqueue,
					   &conn->auto_accept_work, delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
				  le32_to_cpu(ev->passkey), confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
3671 
3672 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
3673 					 struct sk_buff *skb)
3674 {
3675 	struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3676 
3677 	BT_DBG("%s", hdev->name);
3678 
3679 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
3680 		mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3681 }
3682 
3683 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
3684 					struct sk_buff *skb)
3685 {
3686 	struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
3687 	struct hci_conn *conn;
3688 
3689 	BT_DBG("%s", hdev->name);
3690 
3691 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3692 	if (!conn)
3693 		return;
3694 
3695 	conn->passkey_notify = __le32_to_cpu(ev->passkey);
3696 	conn->passkey_entered = 0;
3697 
3698 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
3699 		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3700 					 conn->dst_type, conn->passkey_notify,
3701 					 conn->passkey_entered);
3702 }
3703 
3704 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3705 {
3706 	struct hci_ev_keypress_notify *ev = (void *) skb->data;
3707 	struct hci_conn *conn;
3708 
3709 	BT_DBG("%s", hdev->name);
3710 
3711 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3712 	if (!conn)
3713 		return;
3714 
3715 	switch (ev->type) {
3716 	case HCI_KEYPRESS_STARTED:
3717 		conn->passkey_entered = 0;
3718 		return;
3719 
3720 	case HCI_KEYPRESS_ENTERED:
3721 		conn->passkey_entered++;
3722 		break;
3723 
3724 	case HCI_KEYPRESS_ERASED:
3725 		conn->passkey_entered--;
3726 		break;
3727 
3728 	case HCI_KEYPRESS_CLEARED:
3729 		conn->passkey_entered = 0;
3730 		break;
3731 
3732 	case HCI_KEYPRESS_COMPLETED:
3733 		return;
3734 	}
3735 
3736 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
3737 		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3738 					 conn->dst_type, conn->passkey_notify,
3739 					 conn->passkey_entered);
3740 }
3741 
3742 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
3743 					 struct sk_buff *skb)
3744 {
3745 	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
3746 	struct hci_conn *conn;
3747 
3748 	BT_DBG("%s", hdev->name);
3749 
3750 	hci_dev_lock(hdev);
3751 
3752 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3753 	if (!conn)
3754 		goto unlock;
3755 
3756 	/* To avoid duplicate auth_failed events to user space we check
3757 	 * the HCI_CONN_AUTH_PEND flag which will be set if we
3758 	 * initiated the authentication. A traditional auth_complete
3759 	 * event gets always produced as initiator and is also mapped to
3760 	 * the mgmt_auth_failed event */
3761 	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
3762 		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
3763 				 ev->status);
3764 
3765 	hci_conn_drop(conn);
3766 
3767 unlock:
3768 	hci_dev_unlock(hdev);
3769 }
3770 
3771 static void hci_remote_host_features_evt(struct hci_dev *hdev,
3772 					 struct sk_buff *skb)
3773 {
3774 	struct hci_ev_remote_host_features *ev = (void *) skb->data;
3775 	struct inquiry_entry *ie;
3776 	struct hci_conn *conn;
3777 
3778 	BT_DBG("%s", hdev->name);
3779 
3780 	hci_dev_lock(hdev);
3781 
3782 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3783 	if (conn)
3784 		memcpy(conn->features[1], ev->features, 8);
3785 
3786 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3787 	if (ie)
3788 		ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3789 
3790 	hci_dev_unlock(hdev);
3791 }
3792 
/* Handle the Remote OOB Data Request event by replying with the stored
 * OOB hash/randomizer for the peer (extended variant when Secure
 * Connections is enabled) or a negative reply if none is stored.
 */
static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
	struct oob_data *data;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* Without mgmt in control, leave the reply to raw HCI users */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
	if (data) {
		if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
			/* Secure Connections: send both the P-192 and
			 * P-256 hash/randomizer pairs.
			 */
			struct hci_cp_remote_oob_ext_data_reply cp;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
			memcpy(cp.randomizer192, data->randomizer192,
			       sizeof(cp.randomizer192));
			memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
			memcpy(cp.randomizer256, data->randomizer256,
			       sizeof(cp.randomizer256));

			hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
				     sizeof(cp), &cp);
		} else {
			/* Legacy pairing: only the P-192 values apply */
			struct hci_cp_remote_oob_data_reply cp;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			memcpy(cp.hash, data->hash192, sizeof(cp.hash));
			memcpy(cp.randomizer, data->randomizer192,
			       sizeof(cp.randomizer));

			hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
				     sizeof(cp), &cp);
		}
	} else {
		struct hci_cp_remote_oob_data_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3843 
3844 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
3845 				      struct sk_buff *skb)
3846 {
3847 	struct hci_ev_phy_link_complete *ev = (void *) skb->data;
3848 	struct hci_conn *hcon, *bredr_hcon;
3849 
3850 	BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
3851 	       ev->status);
3852 
3853 	hci_dev_lock(hdev);
3854 
3855 	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3856 	if (!hcon) {
3857 		hci_dev_unlock(hdev);
3858 		return;
3859 	}
3860 
3861 	if (ev->status) {
3862 		hci_conn_del(hcon);
3863 		hci_dev_unlock(hdev);
3864 		return;
3865 	}
3866 
3867 	bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
3868 
3869 	hcon->state = BT_CONNECTED;
3870 	bacpy(&hcon->dst, &bredr_hcon->dst);
3871 
3872 	hci_conn_hold(hcon);
3873 	hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3874 	hci_conn_drop(hcon);
3875 
3876 	hci_conn_add_sysfs(hcon);
3877 
3878 	amp_physical_cfm(bredr_hcon, hcon);
3879 
3880 	hci_dev_unlock(hdev);
3881 }
3882 
/* Handle the AMP Logical Link Complete event: create the hci_chan for
 * the new logical link and, if a BR/EDR L2CAP channel is waiting on
 * it, confirm the logical link to L2CAP.
 *
 * NOTE(review): unlike the other handlers here this one walks the
 * connection hash without taking hdev->lock -- confirm whether that is
 * intentional.
 */
static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_logical_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon;
	struct hci_chan *hchan;
	struct amp_mgr *mgr;

	BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
	       hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
	       ev->status);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	/* Create AMP hchan */
	hchan = hci_chan_create(hcon);
	if (!hchan)
		return;

	hchan->handle = le16_to_cpu(ev->handle);

	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);

	mgr = hcon->amp_mgr;
	if (mgr && mgr->bredr_chan) {
		struct l2cap_chan *bredr_chan = mgr->bredr_chan;

		l2cap_chan_lock(bredr_chan);

		/* The L2CAP connection now flows over the AMP link, so
		 * switch its MTU to the controller's block MTU.
		 */
		bredr_chan->conn->mtu = hdev->block_mtu;
		l2cap_logical_cfm(bredr_chan, hchan, 0);
		hci_conn_hold(hcon);

		l2cap_chan_unlock(bredr_chan);
	}
}
3920 
3921 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
3922 					     struct sk_buff *skb)
3923 {
3924 	struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
3925 	struct hci_chan *hchan;
3926 
3927 	BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
3928 	       le16_to_cpu(ev->handle), ev->status);
3929 
3930 	if (ev->status)
3931 		return;
3932 
3933 	hci_dev_lock(hdev);
3934 
3935 	hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
3936 	if (!hchan)
3937 		goto unlock;
3938 
3939 	amp_destroy_logical_link(hchan, ev->reason);
3940 
3941 unlock:
3942 	hci_dev_unlock(hdev);
3943 }
3944 
3945 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
3946 					     struct sk_buff *skb)
3947 {
3948 	struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
3949 	struct hci_conn *hcon;
3950 
3951 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3952 
3953 	if (ev->status)
3954 		return;
3955 
3956 	hci_dev_lock(hdev);
3957 
3958 	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3959 	if (hcon) {
3960 		hcon->state = BT_CLOSED;
3961 		hci_conn_del(hcon);
3962 	}
3963 
3964 	hci_dev_unlock(hdev);
3965 }
3966 
/* Handle the LE Connection Complete event: find or create the hci_conn,
 * fill in initiator/responder address tracking, resolve RPAs to
 * identity addresses and finalize or fail the connection.
 */
static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;
	struct smp_irk *irk;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	/* An outgoing attempt is tracked as an LE link in BT_CONNECT */
	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (!conn) {
		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
		if (!conn) {
			BT_ERR("No memory for new connection");
			goto unlock;
		}

		conn->dst_type = ev->bdaddr_type;

		if (ev->role == LE_CONN_ROLE_MASTER) {
			conn->out = true;
			conn->link_mode |= HCI_LM_MASTER;
		}

		/* If we didn't have a hci_conn object previously
		 * but we're in master role this must be something
		 * initiated using a white list. Since white list based
		 * connections are not "first class citizens" we don't
		 * have full tracking of them. Therefore, we go ahead
		 * with a "best effort" approach of determining the
		 * initiator address based on the HCI_PRIVACY flag.
		 */
		if (conn->out) {
			conn->resp_addr_type = ev->bdaddr_type;
			bacpy(&conn->resp_addr, &ev->bdaddr);
			if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
				bacpy(&conn->init_addr, &hdev->rpa);
			} else {
				hci_copy_identity_address(hdev,
							  &conn->init_addr,
							  &conn->init_addr_type);
			}
		}
	} else {
		/* The attempt completed before its timeout fired */
		cancel_delayed_work(&conn->le_conn_timeout);
	}

	if (!conn->out) {
		/* Set the responder (our side) address type based on
		 * the advertising address type.
		 */
		conn->resp_addr_type = hdev->adv_addr_type;
		if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM)
			bacpy(&conn->resp_addr, &hdev->random_addr);
		else
			bacpy(&conn->resp_addr, &hdev->bdaddr);

		conn->init_addr_type = ev->bdaddr_type;
		bacpy(&conn->init_addr, &ev->bdaddr);
	}

	/* Lookup the identity address from the stored connection
	 * address and address type.
	 *
	 * When establishing connections to an identity address, the
	 * connection procedure will store the resolvable random
	 * address first. Now if it can be converted back into the
	 * identity address, start using the identity address from
	 * now on.
	 */
	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
	if (irk) {
		bacpy(&conn->dst, &irk->bdaddr);
		conn->dst_type = irk->addr_type;
	}

	if (ev->status) {
		hci_le_conn_failed(conn, ev->status);
		goto unlock;
	}

	/* Inform mgmt only once per connection */
	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0, NULL);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = __le16_to_cpu(ev->handle);
	conn->state = BT_CONNECTED;

	if (test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
		set_bit(HCI_CONN_6LOWPAN, &conn->flags);

	hci_conn_add_sysfs(conn);

	hci_proto_connect_cfm(conn, ev->status);

	/* The device is connected, so it no longer needs a pending
	 * connection entry.
	 */
	hci_pend_le_conn_del(hdev, &conn->dst, conn->dst_type);

unlock:
	hci_dev_unlock(hdev);
}
4070 
4071 /* This function requires the caller holds hdev->lock */
4072 static void check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr,
4073 				  u8 addr_type)
4074 {
4075 	struct hci_conn *conn;
4076 	struct smp_irk *irk;
4077 
4078 	/* If this is a resolvable address, we should resolve it and then
4079 	 * update address and address type variables.
4080 	 */
4081 	irk = hci_get_irk(hdev, addr, addr_type);
4082 	if (irk) {
4083 		addr = &irk->bdaddr;
4084 		addr_type = irk->addr_type;
4085 	}
4086 
4087 	if (!hci_pend_le_conn_lookup(hdev, addr, addr_type))
4088 		return;
4089 
4090 	conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
4091 			      HCI_AT_NO_BONDING);
4092 	if (!IS_ERR(conn))
4093 		return;
4094 
4095 	switch (PTR_ERR(conn)) {
4096 	case -EBUSY:
4097 		/* If hci_connect() returns -EBUSY it means there is already
4098 		 * an LE connection attempt going on. Since controllers don't
4099 		 * support more than one connection attempt at the time, we
4100 		 * don't consider this an error case.
4101 		 */
4102 		break;
4103 	default:
4104 		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
4105 	}
4106 }
4107 
/* Process a single LE advertising report. During active scanning,
 * ADV_IND/ADV_SCAN_IND reports are buffered so they can be merged with
 * the SCAN_RSP that follows; other reports (or non-mergeable
 * sequences) are forwarded to mgmt immediately.
 */
static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
			       u8 bdaddr_type, s8 rssi, u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;
	bool match;

	/* Passive scanning shouldn't trigger any device found events */
	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
		if (type == LE_ADV_IND || type == LE_ADV_DIRECT_IND)
			check_pending_le_conn(hdev, bdaddr, bdaddr_type);
		return;
	}

	/* If there's nothing pending either store the data from this
	 * event or send an immediate device found event if the data
	 * should not be stored for later.
	 */
	if (!has_pending_adv_report(hdev)) {
		/* If the report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, data, len);
			return;
		}

		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, 0, 1, data, len, NULL, 0);
		return;
	}

	/* Check if the pending report is for the same device as the new one */
	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
		 bdaddr_type == d->last_adv_addr_type);

	/* If the pending data doesn't match this report or this isn't a
	 * scan response (e.g. we got a duplicate ADV_IND) then force
	 * sending of the pending data.
	 */
	if (type != LE_ADV_SCAN_RSP || !match) {
		/* Send out whatever is in the cache, but skip duplicates */
		if (!match)
			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, 0, 1,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);

		/* If the new report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, data, len);
			return;
		}

		/* The advertising reports cannot be merged, so clear
		 * the pending report and send out a device found event.
		 */
		clear_pending_adv_report(hdev);
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, 0, 1, data, len, NULL, 0);
		return;
	}

	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
	 * the new event is a SCAN_RSP. We can therefore proceed with
	 * sending a merged device found event.
	 */
	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
			  d->last_adv_addr_type, NULL, rssi, 0, 1, data, len,
			  d->last_adv_data, d->last_adv_data_len);
	clear_pending_adv_report(hdev);
}
4184 
4185 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
4186 {
4187 	u8 num_reports = skb->data[0];
4188 	void *ptr = &skb->data[1];
4189 
4190 	hci_dev_lock(hdev);
4191 
4192 	while (num_reports--) {
4193 		struct hci_ev_le_advertising_info *ev = ptr;
4194 		s8 rssi;
4195 
4196 		rssi = ev->data[ev->length];
4197 		process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
4198 				   ev->bdaddr_type, rssi, ev->data, ev->length);
4199 
4200 		ptr += sizeof(*ev) + ev->length + 1;
4201 	}
4202 
4203 	hci_dev_unlock(hdev);
4204 }
4205 
/* Handle the LE Long Term Key Request event by looking up the LTK for
 * the connection and replying with it, or sending a negative reply if
 * no matching key is stored.
 */
static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	/* Match on EDIV/Rand and our role (master/slave) */
	ltk = hci_find_ltk(hdev, ev->ediv, ev->rand, conn->out);
	if (ltk == NULL)
		goto not_found;

	memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
	cp.handle = cpu_to_le16(conn->handle);

	/* The resulting security level depends on whether the key was
	 * created with MITM protection.
	 */
	if (ltk->authenticated)
		conn->pending_sec_level = BT_SECURITY_HIGH;
	else
		conn->pending_sec_level = BT_SECURITY_MEDIUM;

	conn->enc_key_size = ltk->enc_size;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
	 * temporary key used to encrypt a connection following
	 * pairing. It is used during the Encrypted Session Setup to
	 * distribute the keys. Later, security can be re-established
	 * using a distributed LTK.
	 */
	if (ltk->type == HCI_SMP_STK_SLAVE) {
		list_del(&ltk->list);
		kfree(ltk);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}
4258 
4259 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
4260 {
4261 	struct hci_ev_le_meta *le_ev = (void *) skb->data;
4262 
4263 	skb_pull(skb, sizeof(*le_ev));
4264 
4265 	switch (le_ev->subevent) {
4266 	case HCI_EV_LE_CONN_COMPLETE:
4267 		hci_le_conn_complete_evt(hdev, skb);
4268 		break;
4269 
4270 	case HCI_EV_LE_ADVERTISING_REPORT:
4271 		hci_le_adv_report_evt(hdev, skb);
4272 		break;
4273 
4274 	case HCI_EV_LE_LTK_REQ:
4275 		hci_le_ltk_request_evt(hdev, skb);
4276 		break;
4277 
4278 	default:
4279 		break;
4280 	}
4281 }
4282 
4283 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4284 {
4285 	struct hci_ev_channel_selected *ev = (void *) skb->data;
4286 	struct hci_conn *hcon;
4287 
4288 	BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4289 
4290 	skb_pull(skb, sizeof(*ev));
4291 
4292 	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4293 	if (!hcon)
4294 		return;
4295 
4296 	amp_read_loc_assoc_final_data(hdev, hcon);
4297 }
4298 
/* Main HCI event entry point: stash a copy of the event for any
 * pending synchronous request, complete the sent command if this is
 * the event it was waiting for, and dispatch to the per-event handler.
 */
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	__u8 event = hdr->evt;

	hci_dev_lock(hdev);

	/* Received events are (currently) only needed when a request is
	 * ongoing so avoid unnecessary memory allocation.
	 */
	if (hdev->req_status == HCI_REQ_PEND) {
		kfree_skb(hdev->recv_evt);
		hdev->recv_evt = skb_clone(skb, GFP_KERNEL);
	}

	hci_dev_unlock(hdev);

	/* Strip the event header; handlers expect their parameters at
	 * skb->data.
	 */
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* If the last sent command was waiting for this specific event,
	 * mark the request complete.
	 */
	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
		struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(cmd_hdr->opcode);

		hci_req_cmd_complete(hdev, opcode, 0);
	}

	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_KEY_REFRESH_COMPLETE:
		hci_key_refresh_complete_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_REQUEST:
		hci_user_passkey_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_NOTIFY:
		hci_user_passkey_notify_evt(hdev, skb);
		break;

	case HCI_EV_KEYPRESS_NOTIFY:
		hci_keypress_notify_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_CHANNEL_SELECTED:
		hci_chan_selected_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

	case HCI_EV_PHY_LINK_COMPLETE:
		hci_phy_link_complete_evt(hdev, skb);
		break;

	case HCI_EV_LOGICAL_LINK_COMPLETE:
		hci_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
		hci_disconn_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
		hci_disconn_phylink_complete_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_BLOCKS:
		hci_num_comp_blocks_evt(hdev, skb);
		break;

	default:
		BT_DBG("%s event 0x%2.2x", hdev->name, event);
		break;
	}

	kfree_skb(skb);
	hdev->stat.evt_rx++;
}
4502