xref: /openbmc/linux/net/bluetooth/hci_event.c (revision f35e839a)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4 
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI event handling. */
26 
27 #include <asm/unaligned.h>
28 
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
32 #include <net/bluetooth/a2mp.h>
33 #include <net/bluetooth/amp.h>
34 
35 /* Handle HCI Event packets */
36 
/* Command Complete handler for HCI_OP_INQUIRY_CANCEL.
 *
 * On failure, report the failed stop-discovery attempt to the management
 * interface. On success, clear the inquiry flag, wake any waiters blocked
 * on it, move discovery to the STOPPED state and kick off any connection
 * attempts that were deferred while inquiry was running.
 */
static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status) {
		hci_dev_lock(hdev);
		mgmt_stop_discovery_failed(hdev, status);
		hci_dev_unlock(hdev);
		return;
	}

	/* The barrier between clear_bit() and wake_up_bit() is required;
	 * see the wake_up_bit() documentation.
	 */
	clear_bit(HCI_INQUIRY, &hdev->flags);
	smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
60 
/* Command Complete handler for HCI_OP_PERIODIC_INQ.
 *
 * On success, record that periodic inquiry mode is now active.
 */
static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
}
72 
/* Command Complete handler for HCI_OP_EXIT_PERIODIC_INQ.
 *
 * On success, clear the periodic inquiry flag and resume any pending
 * connection attempts that were held back while inquiry was active.
 */
static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);

	hci_conn_check_pending(hdev);
}
86 
/* Command Complete handler for HCI_OP_REMOTE_NAME_REQ_CANCEL.
 *
 * No state needs updating here; the handler exists only so the event
 * is acknowledged (and traced) rather than reported as unknown.
 */
static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
92 
/* Command Complete handler for HCI_OP_ROLE_DISCOVERY.
 *
 * Update the cached master/slave role bit of the connection identified
 * by the returned handle. A zero role means master per the reply format
 * used here (rp->role non-zero clears HCI_LM_MASTER).
 */
static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_role_discovery *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		if (rp->role)
			conn->link_mode &= ~HCI_LM_MASTER;
		else
			conn->link_mode |= HCI_LM_MASTER;
	}

	hci_dev_unlock(hdev);
}
115 
/* Command Complete handler for HCI_OP_READ_LINK_POLICY.
 *
 * Cache the reported link policy on the matching connection.
 */
static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = __le16_to_cpu(rp->policy);

	hci_dev_unlock(hdev);
}
134 
/* Command Complete handler for HCI_OP_WRITE_LINK_POLICY.
 *
 * The reply only carries status + handle, so the policy value that was
 * actually written is recovered from the sent command parameters and
 * cached on the matching connection.
 */
static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		/* sent + 2 skips the 16-bit handle at the start of the
		 * command parameters to reach the policy field.
		 */
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}
158 
/* Command Complete handler for HCI_OP_READ_DEF_LINK_POLICY.
 *
 * Cache the controller's default link policy on the device.
 */
static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_def_link_policy *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->link_policy = __le16_to_cpu(rp->policy);
}
171 
/* Command Complete handler for HCI_OP_WRITE_DEF_LINK_POLICY.
 *
 * On success, mirror the policy value from the sent command into the
 * device's cached default link policy.
 */
static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
	if (!sent)
		return;

	if (!status)
		hdev->link_policy = get_unaligned_le16(sent);
}
187 
/* Command Complete handler for HCI_OP_RESET.
 *
 * The controller has been reset, so drop all state that does not
 * survive a reset: non-persistent device flags, discovery state,
 * cached TX power values and the advertising data.
 */
static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	clear_bit(HCI_RESET, &hdev->flags);

	/* Reset all non-persistent flags */
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	hdev->discovery.state = DISCOVERY_STOPPED;
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;
}
206 
/* Command Complete handler for HCI_OP_WRITE_LOCAL_NAME.
 *
 * If the management interface is in use it is notified of the result
 * (success or failure) and owns the name bookkeeping; otherwise the
 * name from the sent command is copied into hdev on success.
 */
static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_set_local_name_complete(hdev, sent, status);
	else if (!status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);
}
227 
/* Command Complete handler for HCI_OP_READ_LOCAL_NAME.
 *
 * The name is only cached during initial setup; after setup the name
 * is managed through the write path instead.
 */
static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_name *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_SETUP, &hdev->dev_flags))
		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
}
240 
/* Command Complete handler for HCI_OP_WRITE_AUTH_ENABLE.
 *
 * On success, mirror the authentication setting from the sent command
 * into the HCI_AUTH flag. The management interface is always told the
 * outcome (including failures) when it is in use.
 */
static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return;

	if (!status) {
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_auth_enable_complete(hdev, status);
}
264 
/* Command Complete handler for HCI_OP_WRITE_ENCRYPT_MODE.
 *
 * On success, mirror the encryption mode from the sent command into
 * the HCI_ENCRYPT flag.
 */
static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
	if (!sent)
		return;

	if (!status) {
		__u8 param = *((__u8 *) sent);

		if (param)
			set_bit(HCI_ENCRYPT, &hdev->flags);
		else
			clear_bit(HCI_ENCRYPT, &hdev->flags);
	}
}
285 
/* Command Complete handler for HCI_OP_WRITE_SCAN_ENABLE.
 *
 * Synchronize the HCI_ISCAN/HCI_PSCAN flags with the scan mode that
 * was written, emit mgmt discoverable/connectable events only on
 * actual transitions, and arm the discoverable timeout when inquiry
 * scan was enabled with a timeout configured. On failure the mgmt
 * layer is informed and the discoverable timeout is cancelled.
 */
static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 param, status = *((__u8 *) skb->data);
	int old_pscan, old_iscan;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return;

	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	if (status) {
		mgmt_write_scan_failed(hdev, param, status);
		hdev->discov_timeout = 0;
		goto done;
	}

	/* Capture-and-clear so transitions can be detected below */
	old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
	old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_INQUIRY) {
		set_bit(HCI_ISCAN, &hdev->flags);
		if (!old_iscan)
			mgmt_discoverable(hdev, 1);
		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else if (old_iscan)
		mgmt_discoverable(hdev, 0);

	if (param & SCAN_PAGE) {
		set_bit(HCI_PSCAN, &hdev->flags);
		if (!old_pscan)
			mgmt_connectable(hdev, 1);
	} else if (old_pscan)
		mgmt_connectable(hdev, 0);

done:
	hci_dev_unlock(hdev);
}
333 
/* Command Complete handler for HCI_OP_READ_CLASS_OF_DEV.
 *
 * Cache the 3-byte class of device value on success.
 */
static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_class_of_dev *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->dev_class, rp->dev_class, 3);

	BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
	       hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
}
348 
/* Command Complete handler for HCI_OP_WRITE_CLASS_OF_DEV.
 *
 * On success, mirror the class value from the sent command into the
 * device cache; the management interface is always told the outcome
 * when it is in use.
 */
static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (status == 0)
		memcpy(hdev->dev_class, sent, 3);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_set_class_of_dev_complete(hdev, sent, status);

	hci_dev_unlock(hdev);
}
370 
/* Command Complete handler for HCI_OP_READ_VOICE_SETTING.
 *
 * Cache the voice setting; if it changed, notify the driver so it
 * can reconfigure SCO routing.
 */
static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_voice_setting *rp = (void *) skb->data;
	__u16 setting;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	setting = __le16_to_cpu(rp->voice_setting);

	/* Avoid redundant driver notifications when nothing changed */
	if (hdev->voice_setting == setting)
		return;

	hdev->voice_setting = setting;

	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
}
393 
/* Command Complete handler for HCI_OP_WRITE_VOICE_SETTING.
 *
 * On success, mirror the setting from the sent command into the
 * device cache and notify the driver if it actually changed.
 */
static void hci_cc_write_voice_setting(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u16 setting;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
	if (!sent)
		return;

	setting = get_unaligned_le16(sent);

	/* Avoid redundant driver notifications when nothing changed */
	if (hdev->voice_setting == setting)
		return;

	hdev->voice_setting = setting;

	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
}
422 
/* Command Complete handler for HCI_OP_WRITE_SSP_MODE.
 *
 * On success, mirror the Secure Simple Pairing host-feature bit. The
 * mgmt interface owns the HCI_SSP_ENABLED flag when it is in use and
 * is always told the outcome; otherwise the flag is updated directly.
 */
static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_write_ssp_mode *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return;

	if (!status) {
		if (sent->mode)
			hdev->features[1][0] |= LMP_HOST_SSP;
		else
			hdev->features[1][0] &= ~LMP_HOST_SSP;
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_ssp_enable_complete(hdev, sent->mode, status);
	else if (!status) {
		if (sent->mode)
			set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		else
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	}
}
450 
/* Command Complete handler for HCI_OP_READ_LOCAL_VERSION.
 *
 * Cache the controller's HCI/LMP version and manufacturer info.
 */
static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_version *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->hci_ver = rp->hci_ver;
	hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
	hdev->lmp_ver = rp->lmp_ver;
	hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
	hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);

	BT_DBG("%s manufacturer 0x%4.4x hci ver %d:%d", hdev->name,
	       hdev->manufacturer, hdev->hci_ver, hdev->hci_rev);
}
469 
470 static void hci_cc_read_local_commands(struct hci_dev *hdev,
471 				       struct sk_buff *skb)
472 {
473 	struct hci_rp_read_local_commands *rp = (void *) skb->data;
474 
475 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
476 
477 	if (!rp->status)
478 		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
479 }
480 
/* Command Complete handler for HCI_OP_READ_LOCAL_FEATURES.
 *
 * Cache page 0 of the LMP feature mask and derive the default ACL
 * packet types and (e)SCO link types from the advertised features.
 */
static void hci_cc_read_local_features(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type  |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type  |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);

	BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
	       hdev->features[0][0], hdev->features[0][1],
	       hdev->features[0][2], hdev->features[0][3],
	       hdev->features[0][4], hdev->features[0][5],
	       hdev->features[0][6], hdev->features[0][7]);
}
536 
/* Command Complete handler for HCI_OP_READ_LOCAL_EXT_FEATURES.
 *
 * Record the highest supported feature page and cache the returned
 * page, bounds-checked against the local feature page array.
 */
static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->max_page = rp->max_page;

	/* Guard against out-of-range page numbers from the controller */
	if (rp->page < HCI_MAX_PAGES)
		memcpy(hdev->features[rp->page], rp->features, 8);
}
552 
553 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
554 					  struct sk_buff *skb)
555 {
556 	struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
557 
558 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
559 
560 	if (!rp->status)
561 		hdev->flow_ctl_mode = rp->mode;
562 }
563 
/* Command Complete handler for HCI_OP_READ_BUFFER_SIZE.
 *
 * Cache the controller's ACL/SCO MTUs and buffer counts, applying the
 * fixup quirk for controllers that misreport their SCO buffer size,
 * and initialize the available-packet counters used for flow control.
 */
static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu  = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
		hdev->sco_mtu  = 64;
		hdev->sco_pkts = 8;
	}

	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
}
589 
590 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
591 {
592 	struct hci_rp_read_bd_addr *rp = (void *) skb->data;
593 
594 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
595 
596 	if (!rp->status)
597 		bacpy(&hdev->bdaddr, &rp->bdaddr);
598 }
599 
/* Command Complete handler for HCI_OP_READ_PAGE_SCAN_ACTIVITY.
 *
 * Only cache the values during controller initialization; afterwards
 * the write path is the authoritative source.
 */
static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (test_bit(HCI_INIT, &hdev->flags) && !rp->status) {
		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
		hdev->page_scan_window = __le16_to_cpu(rp->window);
	}
}
612 
/* Command Complete handler for HCI_OP_WRITE_PAGE_SCAN_ACTIVITY.
 *
 * On success, mirror the interval/window from the sent command into
 * the device cache.
 */
static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	struct hci_cp_write_page_scan_activity *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
	if (!sent)
		return;

	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
	hdev->page_scan_window = __le16_to_cpu(sent->window);
}
631 
/* Command Complete handler for HCI_OP_READ_PAGE_SCAN_TYPE.
 *
 * Only cache the value during controller initialization; afterwards
 * the write path is the authoritative source.
 */
static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_type *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (test_bit(HCI_INIT, &hdev->flags) && !rp->status)
		hdev->page_scan_type = rp->type;
}
642 
/* Command Complete handler for HCI_OP_WRITE_PAGE_SCAN_TYPE.
 *
 * On success, mirror the page scan type from the sent command into
 * the device cache.
 */
static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	u8 *type;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
	if (type)
		hdev->page_scan_type = *type;
}
658 
/* Command Complete handler for HCI_OP_READ_DATA_BLOCK_SIZE.
 *
 * Cache the block-based flow control parameters and initialize the
 * available-block counter.
 */
static void hci_cc_read_data_block_size(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_data_block_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
	hdev->block_len = __le16_to_cpu(rp->block_len);
	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);

	hdev->block_cnt = hdev->num_blocks;

	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
	       hdev->block_cnt, hdev->block_len);
}
678 
/* Command Complete handler for HCI_OP_READ_LOCAL_AMP_INFO.
 *
 * On success, cache the AMP controller capabilities. In all cases an
 * A2MP Get Info response is sent (the response carries the failure
 * status when the command failed).
 */
static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		goto a2mp_rsp;

	hdev->amp_status = rp->amp_status;
	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
	hdev->amp_type = rp->amp_type;
	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);

a2mp_rsp:
	a2mp_send_getinfo_rsp(hdev);
}
703 
704 static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
705 					struct sk_buff *skb)
706 {
707 	struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
708 	struct amp_assoc *assoc = &hdev->loc_assoc;
709 	size_t rem_len, frag_len;
710 
711 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
712 
713 	if (rp->status)
714 		goto a2mp_rsp;
715 
716 	frag_len = skb->len - sizeof(*rp);
717 	rem_len = __le16_to_cpu(rp->rem_len);
718 
719 	if (rem_len > frag_len) {
720 		BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
721 
722 		memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
723 		assoc->offset += frag_len;
724 
725 		/* Read other fragments */
726 		amp_read_loc_assoc_frag(hdev, rp->phy_handle);
727 
728 		return;
729 	}
730 
731 	memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
732 	assoc->len = assoc->offset + rem_len;
733 	assoc->offset = 0;
734 
735 a2mp_rsp:
736 	/* Send A2MP Rsp when all fragments are received */
737 	a2mp_send_getampassoc_rsp(hdev, rp->status);
738 	a2mp_send_create_phy_link_req(hdev, rp->status);
739 }
740 
741 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
742 					 struct sk_buff *skb)
743 {
744 	struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
745 
746 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
747 
748 	if (!rp->status)
749 		hdev->inq_tx_power = rp->tx_power;
750 }
751 
/* Command Complete handler for HCI_OP_PIN_CODE_REPLY.
 *
 * Notify the management interface of the result and, on success,
 * record the PIN length from the sent command on the matching ACL
 * connection (used later for link-key type decisions).
 */
static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
}
779 
/* Command Complete handler for HCI_OP_PIN_CODE_NEG_REPLY.
 *
 * Forward the result to the management interface when it is in use.
 */
static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
						 rp->status);

	hci_dev_unlock(hdev);
}
794 
/* Command Complete handler for HCI_OP_LE_READ_BUFFER_SIZE.
 *
 * Cache the LE ACL MTU and packet count and initialize the LE
 * available-packet counter used for flow control.
 */
static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
	hdev->le_pkts = rp->le_max_pkt;

	hdev->le_cnt = hdev->le_pkts;

	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
}
812 
813 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
814 					  struct sk_buff *skb)
815 {
816 	struct hci_rp_le_read_local_features *rp = (void *) skb->data;
817 
818 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
819 
820 	if (!rp->status)
821 		memcpy(hdev->le_features, rp->features, 8);
822 }
823 
824 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
825 					struct sk_buff *skb)
826 {
827 	struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
828 
829 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
830 
831 	if (!rp->status)
832 		hdev->adv_tx_power = rp->tx_power;
833 }
834 
/* Command Complete handler for HCI_OP_USER_CONFIRM_REPLY.
 *
 * Forward the result to the management interface when it is in use.
 */
static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
						 rp->status);

	hci_dev_unlock(hdev);
}
849 
/* Command Complete handler for HCI_OP_USER_CONFIRM_NEG_REPLY.
 *
 * Forward the result to the management interface when it is in use.
 */
static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}
865 
/* Command Complete handler for HCI_OP_USER_PASSKEY_REPLY.
 *
 * Forward the result to the management interface when it is in use.
 */
static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
						 0, rp->status);

	hci_dev_unlock(hdev);
}
880 
/* Command Complete handler for HCI_OP_USER_PASSKEY_NEG_REPLY.
 *
 * Forward the result to the management interface when it is in use.
 */
static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}
896 
/* Command Complete handler for HCI_OP_READ_LOCAL_OOB_DATA.
 *
 * Hand the OOB hash/randomizer (or the failure status) to the
 * management interface, which completes the pending mgmt command.
 */
static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);
	mgmt_read_local_oob_data_reply_complete(hdev, rp->hash,
						rp->randomizer, rp->status);
	hci_dev_unlock(hdev);
}
909 
/* Command Complete handler for HCI_OP_LE_SET_ADV_ENABLE.
 *
 * On success, mirror the advertising-enable parameter into the
 * HCI_LE_PERIPHERAL flag. Outside of controller initialization a
 * request is queued to refresh the advertising data, since the flag
 * change can affect its contents.
 */
static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		if (*sent)
			set_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags);
		else
			clear_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags);
	}

	/* Skip the AD refresh during init; the init sequence handles it */
	if (!test_bit(HCI_INIT, &hdev->flags)) {
		struct hci_request req;

		hci_req_init(&req, hdev);
		hci_update_ad(&req);
		hci_req_run(&req, NULL);
	}

	hci_dev_unlock(hdev);
}
939 
940 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
941 {
942 	__u8 status = *((__u8 *) skb->data);
943 
944 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
945 
946 	if (status) {
947 		hci_dev_lock(hdev);
948 		mgmt_start_discovery_failed(hdev, status);
949 		hci_dev_unlock(hdev);
950 		return;
951 	}
952 }
953 
/* Command Complete handler for HCI_OP_LE_SET_SCAN_ENABLE.
 *
 * Drive the discovery state machine based on whether the sent command
 * enabled or disabled LE scanning. Failures are reported to the mgmt
 * layer as failed start/stop-discovery attempts. When an interleaved
 * (BR/EDR + LE) discovery is in progress, disabling the LE scan hands
 * control back to mgmt to start the BR/EDR phase instead of stopping.
 */
static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	switch (cp->enable) {
	case LE_SCAN_ENABLE:
		if (status) {
			hci_dev_lock(hdev);
			mgmt_start_discovery_failed(hdev, status);
			hci_dev_unlock(hdev);
			return;
		}

		set_bit(HCI_LE_SCAN, &hdev->dev_flags);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_FINDING);
		hci_dev_unlock(hdev);
		break;

	case LE_SCAN_DISABLE:
		if (status) {
			hci_dev_lock(hdev);
			mgmt_stop_discovery_failed(hdev, status);
			hci_dev_unlock(hdev);
			return;
		}

		clear_bit(HCI_LE_SCAN, &hdev->dev_flags);

		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
		    hdev->discovery.state == DISCOVERY_FINDING) {
			/* LE phase done; continue with BR/EDR inquiry */
			mgmt_interleaved_discovery(hdev);
		} else {
			hci_dev_lock(hdev);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
			hci_dev_unlock(hdev);
		}

		break;

	default:
		BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
		break;
	}
}
1008 
1009 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1010 					   struct sk_buff *skb)
1011 {
1012 	struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1013 
1014 	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1015 
1016 	if (!rp->status)
1017 		hdev->le_white_list_size = rp->size;
1018 }
1019 
1020 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1021 					    struct sk_buff *skb)
1022 {
1023 	struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1024 
1025 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1026 
1027 	if (!rp->status)
1028 		memcpy(hdev->le_states, rp->le_states, 8);
1029 }
1030 
/* Command Complete handler for HCI_OP_WRITE_LE_HOST_SUPPORTED.
 *
 * On success, mirror the LE and simultaneous LE/BR-EDR host feature
 * bits from the sent command. Outside of controller initialization the
 * mgmt layer is told the outcome so a pending Set LE command can be
 * completed.
 */
static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_cp_write_le_host_supported *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
	if (!sent)
		return;

	if (!status) {
		if (sent->le)
			hdev->features[1][0] |= LMP_HOST_LE;
		else
			hdev->features[1][0] &= ~LMP_HOST_LE;

		if (sent->simul)
			hdev->features[1][0] |= LMP_HOST_LE_BREDR;
		else
			hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
	    !test_bit(HCI_INIT, &hdev->flags))
		mgmt_le_enable_complete(hdev, sent->le, status);
}
1059 
/* Command Complete handler for HCI_OP_WRITE_REMOTE_AMP_ASSOC.
 *
 * On success, continue writing the next fragment of the remote AMP
 * assoc for the given physical link.
 */
static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
	       hdev->name, rp->status, rp->phy_handle);

	if (rp->status)
		return;

	amp_write_rem_assoc_continue(hdev, rp->phy_handle);
}
1073 
/* Command Status handler for HCI_OP_INQUIRY.
 *
 * On failure, resume any deferred connection attempts and tell the
 * mgmt layer (if in use) that discovery failed to start. On success,
 * mark inquiry as active and move discovery to the FINDING state.
 */
static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status) {
		hci_conn_check_pending(hdev);
		hci_dev_lock(hdev);
		if (test_bit(HCI_MGMT, &hdev->dev_flags))
			mgmt_start_discovery_failed(hdev, status);
		hci_dev_unlock(hdev);
		return;
	}

	set_bit(HCI_INQUIRY, &hdev->flags);

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_FINDING);
	hci_dev_unlock(hdev);
}
1093 
/* Command Status handler for HCI_OP_CREATE_CONN.
 *
 * On failure, tear down the outgoing connection object — except for
 * status 0x0c (Command Disallowed) on the first attempts, where the
 * connection is parked in BT_CONNECT2 so it can be retried. On
 * success, make sure a connection object exists for the address and
 * mark it as an outgoing, master-role link.
 */
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			/* 0x0c = Command Disallowed: retry up to twice */
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_proto_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
			if (conn) {
				conn->out = true;
				conn->link_mode |= HCI_LM_MASTER;
			} else
				BT_ERR("No memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}
1133 
1134 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1135 {
1136 	struct hci_cp_add_sco *cp;
1137 	struct hci_conn *acl, *sco;
1138 	__u16 handle;
1139 
1140 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1141 
1142 	if (!status)
1143 		return;
1144 
1145 	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1146 	if (!cp)
1147 		return;
1148 
1149 	handle = __le16_to_cpu(cp->handle);
1150 
1151 	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1152 
1153 	hci_dev_lock(hdev);
1154 
1155 	acl = hci_conn_hash_lookup_handle(hdev, handle);
1156 	if (acl) {
1157 		sco = acl->link;
1158 		if (sco) {
1159 			sco->state = BT_CLOSED;
1160 
1161 			hci_proto_connect_cfm(sco, status);
1162 			hci_conn_del(sco);
1163 		}
1164 	}
1165 
1166 	hci_dev_unlock(hdev);
1167 }
1168 
1169 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1170 {
1171 	struct hci_cp_auth_requested *cp;
1172 	struct hci_conn *conn;
1173 
1174 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1175 
1176 	if (!status)
1177 		return;
1178 
1179 	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1180 	if (!cp)
1181 		return;
1182 
1183 	hci_dev_lock(hdev);
1184 
1185 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1186 	if (conn) {
1187 		if (conn->state == BT_CONFIG) {
1188 			hci_proto_connect_cfm(conn, status);
1189 			hci_conn_drop(conn);
1190 		}
1191 	}
1192 
1193 	hci_dev_unlock(hdev);
1194 }
1195 
1196 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1197 {
1198 	struct hci_cp_set_conn_encrypt *cp;
1199 	struct hci_conn *conn;
1200 
1201 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1202 
1203 	if (!status)
1204 		return;
1205 
1206 	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1207 	if (!cp)
1208 		return;
1209 
1210 	hci_dev_lock(hdev);
1211 
1212 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1213 	if (conn) {
1214 		if (conn->state == BT_CONFIG) {
1215 			hci_proto_connect_cfm(conn, status);
1216 			hci_conn_drop(conn);
1217 		}
1218 	}
1219 
1220 	hci_dev_unlock(hdev);
1221 }
1222 
1223 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1224 				    struct hci_conn *conn)
1225 {
1226 	if (conn->state != BT_CONFIG || !conn->out)
1227 		return 0;
1228 
1229 	if (conn->pending_sec_level == BT_SECURITY_SDP)
1230 		return 0;
1231 
1232 	/* Only request authentication for SSP connections or non-SSP
1233 	 * devices with sec_level HIGH or if MITM protection is requested */
1234 	if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1235 	    conn->pending_sec_level != BT_SECURITY_HIGH)
1236 		return 0;
1237 
1238 	return 1;
1239 }
1240 
1241 static int hci_resolve_name(struct hci_dev *hdev,
1242 				   struct inquiry_entry *e)
1243 {
1244 	struct hci_cp_remote_name_req cp;
1245 
1246 	memset(&cp, 0, sizeof(cp));
1247 
1248 	bacpy(&cp.bdaddr, &e->data.bdaddr);
1249 	cp.pscan_rep_mode = e->data.pscan_rep_mode;
1250 	cp.pscan_mode = e->data.pscan_mode;
1251 	cp.clock_offset = e->data.clock_offset;
1252 
1253 	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1254 }
1255 
1256 static bool hci_resolve_next_name(struct hci_dev *hdev)
1257 {
1258 	struct discovery_state *discov = &hdev->discovery;
1259 	struct inquiry_entry *e;
1260 
1261 	if (list_empty(&discov->resolve))
1262 		return false;
1263 
1264 	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1265 	if (!e)
1266 		return false;
1267 
1268 	if (hci_resolve_name(hdev, e) == 0) {
1269 		e->name_state = NAME_PENDING;
1270 		return true;
1271 	}
1272 
1273 	return false;
1274 }
1275 
/* Process a completed (or failed, name == NULL) remote name lookup and
 * drive the discovery state machine forward: notify mgmt, update the
 * inquiry cache entry and either resolve the next name or finish
 * discovery.
 */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	/* First mgmt notification for this connection carries the name */
	if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
				      name_len, conn->dev_class);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in a list of found devices names of which
	 * are pending. there is no need to continue resolving a next name as it
	 * will be done upon receiving another Remote Name Request Complete
	 * Event */
	if (!e)
		return;

	list_del(&e->list);
	if (name) {
		e->name_state = NAME_KNOWN;
		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
				 e->data.rssi, name, name_len);
	} else {
		/* Lookup failed; remember so we do not retry this entry */
		e->name_state = NAME_NOT_KNOWN;
	}

	/* More names pending: resolution continues, stay in RESOLVING */
	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
1318 
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* If successful wait for the name req complete event before
	 * checking for the need to do authentication */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	/* Let the discovery machinery know the lookup failed (NULL name) */
	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	/* Kick off authentication if it is not already pending.
	 * Note: this inner cp shadows the command-data pointer above.
	 */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;
		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
1357 
1358 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1359 {
1360 	struct hci_cp_read_remote_features *cp;
1361 	struct hci_conn *conn;
1362 
1363 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1364 
1365 	if (!status)
1366 		return;
1367 
1368 	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1369 	if (!cp)
1370 		return;
1371 
1372 	hci_dev_lock(hdev);
1373 
1374 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1375 	if (conn) {
1376 		if (conn->state == BT_CONFIG) {
1377 			hci_proto_connect_cfm(conn, status);
1378 			hci_conn_drop(conn);
1379 		}
1380 	}
1381 
1382 	hci_dev_unlock(hdev);
1383 }
1384 
1385 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1386 {
1387 	struct hci_cp_read_remote_ext_features *cp;
1388 	struct hci_conn *conn;
1389 
1390 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1391 
1392 	if (!status)
1393 		return;
1394 
1395 	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1396 	if (!cp)
1397 		return;
1398 
1399 	hci_dev_lock(hdev);
1400 
1401 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1402 	if (conn) {
1403 		if (conn->state == BT_CONFIG) {
1404 			hci_proto_connect_cfm(conn, status);
1405 			hci_conn_drop(conn);
1406 		}
1407 	}
1408 
1409 	hci_dev_unlock(hdev);
1410 }
1411 
1412 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1413 {
1414 	struct hci_cp_setup_sync_conn *cp;
1415 	struct hci_conn *acl, *sco;
1416 	__u16 handle;
1417 
1418 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1419 
1420 	if (!status)
1421 		return;
1422 
1423 	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1424 	if (!cp)
1425 		return;
1426 
1427 	handle = __le16_to_cpu(cp->handle);
1428 
1429 	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1430 
1431 	hci_dev_lock(hdev);
1432 
1433 	acl = hci_conn_hash_lookup_handle(hdev, handle);
1434 	if (acl) {
1435 		sco = acl->link;
1436 		if (sco) {
1437 			sco->state = BT_CLOSED;
1438 
1439 			hci_proto_connect_cfm(sco, status);
1440 			hci_conn_del(sco);
1441 		}
1442 	}
1443 
1444 	hci_dev_unlock(hdev);
1445 }
1446 
1447 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1448 {
1449 	struct hci_cp_sniff_mode *cp;
1450 	struct hci_conn *conn;
1451 
1452 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1453 
1454 	if (!status)
1455 		return;
1456 
1457 	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1458 	if (!cp)
1459 		return;
1460 
1461 	hci_dev_lock(hdev);
1462 
1463 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1464 	if (conn) {
1465 		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1466 
1467 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1468 			hci_sco_setup(conn, status);
1469 	}
1470 
1471 	hci_dev_unlock(hdev);
1472 }
1473 
1474 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1475 {
1476 	struct hci_cp_exit_sniff_mode *cp;
1477 	struct hci_conn *conn;
1478 
1479 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1480 
1481 	if (!status)
1482 		return;
1483 
1484 	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1485 	if (!cp)
1486 		return;
1487 
1488 	hci_dev_lock(hdev);
1489 
1490 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1491 	if (conn) {
1492 		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1493 
1494 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1495 			hci_sco_setup(conn, status);
1496 	}
1497 
1498 	hci_dev_unlock(hdev);
1499 }
1500 
1501 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1502 {
1503 	struct hci_cp_disconnect *cp;
1504 	struct hci_conn *conn;
1505 
1506 	if (!status)
1507 		return;
1508 
1509 	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1510 	if (!cp)
1511 		return;
1512 
1513 	hci_dev_lock(hdev);
1514 
1515 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1516 	if (conn)
1517 		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1518 				       conn->dst_type, status);
1519 
1520 	hci_dev_unlock(hdev);
1521 }
1522 
static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* Only a failed LE Create Connection needs handling here; on
	 * success the connection completes via its own event.
	 */
	if (status) {
		hci_dev_lock(hdev);

		/* There is at most one pending outgoing LE connection,
		 * so look it up by state rather than by address.
		 */
		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
		if (!conn) {
			hci_dev_unlock(hdev);
			return;
		}

		BT_DBG("%s bdaddr %pMR conn %p", hdev->name, &conn->dst, conn);

		conn->state = BT_CLOSED;
		mgmt_connect_failed(hdev, &conn->dst, conn->type,
				    conn->dst_type, status);
		hci_proto_connect_cfm(conn, status);
		hci_conn_del(conn);

		hci_dev_unlock(hdev);
	}
}
1549 
1550 static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
1551 {
1552 	struct hci_cp_create_phy_link *cp;
1553 
1554 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1555 
1556 	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
1557 	if (!cp)
1558 		return;
1559 
1560 	hci_dev_lock(hdev);
1561 
1562 	if (status) {
1563 		struct hci_conn *hcon;
1564 
1565 		hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
1566 		if (hcon)
1567 			hci_conn_del(hcon);
1568 	} else {
1569 		amp_write_remote_assoc(hdev, cp->phy_handle);
1570 	}
1571 
1572 	hci_dev_unlock(hdev);
1573 }
1574 
1575 static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
1576 {
1577 	struct hci_cp_accept_phy_link *cp;
1578 
1579 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1580 
1581 	if (status)
1582 		return;
1583 
1584 	cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
1585 	if (!cp)
1586 		return;
1587 
1588 	amp_write_remote_assoc(hdev, cp->phy_handle);
1589 }
1590 
static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	hci_conn_check_pending(hdev);

	/* Bail out if no inquiry was actually in flight */
	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		return;

	smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	/* Discovery state below is only tracked for mgmt-managed devices */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	if (discov->state != DISCOVERY_FINDING)
		goto unlock;

	/* No names left to resolve: discovery is done */
	if (list_empty(&discov->resolve)) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	/* Start resolving the first pending name; if the request cannot
	 * be sent, give up and stop discovery.
	 */
	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (e && hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
	} else {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
}
1631 
1632 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1633 {
1634 	struct inquiry_data data;
1635 	struct inquiry_info *info = (void *) (skb->data + 1);
1636 	int num_rsp = *((__u8 *) skb->data);
1637 
1638 	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1639 
1640 	if (!num_rsp)
1641 		return;
1642 
1643 	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
1644 		return;
1645 
1646 	hci_dev_lock(hdev);
1647 
1648 	for (; num_rsp; num_rsp--, info++) {
1649 		bool name_known, ssp;
1650 
1651 		bacpy(&data.bdaddr, &info->bdaddr);
1652 		data.pscan_rep_mode	= info->pscan_rep_mode;
1653 		data.pscan_period_mode	= info->pscan_period_mode;
1654 		data.pscan_mode		= info->pscan_mode;
1655 		memcpy(data.dev_class, info->dev_class, 3);
1656 		data.clock_offset	= info->clock_offset;
1657 		data.rssi		= 0x00;
1658 		data.ssp_mode		= 0x00;
1659 
1660 		name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
1661 		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
1662 				  info->dev_class, 0, !name_known, ssp, NULL,
1663 				  0);
1664 	}
1665 
1666 	hci_dev_unlock(hdev);
1667 }
1668 
static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* A SCO completion may answer a request that was queued
		 * as eSCO; retarget that connection object.
		 */
		if (ev->link_type != SCO_LINK)
			goto unlock;

		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	if (!ev->status) {
		conn->handle = __le16_to_cpu(ev->handle);

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			/* Incoming legacy (non-SSP) links with no stored
			 * link key get the longer pairing timeout.
			 */
			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_conn_add_sysfs(conn);

		if (test_bit(HCI_AUTH, &hdev->flags))
			conn->link_mode |= HCI_LM_AUTH;

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			conn->link_mode |= HCI_LM_ENCRYPT;

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}
	} else {
		conn->state = BT_CLOSED;
		if (conn->type == ACL_LINK)
			mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
					    conn->dst_type, ev->status);
	}

	/* Trigger any SCO connection that was waiting on this ACL */
	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

	if (ev->status) {
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_del(conn);
	} else if (ev->link_type != ACL_LINK)
		/* ACL links confirm later, after remote features arrive */
		hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
1750 
static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;
	__u8 flags = 0;

	BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
	       ev->link_type);

	/* Let the upper protocols veto or defer the incoming connection */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
				      &flags);

	if ((mask & HCI_LM_ACCEPT) &&
	    !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
		/* Connection accepted */
		struct inquiry_entry *ie;
		struct hci_conn *conn;

		hci_dev_lock(hdev);

		/* Refresh the cached device class from the request */
		ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
		if (ie)
			memcpy(ie->data.dev_class, ev->dev_class, 3);

		conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
					       &ev->bdaddr);
		if (!conn) {
			conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
			if (!conn) {
				BT_ERR("No memory for new connection");
				hci_dev_unlock(hdev);
				return;
			}
		}

		memcpy(conn->dev_class, ev->dev_class, 3);

		hci_dev_unlock(hdev);

		/* ACL links (or sync links on non-eSCO controllers when
		 * not deferred) are accepted with the basic command.
		 */
		if (ev->link_type == ACL_LINK ||
		    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
			struct hci_cp_accept_conn_req cp;
			conn->state = BT_CONNECT;

			bacpy(&cp.bdaddr, &ev->bdaddr);

			if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
				cp.role = 0x00; /* Become master */
			else
				cp.role = 0x01; /* Remain slave */

			hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp),
				     &cp);
		} else if (!(flags & HCI_PROTO_DEFER)) {
			/* eSCO-capable controller: accept with full
			 * synchronous connection parameters.
			 */
			struct hci_cp_accept_sync_conn_req cp;
			conn->state = BT_CONNECT;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			cp.pkt_type = cpu_to_le16(conn->pkt_type);

			cp.tx_bandwidth   = __constant_cpu_to_le32(0x00001f40);
			cp.rx_bandwidth   = __constant_cpu_to_le32(0x00001f40);
			cp.max_latency    = __constant_cpu_to_le16(0xffff);
			cp.content_format = cpu_to_le16(hdev->voice_setting);
			cp.retrans_effort = 0xff;

			hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
				     sizeof(cp), &cp);
		} else {
			/* Deferred: let the upper protocol decide later */
			conn->state = BT_CONNECT2;
			hci_proto_connect_cfm(conn, 0);
		}
	} else {
		/* Connection rejected */
		struct hci_cp_reject_conn_req cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_REJ_BAD_ADDR;
		hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
	}
}
1832 
1833 static u8 hci_to_mgmt_reason(u8 err)
1834 {
1835 	switch (err) {
1836 	case HCI_ERROR_CONNECTION_TIMEOUT:
1837 		return MGMT_DEV_DISCONN_TIMEOUT;
1838 	case HCI_ERROR_REMOTE_USER_TERM:
1839 	case HCI_ERROR_REMOTE_LOW_RESOURCES:
1840 	case HCI_ERROR_REMOTE_POWER_OFF:
1841 		return MGMT_DEV_DISCONN_REMOTE;
1842 	case HCI_ERROR_LOCAL_HOST_TERM:
1843 		return MGMT_DEV_DISCONN_LOCAL_HOST;
1844 	default:
1845 		return MGMT_DEV_DISCONN_UNKNOWN;
1846 	}
1847 }
1848 
static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->status == 0)
		conn->state = BT_CLOSED;

	/* Notify mgmt only for links it was told about (ACL/LE) */
	if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) &&
	    (conn->type == ACL_LINK || conn->type == LE_LINK)) {
		if (ev->status) {
			mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
					       conn->dst_type, ev->status);
		} else {
			u8 reason = hci_to_mgmt_reason(ev->reason);

			mgmt_device_disconnected(hdev, &conn->dst, conn->type,
						 conn->dst_type, reason);
		}
	}

	if (ev->status == 0) {
		/* flush_key marks link keys that must not survive the
		 * connection (e.g. non-persistent pairing).
		 */
		if (conn->type == ACL_LINK && conn->flush_key)
			hci_remove_link_key(hdev, &conn->dst);
		hci_proto_disconn_cfm(conn, ev->reason);
		hci_conn_del(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
1888 
static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		/* Legacy devices cannot be re-authenticated; keep the
		 * existing security level in that case.
		 */
		if (!hci_conn_ssp_enabled(conn) &&
		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
			BT_INFO("re-auth of legacy device is not possible.");
		} else {
			conn->link_mode |= HCI_LM_AUTH;
			conn->sec_level = conn->pending_sec_level;
		}
	} else {
		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
				 ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		/* SSP links go on to enable encryption before the
		 * connection is confirmed to the upper layers.
		 */
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_proto_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		/* hold+drop leaves a reference with the standard
		 * disconnect timeout armed.
		 */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

	/* An encryption request may have been waiting on authentication */
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status, 0x00);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
1954 
static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto check_auth;

	/* Feed the result (NULL name on failure) to the discovery
	 * name-resolution machinery.
	 */
	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	/* Kick off authentication if it is not already pending */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;
		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
1993 
static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		if (!ev->status) {
			if (ev->encrypt) {
				/* Encryption implies authentication */
				conn->link_mode |= HCI_LM_AUTH;
				conn->link_mode |= HCI_LM_ENCRYPT;
				conn->sec_level = conn->pending_sec_level;
			} else
				conn->link_mode &= ~HCI_LM_ENCRYPT;
		}

		clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

		/* A failed encryption change on an established link is
		 * treated as fatal: drop the connection.
		 */
		if (ev->status && conn->state == BT_CONNECTED) {
			hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
			hci_conn_drop(conn);
			goto unlock;
		}

		if (conn->state == BT_CONFIG) {
			if (!ev->status)
				conn->state = BT_CONNECTED;

			hci_proto_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		} else
			hci_encrypt_cfm(conn, ev->status, ev->encrypt);
	}

unlock:
	hci_dev_unlock(hdev);
}
2036 
2037 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2038 					     struct sk_buff *skb)
2039 {
2040 	struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2041 	struct hci_conn *conn;
2042 
2043 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2044 
2045 	hci_dev_lock(hdev);
2046 
2047 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2048 	if (conn) {
2049 		if (!ev->status)
2050 			conn->link_mode |= HCI_LM_SECURE;
2051 
2052 		clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2053 
2054 		hci_key_change_cfm(conn, ev->status);
2055 	}
2056 
2057 	hci_dev_unlock(hdev);
2058 }
2059 
static void hci_remote_features_evt(struct hci_dev *hdev,
				    struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* Cache page 0 of the remote feature mask */
	if (!ev->status)
		memcpy(conn->features[0], ev->features, 8);

	if (conn->state != BT_CONFIG)
		goto unlock;

	/* If both sides support SSP, fetch the extended features (page 1)
	 * before continuing the connection setup.
	 */
	if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
			     sizeof(cp), &cp);
		goto unlock;
	}

	/* Resolve the remote name before telling mgmt about the
	 * connection; otherwise notify mgmt now (name-less).
	 */
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0,
				      conn->dev_class);

	/* No authentication needed: connection setup is complete */
	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
2109 
/* Dispatch a Command Complete event to the per-opcode hci_cc_* handler,
 * then complete the pending request and restart the command queue if
 * the controller signalled it can accept more commands (ncmd).
 */
static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev = (void *) skb->data;
	/* First byte of the return parameters is the command status */
	u8 status = skb->data[sizeof(*ev)];
	__u16 opcode;

	/* Strip the event header so handlers see only the parameters */
	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY_CANCEL:
		hci_cc_inquiry_cancel(hdev, skb);
		break;

	case HCI_OP_PERIODIC_INQ:
		hci_cc_periodic_inq(hdev, skb);
		break;

	case HCI_OP_EXIT_PERIODIC_INQ:
		hci_cc_exit_periodic_inq(hdev, skb);
		break;

	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
		hci_cc_remote_name_req_cancel(hdev, skb);
		break;

	case HCI_OP_ROLE_DISCOVERY:
		hci_cc_role_discovery(hdev, skb);
		break;

	case HCI_OP_READ_LINK_POLICY:
		hci_cc_read_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_LINK_POLICY:
		hci_cc_write_link_policy(hdev, skb);
		break;

	case HCI_OP_READ_DEF_LINK_POLICY:
		hci_cc_read_def_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_DEF_LINK_POLICY:
		hci_cc_write_def_link_policy(hdev, skb);
		break;

	case HCI_OP_RESET:
		hci_cc_reset(hdev, skb);
		break;

	case HCI_OP_WRITE_LOCAL_NAME:
		hci_cc_write_local_name(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_NAME:
		hci_cc_read_local_name(hdev, skb);
		break;

	case HCI_OP_WRITE_AUTH_ENABLE:
		hci_cc_write_auth_enable(hdev, skb);
		break;

	case HCI_OP_WRITE_ENCRYPT_MODE:
		hci_cc_write_encrypt_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SCAN_ENABLE:
		hci_cc_write_scan_enable(hdev, skb);
		break;

	case HCI_OP_READ_CLASS_OF_DEV:
		hci_cc_read_class_of_dev(hdev, skb);
		break;

	case HCI_OP_WRITE_CLASS_OF_DEV:
		hci_cc_write_class_of_dev(hdev, skb);
		break;

	case HCI_OP_READ_VOICE_SETTING:
		hci_cc_read_voice_setting(hdev, skb);
		break;

	case HCI_OP_WRITE_VOICE_SETTING:
		hci_cc_write_voice_setting(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_MODE:
		hci_cc_write_ssp_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_VERSION:
		hci_cc_read_local_version(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_COMMANDS:
		hci_cc_read_local_commands(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_FEATURES:
		hci_cc_read_local_features(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_EXT_FEATURES:
		hci_cc_read_local_ext_features(hdev, skb);
		break;

	case HCI_OP_READ_BUFFER_SIZE:
		hci_cc_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_READ_BD_ADDR:
		hci_cc_read_bd_addr(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
		hci_cc_read_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
		hci_cc_write_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_TYPE:
		hci_cc_read_page_scan_type(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_TYPE:
		hci_cc_write_page_scan_type(hdev, skb);
		break;

	case HCI_OP_READ_DATA_BLOCK_SIZE:
		hci_cc_read_data_block_size(hdev, skb);
		break;

	case HCI_OP_READ_FLOW_CONTROL_MODE:
		hci_cc_read_flow_control_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_INFO:
		hci_cc_read_local_amp_info(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_ASSOC:
		hci_cc_read_local_amp_assoc(hdev, skb);
		break;

	case HCI_OP_READ_INQ_RSP_TX_POWER:
		hci_cc_read_inq_rsp_tx_power(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_REPLY:
		hci_cc_pin_code_reply(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_NEG_REPLY:
		hci_cc_pin_code_neg_reply(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_DATA:
		hci_cc_read_local_oob_data_reply(hdev, skb);
		break;

	case HCI_OP_LE_READ_BUFFER_SIZE:
		hci_cc_le_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_LE_READ_LOCAL_FEATURES:
		hci_cc_le_read_local_features(hdev, skb);
		break;

	case HCI_OP_LE_READ_ADV_TX_POWER:
		hci_cc_le_read_adv_tx_power(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_REPLY:
		hci_cc_user_confirm_reply(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_NEG_REPLY:
		hci_cc_user_confirm_neg_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_REPLY:
		hci_cc_user_passkey_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_NEG_REPLY:
		hci_cc_user_passkey_neg_reply(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_PARAM:
		hci_cc_le_set_scan_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_ENABLE:
		hci_cc_le_set_adv_enable(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_ENABLE:
		hci_cc_le_set_scan_enable(hdev, skb);
		break;

	case HCI_OP_LE_READ_WHITE_LIST_SIZE:
		hci_cc_le_read_white_list_size(hdev, skb);
		break;

	case HCI_OP_LE_READ_SUPPORTED_STATES:
		hci_cc_le_read_supported_states(hdev, skb);
		break;

	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
		hci_cc_write_le_host_supported(hdev, skb);
		break;

	case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
		hci_cc_write_remote_amp_assoc(hdev, skb);
		break;

	default:
		/* Unhandled opcodes are logged but otherwise ignored */
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	/* Any real command completing cancels the command timeout */
	if (opcode != HCI_OP_NOP)
		del_timer(&hdev->cmd_timer);

	hci_req_cmd_complete(hdev, opcode, status);

	/* Re-arm the command queue if the controller has credits and
	 * we are not in the middle of a reset.
	 */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
2345 
/* Handle HCI Command Status event.
 *
 * Dispatches the status byte to the per-command handler for commands
 * that only produce a Command Status (no Command Complete), then does
 * the common command-queue bookkeeping: stop the command timeout
 * timer, complete a pending request where appropriate, and, if the
 * controller advertises room for more commands, kick the command
 * work queue.
 */
static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_status *ev = (void *) skb->data;
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY:
		hci_cs_inquiry(hdev, ev->status);
		break;

	case HCI_OP_CREATE_CONN:
		hci_cs_create_conn(hdev, ev->status);
		break;

	case HCI_OP_ADD_SCO:
		hci_cs_add_sco(hdev, ev->status);
		break;

	case HCI_OP_AUTH_REQUESTED:
		hci_cs_auth_requested(hdev, ev->status);
		break;

	case HCI_OP_SET_CONN_ENCRYPT:
		hci_cs_set_conn_encrypt(hdev, ev->status);
		break;

	case HCI_OP_REMOTE_NAME_REQ:
		hci_cs_remote_name_req(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_FEATURES:
		hci_cs_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_EXT_FEATURES:
		hci_cs_read_remote_ext_features(hdev, ev->status);
		break;

	case HCI_OP_SETUP_SYNC_CONN:
		hci_cs_setup_sync_conn(hdev, ev->status);
		break;

	case HCI_OP_SNIFF_MODE:
		hci_cs_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_EXIT_SNIFF_MODE:
		hci_cs_exit_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_DISCONNECT:
		hci_cs_disconnect(hdev, ev->status);
		break;

	case HCI_OP_LE_CREATE_CONN:
		hci_cs_le_create_conn(hdev, ev->status);
		break;

	case HCI_OP_CREATE_PHY_LINK:
		hci_cs_create_phylink(hdev, ev->status);
		break;

	case HCI_OP_ACCEPT_PHY_LINK:
		hci_cs_accept_phylink(hdev, ev->status);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	/* HCI_OP_NOP statuses are controller-generated and do not match
	 * a command we sent, so leave the command timer running.
	 */
	if (opcode != HCI_OP_NOP)
		del_timer(&hdev->cmd_timer);

	/* Complete the request now if the command failed, or if the sent
	 * command is not waiting for a specific follow-up event.
	 */
	if (ev->status ||
	    (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
		hci_req_cmd_complete(hdev, opcode, ev->status);

	/* Allow the next queued command unless a reset is in progress */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
2434 
2435 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2436 {
2437 	struct hci_ev_role_change *ev = (void *) skb->data;
2438 	struct hci_conn *conn;
2439 
2440 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2441 
2442 	hci_dev_lock(hdev);
2443 
2444 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2445 	if (conn) {
2446 		if (!ev->status) {
2447 			if (ev->role)
2448 				conn->link_mode &= ~HCI_LM_MASTER;
2449 			else
2450 				conn->link_mode |= HCI_LM_MASTER;
2451 		}
2452 
2453 		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2454 
2455 		hci_role_switch_cfm(conn, ev->status, ev->role);
2456 	}
2457 
2458 	hci_dev_unlock(hdev);
2459 }
2460 
/* Handle HCI Number of Completed Packets event.
 *
 * Credits back the transmit quota consumed by each reported connection
 * handle and kicks the TX work queue so queued data can be sent.
 * Only valid when the controller uses packet-based flow control.
 */
static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
	int i;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* The first length check guarantees the fixed header (and thus
	 * ev->num_hndl) is present before the handle array is sized.
	 */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_pkts_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16  handle, count;

		handle = __le16_to_cpu(info->handle);
		count  = __le16_to_cpu(info->count);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= count;

		/* Return credits to the per-link-type counter, clamped
		 * to the controller-advertised buffer count.
		 */
		switch (conn->type) {
		case ACL_LINK:
			hdev->acl_cnt += count;
			if (hdev->acl_cnt > hdev->acl_pkts)
				hdev->acl_cnt = hdev->acl_pkts;
			break;

		case LE_LINK:
			/* Controllers without dedicated LE buffers
			 * (le_pkts == 0) share the ACL buffer pool.
			 */
			if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		case SCO_LINK:
			hdev->sco_cnt += count;
			if (hdev->sco_cnt > hdev->sco_pkts)
				hdev->sco_cnt = hdev->sco_pkts;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
2526 
2527 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
2528 						 __u16 handle)
2529 {
2530 	struct hci_chan *chan;
2531 
2532 	switch (hdev->dev_type) {
2533 	case HCI_BREDR:
2534 		return hci_conn_hash_lookup_handle(hdev, handle);
2535 	case HCI_AMP:
2536 		chan = hci_chan_lookup_handle(hdev, handle);
2537 		if (chan)
2538 			return chan->conn;
2539 		break;
2540 	default:
2541 		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2542 		break;
2543 	}
2544 
2545 	return NULL;
2546 }
2547 
/* Handle HCI Number of Completed Data Blocks event.
 *
 * Block-based counterpart of hci_num_comp_pkts_evt(): returns data
 * block credits for each reported handle and wakes the TX work queue.
 * Only valid when the controller uses block-based flow control.
 */
static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
	int i;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* First check ensures ev->num_hndl is readable before it is
	 * used to size-check the handle array.
	 */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
	       ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_blocks_info *info = &ev->handles[i];
		struct hci_conn *conn = NULL;
		__u16  handle, block_count;

		handle = __le16_to_cpu(info->handle);
		block_count = __le16_to_cpu(info->blocks);

		/* AMP logical link handles need the chan-aware lookup */
		conn = __hci_conn_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= block_count;

		switch (conn->type) {
		case ACL_LINK:
		case AMP_LINK:
			/* Clamp to the controller's total block count */
			hdev->block_cnt += block_count;
			if (hdev->block_cnt > hdev->num_blocks)
				hdev->block_cnt = hdev->num_blocks;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
2597 
/* Handle HCI Mode Change event (active/sniff/hold).
 *
 * Caches the new mode and interval on the connection, tracks the
 * power-save flag for mode changes we did not request, and finishes
 * any SCO setup that was deferred on this ACL link.
 */
static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_mode_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		conn->mode = ev->mode;
		conn->interval = __le16_to_cpu(ev->interval);

		/* Only adjust the power-save flag when the change was
		 * not initiated by us (no pending mode change request).
		 */
		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
					&conn->flags)) {
			if (conn->mode == HCI_CM_ACTIVE)
				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
			else
				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
		}

		/* Complete SCO setup that waited for this mode change */
		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, ev->status);
	}

	hci_dev_unlock(hdev);
}
2626 
/* Handle HCI PIN Code Request event.
 *
 * If the device is not pairable the request is rejected immediately;
 * otherwise it is forwarded to the management interface, flagging
 * whether a secure (16 digit) PIN is needed for the pending security
 * level.
 */
static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pin_code_req *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Extend the disconnect timeout while pairing is in progress;
	 * the hold/drop pair applies the new timeout without keeping an
	 * extra reference.
	 */
	if (conn->state == BT_CONNECTED) {
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
		hci_conn_drop(conn);
	}

	if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
	else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
		u8 secure;

		/* High security requires a 16 digit PIN code */
		if (conn->pending_sec_level == BT_SECURITY_HIGH)
			secure = 1;
		else
			secure = 0;

		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
	}

unlock:
	hci_dev_unlock(hdev);
}
2663 
/* Handle HCI Link Key Request event.
 *
 * Looks up a stored link key for the requesting device and replies
 * with it, unless security policy forbids its use (debug keys when
 * debug keys are disabled, unauthenticated keys when MITM protection
 * is required, or short-PIN combination keys for high security).
 * Sends a negative reply when no acceptable key exists.
 */
static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = (void *) skb->data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	BT_DBG("%s", hdev->name);

	/* Without host-side link key storage the controller handles
	 * (or fails) the request on its own.
	 */
	if (!test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		BT_DBG("%s link key not found for %pMR", hdev->name,
		       &ev->bdaddr);
		goto not_found;
	}

	BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
	       &ev->bdaddr);

	if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
	    key->type == HCI_LK_DEBUG_COMBINATION) {
		BT_DBG("%s ignoring debug key", hdev->name);
		goto not_found;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		/* Reject an unauthenticated key when the connection's
		 * auth type requires MITM protection (low bit set).
		 */
		if (key->type == HCI_LK_UNAUTH_COMBINATION &&
		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
			BT_DBG("%s ignoring unauthenticated key", hdev->name);
			goto not_found;
		}

		/* Combination keys from short PINs are not strong
		 * enough for high security.
		 */
		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
		    conn->pending_sec_level == BT_SECURITY_HIGH) {
			BT_DBG("%s ignoring key unauthenticated for high security",
			       hdev->name);
			goto not_found;
		}

		conn->key_type = key->type;
		conn->pin_length = key->pin_len;
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}
2726 
/* Handle HCI Link Key Notification event.
 *
 * Records the key type on the connection and stores the new link key
 * when host-side key storage is enabled.
 */
static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = (void *) skb->data;
	struct hci_conn *conn;
	u8 pin_len = 0;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		/* Hold/drop pair applies the normal disconnect timeout
		 * now that pairing has produced a key.
		 */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		pin_len = conn->pin_length;

		/* A changed-combination key keeps the original type */
		if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
			conn->key_type = ev->key_type;

		hci_conn_drop(conn);
	}

	/* NOTE(review): conn may be NULL here; hci_add_link_key appears
	 * to be expected to cope with that — confirm at its definition.
	 */
	if (test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
		hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
				 ev->key_type, pin_len);

	hci_dev_unlock(hdev);
}
2755 
2756 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
2757 {
2758 	struct hci_ev_clock_offset *ev = (void *) skb->data;
2759 	struct hci_conn *conn;
2760 
2761 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2762 
2763 	hci_dev_lock(hdev);
2764 
2765 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2766 	if (conn && !ev->status) {
2767 		struct inquiry_entry *ie;
2768 
2769 		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2770 		if (ie) {
2771 			ie->data.clock_offset = ev->clock_offset;
2772 			ie->timestamp = jiffies;
2773 		}
2774 	}
2775 
2776 	hci_dev_unlock(hdev);
2777 }
2778 
2779 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2780 {
2781 	struct hci_ev_pkt_type_change *ev = (void *) skb->data;
2782 	struct hci_conn *conn;
2783 
2784 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2785 
2786 	hci_dev_lock(hdev);
2787 
2788 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2789 	if (conn && !ev->status)
2790 		conn->pkt_type = __le16_to_cpu(ev->pkt_type);
2791 
2792 	hci_dev_unlock(hdev);
2793 }
2794 
2795 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
2796 {
2797 	struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
2798 	struct inquiry_entry *ie;
2799 
2800 	BT_DBG("%s", hdev->name);
2801 
2802 	hci_dev_lock(hdev);
2803 
2804 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2805 	if (ie) {
2806 		ie->data.pscan_rep_mode = ev->pscan_rep_mode;
2807 		ie->timestamp = jiffies;
2808 	}
2809 
2810 	hci_dev_unlock(hdev);
2811 }
2812 
/* Handle HCI Inquiry Result with RSSI event.
 *
 * The event body is either an array of inquiry_info_with_rssi or of
 * the longer inquiry_info_with_rssi_and_pscan_mode records; the two
 * formats are distinguished by dividing the payload size by the
 * number of responses.  Each record updates the inquiry cache and is
 * reported to the management interface.
 */
static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct inquiry_data data;
	int num_rsp = *((__u8 *) skb->data);
	bool name_known, ssp;

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	/* Results of a periodic inquiry are not forwarded */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	/* Record size != sizeof(inquiry_info_with_rssi) implies the
	 * variant that also carries the page scan mode.
	 */
	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
		struct inquiry_info_with_rssi_and_pscan_mode *info;
		info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode	= info->pscan_rep_mode;
			data.pscan_period_mode	= info->pscan_period_mode;
			data.pscan_mode		= info->pscan_mode;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset	= info->clock_offset;
			data.rssi		= info->rssi;
			data.ssp_mode		= 0x00;

			name_known = hci_inquiry_cache_update(hdev, &data,
							      false, &ssp);
			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  !name_known, ssp, NULL, 0);
		}
	} else {
		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode	= info->pscan_rep_mode;
			data.pscan_period_mode	= info->pscan_period_mode;
			data.pscan_mode		= 0x00;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset	= info->clock_offset;
			data.rssi		= info->rssi;
			data.ssp_mode		= 0x00;
			name_known = hci_inquiry_cache_update(hdev, &data,
							      false, &ssp);
			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  !name_known, ssp, NULL, 0);
		}
	}

	hci_dev_unlock(hdev);
}
2872 
/* Handle HCI Read Remote Extended Features Complete event.
 *
 * Stores the requested feature page on the connection, keeps the
 * SSP-enabled state in sync for page 1 (host features), and continues
 * connection setup: request the remote name or report the device as
 * connected, then finish the connect confirmation when no outgoing
 * authentication is needed.
 */
static void hci_remote_ext_features_evt(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->page < HCI_MAX_PAGES)
		memcpy(conn->features[ev->page], ev->features, 8);

	/* Page 0x01 carries the remote host features, including SSP */
	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

		if (ev->features[0] & LMP_HOST_SSP) {
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		} else {
			/* It is mandatory by the Bluetooth specification that
			 * Extended Inquiry Results are only used when Secure
			 * Simple Pairing is enabled, but some devices violate
			 * this.
			 *
			 * To make these devices work, the internal SSP
			 * enabled flag needs to be cleared if the remote host
			 * features do not indicate SSP support */
			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		}
	}

	if (conn->state != BT_CONFIG)
		goto unlock;

	/* Resolve the remote name before announcing the connection to
	 * the management interface.
	 */
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0,
				      conn->dev_class);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
2935 
/* Handle HCI Synchronous Connection Complete event (SCO/eSCO).
 *
 * Finalizes a successful SCO or eSCO connection, or — for a known set
 * of eSCO setup failures — retries once with a downgraded packet type
 * before giving up and tearing the connection down.
 */
static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* An eSCO attempt may have been downgraded by the
		 * controller to SCO; retry the lookup as eSCO and
		 * adopt the reported link type.
		 */
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	switch (ev->status) {
	case 0x00:
		conn->handle = __le16_to_cpu(ev->handle);
		conn->state  = BT_CONNECTED;

		hci_conn_add_sysfs(conn);
		break;

	case 0x11:	/* Unsupported Feature or Parameter Value */
	case 0x1c:	/* SCO interval rejected */
	case 0x1a:	/* Unsupported Remote Feature */
	case 0x1f:	/* Unspecified error */
		/* Retry once as outgoing initiator with a packet type
		 * restricted by the controller's eSCO capabilities.
		 */
		if (conn->out && conn->attempt < 2) {
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
			hci_setup_sync(conn, conn->link->handle);
			goto unlock;
		}
		/* fall through */

	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_proto_connect_cfm(conn, ev->status);
	if (ev->status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
2990 
/* Handle HCI Extended Inquiry Result event.
 *
 * Each response carries EIR data in addition to the classic inquiry
 * fields.  The inquiry cache is updated and each device is reported
 * to the management interface along with its EIR payload; name
 * resolution is skipped when the EIR already contains the complete
 * name.
 */
static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct inquiry_data data;
	struct extended_inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp = *((__u8 *) skb->data);
	size_t eir_len;

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	/* Results of a periodic inquiry are not forwarded */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	for (; num_rsp; num_rsp--, info++) {
		bool name_known, ssp;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode	= info->pscan_rep_mode;
		data.pscan_period_mode	= info->pscan_period_mode;
		data.pscan_mode		= 0x00;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset	= info->clock_offset;
		data.rssi		= info->rssi;
		data.ssp_mode		= 0x01;

		/* A complete name in the EIR makes a separate remote
		 * name request unnecessary.
		 */
		if (test_bit(HCI_MGMT, &hdev->dev_flags))
			name_known = eir_has_data_type(info->data,
						       sizeof(info->data),
						       EIR_NAME_COMPLETE);
		else
			name_known = true;

		name_known = hci_inquiry_cache_update(hdev, &data, name_known,
						      &ssp);
		eir_len = eir_get_length(info->data, sizeof(info->data));
		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, info->rssi, !name_known,
				  ssp, info->data, eir_len);
	}

	hci_dev_unlock(hdev);
}
3038 
/* Handle HCI Encryption Key Refresh Complete event.
 *
 * Promotes the pending security level on success.  A failure on an
 * established connection triggers a disconnect with an authentication
 * failure reason; otherwise the result is propagated as a connect or
 * auth confirmation depending on the connection state.
 */
static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
	       __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		/* Hold/drop pair applies the normal disconnect timeout */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3082 
3083 static u8 hci_get_auth_req(struct hci_conn *conn)
3084 {
3085 	/* If remote requests dedicated bonding follow that lead */
3086 	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
3087 		/* If both remote and local IO capabilities allow MITM
3088 		 * protection then require it, otherwise don't */
3089 		if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
3090 			return 0x02;
3091 		else
3092 			return 0x03;
3093 	}
3094 
3095 	/* If remote requests no-bonding follow that lead */
3096 	if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
3097 		return conn->remote_auth | (conn->auth_type & 0x01);
3098 
3099 	return conn->auth_type;
3100 }
3101 
/* Handle HCI IO Capability Request event.
 *
 * Replies with our IO capability and authentication requirements when
 * pairing is allowed (device pairable, or the remote side only wants
 * no-bonding); otherwise sends a negative reply rejecting the
 * pairing.
 */
static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Keep the connection alive for the duration of the pairing;
	 * NOTE(review): the matching drop appears to happen in the
	 * pairing-complete path — confirm.
	 */
	hci_conn_hold(conn);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
						0x01 : conn->io_capability;
		conn->auth_type = hci_get_auth_req(conn);
		cp.authentication = conn->auth_type;

		/* Advertise OOB data when we have it for this device
		 * and we are the initiator or the remote offered OOB.
		 */
		if (hci_find_remote_oob_data(hdev, &conn->dst) &&
		    (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
			cp.oob_data = 0x01;
		else
			cp.oob_data = 0x00;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3153 
3154 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3155 {
3156 	struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3157 	struct hci_conn *conn;
3158 
3159 	BT_DBG("%s", hdev->name);
3160 
3161 	hci_dev_lock(hdev);
3162 
3163 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3164 	if (!conn)
3165 		goto unlock;
3166 
3167 	conn->remote_cap = ev->capability;
3168 	conn->remote_auth = ev->authentication;
3169 	if (ev->oob_data)
3170 		set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3171 
3172 unlock:
3173 	hci_dev_unlock(hdev);
3174 }
3175 
/* Handle HCI User Confirmation Request event.
 *
 * Decides between rejecting the numeric-comparison request (remote
 * cannot provide required MITM protection), auto-accepting it
 * (no side needs MITM), or forwarding it to user space via the
 * management interface.
 */
static void hci_user_confirm_request_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Low bit of the auth requirements is the MITM flag */
	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. The only exception is when we're dedicated bonding
	 * initiators (connect_cfm_cb set) since then we always have the MITM
	 * bit set. */
	if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == 0x03) &&
	    (!rem_mitm || conn->io_capability == 0x03)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		/* Optionally delay the auto-accept via a per-connection
		 * timer instead of replying immediately.
		 */
		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			mod_timer(&conn->auto_accept_timer, jiffies + delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, ev->passkey,
				  confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
3243 
3244 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
3245 					 struct sk_buff *skb)
3246 {
3247 	struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3248 
3249 	BT_DBG("%s", hdev->name);
3250 
3251 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
3252 		mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3253 }
3254 
3255 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
3256 					struct sk_buff *skb)
3257 {
3258 	struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
3259 	struct hci_conn *conn;
3260 
3261 	BT_DBG("%s", hdev->name);
3262 
3263 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3264 	if (!conn)
3265 		return;
3266 
3267 	conn->passkey_notify = __le32_to_cpu(ev->passkey);
3268 	conn->passkey_entered = 0;
3269 
3270 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
3271 		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3272 					 conn->dst_type, conn->passkey_notify,
3273 					 conn->passkey_entered);
3274 }
3275 
/* Handle HCI Keypress Notification event (Secure Simple Pairing).
 *
 * Tracks how many passkey digits the remote device has entered so far
 * and forwards the running count to the management interface.  Note
 * the asymmetry: STARTED and COMPLETED return without notifying,
 * while ENTERED/ERASED/CLEARED fall through to the notification.
 */
static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_keypress_notify *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		return;

	switch (ev->type) {
	case HCI_KEYPRESS_STARTED:
		conn->passkey_entered = 0;
		/* No count update to report yet */
		return;

	case HCI_KEYPRESS_ENTERED:
		conn->passkey_entered++;
		break;

	case HCI_KEYPRESS_ERASED:
		conn->passkey_entered--;
		break;

	case HCI_KEYPRESS_CLEARED:
		conn->passkey_entered = 0;
		break;

	case HCI_KEYPRESS_COMPLETED:
		return;
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
					 conn->dst_type, conn->passkey_notify,
					 conn->passkey_entered);
}
3313 
/* Handle HCI Simple Pairing Complete event.
 *
 * Reports pairing failures to the management interface — but only for
 * pairings we did not initiate, to avoid duplicating the auth_failed
 * event that the Authentication Complete path already produces — and
 * releases the reference taken when the IO capability exchange began.
 */
static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* To avoid duplicate auth_failed events to user space we check
	 * the HCI_CONN_AUTH_PEND flag which will be set if we
	 * initiated the authentication. A traditional auth_complete
	 * event gets always produced as initiator and is also mapped to
	 * the mgmt_auth_failed event */
	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
				 ev->status);

	hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}
3342 
/* Handle HCI Remote Host Supported Features Notification event.
 *
 * Stores the remote host feature page (page 1) on any existing
 * connection and keeps the cached SSP mode for the device up to date
 * in the inquiry cache.
 */
static void hci_remote_host_features_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_remote_host_features *ev = (void *) skb->data;
	struct inquiry_entry *ie;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* Index 1 holds the host feature page */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn)
		memcpy(conn->features[1], ev->features, 8);

	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

	hci_dev_unlock(hdev);
}
3364 
3365 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
3366 					    struct sk_buff *skb)
3367 {
3368 	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
3369 	struct oob_data *data;
3370 
3371 	BT_DBG("%s", hdev->name);
3372 
3373 	hci_dev_lock(hdev);
3374 
3375 	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3376 		goto unlock;
3377 
3378 	data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
3379 	if (data) {
3380 		struct hci_cp_remote_oob_data_reply cp;
3381 
3382 		bacpy(&cp.bdaddr, &ev->bdaddr);
3383 		memcpy(cp.hash, data->hash, sizeof(cp.hash));
3384 		memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));
3385 
3386 		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
3387 			     &cp);
3388 	} else {
3389 		struct hci_cp_remote_oob_data_neg_reply cp;
3390 
3391 		bacpy(&cp.bdaddr, &ev->bdaddr);
3392 		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
3393 			     &cp);
3394 	}
3395 
3396 unlock:
3397 	hci_dev_unlock(hdev);
3398 }
3399 
3400 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
3401 				      struct sk_buff *skb)
3402 {
3403 	struct hci_ev_phy_link_complete *ev = (void *) skb->data;
3404 	struct hci_conn *hcon, *bredr_hcon;
3405 
3406 	BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
3407 	       ev->status);
3408 
3409 	hci_dev_lock(hdev);
3410 
3411 	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3412 	if (!hcon) {
3413 		hci_dev_unlock(hdev);
3414 		return;
3415 	}
3416 
3417 	if (ev->status) {
3418 		hci_conn_del(hcon);
3419 		hci_dev_unlock(hdev);
3420 		return;
3421 	}
3422 
3423 	bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
3424 
3425 	hcon->state = BT_CONNECTED;
3426 	bacpy(&hcon->dst, &bredr_hcon->dst);
3427 
3428 	hci_conn_hold(hcon);
3429 	hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3430 	hci_conn_drop(hcon);
3431 
3432 	hci_conn_add_sysfs(hcon);
3433 
3434 	amp_physical_cfm(bredr_hcon, hcon);
3435 
3436 	hci_dev_unlock(hdev);
3437 }
3438 
3439 static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3440 {
3441 	struct hci_ev_logical_link_complete *ev = (void *) skb->data;
3442 	struct hci_conn *hcon;
3443 	struct hci_chan *hchan;
3444 	struct amp_mgr *mgr;
3445 
3446 	BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
3447 	       hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
3448 	       ev->status);
3449 
3450 	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3451 	if (!hcon)
3452 		return;
3453 
3454 	/* Create AMP hchan */
3455 	hchan = hci_chan_create(hcon);
3456 	if (!hchan)
3457 		return;
3458 
3459 	hchan->handle = le16_to_cpu(ev->handle);
3460 
3461 	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
3462 
3463 	mgr = hcon->amp_mgr;
3464 	if (mgr && mgr->bredr_chan) {
3465 		struct l2cap_chan *bredr_chan = mgr->bredr_chan;
3466 
3467 		l2cap_chan_lock(bredr_chan);
3468 
3469 		bredr_chan->conn->mtu = hdev->block_mtu;
3470 		l2cap_logical_cfm(bredr_chan, hchan, 0);
3471 		hci_conn_hold(hcon);
3472 
3473 		l2cap_chan_unlock(bredr_chan);
3474 	}
3475 }
3476 
3477 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
3478 					     struct sk_buff *skb)
3479 {
3480 	struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
3481 	struct hci_chan *hchan;
3482 
3483 	BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
3484 	       le16_to_cpu(ev->handle), ev->status);
3485 
3486 	if (ev->status)
3487 		return;
3488 
3489 	hci_dev_lock(hdev);
3490 
3491 	hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
3492 	if (!hchan)
3493 		goto unlock;
3494 
3495 	amp_destroy_logical_link(hchan, ev->reason);
3496 
3497 unlock:
3498 	hci_dev_unlock(hdev);
3499 }
3500 
3501 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
3502 					     struct sk_buff *skb)
3503 {
3504 	struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
3505 	struct hci_conn *hcon;
3506 
3507 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3508 
3509 	if (ev->status)
3510 		return;
3511 
3512 	hci_dev_lock(hdev);
3513 
3514 	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3515 	if (hcon) {
3516 		hcon->state = BT_CLOSED;
3517 		hci_conn_del(hcon);
3518 	}
3519 
3520 	hci_dev_unlock(hdev);
3521 }
3522 
/* Handle the LE Connection Complete event: bind the event to a pending
 * outgoing LE connection, or create a new connection object for it, then
 * report success or failure to the management interface.
 */
static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	/* An outgoing attempt sits in the hash in BT_CONNECT state; if no
	 * such entry exists this event was not initiated by us, so a fresh
	 * connection object is created for it.
	 */
	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (!conn) {
		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
		if (!conn) {
			BT_ERR("No memory for new connection");
			goto unlock;
		}

		conn->dst_type = ev->bdaddr_type;

		/* Master role means we are the initiator of this link */
		if (ev->role == LE_CONN_ROLE_MASTER) {
			conn->out = true;
			conn->link_mode |= HCI_LM_MASTER;
		}
	}

	if (ev->status) {
		/* NOTE(review): mgmt_connect_failed is emitted even for
		 * connections we did not initiate (conn->out false) —
		 * confirm this is the intended user-space behavior. */
		mgmt_connect_failed(hdev, &conn->dst, conn->type,
				    conn->dst_type, ev->status);
		hci_proto_connect_cfm(conn, ev->status);
		conn->state = BT_CLOSED;
		hci_conn_del(conn);
		goto unlock;
	}

	/* Announce the device once; the flag guards against duplicates */
	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &ev->bdaddr, conn->type,
				      conn->dst_type, 0, NULL, 0, NULL);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = __le16_to_cpu(ev->handle);
	conn->state = BT_CONNECTED;

	hci_conn_add_sysfs(conn);

	hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);
}
3572 
3573 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
3574 {
3575 	u8 num_reports = skb->data[0];
3576 	void *ptr = &skb->data[1];
3577 	s8 rssi;
3578 
3579 	while (num_reports--) {
3580 		struct hci_ev_le_advertising_info *ev = ptr;
3581 
3582 		rssi = ev->data[ev->length];
3583 		mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type,
3584 				  NULL, rssi, 0, 1, ev->data, ev->length);
3585 
3586 		ptr += sizeof(*ev) + ev->length + 1;
3587 	}
3588 }
3589 
/* Handle the LE Long Term Key Request subevent: look up a stored LTK for
 * the connection's EDiv/Rand pair and hand it to the controller, or send
 * a negative reply when the connection or key is unknown.
 */
static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	ltk = hci_find_ltk(hdev, ev->ediv, ev->random);
	if (ltk == NULL)
		goto not_found;

	memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
	cp.handle = cpu_to_le16(conn->handle);

	/* NOTE(review): sec_level is raised here, before the controller
	 * confirms encryption via the Encryption Change event — confirm
	 * this ordering is intended rather than using a pending level. */
	if (ltk->authenticated)
		conn->sec_level = BT_SECURITY_HIGH;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* Short term keys are valid for a single session only, so remove
	 * the key once it has been handed to the controller. */
	if (ltk->type & HCI_SMP_STK) {
		list_del(&ltk->list);
		kfree(ltk);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	/* ev->handle is already little-endian, as the reply expects */
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}
3632 
3633 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
3634 {
3635 	struct hci_ev_le_meta *le_ev = (void *) skb->data;
3636 
3637 	skb_pull(skb, sizeof(*le_ev));
3638 
3639 	switch (le_ev->subevent) {
3640 	case HCI_EV_LE_CONN_COMPLETE:
3641 		hci_le_conn_complete_evt(hdev, skb);
3642 		break;
3643 
3644 	case HCI_EV_LE_ADVERTISING_REPORT:
3645 		hci_le_adv_report_evt(hdev, skb);
3646 		break;
3647 
3648 	case HCI_EV_LE_LTK_REQ:
3649 		hci_le_ltk_request_evt(hdev, skb);
3650 		break;
3651 
3652 	default:
3653 		break;
3654 	}
3655 }
3656 
3657 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
3658 {
3659 	struct hci_ev_channel_selected *ev = (void *) skb->data;
3660 	struct hci_conn *hcon;
3661 
3662 	BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
3663 
3664 	skb_pull(skb, sizeof(*ev));
3665 
3666 	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3667 	if (!hcon)
3668 		return;
3669 
3670 	amp_read_loc_assoc_final_data(hdev, hcon);
3671 }
3672 
/* Entry point for every incoming HCI event packet: stash a copy for any
 * pending synchronous request, complete a sent command that was waiting
 * for this specific event, then dispatch to the per-event handler.
 * Consumes the skb.
 */
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	__u8 event = hdr->evt;

	hci_dev_lock(hdev);

	/* Received events are (currently) only needed when a request is
	 * ongoing so avoid unnecessary memory allocation.
	 */
	if (hdev->req_status == HCI_REQ_PEND) {
		kfree_skb(hdev->recv_evt);
		hdev->recv_evt = skb_clone(skb, GFP_KERNEL);
	}

	hci_dev_unlock(hdev);

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* If the pending command asked to be completed by this event
	 * (rather than by Command Complete/Status), finish it now. */
	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
		struct hci_command_hdr *hdr = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(hdr->opcode);

		hci_req_cmd_complete(hdev, opcode, 0);
	}

	/* Dispatch to the handler for this event code */
	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_KEY_REFRESH_COMPLETE:
		hci_key_refresh_complete_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_REQUEST:
		hci_user_passkey_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_NOTIFY:
		hci_user_passkey_notify_evt(hdev, skb);
		break;

	case HCI_EV_KEYPRESS_NOTIFY:
		hci_keypress_notify_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_CHANNEL_SELECTED:
		hci_chan_selected_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

	case HCI_EV_PHY_LINK_COMPLETE:
		hci_phy_link_complete_evt(hdev, skb);
		break;

	case HCI_EV_LOGICAL_LINK_COMPLETE:
		hci_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
		hci_disconn_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
		hci_disconn_phylink_complete_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_BLOCKS:
		hci_num_comp_blocks_evt(hdev, skb);
		break;

	default:
		BT_DBG("%s event 0x%2.2x", hdev->name, event);
		break;
	}

	kfree_skb(skb);
	hdev->stat.evt_rx++;
}
3876