xref: /openbmc/linux/net/bluetooth/hci_event.c (revision 31b90347)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4 
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI event handling. */
26 
27 #include <asm/unaligned.h>
28 
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
32 
33 #include "a2mp.h"
34 #include "amp.h"
35 
36 /* Handle HCI Event packets */
37 
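/* Command Complete handler for HCI_Inquiry_Cancel: on success, clear the
 * HCI_INQUIRY flag, wake up anyone waiting on that bit and check for
 * pending connection attempts.
 */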
38 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
39 {
40 	__u8 status = *((__u8 *) skb->data);
41 
42 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
43 
44 	if (status)
45 		return;
46 
47 	clear_bit(HCI_INQUIRY, &hdev->flags);
48 	smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */
49 	wake_up_bit(&hdev->flags, HCI_INQUIRY);
50 
51 	hci_conn_check_pending(hdev);
52 }
53 
54 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
55 {
56 	__u8 status = *((__u8 *) skb->data);
57 
58 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
59 
60 	if (status)
61 		return;
62 
63 	set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
64 }
65 
66 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
67 {
68 	__u8 status = *((__u8 *) skb->data);
69 
70 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
71 
72 	if (status)
73 		return;
74 
75 	clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
76 
77 	hci_conn_check_pending(hdev);
78 }
79 
80 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
81 					  struct sk_buff *skb)
82 {
83 	BT_DBG("%s", hdev->name);
84 }
85 
86 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
87 {
88 	struct hci_rp_role_discovery *rp = (void *) skb->data;
89 	struct hci_conn *conn;
90 
91 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
92 
93 	if (rp->status)
94 		return;
95 
96 	hci_dev_lock(hdev);
97 
98 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
99 	if (conn) {
100 		if (rp->role)
101 			conn->link_mode &= ~HCI_LM_MASTER;
102 		else
103 			conn->link_mode |= HCI_LM_MASTER;
104 	}
105 
106 	hci_dev_unlock(hdev);
107 }
108 
109 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
110 {
111 	struct hci_rp_read_link_policy *rp = (void *) skb->data;
112 	struct hci_conn *conn;
113 
114 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
115 
116 	if (rp->status)
117 		return;
118 
119 	hci_dev_lock(hdev);
120 
121 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
122 	if (conn)
123 		conn->link_policy = __le16_to_cpu(rp->policy);
124 
125 	hci_dev_unlock(hdev);
126 }
127 
128 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
129 {
130 	struct hci_rp_write_link_policy *rp = (void *) skb->data;
131 	struct hci_conn *conn;
132 	void *sent;
133 
134 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
135 
136 	if (rp->status)
137 		return;
138 
139 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
140 	if (!sent)
141 		return;
142 
143 	hci_dev_lock(hdev);
144 
145 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
146 	if (conn)
147 		conn->link_policy = get_unaligned_le16(sent + 2);
148 
149 	hci_dev_unlock(hdev);
150 }
151 
152 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
153 					struct sk_buff *skb)
154 {
155 	struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
156 
157 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
158 
159 	if (rp->status)
160 		return;
161 
162 	hdev->link_policy = __le16_to_cpu(rp->policy);
163 }
164 
165 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
166 					 struct sk_buff *skb)
167 {
168 	__u8 status = *((__u8 *) skb->data);
169 	void *sent;
170 
171 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
172 
173 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
174 	if (!sent)
175 		return;
176 
177 	if (!status)
178 		hdev->link_policy = get_unaligned_le16(sent);
179 }
180 
181 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
182 {
183 	__u8 status = *((__u8 *) skb->data);
184 
185 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
186 
187 	clear_bit(HCI_RESET, &hdev->flags);
188 
189 	/* Reset all non-persistent flags */
190 	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
191 
192 	hdev->discovery.state = DISCOVERY_STOPPED;
193 	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
194 	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
195 
196 	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
197 	hdev->adv_data_len = 0;
198 
199 	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
200 	hdev->scan_rsp_data_len = 0;
201 
202 	hdev->ssp_debug_mode = 0;
203 }
204 
205 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
206 {
207 	__u8 status = *((__u8 *) skb->data);
208 	void *sent;
209 
210 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
211 
212 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
213 	if (!sent)
214 		return;
215 
216 	hci_dev_lock(hdev);
217 
218 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
219 		mgmt_set_local_name_complete(hdev, sent, status);
220 	else if (!status)
221 		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
222 
223 	hci_dev_unlock(hdev);
224 }
225 
226 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
227 {
228 	struct hci_rp_read_local_name *rp = (void *) skb->data;
229 
230 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
231 
232 	if (rp->status)
233 		return;
234 
235 	if (test_bit(HCI_SETUP, &hdev->dev_flags))
236 		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
237 }
238 
239 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
240 {
241 	__u8 status = *((__u8 *) skb->data);
242 	void *sent;
243 
244 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
245 
246 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
247 	if (!sent)
248 		return;
249 
250 	if (!status) {
251 		__u8 param = *((__u8 *) sent);
252 
253 		if (param == AUTH_ENABLED)
254 			set_bit(HCI_AUTH, &hdev->flags);
255 		else
256 			clear_bit(HCI_AUTH, &hdev->flags);
257 	}
258 
259 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
260 		mgmt_auth_enable_complete(hdev, status);
261 }
262 
263 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
264 {
265 	__u8 status = *((__u8 *) skb->data);
266 	void *sent;
267 
268 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
269 
270 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
271 	if (!sent)
272 		return;
273 
274 	if (!status) {
275 		__u8 param = *((__u8 *) sent);
276 
277 		if (param)
278 			set_bit(HCI_ENCRYPT, &hdev->flags);
279 		else
280 			clear_bit(HCI_ENCRYPT, &hdev->flags);
281 	}
282 }
283 
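/* Command Complete handler for HCI_Write_Scan_Enable: on failure, report it
 * to mgmt and reset the discoverable timeout; on success, update the
 * HCI_ISCAN/HCI_PSCAN flags from the parameter that was sent and notify
 * mgmt about discoverable/connectable changes.
 */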
284 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
285 {
286 	__u8 param, status = *((__u8 *) skb->data);
287 	int old_pscan, old_iscan;
288 	void *sent;
289 
290 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
291 
292 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
293 	if (!sent)
294 		return;
295 
296 	param = *((__u8 *) sent);
297 
298 	hci_dev_lock(hdev);
299 
300 	if (status) {
301 		mgmt_write_scan_failed(hdev, param, status);
302 		hdev->discov_timeout = 0;
303 		goto done;
304 	}
305 
306 	/* We need to ensure that we set this back on if someone changed
307 	 * the scan mode through a raw HCI socket.
308 	 */
309 	set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
310 
311 	old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
312 	old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
313 
314 	if (param & SCAN_INQUIRY) {
315 		set_bit(HCI_ISCAN, &hdev->flags);
316 		if (!old_iscan)
317 			mgmt_discoverable(hdev, 1);
318 	} else if (old_iscan)
319 		mgmt_discoverable(hdev, 0);
320 
321 	if (param & SCAN_PAGE) {
322 		set_bit(HCI_PSCAN, &hdev->flags);
323 		if (!old_pscan)
324 			mgmt_connectable(hdev, 1);
325 	} else if (old_pscan)
326 		mgmt_connectable(hdev, 0);
327 
328 done:
329 	hci_dev_unlock(hdev);
330 }
331 
332 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
333 {
334 	struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
335 
336 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
337 
338 	if (rp->status)
339 		return;
340 
341 	memcpy(hdev->dev_class, rp->dev_class, 3);
342 
343 	BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
344 	       hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
345 }
346 
347 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
348 {
349 	__u8 status = *((__u8 *) skb->data);
350 	void *sent;
351 
352 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
353 
354 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
355 	if (!sent)
356 		return;
357 
358 	hci_dev_lock(hdev);
359 
360 	if (status == 0)
361 		memcpy(hdev->dev_class, sent, 3);
362 
363 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
364 		mgmt_set_class_of_dev_complete(hdev, sent, status);
365 
366 	hci_dev_unlock(hdev);
367 }
368 
369 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
370 {
371 	struct hci_rp_read_voice_setting *rp = (void *) skb->data;
372 	__u16 setting;
373 
374 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
375 
376 	if (rp->status)
377 		return;
378 
379 	setting = __le16_to_cpu(rp->voice_setting);
380 
381 	if (hdev->voice_setting == setting)
382 		return;
383 
384 	hdev->voice_setting = setting;
385 
386 	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
387 
388 	if (hdev->notify)
389 		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
390 }
391 
392 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
393 				       struct sk_buff *skb)
394 {
395 	__u8 status = *((__u8 *) skb->data);
396 	__u16 setting;
397 	void *sent;
398 
399 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
400 
401 	if (status)
402 		return;
403 
404 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
405 	if (!sent)
406 		return;
407 
408 	setting = get_unaligned_le16(sent);
409 
410 	if (hdev->voice_setting == setting)
411 		return;
412 
413 	hdev->voice_setting = setting;
414 
415 	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
416 
417 	if (hdev->notify)
418 		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
419 }
420 
421 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
422 					  struct sk_buff *skb)
423 {
424 	struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
425 
426 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
427 
428 	if (rp->status)
429 		return;
430 
431 	hdev->num_iac = rp->num_iac;
432 
433 	BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
434 }
435 
436 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
437 {
438 	__u8 status = *((__u8 *) skb->data);
439 	struct hci_cp_write_ssp_mode *sent;
440 
441 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
442 
443 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
444 	if (!sent)
445 		return;
446 
447 	if (!status) {
448 		if (sent->mode)
449 			hdev->features[1][0] |= LMP_HOST_SSP;
450 		else
451 			hdev->features[1][0] &= ~LMP_HOST_SSP;
452 	}
453 
454 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
455 		mgmt_ssp_enable_complete(hdev, sent->mode, status);
456 	else if (!status) {
457 		if (sent->mode)
458 			set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
459 		else
460 			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
461 	}
462 }
463 
464 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
465 {
466 	struct hci_rp_read_local_version *rp = (void *) skb->data;
467 
468 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
469 
470 	if (rp->status)
471 		return;
472 
473 	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
474 		hdev->hci_ver = rp->hci_ver;
475 		hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
476 		hdev->lmp_ver = rp->lmp_ver;
477 		hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
478 		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
479 	}
480 }
481 
482 static void hci_cc_read_local_commands(struct hci_dev *hdev,
483 				       struct sk_buff *skb)
484 {
485 	struct hci_rp_read_local_commands *rp = (void *) skb->data;
486 
487 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
488 
489 	if (!rp->status)
490 		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
491 }
492 
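/* Command Complete handler for HCI_Read_Local_Supported_Features: store
 * page 0 of the LMP features and derive the supported ACL packet types
 * and (e)SCO air modes from the individual feature bits.
 */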
493 static void hci_cc_read_local_features(struct hci_dev *hdev,
494 				       struct sk_buff *skb)
495 {
496 	struct hci_rp_read_local_features *rp = (void *) skb->data;
497 
498 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
499 
500 	if (rp->status)
501 		return;
502 
503 	memcpy(hdev->features, rp->features, 8);
504 
505 	/* Adjust the default settings according to the features
506 	 * supported by the device. */
507 
508 	if (hdev->features[0][0] & LMP_3SLOT)
509 		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
510 
511 	if (hdev->features[0][0] & LMP_5SLOT)
512 		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
513 
514 	if (hdev->features[0][1] & LMP_HV2) {
515 		hdev->pkt_type  |= (HCI_HV2);
516 		hdev->esco_type |= (ESCO_HV2);
517 	}
518 
519 	if (hdev->features[0][1] & LMP_HV3) {
520 		hdev->pkt_type  |= (HCI_HV3);
521 		hdev->esco_type |= (ESCO_HV3);
522 	}
523 
524 	if (lmp_esco_capable(hdev))
525 		hdev->esco_type |= (ESCO_EV3);
526 
527 	if (hdev->features[0][4] & LMP_EV4)
528 		hdev->esco_type |= (ESCO_EV4);
529 
530 	if (hdev->features[0][4] & LMP_EV5)
531 		hdev->esco_type |= (ESCO_EV5);
532 
533 	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
534 		hdev->esco_type |= (ESCO_2EV3);
535 
536 	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
537 		hdev->esco_type |= (ESCO_3EV3);
538 
539 	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
540 		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
541 
542 	BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
543 	       hdev->features[0][0], hdev->features[0][1],
544 	       hdev->features[0][2], hdev->features[0][3],
545 	       hdev->features[0][4], hdev->features[0][5],
546 	       hdev->features[0][6], hdev->features[0][7]);
547 }
548 
549 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
550 					   struct sk_buff *skb)
551 {
552 	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
553 
554 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
555 
556 	if (rp->status)
557 		return;
558 
559 	if (hdev->max_page < rp->max_page)
560 		hdev->max_page = rp->max_page;
561 
562 	if (rp->page < HCI_MAX_PAGES)
563 		memcpy(hdev->features[rp->page], rp->features, 8);
564 }
565 
566 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
567 					  struct sk_buff *skb)
568 {
569 	struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
570 
571 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
572 
573 	if (!rp->status)
574 		hdev->flow_ctl_mode = rp->mode;
575 }
576 
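/* Command Complete handler for HCI_Read_Buffer_Size: record the ACL/SCO
 * MTUs and packet counts reported by the controller, apply the buffer
 * size quirk if set, and initialize the flow control counters.
 */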
577 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
578 {
579 	struct hci_rp_read_buffer_size *rp = (void *) skb->data;
580 
581 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
582 
583 	if (rp->status)
584 		return;
585 
586 	hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
587 	hdev->sco_mtu  = rp->sco_mtu;
588 	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
589 	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
590 
591 	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
592 		hdev->sco_mtu  = 64;
593 		hdev->sco_pkts = 8;
594 	}
595 
596 	hdev->acl_cnt = hdev->acl_pkts;
597 	hdev->sco_cnt = hdev->sco_pkts;
598 
599 	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
600 	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
601 }
602 
603 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
604 {
605 	struct hci_rp_read_bd_addr *rp = (void *) skb->data;
606 
607 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
608 
609 	if (!rp->status)
610 		bacpy(&hdev->bdaddr, &rp->bdaddr);
611 }
612 
613 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
614 					   struct sk_buff *skb)
615 {
616 	struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
617 
618 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
619 
620 	if (test_bit(HCI_INIT, &hdev->flags) && !rp->status) {
621 		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
622 		hdev->page_scan_window = __le16_to_cpu(rp->window);
623 	}
624 }
625 
626 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
627 					    struct sk_buff *skb)
628 {
629 	u8 status = *((u8 *) skb->data);
630 	struct hci_cp_write_page_scan_activity *sent;
631 
632 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
633 
634 	if (status)
635 		return;
636 
637 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
638 	if (!sent)
639 		return;
640 
641 	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
642 	hdev->page_scan_window = __le16_to_cpu(sent->window);
643 }
644 
645 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
646 					   struct sk_buff *skb)
647 {
648 	struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
649 
650 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
651 
652 	if (test_bit(HCI_INIT, &hdev->flags) && !rp->status)
653 		hdev->page_scan_type = rp->type;
654 }
655 
656 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
657 					struct sk_buff *skb)
658 {
659 	u8 status = *((u8 *) skb->data);
660 	u8 *type;
661 
662 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
663 
664 	if (status)
665 		return;
666 
667 	type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
668 	if (type)
669 		hdev->page_scan_type = *type;
670 }
671 
672 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
673 					struct sk_buff *skb)
674 {
675 	struct hci_rp_read_data_block_size *rp = (void *) skb->data;
676 
677 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
678 
679 	if (rp->status)
680 		return;
681 
682 	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
683 	hdev->block_len = __le16_to_cpu(rp->block_len);
684 	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
685 
686 	hdev->block_cnt = hdev->num_blocks;
687 
688 	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
689 	       hdev->block_cnt, hdev->block_len);
690 }
691 
692 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
693 				       struct sk_buff *skb)
694 {
695 	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
696 
697 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
698 
699 	if (rp->status)
700 		goto a2mp_rsp;
701 
702 	hdev->amp_status = rp->amp_status;
703 	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
704 	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
705 	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
706 	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
707 	hdev->amp_type = rp->amp_type;
708 	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
709 	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
710 	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
711 	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
712 
713 a2mp_rsp:
714 	a2mp_send_getinfo_rsp(hdev);
715 }
716 
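/* Command Complete handler for HCI_Read_Local_AMP_ASSOC: reassemble the
 * fragmented local AMP assoc data. If more fragments remain, request the
 * next one; otherwise complete the assoc and send the A2MP responses.
 */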
717 static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
718 					struct sk_buff *skb)
719 {
720 	struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
721 	struct amp_assoc *assoc = &hdev->loc_assoc;
722 	size_t rem_len, frag_len;
723 
724 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
725 
726 	if (rp->status)
727 		goto a2mp_rsp;
728 
729 	frag_len = skb->len - sizeof(*rp);
730 	rem_len = __le16_to_cpu(rp->rem_len);
731 
732 	if (rem_len > frag_len) {
733 		BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
734 
735 		memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
736 		assoc->offset += frag_len;
737 
738 		/* Read other fragments */
739 		amp_read_loc_assoc_frag(hdev, rp->phy_handle);
740 
741 		return;
742 	}
743 
744 	memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
745 	assoc->len = assoc->offset + rem_len;
746 	assoc->offset = 0;
747 
748 a2mp_rsp:
749 	/* Send A2MP Rsp when all fragments are received */
750 	a2mp_send_getampassoc_rsp(hdev, rp->status);
751 	a2mp_send_create_phy_link_req(hdev, rp->status);
752 }
753 
754 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
755 					 struct sk_buff *skb)
756 {
757 	struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
758 
759 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
760 
761 	if (!rp->status)
762 		hdev->inq_tx_power = rp->tx_power;
763 }
764 
765 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
766 {
767 	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
768 	struct hci_cp_pin_code_reply *cp;
769 	struct hci_conn *conn;
770 
771 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
772 
773 	hci_dev_lock(hdev);
774 
775 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
776 		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
777 
778 	if (rp->status)
779 		goto unlock;
780 
781 	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
782 	if (!cp)
783 		goto unlock;
784 
785 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
786 	if (conn)
787 		conn->pin_length = cp->pin_len;
788 
789 unlock:
790 	hci_dev_unlock(hdev);
791 }
792 
793 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
794 {
795 	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
796 
797 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
798 
799 	hci_dev_lock(hdev);
800 
801 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
802 		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
803 						 rp->status);
804 
805 	hci_dev_unlock(hdev);
806 }
807 
808 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
809 				       struct sk_buff *skb)
810 {
811 	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
812 
813 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
814 
815 	if (rp->status)
816 		return;
817 
818 	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
819 	hdev->le_pkts = rp->le_max_pkt;
820 
821 	hdev->le_cnt = hdev->le_pkts;
822 
823 	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
824 }
825 
826 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
827 					  struct sk_buff *skb)
828 {
829 	struct hci_rp_le_read_local_features *rp = (void *) skb->data;
830 
831 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
832 
833 	if (!rp->status)
834 		memcpy(hdev->le_features, rp->features, 8);
835 }
836 
837 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
838 					struct sk_buff *skb)
839 {
840 	struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
841 
842 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
843 
844 	if (!rp->status)
845 		hdev->adv_tx_power = rp->tx_power;
846 }
847 
848 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
849 {
850 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
851 
852 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
853 
854 	hci_dev_lock(hdev);
855 
856 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
857 		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
858 						 rp->status);
859 
860 	hci_dev_unlock(hdev);
861 }
862 
863 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
864 					  struct sk_buff *skb)
865 {
866 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
867 
868 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
869 
870 	hci_dev_lock(hdev);
871 
872 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
873 		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
874 						     ACL_LINK, 0, rp->status);
875 
876 	hci_dev_unlock(hdev);
877 }
878 
879 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
880 {
881 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
882 
883 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
884 
885 	hci_dev_lock(hdev);
886 
887 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
888 		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
889 						 0, rp->status);
890 
891 	hci_dev_unlock(hdev);
892 }
893 
894 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
895 					  struct sk_buff *skb)
896 {
897 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
898 
899 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
900 
901 	hci_dev_lock(hdev);
902 
903 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
904 		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
905 						     ACL_LINK, 0, rp->status);
906 
907 	hci_dev_unlock(hdev);
908 }
909 
910 static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
911 					     struct sk_buff *skb)
912 {
913 	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
914 
915 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
916 
917 	hci_dev_lock(hdev);
918 	mgmt_read_local_oob_data_reply_complete(hdev, rp->hash,
919 						rp->randomizer, rp->status);
920 	hci_dev_unlock(hdev);
921 }
922 
923 static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
924 {
925 	__u8 *sent, status = *((__u8 *) skb->data);
926 
927 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
928 
929 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
930 	if (!sent)
931 		return;
932 
933 	hci_dev_lock(hdev);
934 
935 	if (!status) {
936 		if (*sent)
937 			set_bit(HCI_ADVERTISING, &hdev->dev_flags);
938 		else
939 			clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
940 	}
941 
942 	hci_dev_unlock(hdev);
943 }
944 
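/* Command Complete handler for HCI_LE_Set_Scan_Enable: update the
 * HCI_LE_SCAN flag according to the enable value that was sent.
 */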
945 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
946 				      struct sk_buff *skb)
947 {
948 	struct hci_cp_le_set_scan_enable *cp;
949 	__u8 status = *((__u8 *) skb->data);
950 
951 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
952 
953 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
954 	if (!cp)
955 		return;
956 
957 	if (status)
958 		return;
959 
960 	switch (cp->enable) {
961 	case LE_SCAN_ENABLE:
962 		set_bit(HCI_LE_SCAN, &hdev->dev_flags);
963 		break;
964 
965 	case LE_SCAN_DISABLE:
966 		clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
967 		break;
968 
969 	default:
970 		BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
971 		break;
972 	}
973 }
974 
975 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
976 					   struct sk_buff *skb)
977 {
978 	struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
979 
980 	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
981 
982 	if (!rp->status)
983 		hdev->le_white_list_size = rp->size;
984 }
985 
986 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
987 					    struct sk_buff *skb)
988 {
989 	struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
990 
991 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
992 
993 	if (!rp->status)
994 		memcpy(hdev->le_states, rp->le_states, 8);
995 }
996 
997 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
998 					   struct sk_buff *skb)
999 {
1000 	struct hci_cp_write_le_host_supported *sent;
1001 	__u8 status = *((__u8 *) skb->data);
1002 
1003 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1004 
1005 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1006 	if (!sent)
1007 		return;
1008 
1009 	if (!status) {
1010 		if (sent->le) {
1011 			hdev->features[1][0] |= LMP_HOST_LE;
1012 			set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1013 		} else {
1014 			hdev->features[1][0] &= ~LMP_HOST_LE;
1015 			clear_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1016 			clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
1017 		}
1018 
1019 		if (sent->simul)
1020 			hdev->features[1][0] |= LMP_HOST_LE_BREDR;
1021 		else
1022 			hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
1023 	}
1024 }
1025 
1026 static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
1027 					  struct sk_buff *skb)
1028 {
1029 	struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;
1030 
1031 	BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
1032 	       hdev->name, rp->status, rp->phy_handle);
1033 
1034 	if (rp->status)
1035 		return;
1036 
1037 	amp_write_rem_assoc_continue(hdev, rp->phy_handle);
1038 }
1039 
1040 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1041 {
1042 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1043 
1044 	if (status) {
1045 		hci_conn_check_pending(hdev);
1046 		return;
1047 	}
1048 
1049 	set_bit(HCI_INQUIRY, &hdev->flags);
1050 }
1051 
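/* Command Status handler for HCI_Create_Connection: on failure the pending
 * connection is closed and deleted, except that a 0x0c (Command Disallowed)
 * failure on an early attempt leaves it in BT_CONNECT2 for a retry. On
 * success an outgoing ACL connection object is created if none exists yet.
 */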
1052 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1053 {
1054 	struct hci_cp_create_conn *cp;
1055 	struct hci_conn *conn;
1056 
1057 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1058 
1059 	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1060 	if (!cp)
1061 		return;
1062 
1063 	hci_dev_lock(hdev);
1064 
1065 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1066 
1067 	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);
1068 
1069 	if (status) {
1070 		if (conn && conn->state == BT_CONNECT) {
1071 			if (status != 0x0c || conn->attempt > 2) {
1072 				conn->state = BT_CLOSED;
1073 				hci_proto_connect_cfm(conn, status);
1074 				hci_conn_del(conn);
1075 			} else
1076 				conn->state = BT_CONNECT2;
1077 		}
1078 	} else {
1079 		if (!conn) {
1080 			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
1081 			if (conn) {
1082 				conn->out = true;
1083 				conn->link_mode |= HCI_LM_MASTER;
1084 			} else
1085 				BT_ERR("No memory for new connection");
1086 		}
1087 	}
1088 
1089 	hci_dev_unlock(hdev);
1090 }
1091 
1092 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1093 {
1094 	struct hci_cp_add_sco *cp;
1095 	struct hci_conn *acl, *sco;
1096 	__u16 handle;
1097 
1098 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1099 
1100 	if (!status)
1101 		return;
1102 
1103 	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1104 	if (!cp)
1105 		return;
1106 
1107 	handle = __le16_to_cpu(cp->handle);
1108 
1109 	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1110 
1111 	hci_dev_lock(hdev);
1112 
1113 	acl = hci_conn_hash_lookup_handle(hdev, handle);
1114 	if (acl) {
1115 		sco = acl->link;
1116 		if (sco) {
1117 			sco->state = BT_CLOSED;
1118 
1119 			hci_proto_connect_cfm(sco, status);
1120 			hci_conn_del(sco);
1121 		}
1122 	}
1123 
1124 	hci_dev_unlock(hdev);
1125 }
1126 
1127 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1128 {
1129 	struct hci_cp_auth_requested *cp;
1130 	struct hci_conn *conn;
1131 
1132 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1133 
1134 	if (!status)
1135 		return;
1136 
1137 	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1138 	if (!cp)
1139 		return;
1140 
1141 	hci_dev_lock(hdev);
1142 
1143 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1144 	if (conn) {
1145 		if (conn->state == BT_CONFIG) {
1146 			hci_proto_connect_cfm(conn, status);
1147 			hci_conn_drop(conn);
1148 		}
1149 	}
1150 
1151 	hci_dev_unlock(hdev);
1152 }
1153 
1154 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1155 {
1156 	struct hci_cp_set_conn_encrypt *cp;
1157 	struct hci_conn *conn;
1158 
1159 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1160 
1161 	if (!status)
1162 		return;
1163 
1164 	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1165 	if (!cp)
1166 		return;
1167 
1168 	hci_dev_lock(hdev);
1169 
1170 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1171 	if (conn) {
1172 		if (conn->state == BT_CONFIG) {
1173 			hci_proto_connect_cfm(conn, status);
1174 			hci_conn_drop(conn);
1175 		}
1176 	}
1177 
1178 	hci_dev_unlock(hdev);
1179 }
1180 
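/* Decide whether an outgoing connection in BT_CONFIG state still needs
 * authentication, based on SSP support, the MITM bit of the requested
 * auth type and the pending security level.
 */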
1181 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1182 				    struct hci_conn *conn)
1183 {
1184 	if (conn->state != BT_CONFIG || !conn->out)
1185 		return 0;
1186 
1187 	if (conn->pending_sec_level == BT_SECURITY_SDP)
1188 		return 0;
1189 
1190 	/* Only request authentication for SSP connections, for non-SSP
1191 	 * devices with sec_level HIGH, or if MITM protection is requested */
1192 	if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1193 	    conn->pending_sec_level != BT_SECURITY_HIGH)
1194 		return 0;
1195 
1196 	return 1;
1197 }
1198 
1199 static int hci_resolve_name(struct hci_dev *hdev,
1200 				   struct inquiry_entry *e)
1201 {
1202 	struct hci_cp_remote_name_req cp;
1203 
1204 	memset(&cp, 0, sizeof(cp));
1205 
1206 	bacpy(&cp.bdaddr, &e->data.bdaddr);
1207 	cp.pscan_rep_mode = e->data.pscan_rep_mode;
1208 	cp.pscan_mode = e->data.pscan_mode;
1209 	cp.clock_offset = e->data.clock_offset;
1210 
1211 	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1212 }
1213 
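/* Pick the next inquiry cache entry that still needs its name resolved and
 * issue a Remote Name Request for it. Returns true if a request was sent.
 */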
1214 static bool hci_resolve_next_name(struct hci_dev *hdev)
1215 {
1216 	struct discovery_state *discov = &hdev->discovery;
1217 	struct inquiry_entry *e;
1218 
1219 	if (list_empty(&discov->resolve))
1220 		return false;
1221 
1222 	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1223 	if (!e)
1224 		return false;
1225 
1226 	if (hci_resolve_name(hdev, e) == 0) {
1227 		e->name_state = NAME_PENDING;
1228 		return true;
1229 	}
1230 
1231 	return false;
1232 }
1233 
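/* Called when a remote name becomes known (or resolution fails): report the
 * device and name to mgmt, update the matching inquiry cache entry and
 * either continue with the next pending name or finish discovery.
 */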
1234 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
1235 				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
1236 {
1237 	struct discovery_state *discov = &hdev->discovery;
1238 	struct inquiry_entry *e;
1239 
1240 	if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1241 		mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
1242 				      name_len, conn->dev_class);
1243 
1244 	if (discov->state == DISCOVERY_STOPPED)
1245 		return;
1246 
1247 	if (discov->state == DISCOVERY_STOPPING)
1248 		goto discov_complete;
1249 
1250 	if (discov->state != DISCOVERY_RESOLVING)
1251 		return;
1252 
1253 	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
1254 	/* If the device was not found in the list of devices whose names are
1255 	 * pending, there is no need to continue resolving the next name; that
1256 	 * will be done when another Remote Name Request Complete event is
1257 	 * received. */
1258 	if (!e)
1259 		return;
1260 
1261 	list_del(&e->list);
1262 	if (name) {
1263 		e->name_state = NAME_KNOWN;
1264 		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
1265 				 e->data.rssi, name, name_len);
1266 	} else {
1267 		e->name_state = NAME_NOT_KNOWN;
1268 	}
1269 
1270 	if (hci_resolve_next_name(hdev))
1271 		return;
1272 
1273 discov_complete:
1274 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1275 }
1276 
1277 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1278 {
1279 	struct hci_cp_remote_name_req *cp;
1280 	struct hci_conn *conn;
1281 
1282 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1283 
1284 	/* If successful, wait for the name req complete event before
1285 	 * checking whether authentication is needed */
1286 	if (!status)
1287 		return;
1288 
1289 	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1290 	if (!cp)
1291 		return;
1292 
1293 	hci_dev_lock(hdev);
1294 
1295 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1296 
1297 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
1298 		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
1299 
1300 	if (!conn)
1301 		goto unlock;
1302 
1303 	if (!hci_outgoing_auth_needed(hdev, conn))
1304 		goto unlock;
1305 
1306 	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1307 		struct hci_cp_auth_requested auth_cp;
1308 
1309 		auth_cp.handle = __cpu_to_le16(conn->handle);
1310 		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
1311 			     sizeof(auth_cp), &auth_cp);
1312 	}
1313 
1314 unlock:
1315 	hci_dev_unlock(hdev);
1316 }
1317 
1318 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1319 {
1320 	struct hci_cp_read_remote_features *cp;
1321 	struct hci_conn *conn;
1322 
1323 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1324 
1325 	if (!status)
1326 		return;
1327 
1328 	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1329 	if (!cp)
1330 		return;
1331 
1332 	hci_dev_lock(hdev);
1333 
1334 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1335 	if (conn) {
1336 		if (conn->state == BT_CONFIG) {
1337 			hci_proto_connect_cfm(conn, status);
1338 			hci_conn_drop(conn);
1339 		}
1340 	}
1341 
1342 	hci_dev_unlock(hdev);
1343 }
1344 
1345 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1346 {
1347 	struct hci_cp_read_remote_ext_features *cp;
1348 	struct hci_conn *conn;
1349 
1350 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1351 
1352 	if (!status)
1353 		return;
1354 
1355 	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1356 	if (!cp)
1357 		return;
1358 
1359 	hci_dev_lock(hdev);
1360 
1361 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1362 	if (conn) {
1363 		if (conn->state == BT_CONFIG) {
1364 			hci_proto_connect_cfm(conn, status);
1365 			hci_conn_drop(conn);
1366 		}
1367 	}
1368 
1369 	hci_dev_unlock(hdev);
1370 }
1371 
1372 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1373 {
1374 	struct hci_cp_setup_sync_conn *cp;
1375 	struct hci_conn *acl, *sco;
1376 	__u16 handle;
1377 
1378 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1379 
1380 	if (!status)
1381 		return;
1382 
1383 	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1384 	if (!cp)
1385 		return;
1386 
1387 	handle = __le16_to_cpu(cp->handle);
1388 
1389 	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1390 
1391 	hci_dev_lock(hdev);
1392 
1393 	acl = hci_conn_hash_lookup_handle(hdev, handle);
1394 	if (acl) {
1395 		sco = acl->link;
1396 		if (sco) {
1397 			sco->state = BT_CLOSED;
1398 
1399 			hci_proto_connect_cfm(sco, status);
1400 			hci_conn_del(sco);
1401 		}
1402 	}
1403 
1404 	hci_dev_unlock(hdev);
1405 }
1406 
1407 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1408 {
1409 	struct hci_cp_sniff_mode *cp;
1410 	struct hci_conn *conn;
1411 
1412 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1413 
1414 	if (!status)
1415 		return;
1416 
1417 	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1418 	if (!cp)
1419 		return;
1420 
1421 	hci_dev_lock(hdev);
1422 
1423 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1424 	if (conn) {
1425 		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1426 
1427 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1428 			hci_sco_setup(conn, status);
1429 	}
1430 
1431 	hci_dev_unlock(hdev);
1432 }
1433 
1434 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1435 {
1436 	struct hci_cp_exit_sniff_mode *cp;
1437 	struct hci_conn *conn;
1438 
1439 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1440 
1441 	if (!status)
1442 		return;
1443 
1444 	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1445 	if (!cp)
1446 		return;
1447 
1448 	hci_dev_lock(hdev);
1449 
1450 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1451 	if (conn) {
1452 		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1453 
1454 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1455 			hci_sco_setup(conn, status);
1456 	}
1457 
1458 	hci_dev_unlock(hdev);
1459 }
1460 
1461 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1462 {
1463 	struct hci_cp_disconnect *cp;
1464 	struct hci_conn *conn;
1465 
1466 	if (!status)
1467 		return;
1468 
1469 	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1470 	if (!cp)
1471 		return;
1472 
1473 	hci_dev_lock(hdev);
1474 
1475 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1476 	if (conn)
1477 		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1478 				       conn->dst_type, status);
1479 
1480 	hci_dev_unlock(hdev);
1481 }
1482 
1483 static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
1484 {
1485 	struct hci_cp_create_phy_link *cp;
1486 
1487 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1488 
1489 	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
1490 	if (!cp)
1491 		return;
1492 
1493 	hci_dev_lock(hdev);
1494 
1495 	if (status) {
1496 		struct hci_conn *hcon;
1497 
1498 		hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
1499 		if (hcon)
1500 			hci_conn_del(hcon);
1501 	} else {
1502 		amp_write_remote_assoc(hdev, cp->phy_handle);
1503 	}
1504 
1505 	hci_dev_unlock(hdev);
1506 }
1507 
1508 static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
1509 {
1510 	struct hci_cp_accept_phy_link *cp;
1511 
1512 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1513 
1514 	if (status)
1515 		return;
1516 
1517 	cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
1518 	if (!cp)
1519 		return;
1520 
1521 	amp_write_remote_assoc(hdev, cp->phy_handle);
1522 }
1523 
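/* Inquiry Complete event: check for pending connections, clear HCI_INQUIRY
 * and wake waiters. For mgmt-driven discovery, either start resolving
 * remote names or mark discovery as stopped.
 */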
1524 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1525 {
1526 	__u8 status = *((__u8 *) skb->data);
1527 	struct discovery_state *discov = &hdev->discovery;
1528 	struct inquiry_entry *e;
1529 
1530 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1531 
1532 	hci_conn_check_pending(hdev);
1533 
1534 	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
1535 		return;
1536 
1537 	smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */
1538 	wake_up_bit(&hdev->flags, HCI_INQUIRY);
1539 
1540 	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1541 		return;
1542 
1543 	hci_dev_lock(hdev);
1544 
1545 	if (discov->state != DISCOVERY_FINDING)
1546 		goto unlock;
1547 
1548 	if (list_empty(&discov->resolve)) {
1549 		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1550 		goto unlock;
1551 	}
1552 
1553 	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1554 	if (e && hci_resolve_name(hdev, e) == 0) {
1555 		e->name_state = NAME_PENDING;
1556 		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
1557 	} else {
1558 		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1559 	}
1560 
1561 unlock:
1562 	hci_dev_unlock(hdev);
1563 }
1564 
1565 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1566 {
1567 	struct inquiry_data data;
1568 	struct inquiry_info *info = (void *) (skb->data + 1);
1569 	int num_rsp = *((__u8 *) skb->data);
1570 
1571 	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1572 
1573 	if (!num_rsp)
1574 		return;
1575 
1576 	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
1577 		return;
1578 
1579 	hci_dev_lock(hdev);
1580 
1581 	for (; num_rsp; num_rsp--, info++) {
1582 		bool name_known, ssp;
1583 
1584 		bacpy(&data.bdaddr, &info->bdaddr);
1585 		data.pscan_rep_mode	= info->pscan_rep_mode;
1586 		data.pscan_period_mode	= info->pscan_period_mode;
1587 		data.pscan_mode		= info->pscan_mode;
1588 		memcpy(data.dev_class, info->dev_class, 3);
1589 		data.clock_offset	= info->clock_offset;
1590 		data.rssi		= 0x00;
1591 		data.ssp_mode		= 0x00;
1592 
1593 		name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
1594 		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
1595 				  info->dev_class, 0, !name_known, ssp, NULL,
1596 				  0);
1597 	}
1598 
1599 	hci_dev_unlock(hdev);
1600 }
1601 
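/* Connection Complete event: set up the new ACL/SCO connection (handle,
 * timeouts, link mode), request remote features or adjust the packet type
 * where needed, and report failures to mgmt and the upper layers.
 */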
1602 static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1603 {
1604 	struct hci_ev_conn_complete *ev = (void *) skb->data;
1605 	struct hci_conn *conn;
1606 
1607 	BT_DBG("%s", hdev->name);
1608 
1609 	hci_dev_lock(hdev);
1610 
1611 	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
1612 	if (!conn) {
1613 		if (ev->link_type != SCO_LINK)
1614 			goto unlock;
1615 
1616 		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
1617 		if (!conn)
1618 			goto unlock;
1619 
1620 		conn->type = SCO_LINK;
1621 	}
1622 
1623 	if (!ev->status) {
1624 		conn->handle = __le16_to_cpu(ev->handle);
1625 
1626 		if (conn->type == ACL_LINK) {
1627 			conn->state = BT_CONFIG;
1628 			hci_conn_hold(conn);
1629 
1630 			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
1631 			    !hci_find_link_key(hdev, &ev->bdaddr))
1632 				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
1633 			else
1634 				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1635 		} else
1636 			conn->state = BT_CONNECTED;
1637 
1638 		hci_conn_add_sysfs(conn);
1639 
1640 		if (test_bit(HCI_AUTH, &hdev->flags))
1641 			conn->link_mode |= HCI_LM_AUTH;
1642 
1643 		if (test_bit(HCI_ENCRYPT, &hdev->flags))
1644 			conn->link_mode |= HCI_LM_ENCRYPT;
1645 
1646 		/* Get remote features */
1647 		if (conn->type == ACL_LINK) {
1648 			struct hci_cp_read_remote_features cp;
1649 			cp.handle = ev->handle;
1650 			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
1651 				     sizeof(cp), &cp);
1652 		}
1653 
1654 		/* Set packet type for incoming connection */
1655 		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
1656 			struct hci_cp_change_conn_ptype cp;
1657 			cp.handle = ev->handle;
1658 			cp.pkt_type = cpu_to_le16(conn->pkt_type);
1659 			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
1660 				     &cp);
1661 		}
1662 	} else {
1663 		conn->state = BT_CLOSED;
1664 		if (conn->type == ACL_LINK)
1665 			mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
1666 					    conn->dst_type, ev->status);
1667 	}
1668 
1669 	if (conn->type == ACL_LINK)
1670 		hci_sco_setup(conn, ev->status);
1671 
1672 	if (ev->status) {
1673 		hci_proto_connect_cfm(conn, ev->status);
1674 		hci_conn_del(conn);
1675 	} else if (ev->link_type != ACL_LINK)
1676 		hci_proto_connect_cfm(conn, ev->status);
1677 
1678 unlock:
1679 	hci_dev_unlock(hdev);
1680 
1681 	hci_conn_check_pending(hdev);
1682 }
1683 
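/* Connection Request event: accept the incoming ACL or (e)SCO connection,
 * choosing the role and sync parameters, unless the peer is blacklisted or
 * HCI_LM_ACCEPT is not set in the link mode mask, in which case the
 * request is rejected.
 */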
1684 static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1685 {
1686 	struct hci_ev_conn_request *ev = (void *) skb->data;
1687 	int mask = hdev->link_mode;
1688 	__u8 flags = 0;
1689 
1690 	BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
1691 	       ev->link_type);
1692 
1693 	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
1694 				      &flags);
1695 
1696 	if ((mask & HCI_LM_ACCEPT) &&
1697 	    !hci_blacklist_lookup(hdev, &ev->bdaddr, BDADDR_BREDR)) {
1698 		/* Connection accepted */
1699 		struct inquiry_entry *ie;
1700 		struct hci_conn *conn;
1701 
1702 		hci_dev_lock(hdev);
1703 
1704 		ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
1705 		if (ie)
1706 			memcpy(ie->data.dev_class, ev->dev_class, 3);
1707 
1708 		conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
1709 					       &ev->bdaddr);
1710 		if (!conn) {
1711 			conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
1712 			if (!conn) {
1713 				BT_ERR("No memory for new connection");
1714 				hci_dev_unlock(hdev);
1715 				return;
1716 			}
1717 		}
1718 
1719 		memcpy(conn->dev_class, ev->dev_class, 3);
1720 
1721 		hci_dev_unlock(hdev);
1722 
1723 		if (ev->link_type == ACL_LINK ||
1724 		    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
1725 			struct hci_cp_accept_conn_req cp;
1726 			conn->state = BT_CONNECT;
1727 
1728 			bacpy(&cp.bdaddr, &ev->bdaddr);
1729 
1730 			if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
1731 				cp.role = 0x00; /* Become master */
1732 			else
1733 				cp.role = 0x01; /* Remain slave */
1734 
1735 			hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp),
1736 				     &cp);
1737 		} else if (!(flags & HCI_PROTO_DEFER)) {
1738 			struct hci_cp_accept_sync_conn_req cp;
1739 			conn->state = BT_CONNECT;
1740 
1741 			bacpy(&cp.bdaddr, &ev->bdaddr);
1742 			cp.pkt_type = cpu_to_le16(conn->pkt_type);
1743 
1744 			cp.tx_bandwidth   = __constant_cpu_to_le32(0x00001f40);
1745 			cp.rx_bandwidth   = __constant_cpu_to_le32(0x00001f40);
1746 			cp.max_latency    = __constant_cpu_to_le16(0xffff);
1747 			cp.content_format = cpu_to_le16(hdev->voice_setting);
1748 			cp.retrans_effort = 0xff;
1749 
1750 			hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
1751 				     sizeof(cp), &cp);
1752 		} else {
1753 			conn->state = BT_CONNECT2;
1754 			hci_proto_connect_cfm(conn, 0);
1755 		}
1756 	} else {
1757 		/* Connection rejected */
1758 		struct hci_cp_reject_conn_req cp;
1759 
1760 		bacpy(&cp.bdaddr, &ev->bdaddr);
1761 		cp.reason = HCI_ERROR_REJ_BAD_ADDR;
1762 		hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
1763 	}
1764 }
1765 
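/* Map an HCI disconnect error code to the corresponding mgmt reason. */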
1766 static u8 hci_to_mgmt_reason(u8 err)
1767 {
1768 	switch (err) {
1769 	case HCI_ERROR_CONNECTION_TIMEOUT:
1770 		return MGMT_DEV_DISCONN_TIMEOUT;
1771 	case HCI_ERROR_REMOTE_USER_TERM:
1772 	case HCI_ERROR_REMOTE_LOW_RESOURCES:
1773 	case HCI_ERROR_REMOTE_POWER_OFF:
1774 		return MGMT_DEV_DISCONN_REMOTE;
1775 	case HCI_ERROR_LOCAL_HOST_TERM:
1776 		return MGMT_DEV_DISCONN_LOCAL_HOST;
1777 	default:
1778 		return MGMT_DEV_DISCONN_UNKNOWN;
1779 	}
1780 }
1781 
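/* Disconnection Complete event: notify mgmt, remove the link key if it was
 * marked for flushing, tear down the connection and re-enable LE
 * advertising when an LE link goes away.
 */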
1782 static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1783 {
1784 	struct hci_ev_disconn_complete *ev = (void *) skb->data;
1785 	struct hci_conn *conn;
1786 
1787 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
1788 
1789 	hci_dev_lock(hdev);
1790 
1791 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1792 	if (!conn)
1793 		goto unlock;
1794 
1795 	if (ev->status == 0)
1796 		conn->state = BT_CLOSED;
1797 
1798 	if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) &&
1799 	    (conn->type == ACL_LINK || conn->type == LE_LINK)) {
1800 		if (ev->status) {
1801 			mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1802 					       conn->dst_type, ev->status);
1803 		} else {
1804 			u8 reason = hci_to_mgmt_reason(ev->reason);
1805 
1806 			mgmt_device_disconnected(hdev, &conn->dst, conn->type,
1807 						 conn->dst_type, reason);
1808 		}
1809 	}
1810 
1811 	if (ev->status == 0) {
1812 		u8 type = conn->type;
1813 
1814 		if (type == ACL_LINK && conn->flush_key)
1815 			hci_remove_link_key(hdev, &conn->dst);
1816 		hci_proto_disconn_cfm(conn, ev->reason);
1817 		hci_conn_del(conn);
1818 
1819 		/* Re-enable advertising if necessary, since it might
1820 		 * have been disabled by the connection. From the
1821 		 * HCI_LE_Set_Advertise_Enable command description in
1822 		 * the core specification (v4.0):
1823 		 * "The Controller shall continue advertising until the Host
1824 		 * issues an LE_Set_Advertise_Enable command with
1825 		 * Advertising_Enable set to 0x00 (Advertising is disabled)
1826 		 * or until a connection is created or until the Advertising
1827 		 * is timed out due to Directed Advertising."
1828 		 */
1829 		if (type == LE_LINK)
1830 			mgmt_reenable_advertising(hdev);
1831 	}
1832 
1833 unlock:
1834 	hci_dev_unlock(hdev);
1835 }
1836 
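/* Authentication Complete event: update the link mode and security level
 * (or report the failure to mgmt), then continue with encryption setup or
 * connection confirmation as appropriate for the connection state.
 */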
1837 static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1838 {
1839 	struct hci_ev_auth_complete *ev = (void *) skb->data;
1840 	struct hci_conn *conn;
1841 
1842 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
1843 
1844 	hci_dev_lock(hdev);
1845 
1846 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1847 	if (!conn)
1848 		goto unlock;
1849 
1850 	if (!ev->status) {
1851 		if (!hci_conn_ssp_enabled(conn) &&
1852 		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
1853 			BT_INFO("re-auth of legacy device is not possible.");
1854 		} else {
1855 			conn->link_mode |= HCI_LM_AUTH;
1856 			conn->sec_level = conn->pending_sec_level;
1857 		}
1858 	} else {
1859 		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
1860 				 ev->status);
1861 	}
1862 
1863 	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
1864 	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
1865 
1866 	if (conn->state == BT_CONFIG) {
1867 		if (!ev->status && hci_conn_ssp_enabled(conn)) {
1868 			struct hci_cp_set_conn_encrypt cp;
1869 			cp.handle  = ev->handle;
1870 			cp.encrypt = 0x01;
1871 			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1872 				     &cp);
1873 		} else {
1874 			conn->state = BT_CONNECTED;
1875 			hci_proto_connect_cfm(conn, ev->status);
1876 			hci_conn_drop(conn);
1877 		}
1878 	} else {
1879 		hci_auth_cfm(conn, ev->status);
1880 
1881 		hci_conn_hold(conn);
1882 		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1883 		hci_conn_drop(conn);
1884 	}
1885 
1886 	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
1887 		if (!ev->status) {
1888 			struct hci_cp_set_conn_encrypt cp;
1889 			cp.handle  = ev->handle;
1890 			cp.encrypt = 0x01;
1891 			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1892 				     &cp);
1893 		} else {
1894 			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
1895 			hci_encrypt_cfm(conn, ev->status, 0x00);
1896 		}
1897 	}
1898 
1899 unlock:
1900 	hci_dev_unlock(hdev);
1901 }
1902 
1903 static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
1904 {
1905 	struct hci_ev_remote_name *ev = (void *) skb->data;
1906 	struct hci_conn *conn;
1907 
1908 	BT_DBG("%s", hdev->name);
1909 
1910 	hci_conn_check_pending(hdev);
1911 
1912 	hci_dev_lock(hdev);
1913 
1914 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
1915 
1916 	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1917 		goto check_auth;
1918 
1919 	if (ev->status == 0)
1920 		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
1921 				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
1922 	else
1923 		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
1924 
1925 check_auth:
1926 	if (!conn)
1927 		goto unlock;
1928 
1929 	if (!hci_outgoing_auth_needed(hdev, conn))
1930 		goto unlock;
1931 
1932 	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1933 		struct hci_cp_auth_requested cp;
1934 		cp.handle = __cpu_to_le16(conn->handle);
1935 		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1936 	}
1937 
1938 unlock:
1939 	hci_dev_unlock(hdev);
1940 }
1941 
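/* Encryption Change event: update the authentication/encryption link mode
 * bits and inform the upper layers; an established link is disconnected if
 * the change failed.
 */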
1942 static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
1943 {
1944 	struct hci_ev_encrypt_change *ev = (void *) skb->data;
1945 	struct hci_conn *conn;
1946 
1947 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
1948 
1949 	hci_dev_lock(hdev);
1950 
1951 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1952 	if (conn) {
1953 		if (!ev->status) {
1954 			if (ev->encrypt) {
1955 				/* Encryption implies authentication */
1956 				conn->link_mode |= HCI_LM_AUTH;
1957 				conn->link_mode |= HCI_LM_ENCRYPT;
1958 				conn->sec_level = conn->pending_sec_level;
1959 			} else
1960 				conn->link_mode &= ~HCI_LM_ENCRYPT;
1961 		}
1962 
1963 		clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
1964 
1965 		if (ev->status && conn->state == BT_CONNECTED) {
1966 			hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
1967 			hci_conn_drop(conn);
1968 			goto unlock;
1969 		}
1970 
1971 		if (conn->state == BT_CONFIG) {
1972 			if (!ev->status)
1973 				conn->state = BT_CONNECTED;
1974 
1975 			hci_proto_connect_cfm(conn, ev->status);
1976 			hci_conn_drop(conn);
1977 		} else
1978 			hci_encrypt_cfm(conn, ev->status, ev->encrypt);
1979 	}
1980 
1981 unlock:
1982 	hci_dev_unlock(hdev);
1983 }
1984 
1985 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
1986 					     struct sk_buff *skb)
1987 {
1988 	struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
1989 	struct hci_conn *conn;
1990 
1991 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
1992 
1993 	hci_dev_lock(hdev);
1994 
1995 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1996 	if (conn) {
1997 		if (!ev->status)
1998 			conn->link_mode |= HCI_LM_SECURE;
1999 
2000 		clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2001 
2002 		hci_key_change_cfm(conn, ev->status);
2003 	}
2004 
2005 	hci_dev_unlock(hdev);
2006 }
2007 
2008 static void hci_remote_features_evt(struct hci_dev *hdev,
2009 				    struct sk_buff *skb)
2010 {
2011 	struct hci_ev_remote_features *ev = (void *) skb->data;
2012 	struct hci_conn *conn;
2013 
2014 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2015 
2016 	hci_dev_lock(hdev);
2017 
2018 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2019 	if (!conn)
2020 		goto unlock;
2021 
2022 	if (!ev->status)
2023 		memcpy(conn->features[0], ev->features, 8);
2024 
2025 	if (conn->state != BT_CONFIG)
2026 		goto unlock;
2027 
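	/* If both sides are SSP capable, read extended features page 1
	 * to learn the remote host's SSP setting before continuing with
	 * connection setup. */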
2028 	if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
2029 		struct hci_cp_read_remote_ext_features cp;
2030 		cp.handle = ev->handle;
2031 		cp.page = 0x01;
2032 		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
2033 			     sizeof(cp), &cp);
2034 		goto unlock;
2035 	}
2036 
2037 	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2038 		struct hci_cp_remote_name_req cp;
2039 		memset(&cp, 0, sizeof(cp));
2040 		bacpy(&cp.bdaddr, &conn->dst);
2041 		cp.pscan_rep_mode = 0x02;
2042 		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2043 	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2044 		mgmt_device_connected(hdev, &conn->dst, conn->type,
2045 				      conn->dst_type, 0, NULL, 0,
2046 				      conn->dev_class);
2047 
2048 	if (!hci_outgoing_auth_needed(hdev, conn)) {
2049 		conn->state = BT_CONNECTED;
2050 		hci_proto_connect_cfm(conn, ev->status);
2051 		hci_conn_drop(conn);
2052 	}
2053 
2054 unlock:
2055 	hci_dev_unlock(hdev);
2056 }
2057 
2058 static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2059 {
2060 	struct hci_ev_cmd_complete *ev = (void *) skb->data;
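	/* For most commands the first byte of the return parameters is
	 * the command status; it is passed to hci_req_cmd_complete()
	 * below. */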
2061 	u8 status = skb->data[sizeof(*ev)];
2062 	__u16 opcode;
2063 
2064 	skb_pull(skb, sizeof(*ev));
2065 
2066 	opcode = __le16_to_cpu(ev->opcode);
2067 
2068 	switch (opcode) {
2069 	case HCI_OP_INQUIRY_CANCEL:
2070 		hci_cc_inquiry_cancel(hdev, skb);
2071 		break;
2072 
2073 	case HCI_OP_PERIODIC_INQ:
2074 		hci_cc_periodic_inq(hdev, skb);
2075 		break;
2076 
2077 	case HCI_OP_EXIT_PERIODIC_INQ:
2078 		hci_cc_exit_periodic_inq(hdev, skb);
2079 		break;
2080 
2081 	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
2082 		hci_cc_remote_name_req_cancel(hdev, skb);
2083 		break;
2084 
2085 	case HCI_OP_ROLE_DISCOVERY:
2086 		hci_cc_role_discovery(hdev, skb);
2087 		break;
2088 
2089 	case HCI_OP_READ_LINK_POLICY:
2090 		hci_cc_read_link_policy(hdev, skb);
2091 		break;
2092 
2093 	case HCI_OP_WRITE_LINK_POLICY:
2094 		hci_cc_write_link_policy(hdev, skb);
2095 		break;
2096 
2097 	case HCI_OP_READ_DEF_LINK_POLICY:
2098 		hci_cc_read_def_link_policy(hdev, skb);
2099 		break;
2100 
2101 	case HCI_OP_WRITE_DEF_LINK_POLICY:
2102 		hci_cc_write_def_link_policy(hdev, skb);
2103 		break;
2104 
2105 	case HCI_OP_RESET:
2106 		hci_cc_reset(hdev, skb);
2107 		break;
2108 
2109 	case HCI_OP_WRITE_LOCAL_NAME:
2110 		hci_cc_write_local_name(hdev, skb);
2111 		break;
2112 
2113 	case HCI_OP_READ_LOCAL_NAME:
2114 		hci_cc_read_local_name(hdev, skb);
2115 		break;
2116 
2117 	case HCI_OP_WRITE_AUTH_ENABLE:
2118 		hci_cc_write_auth_enable(hdev, skb);
2119 		break;
2120 
2121 	case HCI_OP_WRITE_ENCRYPT_MODE:
2122 		hci_cc_write_encrypt_mode(hdev, skb);
2123 		break;
2124 
2125 	case HCI_OP_WRITE_SCAN_ENABLE:
2126 		hci_cc_write_scan_enable(hdev, skb);
2127 		break;
2128 
2129 	case HCI_OP_READ_CLASS_OF_DEV:
2130 		hci_cc_read_class_of_dev(hdev, skb);
2131 		break;
2132 
2133 	case HCI_OP_WRITE_CLASS_OF_DEV:
2134 		hci_cc_write_class_of_dev(hdev, skb);
2135 		break;
2136 
2137 	case HCI_OP_READ_VOICE_SETTING:
2138 		hci_cc_read_voice_setting(hdev, skb);
2139 		break;
2140 
2141 	case HCI_OP_WRITE_VOICE_SETTING:
2142 		hci_cc_write_voice_setting(hdev, skb);
2143 		break;
2144 
2145 	case HCI_OP_READ_NUM_SUPPORTED_IAC:
2146 		hci_cc_read_num_supported_iac(hdev, skb);
2147 		break;
2148 
2149 	case HCI_OP_WRITE_SSP_MODE:
2150 		hci_cc_write_ssp_mode(hdev, skb);
2151 		break;
2152 
2153 	case HCI_OP_READ_LOCAL_VERSION:
2154 		hci_cc_read_local_version(hdev, skb);
2155 		break;
2156 
2157 	case HCI_OP_READ_LOCAL_COMMANDS:
2158 		hci_cc_read_local_commands(hdev, skb);
2159 		break;
2160 
2161 	case HCI_OP_READ_LOCAL_FEATURES:
2162 		hci_cc_read_local_features(hdev, skb);
2163 		break;
2164 
2165 	case HCI_OP_READ_LOCAL_EXT_FEATURES:
2166 		hci_cc_read_local_ext_features(hdev, skb);
2167 		break;
2168 
2169 	case HCI_OP_READ_BUFFER_SIZE:
2170 		hci_cc_read_buffer_size(hdev, skb);
2171 		break;
2172 
2173 	case HCI_OP_READ_BD_ADDR:
2174 		hci_cc_read_bd_addr(hdev, skb);
2175 		break;
2176 
2177 	case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
2178 		hci_cc_read_page_scan_activity(hdev, skb);
2179 		break;
2180 
2181 	case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
2182 		hci_cc_write_page_scan_activity(hdev, skb);
2183 		break;
2184 
2185 	case HCI_OP_READ_PAGE_SCAN_TYPE:
2186 		hci_cc_read_page_scan_type(hdev, skb);
2187 		break;
2188 
2189 	case HCI_OP_WRITE_PAGE_SCAN_TYPE:
2190 		hci_cc_write_page_scan_type(hdev, skb);
2191 		break;
2192 
2193 	case HCI_OP_READ_DATA_BLOCK_SIZE:
2194 		hci_cc_read_data_block_size(hdev, skb);
2195 		break;
2196 
2197 	case HCI_OP_READ_FLOW_CONTROL_MODE:
2198 		hci_cc_read_flow_control_mode(hdev, skb);
2199 		break;
2200 
2201 	case HCI_OP_READ_LOCAL_AMP_INFO:
2202 		hci_cc_read_local_amp_info(hdev, skb);
2203 		break;
2204 
2205 	case HCI_OP_READ_LOCAL_AMP_ASSOC:
2206 		hci_cc_read_local_amp_assoc(hdev, skb);
2207 		break;
2208 
2209 	case HCI_OP_READ_INQ_RSP_TX_POWER:
2210 		hci_cc_read_inq_rsp_tx_power(hdev, skb);
2211 		break;
2212 
2213 	case HCI_OP_PIN_CODE_REPLY:
2214 		hci_cc_pin_code_reply(hdev, skb);
2215 		break;
2216 
2217 	case HCI_OP_PIN_CODE_NEG_REPLY:
2218 		hci_cc_pin_code_neg_reply(hdev, skb);
2219 		break;
2220 
2221 	case HCI_OP_READ_LOCAL_OOB_DATA:
2222 		hci_cc_read_local_oob_data_reply(hdev, skb);
2223 		break;
2224 
2225 	case HCI_OP_LE_READ_BUFFER_SIZE:
2226 		hci_cc_le_read_buffer_size(hdev, skb);
2227 		break;
2228 
2229 	case HCI_OP_LE_READ_LOCAL_FEATURES:
2230 		hci_cc_le_read_local_features(hdev, skb);
2231 		break;
2232 
2233 	case HCI_OP_LE_READ_ADV_TX_POWER:
2234 		hci_cc_le_read_adv_tx_power(hdev, skb);
2235 		break;
2236 
2237 	case HCI_OP_USER_CONFIRM_REPLY:
2238 		hci_cc_user_confirm_reply(hdev, skb);
2239 		break;
2240 
2241 	case HCI_OP_USER_CONFIRM_NEG_REPLY:
2242 		hci_cc_user_confirm_neg_reply(hdev, skb);
2243 		break;
2244 
2245 	case HCI_OP_USER_PASSKEY_REPLY:
2246 		hci_cc_user_passkey_reply(hdev, skb);
2247 		break;
2248 
2249 	case HCI_OP_USER_PASSKEY_NEG_REPLY:
2250 		hci_cc_user_passkey_neg_reply(hdev, skb);
2251 		break;
2252 
2253 	case HCI_OP_LE_SET_ADV_ENABLE:
2254 		hci_cc_le_set_adv_enable(hdev, skb);
2255 		break;
2256 
2257 	case HCI_OP_LE_SET_SCAN_ENABLE:
2258 		hci_cc_le_set_scan_enable(hdev, skb);
2259 		break;
2260 
2261 	case HCI_OP_LE_READ_WHITE_LIST_SIZE:
2262 		hci_cc_le_read_white_list_size(hdev, skb);
2263 		break;
2264 
2265 	case HCI_OP_LE_READ_SUPPORTED_STATES:
2266 		hci_cc_le_read_supported_states(hdev, skb);
2267 		break;
2268 
2269 	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
2270 		hci_cc_write_le_host_supported(hdev, skb);
2271 		break;
2272 
2273 	case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
2274 		hci_cc_write_remote_amp_assoc(hdev, skb);
2275 		break;
2276 
2277 	default:
2278 		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2279 		break;
2280 	}
2281 
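	/* Any command other than NOP stops the command timeout timer. */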
2282 	if (opcode != HCI_OP_NOP)
2283 		del_timer(&hdev->cmd_timer);
2284 
2285 	hci_req_cmd_complete(hdev, opcode, status);
2286 
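	/* A non-zero ncmd means the controller can accept more commands.
	 * The stack keeps at most one command in flight, so just reset
	 * the counter and kick the command work queue (unless a
	 * controller reset is in progress). */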
2287 	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2288 		atomic_set(&hdev->cmd_cnt, 1);
2289 		if (!skb_queue_empty(&hdev->cmd_q))
2290 			queue_work(hdev->workqueue, &hdev->cmd_work);
2291 	}
2292 }
2293 
2294 static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
2295 {
2296 	struct hci_ev_cmd_status *ev = (void *) skb->data;
2297 	__u16 opcode;
2298 
2299 	skb_pull(skb, sizeof(*ev));
2300 
2301 	opcode = __le16_to_cpu(ev->opcode);
2302 
2303 	switch (opcode) {
2304 	case HCI_OP_INQUIRY:
2305 		hci_cs_inquiry(hdev, ev->status);
2306 		break;
2307 
2308 	case HCI_OP_CREATE_CONN:
2309 		hci_cs_create_conn(hdev, ev->status);
2310 		break;
2311 
2312 	case HCI_OP_ADD_SCO:
2313 		hci_cs_add_sco(hdev, ev->status);
2314 		break;
2315 
2316 	case HCI_OP_AUTH_REQUESTED:
2317 		hci_cs_auth_requested(hdev, ev->status);
2318 		break;
2319 
2320 	case HCI_OP_SET_CONN_ENCRYPT:
2321 		hci_cs_set_conn_encrypt(hdev, ev->status);
2322 		break;
2323 
2324 	case HCI_OP_REMOTE_NAME_REQ:
2325 		hci_cs_remote_name_req(hdev, ev->status);
2326 		break;
2327 
2328 	case HCI_OP_READ_REMOTE_FEATURES:
2329 		hci_cs_read_remote_features(hdev, ev->status);
2330 		break;
2331 
2332 	case HCI_OP_READ_REMOTE_EXT_FEATURES:
2333 		hci_cs_read_remote_ext_features(hdev, ev->status);
2334 		break;
2335 
2336 	case HCI_OP_SETUP_SYNC_CONN:
2337 		hci_cs_setup_sync_conn(hdev, ev->status);
2338 		break;
2339 
2340 	case HCI_OP_SNIFF_MODE:
2341 		hci_cs_sniff_mode(hdev, ev->status);
2342 		break;
2343 
2344 	case HCI_OP_EXIT_SNIFF_MODE:
2345 		hci_cs_exit_sniff_mode(hdev, ev->status);
2346 		break;
2347 
2348 	case HCI_OP_DISCONNECT:
2349 		hci_cs_disconnect(hdev, ev->status);
2350 		break;
2351 
2352 	case HCI_OP_CREATE_PHY_LINK:
2353 		hci_cs_create_phylink(hdev, ev->status);
2354 		break;
2355 
2356 	case HCI_OP_ACCEPT_PHY_LINK:
2357 		hci_cs_accept_phylink(hdev, ev->status);
2358 		break;
2359 
2360 	default:
2361 		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2362 		break;
2363 	}
2364 
2365 	if (opcode != HCI_OP_NOP)
2366 		del_timer(&hdev->cmd_timer);
2367 
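	/* Complete the request now if the command failed, or if it was
	 * not waiting for a dedicated follow-up event. */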
2368 	if (ev->status ||
2369 	    (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
2370 		hci_req_cmd_complete(hdev, opcode, ev->status);
2371 
2372 	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2373 		atomic_set(&hdev->cmd_cnt, 1);
2374 		if (!skb_queue_empty(&hdev->cmd_q))
2375 			queue_work(hdev->workqueue, &hdev->cmd_work);
2376 	}
2377 }
2378 
2379 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2380 {
2381 	struct hci_ev_role_change *ev = (void *) skb->data;
2382 	struct hci_conn *conn;
2383 
2384 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2385 
2386 	hci_dev_lock(hdev);
2387 
2388 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2389 	if (conn) {
2390 		if (!ev->status) {
2391 			if (ev->role)
2392 				conn->link_mode &= ~HCI_LM_MASTER;
2393 			else
2394 				conn->link_mode |= HCI_LM_MASTER;
2395 		}
2396 
2397 		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2398 
2399 		hci_role_switch_cfm(conn, ev->status, ev->role);
2400 	}
2401 
2402 	hci_dev_unlock(hdev);
2403 }
2404 
2405 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
2406 {
2407 	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
2408 	int i;
2409 
2410 	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
2411 		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2412 		return;
2413 	}
2414 
2415 	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2416 	    ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
2417 		BT_DBG("%s bad parameters", hdev->name);
2418 		return;
2419 	}
2420 
2421 	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
2422 
2423 	for (i = 0; i < ev->num_hndl; i++) {
2424 		struct hci_comp_pkts_info *info = &ev->handles[i];
2425 		struct hci_conn *conn;
2426 		__u16  handle, count;
2427 
2428 		handle = __le16_to_cpu(info->handle);
2429 		count  = __le16_to_cpu(info->count);
2430 
2431 		conn = hci_conn_hash_lookup_handle(hdev, handle);
2432 		if (!conn)
2433 			continue;
2434 
2435 		conn->sent -= count;
2436 
2437 		switch (conn->type) {
2438 		case ACL_LINK:
2439 			hdev->acl_cnt += count;
2440 			if (hdev->acl_cnt > hdev->acl_pkts)
2441 				hdev->acl_cnt = hdev->acl_pkts;
2442 			break;
2443 
2444 		case LE_LINK:
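			/* Controllers without a dedicated LE buffer pool
			 * (le_pkts == 0) share the ACL buffers for LE
			 * traffic. */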
2445 			if (hdev->le_pkts) {
2446 				hdev->le_cnt += count;
2447 				if (hdev->le_cnt > hdev->le_pkts)
2448 					hdev->le_cnt = hdev->le_pkts;
2449 			} else {
2450 				hdev->acl_cnt += count;
2451 				if (hdev->acl_cnt > hdev->acl_pkts)
2452 					hdev->acl_cnt = hdev->acl_pkts;
2453 			}
2454 			break;
2455 
2456 		case SCO_LINK:
2457 			hdev->sco_cnt += count;
2458 			if (hdev->sco_cnt > hdev->sco_pkts)
2459 				hdev->sco_cnt = hdev->sco_pkts;
2460 			break;
2461 
2462 		default:
2463 			BT_ERR("Unknown type %d conn %p", conn->type, conn);
2464 			break;
2465 		}
2466 	}
2467 
2468 	queue_work(hdev->workqueue, &hdev->tx_work);
2469 }
2470 
2471 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
2472 						 __u16 handle)
2473 {
2474 	struct hci_chan *chan;
2475 
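	/* On a BR/EDR controller the handle identifies a connection
	 * directly; on an AMP controller it identifies a logical link
	 * (hci_chan), so map it back to the owning connection. */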
2476 	switch (hdev->dev_type) {
2477 	case HCI_BREDR:
2478 		return hci_conn_hash_lookup_handle(hdev, handle);
2479 	case HCI_AMP:
2480 		chan = hci_chan_lookup_handle(hdev, handle);
2481 		if (chan)
2482 			return chan->conn;
2483 		break;
2484 	default:
2485 		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2486 		break;
2487 	}
2488 
2489 	return NULL;
2490 }
2491 
2492 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
2493 {
2494 	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
2495 	int i;
2496 
2497 	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
2498 		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2499 		return;
2500 	}
2501 
2502 	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2503 	    ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
2504 		BT_DBG("%s bad parameters", hdev->name);
2505 		return;
2506 	}
2507 
2508 	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
2509 	       ev->num_hndl);
2510 
2511 	for (i = 0; i < ev->num_hndl; i++) {
2512 		struct hci_comp_blocks_info *info = &ev->handles[i];
2513 		struct hci_conn *conn = NULL;
2514 		__u16  handle, block_count;
2515 
2516 		handle = __le16_to_cpu(info->handle);
2517 		block_count = __le16_to_cpu(info->blocks);
2518 
2519 		conn = __hci_conn_lookup_handle(hdev, handle);
2520 		if (!conn)
2521 			continue;
2522 
2523 		conn->sent -= block_count;
2524 
2525 		switch (conn->type) {
2526 		case ACL_LINK:
2527 		case AMP_LINK:
2528 			hdev->block_cnt += block_count;
2529 			if (hdev->block_cnt > hdev->num_blocks)
2530 				hdev->block_cnt = hdev->num_blocks;
2531 			break;
2532 
2533 		default:
2534 			BT_ERR("Unknown type %d conn %p", conn->type, conn);
2535 			break;
2536 		}
2537 	}
2538 
2539 	queue_work(hdev->workqueue, &hdev->tx_work);
2540 }
2541 
2542 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2543 {
2544 	struct hci_ev_mode_change *ev = (void *) skb->data;
2545 	struct hci_conn *conn;
2546 
2547 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2548 
2549 	hci_dev_lock(hdev);
2550 
2551 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2552 	if (conn) {
2553 		conn->mode = ev->mode;
2554 
2555 		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
2556 					&conn->flags)) {
2557 			if (conn->mode == HCI_CM_ACTIVE)
2558 				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2559 			else
2560 				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2561 		}
2562 
2563 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2564 			hci_sco_setup(conn, ev->status);
2565 	}
2566 
2567 	hci_dev_unlock(hdev);
2568 }
2569 
2570 static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2571 {
2572 	struct hci_ev_pin_code_req *ev = (void *) skb->data;
2573 	struct hci_conn *conn;
2574 
2575 	BT_DBG("%s", hdev->name);
2576 
2577 	hci_dev_lock(hdev);
2578 
2579 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2580 	if (!conn)
2581 		goto unlock;
2582 
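	/* Give the user time to enter a PIN: bump the disconnect timeout
	 * to the pairing timeout for an already established connection. */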
2583 	if (conn->state == BT_CONNECTED) {
2584 		hci_conn_hold(conn);
2585 		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2586 		hci_conn_drop(conn);
2587 	}
2588 
2589 	if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
2590 		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2591 			     sizeof(ev->bdaddr), &ev->bdaddr);
2592 	else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
2593 		u8 secure;
2594 
2595 		if (conn->pending_sec_level == BT_SECURITY_HIGH)
2596 			secure = 1;
2597 		else
2598 			secure = 0;
2599 
2600 		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
2601 	}
2602 
2603 unlock:
2604 	hci_dev_unlock(hdev);
2605 }
2606 
2607 static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2608 {
2609 	struct hci_ev_link_key_req *ev = (void *) skb->data;
2610 	struct hci_cp_link_key_reply cp;
2611 	struct hci_conn *conn;
2612 	struct link_key *key;
2613 
2614 	BT_DBG("%s", hdev->name);
2615 
2616 	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2617 		return;
2618 
2619 	hci_dev_lock(hdev);
2620 
2621 	key = hci_find_link_key(hdev, &ev->bdaddr);
2622 	if (!key) {
2623 		BT_DBG("%s link key not found for %pMR", hdev->name,
2624 		       &ev->bdaddr);
2625 		goto not_found;
2626 	}
2627 
2628 	BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
2629 	       &ev->bdaddr);
2630 
2631 	if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
2632 	    key->type == HCI_LK_DEBUG_COMBINATION) {
2633 		BT_DBG("%s ignoring debug key", hdev->name);
2634 		goto not_found;
2635 	}
2636 
2637 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2638 	if (conn) {
2639 		if (key->type == HCI_LK_UNAUTH_COMBINATION &&
2640 		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
2641 			BT_DBG("%s ignoring unauthenticated key", hdev->name);
2642 			goto not_found;
2643 		}
2644 
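		/* A combination key derived from a PIN shorter than 16
		 * digits is not strong enough for a high security level. */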
2645 		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
2646 		    conn->pending_sec_level == BT_SECURITY_HIGH) {
2647 			BT_DBG("%s ignoring key that is too weak for high security",
2648 			       hdev->name);
2649 			goto not_found;
2650 		}
2651 
2652 		conn->key_type = key->type;
2653 		conn->pin_length = key->pin_len;
2654 	}
2655 
2656 	bacpy(&cp.bdaddr, &ev->bdaddr);
2657 	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
2658 
2659 	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
2660 
2661 	hci_dev_unlock(hdev);
2662 
2663 	return;
2664 
2665 not_found:
2666 	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
2667 	hci_dev_unlock(hdev);
2668 }
2669 
2670 static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
2671 {
2672 	struct hci_ev_link_key_notify *ev = (void *) skb->data;
2673 	struct hci_conn *conn;
2674 	u8 pin_len = 0;
2675 
2676 	BT_DBG("%s", hdev->name);
2677 
2678 	hci_dev_lock(hdev);
2679 
2680 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2681 	if (conn) {
2682 		hci_conn_hold(conn);
2683 		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2684 		pin_len = conn->pin_length;
2685 
2686 		if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
2687 			conn->key_type = ev->key_type;
2688 
2689 		hci_conn_drop(conn);
2690 	}
2691 
2692 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
2693 		hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
2694 				 ev->key_type, pin_len);
2695 
2696 	hci_dev_unlock(hdev);
2697 }
2698 
2699 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
2700 {
2701 	struct hci_ev_clock_offset *ev = (void *) skb->data;
2702 	struct hci_conn *conn;
2703 
2704 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2705 
2706 	hci_dev_lock(hdev);
2707 
2708 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2709 	if (conn && !ev->status) {
2710 		struct inquiry_entry *ie;
2711 
2712 		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2713 		if (ie) {
2714 			ie->data.clock_offset = ev->clock_offset;
2715 			ie->timestamp = jiffies;
2716 		}
2717 	}
2718 
2719 	hci_dev_unlock(hdev);
2720 }
2721 
2722 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2723 {
2724 	struct hci_ev_pkt_type_change *ev = (void *) skb->data;
2725 	struct hci_conn *conn;
2726 
2727 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2728 
2729 	hci_dev_lock(hdev);
2730 
2731 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2732 	if (conn && !ev->status)
2733 		conn->pkt_type = __le16_to_cpu(ev->pkt_type);
2734 
2735 	hci_dev_unlock(hdev);
2736 }
2737 
2738 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
2739 {
2740 	struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
2741 	struct inquiry_entry *ie;
2742 
2743 	BT_DBG("%s", hdev->name);
2744 
2745 	hci_dev_lock(hdev);
2746 
2747 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2748 	if (ie) {
2749 		ie->data.pscan_rep_mode = ev->pscan_rep_mode;
2750 		ie->timestamp = jiffies;
2751 	}
2752 
2753 	hci_dev_unlock(hdev);
2754 }
2755 
2756 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
2757 					     struct sk_buff *skb)
2758 {
2759 	struct inquiry_data data;
2760 	int num_rsp = *((__u8 *) skb->data);
2761 	bool name_known, ssp;
2762 
2763 	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2764 
2765 	if (!num_rsp)
2766 		return;
2767 
2768 	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
2769 		return;
2770 
2771 	hci_dev_lock(hdev);
2772 
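	/* This event exists in two formats; the per-response size tells
	 * whether the optional pscan_mode field is present. */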
2773 	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
2774 		struct inquiry_info_with_rssi_and_pscan_mode *info;
2775 		info = (void *) (skb->data + 1);
2776 
2777 		for (; num_rsp; num_rsp--, info++) {
2778 			bacpy(&data.bdaddr, &info->bdaddr);
2779 			data.pscan_rep_mode	= info->pscan_rep_mode;
2780 			data.pscan_period_mode	= info->pscan_period_mode;
2781 			data.pscan_mode		= info->pscan_mode;
2782 			memcpy(data.dev_class, info->dev_class, 3);
2783 			data.clock_offset	= info->clock_offset;
2784 			data.rssi		= info->rssi;
2785 			data.ssp_mode		= 0x00;
2786 
2787 			name_known = hci_inquiry_cache_update(hdev, &data,
2788 							      false, &ssp);
2789 			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2790 					  info->dev_class, info->rssi,
2791 					  !name_known, ssp, NULL, 0);
2792 		}
2793 	} else {
2794 		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
2795 
2796 		for (; num_rsp; num_rsp--, info++) {
2797 			bacpy(&data.bdaddr, &info->bdaddr);
2798 			data.pscan_rep_mode	= info->pscan_rep_mode;
2799 			data.pscan_period_mode	= info->pscan_period_mode;
2800 			data.pscan_mode		= 0x00;
2801 			memcpy(data.dev_class, info->dev_class, 3);
2802 			data.clock_offset	= info->clock_offset;
2803 			data.rssi		= info->rssi;
2804 			data.ssp_mode		= 0x00;
2805 			name_known = hci_inquiry_cache_update(hdev, &data,
2806 							      false, &ssp);
2807 			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2808 					  info->dev_class, info->rssi,
2809 					  !name_known, ssp, NULL, 0);
2810 		}
2811 	}
2812 
2813 	hci_dev_unlock(hdev);
2814 }
2815 
2816 static void hci_remote_ext_features_evt(struct hci_dev *hdev,
2817 					struct sk_buff *skb)
2818 {
2819 	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
2820 	struct hci_conn *conn;
2821 
2822 	BT_DBG("%s", hdev->name);
2823 
2824 	hci_dev_lock(hdev);
2825 
2826 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2827 	if (!conn)
2828 		goto unlock;
2829 
2830 	if (ev->page < HCI_MAX_PAGES)
2831 		memcpy(conn->features[ev->page], ev->features, 8);
2832 
2833 	if (!ev->status && ev->page == 0x01) {
2834 		struct inquiry_entry *ie;
2835 
2836 		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2837 		if (ie)
2838 			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
2839 
2840 		if (ev->features[0] & LMP_HOST_SSP) {
2841 			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
2842 		} else {
2843 			/* The Bluetooth specification mandates that Extended
2844 			 * Inquiry Results are only used when Secure Simple
2845 			 * Pairing is enabled, but some devices violate
2846 			 * this.
2847 			 *
2848 			 * To make these devices work, the internal SSP
2849 			 * enabled flag needs to be cleared if the remote host
2850 			 * features do not indicate SSP support. */
2851 			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
2852 		}
2853 	}
2854 
2855 	if (conn->state != BT_CONFIG)
2856 		goto unlock;
2857 
2858 	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2859 		struct hci_cp_remote_name_req cp;
2860 		memset(&cp, 0, sizeof(cp));
2861 		bacpy(&cp.bdaddr, &conn->dst);
2862 		cp.pscan_rep_mode = 0x02;
2863 		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2864 	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2865 		mgmt_device_connected(hdev, &conn->dst, conn->type,
2866 				      conn->dst_type, 0, NULL, 0,
2867 				      conn->dev_class);
2868 
2869 	if (!hci_outgoing_auth_needed(hdev, conn)) {
2870 		conn->state = BT_CONNECTED;
2871 		hci_proto_connect_cfm(conn, ev->status);
2872 		hci_conn_drop(conn);
2873 	}
2874 
2875 unlock:
2876 	hci_dev_unlock(hdev);
2877 }
2878 
2879 static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
2880 				       struct sk_buff *skb)
2881 {
2882 	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
2883 	struct hci_conn *conn;
2884 
2885 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2886 
2887 	hci_dev_lock(hdev);
2888 
2889 	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2890 	if (!conn) {
2891 		if (ev->link_type == ESCO_LINK)
2892 			goto unlock;
2893 
2894 		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
2895 		if (!conn)
2896 			goto unlock;
2897 
2898 		conn->type = SCO_LINK;
2899 	}
2900 
2901 	switch (ev->status) {
2902 	case 0x00:
2903 		conn->handle = __le16_to_cpu(ev->handle);
2904 		conn->state  = BT_CONNECTED;
2905 
2906 		hci_conn_add_sysfs(conn);
2907 		break;
2908 
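	/* For these failure codes on an outgoing (e)SCO setup, retry the
	 * setup with a restricted packet type before giving up. */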
2909 	case 0x0d:	/* Connection Rejected due to Limited Resources */
2910 	case 0x11:	/* Unsupported Feature or Parameter Value */
2911 	case 0x1c:	/* SCO interval rejected */
2912 	case 0x1a:	/* Unsupported Remote Feature */
2913 	case 0x1f:	/* Unspecified error */
2914 		if (conn->out) {
2915 			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
2916 					(hdev->esco_type & EDR_ESCO_MASK);
2917 			if (hci_setup_sync(conn, conn->link->handle))
2918 				goto unlock;
2919 		}
2920 		/* fall through */
2921 
2922 	default:
2923 		conn->state = BT_CLOSED;
2924 		break;
2925 	}
2926 
2927 	hci_proto_connect_cfm(conn, ev->status);
2928 	if (ev->status)
2929 		hci_conn_del(conn);
2930 
2931 unlock:
2932 	hci_dev_unlock(hdev);
2933 }
2934 
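/* Return the length of the significant part of an EIR buffer, that is
 * everything up to (but not including) the first zero-length field.
 */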
2935 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
2936 {
2937 	size_t parsed = 0;
2938 
2939 	while (parsed < eir_len) {
2940 		u8 field_len = eir[0];
2941 
2942 		if (field_len == 0)
2943 			return parsed;
2944 
2945 		parsed += field_len + 1;
2946 		eir += field_len + 1;
2947 	}
2948 
2949 	return eir_len;
2950 }
2951 
2952 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
2953 					    struct sk_buff *skb)
2954 {
2955 	struct inquiry_data data;
2956 	struct extended_inquiry_info *info = (void *) (skb->data + 1);
2957 	int num_rsp = *((__u8 *) skb->data);
2958 	size_t eir_len;
2959 
2960 	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2961 
2962 	if (!num_rsp)
2963 		return;
2964 
2965 	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
2966 		return;
2967 
2968 	hci_dev_lock(hdev);
2969 
2970 	for (; num_rsp; num_rsp--, info++) {
2971 		bool name_known, ssp;
2972 
2973 		bacpy(&data.bdaddr, &info->bdaddr);
2974 		data.pscan_rep_mode	= info->pscan_rep_mode;
2975 		data.pscan_period_mode	= info->pscan_period_mode;
2976 		data.pscan_mode		= 0x00;
2977 		memcpy(data.dev_class, info->dev_class, 3);
2978 		data.clock_offset	= info->clock_offset;
2979 		data.rssi		= info->rssi;
2980 		data.ssp_mode		= 0x01;
2981 
2982 		if (test_bit(HCI_MGMT, &hdev->dev_flags))
2983 			name_known = eir_has_data_type(info->data,
2984 						       sizeof(info->data),
2985 						       EIR_NAME_COMPLETE);
2986 		else
2987 			name_known = true;
2988 
2989 		name_known = hci_inquiry_cache_update(hdev, &data, name_known,
2990 						      &ssp);
2991 		eir_len = eir_get_length(info->data, sizeof(info->data));
2992 		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2993 				  info->dev_class, info->rssi, !name_known,
2994 				  ssp, info->data, eir_len);
2995 	}
2996 
2997 	hci_dev_unlock(hdev);
2998 }
2999 
3000 static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
3001 					 struct sk_buff *skb)
3002 {
3003 	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
3004 	struct hci_conn *conn;
3005 
3006 	BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
3007 	       __le16_to_cpu(ev->handle));
3008 
3009 	hci_dev_lock(hdev);
3010 
3011 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3012 	if (!conn)
3013 		goto unlock;
3014 
3015 	if (!ev->status)
3016 		conn->sec_level = conn->pending_sec_level;
3017 
3018 	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3019 
3020 	if (ev->status && conn->state == BT_CONNECTED) {
3021 		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3022 		hci_conn_drop(conn);
3023 		goto unlock;
3024 	}
3025 
3026 	if (conn->state == BT_CONFIG) {
3027 		if (!ev->status)
3028 			conn->state = BT_CONNECTED;
3029 
3030 		hci_proto_connect_cfm(conn, ev->status);
3031 		hci_conn_drop(conn);
3032 	} else {
3033 		hci_auth_cfm(conn, ev->status);
3034 
3035 		hci_conn_hold(conn);
3036 		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3037 		hci_conn_drop(conn);
3038 	}
3039 
3040 unlock:
3041 	hci_dev_unlock(hdev);
3042 }
3043 
3044 static u8 hci_get_auth_req(struct hci_conn *conn)
3045 {
3046 	/* If the remote requests dedicated bonding, follow that lead */
3047 	if (conn->remote_auth == HCI_AT_DEDICATED_BONDING ||
3048 	    conn->remote_auth == HCI_AT_DEDICATED_BONDING_MITM) {
3049 		/* If both remote and local IO capabilities allow MITM
3050 		 * protection, require it; otherwise don't. */
3051 		if (conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT ||
3052 		    conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)
3053 			return HCI_AT_DEDICATED_BONDING;
3054 		else
3055 			return HCI_AT_DEDICATED_BONDING_MITM;
3056 	}
3057 
3058 	/* If the remote requests no-bonding, follow that lead */
3059 	if (conn->remote_auth == HCI_AT_NO_BONDING ||
3060 	    conn->remote_auth == HCI_AT_NO_BONDING_MITM)
3061 		return conn->remote_auth | (conn->auth_type & 0x01);
3062 
3063 	return conn->auth_type;
3064 }
3065 
3066 static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3067 {
3068 	struct hci_ev_io_capa_request *ev = (void *) skb->data;
3069 	struct hci_conn *conn;
3070 
3071 	BT_DBG("%s", hdev->name);
3072 
3073 	hci_dev_lock(hdev);
3074 
3075 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3076 	if (!conn)
3077 		goto unlock;
3078 
3079 	hci_conn_hold(conn);
3080 
3081 	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3082 		goto unlock;
3083 
3084 	if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
3085 	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
3086 		struct hci_cp_io_capability_reply cp;
3087 
3088 		bacpy(&cp.bdaddr, &ev->bdaddr);
3089 		/* Change the IO capability from KeyboardDisplay to DisplayYesNo,
3090 		 * since the BT spec does not define KeyboardDisplay for this reply. */
3091 		cp.capability = (conn->io_capability == 0x04) ?
3092 				HCI_IO_DISPLAY_YESNO : conn->io_capability;
3093 		conn->auth_type = hci_get_auth_req(conn);
3094 		cp.authentication = conn->auth_type;
3095 
3096 		if (hci_find_remote_oob_data(hdev, &conn->dst) &&
3097 		    (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
3098 			cp.oob_data = 0x01;
3099 		else
3100 			cp.oob_data = 0x00;
3101 
3102 		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
3103 			     sizeof(cp), &cp);
3104 	} else {
3105 		struct hci_cp_io_capability_neg_reply cp;
3106 
3107 		bacpy(&cp.bdaddr, &ev->bdaddr);
3108 		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
3109 
3110 		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
3111 			     sizeof(cp), &cp);
3112 	}
3113 
3114 unlock:
3115 	hci_dev_unlock(hdev);
3116 }
3117 
3118 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3119 {
3120 	struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3121 	struct hci_conn *conn;
3122 
3123 	BT_DBG("%s", hdev->name);
3124 
3125 	hci_dev_lock(hdev);
3126 
3127 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3128 	if (!conn)
3129 		goto unlock;
3130 
3131 	conn->remote_cap = ev->capability;
3132 	conn->remote_auth = ev->authentication;
3133 	if (ev->oob_data)
3134 		set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3135 
3136 unlock:
3137 	hci_dev_unlock(hdev);
3138 }
3139 
3140 static void hci_user_confirm_request_evt(struct hci_dev *hdev,
3141 					 struct sk_buff *skb)
3142 {
3143 	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
3144 	int loc_mitm, rem_mitm, confirm_hint = 0;
3145 	struct hci_conn *conn;
3146 
3147 	BT_DBG("%s", hdev->name);
3148 
3149 	hci_dev_lock(hdev);
3150 
3151 	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3152 		goto unlock;
3153 
3154 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3155 	if (!conn)
3156 		goto unlock;
3157 
3158 	loc_mitm = (conn->auth_type & 0x01);
3159 	rem_mitm = (conn->remote_auth & 0x01);
3160 
3161 	/* If we require MITM but the remote device can't provide it
3162 	 * (it has NoInputNoOutput), then reject the confirmation
3163 	 * request. The only exception is when we're the dedicated
3164 	 * bonding initiator (connect_cfm_cb set), since then we always
3165 	 * have the MITM bit set. */
3166 	if (!conn->connect_cfm_cb && loc_mitm &&
3167 	    conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
3168 		BT_DBG("Rejecting request: remote device can't provide MITM");
3169 		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
3170 			     sizeof(ev->bdaddr), &ev->bdaddr);
3171 		goto unlock;
3172 	}
3173 
3174 	/* If neither side requires MITM protection, auto-accept */
3175 	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
3176 	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
3177 
3178 		/* If we're not the initiator, request authorization to
3179 		 * proceed from user space (mgmt_user_confirm with
3180 		 * confirm_hint set to 1). */
3181 		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3182 			BT_DBG("Confirming auto-accept as acceptor");
3183 			confirm_hint = 1;
3184 			goto confirm;
3185 		}
3186 
3187 		BT_DBG("Auto-accept of user confirmation with %ums delay",
3188 		       hdev->auto_accept_delay);
3189 
3190 		if (hdev->auto_accept_delay > 0) {
3191 			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
3192 			queue_delayed_work(conn->hdev->workqueue,
3193 					   &conn->auto_accept_work, delay);
3194 			goto unlock;
3195 		}
3196 
3197 		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
3198 			     sizeof(ev->bdaddr), &ev->bdaddr);
3199 		goto unlock;
3200 	}
3201 
3202 confirm:
3203 	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, ev->passkey,
3204 				  confirm_hint);
3205 
3206 unlock:
3207 	hci_dev_unlock(hdev);
3208 }
3209 
3210 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
3211 					 struct sk_buff *skb)
3212 {
3213 	struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3214 
3215 	BT_DBG("%s", hdev->name);
3216 
3217 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
3218 		mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3219 }
3220 
3221 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
3222 					struct sk_buff *skb)
3223 {
3224 	struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
3225 	struct hci_conn *conn;
3226 
3227 	BT_DBG("%s", hdev->name);
3228 
3229 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3230 	if (!conn)
3231 		return;
3232 
3233 	conn->passkey_notify = __le32_to_cpu(ev->passkey);
3234 	conn->passkey_entered = 0;
3235 
3236 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
3237 		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3238 					 conn->dst_type, conn->passkey_notify,
3239 					 conn->passkey_entered);
3240 }
3241 
3242 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3243 {
3244 	struct hci_ev_keypress_notify *ev = (void *) skb->data;
3245 	struct hci_conn *conn;
3246 
3247 	BT_DBG("%s", hdev->name);
3248 
3249 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3250 	if (!conn)
3251 		return;
3252 
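	/* Track how many passkey digits the remote user has entered so
	 * far and report the count to user space below. */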
3253 	switch (ev->type) {
3254 	case HCI_KEYPRESS_STARTED:
3255 		conn->passkey_entered = 0;
3256 		return;
3257 
3258 	case HCI_KEYPRESS_ENTERED:
3259 		conn->passkey_entered++;
3260 		break;
3261 
3262 	case HCI_KEYPRESS_ERASED:
3263 		conn->passkey_entered--;
3264 		break;
3265 
3266 	case HCI_KEYPRESS_CLEARED:
3267 		conn->passkey_entered = 0;
3268 		break;
3269 
3270 	case HCI_KEYPRESS_COMPLETED:
3271 		return;
3272 	}
3273 
3274 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
3275 		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3276 					 conn->dst_type, conn->passkey_notify,
3277 					 conn->passkey_entered);
3278 }
3279 
3280 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
3281 					 struct sk_buff *skb)
3282 {
3283 	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
3284 	struct hci_conn *conn;
3285 
3286 	BT_DBG("%s", hdev->name);
3287 
3288 	hci_dev_lock(hdev);
3289 
3290 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3291 	if (!conn)
3292 		goto unlock;
3293 
3294 	/* To avoid duplicate auth_failed events to user space we check
3295 	 * the HCI_CONN_AUTH_PEND flag, which will be set if we
3296 	 * initiated the authentication. In that case a traditional
3297 	 * auth_complete event is always produced and is already mapped
3298 	 * to the mgmt_auth_failed event. */
3299 	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
3300 		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
3301 				 ev->status);
3302 
3303 	hci_conn_drop(conn);
3304 
3305 unlock:
3306 	hci_dev_unlock(hdev);
3307 }
3308 
3309 static void hci_remote_host_features_evt(struct hci_dev *hdev,
3310 					 struct sk_buff *skb)
3311 {
3312 	struct hci_ev_remote_host_features *ev = (void *) skb->data;
3313 	struct inquiry_entry *ie;
3314 	struct hci_conn *conn;
3315 
3316 	BT_DBG("%s", hdev->name);
3317 
3318 	hci_dev_lock(hdev);
3319 
3320 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3321 	if (conn)
3322 		memcpy(conn->features[1], ev->features, 8);
3323 
3324 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3325 	if (ie)
3326 		ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3327 
3328 	hci_dev_unlock(hdev);
3329 }
3330 
3331 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
3332 					    struct sk_buff *skb)
3333 {
3334 	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
3335 	struct oob_data *data;
3336 
3337 	BT_DBG("%s", hdev->name);
3338 
3339 	hci_dev_lock(hdev);
3340 
3341 	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3342 		goto unlock;
3343 
3344 	data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
3345 	if (data) {
3346 		struct hci_cp_remote_oob_data_reply cp;
3347 
3348 		bacpy(&cp.bdaddr, &ev->bdaddr);
3349 		memcpy(cp.hash, data->hash, sizeof(cp.hash));
3350 		memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));
3351 
3352 		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
3353 			     &cp);
3354 	} else {
3355 		struct hci_cp_remote_oob_data_neg_reply cp;
3356 
3357 		bacpy(&cp.bdaddr, &ev->bdaddr);
3358 		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
3359 			     &cp);
3360 	}
3361 
3362 unlock:
3363 	hci_dev_unlock(hdev);
3364 }
3365 
3366 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
3367 				      struct sk_buff *skb)
3368 {
3369 	struct hci_ev_phy_link_complete *ev = (void *) skb->data;
3370 	struct hci_conn *hcon, *bredr_hcon;
3371 
3372 	BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
3373 	       ev->status);
3374 
3375 	hci_dev_lock(hdev);
3376 
3377 	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3378 	if (!hcon) {
3379 		hci_dev_unlock(hdev);
3380 		return;
3381 	}
3382 
3383 	if (ev->status) {
3384 		hci_conn_del(hcon);
3385 		hci_dev_unlock(hdev);
3386 		return;
3387 	}
3388 
3389 	bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
3390 
3391 	hcon->state = BT_CONNECTED;
3392 	bacpy(&hcon->dst, &bredr_hcon->dst);
3393 
3394 	hci_conn_hold(hcon);
3395 	hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3396 	hci_conn_drop(hcon);
3397 
3398 	hci_conn_add_sysfs(hcon);
3399 
3400 	amp_physical_cfm(bredr_hcon, hcon);
3401 
3402 	hci_dev_unlock(hdev);
3403 }
3404 
3405 static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3406 {
3407 	struct hci_ev_logical_link_complete *ev = (void *) skb->data;
3408 	struct hci_conn *hcon;
3409 	struct hci_chan *hchan;
3410 	struct amp_mgr *mgr;
3411 
3412 	BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
3413 	       hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
3414 	       ev->status);
3415 
3416 	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3417 	if (!hcon)
3418 		return;
3419 
3420 	/* Create AMP hchan */
3421 	hchan = hci_chan_create(hcon);
3422 	if (!hchan)
3423 		return;
3424 
3425 	hchan->handle = le16_to_cpu(ev->handle);
3426 
3427 	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
3428 
3429 	mgr = hcon->amp_mgr;
3430 	if (mgr && mgr->bredr_chan) {
3431 		struct l2cap_chan *bredr_chan = mgr->bredr_chan;
3432 
3433 		l2cap_chan_lock(bredr_chan);
3434 
3435 		bredr_chan->conn->mtu = hdev->block_mtu;
3436 		l2cap_logical_cfm(bredr_chan, hchan, 0);
3437 		hci_conn_hold(hcon);
3438 
3439 		l2cap_chan_unlock(bredr_chan);
3440 	}
3441 }
3442 
3443 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
3444 					     struct sk_buff *skb)
3445 {
3446 	struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
3447 	struct hci_chan *hchan;
3448 
3449 	BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
3450 	       le16_to_cpu(ev->handle), ev->status);
3451 
3452 	if (ev->status)
3453 		return;
3454 
3455 	hci_dev_lock(hdev);
3456 
3457 	hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
3458 	if (!hchan)
3459 		goto unlock;
3460 
3461 	amp_destroy_logical_link(hchan, ev->reason);
3462 
3463 unlock:
3464 	hci_dev_unlock(hdev);
3465 }
3466 
3467 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
3468 					     struct sk_buff *skb)
3469 {
3470 	struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
3471 	struct hci_conn *hcon;
3472 
3473 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3474 
3475 	if (ev->status)
3476 		return;
3477 
3478 	hci_dev_lock(hdev);
3479 
3480 	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3481 	if (hcon) {
3482 		hcon->state = BT_CLOSED;
3483 		hci_conn_del(hcon);
3484 	}
3485 
3486 	hci_dev_unlock(hdev);
3487 }
3488 
3489 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3490 {
3491 	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
3492 	struct hci_conn *conn;
3493 
3494 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3495 
3496 	hci_dev_lock(hdev);
3497 
3498 	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
3499 	if (!conn) {
3500 		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
3501 		if (!conn) {
3502 			BT_ERR("No memory for new connection");
3503 			goto unlock;
3504 		}
3505 
3506 		conn->dst_type = ev->bdaddr_type;
3507 
3508 		/* The advertising parameters for own address type
3509 		 * define which source address and source address
3510 		 * type this connection has.
3511 		 */
3512 		if (bacmp(&conn->src, BDADDR_ANY)) {
3513 			conn->src_type = ADDR_LE_DEV_PUBLIC;
3514 		} else {
3515 			bacpy(&conn->src, &hdev->static_addr);
3516 			conn->src_type = ADDR_LE_DEV_RANDOM;
3517 		}
3518 
3519 		if (ev->role == LE_CONN_ROLE_MASTER) {
3520 			conn->out = true;
3521 			conn->link_mode |= HCI_LM_MASTER;
3522 		}
3523 	}
3524 
3525 	if (ev->status) {
3526 		mgmt_connect_failed(hdev, &conn->dst, conn->type,
3527 				    conn->dst_type, ev->status);
3528 		hci_proto_connect_cfm(conn, ev->status);
3529 		conn->state = BT_CLOSED;
3530 		hci_conn_del(conn);
3531 		goto unlock;
3532 	}
3533 
3534 	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3535 		mgmt_device_connected(hdev, &ev->bdaddr, conn->type,
3536 				      conn->dst_type, 0, NULL, 0, NULL);
3537 
3538 	conn->sec_level = BT_SECURITY_LOW;
3539 	conn->handle = __le16_to_cpu(ev->handle);
3540 	conn->state = BT_CONNECTED;
3541 
3542 	hci_conn_add_sysfs(conn);
3543 
3544 	hci_proto_connect_cfm(conn, ev->status);
3545 
3546 unlock:
3547 	hci_dev_unlock(hdev);
3548 }
3549 
3550 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
3551 {
3552 	u8 num_reports = skb->data[0];
3553 	void *ptr = &skb->data[1];
3554 	s8 rssi;
3555 
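	/* Each report is followed by a single RSSI byte, so one record
	 * occupies sizeof(*ev) + ev->length + 1 bytes. */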
3556 	while (num_reports--) {
3557 		struct hci_ev_le_advertising_info *ev = ptr;
3558 
3559 		rssi = ev->data[ev->length];
3560 		mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type,
3561 				  NULL, rssi, 0, 1, ev->data, ev->length);
3562 
3563 		ptr += sizeof(*ev) + ev->length + 1;
3564 	}
3565 }
3566 
3567 static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3568 {
3569 	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
3570 	struct hci_cp_le_ltk_reply cp;
3571 	struct hci_cp_le_ltk_neg_reply neg;
3572 	struct hci_conn *conn;
3573 	struct smp_ltk *ltk;
3574 
3575 	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
3576 
3577 	hci_dev_lock(hdev);
3578 
3579 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3580 	if (conn == NULL)
3581 		goto not_found;
3582 
3583 	ltk = hci_find_ltk(hdev, ev->ediv, ev->random);
3584 	if (ltk == NULL)
3585 		goto not_found;
3586 
3587 	memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
3588 	cp.handle = cpu_to_le16(conn->handle);
3589 
3590 	if (ltk->authenticated)
3591 		conn->pending_sec_level = BT_SECURITY_HIGH;
3592 	else
3593 		conn->pending_sec_level = BT_SECURITY_MEDIUM;
3594 
3595 	conn->enc_key_size = ltk->enc_size;
3596 
3597 	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
3598 
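	/* A short term key (STK) is only valid for a single encryption
	 * session, so remove it once it has been handed to the
	 * controller. */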
3599 	if (ltk->type & HCI_SMP_STK) {
3600 		list_del(&ltk->list);
3601 		kfree(ltk);
3602 	}
3603 
3604 	hci_dev_unlock(hdev);
3605 
3606 	return;
3607 
3608 not_found:
3609 	neg.handle = ev->handle;
3610 	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
3611 	hci_dev_unlock(hdev);
3612 }
3613 
3614 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
3615 {
3616 	struct hci_ev_le_meta *le_ev = (void *) skb->data;
3617 
3618 	skb_pull(skb, sizeof(*le_ev));
3619 
3620 	switch (le_ev->subevent) {
3621 	case HCI_EV_LE_CONN_COMPLETE:
3622 		hci_le_conn_complete_evt(hdev, skb);
3623 		break;
3624 
3625 	case HCI_EV_LE_ADVERTISING_REPORT:
3626 		hci_le_adv_report_evt(hdev, skb);
3627 		break;
3628 
3629 	case HCI_EV_LE_LTK_REQ:
3630 		hci_le_ltk_request_evt(hdev, skb);
3631 		break;
3632 
3633 	default:
3634 		break;
3635 	}
3636 }
3637 
3638 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
3639 {
3640 	struct hci_ev_channel_selected *ev = (void *) skb->data;
3641 	struct hci_conn *hcon;
3642 
3643 	BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
3644 
3645 	skb_pull(skb, sizeof(*ev));
3646 
3647 	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3648 	if (!hcon)
3649 		return;
3650 
3651 	amp_read_loc_assoc_final_data(hdev, hcon);
3652 }
3653 
3654 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
3655 {
3656 	struct hci_event_hdr *hdr = (void *) skb->data;
3657 	__u8 event = hdr->evt;
3658 
3659 	hci_dev_lock(hdev);
3660 
3661 	/* Received events are (currently) only needed when a request is
3662 	 * ongoing, so avoid unnecessary memory allocation.
3663 	 */
3664 	if (hdev->req_status == HCI_REQ_PEND) {
3665 		kfree_skb(hdev->recv_evt);
3666 		hdev->recv_evt = skb_clone(skb, GFP_KERNEL);
3667 	}
3668 
3669 	hci_dev_unlock(hdev);
3670 
3671 	skb_pull(skb, HCI_EVENT_HDR_SIZE);
3672 
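	/* If the pending command was explicitly waiting for this event,
	 * consider the command completed now. */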
3673 	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
3674 		struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
3675 		u16 opcode = __le16_to_cpu(cmd_hdr->opcode);
3676 
3677 		hci_req_cmd_complete(hdev, opcode, 0);
3678 	}
3679 
3680 	switch (event) {
3681 	case HCI_EV_INQUIRY_COMPLETE:
3682 		hci_inquiry_complete_evt(hdev, skb);
3683 		break;
3684 
3685 	case HCI_EV_INQUIRY_RESULT:
3686 		hci_inquiry_result_evt(hdev, skb);
3687 		break;
3688 
3689 	case HCI_EV_CONN_COMPLETE:
3690 		hci_conn_complete_evt(hdev, skb);
3691 		break;
3692 
3693 	case HCI_EV_CONN_REQUEST:
3694 		hci_conn_request_evt(hdev, skb);
3695 		break;
3696 
3697 	case HCI_EV_DISCONN_COMPLETE:
3698 		hci_disconn_complete_evt(hdev, skb);
3699 		break;
3700 
3701 	case HCI_EV_AUTH_COMPLETE:
3702 		hci_auth_complete_evt(hdev, skb);
3703 		break;
3704 
3705 	case HCI_EV_REMOTE_NAME:
3706 		hci_remote_name_evt(hdev, skb);
3707 		break;
3708 
3709 	case HCI_EV_ENCRYPT_CHANGE:
3710 		hci_encrypt_change_evt(hdev, skb);
3711 		break;
3712 
3713 	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
3714 		hci_change_link_key_complete_evt(hdev, skb);
3715 		break;
3716 
3717 	case HCI_EV_REMOTE_FEATURES:
3718 		hci_remote_features_evt(hdev, skb);
3719 		break;
3720 
3721 	case HCI_EV_CMD_COMPLETE:
3722 		hci_cmd_complete_evt(hdev, skb);
3723 		break;
3724 
3725 	case HCI_EV_CMD_STATUS:
3726 		hci_cmd_status_evt(hdev, skb);
3727 		break;
3728 
3729 	case HCI_EV_ROLE_CHANGE:
3730 		hci_role_change_evt(hdev, skb);
3731 		break;
3732 
3733 	case HCI_EV_NUM_COMP_PKTS:
3734 		hci_num_comp_pkts_evt(hdev, skb);
3735 		break;
3736 
3737 	case HCI_EV_MODE_CHANGE:
3738 		hci_mode_change_evt(hdev, skb);
3739 		break;
3740 
3741 	case HCI_EV_PIN_CODE_REQ:
3742 		hci_pin_code_request_evt(hdev, skb);
3743 		break;
3744 
3745 	case HCI_EV_LINK_KEY_REQ:
3746 		hci_link_key_request_evt(hdev, skb);
3747 		break;
3748 
3749 	case HCI_EV_LINK_KEY_NOTIFY:
3750 		hci_link_key_notify_evt(hdev, skb);
3751 		break;
3752 
3753 	case HCI_EV_CLOCK_OFFSET:
3754 		hci_clock_offset_evt(hdev, skb);
3755 		break;
3756 
3757 	case HCI_EV_PKT_TYPE_CHANGE:
3758 		hci_pkt_type_change_evt(hdev, skb);
3759 		break;
3760 
3761 	case HCI_EV_PSCAN_REP_MODE:
3762 		hci_pscan_rep_mode_evt(hdev, skb);
3763 		break;
3764 
3765 	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
3766 		hci_inquiry_result_with_rssi_evt(hdev, skb);
3767 		break;
3768 
3769 	case HCI_EV_REMOTE_EXT_FEATURES:
3770 		hci_remote_ext_features_evt(hdev, skb);
3771 		break;
3772 
3773 	case HCI_EV_SYNC_CONN_COMPLETE:
3774 		hci_sync_conn_complete_evt(hdev, skb);
3775 		break;
3776 
3777 	case HCI_EV_EXTENDED_INQUIRY_RESULT:
3778 		hci_extended_inquiry_result_evt(hdev, skb);
3779 		break;
3780 
3781 	case HCI_EV_KEY_REFRESH_COMPLETE:
3782 		hci_key_refresh_complete_evt(hdev, skb);
3783 		break;
3784 
3785 	case HCI_EV_IO_CAPA_REQUEST:
3786 		hci_io_capa_request_evt(hdev, skb);
3787 		break;
3788 
3789 	case HCI_EV_IO_CAPA_REPLY:
3790 		hci_io_capa_reply_evt(hdev, skb);
3791 		break;
3792 
3793 	case HCI_EV_USER_CONFIRM_REQUEST:
3794 		hci_user_confirm_request_evt(hdev, skb);
3795 		break;
3796 
3797 	case HCI_EV_USER_PASSKEY_REQUEST:
3798 		hci_user_passkey_request_evt(hdev, skb);
3799 		break;
3800 
3801 	case HCI_EV_USER_PASSKEY_NOTIFY:
3802 		hci_user_passkey_notify_evt(hdev, skb);
3803 		break;
3804 
3805 	case HCI_EV_KEYPRESS_NOTIFY:
3806 		hci_keypress_notify_evt(hdev, skb);
3807 		break;
3808 
3809 	case HCI_EV_SIMPLE_PAIR_COMPLETE:
3810 		hci_simple_pair_complete_evt(hdev, skb);
3811 		break;
3812 
3813 	case HCI_EV_REMOTE_HOST_FEATURES:
3814 		hci_remote_host_features_evt(hdev, skb);
3815 		break;
3816 
3817 	case HCI_EV_LE_META:
3818 		hci_le_meta_evt(hdev, skb);
3819 		break;
3820 
3821 	case HCI_EV_CHANNEL_SELECTED:
3822 		hci_chan_selected_evt(hdev, skb);
3823 		break;
3824 
3825 	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
3826 		hci_remote_oob_data_request_evt(hdev, skb);
3827 		break;
3828 
3829 	case HCI_EV_PHY_LINK_COMPLETE:
3830 		hci_phy_link_complete_evt(hdev, skb);
3831 		break;
3832 
3833 	case HCI_EV_LOGICAL_LINK_COMPLETE:
3834 		hci_loglink_complete_evt(hdev, skb);
3835 		break;
3836 
3837 	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
3838 		hci_disconn_loglink_complete_evt(hdev, skb);
3839 		break;
3840 
3841 	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
3842 		hci_disconn_phylink_complete_evt(hdev, skb);
3843 		break;
3844 
3845 	case HCI_EV_NUM_COMP_BLOCKS:
3846 		hci_num_comp_blocks_evt(hdev, skb);
3847 		break;
3848 
3849 	default:
3850 		BT_DBG("%s event 0x%2.2x", hdev->name, event);
3851 		break;
3852 	}
3853 
3854 	kfree_skb(skb);
3855 	hdev->stat.evt_rx++;
3856 }
3857