1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4 
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI event handling. */
26 
27 #include <asm/unaligned.h>
28 
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
32 
33 #include "a2mp.h"
34 #include "amp.h"
35 
36 /* Handle HCI Event packets */
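/* Naming convention used below: hci_cc_* handlers process Command Complete
 * events, where the reply parameters start at skb->data, while hci_cs_*
 * handlers process Command Status events, which carry only a status byte.
 * hci_sent_cmd_data() recovers the parameters of the command that was sent
 * so the result can be matched back to the original request.
 */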
37 
38 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
39 {
40 	__u8 status = *((__u8 *) skb->data);
41 
42 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
43 
44 	if (status)
45 		return;
46 
47 	clear_bit(HCI_INQUIRY, &hdev->flags);
48 	smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */
49 	wake_up_bit(&hdev->flags, HCI_INQUIRY);
50 
51 	hci_conn_check_pending(hdev);
52 }
53 
54 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
55 {
56 	__u8 status = *((__u8 *) skb->data);
57 
58 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
59 
60 	if (status)
61 		return;
62 
63 	set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
64 }
65 
66 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
67 {
68 	__u8 status = *((__u8 *) skb->data);
69 
70 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
71 
72 	if (status)
73 		return;
74 
75 	clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
76 
77 	hci_conn_check_pending(hdev);
78 }
79 
80 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
81 					  struct sk_buff *skb)
82 {
83 	BT_DBG("%s", hdev->name);
84 }
85 
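/* HCI_Role_Discovery reports the local role on the given handle: 0x00 means
 * master and 0x01 means slave, which is why a non-zero role clears
 * HCI_LM_MASTER below.
 */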
86 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
87 {
88 	struct hci_rp_role_discovery *rp = (void *) skb->data;
89 	struct hci_conn *conn;
90 
91 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
92 
93 	if (rp->status)
94 		return;
95 
96 	hci_dev_lock(hdev);
97 
98 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
99 	if (conn) {
100 		if (rp->role)
101 			conn->link_mode &= ~HCI_LM_MASTER;
102 		else
103 			conn->link_mode |= HCI_LM_MASTER;
104 	}
105 
106 	hci_dev_unlock(hdev);
107 }
108 
109 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
110 {
111 	struct hci_rp_read_link_policy *rp = (void *) skb->data;
112 	struct hci_conn *conn;
113 
114 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
115 
116 	if (rp->status)
117 		return;
118 
119 	hci_dev_lock(hdev);
120 
121 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
122 	if (conn)
123 		conn->link_policy = __le16_to_cpu(rp->policy);
124 
125 	hci_dev_unlock(hdev);
126 }
127 
128 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
129 {
130 	struct hci_rp_write_link_policy *rp = (void *) skb->data;
131 	struct hci_conn *conn;
132 	void *sent;
133 
134 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
135 
136 	if (rp->status)
137 		return;
138 
139 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
140 	if (!sent)
141 		return;
142 
143 	hci_dev_lock(hdev);
144 
145 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
146 	if (conn)
147 		conn->link_policy = get_unaligned_le16(sent + 2);
148 
149 	hci_dev_unlock(hdev);
150 }
151 
152 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
153 					struct sk_buff *skb)
154 {
155 	struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
156 
157 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
158 
159 	if (rp->status)
160 		return;
161 
162 	hdev->link_policy = __le16_to_cpu(rp->policy);
163 }
164 
165 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
166 					 struct sk_buff *skb)
167 {
168 	__u8 status = *((__u8 *) skb->data);
169 	void *sent;
170 
171 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
172 
173 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
174 	if (!sent)
175 		return;
176 
177 	if (!status)
178 		hdev->link_policy = get_unaligned_le16(sent);
179 }
180 
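/* After HCI_Reset completes, drop all non-persistent state so the stack's
 * view matches the freshly reset controller: discovery state, cached TX
 * power values, advertising and scan response data, and the LE scan type.
 */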
181 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
182 {
183 	__u8 status = *((__u8 *) skb->data);
184 
185 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
186 
187 	clear_bit(HCI_RESET, &hdev->flags);
188 
189 	/* Reset all non-persistent flags */
190 	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
191 
192 	hdev->discovery.state = DISCOVERY_STOPPED;
193 	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
194 	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
195 
196 	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
197 	hdev->adv_data_len = 0;
198 
199 	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
200 	hdev->scan_rsp_data_len = 0;
201 
202 	hdev->le_scan_type = LE_SCAN_PASSIVE;
203 
204 	hdev->ssp_debug_mode = 0;
205 }
206 
207 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
208 {
209 	__u8 status = *((__u8 *) skb->data);
210 	void *sent;
211 
212 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
213 
214 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
215 	if (!sent)
216 		return;
217 
218 	hci_dev_lock(hdev);
219 
220 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
221 		mgmt_set_local_name_complete(hdev, sent, status);
222 	else if (!status)
223 		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
224 
225 	hci_dev_unlock(hdev);
226 }
227 
228 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
229 {
230 	struct hci_rp_read_local_name *rp = (void *) skb->data;
231 
232 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
233 
234 	if (rp->status)
235 		return;
236 
237 	if (test_bit(HCI_SETUP, &hdev->dev_flags))
238 		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
239 }
240 
241 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
242 {
243 	__u8 status = *((__u8 *) skb->data);
244 	void *sent;
245 
246 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
247 
248 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
249 	if (!sent)
250 		return;
251 
252 	if (!status) {
253 		__u8 param = *((__u8 *) sent);
254 
255 		if (param == AUTH_ENABLED)
256 			set_bit(HCI_AUTH, &hdev->flags);
257 		else
258 			clear_bit(HCI_AUTH, &hdev->flags);
259 	}
260 
261 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
262 		mgmt_auth_enable_complete(hdev, status);
263 }
264 
265 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
266 {
267 	__u8 status = *((__u8 *) skb->data);
268 	void *sent;
269 
270 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
271 
272 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
273 	if (!sent)
274 		return;
275 
276 	if (!status) {
277 		__u8 param = *((__u8 *) sent);
278 
279 		if (param)
280 			set_bit(HCI_ENCRYPT, &hdev->flags);
281 		else
282 			clear_bit(HCI_ENCRYPT, &hdev->flags);
283 	}
284 }
285 
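/* Write_Scan_Enable controls inquiry scan (SCAN_INQUIRY, i.e. discoverable)
 * and page scan (SCAN_PAGE, i.e. connectable). The previous flag values are
 * sampled with test_and_clear_bit() so mgmt is only told about actual state
 * changes.
 */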
286 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
287 {
288 	__u8 param, status = *((__u8 *) skb->data);
289 	int old_pscan, old_iscan;
290 	void *sent;
291 
292 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
293 
294 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
295 	if (!sent)
296 		return;
297 
298 	param = *((__u8 *) sent);
299 
300 	hci_dev_lock(hdev);
301 
302 	if (status) {
303 		mgmt_write_scan_failed(hdev, param, status);
304 		hdev->discov_timeout = 0;
305 		goto done;
306 	}
307 
308 	/* We need to ensure that we set this back on if someone changed
309 	 * the scan mode through a raw HCI socket.
310 	 */
311 	set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
312 
313 	old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
314 	old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
315 
316 	if (param & SCAN_INQUIRY) {
317 		set_bit(HCI_ISCAN, &hdev->flags);
318 		if (!old_iscan)
319 			mgmt_discoverable(hdev, 1);
320 	} else if (old_iscan)
321 		mgmt_discoverable(hdev, 0);
322 
323 	if (param & SCAN_PAGE) {
324 		set_bit(HCI_PSCAN, &hdev->flags);
325 		if (!old_pscan)
326 			mgmt_connectable(hdev, 1);
327 	} else if (old_pscan)
328 		mgmt_connectable(hdev, 0);
329 
330 done:
331 	hci_dev_unlock(hdev);
332 }
333 
334 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
335 {
336 	struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
337 
338 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
339 
340 	if (rp->status)
341 		return;
342 
343 	memcpy(hdev->dev_class, rp->dev_class, 3);
344 
345 	BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
346 	       hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
347 }
348 
349 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
350 {
351 	__u8 status = *((__u8 *) skb->data);
352 	void *sent;
353 
354 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
355 
356 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
357 	if (!sent)
358 		return;
359 
360 	hci_dev_lock(hdev);
361 
362 	if (status == 0)
363 		memcpy(hdev->dev_class, sent, 3);
364 
365 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
366 		mgmt_set_class_of_dev_complete(hdev, sent, status);
367 
368 	hci_dev_unlock(hdev);
369 }
370 
371 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
372 {
373 	struct hci_rp_read_voice_setting *rp = (void *) skb->data;
374 	__u16 setting;
375 
376 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
377 
378 	if (rp->status)
379 		return;
380 
381 	setting = __le16_to_cpu(rp->voice_setting);
382 
383 	if (hdev->voice_setting == setting)
384 		return;
385 
386 	hdev->voice_setting = setting;
387 
388 	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
389 
390 	if (hdev->notify)
391 		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
392 }
393 
394 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
395 				       struct sk_buff *skb)
396 {
397 	__u8 status = *((__u8 *) skb->data);
398 	__u16 setting;
399 	void *sent;
400 
401 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
402 
403 	if (status)
404 		return;
405 
406 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
407 	if (!sent)
408 		return;
409 
410 	setting = get_unaligned_le16(sent);
411 
412 	if (hdev->voice_setting == setting)
413 		return;
414 
415 	hdev->voice_setting = setting;
416 
417 	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
418 
419 	if (hdev->notify)
420 		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
421 }
422 
423 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
424 					  struct sk_buff *skb)
425 {
426 	struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
427 
428 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
429 
430 	if (rp->status)
431 		return;
432 
433 	hdev->num_iac = rp->num_iac;
434 
435 	BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
436 }
437 
438 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
439 {
440 	__u8 status = *((__u8 *) skb->data);
441 	struct hci_cp_write_ssp_mode *sent;
442 
443 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
444 
445 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
446 	if (!sent)
447 		return;
448 
449 	if (!status) {
450 		if (sent->mode)
451 			hdev->features[1][0] |= LMP_HOST_SSP;
452 		else
453 			hdev->features[1][0] &= ~LMP_HOST_SSP;
454 	}
455 
456 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
457 		mgmt_ssp_enable_complete(hdev, sent->mode, status);
458 	else if (!status) {
459 		if (sent->mode)
460 			set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
461 		else
462 			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
463 	}
464 }
465 
466 static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
467 {
468 	u8 status = *((u8 *) skb->data);
469 	struct hci_cp_write_sc_support *sent;
470 
471 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
472 
473 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
474 	if (!sent)
475 		return;
476 
477 	if (!status) {
478 		if (sent->support)
479 			hdev->features[1][0] |= LMP_HOST_SC;
480 		else
481 			hdev->features[1][0] &= ~LMP_HOST_SC;
482 	}
483 
484 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
485 		mgmt_sc_enable_complete(hdev, sent->support, status);
486 	else if (!status) {
487 		if (sent->support)
488 			set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
489 		else
490 			clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
491 	}
492 }
493 
494 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
495 {
496 	struct hci_rp_read_local_version *rp = (void *) skb->data;
497 
498 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
499 
500 	if (rp->status)
501 		return;
502 
503 	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
504 		hdev->hci_ver = rp->hci_ver;
505 		hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
506 		hdev->lmp_ver = rp->lmp_ver;
507 		hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
508 		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
509 	}
510 }
511 
512 static void hci_cc_read_local_commands(struct hci_dev *hdev,
513 				       struct sk_buff *skb)
514 {
515 	struct hci_rp_read_local_commands *rp = (void *) skb->data;
516 
517 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
518 
519 	if (rp->status)
520 		return;
521 
522 	if (test_bit(HCI_SETUP, &hdev->dev_flags))
523 		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
524 }
525 
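/* The local LMP feature bits determine which ACL packet types and (e)SCO
 * air modes the controller supports; translate them into the pkt_type and
 * esco_type masks that are consulted when connections are set up.
 */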
526 static void hci_cc_read_local_features(struct hci_dev *hdev,
527 				       struct sk_buff *skb)
528 {
529 	struct hci_rp_read_local_features *rp = (void *) skb->data;
530 
531 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
532 
533 	if (rp->status)
534 		return;
535 
536 	memcpy(hdev->features, rp->features, 8);
537 
538 	/* Adjust default settings according to the features
539 	 * supported by the device. */
540 
541 	if (hdev->features[0][0] & LMP_3SLOT)
542 		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
543 
544 	if (hdev->features[0][0] & LMP_5SLOT)
545 		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
546 
547 	if (hdev->features[0][1] & LMP_HV2) {
548 		hdev->pkt_type  |= (HCI_HV2);
549 		hdev->esco_type |= (ESCO_HV2);
550 	}
551 
552 	if (hdev->features[0][1] & LMP_HV3) {
553 		hdev->pkt_type  |= (HCI_HV3);
554 		hdev->esco_type |= (ESCO_HV3);
555 	}
556 
557 	if (lmp_esco_capable(hdev))
558 		hdev->esco_type |= (ESCO_EV3);
559 
560 	if (hdev->features[0][4] & LMP_EV4)
561 		hdev->esco_type |= (ESCO_EV4);
562 
563 	if (hdev->features[0][4] & LMP_EV5)
564 		hdev->esco_type |= (ESCO_EV5);
565 
566 	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
567 		hdev->esco_type |= (ESCO_2EV3);
568 
569 	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
570 		hdev->esco_type |= (ESCO_3EV3);
571 
572 	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
573 		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
574 }
575 
576 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
577 					   struct sk_buff *skb)
578 {
579 	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
580 
581 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
582 
583 	if (rp->status)
584 		return;
585 
586 	if (hdev->max_page < rp->max_page)
587 		hdev->max_page = rp->max_page;
588 
589 	if (rp->page < HCI_MAX_PAGES)
590 		memcpy(hdev->features[rp->page], rp->features, 8);
591 }
592 
593 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
594 					  struct sk_buff *skb)
595 {
596 	struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
597 
598 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
599 
600 	if (!rp->status)
601 		hdev->flow_ctl_mode = rp->mode;
602 }
603 
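/* The reported buffer sizes drive packet-based flow control towards the
 * controller: acl_cnt/sco_cnt start at the maximum number of outstanding
 * packets and are decremented as data is queued. Controllers flagged with
 * HCI_QUIRK_FIXUP_BUFFER_SIZE get sane SCO defaults forced instead.
 */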
604 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
605 {
606 	struct hci_rp_read_buffer_size *rp = (void *) skb->data;
607 
608 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
609 
610 	if (rp->status)
611 		return;
612 
613 	hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
614 	hdev->sco_mtu  = rp->sco_mtu;
615 	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
616 	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
617 
618 	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
619 		hdev->sco_mtu  = 64;
620 		hdev->sco_pkts = 8;
621 	}
622 
623 	hdev->acl_cnt = hdev->acl_pkts;
624 	hdev->sco_cnt = hdev->sco_pkts;
625 
626 	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
627 	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
628 }
629 
630 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
631 {
632 	struct hci_rp_read_bd_addr *rp = (void *) skb->data;
633 
634 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
635 
636 	if (!rp->status)
637 		bacpy(&hdev->bdaddr, &rp->bdaddr);
638 }
639 
640 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
641 					   struct sk_buff *skb)
642 {
643 	struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
644 
645 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
646 
647 	if (test_bit(HCI_INIT, &hdev->flags) && !rp->status) {
648 		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
649 		hdev->page_scan_window = __le16_to_cpu(rp->window);
650 	}
651 }
652 
653 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
654 					    struct sk_buff *skb)
655 {
656 	u8 status = *((u8 *) skb->data);
657 	struct hci_cp_write_page_scan_activity *sent;
658 
659 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
660 
661 	if (status)
662 		return;
663 
664 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
665 	if (!sent)
666 		return;
667 
668 	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
669 	hdev->page_scan_window = __le16_to_cpu(sent->window);
670 }
671 
672 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
673 					   struct sk_buff *skb)
674 {
675 	struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
676 
677 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
678 
679 	if (test_bit(HCI_INIT, &hdev->flags) && !rp->status)
680 		hdev->page_scan_type = rp->type;
681 }
682 
683 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
684 					struct sk_buff *skb)
685 {
686 	u8 status = *((u8 *) skb->data);
687 	u8 *type;
688 
689 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
690 
691 	if (status)
692 		return;
693 
694 	type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
695 	if (type)
696 		hdev->page_scan_type = *type;
697 }
698 
699 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
700 					struct sk_buff *skb)
701 {
702 	struct hci_rp_read_data_block_size *rp = (void *) skb->data;
703 
704 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
705 
706 	if (rp->status)
707 		return;
708 
709 	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
710 	hdev->block_len = __le16_to_cpu(rp->block_len);
711 	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
712 
713 	hdev->block_cnt = hdev->num_blocks;
714 
715 	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
716 	       hdev->block_cnt, hdev->block_len);
717 }
718 
719 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
720 				       struct sk_buff *skb)
721 {
722 	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
723 
724 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
725 
726 	if (rp->status)
727 		goto a2mp_rsp;
728 
729 	hdev->amp_status = rp->amp_status;
730 	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
731 	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
732 	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
733 	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
734 	hdev->amp_type = rp->amp_type;
735 	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
736 	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
737 	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
738 	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
739 
740 a2mp_rsp:
741 	a2mp_send_getinfo_rsp(hdev);
742 }
743 
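/* The local AMP assoc may not fit in a single Command Complete event, so it
 * arrives in fragments: while rem_len exceeds the fragment carried in this
 * reply, store it and request the next chunk; once the last fragment is in,
 * the A2MP response can be sent.
 */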
744 static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
745 					struct sk_buff *skb)
746 {
747 	struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
748 	struct amp_assoc *assoc = &hdev->loc_assoc;
749 	size_t rem_len, frag_len;
750 
751 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
752 
753 	if (rp->status)
754 		goto a2mp_rsp;
755 
756 	frag_len = skb->len - sizeof(*rp);
757 	rem_len = __le16_to_cpu(rp->rem_len);
758 
759 	if (rem_len > frag_len) {
760 		BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
761 
762 		memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
763 		assoc->offset += frag_len;
764 
765 		/* Read other fragments */
766 		amp_read_loc_assoc_frag(hdev, rp->phy_handle);
767 
768 		return;
769 	}
770 
771 	memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
772 	assoc->len = assoc->offset + rem_len;
773 	assoc->offset = 0;
774 
775 a2mp_rsp:
776 	/* Send A2MP Rsp when all fragments are received */
777 	a2mp_send_getampassoc_rsp(hdev, rp->status);
778 	a2mp_send_create_phy_link_req(hdev, rp->status);
779 }
780 
781 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
782 					 struct sk_buff *skb)
783 {
784 	struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
785 
786 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
787 
788 	if (!rp->status)
789 		hdev->inq_tx_power = rp->tx_power;
790 }
791 
792 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
793 {
794 	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
795 	struct hci_cp_pin_code_reply *cp;
796 	struct hci_conn *conn;
797 
798 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
799 
800 	hci_dev_lock(hdev);
801 
802 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
803 		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
804 
805 	if (rp->status)
806 		goto unlock;
807 
808 	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
809 	if (!cp)
810 		goto unlock;
811 
812 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
813 	if (conn)
814 		conn->pin_length = cp->pin_len;
815 
816 unlock:
817 	hci_dev_unlock(hdev);
818 }
819 
820 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
821 {
822 	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
823 
824 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
825 
826 	hci_dev_lock(hdev);
827 
828 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
829 		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
830 						 rp->status);
831 
832 	hci_dev_unlock(hdev);
833 }
834 
835 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
836 				       struct sk_buff *skb)
837 {
838 	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
839 
840 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
841 
842 	if (rp->status)
843 		return;
844 
845 	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
846 	hdev->le_pkts = rp->le_max_pkt;
847 
848 	hdev->le_cnt = hdev->le_pkts;
849 
850 	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
851 }
852 
853 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
854 					  struct sk_buff *skb)
855 {
856 	struct hci_rp_le_read_local_features *rp = (void *) skb->data;
857 
858 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
859 
860 	if (!rp->status)
861 		memcpy(hdev->le_features, rp->features, 8);
862 }
863 
864 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
865 					struct sk_buff *skb)
866 {
867 	struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
868 
869 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
870 
871 	if (!rp->status)
872 		hdev->adv_tx_power = rp->tx_power;
873 }
874 
875 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
876 {
877 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
878 
879 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
880 
881 	hci_dev_lock(hdev);
882 
883 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
884 		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
885 						 rp->status);
886 
887 	hci_dev_unlock(hdev);
888 }
889 
890 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
891 					  struct sk_buff *skb)
892 {
893 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
894 
895 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
896 
897 	hci_dev_lock(hdev);
898 
899 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
900 		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
901 						     ACL_LINK, 0, rp->status);
902 
903 	hci_dev_unlock(hdev);
904 }
905 
906 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
907 {
908 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
909 
910 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
911 
912 	hci_dev_lock(hdev);
913 
914 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
915 		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
916 						 0, rp->status);
917 
918 	hci_dev_unlock(hdev);
919 }
920 
921 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
922 					  struct sk_buff *skb)
923 {
924 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
925 
926 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
927 
928 	hci_dev_lock(hdev);
929 
930 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
931 		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
932 						     ACL_LINK, 0, rp->status);
933 
934 	hci_dev_unlock(hdev);
935 }
936 
937 static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
938 				       struct sk_buff *skb)
939 {
940 	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
941 
942 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
943 
944 	hci_dev_lock(hdev);
945 	mgmt_read_local_oob_data_complete(hdev, rp->hash, rp->randomizer,
946 					  NULL, NULL, rp->status);
947 	hci_dev_unlock(hdev);
948 }
949 
950 static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
951 					   struct sk_buff *skb)
952 {
953 	struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
954 
955 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
956 
957 	hci_dev_lock(hdev);
958 	mgmt_read_local_oob_data_complete(hdev, rp->hash192, rp->randomizer192,
959 					  rp->hash256, rp->randomizer256,
960 					  rp->status);
961 	hci_dev_unlock(hdev);
962 }
963 
964 
965 static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
966 {
967 	__u8 status = *((__u8 *) skb->data);
968 	bdaddr_t *sent;
969 
970 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
971 
972 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
973 	if (!sent)
974 		return;
975 
976 	hci_dev_lock(hdev);
977 
978 	if (!status)
979 		bacpy(&hdev->random_addr, sent);
980 
981 	hci_dev_unlock(hdev);
982 }
983 
984 static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
985 {
986 	__u8 *sent, status = *((__u8 *) skb->data);
987 
988 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
989 
990 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
991 	if (!sent)
992 		return;
993 
994 	hci_dev_lock(hdev);
995 
996 	if (!status)
997 		mgmt_advertising(hdev, *sent);
998 
999 	hci_dev_unlock(hdev);
1000 }
1001 
1002 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1003 {
1004 	struct hci_cp_le_set_scan_param *cp;
1005 	__u8 status = *((__u8 *) skb->data);
1006 
1007 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1008 
1009 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1010 	if (!cp)
1011 		return;
1012 
1013 	hci_dev_lock(hdev);
1014 
1015 	if (!status)
1016 		hdev->le_scan_type = cp->type;
1017 
1018 	hci_dev_unlock(hdev);
1019 }
1020 
1021 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1022 				      struct sk_buff *skb)
1023 {
1024 	struct hci_cp_le_set_scan_enable *cp;
1025 	__u8 status = *((__u8 *) skb->data);
1026 
1027 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1028 
1029 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1030 	if (!cp)
1031 		return;
1032 
1033 	if (status)
1034 		return;
1035 
1036 	switch (cp->enable) {
1037 	case LE_SCAN_ENABLE:
1038 		set_bit(HCI_LE_SCAN, &hdev->dev_flags);
1039 		break;
1040 
1041 	case LE_SCAN_DISABLE:
1042 		/* Cancel this timer so that we don't try to disable scanning
1043 		 * when it's already disabled.
1044 		 */
1045 		cancel_delayed_work(&hdev->le_scan_disable);
1046 
1047 		clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
1048 		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
1049 		 * interrupted scanning due to a connect request. Therefore,
1050 		 * mark discovery as stopped.
1051 		 */
1052 		if (test_and_clear_bit(HCI_LE_SCAN_INTERRUPTED,
1053 				       &hdev->dev_flags))
1054 			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1055 		break;
1056 
1057 	default:
1058 		BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
1059 		break;
1060 	}
1061 }
1062 
1063 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1064 					   struct sk_buff *skb)
1065 {
1066 	struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1067 
1068 	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1069 
1070 	if (!rp->status)
1071 		hdev->le_white_list_size = rp->size;
1072 }
1073 
1074 static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
1075 				       struct sk_buff *skb)
1076 {
1077 	__u8 status = *((__u8 *) skb->data);
1078 
1079 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1080 
1081 	if (!status)
1082 		hci_white_list_clear(hdev);
1083 }
1084 
1085 static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
1086 					struct sk_buff *skb)
1087 {
1088 	struct hci_cp_le_add_to_white_list *sent;
1089 	__u8 status = *((__u8 *) skb->data);
1090 
1091 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1092 
1093 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
1094 	if (!sent)
1095 		return;
1096 
1097 	if (!status)
1098 		hci_white_list_add(hdev, &sent->bdaddr, sent->bdaddr_type);
1099 }
1100 
1101 static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
1102 					  struct sk_buff *skb)
1103 {
1104 	struct hci_cp_le_del_from_white_list *sent;
1105 	__u8 status = *((__u8 *) skb->data);
1106 
1107 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1108 
1109 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
1110 	if (!sent)
1111 		return;
1112 
1113 	if (!status)
1114 		hci_white_list_del(hdev, &sent->bdaddr, sent->bdaddr_type);
1115 }
1116 
1117 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1118 					    struct sk_buff *skb)
1119 {
1120 	struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1121 
1122 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1123 
1124 	if (!rp->status)
1125 		memcpy(hdev->le_states, rp->le_states, 8);
1126 }
1127 
1128 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
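/* Write_LE_Host_Supported updates the host feature bits on extended features
 * page 1: the LE and "simultaneous LE and BR/EDR" host-support bits are
 * mirrored into hdev->features[1][0] and the matching dev_flags.
 */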
1129 					   struct sk_buff *skb)
1130 {
1131 	struct hci_cp_write_le_host_supported *sent;
1132 	__u8 status = *((__u8 *) skb->data);
1133 
1134 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1135 
1136 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1137 	if (!sent)
1138 		return;
1139 
1140 	if (!status) {
1141 		if (sent->le) {
1142 			hdev->features[1][0] |= LMP_HOST_LE;
1143 			set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1144 		} else {
1145 			hdev->features[1][0] &= ~LMP_HOST_LE;
1146 			clear_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1147 			clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
1148 		}
1149 
1150 		if (sent->simul)
1151 			hdev->features[1][0] |= LMP_HOST_LE_BREDR;
1152 		else
1153 			hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
1154 	}
1155 }
1156 
1157 static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1158 {
1159 	struct hci_cp_le_set_adv_param *cp;
1160 	u8 status = *((u8 *) skb->data);
1161 
1162 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1163 
1164 	if (status)
1165 		return;
1166 
1167 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
1168 	if (!cp)
1169 		return;
1170 
1171 	hci_dev_lock(hdev);
1172 	hdev->adv_addr_type = cp->own_address_type;
1173 	hci_dev_unlock(hdev);
1174 }
1175 
1176 static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
1177 					  struct sk_buff *skb)
1178 {
1179 	struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;
1180 
1181 	BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
1182 	       hdev->name, rp->status, rp->phy_handle);
1183 
1184 	if (rp->status)
1185 		return;
1186 
1187 	amp_write_rem_assoc_continue(hdev, rp->phy_handle);
1188 }
1189 
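/* Command Status handlers (hci_cs_*) start here. A non-zero status means the
 * command failed before doing any work, so these handlers mostly roll back
 * the optimistic state that was set up when the command was issued.
 */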
1190 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1191 {
1192 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1193 
1194 	if (status) {
1195 		hci_conn_check_pending(hdev);
1196 		return;
1197 	}
1198 
1199 	set_bit(HCI_INQUIRY, &hdev->flags);
1200 }
1201 
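/* For Create_Connection, status 0x0c (Command Disallowed) with few attempts
 * leaves the connection in BT_CONNECT2 so it can be retried later instead of
 * being torn down immediately.
 */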
1202 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1203 {
1204 	struct hci_cp_create_conn *cp;
1205 	struct hci_conn *conn;
1206 
1207 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1208 
1209 	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1210 	if (!cp)
1211 		return;
1212 
1213 	hci_dev_lock(hdev);
1214 
1215 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1216 
1217 	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);
1218 
1219 	if (status) {
1220 		if (conn && conn->state == BT_CONNECT) {
1221 			if (status != 0x0c || conn->attempt > 2) {
1222 				conn->state = BT_CLOSED;
1223 				hci_proto_connect_cfm(conn, status);
1224 				hci_conn_del(conn);
1225 			} else
1226 				conn->state = BT_CONNECT2;
1227 		}
1228 	} else {
1229 		if (!conn) {
1230 			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
1231 			if (conn) {
1232 				conn->out = true;
1233 				conn->link_mode |= HCI_LM_MASTER;
1234 			} else
1235 				BT_ERR("No memory for new connection");
1236 		}
1237 	}
1238 
1239 	hci_dev_unlock(hdev);
1240 }
1241 
1242 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1243 {
1244 	struct hci_cp_add_sco *cp;
1245 	struct hci_conn *acl, *sco;
1246 	__u16 handle;
1247 
1248 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1249 
1250 	if (!status)
1251 		return;
1252 
1253 	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1254 	if (!cp)
1255 		return;
1256 
1257 	handle = __le16_to_cpu(cp->handle);
1258 
1259 	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1260 
1261 	hci_dev_lock(hdev);
1262 
1263 	acl = hci_conn_hash_lookup_handle(hdev, handle);
1264 	if (acl) {
1265 		sco = acl->link;
1266 		if (sco) {
1267 			sco->state = BT_CLOSED;
1268 
1269 			hci_proto_connect_cfm(sco, status);
1270 			hci_conn_del(sco);
1271 		}
1272 	}
1273 
1274 	hci_dev_unlock(hdev);
1275 }
1276 
1277 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1278 {
1279 	struct hci_cp_auth_requested *cp;
1280 	struct hci_conn *conn;
1281 
1282 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1283 
1284 	if (!status)
1285 		return;
1286 
1287 	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1288 	if (!cp)
1289 		return;
1290 
1291 	hci_dev_lock(hdev);
1292 
1293 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1294 	if (conn) {
1295 		if (conn->state == BT_CONFIG) {
1296 			hci_proto_connect_cfm(conn, status);
1297 			hci_conn_drop(conn);
1298 		}
1299 	}
1300 
1301 	hci_dev_unlock(hdev);
1302 }
1303 
1304 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1305 {
1306 	struct hci_cp_set_conn_encrypt *cp;
1307 	struct hci_conn *conn;
1308 
1309 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1310 
1311 	if (!status)
1312 		return;
1313 
1314 	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1315 	if (!cp)
1316 		return;
1317 
1318 	hci_dev_lock(hdev);
1319 
1320 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1321 	if (conn) {
1322 		if (conn->state == BT_CONFIG) {
1323 			hci_proto_connect_cfm(conn, status);
1324 			hci_conn_drop(conn);
1325 		}
1326 	}
1327 
1328 	hci_dev_unlock(hdev);
1329 }
1330 
1331 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1332 				    struct hci_conn *conn)
1333 {
1334 	if (conn->state != BT_CONFIG || !conn->out)
1335 		return 0;
1336 
1337 	if (conn->pending_sec_level == BT_SECURITY_SDP)
1338 		return 0;
1339 
1340 	/* Only request authentication for SSP connections or non-SSP
1341 	 * devices with sec_level MEDIUM or HIGH, or if MITM protection
1342 	 * is requested.
1343 	 */
1344 	if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1345 	    conn->pending_sec_level != BT_SECURITY_HIGH &&
1346 	    conn->pending_sec_level != BT_SECURITY_MEDIUM)
1347 		return 0;
1348 
1349 	return 1;
1350 }
1351 
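/* Name resolution during discovery: issue a Remote Name Request using the
 * page-scan parameters and clock offset cached from the inquiry result.
 */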
1352 static int hci_resolve_name(struct hci_dev *hdev,
1353 				   struct inquiry_entry *e)
1354 {
1355 	struct hci_cp_remote_name_req cp;
1356 
1357 	memset(&cp, 0, sizeof(cp));
1358 
1359 	bacpy(&cp.bdaddr, &e->data.bdaddr);
1360 	cp.pscan_rep_mode = e->data.pscan_rep_mode;
1361 	cp.pscan_mode = e->data.pscan_mode;
1362 	cp.clock_offset = e->data.clock_offset;
1363 
1364 	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1365 }
1366 
1367 static bool hci_resolve_next_name(struct hci_dev *hdev)
1368 {
1369 	struct discovery_state *discov = &hdev->discovery;
1370 	struct inquiry_entry *e;
1371 
1372 	if (list_empty(&discov->resolve))
1373 		return false;
1374 
1375 	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1376 	if (!e)
1377 		return false;
1378 
1379 	if (hci_resolve_name(hdev, e) == 0) {
1380 		e->name_state = NAME_PENDING;
1381 		return true;
1382 	}
1383 
1384 	return false;
1385 }
1386 
1387 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
1388 				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
1389 {
1390 	struct discovery_state *discov = &hdev->discovery;
1391 	struct inquiry_entry *e;
1392 
1393 	if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1394 		mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
1395 				      name_len, conn->dev_class);
1396 
1397 	if (discov->state == DISCOVERY_STOPPED)
1398 		return;
1399 
1400 	if (discov->state == DISCOVERY_STOPPING)
1401 		goto discov_complete;
1402 
1403 	if (discov->state != DISCOVERY_RESOLVING)
1404 		return;
1405 
1406 	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
1407 	/* If the device was not found in the list of devices whose names are
1408 	 * pending resolution, there is no need to continue resolving the next
1409 	 * name; that will be done when another Remote Name Request Complete
1410 	 * event is received. */
1411 	if (!e)
1412 		return;
1413 
1414 	list_del(&e->list);
1415 	if (name) {
1416 		e->name_state = NAME_KNOWN;
1417 		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
1418 				 e->data.rssi, name, name_len);
1419 	} else {
1420 		e->name_state = NAME_NOT_KNOWN;
1421 	}
1422 
1423 	if (hci_resolve_next_name(hdev))
1424 		return;
1425 
1426 discov_complete:
1427 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1428 }
1429 
1430 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1431 {
1432 	struct hci_cp_remote_name_req *cp;
1433 	struct hci_conn *conn;
1434 
1435 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1436 
1437 	/* If successful, wait for the Remote Name Request Complete event
1438 	 * before checking whether authentication is needed. */
1439 	if (!status)
1440 		return;
1441 
1442 	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1443 	if (!cp)
1444 		return;
1445 
1446 	hci_dev_lock(hdev);
1447 
1448 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1449 
1450 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
1451 		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
1452 
1453 	if (!conn)
1454 		goto unlock;
1455 
1456 	if (!hci_outgoing_auth_needed(hdev, conn))
1457 		goto unlock;
1458 
1459 	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1460 		struct hci_cp_auth_requested auth_cp;
1461 
1462 		auth_cp.handle = __cpu_to_le16(conn->handle);
1463 		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
1464 			     sizeof(auth_cp), &auth_cp);
1465 	}
1466 
1467 unlock:
1468 	hci_dev_unlock(hdev);
1469 }
1470 
1471 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1472 {
1473 	struct hci_cp_read_remote_features *cp;
1474 	struct hci_conn *conn;
1475 
1476 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1477 
1478 	if (!status)
1479 		return;
1480 
1481 	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1482 	if (!cp)
1483 		return;
1484 
1485 	hci_dev_lock(hdev);
1486 
1487 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1488 	if (conn) {
1489 		if (conn->state == BT_CONFIG) {
1490 			hci_proto_connect_cfm(conn, status);
1491 			hci_conn_drop(conn);
1492 		}
1493 	}
1494 
1495 	hci_dev_unlock(hdev);
1496 }
1497 
1498 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1499 {
1500 	struct hci_cp_read_remote_ext_features *cp;
1501 	struct hci_conn *conn;
1502 
1503 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1504 
1505 	if (!status)
1506 		return;
1507 
1508 	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1509 	if (!cp)
1510 		return;
1511 
1512 	hci_dev_lock(hdev);
1513 
1514 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1515 	if (conn) {
1516 		if (conn->state == BT_CONFIG) {
1517 			hci_proto_connect_cfm(conn, status);
1518 			hci_conn_drop(conn);
1519 		}
1520 	}
1521 
1522 	hci_dev_unlock(hdev);
1523 }
1524 
1525 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1526 {
1527 	struct hci_cp_setup_sync_conn *cp;
1528 	struct hci_conn *acl, *sco;
1529 	__u16 handle;
1530 
1531 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1532 
1533 	if (!status)
1534 		return;
1535 
1536 	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1537 	if (!cp)
1538 		return;
1539 
1540 	handle = __le16_to_cpu(cp->handle);
1541 
1542 	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1543 
1544 	hci_dev_lock(hdev);
1545 
1546 	acl = hci_conn_hash_lookup_handle(hdev, handle);
1547 	if (acl) {
1548 		sco = acl->link;
1549 		if (sco) {
1550 			sco->state = BT_CLOSED;
1551 
1552 			hci_proto_connect_cfm(sco, status);
1553 			hci_conn_del(sco);
1554 		}
1555 	}
1556 
1557 	hci_dev_unlock(hdev);
1558 }
1559 
1560 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1561 {
1562 	struct hci_cp_sniff_mode *cp;
1563 	struct hci_conn *conn;
1564 
1565 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1566 
1567 	if (!status)
1568 		return;
1569 
1570 	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1571 	if (!cp)
1572 		return;
1573 
1574 	hci_dev_lock(hdev);
1575 
1576 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1577 	if (conn) {
1578 		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1579 
1580 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1581 			hci_sco_setup(conn, status);
1582 	}
1583 
1584 	hci_dev_unlock(hdev);
1585 }
1586 
1587 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1588 {
1589 	struct hci_cp_exit_sniff_mode *cp;
1590 	struct hci_conn *conn;
1591 
1592 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1593 
1594 	if (!status)
1595 		return;
1596 
1597 	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1598 	if (!cp)
1599 		return;
1600 
1601 	hci_dev_lock(hdev);
1602 
1603 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1604 	if (conn) {
1605 		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1606 
1607 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1608 			hci_sco_setup(conn, status);
1609 	}
1610 
1611 	hci_dev_unlock(hdev);
1612 }
1613 
1614 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1615 {
1616 	struct hci_cp_disconnect *cp;
1617 	struct hci_conn *conn;
1618 
1619 	if (!status)
1620 		return;
1621 
1622 	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1623 	if (!cp)
1624 		return;
1625 
1626 	hci_dev_lock(hdev);
1627 
1628 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1629 	if (conn)
1630 		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1631 				       conn->dst_type, status);
1632 
1633 	hci_dev_unlock(hdev);
1634 }
1635 
1636 static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
1637 {
1638 	struct hci_cp_create_phy_link *cp;
1639 
1640 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1641 
1642 	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
1643 	if (!cp)
1644 		return;
1645 
1646 	hci_dev_lock(hdev);
1647 
1648 	if (status) {
1649 		struct hci_conn *hcon;
1650 
1651 		hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
1652 		if (hcon)
1653 			hci_conn_del(hcon);
1654 	} else {
1655 		amp_write_remote_assoc(hdev, cp->phy_handle);
1656 	}
1657 
1658 	hci_dev_unlock(hdev);
1659 }
1660 
1661 static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
1662 {
1663 	struct hci_cp_accept_phy_link *cp;
1664 
1665 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1666 
1667 	if (status)
1668 		return;
1669 
1670 	cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
1671 	if (!cp)
1672 		return;
1673 
1674 	amp_write_remote_assoc(hdev, cp->phy_handle);
1675 }
1676 
1677 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
1678 {
1679 	struct hci_cp_le_create_conn *cp;
1680 	struct hci_conn *conn;
1681 
1682 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1683 
1684 	/* All connection failure handling is taken care of by the
1685 	 * hci_le_conn_failed function which is triggered by the HCI
1686 	 * request completion callbacks used for connecting.
1687 	 */
1688 	if (status)
1689 		return;
1690 
1691 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
1692 	if (!cp)
1693 		return;
1694 
1695 	hci_dev_lock(hdev);
1696 
1697 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
1698 	if (!conn)
1699 		goto unlock;
1700 
1701 	/* Store the initiator and responder address information which
1702 	 * is needed for SMP. These values will not change during the
1703 	 * lifetime of the connection.
1704 	 */
1705 	conn->init_addr_type = cp->own_address_type;
1706 	if (cp->own_address_type == ADDR_LE_DEV_RANDOM)
1707 		bacpy(&conn->init_addr, &hdev->random_addr);
1708 	else
1709 		bacpy(&conn->init_addr, &hdev->bdaddr);
1710 
1711 	conn->resp_addr_type = cp->peer_addr_type;
1712 	bacpy(&conn->resp_addr, &cp->peer_addr);
1713 
1714 	/* We don't want the connection attempt to stick around
1715 	 * indefinitely since LE doesn't have a page timeout concept
1716 	 * like BR/EDR. Set a timer for any connection that doesn't use
1717 	 * the white list for connecting.
1718 	 */
1719 	if (cp->filter_policy == HCI_LE_USE_PEER_ADDR)
1720 		queue_delayed_work(conn->hdev->workqueue,
1721 				   &conn->le_conn_timeout,
1722 				   HCI_LE_CONN_TIMEOUT);
1723 
1724 unlock:
1725 	hci_dev_unlock(hdev);
1726 }
1727 
1728 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
1729 {
1730 	struct hci_cp_le_start_enc *cp;
1731 	struct hci_conn *conn;
1732 
1733 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1734 
1735 	if (!status)
1736 		return;
1737 
1738 	hci_dev_lock(hdev);
1739 
1740 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
1741 	if (!cp)
1742 		goto unlock;
1743 
1744 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1745 	if (!conn)
1746 		goto unlock;
1747 
1748 	if (conn->state != BT_CONNECTED)
1749 		goto unlock;
1750 
1751 	hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
1752 	hci_conn_drop(conn);
1753 
1754 unlock:
1755 	hci_dev_unlock(hdev);
1756 }
1757 
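/* Handlers for the HCI events themselves (hci_*_evt) follow. They are
 * dispatched by the event-code switch in hci_event_packet() at the bottom
 * of this file.
 */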
1758 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1759 {
1760 	__u8 status = *((__u8 *) skb->data);
1761 	struct discovery_state *discov = &hdev->discovery;
1762 	struct inquiry_entry *e;
1763 
1764 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1765 
1766 	hci_conn_check_pending(hdev);
1767 
1768 	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
1769 		return;
1770 
1771 	smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */
1772 	wake_up_bit(&hdev->flags, HCI_INQUIRY);
1773 
1774 	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1775 		return;
1776 
1777 	hci_dev_lock(hdev);
1778 
1779 	if (discov->state != DISCOVERY_FINDING)
1780 		goto unlock;
1781 
1782 	if (list_empty(&discov->resolve)) {
1783 		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1784 		goto unlock;
1785 	}
1786 
1787 	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1788 	if (e && hci_resolve_name(hdev, e) == 0) {
1789 		e->name_state = NAME_PENDING;
1790 		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
1791 	} else {
1792 		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1793 	}
1794 
1795 unlock:
1796 	hci_dev_unlock(hdev);
1797 }
1798 
1799 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1800 {
1801 	struct inquiry_data data;
1802 	struct inquiry_info *info = (void *) (skb->data + 1);
1803 	int num_rsp = *((__u8 *) skb->data);
1804 
1805 	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1806 
1807 	if (!num_rsp)
1808 		return;
1809 
1810 	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
1811 		return;
1812 
1813 	hci_dev_lock(hdev);
1814 
1815 	for (; num_rsp; num_rsp--, info++) {
1816 		bool name_known, ssp;
1817 
1818 		bacpy(&data.bdaddr, &info->bdaddr);
1819 		data.pscan_rep_mode	= info->pscan_rep_mode;
1820 		data.pscan_period_mode	= info->pscan_period_mode;
1821 		data.pscan_mode		= info->pscan_mode;
1822 		memcpy(data.dev_class, info->dev_class, 3);
1823 		data.clock_offset	= info->clock_offset;
1824 		data.rssi		= 0x00;
1825 		data.ssp_mode		= 0x00;
1826 
1827 		name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
1828 		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
1829 				  info->dev_class, 0, !name_known, ssp, NULL,
1830 				  0);
1831 	}
1832 
1833 	hci_dev_unlock(hdev);
1834 }
1835 
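/* Connection Complete: on success record the handle, start remote feature
 * discovery for ACL links and fix up the packet type for pre-2.0
 * controllers; on failure report the failed attempt to mgmt and tear the
 * connection down.
 */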
1836 static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1837 {
1838 	struct hci_ev_conn_complete *ev = (void *) skb->data;
1839 	struct hci_conn *conn;
1840 
1841 	BT_DBG("%s", hdev->name);
1842 
1843 	hci_dev_lock(hdev);
1844 
1845 	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
1846 	if (!conn) {
1847 		if (ev->link_type != SCO_LINK)
1848 			goto unlock;
1849 
1850 		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
1851 		if (!conn)
1852 			goto unlock;
1853 
1854 		conn->type = SCO_LINK;
1855 	}
1856 
1857 	if (!ev->status) {
1858 		conn->handle = __le16_to_cpu(ev->handle);
1859 
1860 		if (conn->type == ACL_LINK) {
1861 			conn->state = BT_CONFIG;
1862 			hci_conn_hold(conn);
1863 
1864 			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
1865 			    !hci_find_link_key(hdev, &ev->bdaddr))
1866 				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
1867 			else
1868 				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1869 		} else
1870 			conn->state = BT_CONNECTED;
1871 
1872 		hci_conn_add_sysfs(conn);
1873 
1874 		if (test_bit(HCI_AUTH, &hdev->flags))
1875 			conn->link_mode |= HCI_LM_AUTH;
1876 
1877 		if (test_bit(HCI_ENCRYPT, &hdev->flags))
1878 			conn->link_mode |= HCI_LM_ENCRYPT;
1879 
1880 		/* Get remote features */
1881 		if (conn->type == ACL_LINK) {
1882 			struct hci_cp_read_remote_features cp;
1883 			cp.handle = ev->handle;
1884 			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
1885 				     sizeof(cp), &cp);
1886 		}
1887 
1888 		/* Set packet type for incoming connection */
1889 		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
1890 			struct hci_cp_change_conn_ptype cp;
1891 			cp.handle = ev->handle;
1892 			cp.pkt_type = cpu_to_le16(conn->pkt_type);
1893 			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
1894 				     &cp);
1895 		}
1896 	} else {
1897 		conn->state = BT_CLOSED;
1898 		if (conn->type == ACL_LINK)
1899 			mgmt_connect_failed(hdev, &conn->dst, conn->type,
1900 					    conn->dst_type, ev->status);
1901 	}
1902 
1903 	if (conn->type == ACL_LINK)
1904 		hci_sco_setup(conn, ev->status);
1905 
1906 	if (ev->status) {
1907 		hci_proto_connect_cfm(conn, ev->status);
1908 		hci_conn_del(conn);
1909 	} else if (ev->link_type != ACL_LINK)
1910 		hci_proto_connect_cfm(conn, ev->status);
1911 
1912 unlock:
1913 	hci_dev_unlock(hdev);
1914 
1915 	hci_conn_check_pending(hdev);
1916 }
1917 
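/* Incoming connection request: consult the registered protocols and the
 * blacklist, then either accept (choosing the master/slave role for ACL, or
 * filling in sync connection parameters for SCO/eSCO), defer the decision to
 * the protocol, or reject the request.
 */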
1918 static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1919 {
1920 	struct hci_ev_conn_request *ev = (void *) skb->data;
1921 	int mask = hdev->link_mode;
1922 	__u8 flags = 0;
1923 
1924 	BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
1925 	       ev->link_type);
1926 
1927 	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
1928 				      &flags);
1929 
1930 	if ((mask & HCI_LM_ACCEPT) &&
1931 	    !hci_blacklist_lookup(hdev, &ev->bdaddr, BDADDR_BREDR)) {
1932 		/* Connection accepted */
1933 		struct inquiry_entry *ie;
1934 		struct hci_conn *conn;
1935 
1936 		hci_dev_lock(hdev);
1937 
1938 		ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
1939 		if (ie)
1940 			memcpy(ie->data.dev_class, ev->dev_class, 3);
1941 
1942 		conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
1943 					       &ev->bdaddr);
1944 		if (!conn) {
1945 			conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
1946 			if (!conn) {
1947 				BT_ERR("No memory for new connection");
1948 				hci_dev_unlock(hdev);
1949 				return;
1950 			}
1951 		}
1952 
1953 		memcpy(conn->dev_class, ev->dev_class, 3);
1954 
1955 		hci_dev_unlock(hdev);
1956 
1957 		if (ev->link_type == ACL_LINK ||
1958 		    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
1959 			struct hci_cp_accept_conn_req cp;
1960 			conn->state = BT_CONNECT;
1961 
1962 			bacpy(&cp.bdaddr, &ev->bdaddr);
1963 
1964 			if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
1965 				cp.role = 0x00; /* Become master */
1966 			else
1967 				cp.role = 0x01; /* Remain slave */
1968 
1969 			hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp),
1970 				     &cp);
1971 		} else if (!(flags & HCI_PROTO_DEFER)) {
1972 			struct hci_cp_accept_sync_conn_req cp;
1973 			conn->state = BT_CONNECT;
1974 
1975 			bacpy(&cp.bdaddr, &ev->bdaddr);
1976 			cp.pkt_type = cpu_to_le16(conn->pkt_type);
1977 
1978 			cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
1979 			cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
1980 			cp.max_latency    = cpu_to_le16(0xffff);
1981 			cp.content_format = cpu_to_le16(hdev->voice_setting);
1982 			cp.retrans_effort = 0xff;
1983 
1984 			hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
1985 				     sizeof(cp), &cp);
1986 		} else {
1987 			conn->state = BT_CONNECT2;
1988 			hci_proto_connect_cfm(conn, 0);
1989 		}
1990 	} else {
1991 		/* Connection rejected */
1992 		struct hci_cp_reject_conn_req cp;
1993 
1994 		bacpy(&cp.bdaddr, &ev->bdaddr);
1995 		cp.reason = HCI_ERROR_REJ_BAD_ADDR;
1996 		hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
1997 	}
1998 }
1999 
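/* Collapse the various HCI disconnect reasons into the coarser set of
 * reasons exposed over the mgmt interface.
 */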
2000 static u8 hci_to_mgmt_reason(u8 err)
2001 {
2002 	switch (err) {
2003 	case HCI_ERROR_CONNECTION_TIMEOUT:
2004 		return MGMT_DEV_DISCONN_TIMEOUT;
2005 	case HCI_ERROR_REMOTE_USER_TERM:
2006 	case HCI_ERROR_REMOTE_LOW_RESOURCES:
2007 	case HCI_ERROR_REMOTE_POWER_OFF:
2008 		return MGMT_DEV_DISCONN_REMOTE;
2009 	case HCI_ERROR_LOCAL_HOST_TERM:
2010 		return MGMT_DEV_DISCONN_LOCAL_HOST;
2011 	default:
2012 		return MGMT_DEV_DISCONN_UNKNOWN;
2013 	}
2014 }
2015 
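/* Disconnection Complete: notify mgmt, drop the link key if it was only
 * meant for this connection, re-queue LE auto-connect candidates, and give
 * LE advertising a chance to resume.
 */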
2016 static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2017 {
2018 	struct hci_ev_disconn_complete *ev = (void *) skb->data;
2019 	u8 reason = hci_to_mgmt_reason(ev->reason);
2020 	struct hci_conn_params *params;
2021 	struct hci_conn *conn;
2022 	bool mgmt_connected;
2023 	u8 type;
2024 
2025 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2026 
2027 	hci_dev_lock(hdev);
2028 
2029 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2030 	if (!conn)
2031 		goto unlock;
2032 
2033 	if (ev->status) {
2034 		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2035 				       conn->dst_type, ev->status);
2036 		goto unlock;
2037 	}
2038 
2039 	conn->state = BT_CLOSED;
2040 
2041 	mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
2042 	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
2043 				reason, mgmt_connected);
2044 
2045 	if (conn->type == ACL_LINK && conn->flush_key)
2046 		hci_remove_link_key(hdev, &conn->dst);
2047 
2048 	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
2049 	if (params) {
2050 		switch (params->auto_connect) {
2051 		case HCI_AUTO_CONN_LINK_LOSS:
2052 			if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
2053 				break;
2054 			/* Fall through */
2055 
2056 		case HCI_AUTO_CONN_ALWAYS:
2057 			hci_pend_le_conn_add(hdev, &conn->dst, conn->dst_type);
2058 			break;
2059 
2060 		default:
2061 			break;
2062 		}
2063 	}
2064 
2065 	type = conn->type;
2066 
2067 	hci_proto_disconn_cfm(conn, ev->reason);
2068 	hci_conn_del(conn);
2069 
2070 	/* Re-enable advertising if necessary, since it might
2071 	 * have been disabled by the connection. From the
2072 	 * HCI_LE_Set_Advertise_Enable command description in
2073 	 * the core specification (v4.0):
2074 	 * "The Controller shall continue advertising until the Host
2075 	 * issues an LE_Set_Advertise_Enable command with
2076 	 * Advertising_Enable set to 0x00 (Advertising is disabled)
2077 	 * or until a connection is created or until the Advertising
2078 	 * is timed out due to Directed Advertising."
2079 	 */
2080 	if (type == LE_LINK)
2081 		mgmt_reenable_advertising(hdev);
2082 
2083 unlock:
2084 	hci_dev_unlock(hdev);
2085 }
2086 
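/* Authentication Complete event: update the link mode and security
 * level on success, report failures via mgmt and, while the connection
 * is still in BT_CONFIG, continue with encryption setup.
 */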
2087 static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2088 {
2089 	struct hci_ev_auth_complete *ev = (void *) skb->data;
2090 	struct hci_conn *conn;
2091 
2092 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2093 
2094 	hci_dev_lock(hdev);
2095 
2096 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2097 	if (!conn)
2098 		goto unlock;
2099 
2100 	if (!ev->status) {
2101 		if (!hci_conn_ssp_enabled(conn) &&
2102 		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
2103 			BT_INFO("re-auth of legacy device is not possible.");
2104 		} else {
2105 			conn->link_mode |= HCI_LM_AUTH;
2106 			conn->sec_level = conn->pending_sec_level;
2107 		}
2108 	} else {
2109 		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
2110 				 ev->status);
2111 	}
2112 
2113 	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2114 	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
2115 
2116 	if (conn->state == BT_CONFIG) {
2117 		if (!ev->status && hci_conn_ssp_enabled(conn)) {
2118 			struct hci_cp_set_conn_encrypt cp;
2119 			cp.handle  = ev->handle;
2120 			cp.encrypt = 0x01;
2121 			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2122 				     &cp);
2123 		} else {
2124 			conn->state = BT_CONNECTED;
2125 			hci_proto_connect_cfm(conn, ev->status);
2126 			hci_conn_drop(conn);
2127 		}
2128 	} else {
2129 		hci_auth_cfm(conn, ev->status);
2130 
2131 		hci_conn_hold(conn);
2132 		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2133 		hci_conn_drop(conn);
2134 	}
2135 
2136 	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
2137 		if (!ev->status) {
2138 			struct hci_cp_set_conn_encrypt cp;
2139 			cp.handle  = ev->handle;
2140 			cp.encrypt = 0x01;
2141 			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2142 				     &cp);
2143 		} else {
2144 			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2145 			hci_encrypt_cfm(conn, ev->status, 0x00);
2146 		}
2147 	}
2148 
2149 unlock:
2150 	hci_dev_unlock(hdev);
2151 }
2152 
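/* Remote Name Request Complete event: pass the resolved name (or the
 * failure) on to the management core and, for outgoing connections that
 * still need it, request authentication.
 */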
2153 static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
2154 {
2155 	struct hci_ev_remote_name *ev = (void *) skb->data;
2156 	struct hci_conn *conn;
2157 
2158 	BT_DBG("%s", hdev->name);
2159 
2160 	hci_conn_check_pending(hdev);
2161 
2162 	hci_dev_lock(hdev);
2163 
2164 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2165 
2166 	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2167 		goto check_auth;
2168 
2169 	if (ev->status == 0)
2170 		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
2171 				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
2172 	else
2173 		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
2174 
2175 check_auth:
2176 	if (!conn)
2177 		goto unlock;
2178 
2179 	if (!hci_outgoing_auth_needed(hdev, conn))
2180 		goto unlock;
2181 
2182 	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2183 		struct hci_cp_auth_requested cp;
2184 		cp.handle = __cpu_to_le16(conn->handle);
2185 		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
2186 	}
2187 
2188 unlock:
2189 	hci_dev_unlock(hdev);
2190 }
2191 
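/* Encryption Change event: track the encryption, authentication and
 * AES-CCM state of the link, enforce the Secure Connections Only policy
 * and notify the upper layers.
 */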
2192 static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2193 {
2194 	struct hci_ev_encrypt_change *ev = (void *) skb->data;
2195 	struct hci_conn *conn;
2196 
2197 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2198 
2199 	hci_dev_lock(hdev);
2200 
2201 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2202 	if (!conn)
2203 		goto unlock;
2204 
2205 	if (!ev->status) {
2206 		if (ev->encrypt) {
2207 			/* Encryption implies authentication */
2208 			conn->link_mode |= HCI_LM_AUTH;
2209 			conn->link_mode |= HCI_LM_ENCRYPT;
2210 			conn->sec_level = conn->pending_sec_level;
2211 
2212 			/* P-256 authentication key implies FIPS */
2213 			if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
2214 				conn->link_mode |= HCI_LM_FIPS;
2215 
2216 			if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
2217 			    conn->type == LE_LINK)
2218 				set_bit(HCI_CONN_AES_CCM, &conn->flags);
2219 		} else {
2220 			conn->link_mode &= ~HCI_LM_ENCRYPT;
2221 			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
2222 		}
2223 	}
2224 
2225 	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2226 
2227 	if (ev->status && conn->state == BT_CONNECTED) {
2228 		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2229 		hci_conn_drop(conn);
2230 		goto unlock;
2231 	}
2232 
2233 	if (conn->state == BT_CONFIG) {
2234 		if (!ev->status)
2235 			conn->state = BT_CONNECTED;
2236 
2237 		/* In Secure Connections Only mode, do not allow any
2238 		 * connections that are not encrypted with AES-CCM
2239 		 * using a P-256 authenticated combination key.
2240 		 */
2241 		if (test_bit(HCI_SC_ONLY, &hdev->dev_flags) &&
2242 		    (!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
2243 		     conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
2244 			hci_proto_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
2245 			hci_conn_drop(conn);
2246 			goto unlock;
2247 		}
2248 
2249 		hci_proto_connect_cfm(conn, ev->status);
2250 		hci_conn_drop(conn);
2251 	} else
2252 		hci_encrypt_cfm(conn, ev->status, ev->encrypt);
2253 
2254 unlock:
2255 	hci_dev_unlock(hdev);
2256 }
2257 
2258 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2259 					     struct sk_buff *skb)
2260 {
2261 	struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2262 	struct hci_conn *conn;
2263 
2264 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2265 
2266 	hci_dev_lock(hdev);
2267 
2268 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2269 	if (conn) {
2270 		if (!ev->status)
2271 			conn->link_mode |= HCI_LM_SECURE;
2272 
2273 		clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2274 
2275 		hci_key_change_cfm(conn, ev->status);
2276 	}
2277 
2278 	hci_dev_unlock(hdev);
2279 }
2280 
2281 static void hci_remote_features_evt(struct hci_dev *hdev,
2282 				    struct sk_buff *skb)
2283 {
2284 	struct hci_ev_remote_features *ev = (void *) skb->data;
2285 	struct hci_conn *conn;
2286 
2287 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2288 
2289 	hci_dev_lock(hdev);
2290 
2291 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2292 	if (!conn)
2293 		goto unlock;
2294 
2295 	if (!ev->status)
2296 		memcpy(conn->features[0], ev->features, 8);
2297 
2298 	if (conn->state != BT_CONFIG)
2299 		goto unlock;
2300 
2301 	if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
2302 		struct hci_cp_read_remote_ext_features cp;
2303 		cp.handle = ev->handle;
2304 		cp.page = 0x01;
2305 		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
2306 			     sizeof(cp), &cp);
2307 		goto unlock;
2308 	}
2309 
2310 	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2311 		struct hci_cp_remote_name_req cp;
2312 		memset(&cp, 0, sizeof(cp));
2313 		bacpy(&cp.bdaddr, &conn->dst);
2314 		cp.pscan_rep_mode = 0x02;
2315 		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2316 	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2317 		mgmt_device_connected(hdev, &conn->dst, conn->type,
2318 				      conn->dst_type, 0, NULL, 0,
2319 				      conn->dev_class);
2320 
2321 	if (!hci_outgoing_auth_needed(hdev, conn)) {
2322 		conn->state = BT_CONNECTED;
2323 		hci_proto_connect_cfm(conn, ev->status);
2324 		hci_conn_drop(conn);
2325 	}
2326 
2327 unlock:
2328 	hci_dev_unlock(hdev);
2329 }
2330 
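/* Command Complete event: dispatch to the per-opcode handlers above,
 * stop the command timer and, if the controller signals that it can
 * accept more commands, kick the command queue again.
 */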
2331 static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2332 {
2333 	struct hci_ev_cmd_complete *ev = (void *) skb->data;
2334 	u8 status = skb->data[sizeof(*ev)];
2335 	__u16 opcode;
2336 
2337 	skb_pull(skb, sizeof(*ev));
2338 
2339 	opcode = __le16_to_cpu(ev->opcode);
2340 
2341 	switch (opcode) {
2342 	case HCI_OP_INQUIRY_CANCEL:
2343 		hci_cc_inquiry_cancel(hdev, skb);
2344 		break;
2345 
2346 	case HCI_OP_PERIODIC_INQ:
2347 		hci_cc_periodic_inq(hdev, skb);
2348 		break;
2349 
2350 	case HCI_OP_EXIT_PERIODIC_INQ:
2351 		hci_cc_exit_periodic_inq(hdev, skb);
2352 		break;
2353 
2354 	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
2355 		hci_cc_remote_name_req_cancel(hdev, skb);
2356 		break;
2357 
2358 	case HCI_OP_ROLE_DISCOVERY:
2359 		hci_cc_role_discovery(hdev, skb);
2360 		break;
2361 
2362 	case HCI_OP_READ_LINK_POLICY:
2363 		hci_cc_read_link_policy(hdev, skb);
2364 		break;
2365 
2366 	case HCI_OP_WRITE_LINK_POLICY:
2367 		hci_cc_write_link_policy(hdev, skb);
2368 		break;
2369 
2370 	case HCI_OP_READ_DEF_LINK_POLICY:
2371 		hci_cc_read_def_link_policy(hdev, skb);
2372 		break;
2373 
2374 	case HCI_OP_WRITE_DEF_LINK_POLICY:
2375 		hci_cc_write_def_link_policy(hdev, skb);
2376 		break;
2377 
2378 	case HCI_OP_RESET:
2379 		hci_cc_reset(hdev, skb);
2380 		break;
2381 
2382 	case HCI_OP_WRITE_LOCAL_NAME:
2383 		hci_cc_write_local_name(hdev, skb);
2384 		break;
2385 
2386 	case HCI_OP_READ_LOCAL_NAME:
2387 		hci_cc_read_local_name(hdev, skb);
2388 		break;
2389 
2390 	case HCI_OP_WRITE_AUTH_ENABLE:
2391 		hci_cc_write_auth_enable(hdev, skb);
2392 		break;
2393 
2394 	case HCI_OP_WRITE_ENCRYPT_MODE:
2395 		hci_cc_write_encrypt_mode(hdev, skb);
2396 		break;
2397 
2398 	case HCI_OP_WRITE_SCAN_ENABLE:
2399 		hci_cc_write_scan_enable(hdev, skb);
2400 		break;
2401 
2402 	case HCI_OP_READ_CLASS_OF_DEV:
2403 		hci_cc_read_class_of_dev(hdev, skb);
2404 		break;
2405 
2406 	case HCI_OP_WRITE_CLASS_OF_DEV:
2407 		hci_cc_write_class_of_dev(hdev, skb);
2408 		break;
2409 
2410 	case HCI_OP_READ_VOICE_SETTING:
2411 		hci_cc_read_voice_setting(hdev, skb);
2412 		break;
2413 
2414 	case HCI_OP_WRITE_VOICE_SETTING:
2415 		hci_cc_write_voice_setting(hdev, skb);
2416 		break;
2417 
2418 	case HCI_OP_READ_NUM_SUPPORTED_IAC:
2419 		hci_cc_read_num_supported_iac(hdev, skb);
2420 		break;
2421 
2422 	case HCI_OP_WRITE_SSP_MODE:
2423 		hci_cc_write_ssp_mode(hdev, skb);
2424 		break;
2425 
2426 	case HCI_OP_WRITE_SC_SUPPORT:
2427 		hci_cc_write_sc_support(hdev, skb);
2428 		break;
2429 
2430 	case HCI_OP_READ_LOCAL_VERSION:
2431 		hci_cc_read_local_version(hdev, skb);
2432 		break;
2433 
2434 	case HCI_OP_READ_LOCAL_COMMANDS:
2435 		hci_cc_read_local_commands(hdev, skb);
2436 		break;
2437 
2438 	case HCI_OP_READ_LOCAL_FEATURES:
2439 		hci_cc_read_local_features(hdev, skb);
2440 		break;
2441 
2442 	case HCI_OP_READ_LOCAL_EXT_FEATURES:
2443 		hci_cc_read_local_ext_features(hdev, skb);
2444 		break;
2445 
2446 	case HCI_OP_READ_BUFFER_SIZE:
2447 		hci_cc_read_buffer_size(hdev, skb);
2448 		break;
2449 
2450 	case HCI_OP_READ_BD_ADDR:
2451 		hci_cc_read_bd_addr(hdev, skb);
2452 		break;
2453 
2454 	case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
2455 		hci_cc_read_page_scan_activity(hdev, skb);
2456 		break;
2457 
2458 	case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
2459 		hci_cc_write_page_scan_activity(hdev, skb);
2460 		break;
2461 
2462 	case HCI_OP_READ_PAGE_SCAN_TYPE:
2463 		hci_cc_read_page_scan_type(hdev, skb);
2464 		break;
2465 
2466 	case HCI_OP_WRITE_PAGE_SCAN_TYPE:
2467 		hci_cc_write_page_scan_type(hdev, skb);
2468 		break;
2469 
2470 	case HCI_OP_READ_DATA_BLOCK_SIZE:
2471 		hci_cc_read_data_block_size(hdev, skb);
2472 		break;
2473 
2474 	case HCI_OP_READ_FLOW_CONTROL_MODE:
2475 		hci_cc_read_flow_control_mode(hdev, skb);
2476 		break;
2477 
2478 	case HCI_OP_READ_LOCAL_AMP_INFO:
2479 		hci_cc_read_local_amp_info(hdev, skb);
2480 		break;
2481 
2482 	case HCI_OP_READ_LOCAL_AMP_ASSOC:
2483 		hci_cc_read_local_amp_assoc(hdev, skb);
2484 		break;
2485 
2486 	case HCI_OP_READ_INQ_RSP_TX_POWER:
2487 		hci_cc_read_inq_rsp_tx_power(hdev, skb);
2488 		break;
2489 
2490 	case HCI_OP_PIN_CODE_REPLY:
2491 		hci_cc_pin_code_reply(hdev, skb);
2492 		break;
2493 
2494 	case HCI_OP_PIN_CODE_NEG_REPLY:
2495 		hci_cc_pin_code_neg_reply(hdev, skb);
2496 		break;
2497 
2498 	case HCI_OP_READ_LOCAL_OOB_DATA:
2499 		hci_cc_read_local_oob_data(hdev, skb);
2500 		break;
2501 
2502 	case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
2503 		hci_cc_read_local_oob_ext_data(hdev, skb);
2504 		break;
2505 
2506 	case HCI_OP_LE_READ_BUFFER_SIZE:
2507 		hci_cc_le_read_buffer_size(hdev, skb);
2508 		break;
2509 
2510 	case HCI_OP_LE_READ_LOCAL_FEATURES:
2511 		hci_cc_le_read_local_features(hdev, skb);
2512 		break;
2513 
2514 	case HCI_OP_LE_READ_ADV_TX_POWER:
2515 		hci_cc_le_read_adv_tx_power(hdev, skb);
2516 		break;
2517 
2518 	case HCI_OP_USER_CONFIRM_REPLY:
2519 		hci_cc_user_confirm_reply(hdev, skb);
2520 		break;
2521 
2522 	case HCI_OP_USER_CONFIRM_NEG_REPLY:
2523 		hci_cc_user_confirm_neg_reply(hdev, skb);
2524 		break;
2525 
2526 	case HCI_OP_USER_PASSKEY_REPLY:
2527 		hci_cc_user_passkey_reply(hdev, skb);
2528 		break;
2529 
2530 	case HCI_OP_USER_PASSKEY_NEG_REPLY:
2531 		hci_cc_user_passkey_neg_reply(hdev, skb);
2532 		break;
2533 
2534 	case HCI_OP_LE_SET_RANDOM_ADDR:
2535 		hci_cc_le_set_random_addr(hdev, skb);
2536 		break;
2537 
2538 	case HCI_OP_LE_SET_ADV_ENABLE:
2539 		hci_cc_le_set_adv_enable(hdev, skb);
2540 		break;
2541 
2542 	case HCI_OP_LE_SET_SCAN_PARAM:
2543 		hci_cc_le_set_scan_param(hdev, skb);
2544 		break;
2545 
2546 	case HCI_OP_LE_SET_SCAN_ENABLE:
2547 		hci_cc_le_set_scan_enable(hdev, skb);
2548 		break;
2549 
2550 	case HCI_OP_LE_READ_WHITE_LIST_SIZE:
2551 		hci_cc_le_read_white_list_size(hdev, skb);
2552 		break;
2553 
2554 	case HCI_OP_LE_CLEAR_WHITE_LIST:
2555 		hci_cc_le_clear_white_list(hdev, skb);
2556 		break;
2557 
2558 	case HCI_OP_LE_ADD_TO_WHITE_LIST:
2559 		hci_cc_le_add_to_white_list(hdev, skb);
2560 		break;
2561 
2562 	case HCI_OP_LE_DEL_FROM_WHITE_LIST:
2563 		hci_cc_le_del_from_white_list(hdev, skb);
2564 		break;
2565 
2566 	case HCI_OP_LE_READ_SUPPORTED_STATES:
2567 		hci_cc_le_read_supported_states(hdev, skb);
2568 		break;
2569 
2570 	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
2571 		hci_cc_write_le_host_supported(hdev, skb);
2572 		break;
2573 
2574 	case HCI_OP_LE_SET_ADV_PARAM:
2575 		hci_cc_set_adv_param(hdev, skb);
2576 		break;
2577 
2578 	case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
2579 		hci_cc_write_remote_amp_assoc(hdev, skb);
2580 		break;
2581 
2582 	default:
2583 		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2584 		break;
2585 	}
2586 
2587 	if (opcode != HCI_OP_NOP)
2588 		del_timer(&hdev->cmd_timer);
2589 
2590 	hci_req_cmd_complete(hdev, opcode, status);
2591 
2592 	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2593 		atomic_set(&hdev->cmd_cnt, 1);
2594 		if (!skb_queue_empty(&hdev->cmd_q))
2595 			queue_work(hdev->workqueue, &hdev->cmd_work);
2596 	}
2597 }
2598 
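/* Command Status event: dispatch to the per-opcode status handlers,
 * complete the pending request on failure (or when no further event is
 * expected) and restart command processing when the controller allows.
 */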
2599 static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
2600 {
2601 	struct hci_ev_cmd_status *ev = (void *) skb->data;
2602 	__u16 opcode;
2603 
2604 	skb_pull(skb, sizeof(*ev));
2605 
2606 	opcode = __le16_to_cpu(ev->opcode);
2607 
2608 	switch (opcode) {
2609 	case HCI_OP_INQUIRY:
2610 		hci_cs_inquiry(hdev, ev->status);
2611 		break;
2612 
2613 	case HCI_OP_CREATE_CONN:
2614 		hci_cs_create_conn(hdev, ev->status);
2615 		break;
2616 
2617 	case HCI_OP_ADD_SCO:
2618 		hci_cs_add_sco(hdev, ev->status);
2619 		break;
2620 
2621 	case HCI_OP_AUTH_REQUESTED:
2622 		hci_cs_auth_requested(hdev, ev->status);
2623 		break;
2624 
2625 	case HCI_OP_SET_CONN_ENCRYPT:
2626 		hci_cs_set_conn_encrypt(hdev, ev->status);
2627 		break;
2628 
2629 	case HCI_OP_REMOTE_NAME_REQ:
2630 		hci_cs_remote_name_req(hdev, ev->status);
2631 		break;
2632 
2633 	case HCI_OP_READ_REMOTE_FEATURES:
2634 		hci_cs_read_remote_features(hdev, ev->status);
2635 		break;
2636 
2637 	case HCI_OP_READ_REMOTE_EXT_FEATURES:
2638 		hci_cs_read_remote_ext_features(hdev, ev->status);
2639 		break;
2640 
2641 	case HCI_OP_SETUP_SYNC_CONN:
2642 		hci_cs_setup_sync_conn(hdev, ev->status);
2643 		break;
2644 
2645 	case HCI_OP_SNIFF_MODE:
2646 		hci_cs_sniff_mode(hdev, ev->status);
2647 		break;
2648 
2649 	case HCI_OP_EXIT_SNIFF_MODE:
2650 		hci_cs_exit_sniff_mode(hdev, ev->status);
2651 		break;
2652 
2653 	case HCI_OP_DISCONNECT:
2654 		hci_cs_disconnect(hdev, ev->status);
2655 		break;
2656 
2657 	case HCI_OP_CREATE_PHY_LINK:
2658 		hci_cs_create_phylink(hdev, ev->status);
2659 		break;
2660 
2661 	case HCI_OP_ACCEPT_PHY_LINK:
2662 		hci_cs_accept_phylink(hdev, ev->status);
2663 		break;
2664 
2665 	case HCI_OP_LE_CREATE_CONN:
2666 		hci_cs_le_create_conn(hdev, ev->status);
2667 		break;
2668 
2669 	case HCI_OP_LE_START_ENC:
2670 		hci_cs_le_start_enc(hdev, ev->status);
2671 		break;
2672 
2673 	default:
2674 		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2675 		break;
2676 	}
2677 
2678 	if (opcode != HCI_OP_NOP)
2679 		del_timer(&hdev->cmd_timer);
2680 
2681 	if (ev->status ||
2682 	    (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
2683 		hci_req_cmd_complete(hdev, opcode, ev->status);
2684 
2685 	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2686 		atomic_set(&hdev->cmd_cnt, 1);
2687 		if (!skb_queue_empty(&hdev->cmd_q))
2688 			queue_work(hdev->workqueue, &hdev->cmd_work);
2689 	}
2690 }
2691 
2692 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2693 {
2694 	struct hci_ev_role_change *ev = (void *) skb->data;
2695 	struct hci_conn *conn;
2696 
2697 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2698 
2699 	hci_dev_lock(hdev);
2700 
2701 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2702 	if (conn) {
2703 		if (!ev->status) {
2704 			if (ev->role)
2705 				conn->link_mode &= ~HCI_LM_MASTER;
2706 			else
2707 				conn->link_mode |= HCI_LM_MASTER;
2708 		}
2709 
2710 		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2711 
2712 		hci_role_switch_cfm(conn, ev->status, ev->role);
2713 	}
2714 
2715 	hci_dev_unlock(hdev);
2716 }
2717 
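/* Number of Completed Packets event: return the reported packet credits
 * to the per-link-type counters and schedule the TX work so queued data
 * can be transmitted.
 */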
2718 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
2719 {
2720 	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
2721 	int i;
2722 
2723 	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
2724 		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2725 		return;
2726 	}
2727 
2728 	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2729 	    ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
2730 		BT_DBG("%s bad parameters", hdev->name);
2731 		return;
2732 	}
2733 
2734 	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
2735 
2736 	for (i = 0; i < ev->num_hndl; i++) {
2737 		struct hci_comp_pkts_info *info = &ev->handles[i];
2738 		struct hci_conn *conn;
2739 		__u16  handle, count;
2740 
2741 		handle = __le16_to_cpu(info->handle);
2742 		count  = __le16_to_cpu(info->count);
2743 
2744 		conn = hci_conn_hash_lookup_handle(hdev, handle);
2745 		if (!conn)
2746 			continue;
2747 
2748 		conn->sent -= count;
2749 
2750 		switch (conn->type) {
2751 		case ACL_LINK:
2752 			hdev->acl_cnt += count;
2753 			if (hdev->acl_cnt > hdev->acl_pkts)
2754 				hdev->acl_cnt = hdev->acl_pkts;
2755 			break;
2756 
2757 		case LE_LINK:
2758 			if (hdev->le_pkts) {
2759 				hdev->le_cnt += count;
2760 				if (hdev->le_cnt > hdev->le_pkts)
2761 					hdev->le_cnt = hdev->le_pkts;
2762 			} else {
2763 				hdev->acl_cnt += count;
2764 				if (hdev->acl_cnt > hdev->acl_pkts)
2765 					hdev->acl_cnt = hdev->acl_pkts;
2766 			}
2767 			break;
2768 
2769 		case SCO_LINK:
2770 			hdev->sco_cnt += count;
2771 			if (hdev->sco_cnt > hdev->sco_pkts)
2772 				hdev->sco_cnt = hdev->sco_pkts;
2773 			break;
2774 
2775 		default:
2776 			BT_ERR("Unknown type %d conn %p", conn->type, conn);
2777 			break;
2778 		}
2779 	}
2780 
2781 	queue_work(hdev->workqueue, &hdev->tx_work);
2782 }
2783 
2784 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
2785 						 __u16 handle)
2786 {
2787 	struct hci_chan *chan;
2788 
2789 	switch (hdev->dev_type) {
2790 	case HCI_BREDR:
2791 		return hci_conn_hash_lookup_handle(hdev, handle);
2792 	case HCI_AMP:
2793 		chan = hci_chan_lookup_handle(hdev, handle);
2794 		if (chan)
2795 			return chan->conn;
2796 		break;
2797 	default:
2798 		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2799 		break;
2800 	}
2801 
2802 	return NULL;
2803 }
2804 
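/* Number of Completed Data Blocks event: block-based flow control
 * counterpart of hci_num_comp_pkts_evt() above.
 */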
2805 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
2806 {
2807 	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
2808 	int i;
2809 
2810 	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
2811 		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2812 		return;
2813 	}
2814 
2815 	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2816 	    ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
2817 		BT_DBG("%s bad parameters", hdev->name);
2818 		return;
2819 	}
2820 
2821 	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
2822 	       ev->num_hndl);
2823 
2824 	for (i = 0; i < ev->num_hndl; i++) {
2825 		struct hci_comp_blocks_info *info = &ev->handles[i];
2826 		struct hci_conn *conn = NULL;
2827 		__u16  handle, block_count;
2828 
2829 		handle = __le16_to_cpu(info->handle);
2830 		block_count = __le16_to_cpu(info->blocks);
2831 
2832 		conn = __hci_conn_lookup_handle(hdev, handle);
2833 		if (!conn)
2834 			continue;
2835 
2836 		conn->sent -= block_count;
2837 
2838 		switch (conn->type) {
2839 		case ACL_LINK:
2840 		case AMP_LINK:
2841 			hdev->block_cnt += block_count;
2842 			if (hdev->block_cnt > hdev->num_blocks)
2843 				hdev->block_cnt = hdev->num_blocks;
2844 			break;
2845 
2846 		default:
2847 			BT_ERR("Unknown type %d conn %p", conn->type, conn);
2848 			break;
2849 		}
2850 	}
2851 
2852 	queue_work(hdev->workqueue, &hdev->tx_work);
2853 }
2854 
2855 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2856 {
2857 	struct hci_ev_mode_change *ev = (void *) skb->data;
2858 	struct hci_conn *conn;
2859 
2860 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2861 
2862 	hci_dev_lock(hdev);
2863 
2864 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2865 	if (conn) {
2866 		conn->mode = ev->mode;
2867 
2868 		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
2869 					&conn->flags)) {
2870 			if (conn->mode == HCI_CM_ACTIVE)
2871 				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2872 			else
2873 				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2874 		}
2875 
2876 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2877 			hci_sco_setup(conn, ev->status);
2878 	}
2879 
2880 	hci_dev_unlock(hdev);
2881 }
2882 
2883 static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2884 {
2885 	struct hci_ev_pin_code_req *ev = (void *) skb->data;
2886 	struct hci_conn *conn;
2887 
2888 	BT_DBG("%s", hdev->name);
2889 
2890 	hci_dev_lock(hdev);
2891 
2892 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2893 	if (!conn)
2894 		goto unlock;
2895 
2896 	if (conn->state == BT_CONNECTED) {
2897 		hci_conn_hold(conn);
2898 		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2899 		hci_conn_drop(conn);
2900 	}
2901 
2902 	if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
2903 		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2904 			     sizeof(ev->bdaddr), &ev->bdaddr);
2905 	else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
2906 		u8 secure;
2907 
2908 		if (conn->pending_sec_level == BT_SECURITY_HIGH)
2909 			secure = 1;
2910 		else
2911 			secure = 0;
2912 
2913 		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
2914 	}
2915 
2916 unlock:
2917 	hci_dev_unlock(hdev);
2918 }
2919 
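/* Link Key Request event: look up a stored link key for the remote
 * device and reply with it, unless the key is a debug key or too weak
 * for the required security level; otherwise send a negative reply.
 */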
2920 static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2921 {
2922 	struct hci_ev_link_key_req *ev = (void *) skb->data;
2923 	struct hci_cp_link_key_reply cp;
2924 	struct hci_conn *conn;
2925 	struct link_key *key;
2926 
2927 	BT_DBG("%s", hdev->name);
2928 
2929 	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2930 		return;
2931 
2932 	hci_dev_lock(hdev);
2933 
2934 	key = hci_find_link_key(hdev, &ev->bdaddr);
2935 	if (!key) {
2936 		BT_DBG("%s link key not found for %pMR", hdev->name,
2937 		       &ev->bdaddr);
2938 		goto not_found;
2939 	}
2940 
2941 	BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
2942 	       &ev->bdaddr);
2943 
2944 	if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
2945 	    key->type == HCI_LK_DEBUG_COMBINATION) {
2946 		BT_DBG("%s ignoring debug key", hdev->name);
2947 		goto not_found;
2948 	}
2949 
2950 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2951 	if (conn) {
2952 		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
2953 		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
2954 		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
2955 			BT_DBG("%s ignoring unauthenticated key", hdev->name);
2956 			goto not_found;
2957 		}
2958 
2959 		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
2960 		    conn->pending_sec_level == BT_SECURITY_HIGH) {
2961 			BT_DBG("%s ignoring key unauthenticated for high security",
2962 			       hdev->name);
2963 			goto not_found;
2964 		}
2965 
2966 		conn->key_type = key->type;
2967 		conn->pin_length = key->pin_len;
2968 	}
2969 
2970 	bacpy(&cp.bdaddr, &ev->bdaddr);
2971 	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
2972 
2973 	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
2974 
2975 	hci_dev_unlock(hdev);
2976 
2977 	return;
2978 
2979 not_found:
2980 	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
2981 	hci_dev_unlock(hdev);
2982 }
2983 
2984 static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
2985 {
2986 	struct hci_ev_link_key_notify *ev = (void *) skb->data;
2987 	struct hci_conn *conn;
2988 	u8 pin_len = 0;
2989 
2990 	BT_DBG("%s", hdev->name);
2991 
2992 	hci_dev_lock(hdev);
2993 
2994 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2995 	if (conn) {
2996 		hci_conn_hold(conn);
2997 		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2998 		pin_len = conn->pin_length;
2999 
3000 		if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
3001 			conn->key_type = ev->key_type;
3002 
3003 		hci_conn_drop(conn);
3004 	}
3005 
3006 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
3007 		hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
3008 				 ev->key_type, pin_len);
3009 
3010 	hci_dev_unlock(hdev);
3011 }
3012 
3013 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
3014 {
3015 	struct hci_ev_clock_offset *ev = (void *) skb->data;
3016 	struct hci_conn *conn;
3017 
3018 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3019 
3020 	hci_dev_lock(hdev);
3021 
3022 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3023 	if (conn && !ev->status) {
3024 		struct inquiry_entry *ie;
3025 
3026 		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3027 		if (ie) {
3028 			ie->data.clock_offset = ev->clock_offset;
3029 			ie->timestamp = jiffies;
3030 		}
3031 	}
3032 
3033 	hci_dev_unlock(hdev);
3034 }
3035 
3036 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3037 {
3038 	struct hci_ev_pkt_type_change *ev = (void *) skb->data;
3039 	struct hci_conn *conn;
3040 
3041 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3042 
3043 	hci_dev_lock(hdev);
3044 
3045 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3046 	if (conn && !ev->status)
3047 		conn->pkt_type = __le16_to_cpu(ev->pkt_type);
3048 
3049 	hci_dev_unlock(hdev);
3050 }
3051 
3052 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
3053 {
3054 	struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
3055 	struct inquiry_entry *ie;
3056 
3057 	BT_DBG("%s", hdev->name);
3058 
3059 	hci_dev_lock(hdev);
3060 
3061 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3062 	if (ie) {
3063 		ie->data.pscan_rep_mode = ev->pscan_rep_mode;
3064 		ie->timestamp = jiffies;
3065 	}
3066 
3067 	hci_dev_unlock(hdev);
3068 }
3069 
3070 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
3071 					     struct sk_buff *skb)
3072 {
3073 	struct inquiry_data data;
3074 	int num_rsp = *((__u8 *) skb->data);
3075 	bool name_known, ssp;
3076 
3077 	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3078 
3079 	if (!num_rsp)
3080 		return;
3081 
3082 	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
3083 		return;
3084 
3085 	hci_dev_lock(hdev);
3086 
3087 	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
3088 		struct inquiry_info_with_rssi_and_pscan_mode *info;
3089 		info = (void *) (skb->data + 1);
3090 
3091 		for (; num_rsp; num_rsp--, info++) {
3092 			bacpy(&data.bdaddr, &info->bdaddr);
3093 			data.pscan_rep_mode	= info->pscan_rep_mode;
3094 			data.pscan_period_mode	= info->pscan_period_mode;
3095 			data.pscan_mode		= info->pscan_mode;
3096 			memcpy(data.dev_class, info->dev_class, 3);
3097 			data.clock_offset	= info->clock_offset;
3098 			data.rssi		= info->rssi;
3099 			data.ssp_mode		= 0x00;
3100 
3101 			name_known = hci_inquiry_cache_update(hdev, &data,
3102 							      false, &ssp);
3103 			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3104 					  info->dev_class, info->rssi,
3105 					  !name_known, ssp, NULL, 0);
3106 		}
3107 	} else {
3108 		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
3109 
3110 		for (; num_rsp; num_rsp--, info++) {
3111 			bacpy(&data.bdaddr, &info->bdaddr);
3112 			data.pscan_rep_mode	= info->pscan_rep_mode;
3113 			data.pscan_period_mode	= info->pscan_period_mode;
3114 			data.pscan_mode		= 0x00;
3115 			memcpy(data.dev_class, info->dev_class, 3);
3116 			data.clock_offset	= info->clock_offset;
3117 			data.rssi		= info->rssi;
3118 			data.ssp_mode		= 0x00;
3119 			name_known = hci_inquiry_cache_update(hdev, &data,
3120 							      false, &ssp);
3121 			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3122 					  info->dev_class, info->rssi,
3123 					  !name_known, ssp, NULL, 0);
3124 		}
3125 	}
3126 
3127 	hci_dev_unlock(hdev);
3128 }
3129 
3130 static void hci_remote_ext_features_evt(struct hci_dev *hdev,
3131 					struct sk_buff *skb)
3132 {
3133 	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
3134 	struct hci_conn *conn;
3135 
3136 	BT_DBG("%s", hdev->name);
3137 
3138 	hci_dev_lock(hdev);
3139 
3140 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3141 	if (!conn)
3142 		goto unlock;
3143 
3144 	if (ev->page < HCI_MAX_PAGES)
3145 		memcpy(conn->features[ev->page], ev->features, 8);
3146 
3147 	if (!ev->status && ev->page == 0x01) {
3148 		struct inquiry_entry *ie;
3149 
3150 		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3151 		if (ie)
3152 			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3153 
3154 		if (ev->features[0] & LMP_HOST_SSP) {
3155 			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
3156 		} else {
3157 			/* The Bluetooth specification mandates that
3158 			 * Extended Inquiry Results are only used when Secure
3159 			 * Simple Pairing is enabled, but some devices violate
3160 			 * this.
3161 			 *
3162 			 * To make these devices work, the internal SSP
3163 			 * enabled flag needs to be cleared if the remote host
3164 			 * features do not indicate SSP support */
3165 			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
3166 		}
3167 
3168 		if (ev->features[0] & LMP_HOST_SC)
3169 			set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
3170 	}
3171 
3172 	if (conn->state != BT_CONFIG)
3173 		goto unlock;
3174 
3175 	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
3176 		struct hci_cp_remote_name_req cp;
3177 		memset(&cp, 0, sizeof(cp));
3178 		bacpy(&cp.bdaddr, &conn->dst);
3179 		cp.pscan_rep_mode = 0x02;
3180 		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3181 	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3182 		mgmt_device_connected(hdev, &conn->dst, conn->type,
3183 				      conn->dst_type, 0, NULL, 0,
3184 				      conn->dev_class);
3185 
3186 	if (!hci_outgoing_auth_needed(hdev, conn)) {
3187 		conn->state = BT_CONNECTED;
3188 		hci_proto_connect_cfm(conn, ev->status);
3189 		hci_conn_drop(conn);
3190 	}
3191 
3192 unlock:
3193 	hci_dev_unlock(hdev);
3194 }
3195 
3196 static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
3197 				       struct sk_buff *skb)
3198 {
3199 	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
3200 	struct hci_conn *conn;
3201 
3202 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3203 
3204 	hci_dev_lock(hdev);
3205 
3206 	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
3207 	if (!conn) {
3208 		if (ev->link_type == ESCO_LINK)
3209 			goto unlock;
3210 
3211 		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
3212 		if (!conn)
3213 			goto unlock;
3214 
3215 		conn->type = SCO_LINK;
3216 	}
3217 
3218 	switch (ev->status) {
3219 	case 0x00:
3220 		conn->handle = __le16_to_cpu(ev->handle);
3221 		conn->state  = BT_CONNECTED;
3222 
3223 		hci_conn_add_sysfs(conn);
3224 		break;
3225 
3226 	case 0x0d:	/* Connection Rejected due to Limited Resources */
3227 	case 0x11:	/* Unsupported Feature or Parameter Value */
3228 	case 0x1c:	/* SCO interval rejected */
3229 	case 0x1a:	/* Unsupported Remote Feature */
3230 	case 0x1f:	/* Unspecified error */
3231 	case 0x20:	/* Unsupported LMP Parameter value */
3232 		if (conn->out) {
3233 			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
3234 					(hdev->esco_type & EDR_ESCO_MASK);
3235 			if (hci_setup_sync(conn, conn->link->handle))
3236 				goto unlock;
3237 		}
3238 		/* fall through */
3239 
3240 	default:
3241 		conn->state = BT_CLOSED;
3242 		break;
3243 	}
3244 
3245 	hci_proto_connect_cfm(conn, ev->status);
3246 	if (ev->status)
3247 		hci_conn_del(conn);
3248 
3249 unlock:
3250 	hci_dev_unlock(hdev);
3251 }
3252 
3253 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
3254 {
3255 	size_t parsed = 0;
3256 
3257 	while (parsed < eir_len) {
3258 		u8 field_len = eir[0];
3259 
3260 		if (field_len == 0)
3261 			return parsed;
3262 
3263 		parsed += field_len + 1;
3264 		eir += field_len + 1;
3265 	}
3266 
3267 	return eir_len;
3268 }
3269 
3270 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
3271 					    struct sk_buff *skb)
3272 {
3273 	struct inquiry_data data;
3274 	struct extended_inquiry_info *info = (void *) (skb->data + 1);
3275 	int num_rsp = *((__u8 *) skb->data);
3276 	size_t eir_len;
3277 
3278 	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3279 
3280 	if (!num_rsp)
3281 		return;
3282 
3283 	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
3284 		return;
3285 
3286 	hci_dev_lock(hdev);
3287 
3288 	for (; num_rsp; num_rsp--, info++) {
3289 		bool name_known, ssp;
3290 
3291 		bacpy(&data.bdaddr, &info->bdaddr);
3292 		data.pscan_rep_mode	= info->pscan_rep_mode;
3293 		data.pscan_period_mode	= info->pscan_period_mode;
3294 		data.pscan_mode		= 0x00;
3295 		memcpy(data.dev_class, info->dev_class, 3);
3296 		data.clock_offset	= info->clock_offset;
3297 		data.rssi		= info->rssi;
3298 		data.ssp_mode		= 0x01;
3299 
3300 		if (test_bit(HCI_MGMT, &hdev->dev_flags))
3301 			name_known = eir_has_data_type(info->data,
3302 						       sizeof(info->data),
3303 						       EIR_NAME_COMPLETE);
3304 		else
3305 			name_known = true;
3306 
3307 		name_known = hci_inquiry_cache_update(hdev, &data, name_known,
3308 						      &ssp);
3309 		eir_len = eir_get_length(info->data, sizeof(info->data));
3310 		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3311 				  info->dev_class, info->rssi, !name_known,
3312 				  ssp, info->data, eir_len);
3313 	}
3314 
3315 	hci_dev_unlock(hdev);
3316 }
3317 
3318 static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
3319 					 struct sk_buff *skb)
3320 {
3321 	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
3322 	struct hci_conn *conn;
3323 
3324 	BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
3325 	       __le16_to_cpu(ev->handle));
3326 
3327 	hci_dev_lock(hdev);
3328 
3329 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3330 	if (!conn)
3331 		goto unlock;
3332 
3333 	if (!ev->status)
3334 		conn->sec_level = conn->pending_sec_level;
3335 
3336 	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3337 
3338 	if (ev->status && conn->state == BT_CONNECTED) {
3339 		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3340 		hci_conn_drop(conn);
3341 		goto unlock;
3342 	}
3343 
3344 	if (conn->state == BT_CONFIG) {
3345 		if (!ev->status)
3346 			conn->state = BT_CONNECTED;
3347 
3348 		hci_proto_connect_cfm(conn, ev->status);
3349 		hci_conn_drop(conn);
3350 	} else {
3351 		hci_auth_cfm(conn, ev->status);
3352 
3353 		hci_conn_hold(conn);
3354 		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3355 		hci_conn_drop(conn);
3356 	}
3357 
3358 unlock:
3359 	hci_dev_unlock(hdev);
3360 }
3361 
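/* Work out which authentication requirements to advertise in the IO
 * Capability reply, taking the remote side's stated requirements into
 * account.
 */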
3362 static u8 hci_get_auth_req(struct hci_conn *conn)
3363 {
3364 	/* If the remote requests dedicated bonding, follow that lead */
3365 	if (conn->remote_auth == HCI_AT_DEDICATED_BONDING ||
3366 	    conn->remote_auth == HCI_AT_DEDICATED_BONDING_MITM) {
3367 		/* If both remote and local IO capabilities allow MITM
3368 		 * protection, require it; otherwise don't. */
3369 		if (conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT ||
3370 		    conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)
3371 			return HCI_AT_DEDICATED_BONDING;
3372 		else
3373 			return HCI_AT_DEDICATED_BONDING_MITM;
3374 	}
3375 
3376 	/* If the remote requests no-bonding, follow that lead */
3377 	if (conn->remote_auth == HCI_AT_NO_BONDING ||
3378 	    conn->remote_auth == HCI_AT_NO_BONDING_MITM)
3379 		return conn->remote_auth | (conn->auth_type & 0x01);
3380 
3381 	return conn->auth_type;
3382 }
3383 
3384 static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3385 {
3386 	struct hci_ev_io_capa_request *ev = (void *) skb->data;
3387 	struct hci_conn *conn;
3388 
3389 	BT_DBG("%s", hdev->name);
3390 
3391 	hci_dev_lock(hdev);
3392 
3393 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3394 	if (!conn)
3395 		goto unlock;
3396 
3397 	hci_conn_hold(conn);
3398 
3399 	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3400 		goto unlock;
3401 
3402 	if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
3403 	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
3404 		struct hci_cp_io_capability_reply cp;
3405 
3406 		bacpy(&cp.bdaddr, &ev->bdaddr);
3407 		/* Change the IO capability from KeyboardDisplay
3408 		 * to DisplayYesNo, since KeyboardDisplay is not a valid value here per the BT spec. */
3409 		cp.capability = (conn->io_capability == 0x04) ?
3410 				HCI_IO_DISPLAY_YESNO : conn->io_capability;
3411 		conn->auth_type = hci_get_auth_req(conn);
3412 		cp.authentication = conn->auth_type;
3413 
3414 		if (hci_find_remote_oob_data(hdev, &conn->dst) &&
3415 		    (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
3416 			cp.oob_data = 0x01;
3417 		else
3418 			cp.oob_data = 0x00;
3419 
3420 		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
3421 			     sizeof(cp), &cp);
3422 	} else {
3423 		struct hci_cp_io_capability_neg_reply cp;
3424 
3425 		bacpy(&cp.bdaddr, &ev->bdaddr);
3426 		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
3427 
3428 		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
3429 			     sizeof(cp), &cp);
3430 	}
3431 
3432 unlock:
3433 	hci_dev_unlock(hdev);
3434 }
3435 
3436 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3437 {
3438 	struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3439 	struct hci_conn *conn;
3440 
3441 	BT_DBG("%s", hdev->name);
3442 
3443 	hci_dev_lock(hdev);
3444 
3445 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3446 	if (!conn)
3447 		goto unlock;
3448 
3449 	conn->remote_cap = ev->capability;
3450 	conn->remote_auth = ev->authentication;
3451 	if (ev->oob_data)
3452 		set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3453 
3454 unlock:
3455 	hci_dev_unlock(hdev);
3456 }
3457 
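/* User Confirmation Request event: reject when we require MITM but the
 * remote side cannot provide it, auto-accept when neither side needs
 * MITM protection (optionally after a configured delay), and otherwise
 * hand the numeric comparison to user space via mgmt.
 */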
3458 static void hci_user_confirm_request_evt(struct hci_dev *hdev,
3459 					 struct sk_buff *skb)
3460 {
3461 	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
3462 	int loc_mitm, rem_mitm, confirm_hint = 0;
3463 	struct hci_conn *conn;
3464 
3465 	BT_DBG("%s", hdev->name);
3466 
3467 	hci_dev_lock(hdev);
3468 
3469 	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3470 		goto unlock;
3471 
3472 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3473 	if (!conn)
3474 		goto unlock;
3475 
3476 	loc_mitm = (conn->auth_type & 0x01);
3477 	rem_mitm = (conn->remote_auth & 0x01);
3478 
3479 	/* If we require MITM but the remote device can't provide it
3480 	 * (it has NoInputNoOutput), then reject the confirmation
3481 	 * request. The only exception is when we're the dedicated bonding
3482 	 * initiator (connect_cfm_cb set), since then we always have the MITM
3483 	 * bit set. */
3484 	if (!conn->connect_cfm_cb && loc_mitm &&
3485 	    conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
3486 		BT_DBG("Rejecting request: remote device can't provide MITM");
3487 		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
3488 			     sizeof(ev->bdaddr), &ev->bdaddr);
3489 		goto unlock;
3490 	}
3491 
3492 	/* If no side requires MITM protection; auto-accept */
3493 	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
3494 	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
3495 
3496 		/* If we're not the initiator, request authorization to
3497 		 * proceed from user space (mgmt_user_confirm with
3498 		 * confirm_hint set to 1). */
3499 		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3500 			BT_DBG("Confirming auto-accept as acceptor");
3501 			confirm_hint = 1;
3502 			goto confirm;
3503 		}
3504 
3505 		BT_DBG("Auto-accept of user confirmation with %ums delay",
3506 		       hdev->auto_accept_delay);
3507 
3508 		if (hdev->auto_accept_delay > 0) {
3509 			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
3510 			queue_delayed_work(conn->hdev->workqueue,
3511 					   &conn->auto_accept_work, delay);
3512 			goto unlock;
3513 		}
3514 
3515 		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
3516 			     sizeof(ev->bdaddr), &ev->bdaddr);
3517 		goto unlock;
3518 	}
3519 
3520 confirm:
3521 	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
3522 				  le32_to_cpu(ev->passkey), confirm_hint);
3523 
3524 unlock:
3525 	hci_dev_unlock(hdev);
3526 }
3527 
3528 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
3529 					 struct sk_buff *skb)
3530 {
3531 	struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3532 
3533 	BT_DBG("%s", hdev->name);
3534 
3535 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
3536 		mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3537 }
3538 
3539 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
3540 					struct sk_buff *skb)
3541 {
3542 	struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
3543 	struct hci_conn *conn;
3544 
3545 	BT_DBG("%s", hdev->name);
3546 
3547 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3548 	if (!conn)
3549 		return;
3550 
3551 	conn->passkey_notify = __le32_to_cpu(ev->passkey);
3552 	conn->passkey_entered = 0;
3553 
3554 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
3555 		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3556 					 conn->dst_type, conn->passkey_notify,
3557 					 conn->passkey_entered);
3558 }
3559 
3560 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3561 {
3562 	struct hci_ev_keypress_notify *ev = (void *) skb->data;
3563 	struct hci_conn *conn;
3564 
3565 	BT_DBG("%s", hdev->name);
3566 
3567 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3568 	if (!conn)
3569 		return;
3570 
3571 	switch (ev->type) {
3572 	case HCI_KEYPRESS_STARTED:
3573 		conn->passkey_entered = 0;
3574 		return;
3575 
3576 	case HCI_KEYPRESS_ENTERED:
3577 		conn->passkey_entered++;
3578 		break;
3579 
3580 	case HCI_KEYPRESS_ERASED:
3581 		conn->passkey_entered--;
3582 		break;
3583 
3584 	case HCI_KEYPRESS_CLEARED:
3585 		conn->passkey_entered = 0;
3586 		break;
3587 
3588 	case HCI_KEYPRESS_COMPLETED:
3589 		return;
3590 	}
3591 
3592 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
3593 		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3594 					 conn->dst_type, conn->passkey_notify,
3595 					 conn->passkey_entered);
3596 }
3597 
3598 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
3599 					 struct sk_buff *skb)
3600 {
3601 	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
3602 	struct hci_conn *conn;
3603 
3604 	BT_DBG("%s", hdev->name);
3605 
3606 	hci_dev_lock(hdev);
3607 
3608 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3609 	if (!conn)
3610 		goto unlock;
3611 
3612 	/* To avoid duplicate auth_failed events to user space, we check
3613 	 * the HCI_CONN_AUTH_PEND flag, which will be set if we
3614 	 * initiated the authentication. A traditional auth_complete
3615 	 * event is always produced as the initiator and is also mapped to
3616 	 * the mgmt_auth_failed event. */
3617 	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
3618 		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
3619 				 ev->status);
3620 
3621 	hci_conn_drop(conn);
3622 
3623 unlock:
3624 	hci_dev_unlock(hdev);
3625 }
3626 
3627 static void hci_remote_host_features_evt(struct hci_dev *hdev,
3628 					 struct sk_buff *skb)
3629 {
3630 	struct hci_ev_remote_host_features *ev = (void *) skb->data;
3631 	struct inquiry_entry *ie;
3632 	struct hci_conn *conn;
3633 
3634 	BT_DBG("%s", hdev->name);
3635 
3636 	hci_dev_lock(hdev);
3637 
3638 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3639 	if (conn)
3640 		memcpy(conn->features[1], ev->features, 8);
3641 
3642 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3643 	if (ie)
3644 		ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3645 
3646 	hci_dev_unlock(hdev);
3647 }
3648 
3649 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
3650 					    struct sk_buff *skb)
3651 {
3652 	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
3653 	struct oob_data *data;
3654 
3655 	BT_DBG("%s", hdev->name);
3656 
3657 	hci_dev_lock(hdev);
3658 
3659 	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3660 		goto unlock;
3661 
3662 	data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
3663 	if (data) {
3664 		if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
3665 			struct hci_cp_remote_oob_ext_data_reply cp;
3666 
3667 			bacpy(&cp.bdaddr, &ev->bdaddr);
3668 			memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
3669 			memcpy(cp.randomizer192, data->randomizer192,
3670 			       sizeof(cp.randomizer192));
3671 			memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
3672 			memcpy(cp.randomizer256, data->randomizer256,
3673 			       sizeof(cp.randomizer256));
3674 
3675 			hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
3676 				     sizeof(cp), &cp);
3677 		} else {
3678 			struct hci_cp_remote_oob_data_reply cp;
3679 
3680 			bacpy(&cp.bdaddr, &ev->bdaddr);
3681 			memcpy(cp.hash, data->hash192, sizeof(cp.hash));
3682 			memcpy(cp.randomizer, data->randomizer192,
3683 			       sizeof(cp.randomizer));
3684 
3685 			hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
3686 				     sizeof(cp), &cp);
3687 		}
3688 	} else {
3689 		struct hci_cp_remote_oob_data_neg_reply cp;
3690 
3691 		bacpy(&cp.bdaddr, &ev->bdaddr);
3692 		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
3693 			     sizeof(cp), &cp);
3694 	}
3695 
3696 unlock:
3697 	hci_dev_unlock(hdev);
3698 }
3699 
3700 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
3701 				      struct sk_buff *skb)
3702 {
3703 	struct hci_ev_phy_link_complete *ev = (void *) skb->data;
3704 	struct hci_conn *hcon, *bredr_hcon;
3705 
3706 	BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
3707 	       ev->status);
3708 
3709 	hci_dev_lock(hdev);
3710 
3711 	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3712 	if (!hcon) {
3713 		hci_dev_unlock(hdev);
3714 		return;
3715 	}
3716 
3717 	if (ev->status) {
3718 		hci_conn_del(hcon);
3719 		hci_dev_unlock(hdev);
3720 		return;
3721 	}
3722 
3723 	bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
3724 
3725 	hcon->state = BT_CONNECTED;
3726 	bacpy(&hcon->dst, &bredr_hcon->dst);
3727 
3728 	hci_conn_hold(hcon);
3729 	hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3730 	hci_conn_drop(hcon);
3731 
3732 	hci_conn_add_sysfs(hcon);
3733 
3734 	amp_physical_cfm(bredr_hcon, hcon);
3735 
3736 	hci_dev_unlock(hdev);
3737 }
3738 
3739 static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3740 {
3741 	struct hci_ev_logical_link_complete *ev = (void *) skb->data;
3742 	struct hci_conn *hcon;
3743 	struct hci_chan *hchan;
3744 	struct amp_mgr *mgr;
3745 
3746 	BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
3747 	       hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
3748 	       ev->status);
3749 
3750 	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3751 	if (!hcon)
3752 		return;
3753 
3754 	/* Create AMP hchan */
3755 	hchan = hci_chan_create(hcon);
3756 	if (!hchan)
3757 		return;
3758 
3759 	hchan->handle = le16_to_cpu(ev->handle);
3760 
3761 	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
3762 
3763 	mgr = hcon->amp_mgr;
3764 	if (mgr && mgr->bredr_chan) {
3765 		struct l2cap_chan *bredr_chan = mgr->bredr_chan;
3766 
3767 		l2cap_chan_lock(bredr_chan);
3768 
3769 		bredr_chan->conn->mtu = hdev->block_mtu;
3770 		l2cap_logical_cfm(bredr_chan, hchan, 0);
3771 		hci_conn_hold(hcon);
3772 
3773 		l2cap_chan_unlock(bredr_chan);
3774 	}
3775 }
3776 
3777 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
3778 					     struct sk_buff *skb)
3779 {
3780 	struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
3781 	struct hci_chan *hchan;
3782 
3783 	BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
3784 	       le16_to_cpu(ev->handle), ev->status);
3785 
3786 	if (ev->status)
3787 		return;
3788 
3789 	hci_dev_lock(hdev);
3790 
3791 	hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
3792 	if (!hchan)
3793 		goto unlock;
3794 
3795 	amp_destroy_logical_link(hchan, ev->reason);
3796 
3797 unlock:
3798 	hci_dev_unlock(hdev);
3799 }
3800 
3801 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
3802 					     struct sk_buff *skb)
3803 {
3804 	struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
3805 	struct hci_conn *hcon;
3806 
3807 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3808 
3809 	if (ev->status)
3810 		return;
3811 
3812 	hci_dev_lock(hdev);
3813 
3814 	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3815 	if (hcon) {
3816 		hcon->state = BT_CLOSED;
3817 		hci_conn_del(hcon);
3818 	}
3819 
3820 	hci_dev_unlock(hdev);
3821 }
3822 
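/* LE Connection Complete event: create or update the hci_conn, work out
 * the initiator and responder addresses, resolve the peer's identity
 * address from its IRK and notify mgmt and the upper layers.
 */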
3823 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3824 {
3825 	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
3826 	struct hci_conn *conn;
3827 	struct smp_irk *irk;
3828 
3829 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3830 
3831 	hci_dev_lock(hdev);
3832 
3833 	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
3834 	if (!conn) {
3835 		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
3836 		if (!conn) {
3837 			BT_ERR("No memory for new connection");
3838 			goto unlock;
3839 		}
3840 
3841 		conn->dst_type = ev->bdaddr_type;
3842 
3843 		/* The advertising parameters for own address type
3844 		 * define which source address and source address
3845 		 * type this connection has.
3846 		 */
3847 		if (bacmp(&conn->src, BDADDR_ANY)) {
3848 			conn->src_type = ADDR_LE_DEV_PUBLIC;
3849 		} else {
3850 			bacpy(&conn->src, &hdev->static_addr);
3851 			conn->src_type = ADDR_LE_DEV_RANDOM;
3852 		}
3853 
3854 		if (ev->role == LE_CONN_ROLE_MASTER) {
3855 			conn->out = true;
3856 			conn->link_mode |= HCI_LM_MASTER;
3857 		}
3858 
3859 		/* If we didn't have a hci_conn object previously
3860 		 * but we're in the master role, this must be something
3861 		 * initiated using a white list. Since white list based
3862 		 * connections are not "first class citizens" we don't
3863 		 * have full tracking of them. Therefore, we go ahead
3864 		 * with a "best effort" approach of determining the
3865 		 * initiator address based on the HCI_PRIVACY flag.
3866 		 */
3867 		if (conn->out) {
3868 			conn->resp_addr_type = ev->bdaddr_type;
3869 			bacpy(&conn->resp_addr, &ev->bdaddr);
3870 			if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
3871 				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
3872 				bacpy(&conn->init_addr, &hdev->rpa);
3873 			} else {
3874 				hci_copy_identity_address(hdev,
3875 							  &conn->init_addr,
3876 							  &conn->init_addr_type);
3877 			}
3878 		} else {
3879 			/* Set the responder (our side) address type based on
3880 			 * the advertising address type.
3881 			 */
3882 			conn->resp_addr_type = hdev->adv_addr_type;
3883 			if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM)
3884 				bacpy(&conn->resp_addr, &hdev->random_addr);
3885 			else
3886 				bacpy(&conn->resp_addr, &hdev->bdaddr);
3887 
3888 			conn->init_addr_type = ev->bdaddr_type;
3889 			bacpy(&conn->init_addr, &ev->bdaddr);
3890 		}
3891 	} else {
3892 		cancel_delayed_work(&conn->le_conn_timeout);
3893 	}
3894 
3895 	/* Ensure that the hci_conn contains the identity address type
3896 	 * regardless of which address the connection was made with.
3897 	 */
3898 	hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
3899 
3900 	/* Lookup the identity address from the stored connection
3901 	 * address and address type.
3902 	 *
3903 	 * When establishing connections to an identity address, the
3904 	 * connection procedure will store the resolvable random
3905 	 * address first. Now if it can be converted back into the
3906 	 * identity address, start using the identity address from
3907 	 * now on.
3908 	 */
3909 	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
3910 	if (irk) {
3911 		bacpy(&conn->dst, &irk->bdaddr);
3912 		conn->dst_type = irk->addr_type;
3913 	}
3914 
3915 	if (ev->status) {
3916 		hci_le_conn_failed(conn, ev->status);
3917 		goto unlock;
3918 	}
3919 
3920 	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3921 		mgmt_device_connected(hdev, &conn->dst, conn->type,
3922 				      conn->dst_type, 0, NULL, 0, NULL);
3923 
3924 	conn->sec_level = BT_SECURITY_LOW;
3925 	conn->handle = __le16_to_cpu(ev->handle);
3926 	conn->state = BT_CONNECTED;
3927 
3928 	if (test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
3929 		set_bit(HCI_CONN_6LOWPAN, &conn->flags);
3930 
3931 	hci_conn_add_sysfs(conn);
3932 
3933 	hci_proto_connect_cfm(conn, ev->status);
3934 
3935 	hci_pend_le_conn_del(hdev, &conn->dst, conn->dst_type);
3936 
3937 unlock:
3938 	hci_dev_unlock(hdev);
3939 }
3940 
3941 /* This function requires the caller holds hdev->lock */
3942 static void check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr,
3943 				  u8 addr_type)
3944 {
3945 	struct hci_conn *conn;
3946 	struct smp_irk *irk;
3947 
3948 	/* If this is a resolvable address, we should resolve it and then
3949 	 * update address and address type variables.
3950 	 */
3951 	irk = hci_get_irk(hdev, addr, addr_type);
3952 	if (irk) {
3953 		addr = &irk->bdaddr;
3954 		addr_type = irk->addr_type;
3955 	}
3956 
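	/* Only devices on the pending LE connection list are of interest
	 * here; everything else is ignored.
	 */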
3957 	if (!hci_pend_le_conn_lookup(hdev, addr, addr_type))
3958 		return;
3959 
3960 	conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
3961 			      HCI_AT_NO_BONDING);
3962 	if (!IS_ERR(conn))
3963 		return;
3964 
3965 	switch (PTR_ERR(conn)) {
3966 	case -EBUSY:
3967 		/* If hci_connect_le() returns -EBUSY it means there is
3968 		 * already an LE connection attempt going on. Since
3969 		 * controllers don't support more than one connection
3970 		 * attempt at a time, we don't consider this an error case.
3971 		 */
3972 		break;
3973 	default:
3974 		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
3975 	}
3976 }
3977 
3978 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
3979 {
3980 	u8 num_reports = skb->data[0];
3981 	void *ptr = &skb->data[1];
3982 	s8 rssi;
3983 
3984 	hci_dev_lock(hdev);
3985 
3986 	while (num_reports--) {
3987 		struct hci_ev_le_advertising_info *ev = ptr;
3988 
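		/* Connectable advertising (undirected or directed) may be
		 * the trigger for establishing a pending LE connection.
		 */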
3989 		if (ev->evt_type == LE_ADV_IND ||
3990 		    ev->evt_type == LE_ADV_DIRECT_IND)
3991 			check_pending_le_conn(hdev, &ev->bdaddr,
3992 					      ev->bdaddr_type);
3993 
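		/* The RSSI value is a single byte appended directly after
		 * the advertising data, which is also why the pointer
		 * advance below adds one extra byte.
		 */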
3994 		rssi = ev->data[ev->length];
3995 		mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type,
3996 				  NULL, rssi, 0, 1, ev->data, ev->length);
3997 
3998 		ptr += sizeof(*ev) + ev->length + 1;
3999 	}
4000 
4001 	hci_dev_unlock(hdev);
4002 }
4003 
4004 static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4005 {
4006 	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
4007 	struct hci_cp_le_ltk_reply cp;
4008 	struct hci_cp_le_ltk_neg_reply neg;
4009 	struct hci_conn *conn;
4010 	struct smp_ltk *ltk;
4011 
4012 	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
4013 
4014 	hci_dev_lock(hdev);
4015 
4016 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4017 	if (!conn)
4018 		goto not_found;
4019 
4020 	ltk = hci_find_ltk(hdev, ev->ediv, ev->rand, conn->out);
4021 	if (!ltk)
4022 		goto not_found;
4023 
4024 	memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
4025 	cp.handle = cpu_to_le16(conn->handle);
4026 
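	/* An authenticated (MITM protected) key allows a higher security
	 * level than an unauthenticated one.
	 */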
4027 	if (ltk->authenticated)
4028 		conn->pending_sec_level = BT_SECURITY_HIGH;
4029 	else
4030 		conn->pending_sec_level = BT_SECURITY_MEDIUM;
4031 
4032 	conn->enc_key_size = ltk->enc_size;
4033 
4034 	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
4035 
4036 	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. The STK is a
4037 	 * temporary key used to encrypt a connection immediately
4038 	 * following pairing. It protects the Encrypted Session Setup
4039 	 * during which the keys are distributed. Later, security can
4040 	 * be re-established using a distributed LTK.
4041 	 */
4042 	if (ltk->type == HCI_SMP_STK_SLAVE) {
4043 		list_del(&ltk->list);
4044 		kfree(ltk);
4045 	}
4046 
4047 	hci_dev_unlock(hdev);
4048 
4049 	return;
4050 
4051 not_found:
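	/* No connection or no matching key was found, so reject the
	 * encryption request with a negative reply.
	 */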
4052 	neg.handle = ev->handle;
4053 	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
4054 	hci_dev_unlock(hdev);
4055 }
4056 
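/* All LE specific events arrive wrapped in the LE Meta event. Strip the
 * meta event header and dispatch based on the subevent code.
 */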
4057 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
4058 {
4059 	struct hci_ev_le_meta *le_ev = (void *) skb->data;
4060 
4061 	skb_pull(skb, sizeof(*le_ev));
4062 
4063 	switch (le_ev->subevent) {
4064 	case HCI_EV_LE_CONN_COMPLETE:
4065 		hci_le_conn_complete_evt(hdev, skb);
4066 		break;
4067 
4068 	case HCI_EV_LE_ADVERTISING_REPORT:
4069 		hci_le_adv_report_evt(hdev, skb);
4070 		break;
4071 
4072 	case HCI_EV_LE_LTK_REQ:
4073 		hci_le_ltk_request_evt(hdev, skb);
4074 		break;
4075 
4076 	default:
4077 		break;
4078 	}
4079 }
4080 
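/* An AMP controller has selected a channel for a physical link; read the
 * remaining local AMP assoc data so that the link setup can continue.
 */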
4081 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4082 {
4083 	struct hci_ev_channel_selected *ev = (void *) skb->data;
4084 	struct hci_conn *hcon;
4085 
4086 	BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4087 
4088 	skb_pull(skb, sizeof(*ev));
4089 
4090 	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4091 	if (!hcon)
4092 		return;
4093 
4094 	amp_read_loc_assoc_final_data(hdev, hcon);
4095 }
4096 
4097 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
4098 {
4099 	struct hci_event_hdr *hdr = (void *) skb->data;
4100 	__u8 event = hdr->evt;
4101 
4102 	hci_dev_lock(hdev);
4103 
4104 	/* Received events are (currently) only needed when a request is
4105 	 * ongoing, so avoid unnecessary memory allocation.
4106 	 */
4107 	if (hdev->req_status == HCI_REQ_PEND) {
4108 		kfree_skb(hdev->recv_evt);
4109 		hdev->recv_evt = skb_clone(skb, GFP_KERNEL);
4110 	}
4111 
4112 	hci_dev_unlock(hdev);
4113 
4114 	skb_pull(skb, HCI_EVENT_HDR_SIZE);
4115 
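	/* Some commands are completed by a specific event rather than by
	 * Command Complete or Command Status. If this is the event that
	 * the last sent command was waiting for, complete the request now.
	 */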
4116 	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
4117 		struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
4118 		u16 opcode = __le16_to_cpu(cmd_hdr->opcode);
4119 
4120 		hci_req_cmd_complete(hdev, opcode, 0);
4121 	}
4122 
4123 	switch (event) {
4124 	case HCI_EV_INQUIRY_COMPLETE:
4125 		hci_inquiry_complete_evt(hdev, skb);
4126 		break;
4127 
4128 	case HCI_EV_INQUIRY_RESULT:
4129 		hci_inquiry_result_evt(hdev, skb);
4130 		break;
4131 
4132 	case HCI_EV_CONN_COMPLETE:
4133 		hci_conn_complete_evt(hdev, skb);
4134 		break;
4135 
4136 	case HCI_EV_CONN_REQUEST:
4137 		hci_conn_request_evt(hdev, skb);
4138 		break;
4139 
4140 	case HCI_EV_DISCONN_COMPLETE:
4141 		hci_disconn_complete_evt(hdev, skb);
4142 		break;
4143 
4144 	case HCI_EV_AUTH_COMPLETE:
4145 		hci_auth_complete_evt(hdev, skb);
4146 		break;
4147 
4148 	case HCI_EV_REMOTE_NAME:
4149 		hci_remote_name_evt(hdev, skb);
4150 		break;
4151 
4152 	case HCI_EV_ENCRYPT_CHANGE:
4153 		hci_encrypt_change_evt(hdev, skb);
4154 		break;
4155 
4156 	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
4157 		hci_change_link_key_complete_evt(hdev, skb);
4158 		break;
4159 
4160 	case HCI_EV_REMOTE_FEATURES:
4161 		hci_remote_features_evt(hdev, skb);
4162 		break;
4163 
4164 	case HCI_EV_CMD_COMPLETE:
4165 		hci_cmd_complete_evt(hdev, skb);
4166 		break;
4167 
4168 	case HCI_EV_CMD_STATUS:
4169 		hci_cmd_status_evt(hdev, skb);
4170 		break;
4171 
4172 	case HCI_EV_ROLE_CHANGE:
4173 		hci_role_change_evt(hdev, skb);
4174 		break;
4175 
4176 	case HCI_EV_NUM_COMP_PKTS:
4177 		hci_num_comp_pkts_evt(hdev, skb);
4178 		break;
4179 
4180 	case HCI_EV_MODE_CHANGE:
4181 		hci_mode_change_evt(hdev, skb);
4182 		break;
4183 
4184 	case HCI_EV_PIN_CODE_REQ:
4185 		hci_pin_code_request_evt(hdev, skb);
4186 		break;
4187 
4188 	case HCI_EV_LINK_KEY_REQ:
4189 		hci_link_key_request_evt(hdev, skb);
4190 		break;
4191 
4192 	case HCI_EV_LINK_KEY_NOTIFY:
4193 		hci_link_key_notify_evt(hdev, skb);
4194 		break;
4195 
4196 	case HCI_EV_CLOCK_OFFSET:
4197 		hci_clock_offset_evt(hdev, skb);
4198 		break;
4199 
4200 	case HCI_EV_PKT_TYPE_CHANGE:
4201 		hci_pkt_type_change_evt(hdev, skb);
4202 		break;
4203 
4204 	case HCI_EV_PSCAN_REP_MODE:
4205 		hci_pscan_rep_mode_evt(hdev, skb);
4206 		break;
4207 
4208 	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
4209 		hci_inquiry_result_with_rssi_evt(hdev, skb);
4210 		break;
4211 
4212 	case HCI_EV_REMOTE_EXT_FEATURES:
4213 		hci_remote_ext_features_evt(hdev, skb);
4214 		break;
4215 
4216 	case HCI_EV_SYNC_CONN_COMPLETE:
4217 		hci_sync_conn_complete_evt(hdev, skb);
4218 		break;
4219 
4220 	case HCI_EV_EXTENDED_INQUIRY_RESULT:
4221 		hci_extended_inquiry_result_evt(hdev, skb);
4222 		break;
4223 
4224 	case HCI_EV_KEY_REFRESH_COMPLETE:
4225 		hci_key_refresh_complete_evt(hdev, skb);
4226 		break;
4227 
4228 	case HCI_EV_IO_CAPA_REQUEST:
4229 		hci_io_capa_request_evt(hdev, skb);
4230 		break;
4231 
4232 	case HCI_EV_IO_CAPA_REPLY:
4233 		hci_io_capa_reply_evt(hdev, skb);
4234 		break;
4235 
4236 	case HCI_EV_USER_CONFIRM_REQUEST:
4237 		hci_user_confirm_request_evt(hdev, skb);
4238 		break;
4239 
4240 	case HCI_EV_USER_PASSKEY_REQUEST:
4241 		hci_user_passkey_request_evt(hdev, skb);
4242 		break;
4243 
4244 	case HCI_EV_USER_PASSKEY_NOTIFY:
4245 		hci_user_passkey_notify_evt(hdev, skb);
4246 		break;
4247 
4248 	case HCI_EV_KEYPRESS_NOTIFY:
4249 		hci_keypress_notify_evt(hdev, skb);
4250 		break;
4251 
4252 	case HCI_EV_SIMPLE_PAIR_COMPLETE:
4253 		hci_simple_pair_complete_evt(hdev, skb);
4254 		break;
4255 
4256 	case HCI_EV_REMOTE_HOST_FEATURES:
4257 		hci_remote_host_features_evt(hdev, skb);
4258 		break;
4259 
4260 	case HCI_EV_LE_META:
4261 		hci_le_meta_evt(hdev, skb);
4262 		break;
4263 
4264 	case HCI_EV_CHANNEL_SELECTED:
4265 		hci_chan_selected_evt(hdev, skb);
4266 		break;
4267 
4268 	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
4269 		hci_remote_oob_data_request_evt(hdev, skb);
4270 		break;
4271 
4272 	case HCI_EV_PHY_LINK_COMPLETE:
4273 		hci_phy_link_complete_evt(hdev, skb);
4274 		break;
4275 
4276 	case HCI_EV_LOGICAL_LINK_COMPLETE:
4277 		hci_loglink_complete_evt(hdev, skb);
4278 		break;
4279 
4280 	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
4281 		hci_disconn_loglink_complete_evt(hdev, skb);
4282 		break;
4283 
4284 	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
4285 		hci_disconn_phylink_complete_evt(hdev, skb);
4286 		break;
4287 
4288 	case HCI_EV_NUM_COMP_BLOCKS:
4289 		hci_num_comp_blocks_evt(hdev, skb);
4290 		break;
4291 
4292 	default:
4293 		BT_DBG("%s event 0x%2.2x", hdev->name, event);
4294 		break;
4295 	}
4296 
4297 	kfree_skb(skb);
4298 	hdev->stat.evt_rx++;
4299 }
4300