xref: /openbmc/linux/net/bluetooth/hci_event.c (revision 1855d92dce0dc0ed81a78eacae710529600513f4)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4 
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI event handling. */
26 
27 #include <asm/unaligned.h>
28 
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
32 
33 #include "a2mp.h"
34 #include "amp.h"
35 #include "smp.h"
36 
37 /* Handle HCI Event packets */
38 
/* Handle Command Complete for HCI_Inquiry_Cancel.
 *
 * On success: clear the inquiry flag, wake any waiters blocked on that
 * bit, mark discovery as stopped, and retry any connection attempts
 * that were deferred while inquiry was in progress.
 */
static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	/* Keep this clear/barrier/wake sequence in exactly this order */
	clear_bit(HCI_INQUIRY, &hdev->flags);
	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
58 
59 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
60 {
61 	__u8 status = *((__u8 *) skb->data);
62 
63 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
64 
65 	if (status)
66 		return;
67 
68 	set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
69 }
70 
71 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
72 {
73 	__u8 status = *((__u8 *) skb->data);
74 
75 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
76 
77 	if (status)
78 		return;
79 
80 	clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
81 
82 	hci_conn_check_pending(hdev);
83 }
84 
/* Handle Command Complete for HCI_Remote_Name_Request_Cancel.
 *
 * Intentionally empty apart from the debug trace — no local state is
 * updated here. NOTE(review): presumably the outcome is delivered via
 * the Remote Name Request Complete event path instead; confirm.
 */
static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
90 
91 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
92 {
93 	struct hci_rp_role_discovery *rp = (void *) skb->data;
94 	struct hci_conn *conn;
95 
96 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
97 
98 	if (rp->status)
99 		return;
100 
101 	hci_dev_lock(hdev);
102 
103 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
104 	if (conn) {
105 		if (rp->role)
106 			conn->link_mode &= ~HCI_LM_MASTER;
107 		else
108 			conn->link_mode |= HCI_LM_MASTER;
109 	}
110 
111 	hci_dev_unlock(hdev);
112 }
113 
114 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
115 {
116 	struct hci_rp_read_link_policy *rp = (void *) skb->data;
117 	struct hci_conn *conn;
118 
119 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
120 
121 	if (rp->status)
122 		return;
123 
124 	hci_dev_lock(hdev);
125 
126 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
127 	if (conn)
128 		conn->link_policy = __le16_to_cpu(rp->policy);
129 
130 	hci_dev_unlock(hdev);
131 }
132 
133 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
134 {
135 	struct hci_rp_write_link_policy *rp = (void *) skb->data;
136 	struct hci_conn *conn;
137 	void *sent;
138 
139 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
140 
141 	if (rp->status)
142 		return;
143 
144 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
145 	if (!sent)
146 		return;
147 
148 	hci_dev_lock(hdev);
149 
150 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
151 	if (conn)
152 		conn->link_policy = get_unaligned_le16(sent + 2);
153 
154 	hci_dev_unlock(hdev);
155 }
156 
157 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
158 					struct sk_buff *skb)
159 {
160 	struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
161 
162 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
163 
164 	if (rp->status)
165 		return;
166 
167 	hdev->link_policy = __le16_to_cpu(rp->policy);
168 }
169 
170 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
171 					 struct sk_buff *skb)
172 {
173 	__u8 status = *((__u8 *) skb->data);
174 	void *sent;
175 
176 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
177 
178 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
179 	if (!sent)
180 		return;
181 
182 	if (!status)
183 		hdev->link_policy = get_unaligned_le16(sent);
184 }
185 
/* Handle Command Complete for HCI_Reset.
 *
 * Note this runs regardless of status: the reset flag is always
 * dropped, and all volatile controller-side state mirrored in hdev is
 * reinitialised to its post-reset defaults.
 */
static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	clear_bit(HCI_RESET, &hdev->flags);

	/* Reset all non-persistent flags */
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	hdev->discovery.state = DISCOVERY_STOPPED;
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	/* Cached advertising / scan response payloads are gone */
	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;

	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
	hdev->scan_rsp_data_len = 0;

	hdev->le_scan_type = LE_SCAN_PASSIVE;

	hdev->ssp_debug_mode = 0;
}
211 
212 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
213 {
214 	__u8 status = *((__u8 *) skb->data);
215 	void *sent;
216 
217 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
218 
219 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
220 	if (!sent)
221 		return;
222 
223 	hci_dev_lock(hdev);
224 
225 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
226 		mgmt_set_local_name_complete(hdev, sent, status);
227 	else if (!status)
228 		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
229 
230 	hci_dev_unlock(hdev);
231 }
232 
233 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
234 {
235 	struct hci_rp_read_local_name *rp = (void *) skb->data;
236 
237 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
238 
239 	if (rp->status)
240 		return;
241 
242 	if (test_bit(HCI_SETUP, &hdev->dev_flags))
243 		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
244 }
245 
246 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
247 {
248 	__u8 status = *((__u8 *) skb->data);
249 	void *sent;
250 
251 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
252 
253 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
254 	if (!sent)
255 		return;
256 
257 	if (!status) {
258 		__u8 param = *((__u8 *) sent);
259 
260 		if (param == AUTH_ENABLED)
261 			set_bit(HCI_AUTH, &hdev->flags);
262 		else
263 			clear_bit(HCI_AUTH, &hdev->flags);
264 	}
265 
266 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
267 		mgmt_auth_enable_complete(hdev, status);
268 }
269 
270 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
271 {
272 	__u8 status = *((__u8 *) skb->data);
273 	void *sent;
274 
275 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
276 
277 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
278 	if (!sent)
279 		return;
280 
281 	if (!status) {
282 		__u8 param = *((__u8 *) sent);
283 
284 		if (param)
285 			set_bit(HCI_ENCRYPT, &hdev->flags);
286 		else
287 			clear_bit(HCI_ENCRYPT, &hdev->flags);
288 	}
289 }
290 
/* Handle Command Complete for HCI_Write_Scan_Enable.
 *
 * Synchronises the HCI_ISCAN/HCI_PSCAN flags with the scan mode that
 * was actually written, and emits mgmt discoverable/connectable events
 * only on real transitions. Failures are reported to mgmt and reset
 * the discoverable timeout.
 */
static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 param, status = *((__u8 *) skb->data);
	int old_pscan, old_iscan;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return;

	/* The scan mode we asked the controller to apply */
	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	if (status) {
		mgmt_write_scan_failed(hdev, param, status);
		hdev->discov_timeout = 0;
		goto done;
	}

	/* We need to ensure that we set this back on if someone changed
	 * the scan mode through a raw HCI socket.
	 */
	set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	/* Capture previous state so mgmt events fire only on change */
	old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
	old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_INQUIRY) {
		set_bit(HCI_ISCAN, &hdev->flags);
		if (!old_iscan)
			mgmt_discoverable(hdev, 1);
	} else if (old_iscan)
		mgmt_discoverable(hdev, 0);

	if (param & SCAN_PAGE) {
		set_bit(HCI_PSCAN, &hdev->flags);
		if (!old_pscan)
			mgmt_connectable(hdev, 1);
	} else if (old_pscan)
		mgmt_connectable(hdev, 0);

done:
	hci_dev_unlock(hdev);
}
338 
339 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
340 {
341 	struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
342 
343 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
344 
345 	if (rp->status)
346 		return;
347 
348 	memcpy(hdev->dev_class, rp->dev_class, 3);
349 
350 	BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
351 	       hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
352 }
353 
354 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
355 {
356 	__u8 status = *((__u8 *) skb->data);
357 	void *sent;
358 
359 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
360 
361 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
362 	if (!sent)
363 		return;
364 
365 	hci_dev_lock(hdev);
366 
367 	if (status == 0)
368 		memcpy(hdev->dev_class, sent, 3);
369 
370 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
371 		mgmt_set_class_of_dev_complete(hdev, sent, status);
372 
373 	hci_dev_unlock(hdev);
374 }
375 
376 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
377 {
378 	struct hci_rp_read_voice_setting *rp = (void *) skb->data;
379 	__u16 setting;
380 
381 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
382 
383 	if (rp->status)
384 		return;
385 
386 	setting = __le16_to_cpu(rp->voice_setting);
387 
388 	if (hdev->voice_setting == setting)
389 		return;
390 
391 	hdev->voice_setting = setting;
392 
393 	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
394 
395 	if (hdev->notify)
396 		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
397 }
398 
399 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
400 				       struct sk_buff *skb)
401 {
402 	__u8 status = *((__u8 *) skb->data);
403 	__u16 setting;
404 	void *sent;
405 
406 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
407 
408 	if (status)
409 		return;
410 
411 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
412 	if (!sent)
413 		return;
414 
415 	setting = get_unaligned_le16(sent);
416 
417 	if (hdev->voice_setting == setting)
418 		return;
419 
420 	hdev->voice_setting = setting;
421 
422 	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
423 
424 	if (hdev->notify)
425 		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
426 }
427 
428 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
429 					  struct sk_buff *skb)
430 {
431 	struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
432 
433 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
434 
435 	if (rp->status)
436 		return;
437 
438 	hdev->num_iac = rp->num_iac;
439 
440 	BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
441 }
442 
443 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
444 {
445 	__u8 status = *((__u8 *) skb->data);
446 	struct hci_cp_write_ssp_mode *sent;
447 
448 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
449 
450 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
451 	if (!sent)
452 		return;
453 
454 	if (!status) {
455 		if (sent->mode)
456 			hdev->features[1][0] |= LMP_HOST_SSP;
457 		else
458 			hdev->features[1][0] &= ~LMP_HOST_SSP;
459 	}
460 
461 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
462 		mgmt_ssp_enable_complete(hdev, sent->mode, status);
463 	else if (!status) {
464 		if (sent->mode)
465 			set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
466 		else
467 			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
468 	}
469 }
470 
471 static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
472 {
473 	u8 status = *((u8 *) skb->data);
474 	struct hci_cp_write_sc_support *sent;
475 
476 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
477 
478 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
479 	if (!sent)
480 		return;
481 
482 	if (!status) {
483 		if (sent->support)
484 			hdev->features[1][0] |= LMP_HOST_SC;
485 		else
486 			hdev->features[1][0] &= ~LMP_HOST_SC;
487 	}
488 
489 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
490 		mgmt_sc_enable_complete(hdev, sent->support, status);
491 	else if (!status) {
492 		if (sent->support)
493 			set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
494 		else
495 			clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
496 	}
497 }
498 
499 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
500 {
501 	struct hci_rp_read_local_version *rp = (void *) skb->data;
502 
503 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
504 
505 	if (rp->status)
506 		return;
507 
508 	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
509 		hdev->hci_ver = rp->hci_ver;
510 		hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
511 		hdev->lmp_ver = rp->lmp_ver;
512 		hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
513 		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
514 	}
515 }
516 
517 static void hci_cc_read_local_commands(struct hci_dev *hdev,
518 				       struct sk_buff *skb)
519 {
520 	struct hci_rp_read_local_commands *rp = (void *) skb->data;
521 
522 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
523 
524 	if (rp->status)
525 		return;
526 
527 	if (test_bit(HCI_SETUP, &hdev->dev_flags))
528 		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
529 }
530 
/* Handle Command Complete for HCI_Read_Local_Supported_Features.
 *
 * Caches page 0 of the LMP feature mask and derives the ACL packet
 * types and (e)SCO link types this controller can use from the
 * individual feature bits.
 */
static void hci_cc_read_local_features(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	/* Multi-slot ACL packet types */
	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	/* Legacy SCO HV packet types */
	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type  |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type  |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	/* eSCO packet types, gated on basic eSCO capability */
	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	/* EDR eSCO variants */
	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
}
580 
581 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
582 					   struct sk_buff *skb)
583 {
584 	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
585 
586 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
587 
588 	if (rp->status)
589 		return;
590 
591 	if (hdev->max_page < rp->max_page)
592 		hdev->max_page = rp->max_page;
593 
594 	if (rp->page < HCI_MAX_PAGES)
595 		memcpy(hdev->features[rp->page], rp->features, 8);
596 }
597 
598 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
599 					  struct sk_buff *skb)
600 {
601 	struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
602 
603 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
604 
605 	if (!rp->status)
606 		hdev->flow_ctl_mode = rp->mode;
607 }
608 
609 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
610 {
611 	struct hci_rp_read_buffer_size *rp = (void *) skb->data;
612 
613 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
614 
615 	if (rp->status)
616 		return;
617 
618 	hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
619 	hdev->sco_mtu  = rp->sco_mtu;
620 	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
621 	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
622 
623 	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
624 		hdev->sco_mtu  = 64;
625 		hdev->sco_pkts = 8;
626 	}
627 
628 	hdev->acl_cnt = hdev->acl_pkts;
629 	hdev->sco_cnt = hdev->sco_pkts;
630 
631 	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
632 	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
633 }
634 
635 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
636 {
637 	struct hci_rp_read_bd_addr *rp = (void *) skb->data;
638 
639 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
640 
641 	if (!rp->status)
642 		bacpy(&hdev->bdaddr, &rp->bdaddr);
643 }
644 
645 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
646 					   struct sk_buff *skb)
647 {
648 	struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
649 
650 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
651 
652 	if (test_bit(HCI_INIT, &hdev->flags) && !rp->status) {
653 		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
654 		hdev->page_scan_window = __le16_to_cpu(rp->window);
655 	}
656 }
657 
658 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
659 					    struct sk_buff *skb)
660 {
661 	u8 status = *((u8 *) skb->data);
662 	struct hci_cp_write_page_scan_activity *sent;
663 
664 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
665 
666 	if (status)
667 		return;
668 
669 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
670 	if (!sent)
671 		return;
672 
673 	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
674 	hdev->page_scan_window = __le16_to_cpu(sent->window);
675 }
676 
677 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
678 					   struct sk_buff *skb)
679 {
680 	struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
681 
682 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
683 
684 	if (test_bit(HCI_INIT, &hdev->flags) && !rp->status)
685 		hdev->page_scan_type = rp->type;
686 }
687 
688 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
689 					struct sk_buff *skb)
690 {
691 	u8 status = *((u8 *) skb->data);
692 	u8 *type;
693 
694 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
695 
696 	if (status)
697 		return;
698 
699 	type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
700 	if (type)
701 		hdev->page_scan_type = *type;
702 }
703 
704 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
705 					struct sk_buff *skb)
706 {
707 	struct hci_rp_read_data_block_size *rp = (void *) skb->data;
708 
709 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
710 
711 	if (rp->status)
712 		return;
713 
714 	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
715 	hdev->block_len = __le16_to_cpu(rp->block_len);
716 	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
717 
718 	hdev->block_cnt = hdev->num_blocks;
719 
720 	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
721 	       hdev->block_cnt, hdev->block_len);
722 }
723 
/* Handle Command Complete for HCI_Read_Local_AMP_Info.
 *
 * Caches the AMP controller capabilities on success. In all cases
 * (success or failure) the pending A2MP Get Info request is answered,
 * which is why the error path falls through to the response as well.
 */
static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		goto a2mp_rsp;

	hdev->amp_status = rp->amp_status;
	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
	hdev->amp_type = rp->amp_type;
	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);

a2mp_rsp:
	a2mp_send_getinfo_rsp(hdev);
}
748 
749 static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
750 					struct sk_buff *skb)
751 {
752 	struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
753 	struct amp_assoc *assoc = &hdev->loc_assoc;
754 	size_t rem_len, frag_len;
755 
756 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
757 
758 	if (rp->status)
759 		goto a2mp_rsp;
760 
761 	frag_len = skb->len - sizeof(*rp);
762 	rem_len = __le16_to_cpu(rp->rem_len);
763 
764 	if (rem_len > frag_len) {
765 		BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
766 
767 		memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
768 		assoc->offset += frag_len;
769 
770 		/* Read other fragments */
771 		amp_read_loc_assoc_frag(hdev, rp->phy_handle);
772 
773 		return;
774 	}
775 
776 	memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
777 	assoc->len = assoc->offset + rem_len;
778 	assoc->offset = 0;
779 
780 a2mp_rsp:
781 	/* Send A2MP Rsp when all fragments are received */
782 	a2mp_send_getampassoc_rsp(hdev, rp->status);
783 	a2mp_send_create_phy_link_req(hdev, rp->status);
784 }
785 
786 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
787 					 struct sk_buff *skb)
788 {
789 	struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
790 
791 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
792 
793 	if (!rp->status)
794 		hdev->inq_tx_power = rp->tx_power;
795 }
796 
/* Handle Command Complete for HCI_PIN_Code_Request_Reply.
 *
 * Always lets mgmt complete its pending reply first (success or not);
 * on success, the PIN length from the original command is recorded on
 * the matching ACL connection for later key-type decisions.
 */
static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	/* Recover the PIN length from the command we sent */
	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
}
824 
825 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
826 {
827 	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
828 
829 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
830 
831 	hci_dev_lock(hdev);
832 
833 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
834 		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
835 						 rp->status);
836 
837 	hci_dev_unlock(hdev);
838 }
839 
840 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
841 				       struct sk_buff *skb)
842 {
843 	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
844 
845 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
846 
847 	if (rp->status)
848 		return;
849 
850 	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
851 	hdev->le_pkts = rp->le_max_pkt;
852 
853 	hdev->le_cnt = hdev->le_pkts;
854 
855 	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
856 }
857 
858 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
859 					  struct sk_buff *skb)
860 {
861 	struct hci_rp_le_read_local_features *rp = (void *) skb->data;
862 
863 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
864 
865 	if (!rp->status)
866 		memcpy(hdev->le_features, rp->features, 8);
867 }
868 
869 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
870 					struct sk_buff *skb)
871 {
872 	struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
873 
874 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
875 
876 	if (!rp->status)
877 		hdev->adv_tx_power = rp->tx_power;
878 }
879 
880 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
881 {
882 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
883 
884 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
885 
886 	hci_dev_lock(hdev);
887 
888 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
889 		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
890 						 rp->status);
891 
892 	hci_dev_unlock(hdev);
893 }
894 
895 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
896 					  struct sk_buff *skb)
897 {
898 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
899 
900 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
901 
902 	hci_dev_lock(hdev);
903 
904 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
905 		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
906 						     ACL_LINK, 0, rp->status);
907 
908 	hci_dev_unlock(hdev);
909 }
910 
911 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
912 {
913 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
914 
915 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
916 
917 	hci_dev_lock(hdev);
918 
919 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
920 		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
921 						 0, rp->status);
922 
923 	hci_dev_unlock(hdev);
924 }
925 
926 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
927 					  struct sk_buff *skb)
928 {
929 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
930 
931 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
932 
933 	hci_dev_lock(hdev);
934 
935 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
936 		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
937 						     ACL_LINK, 0, rp->status);
938 
939 	hci_dev_unlock(hdev);
940 }
941 
/* Handle Command Complete for HCI_Read_Local_OOB_Data.
 *
 * Forwards the P-192 hash/randomizer (no P-256 values for this legacy
 * command, hence the NULLs) to mgmt, including on failure.
 */
static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);
	mgmt_read_local_oob_data_complete(hdev, rp->hash, rp->randomizer,
					  NULL, NULL, rp->status);
	hci_dev_unlock(hdev);
}
954 
/* Handle Command Complete for HCI_Read_Local_OOB_Extended_Data.
 *
 * Forwards both the P-192 and P-256 hash/randomizer pairs to mgmt,
 * including on failure.
 */
static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);
	mgmt_read_local_oob_data_complete(hdev, rp->hash192, rp->randomizer192,
					  rp->hash256, rp->randomizer256,
					  rp->status);
	hci_dev_unlock(hdev);
}
968 
969 
970 static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
971 {
972 	__u8 status = *((__u8 *) skb->data);
973 	bdaddr_t *sent;
974 
975 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
976 
977 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
978 	if (!sent)
979 		return;
980 
981 	hci_dev_lock(hdev);
982 
983 	if (!status)
984 		bacpy(&hdev->random_addr, sent);
985 
986 	hci_dev_unlock(hdev);
987 }
988 
/* Handle Command Complete for HCI_LE_Set_Advertise_Enable.
 *
 * On success, arms the connection timeout for a pending peripheral
 * connection attempt (advertising is how a peripheral "connects") and
 * lets mgmt know about the advertising state change.
 */
static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return;

	if (status)
		return;

	hci_dev_lock(hdev);

	/* If we're doing connection initation as peripheral. Set a
	 * timeout in case something goes wrong.
	 */
	if (*sent) {
		struct hci_conn *conn;

		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   HCI_LE_CONN_TIMEOUT);
	}

	/* *sent is the requested enable state (0 = off, 1 = on) */
	mgmt_advertising(hdev, *sent);

	hci_dev_unlock(hdev);
}
1021 
1022 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1023 {
1024 	struct hci_cp_le_set_scan_param *cp;
1025 	__u8 status = *((__u8 *) skb->data);
1026 
1027 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1028 
1029 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1030 	if (!cp)
1031 		return;
1032 
1033 	hci_dev_lock(hdev);
1034 
1035 	if (!status)
1036 		hdev->le_scan_type = cp->type;
1037 
1038 	hci_dev_unlock(hdev);
1039 }
1040 
1041 static bool has_pending_adv_report(struct hci_dev *hdev)
1042 {
1043 	struct discovery_state *d = &hdev->discovery;
1044 
1045 	return bacmp(&d->last_adv_addr, BDADDR_ANY);
1046 }
1047 
1048 static void clear_pending_adv_report(struct hci_dev *hdev)
1049 {
1050 	struct discovery_state *d = &hdev->discovery;
1051 
1052 	bacpy(&d->last_adv_addr, BDADDR_ANY);
1053 	d->last_adv_data_len = 0;
1054 }
1055 
1056 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1057 				     u8 bdaddr_type, s8 rssi, u8 *data, u8 len)
1058 {
1059 	struct discovery_state *d = &hdev->discovery;
1060 
1061 	bacpy(&d->last_adv_addr, bdaddr);
1062 	d->last_adv_addr_type = bdaddr_type;
1063 	d->last_adv_rssi = rssi;
1064 	memcpy(d->last_adv_data, data, len);
1065 	d->last_adv_data_len = len;
1066 }
1067 
/* Handle Command Complete for HCI_LE_Set_Scan_Enable.
 *
 * Tracks the HCI_LE_SCAN flag, flushes any deferred advertising report
 * when scanning stops, cancels the scan-disable timer, and moves
 * discovery to stopped if scanning was interrupted by a connect
 * request.
 */
static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	if (status)
		return;

	switch (cp->enable) {
	case LE_SCAN_ENABLE:
		set_bit(HCI_LE_SCAN, &hdev->dev_flags);
		/* Active scans expect a scan response; start fresh */
		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
			clear_pending_adv_report(hdev);
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, 0, 1,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request. Mark
		 * therefore discovery as stopped.
		 */
		if (test_and_clear_bit(HCI_LE_SCAN_INTERRUPTED,
				       &hdev->dev_flags))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		break;

	default:
		BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
		break;
	}
}
1125 
1126 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1127 					   struct sk_buff *skb)
1128 {
1129 	struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1130 
1131 	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1132 
1133 	if (!rp->status)
1134 		hdev->le_white_list_size = rp->size;
1135 }
1136 
1137 static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
1138 				       struct sk_buff *skb)
1139 {
1140 	__u8 status = *((__u8 *) skb->data);
1141 
1142 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1143 
1144 	if (!status)
1145 		hci_white_list_clear(hdev);
1146 }
1147 
1148 static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
1149 					struct sk_buff *skb)
1150 {
1151 	struct hci_cp_le_add_to_white_list *sent;
1152 	__u8 status = *((__u8 *) skb->data);
1153 
1154 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1155 
1156 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
1157 	if (!sent)
1158 		return;
1159 
1160 	if (!status)
1161 		hci_white_list_add(hdev, &sent->bdaddr, sent->bdaddr_type);
1162 }
1163 
1164 static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
1165 					  struct sk_buff *skb)
1166 {
1167 	struct hci_cp_le_del_from_white_list *sent;
1168 	__u8 status = *((__u8 *) skb->data);
1169 
1170 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1171 
1172 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
1173 	if (!sent)
1174 		return;
1175 
1176 	if (!status)
1177 		hci_white_list_del(hdev, &sent->bdaddr, sent->bdaddr_type);
1178 }
1179 
1180 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1181 					    struct sk_buff *skb)
1182 {
1183 	struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1184 
1185 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1186 
1187 	if (!rp->status)
1188 		memcpy(hdev->le_states, rp->le_states, 8);
1189 }
1190 
1191 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1192 					   struct sk_buff *skb)
1193 {
1194 	struct hci_cp_write_le_host_supported *sent;
1195 	__u8 status = *((__u8 *) skb->data);
1196 
1197 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1198 
1199 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1200 	if (!sent)
1201 		return;
1202 
1203 	if (!status) {
1204 		if (sent->le) {
1205 			hdev->features[1][0] |= LMP_HOST_LE;
1206 			set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1207 		} else {
1208 			hdev->features[1][0] &= ~LMP_HOST_LE;
1209 			clear_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1210 			clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
1211 		}
1212 
1213 		if (sent->simul)
1214 			hdev->features[1][0] |= LMP_HOST_LE_BREDR;
1215 		else
1216 			hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
1217 	}
1218 }
1219 
1220 static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1221 {
1222 	struct hci_cp_le_set_adv_param *cp;
1223 	u8 status = *((u8 *) skb->data);
1224 
1225 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1226 
1227 	if (status)
1228 		return;
1229 
1230 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
1231 	if (!cp)
1232 		return;
1233 
1234 	hci_dev_lock(hdev);
1235 	hdev->adv_addr_type = cp->own_address_type;
1236 	hci_dev_unlock(hdev);
1237 }
1238 
1239 static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
1240 					  struct sk_buff *skb)
1241 {
1242 	struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;
1243 
1244 	BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
1245 	       hdev->name, rp->status, rp->phy_handle);
1246 
1247 	if (rp->status)
1248 		return;
1249 
1250 	amp_write_rem_assoc_continue(hdev, rp->phy_handle);
1251 }
1252 
1253 static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
1254 {
1255 	struct hci_rp_read_rssi *rp = (void *) skb->data;
1256 	struct hci_conn *conn;
1257 
1258 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1259 
1260 	if (rp->status)
1261 		return;
1262 
1263 	hci_dev_lock(hdev);
1264 
1265 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1266 	if (conn)
1267 		conn->rssi = rp->rssi;
1268 
1269 	hci_dev_unlock(hdev);
1270 }
1271 
1272 static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
1273 {
1274 	struct hci_cp_read_tx_power *sent;
1275 	struct hci_rp_read_tx_power *rp = (void *) skb->data;
1276 	struct hci_conn *conn;
1277 
1278 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1279 
1280 	if (rp->status)
1281 		return;
1282 
1283 	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
1284 	if (!sent)
1285 		return;
1286 
1287 	hci_dev_lock(hdev);
1288 
1289 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1290 	if (!conn)
1291 		goto unlock;
1292 
1293 	switch (sent->type) {
1294 	case 0x00:
1295 		conn->tx_power = rp->tx_power;
1296 		break;
1297 	case 0x01:
1298 		conn->max_tx_power = rp->tx_power;
1299 		break;
1300 	}
1301 
1302 unlock:
1303 	hci_dev_unlock(hdev);
1304 }
1305 
1306 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1307 {
1308 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1309 
1310 	if (status) {
1311 		hci_conn_check_pending(hdev);
1312 		return;
1313 	}
1314 
1315 	set_bit(HCI_INQUIRY, &hdev->flags);
1316 }
1317 
/* Command Status for HCI_OP_CREATE_CONN.
 *
 * On failure, tears down (or re-queues) the outgoing ACL connection
 * object; on success, makes sure a connection object exists for the
 * address being connected to.
 */
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* Recover the address the Create Connection command targeted */
	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			/* Status 0x0c gets up to two retries by parking the
			 * connection in BT_CONNECT2; any other error (or too
			 * many attempts) closes and deletes the connection.
			 */
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_proto_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		/* Command accepted: make sure we track this outgoing
		 * connection as master even if no object existed yet.
		 */
		if (!conn) {
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
			if (conn) {
				conn->out = true;
				conn->link_mode |= HCI_LM_MASTER;
			} else
				BT_ERR("No memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}
1357 
1358 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1359 {
1360 	struct hci_cp_add_sco *cp;
1361 	struct hci_conn *acl, *sco;
1362 	__u16 handle;
1363 
1364 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1365 
1366 	if (!status)
1367 		return;
1368 
1369 	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1370 	if (!cp)
1371 		return;
1372 
1373 	handle = __le16_to_cpu(cp->handle);
1374 
1375 	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1376 
1377 	hci_dev_lock(hdev);
1378 
1379 	acl = hci_conn_hash_lookup_handle(hdev, handle);
1380 	if (acl) {
1381 		sco = acl->link;
1382 		if (sco) {
1383 			sco->state = BT_CLOSED;
1384 
1385 			hci_proto_connect_cfm(sco, status);
1386 			hci_conn_del(sco);
1387 		}
1388 	}
1389 
1390 	hci_dev_unlock(hdev);
1391 }
1392 
1393 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1394 {
1395 	struct hci_cp_auth_requested *cp;
1396 	struct hci_conn *conn;
1397 
1398 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1399 
1400 	if (!status)
1401 		return;
1402 
1403 	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1404 	if (!cp)
1405 		return;
1406 
1407 	hci_dev_lock(hdev);
1408 
1409 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1410 	if (conn) {
1411 		if (conn->state == BT_CONFIG) {
1412 			hci_proto_connect_cfm(conn, status);
1413 			hci_conn_drop(conn);
1414 		}
1415 	}
1416 
1417 	hci_dev_unlock(hdev);
1418 }
1419 
1420 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1421 {
1422 	struct hci_cp_set_conn_encrypt *cp;
1423 	struct hci_conn *conn;
1424 
1425 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1426 
1427 	if (!status)
1428 		return;
1429 
1430 	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1431 	if (!cp)
1432 		return;
1433 
1434 	hci_dev_lock(hdev);
1435 
1436 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1437 	if (conn) {
1438 		if (conn->state == BT_CONFIG) {
1439 			hci_proto_connect_cfm(conn, status);
1440 			hci_conn_drop(conn);
1441 		}
1442 	}
1443 
1444 	hci_dev_unlock(hdev);
1445 }
1446 
1447 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1448 				    struct hci_conn *conn)
1449 {
1450 	if (conn->state != BT_CONFIG || !conn->out)
1451 		return 0;
1452 
1453 	if (conn->pending_sec_level == BT_SECURITY_SDP)
1454 		return 0;
1455 
1456 	/* Only request authentication for SSP connections or non-SSP
1457 	 * devices with sec_level MEDIUM or HIGH or if MITM protection
1458 	 * is requested.
1459 	 */
1460 	if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1461 	    conn->pending_sec_level != BT_SECURITY_FIPS &&
1462 	    conn->pending_sec_level != BT_SECURITY_HIGH &&
1463 	    conn->pending_sec_level != BT_SECURITY_MEDIUM)
1464 		return 0;
1465 
1466 	return 1;
1467 }
1468 
1469 static int hci_resolve_name(struct hci_dev *hdev,
1470 				   struct inquiry_entry *e)
1471 {
1472 	struct hci_cp_remote_name_req cp;
1473 
1474 	memset(&cp, 0, sizeof(cp));
1475 
1476 	bacpy(&cp.bdaddr, &e->data.bdaddr);
1477 	cp.pscan_rep_mode = e->data.pscan_rep_mode;
1478 	cp.pscan_mode = e->data.pscan_mode;
1479 	cp.clock_offset = e->data.clock_offset;
1480 
1481 	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1482 }
1483 
1484 static bool hci_resolve_next_name(struct hci_dev *hdev)
1485 {
1486 	struct discovery_state *discov = &hdev->discovery;
1487 	struct inquiry_entry *e;
1488 
1489 	if (list_empty(&discov->resolve))
1490 		return false;
1491 
1492 	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1493 	if (!e)
1494 		return false;
1495 
1496 	if (hci_resolve_name(hdev, e) == 0) {
1497 		e->name_state = NAME_PENDING;
1498 		return true;
1499 	}
1500 
1501 	return false;
1502 }
1503 
/* Handle the outcome of a remote name request during discovery.
 *
 * Reports the device (and, when known, its name) over the mgmt
 * interface, then either continues resolving the next pending name or
 * marks discovery as stopped. A NULL name with name_len 0 means the
 * request failed.
 */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	/* First connected notification for this link goes to mgmt */
	if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
				      name_len, conn->dev_class);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in a list of found devices names of which
	 * are pending. there is no need to continue resolving a next name as it
	 * will be done upon receiving another Remote Name Request Complete
	 * Event */
	if (!e)
		return;

	list_del(&e->list);
	if (name) {
		e->name_state = NAME_KNOWN;
		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
				 e->data.rssi, name, name_len);
	} else {
		e->name_state = NAME_NOT_KNOWN;
	}

	/* Keep discovery going if another entry still needs its name */
	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
1546 
/* Command Status for HCI_OP_REMOTE_NAME_REQ.
 *
 * Only handles the failure case: notifies the discovery machinery that
 * the name could not be resolved, and — if the connection was waiting
 * on the name before authenticating — issues the authentication
 * request now.
 */
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* If successful wait for the name req complete event before
	 * checking for the need to do authentication */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	/* Tell discovery the name lookup failed (NULL name, len 0) */
	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	/* Request authentication if it is not already in flight */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested auth_cp;

		auth_cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(auth_cp), &auth_cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
1587 
1588 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1589 {
1590 	struct hci_cp_read_remote_features *cp;
1591 	struct hci_conn *conn;
1592 
1593 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1594 
1595 	if (!status)
1596 		return;
1597 
1598 	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1599 	if (!cp)
1600 		return;
1601 
1602 	hci_dev_lock(hdev);
1603 
1604 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1605 	if (conn) {
1606 		if (conn->state == BT_CONFIG) {
1607 			hci_proto_connect_cfm(conn, status);
1608 			hci_conn_drop(conn);
1609 		}
1610 	}
1611 
1612 	hci_dev_unlock(hdev);
1613 }
1614 
1615 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1616 {
1617 	struct hci_cp_read_remote_ext_features *cp;
1618 	struct hci_conn *conn;
1619 
1620 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1621 
1622 	if (!status)
1623 		return;
1624 
1625 	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1626 	if (!cp)
1627 		return;
1628 
1629 	hci_dev_lock(hdev);
1630 
1631 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1632 	if (conn) {
1633 		if (conn->state == BT_CONFIG) {
1634 			hci_proto_connect_cfm(conn, status);
1635 			hci_conn_drop(conn);
1636 		}
1637 	}
1638 
1639 	hci_dev_unlock(hdev);
1640 }
1641 
1642 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1643 {
1644 	struct hci_cp_setup_sync_conn *cp;
1645 	struct hci_conn *acl, *sco;
1646 	__u16 handle;
1647 
1648 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1649 
1650 	if (!status)
1651 		return;
1652 
1653 	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1654 	if (!cp)
1655 		return;
1656 
1657 	handle = __le16_to_cpu(cp->handle);
1658 
1659 	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1660 
1661 	hci_dev_lock(hdev);
1662 
1663 	acl = hci_conn_hash_lookup_handle(hdev, handle);
1664 	if (acl) {
1665 		sco = acl->link;
1666 		if (sco) {
1667 			sco->state = BT_CLOSED;
1668 
1669 			hci_proto_connect_cfm(sco, status);
1670 			hci_conn_del(sco);
1671 		}
1672 	}
1673 
1674 	hci_dev_unlock(hdev);
1675 }
1676 
1677 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1678 {
1679 	struct hci_cp_sniff_mode *cp;
1680 	struct hci_conn *conn;
1681 
1682 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1683 
1684 	if (!status)
1685 		return;
1686 
1687 	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1688 	if (!cp)
1689 		return;
1690 
1691 	hci_dev_lock(hdev);
1692 
1693 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1694 	if (conn) {
1695 		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1696 
1697 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1698 			hci_sco_setup(conn, status);
1699 	}
1700 
1701 	hci_dev_unlock(hdev);
1702 }
1703 
1704 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1705 {
1706 	struct hci_cp_exit_sniff_mode *cp;
1707 	struct hci_conn *conn;
1708 
1709 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1710 
1711 	if (!status)
1712 		return;
1713 
1714 	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1715 	if (!cp)
1716 		return;
1717 
1718 	hci_dev_lock(hdev);
1719 
1720 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1721 	if (conn) {
1722 		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1723 
1724 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1725 			hci_sco_setup(conn, status);
1726 	}
1727 
1728 	hci_dev_unlock(hdev);
1729 }
1730 
1731 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1732 {
1733 	struct hci_cp_disconnect *cp;
1734 	struct hci_conn *conn;
1735 
1736 	if (!status)
1737 		return;
1738 
1739 	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1740 	if (!cp)
1741 		return;
1742 
1743 	hci_dev_lock(hdev);
1744 
1745 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1746 	if (conn)
1747 		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1748 				       conn->dst_type, status);
1749 
1750 	hci_dev_unlock(hdev);
1751 }
1752 
1753 static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
1754 {
1755 	struct hci_cp_create_phy_link *cp;
1756 
1757 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1758 
1759 	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
1760 	if (!cp)
1761 		return;
1762 
1763 	hci_dev_lock(hdev);
1764 
1765 	if (status) {
1766 		struct hci_conn *hcon;
1767 
1768 		hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
1769 		if (hcon)
1770 			hci_conn_del(hcon);
1771 	} else {
1772 		amp_write_remote_assoc(hdev, cp->phy_handle);
1773 	}
1774 
1775 	hci_dev_unlock(hdev);
1776 }
1777 
1778 static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
1779 {
1780 	struct hci_cp_accept_phy_link *cp;
1781 
1782 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1783 
1784 	if (status)
1785 		return;
1786 
1787 	cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
1788 	if (!cp)
1789 		return;
1790 
1791 	amp_write_remote_assoc(hdev, cp->phy_handle);
1792 }
1793 
/* Command Status for HCI_OP_LE_CREATE_CONN.
 *
 * On success, records the initiator/responder address information the
 * SMP layer needs and arms a connection timeout for direct (non white
 * list) connection attempts.
 */
static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* All connection failure handling is taken care of by the
	 * hci_le_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */
	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
	if (!conn)
		goto unlock;

	/* Store the initiator and responder address information which
	 * is needed for SMP. These values will not change during the
	 * lifetime of the connection.
	 */
	conn->init_addr_type = cp->own_address_type;
	if (cp->own_address_type == ADDR_LE_DEV_RANDOM)
		bacpy(&conn->init_addr, &hdev->random_addr);
	else
		bacpy(&conn->init_addr, &hdev->bdaddr);

	conn->resp_addr_type = cp->peer_addr_type;
	bacpy(&conn->resp_addr, &cp->peer_addr);

	/* We don't want the connection attempt to stick around
	 * indefinitely since LE doesn't have a page timeout concept
	 * like BR/EDR. Set a timer for any connection that doesn't use
	 * the white list for connecting.
	 */
	if (cp->filter_policy == HCI_LE_USE_PEER_ADDR)
		queue_delayed_work(conn->hdev->workqueue,
				   &conn->le_conn_timeout,
				   HCI_LE_CONN_TIMEOUT);

unlock:
	hci_dev_unlock(hdev);
}
1844 
1845 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
1846 {
1847 	struct hci_cp_le_start_enc *cp;
1848 	struct hci_conn *conn;
1849 
1850 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1851 
1852 	if (!status)
1853 		return;
1854 
1855 	hci_dev_lock(hdev);
1856 
1857 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
1858 	if (!cp)
1859 		goto unlock;
1860 
1861 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1862 	if (!conn)
1863 		goto unlock;
1864 
1865 	if (conn->state != BT_CONNECTED)
1866 		goto unlock;
1867 
1868 	hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
1869 	hci_conn_drop(conn);
1870 
1871 unlock:
1872 	hci_dev_unlock(hdev);
1873 }
1874 
/* Inquiry Complete event handler.
 *
 * Clears the HCI_INQUIRY flag, wakes any waiters on it, and — when
 * mgmt-driven discovery is active — either starts resolving names of
 * found devices or marks discovery as stopped.
 */
static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* Retry connection attempts deferred while inquiry was running */
	hci_conn_check_pending(hdev);

	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		return;

	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	/* Discovery state tracking below only applies to mgmt users */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	if (discov->state != DISCOVERY_FINDING)
		goto unlock;

	if (list_empty(&discov->resolve)) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	/* Start resolving the first device that still needs its name */
	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (e && hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
	} else {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
}
1915 
/* Inquiry Result event handler.
 *
 * The event carries a count byte followed by that many inquiry_info
 * records. Each record is added to the inquiry cache and reported to
 * the management interface. This basic result format has no RSSI or
 * SSP information, so both are reported as zero.
 */
static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct inquiry_data data;
	struct inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp = *((__u8 *) skb->data);

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	/* Periodic inquiry results are not forwarded */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	for (; num_rsp; num_rsp--, info++) {
		bool name_known, ssp;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode	= info->pscan_rep_mode;
		data.pscan_period_mode	= info->pscan_period_mode;
		data.pscan_mode		= info->pscan_mode;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset	= info->clock_offset;
		data.rssi		= 0x00;
		data.ssp_mode		= 0x00;

		/* Cache update also tells us whether the name is known yet */
		name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, 0, !name_known, ssp, NULL,
				  0, NULL, 0);
	}

	hci_dev_unlock(hdev);
}
1952 
/* Connection Complete event handler.
 *
 * Finalizes setup of an ACL or SCO connection: assigns the handle,
 * moves the connection to the right state, registers it in sysfs,
 * kicks off remote feature discovery, and on failure reports the
 * error and deletes the connection object.
 */
static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* A SCO completion may belong to a connection we set up
		 * as eSCO; retry the lookup and downgrade the type.
		 */
		if (ev->link_type != SCO_LINK)
			goto unlock;

		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	if (!ev->status) {
		conn->handle = __le16_to_cpu(ev->handle);

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			/* Incoming legacy (non-SSP) link without a stored
			 * key is likely about to pair: keep it alive longer.
			 */
			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_conn_add_sysfs(conn);

		if (test_bit(HCI_AUTH, &hdev->flags))
			conn->link_mode |= HCI_LM_AUTH;

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			conn->link_mode |= HCI_LM_ENCRYPT;

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}
	} else {
		conn->state = BT_CLOSED;
		if (conn->type == ACL_LINK)
			mgmt_connect_failed(hdev, &conn->dst, conn->type,
					    conn->dst_type, ev->status);
	}

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

	if (ev->status) {
		/* Failed connections are confirmed and deleted here; for
		 * successful ACL links the confirmation is deferred until
		 * the remote features arrive.
		 */
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_del(conn);
	} else if (ev->link_type != ACL_LINK)
		hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
2034 
/* Connection Request event handler.
 *
 * Decides whether to accept or reject an incoming connection based on
 * the device's link mode, the protocol layer's verdict and the
 * blacklist. Accepted ACL requests are answered immediately; sync
 * (SCO/eSCO) requests may be deferred to the protocol layer via
 * HCI_PROTO_DEFER.
 */
static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;
	__u8 flags = 0;

	BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
	       ev->link_type);

	/* Let the protocol layer veto or defer the connection */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
				      &flags);

	if ((mask & HCI_LM_ACCEPT) &&
	    !hci_blacklist_lookup(hdev, &ev->bdaddr, BDADDR_BREDR)) {
		/* Connection accepted */
		struct inquiry_entry *ie;
		struct hci_conn *conn;

		hci_dev_lock(hdev);

		/* Refresh the cached device class from the request */
		ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
		if (ie)
			memcpy(ie->data.dev_class, ev->dev_class, 3);

		conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
					       &ev->bdaddr);
		if (!conn) {
			conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
			if (!conn) {
				BT_ERR("No memory for new connection");
				hci_dev_unlock(hdev);
				return;
			}
		}

		memcpy(conn->dev_class, ev->dev_class, 3);

		hci_dev_unlock(hdev);

		if (ev->link_type == ACL_LINK ||
		    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
			struct hci_cp_accept_conn_req cp;
			conn->state = BT_CONNECT;

			bacpy(&cp.bdaddr, &ev->bdaddr);

			if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
				cp.role = 0x00; /* Become master */
			else
				cp.role = 0x01; /* Remain slave */

			hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp),
				     &cp);
		} else if (!(flags & HCI_PROTO_DEFER)) {
			/* Accept the sync connection with fixed default
			 * bandwidth/latency parameters.
			 */
			struct hci_cp_accept_sync_conn_req cp;
			conn->state = BT_CONNECT;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			cp.pkt_type = cpu_to_le16(conn->pkt_type);

			cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
			cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
			cp.max_latency    = cpu_to_le16(0xffff);
			cp.content_format = cpu_to_le16(hdev->voice_setting);
			cp.retrans_effort = 0xff;

			hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
				     sizeof(cp), &cp);
		} else {
			/* Deferred: the protocol layer decides later */
			conn->state = BT_CONNECT2;
			hci_proto_connect_cfm(conn, 0);
		}
	} else {
		/* Connection rejected */
		struct hci_cp_reject_conn_req cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_REJ_BAD_ADDR;
		hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
	}
}
2116 
2117 static u8 hci_to_mgmt_reason(u8 err)
2118 {
2119 	switch (err) {
2120 	case HCI_ERROR_CONNECTION_TIMEOUT:
2121 		return MGMT_DEV_DISCONN_TIMEOUT;
2122 	case HCI_ERROR_REMOTE_USER_TERM:
2123 	case HCI_ERROR_REMOTE_LOW_RESOURCES:
2124 	case HCI_ERROR_REMOTE_POWER_OFF:
2125 		return MGMT_DEV_DISCONN_REMOTE;
2126 	case HCI_ERROR_LOCAL_HOST_TERM:
2127 		return MGMT_DEV_DISCONN_LOCAL_HOST;
2128 	default:
2129 		return MGMT_DEV_DISCONN_UNKNOWN;
2130 	}
2131 }
2132 
/* Disconnection Complete event handler.
 *
 * Tears down the connection object, notifies mgmt, re-queues devices
 * configured for automatic reconnection, and re-enables advertising
 * when an LE link goes away.
 */
static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = (void *) skb->data;
	u8 reason = hci_to_mgmt_reason(ev->reason);
	struct hci_conn_params *params;
	struct hci_conn *conn;
	bool mgmt_connected;
	u8 type;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->status) {
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, ev->status);
		goto unlock;
	}

	conn->state = BT_CLOSED;

	/* Only report the disconnect to mgmt if the connect was reported */
	mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
				reason, mgmt_connected);

	if (conn->type == ACL_LINK && conn->flush_key)
		hci_remove_link_key(hdev, &conn->dst);

	/* Re-arm auto-connection for devices configured for it */
	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_LINK_LOSS:
			if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
				break;
			/* Fall through */

		case HCI_AUTO_CONN_ALWAYS:
			hci_pend_le_conn_add(hdev, &conn->dst, conn->dst_type);
			break;

		default:
			break;
		}
	}

	/* Save the type before the connection object is freed */
	type = conn->type;

	hci_proto_disconn_cfm(conn, ev->reason);
	hci_conn_del(conn);

	/* Re-enable advertising if necessary, since it might
	 * have been disabled by the connection. From the
	 * HCI_LE_Set_Advertise_Enable command description in
	 * the core specification (v4.0):
	 * "The Controller shall continue advertising until the Host
	 * issues an LE_Set_Advertise_Enable command with
	 * Advertising_Enable set to 0x00 (Advertising is disabled)
	 * or until a connection is created or until the Advertising
	 * is timed out due to Directed Advertising."
	 */
	if (type == LE_LINK)
		mgmt_reenable_advertising(hdev);

unlock:
	hci_dev_unlock(hdev);
}
2203 
/* Authentication Complete event handler.
 *
 * Updates the connection's security state, clears the pending auth
 * flags, and — depending on connection state — either continues with
 * encryption setup or completes the connection/authentication towards
 * the upper layers.
 */
static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		/* Legacy devices cannot be re-authenticated; the link mode
		 * is left untouched in that case.
		 */
		if (!hci_conn_ssp_enabled(conn) &&
		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
			BT_INFO("re-auth of legacy device is not possible.");
		} else {
			conn->link_mode |= HCI_LM_AUTH;
			conn->sec_level = conn->pending_sec_level;
		}
	} else {
		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
				 ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		/* SSP links in setup continue with encryption; others are
		 * reported as connected right away.
		 */
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_proto_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

	/* An encryption request may have been queued behind the auth */
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status, 0x00);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
2269 
/* Handle the HCI Remote Name Request Complete event.
 *
 * Forwards the resolved name (or the failure) to the management core
 * and, if the connection still requires it, starts outgoing
 * authentication.
 */
static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	/* Without the management interface nobody is waiting for the
	 * name; only the authentication step below matters.
	 */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto check_auth;

	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	/* Request authentication unless one is already pending */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;
		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
2308 
/* Handle the HCI Encryption Change event.
 *
 * Updates the link security state (authentication, encryption, FIPS
 * and AES-CCM markers) and then completes either connection setup or
 * a pending security request.
 */
static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (ev->encrypt) {
			/* Encryption implies authentication */
			conn->link_mode |= HCI_LM_AUTH;
			conn->link_mode |= HCI_LM_ENCRYPT;
			conn->sec_level = conn->pending_sec_level;

			/* P-256 authentication key implies FIPS */
			if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
				conn->link_mode |= HCI_LM_FIPS;

			/* encrypt == 0x02 indicates AES-CCM on BR/EDR;
			 * LE links are always treated as AES-CCM.
			 */
			if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
			    conn->type == LE_LINK)
				set_bit(HCI_CONN_AES_CCM, &conn->flags);
		} else {
			conn->link_mode &= ~HCI_LM_ENCRYPT;
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}
	}

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* A failed encryption change on an established link is fatal:
	 * disconnect rather than leave the link in a weaker state.
	 */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		/* In Secure Connections Only mode, do not allow any
		 * connections that are not encrypted with AES-CCM
		 * using a P-256 authenticated combination key.
		 */
		if (test_bit(HCI_SC_ONLY, &hdev->dev_flags) &&
		    (!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
		     conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
			hci_proto_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
			hci_conn_drop(conn);
			goto unlock;
		}

		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else
		hci_encrypt_cfm(conn, ev->status, ev->encrypt);

unlock:
	hci_dev_unlock(hdev);
}
2374 
2375 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2376 					     struct sk_buff *skb)
2377 {
2378 	struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2379 	struct hci_conn *conn;
2380 
2381 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2382 
2383 	hci_dev_lock(hdev);
2384 
2385 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2386 	if (conn) {
2387 		if (!ev->status)
2388 			conn->link_mode |= HCI_LM_SECURE;
2389 
2390 		clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2391 
2392 		hci_key_change_cfm(conn, ev->status);
2393 	}
2394 
2395 	hci_dev_unlock(hdev);
2396 }
2397 
/* Handle the HCI Read Remote Supported Features Complete event.
 *
 * Stores the remote LMP feature page 0 and continues connection setup:
 * fetch the extended features when both sides may support SSP,
 * otherwise resolve the remote name or finish the connection.
 */
static void hci_remote_features_evt(struct hci_dev *hdev,
				    struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		memcpy(conn->features[0], ev->features, 8);

	if (conn->state != BT_CONFIG)
		goto unlock;

	/* If SSP is possible, feature page 1 (host features) is needed
	 * before setup can continue.
	 */
	if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
			     sizeof(cp), &cp);
		goto unlock;
	}

	/* Resolve the remote name before announcing the connection to
	 * the management core, but only once per connection.
	 */
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0,
				      conn->dev_class);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
2447 
/* Handle the HCI Command Complete event.
 *
 * Dispatches the command-specific return parameters to the matching
 * hci_cc_* handler, completes any pending request and restarts
 * command-queue processing when the controller grants new credits.
 */
static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev = (void *) skb->data;
	/* First byte of the return parameters is the status for most
	 * commands.  NOTE(review): this is read before any length
	 * validation; for a command with no return parameters this may
	 * read one byte past the payload -- confirm the transport
	 * guarantees enough tailroom.
	 */
	u8 status = skb->data[sizeof(*ev)];
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY_CANCEL:
		hci_cc_inquiry_cancel(hdev, skb);
		break;

	case HCI_OP_PERIODIC_INQ:
		hci_cc_periodic_inq(hdev, skb);
		break;

	case HCI_OP_EXIT_PERIODIC_INQ:
		hci_cc_exit_periodic_inq(hdev, skb);
		break;

	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
		hci_cc_remote_name_req_cancel(hdev, skb);
		break;

	case HCI_OP_ROLE_DISCOVERY:
		hci_cc_role_discovery(hdev, skb);
		break;

	case HCI_OP_READ_LINK_POLICY:
		hci_cc_read_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_LINK_POLICY:
		hci_cc_write_link_policy(hdev, skb);
		break;

	case HCI_OP_READ_DEF_LINK_POLICY:
		hci_cc_read_def_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_DEF_LINK_POLICY:
		hci_cc_write_def_link_policy(hdev, skb);
		break;

	case HCI_OP_RESET:
		hci_cc_reset(hdev, skb);
		break;

	case HCI_OP_WRITE_LOCAL_NAME:
		hci_cc_write_local_name(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_NAME:
		hci_cc_read_local_name(hdev, skb);
		break;

	case HCI_OP_WRITE_AUTH_ENABLE:
		hci_cc_write_auth_enable(hdev, skb);
		break;

	case HCI_OP_WRITE_ENCRYPT_MODE:
		hci_cc_write_encrypt_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SCAN_ENABLE:
		hci_cc_write_scan_enable(hdev, skb);
		break;

	case HCI_OP_READ_CLASS_OF_DEV:
		hci_cc_read_class_of_dev(hdev, skb);
		break;

	case HCI_OP_WRITE_CLASS_OF_DEV:
		hci_cc_write_class_of_dev(hdev, skb);
		break;

	case HCI_OP_READ_VOICE_SETTING:
		hci_cc_read_voice_setting(hdev, skb);
		break;

	case HCI_OP_WRITE_VOICE_SETTING:
		hci_cc_write_voice_setting(hdev, skb);
		break;

	case HCI_OP_READ_NUM_SUPPORTED_IAC:
		hci_cc_read_num_supported_iac(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_MODE:
		hci_cc_write_ssp_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SC_SUPPORT:
		hci_cc_write_sc_support(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_VERSION:
		hci_cc_read_local_version(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_COMMANDS:
		hci_cc_read_local_commands(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_FEATURES:
		hci_cc_read_local_features(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_EXT_FEATURES:
		hci_cc_read_local_ext_features(hdev, skb);
		break;

	case HCI_OP_READ_BUFFER_SIZE:
		hci_cc_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_READ_BD_ADDR:
		hci_cc_read_bd_addr(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
		hci_cc_read_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
		hci_cc_write_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_TYPE:
		hci_cc_read_page_scan_type(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_TYPE:
		hci_cc_write_page_scan_type(hdev, skb);
		break;

	case HCI_OP_READ_DATA_BLOCK_SIZE:
		hci_cc_read_data_block_size(hdev, skb);
		break;

	case HCI_OP_READ_FLOW_CONTROL_MODE:
		hci_cc_read_flow_control_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_INFO:
		hci_cc_read_local_amp_info(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_ASSOC:
		hci_cc_read_local_amp_assoc(hdev, skb);
		break;

	case HCI_OP_READ_INQ_RSP_TX_POWER:
		hci_cc_read_inq_rsp_tx_power(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_REPLY:
		hci_cc_pin_code_reply(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_NEG_REPLY:
		hci_cc_pin_code_neg_reply(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_DATA:
		hci_cc_read_local_oob_data(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
		hci_cc_read_local_oob_ext_data(hdev, skb);
		break;

	case HCI_OP_LE_READ_BUFFER_SIZE:
		hci_cc_le_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_LE_READ_LOCAL_FEATURES:
		hci_cc_le_read_local_features(hdev, skb);
		break;

	case HCI_OP_LE_READ_ADV_TX_POWER:
		hci_cc_le_read_adv_tx_power(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_REPLY:
		hci_cc_user_confirm_reply(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_NEG_REPLY:
		hci_cc_user_confirm_neg_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_REPLY:
		hci_cc_user_passkey_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_NEG_REPLY:
		hci_cc_user_passkey_neg_reply(hdev, skb);
		break;

	case HCI_OP_LE_SET_RANDOM_ADDR:
		hci_cc_le_set_random_addr(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_ENABLE:
		hci_cc_le_set_adv_enable(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_PARAM:
		hci_cc_le_set_scan_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_ENABLE:
		hci_cc_le_set_scan_enable(hdev, skb);
		break;

	case HCI_OP_LE_READ_WHITE_LIST_SIZE:
		hci_cc_le_read_white_list_size(hdev, skb);
		break;

	case HCI_OP_LE_CLEAR_WHITE_LIST:
		hci_cc_le_clear_white_list(hdev, skb);
		break;

	case HCI_OP_LE_ADD_TO_WHITE_LIST:
		hci_cc_le_add_to_white_list(hdev, skb);
		break;

	case HCI_OP_LE_DEL_FROM_WHITE_LIST:
		hci_cc_le_del_from_white_list(hdev, skb);
		break;

	case HCI_OP_LE_READ_SUPPORTED_STATES:
		hci_cc_le_read_supported_states(hdev, skb);
		break;

	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
		hci_cc_write_le_host_supported(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_PARAM:
		hci_cc_set_adv_param(hdev, skb);
		break;

	case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
		hci_cc_write_remote_amp_assoc(hdev, skb);
		break;

	case HCI_OP_READ_RSSI:
		hci_cc_read_rssi(hdev, skb);
		break;

	case HCI_OP_READ_TX_POWER:
		hci_cc_read_tx_power(hdev, skb);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	/* HCI_OP_NOP completions only hand out command credits, so do
	 * not treat them as the answer to the outstanding command.
	 */
	if (opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	hci_req_cmd_complete(hdev, opcode, status);

	/* ncmd is the number of command credits now available; resume
	 * sending queued commands unless a reset is in progress.
	 */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
2723 
/* Handle the HCI Command Status event.
 *
 * Dispatches the status to the matching hci_cs_* handler, completes a
 * pending request where appropriate and restarts command-queue
 * processing when the controller grants new credits.
 */
static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_status *ev = (void *) skb->data;
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY:
		hci_cs_inquiry(hdev, ev->status);
		break;

	case HCI_OP_CREATE_CONN:
		hci_cs_create_conn(hdev, ev->status);
		break;

	case HCI_OP_ADD_SCO:
		hci_cs_add_sco(hdev, ev->status);
		break;

	case HCI_OP_AUTH_REQUESTED:
		hci_cs_auth_requested(hdev, ev->status);
		break;

	case HCI_OP_SET_CONN_ENCRYPT:
		hci_cs_set_conn_encrypt(hdev, ev->status);
		break;

	case HCI_OP_REMOTE_NAME_REQ:
		hci_cs_remote_name_req(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_FEATURES:
		hci_cs_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_EXT_FEATURES:
		hci_cs_read_remote_ext_features(hdev, ev->status);
		break;

	case HCI_OP_SETUP_SYNC_CONN:
		hci_cs_setup_sync_conn(hdev, ev->status);
		break;

	case HCI_OP_SNIFF_MODE:
		hci_cs_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_EXIT_SNIFF_MODE:
		hci_cs_exit_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_DISCONNECT:
		hci_cs_disconnect(hdev, ev->status);
		break;

	case HCI_OP_CREATE_PHY_LINK:
		hci_cs_create_phylink(hdev, ev->status);
		break;

	case HCI_OP_ACCEPT_PHY_LINK:
		hci_cs_accept_phylink(hdev, ev->status);
		break;

	case HCI_OP_LE_CREATE_CONN:
		hci_cs_le_create_conn(hdev, ev->status);
		break;

	case HCI_OP_LE_START_ENC:
		hci_cs_le_start_enc(hdev, ev->status);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	/* HCI_OP_NOP statuses only hand out command credits */
	if (opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	/* Complete the request on failure, or on success when the sent
	 * command is not waiting for a specific follow-up event.
	 */
	if (ev->status ||
	    (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
		hci_req_cmd_complete(hdev, opcode, ev->status);

	/* ncmd is the number of command credits now available; resume
	 * sending queued commands unless a reset is in progress.
	 */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
2816 
2817 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2818 {
2819 	struct hci_ev_role_change *ev = (void *) skb->data;
2820 	struct hci_conn *conn;
2821 
2822 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2823 
2824 	hci_dev_lock(hdev);
2825 
2826 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2827 	if (conn) {
2828 		if (!ev->status) {
2829 			if (ev->role)
2830 				conn->link_mode &= ~HCI_LM_MASTER;
2831 			else
2832 				conn->link_mode |= HCI_LM_MASTER;
2833 		}
2834 
2835 		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2836 
2837 		hci_role_switch_cfm(conn, ev->status, ev->role);
2838 	}
2839 
2840 	hci_dev_unlock(hdev);
2841 }
2842 
/* Handle the HCI Number Of Completed Packets event.
 *
 * Returns transmit credits to the per-connection and per-device
 * counters so the TX scheduler can send more data.  Only valid when
 * the controller uses packet-based flow control.
 */
static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
	int i;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Make sure all num_hndl handle/count pairs actually fit in
	 * the received payload before iterating over them.
	 */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_pkts_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16  handle, count;

		handle = __le16_to_cpu(info->handle);
		count  = __le16_to_cpu(info->count);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= count;

		/* Returned credits are clamped to the totals the
		 * controller advertised, so a misbehaving controller
		 * cannot inflate them.
		 */
		switch (conn->type) {
		case ACL_LINK:
			hdev->acl_cnt += count;
			if (hdev->acl_cnt > hdev->acl_pkts)
				hdev->acl_cnt = hdev->acl_pkts;
			break;

		case LE_LINK:
			/* Controllers without a dedicated LE buffer pool
			 * share the ACL credits.
			 */
			if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		case SCO_LINK:
			hdev->sco_cnt += count;
			if (hdev->sco_cnt > hdev->sco_pkts)
				hdev->sco_cnt = hdev->sco_pkts;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
2908 
2909 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
2910 						 __u16 handle)
2911 {
2912 	struct hci_chan *chan;
2913 
2914 	switch (hdev->dev_type) {
2915 	case HCI_BREDR:
2916 		return hci_conn_hash_lookup_handle(hdev, handle);
2917 	case HCI_AMP:
2918 		chan = hci_chan_lookup_handle(hdev, handle);
2919 		if (chan)
2920 			return chan->conn;
2921 		break;
2922 	default:
2923 		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2924 		break;
2925 	}
2926 
2927 	return NULL;
2928 }
2929 
/* Handle the HCI Number Of Completed Data Blocks event.
 *
 * Returns transmit block credits to the device-wide pool so the TX
 * scheduler can send more data.  Only valid when the controller uses
 * block-based flow control.
 */
static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
	int i;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Make sure all num_hndl records actually fit in the received
	 * payload before iterating over them.
	 */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
	       ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_blocks_info *info = &ev->handles[i];
		struct hci_conn *conn = NULL;
		__u16  handle, block_count;

		handle = __le16_to_cpu(info->handle);
		block_count = __le16_to_cpu(info->blocks);

		conn = __hci_conn_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= block_count;

		/* Credits are clamped to the block total the controller
		 * advertised.
		 */
		switch (conn->type) {
		case ACL_LINK:
		case AMP_LINK:
			hdev->block_cnt += block_count;
			if (hdev->block_cnt > hdev->num_blocks)
				hdev->block_cnt = hdev->num_blocks;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
2979 
/* Handle the HCI Mode Change event (active/hold/sniff).
 *
 * Records the new mode, tracks the power-save flag for mode changes
 * not initiated by the host, and completes any SCO setup that was
 * waiting for the mode change.
 */
static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_mode_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		conn->mode = ev->mode;

		/* Only remote-initiated mode changes (no pending local
		 * request) update the power-save flag.
		 */
		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
					&conn->flags)) {
			if (conn->mode == HCI_CM_ACTIVE)
				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
			else
				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
		}

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, ev->status);
	}

	hci_dev_unlock(hdev);
}
3007 
/* Handle the HCI PIN Code Request event.
 *
 * Rejects the request when pairing is disabled, otherwise forwards it
 * to user space through the management interface.
 */
static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pin_code_req *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Extend the disconnect timeout to give the user time to
	 * enter the PIN.
	 */
	if (conn->state == BT_CONNECTED) {
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
		hci_conn_drop(conn);
	}

	if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
	else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
		u8 secure;

		/* High security pairing asks user space for a secure
		 * (full length) PIN.
		 */
		if (conn->pending_sec_level == BT_SECURITY_HIGH)
			secure = 1;
		else
			secure = 0;

		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
	}

unlock:
	hci_dev_unlock(hdev);
}
3044 
/* Handle the HCI Link Key Request event.
 *
 * Looks up a stored link key for the peer and replies with it, unless
 * security policy forbids its use (debug keys disabled, key not strong
 * enough for the pending security level), in which case a negative
 * reply is sent.
 */
static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = (void *) skb->data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		BT_DBG("%s link key not found for %pMR", hdev->name,
		       &ev->bdaddr);
		goto not_found;
	}

	BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
	       &ev->bdaddr);

	/* Debug keys are only usable when explicitly allowed */
	if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
	    key->type == HCI_LK_DEBUG_COMBINATION) {
		BT_DBG("%s ignoring debug key", hdev->name);
		goto not_found;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		/* An unauthenticated key cannot satisfy a connection
		 * whose requested auth requires MITM protection.
		 */
		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
			BT_DBG("%s ignoring unauthenticated key", hdev->name);
			goto not_found;
		}

		/* A combination key derived from a short PIN is not
		 * strong enough for high/FIPS security.
		 */
		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
		    (conn->pending_sec_level == BT_SECURITY_HIGH ||
		     conn->pending_sec_level == BT_SECURITY_FIPS)) {
			BT_DBG("%s ignoring key unauthenticated for high security",
			       hdev->name);
			goto not_found;
		}

		conn->key_type = key->type;
		conn->pin_length = key->pin_len;
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}
3109 
/* Handle the HCI Link Key Notification event: record the new link key
 * in the key store when the management interface is active.
 */
static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = (void *) skb->data;
	struct hci_conn *conn;
	u8 pin_len = 0;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		pin_len = conn->pin_length;

		/* A changed combination key keeps the original key type */
		if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
			conn->key_type = ev->key_type;

		hci_conn_drop(conn);
	}

	/* NOTE(review): conn may be NULL here; hci_add_link_key appears
	 * to tolerate that -- confirm against its implementation.
	 */
	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
				 ev->key_type, pin_len);

	hci_dev_unlock(hdev);
}
3138 
3139 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
3140 {
3141 	struct hci_ev_clock_offset *ev = (void *) skb->data;
3142 	struct hci_conn *conn;
3143 
3144 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3145 
3146 	hci_dev_lock(hdev);
3147 
3148 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3149 	if (conn && !ev->status) {
3150 		struct inquiry_entry *ie;
3151 
3152 		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3153 		if (ie) {
3154 			ie->data.clock_offset = ev->clock_offset;
3155 			ie->timestamp = jiffies;
3156 		}
3157 	}
3158 
3159 	hci_dev_unlock(hdev);
3160 }
3161 
3162 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3163 {
3164 	struct hci_ev_pkt_type_change *ev = (void *) skb->data;
3165 	struct hci_conn *conn;
3166 
3167 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3168 
3169 	hci_dev_lock(hdev);
3170 
3171 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3172 	if (conn && !ev->status)
3173 		conn->pkt_type = __le16_to_cpu(ev->pkt_type);
3174 
3175 	hci_dev_unlock(hdev);
3176 }
3177 
3178 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
3179 {
3180 	struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
3181 	struct inquiry_entry *ie;
3182 
3183 	BT_DBG("%s", hdev->name);
3184 
3185 	hci_dev_lock(hdev);
3186 
3187 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3188 	if (ie) {
3189 		ie->data.pscan_rep_mode = ev->pscan_rep_mode;
3190 		ie->timestamp = jiffies;
3191 	}
3192 
3193 	hci_dev_unlock(hdev);
3194 }
3195 
3196 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
3197 					     struct sk_buff *skb)
3198 {
3199 	struct inquiry_data data;
3200 	int num_rsp = *((__u8 *) skb->data);
3201 	bool name_known, ssp;
3202 
3203 	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3204 
3205 	if (!num_rsp)
3206 		return;
3207 
3208 	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
3209 		return;
3210 
3211 	hci_dev_lock(hdev);
3212 
3213 	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
3214 		struct inquiry_info_with_rssi_and_pscan_mode *info;
3215 		info = (void *) (skb->data + 1);
3216 
3217 		for (; num_rsp; num_rsp--, info++) {
3218 			bacpy(&data.bdaddr, &info->bdaddr);
3219 			data.pscan_rep_mode	= info->pscan_rep_mode;
3220 			data.pscan_period_mode	= info->pscan_period_mode;
3221 			data.pscan_mode		= info->pscan_mode;
3222 			memcpy(data.dev_class, info->dev_class, 3);
3223 			data.clock_offset	= info->clock_offset;
3224 			data.rssi		= info->rssi;
3225 			data.ssp_mode		= 0x00;
3226 
3227 			name_known = hci_inquiry_cache_update(hdev, &data,
3228 							      false, &ssp);
3229 			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3230 					  info->dev_class, info->rssi,
3231 					  !name_known, ssp, NULL, 0, NULL, 0);
3232 		}
3233 	} else {
3234 		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
3235 
3236 		for (; num_rsp; num_rsp--, info++) {
3237 			bacpy(&data.bdaddr, &info->bdaddr);
3238 			data.pscan_rep_mode	= info->pscan_rep_mode;
3239 			data.pscan_period_mode	= info->pscan_period_mode;
3240 			data.pscan_mode		= 0x00;
3241 			memcpy(data.dev_class, info->dev_class, 3);
3242 			data.clock_offset	= info->clock_offset;
3243 			data.rssi		= info->rssi;
3244 			data.ssp_mode		= 0x00;
3245 			name_known = hci_inquiry_cache_update(hdev, &data,
3246 							      false, &ssp);
3247 			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3248 					  info->dev_class, info->rssi,
3249 					  !name_known, ssp, NULL, 0, NULL, 0);
3250 		}
3251 	}
3252 
3253 	hci_dev_unlock(hdev);
3254 }
3255 
/* Handle the HCI Read Remote Extended Features Complete event.
 *
 * Stores the requested remote feature page and, for page 1 (host
 * features), derives the SSP and Secure Connections state of the
 * remote host before continuing connection setup.
 */
static void hci_remote_ext_features_evt(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->page < HCI_MAX_PAGES)
		memcpy(conn->features[ev->page], ev->features, 8);

	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

		if (ev->features[0] & LMP_HOST_SSP) {
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		} else {
			/* It is mandatory by the Bluetooth specification that
			 * Extended Inquiry Results are only used when Secure
			 * Simple Pairing is enabled, but some devices violate
			 * this.
			 *
			 * To make these devices work, the internal SSP
			 * enabled flag needs to be cleared if the remote host
			 * features do not indicate SSP support */
			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		}

		if (ev->features[0] & LMP_HOST_SC)
			set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
	}

	if (conn->state != BT_CONFIG)
		goto unlock;

	/* Resolve the remote name before announcing the connection to
	 * the management core, but only once per connection.
	 */
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0,
				      conn->dev_class);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3321 
/* Handle the HCI Synchronous Connection Complete event (SCO/eSCO).
 *
 * On success the connection is activated; on certain rejection errors
 * for an outgoing link the setup is retried with a more compatible
 * packet type before giving up.
 */
static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* An eSCO request may have been downgraded to SCO by the
		 * controller; fall back to the pending eSCO connection.
		 */
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	switch (ev->status) {
	case 0x00:
		conn->handle = __le16_to_cpu(ev->handle);
		conn->state  = BT_CONNECTED;

		hci_conn_add_sysfs(conn);
		break;

	case 0x0d:	/* Connection Rejected due to Limited Resources */
	case 0x11:	/* Unsupported Feature or Parameter Value */
	case 0x1c:	/* SCO interval rejected */
	case 0x1a:	/* Unsupported Remote Feature */
	case 0x1f:	/* Unspecified error */
	case 0x20:	/* Unsupported LMP Parameter value */
		/* Retry an outgoing setup with a reduced packet type
		 * selection; if the retry was issued, wait for its
		 * completion instead of tearing the connection down.
		 */
		if (conn->out) {
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
			if (hci_setup_sync(conn, conn->link->handle))
				goto unlock;
		}
		/* fall through */

	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_proto_connect_cfm(conn, ev->status);
	if (ev->status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
3378 
3379 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
3380 {
3381 	size_t parsed = 0;
3382 
3383 	while (parsed < eir_len) {
3384 		u8 field_len = eir[0];
3385 
3386 		if (field_len == 0)
3387 			return parsed;
3388 
3389 		parsed += field_len + 1;
3390 		eir += field_len + 1;
3391 	}
3392 
3393 	return eir_len;
3394 }
3395 
/* Handle the Extended Inquiry Result event: one or more inquiry
 * responses that additionally carry EIR data. Each response updates
 * the inquiry cache and is forwarded to user space as a device found
 * event.
 */
static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct inquiry_data data;
	struct extended_inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp = *((__u8 *) skb->data);
	size_t eir_len;

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	/* Results are ignored while periodic inquiry is active */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	for (; num_rsp; num_rsp--, info++) {
		bool name_known, ssp;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode	= info->pscan_rep_mode;
		data.pscan_period_mode	= info->pscan_period_mode;
		data.pscan_mode		= 0x00;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset	= info->clock_offset;
		data.rssi		= info->rssi;
		data.ssp_mode		= 0x01;

		/* With mgmt enabled, a remote name request is only
		 * needed when the EIR data does not already contain
		 * the complete name.
		 */
		if (test_bit(HCI_MGMT, &hdev->dev_flags))
			name_known = eir_has_data_type(info->data,
						       sizeof(info->data),
						       EIR_NAME_COMPLETE);
		else
			name_known = true;

		name_known = hci_inquiry_cache_update(hdev, &data, name_known,
						      &ssp);
		/* Forward only the significant part of the EIR data */
		eir_len = eir_get_length(info->data, sizeof(info->data));
		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, info->rssi, !name_known,
				  ssp, info->data, eir_len, NULL, 0);
	}

	hci_dev_unlock(hdev);
}
3443 
/* Handle the Encryption Key Refresh Complete event. Only LE links are
 * processed here: on success the pending security level becomes
 * effective; on failure of a live connection the link is dropped with
 * an authentication failure.
 */
static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
	       __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* For BR/EDR the necessary steps are taken through the
	 * auth_complete event.
	 */
	if (conn->type != LE_LINK)
		goto unlock;

	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* A failed refresh on an established link is fatal: disconnect
	 * with an authentication failure reason.
	 */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3493 
3494 static u8 hci_get_auth_req(struct hci_conn *conn)
3495 {
3496 	/* If remote requests no-bonding follow that lead */
3497 	if (conn->remote_auth == HCI_AT_NO_BONDING ||
3498 	    conn->remote_auth == HCI_AT_NO_BONDING_MITM)
3499 		return conn->remote_auth | (conn->auth_type & 0x01);
3500 
3501 	/* If both remote and local have enough IO capabilities, require
3502 	 * MITM protection
3503 	 */
3504 	if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
3505 	    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
3506 		return conn->remote_auth | 0x01;
3507 
3508 	/* No MITM protection possible so ignore remote requirement */
3509 	return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
3510 }
3511 
/* Handle the IO Capability Request event generated at the start of
 * Secure Simple Pairing. If pairing is allowed, reply with the local
 * IO capability, authentication requirements and OOB data presence;
 * otherwise send a negative reply.
 */
static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Keep the connection alive for the duration of the pairing */
	hci_conn_hold(conn);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	/* Reply if we are pairable, or if the remote only asks for
	 * no-bonding (MITM bit masked out of the comparison).
	 */
	if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
				HCI_IO_DISPLAY_YESNO : conn->io_capability;

		/* If we are initiators, there is no remote information yet */
		if (conn->remote_auth == 0xff) {
			cp.authentication = conn->auth_type;

			/* Request MITM protection if our IO caps allow it
			 * except for the no-bonding case.
			 * conn->auth_type is not updated here since
			 * that might cause the user confirmation to be
			 * rejected in case the remote doesn't have the
			 * IO capabilities for MITM.
			 */
			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
			    cp.authentication != HCI_AT_NO_BONDING)
				cp.authentication |= 0x01;
		} else {
			conn->auth_type = hci_get_auth_req(conn);
			cp.authentication = conn->auth_type;
		}

		/* Advertise OOB data only when it is stored locally and
		 * usable for this pairing direction.
		 */
		if (hci_find_remote_oob_data(hdev, &conn->dst) &&
		    (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
			cp.oob_data = 0x01;
		else
			cp.oob_data = 0x00;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3580 
3581 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3582 {
3583 	struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3584 	struct hci_conn *conn;
3585 
3586 	BT_DBG("%s", hdev->name);
3587 
3588 	hci_dev_lock(hdev);
3589 
3590 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3591 	if (!conn)
3592 		goto unlock;
3593 
3594 	conn->remote_cap = ev->capability;
3595 	conn->remote_auth = ev->authentication;
3596 	if (ev->oob_data)
3597 		set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3598 
3599 unlock:
3600 	hci_dev_unlock(hdev);
3601 }
3602 
/* Handle the User Confirmation Request event of SSP numeric comparison
 * and just-works pairing. Depending on the local and remote MITM
 * requirements, the request is rejected, auto-accepted (possibly after
 * a configurable delay) or forwarded to user space for confirmation.
 */
static void hci_user_confirm_request_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Bit 0 of the authentication requirements is the MITM flag */
	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation request
	 */
	if (loc_mitm && conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). The exception is if neither
		 * side had MITM in which case we do auto-accept.
		 */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
		    (loc_mitm || rem_mitm)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		/* A non-zero delay gives user space a window to cancel
		 * before the confirmation is sent automatically.
		 */
		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			queue_delayed_work(conn->hdev->workqueue,
					   &conn->auto_accept_work, delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
				  le32_to_cpu(ev->passkey), confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
3672 
3673 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
3674 					 struct sk_buff *skb)
3675 {
3676 	struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3677 
3678 	BT_DBG("%s", hdev->name);
3679 
3680 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
3681 		mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3682 }
3683 
3684 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
3685 					struct sk_buff *skb)
3686 {
3687 	struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
3688 	struct hci_conn *conn;
3689 
3690 	BT_DBG("%s", hdev->name);
3691 
3692 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3693 	if (!conn)
3694 		return;
3695 
3696 	conn->passkey_notify = __le32_to_cpu(ev->passkey);
3697 	conn->passkey_entered = 0;
3698 
3699 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
3700 		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3701 					 conn->dst_type, conn->passkey_notify,
3702 					 conn->passkey_entered);
3703 }
3704 
3705 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3706 {
3707 	struct hci_ev_keypress_notify *ev = (void *) skb->data;
3708 	struct hci_conn *conn;
3709 
3710 	BT_DBG("%s", hdev->name);
3711 
3712 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3713 	if (!conn)
3714 		return;
3715 
3716 	switch (ev->type) {
3717 	case HCI_KEYPRESS_STARTED:
3718 		conn->passkey_entered = 0;
3719 		return;
3720 
3721 	case HCI_KEYPRESS_ENTERED:
3722 		conn->passkey_entered++;
3723 		break;
3724 
3725 	case HCI_KEYPRESS_ERASED:
3726 		conn->passkey_entered--;
3727 		break;
3728 
3729 	case HCI_KEYPRESS_CLEARED:
3730 		conn->passkey_entered = 0;
3731 		break;
3732 
3733 	case HCI_KEYPRESS_COMPLETED:
3734 		return;
3735 	}
3736 
3737 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
3738 		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3739 					 conn->dst_type, conn->passkey_notify,
3740 					 conn->passkey_entered);
3741 }
3742 
3743 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
3744 					 struct sk_buff *skb)
3745 {
3746 	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
3747 	struct hci_conn *conn;
3748 
3749 	BT_DBG("%s", hdev->name);
3750 
3751 	hci_dev_lock(hdev);
3752 
3753 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3754 	if (!conn)
3755 		goto unlock;
3756 
3757 	/* To avoid duplicate auth_failed events to user space we check
3758 	 * the HCI_CONN_AUTH_PEND flag which will be set if we
3759 	 * initiated the authentication. A traditional auth_complete
3760 	 * event gets always produced as initiator and is also mapped to
3761 	 * the mgmt_auth_failed event */
3762 	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
3763 		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
3764 				 ev->status);
3765 
3766 	hci_conn_drop(conn);
3767 
3768 unlock:
3769 	hci_dev_unlock(hdev);
3770 }
3771 
3772 static void hci_remote_host_features_evt(struct hci_dev *hdev,
3773 					 struct sk_buff *skb)
3774 {
3775 	struct hci_ev_remote_host_features *ev = (void *) skb->data;
3776 	struct inquiry_entry *ie;
3777 	struct hci_conn *conn;
3778 
3779 	BT_DBG("%s", hdev->name);
3780 
3781 	hci_dev_lock(hdev);
3782 
3783 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3784 	if (conn)
3785 		memcpy(conn->features[1], ev->features, 8);
3786 
3787 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3788 	if (ie)
3789 		ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3790 
3791 	hci_dev_unlock(hdev);
3792 }
3793 
/* Handle the Remote OOB Data Request event: the controller asks for
 * the out-of-band pairing data previously obtained for this remote
 * device. Reply with the stored hash/randomizer values (the extended
 * variant when Secure Connections is enabled) or send a negative
 * reply when nothing is stored.
 */
static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
	struct oob_data *data;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
	if (data) {
		/* With Secure Connections both the P-192 and P-256
		 * values are provided via the extended reply.
		 */
		if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
			struct hci_cp_remote_oob_ext_data_reply cp;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
			memcpy(cp.randomizer192, data->randomizer192,
			       sizeof(cp.randomizer192));
			memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
			memcpy(cp.randomizer256, data->randomizer256,
			       sizeof(cp.randomizer256));

			hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
				     sizeof(cp), &cp);
		} else {
			struct hci_cp_remote_oob_data_reply cp;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			memcpy(cp.hash, data->hash192, sizeof(cp.hash));
			memcpy(cp.randomizer, data->randomizer192,
			       sizeof(cp.randomizer));

			hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
				     sizeof(cp), &cp);
		}
	} else {
		struct hci_cp_remote_oob_data_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3844 
3845 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
3846 				      struct sk_buff *skb)
3847 {
3848 	struct hci_ev_phy_link_complete *ev = (void *) skb->data;
3849 	struct hci_conn *hcon, *bredr_hcon;
3850 
3851 	BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
3852 	       ev->status);
3853 
3854 	hci_dev_lock(hdev);
3855 
3856 	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3857 	if (!hcon) {
3858 		hci_dev_unlock(hdev);
3859 		return;
3860 	}
3861 
3862 	if (ev->status) {
3863 		hci_conn_del(hcon);
3864 		hci_dev_unlock(hdev);
3865 		return;
3866 	}
3867 
3868 	bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
3869 
3870 	hcon->state = BT_CONNECTED;
3871 	bacpy(&hcon->dst, &bredr_hcon->dst);
3872 
3873 	hci_conn_hold(hcon);
3874 	hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3875 	hci_conn_drop(hcon);
3876 
3877 	hci_conn_add_sysfs(hcon);
3878 
3879 	amp_physical_cfm(bredr_hcon, hcon);
3880 
3881 	hci_dev_unlock(hdev);
3882 }
3883 
/* Handle the AMP Logical Link Complete event: create an hci_chan for
 * the new logical link and, if the A2MP manager still has a BR/EDR
 * L2CAP channel, confirm the logical link to L2CAP so the channel can
 * be moved onto the AMP controller.
 */
static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_logical_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon;
	struct hci_chan *hchan;
	struct amp_mgr *mgr;

	BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
	       hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
	       ev->status);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	/* Create AMP hchan */
	hchan = hci_chan_create(hcon);
	if (!hchan)
		return;

	hchan->handle = le16_to_cpu(ev->handle);

	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);

	mgr = hcon->amp_mgr;
	if (mgr && mgr->bredr_chan) {
		struct l2cap_chan *bredr_chan = mgr->bredr_chan;

		l2cap_chan_lock(bredr_chan);

		/* Data sent over the AMP link is limited by the
		 * controller's block MTU.
		 */
		bredr_chan->conn->mtu = hdev->block_mtu;
		l2cap_logical_cfm(bredr_chan, hchan, 0);
		hci_conn_hold(hcon);

		l2cap_chan_unlock(bredr_chan);
	}
}
3921 
3922 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
3923 					     struct sk_buff *skb)
3924 {
3925 	struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
3926 	struct hci_chan *hchan;
3927 
3928 	BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
3929 	       le16_to_cpu(ev->handle), ev->status);
3930 
3931 	if (ev->status)
3932 		return;
3933 
3934 	hci_dev_lock(hdev);
3935 
3936 	hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
3937 	if (!hchan)
3938 		goto unlock;
3939 
3940 	amp_destroy_logical_link(hchan, ev->reason);
3941 
3942 unlock:
3943 	hci_dev_unlock(hdev);
3944 }
3945 
3946 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
3947 					     struct sk_buff *skb)
3948 {
3949 	struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
3950 	struct hci_conn *hcon;
3951 
3952 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3953 
3954 	if (ev->status)
3955 		return;
3956 
3957 	hci_dev_lock(hdev);
3958 
3959 	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3960 	if (hcon) {
3961 		hcon->state = BT_CLOSED;
3962 		hci_conn_del(hcon);
3963 	}
3964 
3965 	hci_dev_unlock(hdev);
3966 }
3967 
/* Handle the LE Connection Complete event: match the event to a
 * pending connection attempt (or create a new hci_conn for an
 * incoming link), fix up the initiator/responder addresses, resolve
 * the peer's identity address via its IRK and finalize the connection
 * state.
 */
static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;
	struct smp_irk *irk;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	/* A locally initiated attempt is tracked as an LE link in
	 * BT_CONNECT state; anything else is an incoming connection.
	 */
	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (!conn) {
		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
		if (!conn) {
			BT_ERR("No memory for new connection");
			goto unlock;
		}

		conn->dst_type = ev->bdaddr_type;

		if (ev->role == LE_CONN_ROLE_MASTER) {
			conn->out = true;
			conn->link_mode |= HCI_LM_MASTER;
		}

		/* If we didn't have a hci_conn object previously
		 * but we're in master role this must be something
		 * initiated using a white list. Since white list based
		 * connections are not "first class citizens" we don't
		 * have full tracking of them. Therefore, we go ahead
		 * with a "best effort" approach of determining the
		 * initiator address based on the HCI_PRIVACY flag.
		 */
		if (conn->out) {
			conn->resp_addr_type = ev->bdaddr_type;
			bacpy(&conn->resp_addr, &ev->bdaddr);
			if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
				bacpy(&conn->init_addr, &hdev->rpa);
			} else {
				hci_copy_identity_address(hdev,
							  &conn->init_addr,
							  &conn->init_addr_type);
			}
		}
	} else {
		/* The attempt completed, so stop the timer that would
		 * otherwise abort the pending connection.
		 */
		cancel_delayed_work(&conn->le_conn_timeout);
	}

	if (!conn->out) {
		/* Set the responder (our side) address type based on
		 * the advertising address type.
		 */
		conn->resp_addr_type = hdev->adv_addr_type;
		if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM)
			bacpy(&conn->resp_addr, &hdev->random_addr);
		else
			bacpy(&conn->resp_addr, &hdev->bdaddr);

		conn->init_addr_type = ev->bdaddr_type;
		bacpy(&conn->init_addr, &ev->bdaddr);
	}

	/* Lookup the identity address from the stored connection
	 * address and address type.
	 *
	 * When establishing connections to an identity address, the
	 * connection procedure will store the resolvable random
	 * address first. Now if it can be converted back into the
	 * identity address, start using the identity address from
	 * now on.
	 */
	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
	if (irk) {
		bacpy(&conn->dst, &irk->bdaddr);
		conn->dst_type = irk->addr_type;
	}

	if (ev->status) {
		hci_le_conn_failed(conn, ev->status);
		goto unlock;
	}

	/* Notify user space only once per connection */
	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0, NULL);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = __le16_to_cpu(ev->handle);
	conn->state = BT_CONNECTED;

	/* Record the connection parameters the controller negotiated */
	conn->le_conn_interval = le16_to_cpu(ev->interval);
	conn->le_conn_latency = le16_to_cpu(ev->latency);
	conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);

	hci_conn_add_sysfs(conn);

	hci_proto_connect_cfm(conn, ev->status);

	/* The device is connected now, so it is no longer pending */
	hci_pend_le_conn_del(hdev, &conn->dst, conn->dst_type);

unlock:
	hci_dev_unlock(hdev);
}
4072 
4073 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
4074 					    struct sk_buff *skb)
4075 {
4076 	struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
4077 	struct hci_conn *conn;
4078 
4079 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4080 
4081 	if (ev->status)
4082 		return;
4083 
4084 	hci_dev_lock(hdev);
4085 
4086 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4087 	if (conn) {
4088 		conn->le_conn_interval = le16_to_cpu(ev->interval);
4089 		conn->le_conn_latency = le16_to_cpu(ev->latency);
4090 		conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
4091 	}
4092 
4093 	hci_dev_unlock(hdev);
4094 }
4095 
4096 /* This function requires the caller holds hdev->lock */
4097 static void check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr,
4098 				  u8 addr_type)
4099 {
4100 	struct hci_conn *conn;
4101 	struct smp_irk *irk;
4102 
4103 	/* If this is a resolvable address, we should resolve it and then
4104 	 * update address and address type variables.
4105 	 */
4106 	irk = hci_get_irk(hdev, addr, addr_type);
4107 	if (irk) {
4108 		addr = &irk->bdaddr;
4109 		addr_type = irk->addr_type;
4110 	}
4111 
4112 	if (!hci_pend_le_conn_lookup(hdev, addr, addr_type))
4113 		return;
4114 
4115 	conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
4116 			      HCI_AT_NO_BONDING);
4117 	if (!IS_ERR(conn))
4118 		return;
4119 
4120 	switch (PTR_ERR(conn)) {
4121 	case -EBUSY:
4122 		/* If hci_connect() returns -EBUSY it means there is already
4123 		 * an LE connection attempt going on. Since controllers don't
4124 		 * support more than one connection attempt at the time, we
4125 		 * don't consider this an error case.
4126 		 */
4127 		break;
4128 	default:
4129 		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
4130 	}
4131 }
4132 
/* Process a single LE advertising report. During passive scanning the
 * report is only used to trigger pending connection attempts. During
 * active scanning, ADV_IND/ADV_SCAN_IND reports are cached so they
 * can be merged with the matching SCAN_RSP before a single device
 * found event is sent to user space.
 */
static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
			       u8 bdaddr_type, s8 rssi, u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;
	bool match;

	/* Passive scanning shouldn't trigger any device found events */
	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
		if (type == LE_ADV_IND || type == LE_ADV_DIRECT_IND)
			check_pending_le_conn(hdev, bdaddr, bdaddr_type);
		return;
	}

	/* If there's nothing pending either store the data from this
	 * event or send an immediate device found event if the data
	 * should not be stored for later.
	 */
	if (!has_pending_adv_report(hdev)) {
		/* If the report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, data, len);
			return;
		}

		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, 0, 1, data, len, NULL, 0);
		return;
	}

	/* Check if the pending report is for the same device as the new one */
	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
		 bdaddr_type == d->last_adv_addr_type);

	/* If the pending data doesn't match this report or this isn't a
	 * scan response (e.g. we got a duplicate ADV_IND) then force
	 * sending of the pending data.
	 */
	if (type != LE_ADV_SCAN_RSP || !match) {
		/* Send out whatever is in the cache, but skip duplicates */
		if (!match)
			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, 0, 1,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);

		/* If the new report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, data, len);
			return;
		}

		/* The advertising reports cannot be merged, so clear
		 * the pending report and send out a device found event.
		 */
		clear_pending_adv_report(hdev);
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, 0, 1, data, len, NULL, 0);
		return;
	}

	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
	 * the new event is a SCAN_RSP. We can therefore proceed with
	 * sending a merged device found event.
	 */
	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
			  d->last_adv_addr_type, NULL, rssi, 0, 1, data, len,
			  d->last_adv_data, d->last_adv_data_len);
	clear_pending_adv_report(hdev);
}
4209 
4210 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
4211 {
4212 	u8 num_reports = skb->data[0];
4213 	void *ptr = &skb->data[1];
4214 
4215 	hci_dev_lock(hdev);
4216 
4217 	while (num_reports--) {
4218 		struct hci_ev_le_advertising_info *ev = ptr;
4219 		s8 rssi;
4220 
4221 		rssi = ev->data[ev->length];
4222 		process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
4223 				   ev->bdaddr_type, rssi, ev->data, ev->length);
4224 
4225 		ptr += sizeof(*ev) + ev->length + 1;
4226 	}
4227 
4228 	hci_dev_unlock(hdev);
4229 }
4230 
/* Handle the LE Long Term Key Request event: the controller needs the
 * LTK matching the given ediv/rand to start encryption. Reply with
 * the stored key, or with a negative reply when no matching key (or
 * connection) is found.
 */
static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	ltk = hci_find_ltk(hdev, ev->ediv, ev->rand, conn->out);
	if (ltk == NULL)
		goto not_found;

	memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
	cp.handle = cpu_to_le16(conn->handle);

	/* The security level this encryption will provide depends on
	 * whether the key itself is authenticated.
	 */
	if (ltk->authenticated)
		conn->pending_sec_level = BT_SECURITY_HIGH;
	else
		conn->pending_sec_level = BT_SECURITY_MEDIUM;

	conn->enc_key_size = ltk->enc_size;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
	 * temporary key used to encrypt a connection following
	 * pairing. It is used during the Encrypted Session Setup to
	 * distribute the keys. Later, security can be re-established
	 * using a distributed LTK.
	 */
	if (ltk->type == SMP_STK) {
		list_del(&ltk->list);
		kfree(ltk);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}
4283 
4284 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
4285 {
4286 	struct hci_ev_le_meta *le_ev = (void *) skb->data;
4287 
4288 	skb_pull(skb, sizeof(*le_ev));
4289 
4290 	switch (le_ev->subevent) {
4291 	case HCI_EV_LE_CONN_COMPLETE:
4292 		hci_le_conn_complete_evt(hdev, skb);
4293 		break;
4294 
4295 	case HCI_EV_LE_CONN_UPDATE_COMPLETE:
4296 		hci_le_conn_update_complete_evt(hdev, skb);
4297 		break;
4298 
4299 	case HCI_EV_LE_ADVERTISING_REPORT:
4300 		hci_le_adv_report_evt(hdev, skb);
4301 		break;
4302 
4303 	case HCI_EV_LE_LTK_REQ:
4304 		hci_le_ltk_request_evt(hdev, skb);
4305 		break;
4306 
4307 	default:
4308 		break;
4309 	}
4310 }
4311 
4312 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4313 {
4314 	struct hci_ev_channel_selected *ev = (void *) skb->data;
4315 	struct hci_conn *hcon;
4316 
4317 	BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4318 
4319 	skb_pull(skb, sizeof(*ev));
4320 
4321 	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4322 	if (!hcon)
4323 		return;
4324 
4325 	amp_read_loc_assoc_final_data(hdev, hcon);
4326 }
4327 
4328 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
4329 {
4330 	struct hci_event_hdr *hdr = (void *) skb->data;
4331 	__u8 event = hdr->evt;
4332 
4333 	hci_dev_lock(hdev);
4334 
4335 	/* Received events are (currently) only needed when a request is
4336 	 * ongoing so avoid unnecessary memory allocation.
4337 	 */
4338 	if (hdev->req_status == HCI_REQ_PEND) {
4339 		kfree_skb(hdev->recv_evt);
4340 		hdev->recv_evt = skb_clone(skb, GFP_KERNEL);
4341 	}
4342 
4343 	hci_dev_unlock(hdev);
4344 
4345 	skb_pull(skb, HCI_EVENT_HDR_SIZE);
4346 
4347 	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
4348 		struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
4349 		u16 opcode = __le16_to_cpu(cmd_hdr->opcode);
4350 
4351 		hci_req_cmd_complete(hdev, opcode, 0);
4352 	}
4353 
4354 	switch (event) {
4355 	case HCI_EV_INQUIRY_COMPLETE:
4356 		hci_inquiry_complete_evt(hdev, skb);
4357 		break;
4358 
4359 	case HCI_EV_INQUIRY_RESULT:
4360 		hci_inquiry_result_evt(hdev, skb);
4361 		break;
4362 
4363 	case HCI_EV_CONN_COMPLETE:
4364 		hci_conn_complete_evt(hdev, skb);
4365 		break;
4366 
4367 	case HCI_EV_CONN_REQUEST:
4368 		hci_conn_request_evt(hdev, skb);
4369 		break;
4370 
4371 	case HCI_EV_DISCONN_COMPLETE:
4372 		hci_disconn_complete_evt(hdev, skb);
4373 		break;
4374 
4375 	case HCI_EV_AUTH_COMPLETE:
4376 		hci_auth_complete_evt(hdev, skb);
4377 		break;
4378 
4379 	case HCI_EV_REMOTE_NAME:
4380 		hci_remote_name_evt(hdev, skb);
4381 		break;
4382 
4383 	case HCI_EV_ENCRYPT_CHANGE:
4384 		hci_encrypt_change_evt(hdev, skb);
4385 		break;
4386 
4387 	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
4388 		hci_change_link_key_complete_evt(hdev, skb);
4389 		break;
4390 
4391 	case HCI_EV_REMOTE_FEATURES:
4392 		hci_remote_features_evt(hdev, skb);
4393 		break;
4394 
4395 	case HCI_EV_CMD_COMPLETE:
4396 		hci_cmd_complete_evt(hdev, skb);
4397 		break;
4398 
4399 	case HCI_EV_CMD_STATUS:
4400 		hci_cmd_status_evt(hdev, skb);
4401 		break;
4402 
4403 	case HCI_EV_ROLE_CHANGE:
4404 		hci_role_change_evt(hdev, skb);
4405 		break;
4406 
4407 	case HCI_EV_NUM_COMP_PKTS:
4408 		hci_num_comp_pkts_evt(hdev, skb);
4409 		break;
4410 
4411 	case HCI_EV_MODE_CHANGE:
4412 		hci_mode_change_evt(hdev, skb);
4413 		break;
4414 
4415 	case HCI_EV_PIN_CODE_REQ:
4416 		hci_pin_code_request_evt(hdev, skb);
4417 		break;
4418 
4419 	case HCI_EV_LINK_KEY_REQ:
4420 		hci_link_key_request_evt(hdev, skb);
4421 		break;
4422 
4423 	case HCI_EV_LINK_KEY_NOTIFY:
4424 		hci_link_key_notify_evt(hdev, skb);
4425 		break;
4426 
4427 	case HCI_EV_CLOCK_OFFSET:
4428 		hci_clock_offset_evt(hdev, skb);
4429 		break;
4430 
4431 	case HCI_EV_PKT_TYPE_CHANGE:
4432 		hci_pkt_type_change_evt(hdev, skb);
4433 		break;
4434 
4435 	case HCI_EV_PSCAN_REP_MODE:
4436 		hci_pscan_rep_mode_evt(hdev, skb);
4437 		break;
4438 
4439 	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
4440 		hci_inquiry_result_with_rssi_evt(hdev, skb);
4441 		break;
4442 
4443 	case HCI_EV_REMOTE_EXT_FEATURES:
4444 		hci_remote_ext_features_evt(hdev, skb);
4445 		break;
4446 
4447 	case HCI_EV_SYNC_CONN_COMPLETE:
4448 		hci_sync_conn_complete_evt(hdev, skb);
4449 		break;
4450 
4451 	case HCI_EV_EXTENDED_INQUIRY_RESULT:
4452 		hci_extended_inquiry_result_evt(hdev, skb);
4453 		break;
4454 
4455 	case HCI_EV_KEY_REFRESH_COMPLETE:
4456 		hci_key_refresh_complete_evt(hdev, skb);
4457 		break;
4458 
4459 	case HCI_EV_IO_CAPA_REQUEST:
4460 		hci_io_capa_request_evt(hdev, skb);
4461 		break;
4462 
4463 	case HCI_EV_IO_CAPA_REPLY:
4464 		hci_io_capa_reply_evt(hdev, skb);
4465 		break;
4466 
4467 	case HCI_EV_USER_CONFIRM_REQUEST:
4468 		hci_user_confirm_request_evt(hdev, skb);
4469 		break;
4470 
4471 	case HCI_EV_USER_PASSKEY_REQUEST:
4472 		hci_user_passkey_request_evt(hdev, skb);
4473 		break;
4474 
4475 	case HCI_EV_USER_PASSKEY_NOTIFY:
4476 		hci_user_passkey_notify_evt(hdev, skb);
4477 		break;
4478 
4479 	case HCI_EV_KEYPRESS_NOTIFY:
4480 		hci_keypress_notify_evt(hdev, skb);
4481 		break;
4482 
4483 	case HCI_EV_SIMPLE_PAIR_COMPLETE:
4484 		hci_simple_pair_complete_evt(hdev, skb);
4485 		break;
4486 
4487 	case HCI_EV_REMOTE_HOST_FEATURES:
4488 		hci_remote_host_features_evt(hdev, skb);
4489 		break;
4490 
4491 	case HCI_EV_LE_META:
4492 		hci_le_meta_evt(hdev, skb);
4493 		break;
4494 
4495 	case HCI_EV_CHANNEL_SELECTED:
4496 		hci_chan_selected_evt(hdev, skb);
4497 		break;
4498 
4499 	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
4500 		hci_remote_oob_data_request_evt(hdev, skb);
4501 		break;
4502 
4503 	case HCI_EV_PHY_LINK_COMPLETE:
4504 		hci_phy_link_complete_evt(hdev, skb);
4505 		break;
4506 
4507 	case HCI_EV_LOGICAL_LINK_COMPLETE:
4508 		hci_loglink_complete_evt(hdev, skb);
4509 		break;
4510 
4511 	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
4512 		hci_disconn_loglink_complete_evt(hdev, skb);
4513 		break;
4514 
4515 	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
4516 		hci_disconn_phylink_complete_evt(hdev, skb);
4517 		break;
4518 
4519 	case HCI_EV_NUM_COMP_BLOCKS:
4520 		hci_num_comp_blocks_evt(hdev, skb);
4521 		break;
4522 
4523 	default:
4524 		BT_DBG("%s event 0x%2.2x", hdev->name, event);
4525 		break;
4526 	}
4527 
4528 	kfree_skb(skb);
4529 	hdev->stat.evt_rx++;
4530 }
4531