xref: /openbmc/linux/net/bluetooth/hci_event.c (revision d23264a896a931c4b355c102d8e9d46649195ba4)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4 
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI event handling. */
26 
27 #include <linux/module.h>
28 
29 #include <linux/types.h>
30 #include <linux/errno.h>
31 #include <linux/kernel.h>
32 #include <linux/slab.h>
33 #include <linux/poll.h>
34 #include <linux/fcntl.h>
35 #include <linux/init.h>
36 #include <linux/skbuff.h>
37 #include <linux/interrupt.h>
38 #include <linux/notifier.h>
39 #include <net/sock.h>
40 
41 #include <asm/system.h>
42 #include <linux/uaccess.h>
43 #include <asm/unaligned.h>
44 
45 #include <net/bluetooth/bluetooth.h>
46 #include <net/bluetooth/hci_core.h>
47 
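/* When non-zero, hci_set_le_support() below advertises LE Host Support
 * (and simultaneous LE and BR/EDR, if the controller supports it) to
 * the controller during device setup. */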
48 static int enable_le;
49 
50 /* Handle HCI Event packets */
51 
52 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
53 {
54 	__u8 status = *((__u8 *) skb->data);
55 
56 	BT_DBG("%s status 0x%x", hdev->name, status);
57 
58 	if (status) {
59 		hci_dev_lock(hdev);
60 		mgmt_stop_discovery_failed(hdev, status);
61 		hci_dev_unlock(hdev);
62 		return;
63 	}
64 
65 	clear_bit(HCI_INQUIRY, &hdev->flags);
66 
67 	hci_dev_lock(hdev);
68 	mgmt_discovering(hdev, 0);
69 	hci_dev_unlock(hdev);
70 
71 	hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);
72 
73 	hci_conn_check_pending(hdev);
74 }
75 
76 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
77 {
78 	__u8 status = *((__u8 *) skb->data);
79 
80 	BT_DBG("%s status 0x%x", hdev->name, status);
81 
82 	if (status)
83 		return;
84 
85 	hci_conn_check_pending(hdev);
86 }
87 
88 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev, struct sk_buff *skb)
89 {
90 	BT_DBG("%s", hdev->name);
91 }
92 
93 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
94 {
95 	struct hci_rp_role_discovery *rp = (void *) skb->data;
96 	struct hci_conn *conn;
97 
98 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
99 
100 	if (rp->status)
101 		return;
102 
103 	hci_dev_lock(hdev);
104 
105 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
106 	if (conn) {
107 		if (rp->role)
108 			conn->link_mode &= ~HCI_LM_MASTER;
109 		else
110 			conn->link_mode |= HCI_LM_MASTER;
111 	}
112 
113 	hci_dev_unlock(hdev);
114 }
115 
116 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
117 {
118 	struct hci_rp_read_link_policy *rp = (void *) skb->data;
119 	struct hci_conn *conn;
120 
121 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
122 
123 	if (rp->status)
124 		return;
125 
126 	hci_dev_lock(hdev);
127 
128 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
129 	if (conn)
130 		conn->link_policy = __le16_to_cpu(rp->policy);
131 
132 	hci_dev_unlock(hdev);
133 }
134 
135 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
136 {
137 	struct hci_rp_write_link_policy *rp = (void *) skb->data;
138 	struct hci_conn *conn;
139 	void *sent;
140 
141 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
142 
143 	if (rp->status)
144 		return;
145 
146 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
147 	if (!sent)
148 		return;
149 
150 	hci_dev_lock(hdev);
151 
152 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
153 	if (conn)
154 		conn->link_policy = get_unaligned_le16(sent + 2);
155 
156 	hci_dev_unlock(hdev);
157 }
158 
159 static void hci_cc_read_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
160 {
161 	struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
162 
163 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
164 
165 	if (rp->status)
166 		return;
167 
168 	hdev->link_policy = __le16_to_cpu(rp->policy);
169 }
170 
171 static void hci_cc_write_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
172 {
173 	__u8 status = *((__u8 *) skb->data);
174 	void *sent;
175 
176 	BT_DBG("%s status 0x%x", hdev->name, status);
177 
178 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
179 	if (!sent)
180 		return;
181 
182 	if (!status)
183 		hdev->link_policy = get_unaligned_le16(sent);
184 
185 	hci_req_complete(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, status);
186 }
187 
188 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
189 {
190 	__u8 status = *((__u8 *) skb->data);
191 
192 	BT_DBG("%s status 0x%x", hdev->name, status);
193 
194 	clear_bit(HCI_RESET, &hdev->flags);
195 
196 	hci_req_complete(hdev, HCI_OP_RESET, status);
197 
198 	hdev->dev_flags = 0;
199 }
200 
201 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
202 {
203 	__u8 status = *((__u8 *) skb->data);
204 	void *sent;
205 
206 	BT_DBG("%s status 0x%x", hdev->name, status);
207 
208 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
209 	if (!sent)
210 		return;
211 
212 	hci_dev_lock(hdev);
213 
214 	if (test_bit(HCI_MGMT, &hdev->flags))
215 		mgmt_set_local_name_complete(hdev, sent, status);
216 
217 	if (status == 0)
218 		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
219 
220 	hci_dev_unlock(hdev);
221 }
222 
223 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
224 {
225 	struct hci_rp_read_local_name *rp = (void *) skb->data;
226 
227 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
228 
229 	if (rp->status)
230 		return;
231 
232 	memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
233 }
234 
235 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
236 {
237 	__u8 status = *((__u8 *) skb->data);
238 	void *sent;
239 
240 	BT_DBG("%s status 0x%x", hdev->name, status);
241 
242 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
243 	if (!sent)
244 		return;
245 
246 	if (!status) {
247 		__u8 param = *((__u8 *) sent);
248 
249 		if (param == AUTH_ENABLED)
250 			set_bit(HCI_AUTH, &hdev->flags);
251 		else
252 			clear_bit(HCI_AUTH, &hdev->flags);
253 	}
254 
255 	hci_req_complete(hdev, HCI_OP_WRITE_AUTH_ENABLE, status);
256 }
257 
258 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
259 {
260 	__u8 status = *((__u8 *) skb->data);
261 	void *sent;
262 
263 	BT_DBG("%s status 0x%x", hdev->name, status);
264 
265 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
266 	if (!sent)
267 		return;
268 
269 	if (!status) {
270 		__u8 param = *((__u8 *) sent);
271 
272 		if (param)
273 			set_bit(HCI_ENCRYPT, &hdev->flags);
274 		else
275 			clear_bit(HCI_ENCRYPT, &hdev->flags);
276 	}
277 
278 	hci_req_complete(hdev, HCI_OP_WRITE_ENCRYPT_MODE, status);
279 }
280 
281 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
282 {
283 	__u8 param, status = *((__u8 *) skb->data);
284 	int old_pscan, old_iscan;
285 	void *sent;
286 
287 	BT_DBG("%s status 0x%x", hdev->name, status);
288 
289 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
290 	if (!sent)
291 		return;
292 
293 	param = *((__u8 *) sent);
294 
295 	hci_dev_lock(hdev);
296 
297 	if (status != 0) {
298 		mgmt_write_scan_failed(hdev, param, status);
299 		hdev->discov_timeout = 0;
300 		goto done;
301 	}
302 
303 	old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
304 	old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
305 
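	/* Inquiry Scan corresponds to the "discoverable" setting and Page
	 * Scan to "connectable"; the management interface is notified only
	 * when the respective state actually changes. */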
306 	if (param & SCAN_INQUIRY) {
307 		set_bit(HCI_ISCAN, &hdev->flags);
308 		if (!old_iscan)
309 			mgmt_discoverable(hdev, 1);
310 		if (hdev->discov_timeout > 0) {
311 			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
312 			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
313 									to);
314 		}
315 	} else if (old_iscan)
316 		mgmt_discoverable(hdev, 0);
317 
318 	if (param & SCAN_PAGE) {
319 		set_bit(HCI_PSCAN, &hdev->flags);
320 		if (!old_pscan)
321 			mgmt_connectable(hdev, 1);
322 	} else if (old_pscan)
323 		mgmt_connectable(hdev, 0);
324 
325 done:
326 	hci_dev_unlock(hdev);
327 	hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status);
328 }
329 
330 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
331 {
332 	struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
333 
334 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
335 
336 	if (rp->status)
337 		return;
338 
339 	memcpy(hdev->dev_class, rp->dev_class, 3);
340 
341 	BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
342 		hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
343 }
344 
345 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
346 {
347 	__u8 status = *((__u8 *) skb->data);
348 	void *sent;
349 
350 	BT_DBG("%s status 0x%x", hdev->name, status);
351 
352 	if (status)
353 		return;
354 
355 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
356 	if (!sent)
357 		return;
358 
359 	memcpy(hdev->dev_class, sent, 3);
360 }
361 
362 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
363 {
364 	struct hci_rp_read_voice_setting *rp = (void *) skb->data;
365 	__u16 setting;
366 
367 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
368 
369 	if (rp->status)
370 		return;
371 
372 	setting = __le16_to_cpu(rp->voice_setting);
373 
374 	if (hdev->voice_setting == setting)
375 		return;
376 
377 	hdev->voice_setting = setting;
378 
379 	BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
380 
381 	if (hdev->notify) {
382 		tasklet_disable(&hdev->tx_task);
383 		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
384 		tasklet_enable(&hdev->tx_task);
385 	}
386 }
387 
388 static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
389 {
390 	__u8 status = *((__u8 *) skb->data);
391 	__u16 setting;
392 	void *sent;
393 
394 	BT_DBG("%s status 0x%x", hdev->name, status);
395 
396 	if (status)
397 		return;
398 
399 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
400 	if (!sent)
401 		return;
402 
403 	setting = get_unaligned_le16(sent);
404 
405 	if (hdev->voice_setting == setting)
406 		return;
407 
408 	hdev->voice_setting = setting;
409 
410 	BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
411 
412 	if (hdev->notify) {
413 		tasklet_disable(&hdev->tx_task);
414 		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
415 		tasklet_enable(&hdev->tx_task);
416 	}
417 }
418 
419 static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
420 {
421 	__u8 status = *((__u8 *) skb->data);
422 
423 	BT_DBG("%s status 0x%x", hdev->name, status);
424 
425 	hci_req_complete(hdev, HCI_OP_HOST_BUFFER_SIZE, status);
426 }
427 
428 static void hci_cc_read_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
429 {
430 	struct hci_rp_read_ssp_mode *rp = (void *) skb->data;
431 
432 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
433 
434 	if (rp->status)
435 		return;
436 
437 	hdev->ssp_mode = rp->mode;
438 }
439 
440 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
441 {
442 	__u8 status = *((__u8 *) skb->data);
443 	void *sent;
444 
445 	BT_DBG("%s status 0x%x", hdev->name, status);
446 
447 	if (status)
448 		return;
449 
450 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
451 	if (!sent)
452 		return;
453 
454 	hdev->ssp_mode = *((__u8 *) sent);
455 }
456 
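/* Pick the inquiry mode written by hci_setup_inquiry_mode(): 0x00 is the
 * standard inquiry result format, 0x01 adds RSSI and 0x02 enables
 * extended inquiry results.  The manufacturer/revision checks below
 * select RSSI mode for controllers that presumably support it without
 * advertising the corresponding LMP feature bit. */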
457 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
458 {
459 	if (hdev->features[6] & LMP_EXT_INQ)
460 		return 2;
461 
462 	if (hdev->features[3] & LMP_RSSI_INQ)
463 		return 1;
464 
465 	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
466 						hdev->lmp_subver == 0x0757)
467 		return 1;
468 
469 	if (hdev->manufacturer == 15) {
470 		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
471 			return 1;
472 		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
473 			return 1;
474 		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
475 			return 1;
476 	}
477 
478 	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
479 						hdev->lmp_subver == 0x1805)
480 		return 1;
481 
482 	return 0;
483 }
484 
485 static void hci_setup_inquiry_mode(struct hci_dev *hdev)
486 {
487 	u8 mode;
488 
489 	mode = hci_get_inquiry_mode(hdev);
490 
491 	hci_send_cmd(hdev, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
492 }
493 
494 static void hci_setup_event_mask(struct hci_dev *hdev)
495 {
496 	/* The second byte is 0xff instead of 0x9f (two reserved bits
497 	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
498 	 * command otherwise */
499 	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
500 
501 	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
502 	 * any event mask for pre-1.2 devices */
503 	if (hdev->lmp_ver <= 1)
504 		return;
505 
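	/* For 1.2 and later controllers, unconditionally enable the events
	 * added in the 1.2 specification, then enable optional events only
	 * when the controller advertises the matching LMP feature. */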
506 	events[4] |= 0x01; /* Flow Specification Complete */
507 	events[4] |= 0x02; /* Inquiry Result with RSSI */
508 	events[4] |= 0x04; /* Read Remote Extended Features Complete */
509 	events[5] |= 0x08; /* Synchronous Connection Complete */
510 	events[5] |= 0x10; /* Synchronous Connection Changed */
511 
512 	if (hdev->features[3] & LMP_RSSI_INQ)
513 		events[4] |= 0x04; /* Inquiry Result with RSSI */
514 
515 	if (hdev->features[5] & LMP_SNIFF_SUBR)
516 		events[5] |= 0x20; /* Sniff Subrating */
517 
518 	if (hdev->features[5] & LMP_PAUSE_ENC)
519 		events[5] |= 0x80; /* Encryption Key Refresh Complete */
520 
521 	if (hdev->features[6] & LMP_EXT_INQ)
522 		events[5] |= 0x40; /* Extended Inquiry Result */
523 
524 	if (hdev->features[6] & LMP_NO_FLUSH)
525 		events[7] |= 0x01; /* Enhanced Flush Complete */
526 
527 	if (hdev->features[7] & LMP_LSTO)
528 		events[6] |= 0x80; /* Link Supervision Timeout Changed */
529 
530 	if (hdev->features[6] & LMP_SIMPLE_PAIR) {
531 		events[6] |= 0x01;	/* IO Capability Request */
532 		events[6] |= 0x02;	/* IO Capability Response */
533 		events[6] |= 0x04;	/* User Confirmation Request */
534 		events[6] |= 0x08;	/* User Passkey Request */
535 		events[6] |= 0x10;	/* Remote OOB Data Request */
536 		events[6] |= 0x20;	/* Simple Pairing Complete */
537 		events[7] |= 0x04;	/* User Passkey Notification */
538 		events[7] |= 0x08;	/* Keypress Notification */
539 		events[7] |= 0x10;	/* Remote Host Supported
540 					 * Features Notification */
541 	}
542 
543 	if (hdev->features[4] & LMP_LE)
544 		events[7] |= 0x20;	/* LE Meta-Event */
545 
546 	hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
547 }
548 
549 static void hci_set_le_support(struct hci_dev *hdev)
550 {
551 	struct hci_cp_write_le_host_supported cp;
552 
553 	memset(&cp, 0, sizeof(cp));
554 
555 	if (enable_le) {
556 		cp.le = 1;
557 		cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR);
558 	}
559 
560 	hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp), &cp);
561 }
562 
563 static void hci_setup(struct hci_dev *hdev)
564 {
565 	hci_setup_event_mask(hdev);
566 
567 	if (hdev->lmp_ver > 1)
568 		hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
569 
570 	if (hdev->features[6] & LMP_SIMPLE_PAIR) {
571 		u8 mode = 0x01;
572 		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
573 	}
574 
575 	if (hdev->features[3] & LMP_RSSI_INQ)
576 		hci_setup_inquiry_mode(hdev);
577 
578 	if (hdev->features[7] & LMP_INQ_TX_PWR)
579 		hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
580 
581 	if (hdev->features[7] & LMP_EXTFEATURES) {
582 		struct hci_cp_read_local_ext_features cp;
583 
584 		cp.page = 0x01;
585 		hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES,
586 							sizeof(cp), &cp);
587 	}
588 
589 	if (hdev->features[4] & LMP_LE)
590 		hci_set_le_support(hdev);
591 }
592 
593 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
594 {
595 	struct hci_rp_read_local_version *rp = (void *) skb->data;
596 
597 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
598 
599 	if (rp->status)
600 		return;
601 
602 	hdev->hci_ver = rp->hci_ver;
603 	hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
604 	hdev->lmp_ver = rp->lmp_ver;
605 	hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
606 	hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
607 
608 	BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name,
609 					hdev->manufacturer,
610 					hdev->hci_ver, hdev->hci_rev);
611 
612 	if (test_bit(HCI_INIT, &hdev->flags))
613 		hci_setup(hdev);
614 }
615 
616 static void hci_setup_link_policy(struct hci_dev *hdev)
617 {
618 	u16 link_policy = 0;
619 
620 	if (hdev->features[0] & LMP_RSWITCH)
621 		link_policy |= HCI_LP_RSWITCH;
622 	if (hdev->features[0] & LMP_HOLD)
623 		link_policy |= HCI_LP_HOLD;
624 	if (hdev->features[0] & LMP_SNIFF)
625 		link_policy |= HCI_LP_SNIFF;
626 	if (hdev->features[1] & LMP_PARK)
627 		link_policy |= HCI_LP_PARK;
628 
629 	link_policy = cpu_to_le16(link_policy);
630 	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
631 					sizeof(link_policy), &link_policy);
632 }
633 
634 static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb)
635 {
636 	struct hci_rp_read_local_commands *rp = (void *) skb->data;
637 
638 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
639 
640 	if (rp->status)
641 		goto done;
642 
643 	memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
644 
645 	if (test_bit(HCI_INIT, &hdev->flags) && (hdev->commands[5] & 0x10))
646 		hci_setup_link_policy(hdev);
647 
648 done:
649 	hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status);
650 }
651 
652 static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb)
653 {
654 	struct hci_rp_read_local_features *rp = (void *) skb->data;
655 
656 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
657 
658 	if (rp->status)
659 		return;
660 
661 	memcpy(hdev->features, rp->features, 8);
662 
663 	/* Adjust default settings according to the features
664 	 * supported by the device. */
665 
666 	if (hdev->features[0] & LMP_3SLOT)
667 		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
668 
669 	if (hdev->features[0] & LMP_5SLOT)
670 		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
671 
672 	if (hdev->features[1] & LMP_HV2) {
673 		hdev->pkt_type  |= (HCI_HV2);
674 		hdev->esco_type |= (ESCO_HV2);
675 	}
676 
677 	if (hdev->features[1] & LMP_HV3) {
678 		hdev->pkt_type  |= (HCI_HV3);
679 		hdev->esco_type |= (ESCO_HV3);
680 	}
681 
682 	if (hdev->features[3] & LMP_ESCO)
683 		hdev->esco_type |= (ESCO_EV3);
684 
685 	if (hdev->features[4] & LMP_EV4)
686 		hdev->esco_type |= (ESCO_EV4);
687 
688 	if (hdev->features[4] & LMP_EV5)
689 		hdev->esco_type |= (ESCO_EV5);
690 
691 	if (hdev->features[5] & LMP_EDR_ESCO_2M)
692 		hdev->esco_type |= (ESCO_2EV3);
693 
694 	if (hdev->features[5] & LMP_EDR_ESCO_3M)
695 		hdev->esco_type |= (ESCO_3EV3);
696 
697 	if (hdev->features[5] & LMP_EDR_3S_ESCO)
698 		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
699 
700 	BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
701 					hdev->features[0], hdev->features[1],
702 					hdev->features[2], hdev->features[3],
703 					hdev->features[4], hdev->features[5],
704 					hdev->features[6], hdev->features[7]);
705 }
706 
707 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
708 							struct sk_buff *skb)
709 {
710 	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
711 
712 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
713 
714 	if (rp->status)
715 		return;
716 
717 	memcpy(hdev->extfeatures, rp->features, 8);
718 
719 	hci_req_complete(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, rp->status);
720 }
721 
722 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
723 						struct sk_buff *skb)
724 {
725 	struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
726 
727 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
728 
729 	if (rp->status)
730 		return;
731 
732 	hdev->flow_ctl_mode = rp->mode;
733 
734 	hci_req_complete(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, rp->status);
735 }
736 
737 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
738 {
739 	struct hci_rp_read_buffer_size *rp = (void *) skb->data;
740 
741 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
742 
743 	if (rp->status)
744 		return;
745 
746 	hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
747 	hdev->sco_mtu  = rp->sco_mtu;
748 	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
749 	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
750 
751 	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
752 		hdev->sco_mtu  = 64;
753 		hdev->sco_pkts = 8;
754 	}
755 
756 	hdev->acl_cnt = hdev->acl_pkts;
757 	hdev->sco_cnt = hdev->sco_pkts;
758 
759 	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name,
760 					hdev->acl_mtu, hdev->acl_pkts,
761 					hdev->sco_mtu, hdev->sco_pkts);
762 }
763 
764 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
765 {
766 	struct hci_rp_read_bd_addr *rp = (void *) skb->data;
767 
768 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
769 
770 	if (!rp->status)
771 		bacpy(&hdev->bdaddr, &rp->bdaddr);
772 
773 	hci_req_complete(hdev, HCI_OP_READ_BD_ADDR, rp->status);
774 }
775 
776 static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
777 {
778 	__u8 status = *((__u8 *) skb->data);
779 
780 	BT_DBG("%s status 0x%x", hdev->name, status);
781 
782 	hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
783 }
784 
785 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
786 		struct sk_buff *skb)
787 {
788 	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
789 
790 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
791 
792 	if (rp->status)
793 		return;
794 
795 	hdev->amp_status = rp->amp_status;
796 	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
797 	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
798 	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
799 	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
800 	hdev->amp_type = rp->amp_type;
801 	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
802 	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
803 	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
804 	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
805 
806 	hci_req_complete(hdev, HCI_OP_READ_LOCAL_AMP_INFO, rp->status);
807 }
808 
809 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
810 							struct sk_buff *skb)
811 {
812 	__u8 status = *((__u8 *) skb->data);
813 
814 	BT_DBG("%s status 0x%x", hdev->name, status);
815 
816 	hci_req_complete(hdev, HCI_OP_DELETE_STORED_LINK_KEY, status);
817 }
818 
819 static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
820 {
821 	__u8 status = *((__u8 *) skb->data);
822 
823 	BT_DBG("%s status 0x%x", hdev->name, status);
824 
825 	hci_req_complete(hdev, HCI_OP_SET_EVENT_MASK, status);
826 }
827 
828 static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
829 							struct sk_buff *skb)
830 {
831 	__u8 status = *((__u8 *) skb->data);
832 
833 	BT_DBG("%s status 0x%x", hdev->name, status);
834 
835 	hci_req_complete(hdev, HCI_OP_WRITE_INQUIRY_MODE, status);
836 }
837 
838 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
839 							struct sk_buff *skb)
840 {
841 	__u8 status = *((__u8 *) skb->data);
842 
843 	BT_DBG("%s status 0x%x", hdev->name, status);
844 
845 	hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, status);
846 }
847 
848 static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
849 {
850 	__u8 status = *((__u8 *) skb->data);
851 
852 	BT_DBG("%s status 0x%x", hdev->name, status);
853 
854 	hci_req_complete(hdev, HCI_OP_SET_EVENT_FLT, status);
855 }
856 
857 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
858 {
859 	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
860 	struct hci_cp_pin_code_reply *cp;
861 	struct hci_conn *conn;
862 
863 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
864 
865 	hci_dev_lock(hdev);
866 
867 	if (test_bit(HCI_MGMT, &hdev->flags))
868 		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
869 
870 	if (rp->status != 0)
871 		goto unlock;
872 
873 	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
874 	if (!cp)
875 		goto unlock;
876 
877 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
878 	if (conn)
879 		conn->pin_length = cp->pin_len;
880 
881 unlock:
882 	hci_dev_unlock(hdev);
883 }
884 
885 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
886 {
887 	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
888 
889 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
890 
891 	hci_dev_lock(hdev);
892 
893 	if (test_bit(HCI_MGMT, &hdev->flags))
894 		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
895 								rp->status);
896 
897 	hci_dev_unlock(hdev);
898 }
899 
900 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
901 				       struct sk_buff *skb)
902 {
903 	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
904 
905 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
906 
907 	if (rp->status)
908 		return;
909 
910 	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
911 	hdev->le_pkts = rp->le_max_pkt;
912 
913 	hdev->le_cnt = hdev->le_pkts;
914 
915 	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
916 
917 	hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status);
918 }
919 
920 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
921 {
922 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
923 
924 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
925 
926 	hci_dev_lock(hdev);
927 
928 	if (test_bit(HCI_MGMT, &hdev->flags))
929 		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr,
930 								rp->status);
931 
932 	hci_dev_unlock(hdev);
933 }
934 
935 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
936 							struct sk_buff *skb)
937 {
938 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
939 
940 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
941 
942 	hci_dev_lock(hdev);
943 
944 	if (test_bit(HCI_MGMT, &hdev->flags))
945 		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
946 								rp->status);
947 
948 	hci_dev_unlock(hdev);
949 }
950 
951 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
952 {
953 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
954 
955 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
956 
957 	hci_dev_lock(hdev);
958 
959 	if (test_bit(HCI_MGMT, &hdev->flags))
960 		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr,
961 								rp->status);
962 
963 	hci_dev_unlock(hdev);
964 }
965 
966 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
967 							struct sk_buff *skb)
968 {
969 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
970 
971 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
972 
973 	hci_dev_lock(hdev);
974 
975 	if (test_bit(HCI_MGMT, &hdev->flags))
976 		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
977 								rp->status);
978 
979 	hci_dev_unlock(hdev);
980 }
981 
982 static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
983 							struct sk_buff *skb)
984 {
985 	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
986 
987 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
988 
989 	hci_dev_lock(hdev);
990 	mgmt_read_local_oob_data_reply_complete(hdev, rp->hash,
991 						rp->randomizer, rp->status);
992 	hci_dev_unlock(hdev);
993 }
994 
995 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
996 					struct sk_buff *skb)
997 {
998 	struct hci_cp_le_set_scan_enable *cp;
999 	__u8 status = *((__u8 *) skb->data);
1000 
1001 	BT_DBG("%s status 0x%x", hdev->name, status);
1002 
1003 	if (status)
1004 		return;
1005 
1006 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1007 	if (!cp)
1008 		return;
1009 
1010 	if (cp->enable == 0x01) {
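	/* When scanning is enabled, the cached advertising entries are
	 * cleared and the expiry timer stopped; when it is disabled, the
	 * timer is armed so stale entries are flushed after
	 * ADV_CLEAR_TIMEOUT. */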
1011 		set_bit(HCI_LE_SCAN, &hdev->dev_flags);
1012 
1013 		del_timer(&hdev->adv_timer);
1014 
1015 		hci_dev_lock(hdev);
1016 		hci_adv_entries_clear(hdev);
1017 		hci_dev_unlock(hdev);
1018 	} else if (cp->enable == 0x00) {
1019 		clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
1020 
1021 		mod_timer(&hdev->adv_timer, jiffies + ADV_CLEAR_TIMEOUT);
1022 	}
1023 }
1024 
1025 static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb)
1026 {
1027 	struct hci_rp_le_ltk_reply *rp = (void *) skb->data;
1028 
1029 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
1030 
1031 	if (rp->status)
1032 		return;
1033 
1034 	hci_req_complete(hdev, HCI_OP_LE_LTK_REPLY, rp->status);
1035 }
1036 
1037 static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
1038 {
1039 	struct hci_rp_le_ltk_neg_reply *rp = (void *) skb->data;
1040 
1041 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
1042 
1043 	if (rp->status)
1044 		return;
1045 
1046 	hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status);
1047 }
1048 
1049 static inline void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1050 							struct sk_buff *skb)
1051 {
1052 	struct hci_cp_read_local_ext_features cp;
1053 	__u8 status = *((__u8 *) skb->data);
1054 
1055 	BT_DBG("%s status 0x%x", hdev->name, status);
1056 
1057 	if (status)
1058 		return;
1059 
1060 	cp.page = 0x01;
1061 	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, sizeof(cp), &cp);
1062 }
1063 
1064 static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1065 {
1066 	BT_DBG("%s status 0x%x", hdev->name, status);
1067 
1068 	if (status) {
1069 		hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1070 		hci_conn_check_pending(hdev);
1071 		hci_dev_lock(hdev);
1072 		if (test_bit(HCI_MGMT, &hdev->flags))
1073 			mgmt_start_discovery_failed(hdev, status);
1074 		hci_dev_unlock(hdev);
1075 		return;
1076 	}
1077 
1078 	set_bit(HCI_INQUIRY, &hdev->flags);
1079 
1080 	hci_dev_lock(hdev);
1081 	mgmt_discovering(hdev, 1);
1082 	hci_dev_unlock(hdev);
1083 }
1084 
1085 static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1086 {
1087 	struct hci_cp_create_conn *cp;
1088 	struct hci_conn *conn;
1089 
1090 	BT_DBG("%s status 0x%x", hdev->name, status);
1091 
1092 	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1093 	if (!cp)
1094 		return;
1095 
1096 	hci_dev_lock(hdev);
1097 
1098 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1099 
1100 	BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->bdaddr), conn);
1101 
1102 	if (status) {
1103 		if (conn && conn->state == BT_CONNECT) {
1104 			if (status != 0x0c || conn->attempt > 2) {
1105 				conn->state = BT_CLOSED;
1106 				hci_proto_connect_cfm(conn, status);
1107 				hci_conn_del(conn);
1108 			} else
1109 				conn->state = BT_CONNECT2;
1110 		}
1111 	} else {
1112 		if (!conn) {
1113 			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
1114 			if (conn) {
1115 				conn->out = 1;
1116 				conn->link_mode |= HCI_LM_MASTER;
1117 			} else
1118 				BT_ERR("No memory for new connection");
1119 		}
1120 	}
1121 
1122 	hci_dev_unlock(hdev);
1123 }
1124 
1125 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1126 {
1127 	struct hci_cp_add_sco *cp;
1128 	struct hci_conn *acl, *sco;
1129 	__u16 handle;
1130 
1131 	BT_DBG("%s status 0x%x", hdev->name, status);
1132 
1133 	if (!status)
1134 		return;
1135 
1136 	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1137 	if (!cp)
1138 		return;
1139 
1140 	handle = __le16_to_cpu(cp->handle);
1141 
1142 	BT_DBG("%s handle %d", hdev->name, handle);
1143 
1144 	hci_dev_lock(hdev);
1145 
1146 	acl = hci_conn_hash_lookup_handle(hdev, handle);
1147 	if (acl) {
1148 		sco = acl->link;
1149 		if (sco) {
1150 			sco->state = BT_CLOSED;
1151 
1152 			hci_proto_connect_cfm(sco, status);
1153 			hci_conn_del(sco);
1154 		}
1155 	}
1156 
1157 	hci_dev_unlock(hdev);
1158 }
1159 
1160 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1161 {
1162 	struct hci_cp_auth_requested *cp;
1163 	struct hci_conn *conn;
1164 
1165 	BT_DBG("%s status 0x%x", hdev->name, status);
1166 
1167 	if (!status)
1168 		return;
1169 
1170 	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1171 	if (!cp)
1172 		return;
1173 
1174 	hci_dev_lock(hdev);
1175 
1176 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1177 	if (conn) {
1178 		if (conn->state == BT_CONFIG) {
1179 			hci_proto_connect_cfm(conn, status);
1180 			hci_conn_put(conn);
1181 		}
1182 	}
1183 
1184 	hci_dev_unlock(hdev);
1185 }
1186 
1187 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1188 {
1189 	struct hci_cp_set_conn_encrypt *cp;
1190 	struct hci_conn *conn;
1191 
1192 	BT_DBG("%s status 0x%x", hdev->name, status);
1193 
1194 	if (!status)
1195 		return;
1196 
1197 	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1198 	if (!cp)
1199 		return;
1200 
1201 	hci_dev_lock(hdev);
1202 
1203 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1204 	if (conn) {
1205 		if (conn->state == BT_CONFIG) {
1206 			hci_proto_connect_cfm(conn, status);
1207 			hci_conn_put(conn);
1208 		}
1209 	}
1210 
1211 	hci_dev_unlock(hdev);
1212 }
1213 
1214 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1215 							struct hci_conn *conn)
1216 {
1217 	if (conn->state != BT_CONFIG || !conn->out)
1218 		return 0;
1219 
1220 	if (conn->pending_sec_level == BT_SECURITY_SDP)
1221 		return 0;
1222 
1223 	/* Only request authentication for SSP connections, for non-SSP
1224 	 * devices with sec_level HIGH, or when MITM protection is requested */
1225 	if (!(hdev->ssp_mode > 0 && conn->ssp_mode > 0) &&
1226 				conn->pending_sec_level != BT_SECURITY_HIGH &&
1227 				!(conn->auth_type & 0x01))
1228 		return 0;
1229 
1230 	return 1;
1231 }
1232 
1233 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1234 {
1235 	struct hci_cp_remote_name_req *cp;
1236 	struct hci_conn *conn;
1237 
1238 	BT_DBG("%s status 0x%x", hdev->name, status);
1239 
1240 	/* If successful, wait for the remote name request complete event
1241 	 * before checking whether authentication is needed */
1242 	if (!status)
1243 		return;
1244 
1245 	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1246 	if (!cp)
1247 		return;
1248 
1249 	hci_dev_lock(hdev);
1250 
1251 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1252 	if (!conn)
1253 		goto unlock;
1254 
1255 	if (!hci_outgoing_auth_needed(hdev, conn))
1256 		goto unlock;
1257 
1258 	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
1259 		struct hci_cp_auth_requested cp;
1260 		cp.handle = __cpu_to_le16(conn->handle);
1261 		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1262 	}
1263 
1264 unlock:
1265 	hci_dev_unlock(hdev);
1266 }
1267 
1268 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1269 {
1270 	struct hci_cp_read_remote_features *cp;
1271 	struct hci_conn *conn;
1272 
1273 	BT_DBG("%s status 0x%x", hdev->name, status);
1274 
1275 	if (!status)
1276 		return;
1277 
1278 	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1279 	if (!cp)
1280 		return;
1281 
1282 	hci_dev_lock(hdev);
1283 
1284 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1285 	if (conn) {
1286 		if (conn->state == BT_CONFIG) {
1287 			hci_proto_connect_cfm(conn, status);
1288 			hci_conn_put(conn);
1289 		}
1290 	}
1291 
1292 	hci_dev_unlock(hdev);
1293 }
1294 
1295 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1296 {
1297 	struct hci_cp_read_remote_ext_features *cp;
1298 	struct hci_conn *conn;
1299 
1300 	BT_DBG("%s status 0x%x", hdev->name, status);
1301 
1302 	if (!status)
1303 		return;
1304 
1305 	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1306 	if (!cp)
1307 		return;
1308 
1309 	hci_dev_lock(hdev);
1310 
1311 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1312 	if (conn) {
1313 		if (conn->state == BT_CONFIG) {
1314 			hci_proto_connect_cfm(conn, status);
1315 			hci_conn_put(conn);
1316 		}
1317 	}
1318 
1319 	hci_dev_unlock(hdev);
1320 }
1321 
1322 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1323 {
1324 	struct hci_cp_setup_sync_conn *cp;
1325 	struct hci_conn *acl, *sco;
1326 	__u16 handle;
1327 
1328 	BT_DBG("%s status 0x%x", hdev->name, status);
1329 
1330 	if (!status)
1331 		return;
1332 
1333 	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1334 	if (!cp)
1335 		return;
1336 
1337 	handle = __le16_to_cpu(cp->handle);
1338 
1339 	BT_DBG("%s handle %d", hdev->name, handle);
1340 
1341 	hci_dev_lock(hdev);
1342 
1343 	acl = hci_conn_hash_lookup_handle(hdev, handle);
1344 	if (acl) {
1345 		sco = acl->link;
1346 		if (sco) {
1347 			sco->state = BT_CLOSED;
1348 
1349 			hci_proto_connect_cfm(sco, status);
1350 			hci_conn_del(sco);
1351 		}
1352 	}
1353 
1354 	hci_dev_unlock(hdev);
1355 }
1356 
1357 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1358 {
1359 	struct hci_cp_sniff_mode *cp;
1360 	struct hci_conn *conn;
1361 
1362 	BT_DBG("%s status 0x%x", hdev->name, status);
1363 
1364 	if (!status)
1365 		return;
1366 
1367 	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1368 	if (!cp)
1369 		return;
1370 
1371 	hci_dev_lock(hdev);
1372 
1373 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1374 	if (conn) {
1375 		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend);
1376 
1377 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend))
1378 			hci_sco_setup(conn, status);
1379 	}
1380 
1381 	hci_dev_unlock(hdev);
1382 }
1383 
1384 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1385 {
1386 	struct hci_cp_exit_sniff_mode *cp;
1387 	struct hci_conn *conn;
1388 
1389 	BT_DBG("%s status 0x%x", hdev->name, status);
1390 
1391 	if (!status)
1392 		return;
1393 
1394 	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1395 	if (!cp)
1396 		return;
1397 
1398 	hci_dev_lock(hdev);
1399 
1400 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1401 	if (conn) {
1402 		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend);
1403 
1404 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend))
1405 			hci_sco_setup(conn, status);
1406 	}
1407 
1408 	hci_dev_unlock(hdev);
1409 }
1410 
1411 static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
1412 {
1413 	struct hci_cp_le_create_conn *cp;
1414 	struct hci_conn *conn;
1415 
1416 	BT_DBG("%s status 0x%x", hdev->name, status);
1417 
1418 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
1419 	if (!cp)
1420 		return;
1421 
1422 	hci_dev_lock(hdev);
1423 
1424 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
1425 
1426 	BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->peer_addr),
1427 		conn);
1428 
1429 	if (status) {
1430 		if (conn && conn->state == BT_CONNECT) {
1431 			conn->state = BT_CLOSED;
1432 			hci_proto_connect_cfm(conn, status);
1433 			hci_conn_del(conn);
1434 		}
1435 	} else {
1436 		if (!conn) {
1437 			conn = hci_conn_add(hdev, LE_LINK, &cp->peer_addr);
1438 			if (conn) {
1439 				conn->dst_type = cp->peer_addr_type;
1440 				conn->out = 1;
1441 			} else {
1442 				BT_ERR("No memory for new connection");
1443 			}
1444 		}
1445 	}
1446 
1447 	hci_dev_unlock(hdev);
1448 }
1449 
1450 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
1451 {
1452 	BT_DBG("%s status 0x%x", hdev->name, status);
1453 }
1454 
1455 static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1456 {
1457 	__u8 status = *((__u8 *) skb->data);
1458 
1459 	BT_DBG("%s status %d", hdev->name, status);
1460 
1461 	hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1462 
1463 	hci_conn_check_pending(hdev);
1464 
1465 	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
1466 		return;
1467 
1468 	hci_dev_lock(hdev);
1469 	mgmt_discovering(hdev, 0);
1470 	hci_dev_unlock(hdev);
1471 }
1472 
1473 static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1474 {
1475 	struct inquiry_data data;
1476 	struct inquiry_info *info = (void *) (skb->data + 1);
1477 	int num_rsp = *((__u8 *) skb->data);
1478 
1479 	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1480 
1481 	if (!num_rsp)
1482 		return;
1483 
1484 	hci_dev_lock(hdev);
1485 
1486 	for (; num_rsp; num_rsp--, info++) {
1487 		bacpy(&data.bdaddr, &info->bdaddr);
1488 		data.pscan_rep_mode	= info->pscan_rep_mode;
1489 		data.pscan_period_mode	= info->pscan_period_mode;
1490 		data.pscan_mode		= info->pscan_mode;
1491 		memcpy(data.dev_class, info->dev_class, 3);
1492 		data.clock_offset	= info->clock_offset;
1493 		data.rssi		= 0x00;
1494 		data.ssp_mode		= 0x00;
1495 		hci_inquiry_cache_update(hdev, &data);
1496 		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
1497 						info->dev_class, 0, NULL);
1498 	}
1499 
1500 	hci_dev_unlock(hdev);
1501 }
1502 
1503 static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1504 {
1505 	struct hci_ev_conn_complete *ev = (void *) skb->data;
1506 	struct hci_conn *conn;
1507 
1508 	BT_DBG("%s", hdev->name);
1509 
1510 	hci_dev_lock(hdev);
1511 
1512 	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
1513 	if (!conn) {
1514 		if (ev->link_type != SCO_LINK)
1515 			goto unlock;
1516 
1517 		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
1518 		if (!conn)
1519 			goto unlock;
1520 
1521 		conn->type = SCO_LINK;
1522 	}
1523 
1524 	if (!ev->status) {
1525 		conn->handle = __le16_to_cpu(ev->handle);
1526 
1527 		if (conn->type == ACL_LINK) {
1528 			conn->state = BT_CONFIG;
1529 			hci_conn_hold(conn);
1530 			conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1531 			mgmt_connected(hdev, &ev->bdaddr, conn->type,
1532 							conn->dst_type);
1533 		} else
1534 			conn->state = BT_CONNECTED;
1535 
1536 		hci_conn_hold_device(conn);
1537 		hci_conn_add_sysfs(conn);
1538 
1539 		if (test_bit(HCI_AUTH, &hdev->flags))
1540 			conn->link_mode |= HCI_LM_AUTH;
1541 
1542 		if (test_bit(HCI_ENCRYPT, &hdev->flags))
1543 			conn->link_mode |= HCI_LM_ENCRYPT;
1544 
1545 		/* Get remote features */
1546 		if (conn->type == ACL_LINK) {
1547 			struct hci_cp_read_remote_features cp;
1548 			cp.handle = ev->handle;
1549 			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
1550 							sizeof(cp), &cp);
1551 		}
1552 
1553 		/* Set packet type for incoming connection */
1554 		if (!conn->out && hdev->hci_ver < 3) {
1555 			struct hci_cp_change_conn_ptype cp;
1556 			cp.handle = ev->handle;
1557 			cp.pkt_type = cpu_to_le16(conn->pkt_type);
1558 			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE,
1559 							sizeof(cp), &cp);
1560 		}
1561 	} else {
1562 		conn->state = BT_CLOSED;
1563 		if (conn->type == ACL_LINK)
1564 			mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
1565 						conn->dst_type, ev->status);
1566 	}
1567 
1568 	if (conn->type == ACL_LINK)
1569 		hci_sco_setup(conn, ev->status);
1570 
1571 	if (ev->status) {
1572 		hci_proto_connect_cfm(conn, ev->status);
1573 		hci_conn_del(conn);
1574 	} else if (ev->link_type != ACL_LINK)
1575 		hci_proto_connect_cfm(conn, ev->status);
1576 
1577 unlock:
1578 	hci_dev_unlock(hdev);
1579 
1580 	hci_conn_check_pending(hdev);
1581 }
1582 
1583 static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1584 {
1585 	struct hci_ev_conn_request *ev = (void *) skb->data;
1586 	int mask = hdev->link_mode;
1587 
1588 	BT_DBG("%s bdaddr %s type 0x%x", hdev->name,
1589 					batostr(&ev->bdaddr), ev->link_type);
1590 
1591 	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type);
1592 
1593 	if ((mask & HCI_LM_ACCEPT) &&
1594 			!hci_blacklist_lookup(hdev, &ev->bdaddr)) {
1595 		/* Connection accepted */
1596 		struct inquiry_entry *ie;
1597 		struct hci_conn *conn;
1598 
1599 		hci_dev_lock(hdev);
1600 
1601 		ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
1602 		if (ie)
1603 			memcpy(ie->data.dev_class, ev->dev_class, 3);
1604 
1605 		conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
1606 		if (!conn) {
1607 			conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
1608 			if (!conn) {
1609 				BT_ERR("No memory for new connection");
1610 				hci_dev_unlock(hdev);
1611 				return;
1612 			}
1613 		}
1614 
1615 		memcpy(conn->dev_class, ev->dev_class, 3);
1616 		conn->state = BT_CONNECT;
1617 
1618 		hci_dev_unlock(hdev);
1619 
1620 		if (ev->link_type == ACL_LINK || !lmp_esco_capable(hdev)) {
1621 			struct hci_cp_accept_conn_req cp;
1622 
1623 			bacpy(&cp.bdaddr, &ev->bdaddr);
1624 
1625 			if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
1626 				cp.role = 0x00; /* Become master */
1627 			else
1628 				cp.role = 0x01; /* Remain slave */
1629 
1630 			hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ,
1631 							sizeof(cp), &cp);
1632 		} else {
1633 			struct hci_cp_accept_sync_conn_req cp;
1634 
1635 			bacpy(&cp.bdaddr, &ev->bdaddr);
1636 			cp.pkt_type = cpu_to_le16(conn->pkt_type);
1637 
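			/* Default synchronous connection parameters:
			 * 8000 bytes/s (64 kbit/s) in each direction,
			 * no maximum latency limit and "don't care"
			 * retransmission effort. */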
1638 			cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
1639 			cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
1640 			cp.max_latency    = cpu_to_le16(0xffff);
1641 			cp.content_format = cpu_to_le16(hdev->voice_setting);
1642 			cp.retrans_effort = 0xff;
1643 
1644 			hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
1645 							sizeof(cp), &cp);
1646 		}
1647 	} else {
1648 		/* Connection rejected */
1649 		struct hci_cp_reject_conn_req cp;
1650 
1651 		bacpy(&cp.bdaddr, &ev->bdaddr);
1652 		cp.reason = HCI_ERROR_REJ_BAD_ADDR;
1653 		hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
1654 	}
1655 }
1656 
1657 static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1658 {
1659 	struct hci_ev_disconn_complete *ev = (void *) skb->data;
1660 	struct hci_conn *conn;
1661 
1662 	BT_DBG("%s status %d", hdev->name, ev->status);
1663 
1664 	hci_dev_lock(hdev);
1665 
1666 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1667 	if (!conn)
1668 		goto unlock;
1669 
1670 	if (ev->status == 0)
1671 		conn->state = BT_CLOSED;
1672 
1673 	if (conn->type == ACL_LINK || conn->type == LE_LINK) {
1674 		if (ev->status != 0)
1675 			mgmt_disconnect_failed(hdev, &conn->dst, ev->status);
1676 		else
1677 			mgmt_disconnected(hdev, &conn->dst, conn->type,
1678 							conn->dst_type);
1679 	}
1680 
1681 	if (ev->status == 0) {
1682 		hci_proto_disconn_cfm(conn, ev->reason);
1683 		hci_conn_del(conn);
1684 	}
1685 
1686 unlock:
1687 	hci_dev_unlock(hdev);
1688 }
1689 
1690 static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1691 {
1692 	struct hci_ev_auth_complete *ev = (void *) skb->data;
1693 	struct hci_conn *conn;
1694 
1695 	BT_DBG("%s status %d", hdev->name, ev->status);
1696 
1697 	hci_dev_lock(hdev);
1698 
1699 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1700 	if (!conn)
1701 		goto unlock;
1702 
1703 	if (!ev->status) {
1704 		if (!(conn->ssp_mode > 0 && hdev->ssp_mode > 0) &&
1705 				test_bit(HCI_CONN_REAUTH_PEND,	&conn->pend)) {
1706 			BT_INFO("re-auth of legacy device is not possible.");
1707 		} else {
1708 			conn->link_mode |= HCI_LM_AUTH;
1709 			conn->sec_level = conn->pending_sec_level;
1710 		}
1711 	} else {
1712 		mgmt_auth_failed(hdev, &conn->dst, ev->status);
1713 	}
1714 
1715 	clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
1716 	clear_bit(HCI_CONN_REAUTH_PEND, &conn->pend);
1717 
1718 	if (conn->state == BT_CONFIG) {
1719 		if (!ev->status && hdev->ssp_mode > 0 && conn->ssp_mode > 0) {
1720 			struct hci_cp_set_conn_encrypt cp;
1721 			cp.handle  = ev->handle;
1722 			cp.encrypt = 0x01;
1723 			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1724 									&cp);
1725 		} else {
1726 			conn->state = BT_CONNECTED;
1727 			hci_proto_connect_cfm(conn, ev->status);
1728 			hci_conn_put(conn);
1729 		}
1730 	} else {
1731 		hci_auth_cfm(conn, ev->status);
1732 
1733 		hci_conn_hold(conn);
1734 		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1735 		hci_conn_put(conn);
1736 	}
1737 
1738 	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) {
1739 		if (!ev->status) {
1740 			struct hci_cp_set_conn_encrypt cp;
1741 			cp.handle  = ev->handle;
1742 			cp.encrypt = 0x01;
1743 			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1744 									&cp);
1745 		} else {
1746 			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);
1747 			hci_encrypt_cfm(conn, ev->status, 0x00);
1748 		}
1749 	}
1750 
1751 unlock:
1752 	hci_dev_unlock(hdev);
1753 }
1754 
1755 static inline void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
1756 {
1757 	struct hci_ev_remote_name *ev = (void *) skb->data;
1758 	struct hci_conn *conn;
1759 
1760 	BT_DBG("%s", hdev->name);
1761 
1762 	hci_conn_check_pending(hdev);
1763 
1764 	hci_dev_lock(hdev);
1765 
1766 	if (ev->status == 0 && test_bit(HCI_MGMT, &hdev->flags))
1767 		mgmt_remote_name(hdev, &ev->bdaddr, ev->name);
1768 
1769 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
1770 	if (!conn)
1771 		goto unlock;
1772 
1773 	if (!hci_outgoing_auth_needed(hdev, conn))
1774 		goto unlock;
1775 
1776 	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
1777 		struct hci_cp_auth_requested cp;
1778 		cp.handle = __cpu_to_le16(conn->handle);
1779 		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1780 	}
1781 
1782 unlock:
1783 	hci_dev_unlock(hdev);
1784 }
1785 
1786 static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
1787 {
1788 	struct hci_ev_encrypt_change *ev = (void *) skb->data;
1789 	struct hci_conn *conn;
1790 
1791 	BT_DBG("%s status %d", hdev->name, ev->status);
1792 
1793 	hci_dev_lock(hdev);
1794 
1795 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1796 	if (conn) {
1797 		if (!ev->status) {
1798 			if (ev->encrypt) {
1799 				/* Encryption implies authentication */
1800 				conn->link_mode |= HCI_LM_AUTH;
1801 				conn->link_mode |= HCI_LM_ENCRYPT;
1802 				conn->sec_level = conn->pending_sec_level;
1803 			} else
1804 				conn->link_mode &= ~HCI_LM_ENCRYPT;
1805 		}
1806 
1807 		clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);
1808 
1809 		if (conn->state == BT_CONFIG) {
1810 			if (!ev->status)
1811 				conn->state = BT_CONNECTED;
1812 
1813 			hci_proto_connect_cfm(conn, ev->status);
1814 			hci_conn_put(conn);
1815 		} else
1816 			hci_encrypt_cfm(conn, ev->status, ev->encrypt);
1817 	}
1818 
1819 	hci_dev_unlock(hdev);
1820 }
1821 
1822 static inline void hci_change_link_key_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1823 {
1824 	struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
1825 	struct hci_conn *conn;
1826 
1827 	BT_DBG("%s status %d", hdev->name, ev->status);
1828 
1829 	hci_dev_lock(hdev);
1830 
1831 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1832 	if (conn) {
1833 		if (!ev->status)
1834 			conn->link_mode |= HCI_LM_SECURE;
1835 
1836 		clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
1837 
1838 		hci_key_change_cfm(conn, ev->status);
1839 	}
1840 
1841 	hci_dev_unlock(hdev);
1842 }
1843 
1844 static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
1845 {
1846 	struct hci_ev_remote_features *ev = (void *) skb->data;
1847 	struct hci_conn *conn;
1848 
1849 	BT_DBG("%s status %d", hdev->name, ev->status);
1850 
1851 	hci_dev_lock(hdev);
1852 
1853 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1854 	if (!conn)
1855 		goto unlock;
1856 
1857 	if (!ev->status)
1858 		memcpy(conn->features, ev->features, 8);
1859 
1860 	if (conn->state != BT_CONFIG)
1861 		goto unlock;
1862 
1863 	if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
1864 		struct hci_cp_read_remote_ext_features cp;
1865 		cp.handle = ev->handle;
1866 		cp.page = 0x01;
1867 		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
1868 							sizeof(cp), &cp);
1869 		goto unlock;
1870 	}
1871 
1872 	if (!ev->status) {
1873 		struct hci_cp_remote_name_req cp;
1874 		memset(&cp, 0, sizeof(cp));
1875 		bacpy(&cp.bdaddr, &conn->dst);
1876 		cp.pscan_rep_mode = 0x02;
1877 		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1878 	}
1879 
1880 	if (!hci_outgoing_auth_needed(hdev, conn)) {
1881 		conn->state = BT_CONNECTED;
1882 		hci_proto_connect_cfm(conn, ev->status);
1883 		hci_conn_put(conn);
1884 	}
1885 
1886 unlock:
1887 	hci_dev_unlock(hdev);
1888 }
1889 
1890 static inline void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb)
1891 {
1892 	BT_DBG("%s", hdev->name);
1893 }
1894 
1895 static inline void hci_qos_setup_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1896 {
1897 	BT_DBG("%s", hdev->name);
1898 }
1899 
1900 static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1901 {
1902 	struct hci_ev_cmd_complete *ev = (void *) skb->data;
1903 	__u16 opcode;
1904 
1905 	skb_pull(skb, sizeof(*ev));
1906 
1907 	opcode = __le16_to_cpu(ev->opcode);
1908 
1909 	switch (opcode) {
1910 	case HCI_OP_INQUIRY_CANCEL:
1911 		hci_cc_inquiry_cancel(hdev, skb);
1912 		break;
1913 
1914 	case HCI_OP_EXIT_PERIODIC_INQ:
1915 		hci_cc_exit_periodic_inq(hdev, skb);
1916 		break;
1917 
1918 	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
1919 		hci_cc_remote_name_req_cancel(hdev, skb);
1920 		break;
1921 
1922 	case HCI_OP_ROLE_DISCOVERY:
1923 		hci_cc_role_discovery(hdev, skb);
1924 		break;
1925 
1926 	case HCI_OP_READ_LINK_POLICY:
1927 		hci_cc_read_link_policy(hdev, skb);
1928 		break;
1929 
1930 	case HCI_OP_WRITE_LINK_POLICY:
1931 		hci_cc_write_link_policy(hdev, skb);
1932 		break;
1933 
1934 	case HCI_OP_READ_DEF_LINK_POLICY:
1935 		hci_cc_read_def_link_policy(hdev, skb);
1936 		break;
1937 
1938 	case HCI_OP_WRITE_DEF_LINK_POLICY:
1939 		hci_cc_write_def_link_policy(hdev, skb);
1940 		break;
1941 
1942 	case HCI_OP_RESET:
1943 		hci_cc_reset(hdev, skb);
1944 		break;
1945 
1946 	case HCI_OP_WRITE_LOCAL_NAME:
1947 		hci_cc_write_local_name(hdev, skb);
1948 		break;
1949 
1950 	case HCI_OP_READ_LOCAL_NAME:
1951 		hci_cc_read_local_name(hdev, skb);
1952 		break;
1953 
1954 	case HCI_OP_WRITE_AUTH_ENABLE:
1955 		hci_cc_write_auth_enable(hdev, skb);
1956 		break;
1957 
1958 	case HCI_OP_WRITE_ENCRYPT_MODE:
1959 		hci_cc_write_encrypt_mode(hdev, skb);
1960 		break;
1961 
1962 	case HCI_OP_WRITE_SCAN_ENABLE:
1963 		hci_cc_write_scan_enable(hdev, skb);
1964 		break;
1965 
1966 	case HCI_OP_READ_CLASS_OF_DEV:
1967 		hci_cc_read_class_of_dev(hdev, skb);
1968 		break;
1969 
1970 	case HCI_OP_WRITE_CLASS_OF_DEV:
1971 		hci_cc_write_class_of_dev(hdev, skb);
1972 		break;
1973 
1974 	case HCI_OP_READ_VOICE_SETTING:
1975 		hci_cc_read_voice_setting(hdev, skb);
1976 		break;
1977 
1978 	case HCI_OP_WRITE_VOICE_SETTING:
1979 		hci_cc_write_voice_setting(hdev, skb);
1980 		break;
1981 
1982 	case HCI_OP_HOST_BUFFER_SIZE:
1983 		hci_cc_host_buffer_size(hdev, skb);
1984 		break;
1985 
1986 	case HCI_OP_READ_SSP_MODE:
1987 		hci_cc_read_ssp_mode(hdev, skb);
1988 		break;
1989 
1990 	case HCI_OP_WRITE_SSP_MODE:
1991 		hci_cc_write_ssp_mode(hdev, skb);
1992 		break;
1993 
1994 	case HCI_OP_READ_LOCAL_VERSION:
1995 		hci_cc_read_local_version(hdev, skb);
1996 		break;
1997 
1998 	case HCI_OP_READ_LOCAL_COMMANDS:
1999 		hci_cc_read_local_commands(hdev, skb);
2000 		break;
2001 
2002 	case HCI_OP_READ_LOCAL_FEATURES:
2003 		hci_cc_read_local_features(hdev, skb);
2004 		break;
2005 
2006 	case HCI_OP_READ_LOCAL_EXT_FEATURES:
2007 		hci_cc_read_local_ext_features(hdev, skb);
2008 		break;
2009 
2010 	case HCI_OP_READ_BUFFER_SIZE:
2011 		hci_cc_read_buffer_size(hdev, skb);
2012 		break;
2013 
2014 	case HCI_OP_READ_BD_ADDR:
2015 		hci_cc_read_bd_addr(hdev, skb);
2016 		break;
2017 
2018 	case HCI_OP_WRITE_CA_TIMEOUT:
2019 		hci_cc_write_ca_timeout(hdev, skb);
2020 		break;
2021 
2022 	case HCI_OP_READ_FLOW_CONTROL_MODE:
2023 		hci_cc_read_flow_control_mode(hdev, skb);
2024 		break;
2025 
2026 	case HCI_OP_READ_LOCAL_AMP_INFO:
2027 		hci_cc_read_local_amp_info(hdev, skb);
2028 		break;
2029 
2030 	case HCI_OP_DELETE_STORED_LINK_KEY:
2031 		hci_cc_delete_stored_link_key(hdev, skb);
2032 		break;
2033 
2034 	case HCI_OP_SET_EVENT_MASK:
2035 		hci_cc_set_event_mask(hdev, skb);
2036 		break;
2037 
2038 	case HCI_OP_WRITE_INQUIRY_MODE:
2039 		hci_cc_write_inquiry_mode(hdev, skb);
2040 		break;
2041 
2042 	case HCI_OP_READ_INQ_RSP_TX_POWER:
2043 		hci_cc_read_inq_rsp_tx_power(hdev, skb);
2044 		break;
2045 
2046 	case HCI_OP_SET_EVENT_FLT:
2047 		hci_cc_set_event_flt(hdev, skb);
2048 		break;
2049 
2050 	case HCI_OP_PIN_CODE_REPLY:
2051 		hci_cc_pin_code_reply(hdev, skb);
2052 		break;
2053 
2054 	case HCI_OP_PIN_CODE_NEG_REPLY:
2055 		hci_cc_pin_code_neg_reply(hdev, skb);
2056 		break;
2057 
2058 	case HCI_OP_READ_LOCAL_OOB_DATA:
2059 		hci_cc_read_local_oob_data_reply(hdev, skb);
2060 		break;
2061 
2062 	case HCI_OP_LE_READ_BUFFER_SIZE:
2063 		hci_cc_le_read_buffer_size(hdev, skb);
2064 		break;
2065 
2066 	case HCI_OP_USER_CONFIRM_REPLY:
2067 		hci_cc_user_confirm_reply(hdev, skb);
2068 		break;
2069 
2070 	case HCI_OP_USER_CONFIRM_NEG_REPLY:
2071 		hci_cc_user_confirm_neg_reply(hdev, skb);
2072 		break;
2073 
2074 	case HCI_OP_USER_PASSKEY_REPLY:
2075 		hci_cc_user_passkey_reply(hdev, skb);
2076 		break;
2077 
2078 	case HCI_OP_USER_PASSKEY_NEG_REPLY:
2079 		hci_cc_user_passkey_neg_reply(hdev, skb);
2080 		break;
2081 
2082 	case HCI_OP_LE_SET_SCAN_ENABLE:
2083 		hci_cc_le_set_scan_enable(hdev, skb);
2084 		break;
2085 
2086 	case HCI_OP_LE_LTK_REPLY:
2087 		hci_cc_le_ltk_reply(hdev, skb);
2088 		break;
2089 
2090 	case HCI_OP_LE_LTK_NEG_REPLY:
2091 		hci_cc_le_ltk_neg_reply(hdev, skb);
2092 		break;
2093 
2094 	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
2095 		hci_cc_write_le_host_supported(hdev, skb);
2096 		break;
2097 
2098 	default:
2099 		BT_DBG("%s opcode 0x%x", hdev->name, opcode);
2100 		break;
2101 	}
2102 
2103 	if (ev->opcode != HCI_OP_NOP)
2104 		del_timer(&hdev->cmd_timer);
2105 
2106 	if (ev->ncmd) {
2107 		atomic_set(&hdev->cmd_cnt, 1);
2108 		if (!skb_queue_empty(&hdev->cmd_q))
2109 			tasklet_schedule(&hdev->cmd_task);
2110 	}
2111 }
2112 
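/* Command Status event: dispatch to the matching hci_cs_* handler with the
 * reported status, then update the command flow control state as in
 * hci_cmd_complete_evt, but only re-arm the queue when no reset is pending.
 */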
2113 static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
2114 {
2115 	struct hci_ev_cmd_status *ev = (void *) skb->data;
2116 	__u16 opcode;
2117 
2118 	skb_pull(skb, sizeof(*ev));
2119 
2120 	opcode = __le16_to_cpu(ev->opcode);
2121 
2122 	switch (opcode) {
2123 	case HCI_OP_INQUIRY:
2124 		hci_cs_inquiry(hdev, ev->status);
2125 		break;
2126 
2127 	case HCI_OP_CREATE_CONN:
2128 		hci_cs_create_conn(hdev, ev->status);
2129 		break;
2130 
2131 	case HCI_OP_ADD_SCO:
2132 		hci_cs_add_sco(hdev, ev->status);
2133 		break;
2134 
2135 	case HCI_OP_AUTH_REQUESTED:
2136 		hci_cs_auth_requested(hdev, ev->status);
2137 		break;
2138 
2139 	case HCI_OP_SET_CONN_ENCRYPT:
2140 		hci_cs_set_conn_encrypt(hdev, ev->status);
2141 		break;
2142 
2143 	case HCI_OP_REMOTE_NAME_REQ:
2144 		hci_cs_remote_name_req(hdev, ev->status);
2145 		break;
2146 
2147 	case HCI_OP_READ_REMOTE_FEATURES:
2148 		hci_cs_read_remote_features(hdev, ev->status);
2149 		break;
2150 
2151 	case HCI_OP_READ_REMOTE_EXT_FEATURES:
2152 		hci_cs_read_remote_ext_features(hdev, ev->status);
2153 		break;
2154 
2155 	case HCI_OP_SETUP_SYNC_CONN:
2156 		hci_cs_setup_sync_conn(hdev, ev->status);
2157 		break;
2158 
2159 	case HCI_OP_SNIFF_MODE:
2160 		hci_cs_sniff_mode(hdev, ev->status);
2161 		break;
2162 
2163 	case HCI_OP_EXIT_SNIFF_MODE:
2164 		hci_cs_exit_sniff_mode(hdev, ev->status);
2165 		break;
2166 
2167 	case HCI_OP_DISCONNECT:
2168 		if (ev->status != 0)
2169 			mgmt_disconnect_failed(hdev, NULL, ev->status);
2170 		break;
2171 
2172 	case HCI_OP_LE_CREATE_CONN:
2173 		hci_cs_le_create_conn(hdev, ev->status);
2174 		break;
2175 
2176 	case HCI_OP_LE_START_ENC:
2177 		hci_cs_le_start_enc(hdev, ev->status);
2178 		break;
2179 
2180 	default:
2181 		BT_DBG("%s opcode 0x%x", hdev->name, opcode);
2182 		break;
2183 	}
2184 
2185 	if (ev->opcode != HCI_OP_NOP)
2186 		del_timer(&hdev->cmd_timer);
2187 
2188 	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2189 		atomic_set(&hdev->cmd_cnt, 1);
2190 		if (!skb_queue_empty(&hdev->cmd_q))
2191 			tasklet_schedule(&hdev->cmd_task);
2192 	}
2193 }
2194 
2195 static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2196 {
2197 	struct hci_ev_role_change *ev = (void *) skb->data;
2198 	struct hci_conn *conn;
2199 
2200 	BT_DBG("%s status %d", hdev->name, ev->status);
2201 
2202 	hci_dev_lock(hdev);
2203 
2204 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2205 	if (conn) {
2206 		if (!ev->status) {
2207 			if (ev->role)
2208 				conn->link_mode &= ~HCI_LM_MASTER;
2209 			else
2210 				conn->link_mode |= HCI_LM_MASTER;
2211 		}
2212 
2213 		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->pend);
2214 
2215 		hci_role_switch_cfm(conn, ev->status, ev->role);
2216 	}
2217 
2218 	hci_dev_unlock(hdev);
2219 }
2220 
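/* Number of Completed Packets event: walk the handle/count pairs, credit
 * the per-connection sent counter and the per-device ACL, LE or SCO packet
 * quotas, then reschedule the TX tasklet so queued frames can go out.
 */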
2221 static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
2222 {
2223 	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
2224 	__le16 *ptr;
2225 	int i;
2226 
2227 	skb_pull(skb, sizeof(*ev));
2228 
2229 	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
2230 
2231 	if (skb->len < ev->num_hndl * 4) {
2232 		BT_DBG("%s bad parameters", hdev->name);
2233 		return;
2234 	}
2235 
2236 	tasklet_disable(&hdev->tx_task);
2237 
2238 	for (i = 0, ptr = (__le16 *) skb->data; i < ev->num_hndl; i++) {
2239 		struct hci_conn *conn;
2240 		__u16  handle, count;
2241 
2242 		handle = get_unaligned_le16(ptr++);
2243 		count  = get_unaligned_le16(ptr++);
2244 
2245 		conn = hci_conn_hash_lookup_handle(hdev, handle);
2246 		if (conn) {
2247 			conn->sent -= count;
2248 
2249 			if (conn->type == ACL_LINK) {
2250 				hdev->acl_cnt += count;
2251 				if (hdev->acl_cnt > hdev->acl_pkts)
2252 					hdev->acl_cnt = hdev->acl_pkts;
2253 			} else if (conn->type == LE_LINK) {
2254 				if (hdev->le_pkts) {
2255 					hdev->le_cnt += count;
2256 					if (hdev->le_cnt > hdev->le_pkts)
2257 						hdev->le_cnt = hdev->le_pkts;
2258 				} else {
2259 					hdev->acl_cnt += count;
2260 					if (hdev->acl_cnt > hdev->acl_pkts)
2261 						hdev->acl_cnt = hdev->acl_pkts;
2262 				}
2263 			} else {
2264 				hdev->sco_cnt += count;
2265 				if (hdev->sco_cnt > hdev->sco_pkts)
2266 					hdev->sco_cnt = hdev->sco_pkts;
2267 			}
2268 		}
2269 	}
2270 
2271 	tasklet_schedule(&hdev->tx_task);
2272 
2273 	tasklet_enable(&hdev->tx_task);
2274 }
2275 
2276 static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2277 {
2278 	struct hci_ev_mode_change *ev = (void *) skb->data;
2279 	struct hci_conn *conn;
2280 
2281 	BT_DBG("%s status %d", hdev->name, ev->status);
2282 
2283 	hci_dev_lock(hdev);
2284 
2285 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2286 	if (conn) {
2287 		conn->mode = ev->mode;
2288 		conn->interval = __le16_to_cpu(ev->interval);
2289 
2290 		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
2291 			if (conn->mode == HCI_CM_ACTIVE)
2292 				conn->power_save = 1;
2293 			else
2294 				conn->power_save = 0;
2295 		}
2296 
2297 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend))
2298 			hci_sco_setup(conn, ev->status);
2299 	}
2300 
2301 	hci_dev_unlock(hdev);
2302 }
2303 
2304 static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2305 {
2306 	struct hci_ev_pin_code_req *ev = (void *) skb->data;
2307 	struct hci_conn *conn;
2308 
2309 	BT_DBG("%s", hdev->name);
2310 
2311 	hci_dev_lock(hdev);
2312 
2313 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2314 	if (!conn)
2315 		goto unlock;
2316 
2317 	if (conn->state == BT_CONNECTED) {
2318 		hci_conn_hold(conn);
2319 		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2320 		hci_conn_put(conn);
2321 	}
2322 
2323 	if (!test_bit(HCI_PAIRABLE, &hdev->flags))
2324 		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2325 					sizeof(ev->bdaddr), &ev->bdaddr);
2326 	else if (test_bit(HCI_MGMT, &hdev->flags)) {
2327 		u8 secure;
2328 
2329 		if (conn->pending_sec_level == BT_SECURITY_HIGH)
2330 			secure = 1;
2331 		else
2332 			secure = 0;
2333 
2334 		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
2335 	}
2336 
2337 unlock:
2338 	hci_dev_unlock(hdev);
2339 }
2340 
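/* Link Key Request event: reply with a stored key for the peer unless the
 * key is a debug key (and debug keys are disabled) or is too weak for the
 * pending security level, in which case a negative reply is sent.
 */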
2341 static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2342 {
2343 	struct hci_ev_link_key_req *ev = (void *) skb->data;
2344 	struct hci_cp_link_key_reply cp;
2345 	struct hci_conn *conn;
2346 	struct link_key *key;
2347 
2348 	BT_DBG("%s", hdev->name);
2349 
2350 	if (!test_bit(HCI_LINK_KEYS, &hdev->flags))
2351 		return;
2352 
2353 	hci_dev_lock(hdev);
2354 
2355 	key = hci_find_link_key(hdev, &ev->bdaddr);
2356 	if (!key) {
2357 		BT_DBG("%s link key not found for %s", hdev->name,
2358 							batostr(&ev->bdaddr));
2359 		goto not_found;
2360 	}
2361 
2362 	BT_DBG("%s found key type %u for %s", hdev->name, key->type,
2363 							batostr(&ev->bdaddr));
2364 
2365 	if (!test_bit(HCI_DEBUG_KEYS, &hdev->flags) &&
2366 				key->type == HCI_LK_DEBUG_COMBINATION) {
2367 		BT_DBG("%s ignoring debug key", hdev->name);
2368 		goto not_found;
2369 	}
2370 
2371 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2372 	if (conn) {
2373 		if (key->type == HCI_LK_UNAUTH_COMBINATION &&
2374 				conn->auth_type != 0xff &&
2375 				(conn->auth_type & 0x01)) {
2376 			BT_DBG("%s ignoring unauthenticated key", hdev->name);
2377 			goto not_found;
2378 		}
2379 
2380 		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
2381 				conn->pending_sec_level == BT_SECURITY_HIGH) {
2382 			BT_DBG("%s ignoring key unauthenticated for high security",
2383 							hdev->name);
2384 			goto not_found;
2385 		}
2386 
2387 		conn->key_type = key->type;
2388 		conn->pin_length = key->pin_len;
2389 	}
2390 
2391 	bacpy(&cp.bdaddr, &ev->bdaddr);
2392 	memcpy(cp.link_key, key->val, 16);
2393 
2394 	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
2395 
2396 	hci_dev_unlock(hdev);
2397 
2398 	return;
2399 
2400 not_found:
2401 	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
2402 	hci_dev_unlock(hdev);
2403 }
2404 
2405 static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
2406 {
2407 	struct hci_ev_link_key_notify *ev = (void *) skb->data;
2408 	struct hci_conn *conn;
2409 	u8 pin_len = 0;
2410 
2411 	BT_DBG("%s", hdev->name);
2412 
2413 	hci_dev_lock(hdev);
2414 
2415 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2416 	if (conn) {
2417 		hci_conn_hold(conn);
2418 		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2419 		pin_len = conn->pin_length;
2420 
2421 		if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
2422 			conn->key_type = ev->key_type;
2423 
2424 		hci_conn_put(conn);
2425 	}
2426 
2427 	if (test_bit(HCI_LINK_KEYS, &hdev->flags))
2428 		hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
2429 							ev->key_type, pin_len);
2430 
2431 	hci_dev_unlock(hdev);
2432 }
2433 
2434 static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
2435 {
2436 	struct hci_ev_clock_offset *ev = (void *) skb->data;
2437 	struct hci_conn *conn;
2438 
2439 	BT_DBG("%s status %d", hdev->name, ev->status);
2440 
2441 	hci_dev_lock(hdev);
2442 
2443 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2444 	if (conn && !ev->status) {
2445 		struct inquiry_entry *ie;
2446 
2447 		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2448 		if (ie) {
2449 			ie->data.clock_offset = ev->clock_offset;
2450 			ie->timestamp = jiffies;
2451 		}
2452 	}
2453 
2454 	hci_dev_unlock(hdev);
2455 }
2456 
2457 static inline void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2458 {
2459 	struct hci_ev_pkt_type_change *ev = (void *) skb->data;
2460 	struct hci_conn *conn;
2461 
2462 	BT_DBG("%s status %d", hdev->name, ev->status);
2463 
2464 	hci_dev_lock(hdev);
2465 
2466 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2467 	if (conn && !ev->status)
2468 		conn->pkt_type = __le16_to_cpu(ev->pkt_type);
2469 
2470 	hci_dev_unlock(hdev);
2471 }
2472 
2473 static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
2474 {
2475 	struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
2476 	struct inquiry_entry *ie;
2477 
2478 	BT_DBG("%s", hdev->name);
2479 
2480 	hci_dev_lock(hdev);
2481 
2482 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2483 	if (ie) {
2484 		ie->data.pscan_rep_mode = ev->pscan_rep_mode;
2485 		ie->timestamp = jiffies;
2486 	}
2487 
2488 	hci_dev_unlock(hdev);
2489 }
2490 
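/* Inquiry Result with RSSI event: the per-response size tells whether the
 * entries carry a pscan_mode field; either way each response updates the
 * inquiry cache and is reported to the management interface.
 */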
2491 static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct sk_buff *skb)
2492 {
2493 	struct inquiry_data data;
2494 	int num_rsp = *((__u8 *) skb->data);
2495 
2496 	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2497 
2498 	if (!num_rsp)
2499 		return;
2500 
2501 	hci_dev_lock(hdev);
2502 
2503 	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
2504 		struct inquiry_info_with_rssi_and_pscan_mode *info;
2505 		info = (void *) (skb->data + 1);
2506 
2507 		for (; num_rsp; num_rsp--, info++) {
2508 			bacpy(&data.bdaddr, &info->bdaddr);
2509 			data.pscan_rep_mode	= info->pscan_rep_mode;
2510 			data.pscan_period_mode	= info->pscan_period_mode;
2511 			data.pscan_mode		= info->pscan_mode;
2512 			memcpy(data.dev_class, info->dev_class, 3);
2513 			data.clock_offset	= info->clock_offset;
2514 			data.rssi		= info->rssi;
2515 			data.ssp_mode		= 0x00;
2516 			hci_inquiry_cache_update(hdev, &data);
2517 			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2518 						info->dev_class, info->rssi,
2519 						NULL);
2520 		}
2521 	} else {
2522 		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
2523 
2524 		for (; num_rsp; num_rsp--, info++) {
2525 			bacpy(&data.bdaddr, &info->bdaddr);
2526 			data.pscan_rep_mode	= info->pscan_rep_mode;
2527 			data.pscan_period_mode	= info->pscan_period_mode;
2528 			data.pscan_mode		= 0x00;
2529 			memcpy(data.dev_class, info->dev_class, 3);
2530 			data.clock_offset	= info->clock_offset;
2531 			data.rssi		= info->rssi;
2532 			data.ssp_mode		= 0x00;
2533 			hci_inquiry_cache_update(hdev, &data);
2534 			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2535 						info->dev_class, info->rssi,
2536 						NULL);
2537 		}
2538 	}
2539 
2540 	hci_dev_unlock(hdev);
2541 }
2542 
2543 static inline void hci_remote_ext_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
2544 {
2545 	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
2546 	struct hci_conn *conn;
2547 
2548 	BT_DBG("%s", hdev->name);
2549 
2550 	hci_dev_lock(hdev);
2551 
2552 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2553 	if (!conn)
2554 		goto unlock;
2555 
2556 	if (!ev->status && ev->page == 0x01) {
2557 		struct inquiry_entry *ie;
2558 
2559 		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2560 		if (ie)
2561 			ie->data.ssp_mode = (ev->features[0] & 0x01);
2562 
2563 		conn->ssp_mode = (ev->features[0] & 0x01);
2564 	}
2565 
2566 	if (conn->state != BT_CONFIG)
2567 		goto unlock;
2568 
2569 	if (!ev->status) {
2570 		struct hci_cp_remote_name_req cp;
2571 		memset(&cp, 0, sizeof(cp));
2572 		bacpy(&cp.bdaddr, &conn->dst);
2573 		cp.pscan_rep_mode = 0x02;
2574 		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2575 	}
2576 
2577 	if (!hci_outgoing_auth_needed(hdev, conn)) {
2578 		conn->state = BT_CONNECTED;
2579 		hci_proto_connect_cfm(conn, ev->status);
2580 		hci_conn_put(conn);
2581 	}
2582 
2583 unlock:
2584 	hci_dev_unlock(hdev);
2585 }
2586 
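/* Synchronous Connection Complete event: finish SCO/eSCO setup on success;
 * for a few rejection errors on our first outgoing attempt, retry the setup
 * with an adjusted packet type selection before giving up.
 */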
2587 static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2588 {
2589 	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
2590 	struct hci_conn *conn;
2591 
2592 	BT_DBG("%s status %d", hdev->name, ev->status);
2593 
2594 	hci_dev_lock(hdev);
2595 
2596 	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2597 	if (!conn) {
2598 		if (ev->link_type == ESCO_LINK)
2599 			goto unlock;
2600 
2601 		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
2602 		if (!conn)
2603 			goto unlock;
2604 
2605 		conn->type = SCO_LINK;
2606 	}
2607 
2608 	switch (ev->status) {
2609 	case 0x00:
2610 		conn->handle = __le16_to_cpu(ev->handle);
2611 		conn->state  = BT_CONNECTED;
2612 
2613 		hci_conn_hold_device(conn);
2614 		hci_conn_add_sysfs(conn);
2615 		break;
2616 
2617 	case 0x11:	/* Unsupported Feature or Parameter Value */
2618 	case 0x1c:	/* SCO interval rejected */
2619 	case 0x1a:	/* Unsupported Remote Feature */
2620 	case 0x1f:	/* Unspecified error */
2621 		if (conn->out && conn->attempt < 2) {
2622 			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
2623 					(hdev->esco_type & EDR_ESCO_MASK);
2624 			hci_setup_sync(conn, conn->link->handle);
2625 			goto unlock;
2626 		}
2627 		/* fall through */
2628 
2629 	default:
2630 		conn->state = BT_CLOSED;
2631 		break;
2632 	}
2633 
2634 	hci_proto_connect_cfm(conn, ev->status);
2635 	if (ev->status)
2636 		hci_conn_del(conn);
2637 
2638 unlock:
2639 	hci_dev_unlock(hdev);
2640 }
2641 
2642 static inline void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb)
2643 {
2644 	BT_DBG("%s", hdev->name);
2645 }
2646 
2647 static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
2648 {
2649 	struct hci_ev_sniff_subrate *ev = (void *) skb->data;
2650 
2651 	BT_DBG("%s status %d", hdev->name, ev->status);
2652 }
2653 
2654 static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2655 {
2656 	struct inquiry_data data;
2657 	struct extended_inquiry_info *info = (void *) (skb->data + 1);
2658 	int num_rsp = *((__u8 *) skb->data);
2659 
2660 	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2661 
2662 	if (!num_rsp)
2663 		return;
2664 
2665 	hci_dev_lock(hdev);
2666 
2667 	for (; num_rsp; num_rsp--, info++) {
2668 		bacpy(&data.bdaddr, &info->bdaddr);
2669 		data.pscan_rep_mode	= info->pscan_rep_mode;
2670 		data.pscan_period_mode	= info->pscan_period_mode;
2671 		data.pscan_mode		= 0x00;
2672 		memcpy(data.dev_class, info->dev_class, 3);
2673 		data.clock_offset	= info->clock_offset;
2674 		data.rssi		= info->rssi;
2675 		data.ssp_mode		= 0x01;
2676 		hci_inquiry_cache_update(hdev, &data);
2677 		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2678 				info->dev_class, info->rssi, info->data);
2679 	}
2680 
2681 	hci_dev_unlock(hdev);
2682 }
2683 
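/* Work out the authentication requirements to advertise in an IO Capability
 * Reply, based on what the remote side requested and on our own pending
 * authentication type.
 */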
2684 static inline u8 hci_get_auth_req(struct hci_conn *conn)
2685 {
2686 	/* If remote requests dedicated bonding, follow that lead */
2687 	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
2688 		/* If both remote and local IO capabilities allow MITM
2689 		 * protection then require it, otherwise don't */
2690 		if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
2691 			return 0x02;
2692 		else
2693 			return 0x03;
2694 	}
2695 
2696 	/* If remote requests no-bonding, follow that lead */
2697 	if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
2698 		return conn->remote_auth | (conn->auth_type & 0x01);
2699 
2700 	return conn->auth_type;
2701 }
2702 
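/* IO Capability Request event: reply with our IO capability, derived
 * authentication requirements and OOB data presence when pairing is
 * acceptable, otherwise send a negative reply (pairing not allowed).
 */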
2703 static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2704 {
2705 	struct hci_ev_io_capa_request *ev = (void *) skb->data;
2706 	struct hci_conn *conn;
2707 
2708 	BT_DBG("%s", hdev->name);
2709 
2710 	hci_dev_lock(hdev);
2711 
2712 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2713 	if (!conn)
2714 		goto unlock;
2715 
2716 	hci_conn_hold(conn);
2717 
2718 	if (!test_bit(HCI_MGMT, &hdev->flags))
2719 		goto unlock;
2720 
2721 	if (test_bit(HCI_PAIRABLE, &hdev->flags) ||
2722 			(conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
2723 		struct hci_cp_io_capability_reply cp;
2724 
2725 		bacpy(&cp.bdaddr, &ev->bdaddr);
2726 		cp.capability = conn->io_capability;
2727 		conn->auth_type = hci_get_auth_req(conn);
2728 		cp.authentication = conn->auth_type;
2729 
2730 		if ((conn->out == 0x01 || conn->remote_oob == 0x01) &&
2731 				hci_find_remote_oob_data(hdev, &conn->dst))
2732 			cp.oob_data = 0x01;
2733 		else
2734 			cp.oob_data = 0x00;
2735 
2736 		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
2737 							sizeof(cp), &cp);
2738 	} else {
2739 		struct hci_cp_io_capability_neg_reply cp;
2740 
2741 		bacpy(&cp.bdaddr, &ev->bdaddr);
2742 		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
2743 
2744 		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
2745 							sizeof(cp), &cp);
2746 	}
2747 
2748 unlock:
2749 	hci_dev_unlock(hdev);
2750 }
2751 
2752 static inline void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
2753 {
2754 	struct hci_ev_io_capa_reply *ev = (void *) skb->data;
2755 	struct hci_conn *conn;
2756 
2757 	BT_DBG("%s", hdev->name);
2758 
2759 	hci_dev_lock(hdev);
2760 
2761 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2762 	if (!conn)
2763 		goto unlock;
2764 
2765 	conn->remote_cap = ev->capability;
2766 	conn->remote_oob = ev->oob_data;
2767 	conn->remote_auth = ev->authentication;
2768 
2769 unlock:
2770 	hci_dev_unlock(hdev);
2771 }
2772 
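/* User Confirmation Request event: reject if we need MITM protection the
 * remote cannot provide, auto-accept (possibly after a configured delay)
 * when neither side needs MITM, otherwise ask user space via mgmt.
 */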
2773 static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
2774 							struct sk_buff *skb)
2775 {
2776 	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
2777 	int loc_mitm, rem_mitm, confirm_hint = 0;
2778 	struct hci_conn *conn;
2779 
2780 	BT_DBG("%s", hdev->name);
2781 
2782 	hci_dev_lock(hdev);
2783 
2784 	if (!test_bit(HCI_MGMT, &hdev->flags))
2785 		goto unlock;
2786 
2787 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2788 	if (!conn)
2789 		goto unlock;
2790 
2791 	loc_mitm = (conn->auth_type & 0x01);
2792 	rem_mitm = (conn->remote_auth & 0x01);
2793 
2794 	/* If we require MITM but the remote device can't provide that
2795 	 * (it has NoInputNoOutput) then reject the confirmation
2796 	 * request. The only exception is when we're dedicated bonding
2797 	 * initiators (connect_cfm_cb set) since then we always have the MITM
2798 	 * bit set. */
2799 	if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
2800 		BT_DBG("Rejecting request: remote device can't provide MITM");
2801 		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
2802 					sizeof(ev->bdaddr), &ev->bdaddr);
2803 		goto unlock;
2804 	}
2805 
2806 	/* If no side requires MITM protection, auto-accept */
2807 	if ((!loc_mitm || conn->remote_cap == 0x03) &&
2808 				(!rem_mitm || conn->io_capability == 0x03)) {
2809 
2810 		/* If we're not the initiator, request authorization to
2811 		 * proceed from user space (mgmt_user_confirm with
2812 		 * confirm_hint set to 1). */
2813 		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
2814 			BT_DBG("Confirming auto-accept as acceptor");
2815 			confirm_hint = 1;
2816 			goto confirm;
2817 		}
2818 
2819 		BT_DBG("Auto-accept of user confirmation with %ums delay",
2820 						hdev->auto_accept_delay);
2821 
2822 		if (hdev->auto_accept_delay > 0) {
2823 			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
2824 			mod_timer(&conn->auto_accept_timer, jiffies + delay);
2825 			goto unlock;
2826 		}
2827 
2828 		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
2829 						sizeof(ev->bdaddr), &ev->bdaddr);
2830 		goto unlock;
2831 	}
2832 
2833 confirm:
2834 	mgmt_user_confirm_request(hdev, &ev->bdaddr, ev->passkey,
2835 								confirm_hint);
2836 
2837 unlock:
2838 	hci_dev_unlock(hdev);
2839 }
2840 
2841 static inline void hci_user_passkey_request_evt(struct hci_dev *hdev,
2842 							struct sk_buff *skb)
2843 {
2844 	struct hci_ev_user_passkey_req *ev = (void *) skb->data;
2845 
2846 	BT_DBG("%s", hdev->name);
2847 
2848 	hci_dev_lock(hdev);
2849 
2850 	if (test_bit(HCI_MGMT, &hdev->flags))
2851 		mgmt_user_passkey_request(hdev, &ev->bdaddr);
2852 
2853 	hci_dev_unlock(hdev);
2854 }
2855 
2856 static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2857 {
2858 	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
2859 	struct hci_conn *conn;
2860 
2861 	BT_DBG("%s", hdev->name);
2862 
2863 	hci_dev_lock(hdev);
2864 
2865 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2866 	if (!conn)
2867 		goto unlock;
2868 
2869 	/* To avoid duplicate auth_failed events to user space we check
2870 	 * the HCI_CONN_AUTH_PEND flag, which will be set if we
2871 	 * initiated the authentication. A traditional auth_complete
2872 	 * event is always produced when we are the initiator and is
2873 	 * also mapped to the mgmt_auth_failed event */
2874 	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->pend) && ev->status != 0)
2875 		mgmt_auth_failed(hdev, &conn->dst, ev->status);
2876 
2877 	hci_conn_put(conn);
2878 
2879 unlock:
2880 	hci_dev_unlock(hdev);
2881 }
2882 
2883 static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
2884 {
2885 	struct hci_ev_remote_host_features *ev = (void *) skb->data;
2886 	struct inquiry_entry *ie;
2887 
2888 	BT_DBG("%s", hdev->name);
2889 
2890 	hci_dev_lock(hdev);
2891 
2892 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2893 	if (ie)
2894 		ie->data.ssp_mode = (ev->features[0] & 0x01);
2895 
2896 	hci_dev_unlock(hdev);
2897 }
2898 
2899 static inline void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
2900 							struct sk_buff *skb)
2901 {
2902 	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
2903 	struct oob_data *data;
2904 
2905 	BT_DBG("%s", hdev->name);
2906 
2907 	hci_dev_lock(hdev);
2908 
2909 	if (!test_bit(HCI_MGMT, &hdev->flags))
2910 		goto unlock;
2911 
2912 	data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
2913 	if (data) {
2914 		struct hci_cp_remote_oob_data_reply cp;
2915 
2916 		bacpy(&cp.bdaddr, &ev->bdaddr);
2917 		memcpy(cp.hash, data->hash, sizeof(cp.hash));
2918 		memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));
2919 
2920 		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
2921 									&cp);
2922 	} else {
2923 		struct hci_cp_remote_oob_data_neg_reply cp;
2924 
2925 		bacpy(&cp.bdaddr, &ev->bdaddr);
2926 		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
2927 									&cp);
2928 	}
2929 
2930 unlock:
2931 	hci_dev_unlock(hdev);
2932 }
2933 
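/* LE Connection Complete event: create or look up the LE connection object
 * and notify the management interface and upper protocols of the result.
 */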
2934 static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2935 {
2936 	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
2937 	struct hci_conn *conn;
2938 
2939 	BT_DBG("%s status %d", hdev->name, ev->status);
2940 
2941 	hci_dev_lock(hdev);
2942 
2943 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr);
2944 	if (!conn) {
2945 		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
2946 		if (!conn) {
2947 			BT_ERR("No memory for new connection");
2948 			hci_dev_unlock(hdev);
2949 			return;
2950 		}
2951 
2952 		conn->dst_type = ev->bdaddr_type;
2953 	}
2954 
2955 	if (ev->status) {
2956 		mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
2957 						conn->dst_type, ev->status);
2958 		hci_proto_connect_cfm(conn, ev->status);
2959 		conn->state = BT_CLOSED;
2960 		hci_conn_del(conn);
2961 		goto unlock;
2962 	}
2963 
2964 	mgmt_connected(hdev, &ev->bdaddr, conn->type, conn->dst_type);
2965 
2966 	conn->sec_level = BT_SECURITY_LOW;
2967 	conn->handle = __le16_to_cpu(ev->handle);
2968 	conn->state = BT_CONNECTED;
2969 
2970 	hci_conn_hold_device(conn);
2971 	hci_conn_add_sysfs(conn);
2972 
2973 	hci_proto_connect_cfm(conn, ev->status);
2974 
2975 unlock:
2976 	hci_dev_unlock(hdev);
2977 }
2978 
2979 static inline void hci_le_adv_report_evt(struct hci_dev *hdev,
2980 						struct sk_buff *skb)
2981 {
2982 	u8 num_reports = skb->data[0];
2983 	void *ptr = &skb->data[1];
2984 
2985 	hci_dev_lock(hdev);
2986 
2987 	while (num_reports--) {
2988 		struct hci_ev_le_advertising_info *ev = ptr;
2989 
2990 		hci_add_adv_entry(hdev, ev);
2991 
2992 		ptr += sizeof(*ev) + ev->length + 1;
2993 	}
2994 
2995 	hci_dev_unlock(hdev);
2996 }
2997 
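/* LE Long Term Key Request event: reply with a stored LTK matching the
 * encrypted diversifier and random value, or send a negative reply if
 * no such key is known.
 */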
2998 static inline void hci_le_ltk_request_evt(struct hci_dev *hdev,
2999 						struct sk_buff *skb)
3000 {
3001 	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
3002 	struct hci_cp_le_ltk_reply cp;
3003 	struct hci_cp_le_ltk_neg_reply neg;
3004 	struct hci_conn *conn;
3005 	struct link_key *ltk;
3006 
3007 	BT_DBG("%s handle %d", hdev->name, __le16_to_cpu(ev->handle));
3008 
3009 	hci_dev_lock(hdev);
3010 
3011 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3012 	if (conn == NULL)
3013 		goto not_found;
3014 
3015 	ltk = hci_find_ltk(hdev, ev->ediv, ev->random);
3016 	if (ltk == NULL)
3017 		goto not_found;
3018 
3019 	memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
3020 	cp.handle = cpu_to_le16(conn->handle);
3021 	conn->pin_length = ltk->pin_len;
3022 
3023 	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
3024 
3025 	hci_dev_unlock(hdev);
3026 
3027 	return;
3028 
3029 not_found:
3030 	neg.handle = ev->handle;
3031 	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
3032 	hci_dev_unlock(hdev);
3033 }
3034 
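/* LE Meta event: strip the meta header and dispatch on the subevent code. */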
3035 static inline void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
3036 {
3037 	struct hci_ev_le_meta *le_ev = (void *) skb->data;
3038 
3039 	skb_pull(skb, sizeof(*le_ev));
3040 
3041 	switch (le_ev->subevent) {
3042 	case HCI_EV_LE_CONN_COMPLETE:
3043 		hci_le_conn_complete_evt(hdev, skb);
3044 		break;
3045 
3046 	case HCI_EV_LE_ADVERTISING_REPORT:
3047 		hci_le_adv_report_evt(hdev, skb);
3048 		break;
3049 
3050 	case HCI_EV_LE_LTK_REQ:
3051 		hci_le_ltk_request_evt(hdev, skb);
3052 		break;
3053 
3054 	default:
3055 		break;
3056 	}
3057 }
3058 
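/* Entry point for all HCI event packets received from the controller:
 * demultiplex on the event code, then free the skb and update statistics.
 */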
3059 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
3060 {
3061 	struct hci_event_hdr *hdr = (void *) skb->data;
3062 	__u8 event = hdr->evt;
3063 
3064 	skb_pull(skb, HCI_EVENT_HDR_SIZE);
3065 
3066 	switch (event) {
3067 	case HCI_EV_INQUIRY_COMPLETE:
3068 		hci_inquiry_complete_evt(hdev, skb);
3069 		break;
3070 
3071 	case HCI_EV_INQUIRY_RESULT:
3072 		hci_inquiry_result_evt(hdev, skb);
3073 		break;
3074 
3075 	case HCI_EV_CONN_COMPLETE:
3076 		hci_conn_complete_evt(hdev, skb);
3077 		break;
3078 
3079 	case HCI_EV_CONN_REQUEST:
3080 		hci_conn_request_evt(hdev, skb);
3081 		break;
3082 
3083 	case HCI_EV_DISCONN_COMPLETE:
3084 		hci_disconn_complete_evt(hdev, skb);
3085 		break;
3086 
3087 	case HCI_EV_AUTH_COMPLETE:
3088 		hci_auth_complete_evt(hdev, skb);
3089 		break;
3090 
3091 	case HCI_EV_REMOTE_NAME:
3092 		hci_remote_name_evt(hdev, skb);
3093 		break;
3094 
3095 	case HCI_EV_ENCRYPT_CHANGE:
3096 		hci_encrypt_change_evt(hdev, skb);
3097 		break;
3098 
3099 	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
3100 		hci_change_link_key_complete_evt(hdev, skb);
3101 		break;
3102 
3103 	case HCI_EV_REMOTE_FEATURES:
3104 		hci_remote_features_evt(hdev, skb);
3105 		break;
3106 
3107 	case HCI_EV_REMOTE_VERSION:
3108 		hci_remote_version_evt(hdev, skb);
3109 		break;
3110 
3111 	case HCI_EV_QOS_SETUP_COMPLETE:
3112 		hci_qos_setup_complete_evt(hdev, skb);
3113 		break;
3114 
3115 	case HCI_EV_CMD_COMPLETE:
3116 		hci_cmd_complete_evt(hdev, skb);
3117 		break;
3118 
3119 	case HCI_EV_CMD_STATUS:
3120 		hci_cmd_status_evt(hdev, skb);
3121 		break;
3122 
3123 	case HCI_EV_ROLE_CHANGE:
3124 		hci_role_change_evt(hdev, skb);
3125 		break;
3126 
3127 	case HCI_EV_NUM_COMP_PKTS:
3128 		hci_num_comp_pkts_evt(hdev, skb);
3129 		break;
3130 
3131 	case HCI_EV_MODE_CHANGE:
3132 		hci_mode_change_evt(hdev, skb);
3133 		break;
3134 
3135 	case HCI_EV_PIN_CODE_REQ:
3136 		hci_pin_code_request_evt(hdev, skb);
3137 		break;
3138 
3139 	case HCI_EV_LINK_KEY_REQ:
3140 		hci_link_key_request_evt(hdev, skb);
3141 		break;
3142 
3143 	case HCI_EV_LINK_KEY_NOTIFY:
3144 		hci_link_key_notify_evt(hdev, skb);
3145 		break;
3146 
3147 	case HCI_EV_CLOCK_OFFSET:
3148 		hci_clock_offset_evt(hdev, skb);
3149 		break;
3150 
3151 	case HCI_EV_PKT_TYPE_CHANGE:
3152 		hci_pkt_type_change_evt(hdev, skb);
3153 		break;
3154 
3155 	case HCI_EV_PSCAN_REP_MODE:
3156 		hci_pscan_rep_mode_evt(hdev, skb);
3157 		break;
3158 
3159 	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
3160 		hci_inquiry_result_with_rssi_evt(hdev, skb);
3161 		break;
3162 
3163 	case HCI_EV_REMOTE_EXT_FEATURES:
3164 		hci_remote_ext_features_evt(hdev, skb);
3165 		break;
3166 
3167 	case HCI_EV_SYNC_CONN_COMPLETE:
3168 		hci_sync_conn_complete_evt(hdev, skb);
3169 		break;
3170 
3171 	case HCI_EV_SYNC_CONN_CHANGED:
3172 		hci_sync_conn_changed_evt(hdev, skb);
3173 		break;
3174 
3175 	case HCI_EV_SNIFF_SUBRATE:
3176 		hci_sniff_subrate_evt(hdev, skb);
3177 		break;
3178 
3179 	case HCI_EV_EXTENDED_INQUIRY_RESULT:
3180 		hci_extended_inquiry_result_evt(hdev, skb);
3181 		break;
3182 
3183 	case HCI_EV_IO_CAPA_REQUEST:
3184 		hci_io_capa_request_evt(hdev, skb);
3185 		break;
3186 
3187 	case HCI_EV_IO_CAPA_REPLY:
3188 		hci_io_capa_reply_evt(hdev, skb);
3189 		break;
3190 
3191 	case HCI_EV_USER_CONFIRM_REQUEST:
3192 		hci_user_confirm_request_evt(hdev, skb);
3193 		break;
3194 
3195 	case HCI_EV_USER_PASSKEY_REQUEST:
3196 		hci_user_passkey_request_evt(hdev, skb);
3197 		break;
3198 
3199 	case HCI_EV_SIMPLE_PAIR_COMPLETE:
3200 		hci_simple_pair_complete_evt(hdev, skb);
3201 		break;
3202 
3203 	case HCI_EV_REMOTE_HOST_FEATURES:
3204 		hci_remote_host_features_evt(hdev, skb);
3205 		break;
3206 
3207 	case HCI_EV_LE_META:
3208 		hci_le_meta_evt(hdev, skb);
3209 		break;
3210 
3211 	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
3212 		hci_remote_oob_data_request_evt(hdev, skb);
3213 		break;
3214 
3215 	default:
3216 		BT_DBG("%s event 0x%x", hdev->name, event);
3217 		break;
3218 	}
3219 
3220 	kfree_skb(skb);
3221 	hdev->stat.evt_rx++;
3222 }
3223 
3224 /* Generate internal stack event */
3225 void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
3226 {
3227 	struct hci_event_hdr *hdr;
3228 	struct hci_ev_stack_internal *ev;
3229 	struct sk_buff *skb;
3230 
3231 	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
3232 	if (!skb)
3233 		return;
3234 
3235 	hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
3236 	hdr->evt  = HCI_EV_STACK_INTERNAL;
3237 	hdr->plen = sizeof(*ev) + dlen;
3238 
3239 	ev  = (void *) skb_put(skb, sizeof(*ev) + dlen);
3240 	ev->type = type;
3241 	memcpy(ev->data, data, dlen);
3242 
3243 	bt_cb(skb)->incoming = 1;
3244 	__net_timestamp(skb);
3245 
3246 	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
3247 	skb->dev = (void *) hdev;
3248 	hci_send_to_sock(hdev, skb, NULL);
3249 	kfree_skb(skb);
3250 }
3251 
3252 module_param(enable_le, bool, 0644);
3253 MODULE_PARM_DESC(enable_le, "Enable LE support");
3254