xref: /openbmc/linux/net/bluetooth/hci_event.c (revision 1ab142d4)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4 
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI event handling. */
26 
27 #include <linux/module.h>
28 
29 #include <linux/types.h>
30 #include <linux/errno.h>
31 #include <linux/kernel.h>
32 #include <linux/slab.h>
33 #include <linux/poll.h>
34 #include <linux/fcntl.h>
35 #include <linux/init.h>
36 #include <linux/skbuff.h>
37 #include <linux/interrupt.h>
38 #include <net/sock.h>
39 
40 #include <asm/system.h>
41 #include <linux/uaccess.h>
42 #include <asm/unaligned.h>
43 
44 #include <net/bluetooth/bluetooth.h>
45 #include <net/bluetooth/hci_core.h>
46 
47 /* Handle HCI Event packets */
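/* Naming convention used throughout this file:
 * hci_cc_*  - handlers for Command Complete events of a specific opcode,
 *             parsing the return parameters from skb->data,
 * hci_cs_*  - handlers for Command Status events, which carry only a status,
 * hci_*_evt - handlers for the remaining HCI events.
 */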
48 
49 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
50 {
51 	__u8 status = *((__u8 *) skb->data);
52 
53 	BT_DBG("%s status 0x%x", hdev->name, status);
54 
55 	if (status) {
56 		hci_dev_lock(hdev);
57 		mgmt_stop_discovery_failed(hdev, status);
58 		hci_dev_unlock(hdev);
59 		return;
60 	}
61 
62 	clear_bit(HCI_INQUIRY, &hdev->flags);
63 
64 	hci_dev_lock(hdev);
65 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
66 	hci_dev_unlock(hdev);
67 
68 	hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);
69 
70 	hci_conn_check_pending(hdev);
71 }
72 
73 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
74 {
75 	__u8 status = *((__u8 *) skb->data);
76 
77 	BT_DBG("%s status 0x%x", hdev->name, status);
78 
79 	if (status)
80 		return;
81 
82 	hci_conn_check_pending(hdev);
83 }
84 
85 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev, struct sk_buff *skb)
86 {
87 	BT_DBG("%s", hdev->name);
88 }
89 
90 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
91 {
92 	struct hci_rp_role_discovery *rp = (void *) skb->data;
93 	struct hci_conn *conn;
94 
95 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
96 
97 	if (rp->status)
98 		return;
99 
100 	hci_dev_lock(hdev);
101 
102 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
103 	if (conn) {
104 		if (rp->role)
105 			conn->link_mode &= ~HCI_LM_MASTER;
106 		else
107 			conn->link_mode |= HCI_LM_MASTER;
108 	}
109 
110 	hci_dev_unlock(hdev);
111 }
112 
113 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
114 {
115 	struct hci_rp_read_link_policy *rp = (void *) skb->data;
116 	struct hci_conn *conn;
117 
118 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
119 
120 	if (rp->status)
121 		return;
122 
123 	hci_dev_lock(hdev);
124 
125 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
126 	if (conn)
127 		conn->link_policy = __le16_to_cpu(rp->policy);
128 
129 	hci_dev_unlock(hdev);
130 }
131 
132 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
133 {
134 	struct hci_rp_write_link_policy *rp = (void *) skb->data;
135 	struct hci_conn *conn;
136 	void *sent;
137 
138 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
139 
140 	if (rp->status)
141 		return;
142 
143 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
144 	if (!sent)
145 		return;
146 
147 	hci_dev_lock(hdev);
148 
149 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
150 	if (conn)
151 		conn->link_policy = get_unaligned_le16(sent + 2);
152 
153 	hci_dev_unlock(hdev);
154 }
155 
156 static void hci_cc_read_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
157 {
158 	struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
159 
160 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
161 
162 	if (rp->status)
163 		return;
164 
165 	hdev->link_policy = __le16_to_cpu(rp->policy);
166 }
167 
168 static void hci_cc_write_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
169 {
170 	__u8 status = *((__u8 *) skb->data);
171 	void *sent;
172 
173 	BT_DBG("%s status 0x%x", hdev->name, status);
174 
175 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
176 	if (!sent)
177 		return;
178 
179 	if (!status)
180 		hdev->link_policy = get_unaligned_le16(sent);
181 
182 	hci_req_complete(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, status);
183 }
184 
185 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
186 {
187 	__u8 status = *((__u8 *) skb->data);
188 
189 	BT_DBG("%s status 0x%x", hdev->name, status);
190 
191 	clear_bit(HCI_RESET, &hdev->flags);
192 
193 	hci_req_complete(hdev, HCI_OP_RESET, status);
194 
195 	/* Reset all non-persistent flags */
196 	hdev->dev_flags &= ~(BIT(HCI_LE_SCAN) | BIT(HCI_PENDING_CLASS));
197 
198 	hdev->discovery.state = DISCOVERY_STOPPED;
199 }
200 
201 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
202 {
203 	__u8 status = *((__u8 *) skb->data);
204 	void *sent;
205 
206 	BT_DBG("%s status 0x%x", hdev->name, status);
207 
208 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
209 	if (!sent)
210 		return;
211 
212 	hci_dev_lock(hdev);
213 
214 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
215 		mgmt_set_local_name_complete(hdev, sent, status);
216 	else if (!status)
217 		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
218 
219 	hci_dev_unlock(hdev);
220 
221 	hci_req_complete(hdev, HCI_OP_WRITE_LOCAL_NAME, status);
222 }
223 
224 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
225 {
226 	struct hci_rp_read_local_name *rp = (void *) skb->data;
227 
228 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
229 
230 	if (rp->status)
231 		return;
232 
233 	if (test_bit(HCI_SETUP, &hdev->dev_flags))
234 		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
235 }
236 
237 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
238 {
239 	__u8 status = *((__u8 *) skb->data);
240 	void *sent;
241 
242 	BT_DBG("%s status 0x%x", hdev->name, status);
243 
244 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
245 	if (!sent)
246 		return;
247 
248 	if (!status) {
249 		__u8 param = *((__u8 *) sent);
250 
251 		if (param == AUTH_ENABLED)
252 			set_bit(HCI_AUTH, &hdev->flags);
253 		else
254 			clear_bit(HCI_AUTH, &hdev->flags);
255 	}
256 
257 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
258 		mgmt_auth_enable_complete(hdev, status);
259 
260 	hci_req_complete(hdev, HCI_OP_WRITE_AUTH_ENABLE, status);
261 }
262 
263 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
264 {
265 	__u8 status = *((__u8 *) skb->data);
266 	void *sent;
267 
268 	BT_DBG("%s status 0x%x", hdev->name, status);
269 
270 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
271 	if (!sent)
272 		return;
273 
274 	if (!status) {
275 		__u8 param = *((__u8 *) sent);
276 
277 		if (param)
278 			set_bit(HCI_ENCRYPT, &hdev->flags);
279 		else
280 			clear_bit(HCI_ENCRYPT, &hdev->flags);
281 	}
282 
283 	hci_req_complete(hdev, HCI_OP_WRITE_ENCRYPT_MODE, status);
284 }
285 
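/* Write Scan Enable complete: update the ISCAN/PSCAN flags to match the
 * parameter that was sent, notify mgmt about discoverable/connectable
 * changes, and (re)arm the discoverable timeout when inquiry scan has just
 * been enabled. On failure only mgmt is informed and the timeout is cleared.
 */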
286 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
287 {
288 	__u8 param, status = *((__u8 *) skb->data);
289 	int old_pscan, old_iscan;
290 	void *sent;
291 
292 	BT_DBG("%s status 0x%x", hdev->name, status);
293 
294 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
295 	if (!sent)
296 		return;
297 
298 	param = *((__u8 *) sent);
299 
300 	hci_dev_lock(hdev);
301 
302 	if (status != 0) {
303 		mgmt_write_scan_failed(hdev, param, status);
304 		hdev->discov_timeout = 0;
305 		goto done;
306 	}
307 
308 	old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
309 	old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
310 
311 	if (param & SCAN_INQUIRY) {
312 		set_bit(HCI_ISCAN, &hdev->flags);
313 		if (!old_iscan)
314 			mgmt_discoverable(hdev, 1);
315 		if (hdev->discov_timeout > 0) {
316 			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
317 			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
318 									to);
319 		}
320 	} else if (old_iscan)
321 		mgmt_discoverable(hdev, 0);
322 
323 	if (param & SCAN_PAGE) {
324 		set_bit(HCI_PSCAN, &hdev->flags);
325 		if (!old_pscan)
326 			mgmt_connectable(hdev, 1);
327 	} else if (old_pscan)
328 		mgmt_connectable(hdev, 0);
329 
330 done:
331 	hci_dev_unlock(hdev);
332 	hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status);
333 }
334 
335 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
336 {
337 	struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
338 
339 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
340 
341 	if (rp->status)
342 		return;
343 
344 	memcpy(hdev->dev_class, rp->dev_class, 3);
345 
346 	BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
347 		hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
348 }
349 
350 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
351 {
352 	__u8 status = *((__u8 *) skb->data);
353 	void *sent;
354 
355 	BT_DBG("%s status 0x%x", hdev->name, status);
356 
357 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
358 	if (!sent)
359 		return;
360 
361 	hci_dev_lock(hdev);
362 
363 	if (status == 0)
364 		memcpy(hdev->dev_class, sent, 3);
365 
366 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
367 		mgmt_set_class_of_dev_complete(hdev, sent, status);
368 
369 	hci_dev_unlock(hdev);
370 }
371 
372 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
373 {
374 	struct hci_rp_read_voice_setting *rp = (void *) skb->data;
375 	__u16 setting;
376 
377 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
378 
379 	if (rp->status)
380 		return;
381 
382 	setting = __le16_to_cpu(rp->voice_setting);
383 
384 	if (hdev->voice_setting == setting)
385 		return;
386 
387 	hdev->voice_setting = setting;
388 
389 	BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
390 
391 	if (hdev->notify)
392 		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
393 }
394 
395 static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
396 {
397 	__u8 status = *((__u8 *) skb->data);
398 	__u16 setting;
399 	void *sent;
400 
401 	BT_DBG("%s status 0x%x", hdev->name, status);
402 
403 	if (status)
404 		return;
405 
406 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
407 	if (!sent)
408 		return;
409 
410 	setting = get_unaligned_le16(sent);
411 
412 	if (hdev->voice_setting == setting)
413 		return;
414 
415 	hdev->voice_setting = setting;
416 
417 	BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
418 
419 	if (hdev->notify)
420 		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
421 }
422 
423 static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
424 {
425 	__u8 status = *((__u8 *) skb->data);
426 
427 	BT_DBG("%s status 0x%x", hdev->name, status);
428 
429 	hci_req_complete(hdev, HCI_OP_HOST_BUFFER_SIZE, status);
430 }
431 
432 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
433 {
434 	__u8 status = *((__u8 *) skb->data);
435 	void *sent;
436 
437 	BT_DBG("%s status 0x%x", hdev->name, status);
438 
439 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
440 	if (!sent)
441 		return;
442 
443 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
444 		mgmt_ssp_enable_complete(hdev, *((u8 *) sent), status);
445 	else if (!status) {
446 		if (*((u8 *) sent))
447 			set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
448 		else
449 			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
450 	}
451 }
452 
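/* Pick the Inquiry_Mode value written by hci_setup_inquiry_mode(): 2 when
 * the controller advertises extended inquiry results, 1 when it advertises
 * inquiry results with RSSI, and 1 for a handful of controllers (matched on
 * manufacturer, hci_rev and lmp_subver) that are special-cased here;
 * 0 (standard inquiry results) otherwise.
 */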
453 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
454 {
455 	if (hdev->features[6] & LMP_EXT_INQ)
456 		return 2;
457 
458 	if (hdev->features[3] & LMP_RSSI_INQ)
459 		return 1;
460 
461 	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
462 						hdev->lmp_subver == 0x0757)
463 		return 1;
464 
465 	if (hdev->manufacturer == 15) {
466 		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
467 			return 1;
468 		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
469 			return 1;
470 		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
471 			return 1;
472 	}
473 
474 	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
475 						hdev->lmp_subver == 0x1805)
476 		return 1;
477 
478 	return 0;
479 }
480 
481 static void hci_setup_inquiry_mode(struct hci_dev *hdev)
482 {
483 	u8 mode;
484 
485 	mode = hci_get_inquiry_mode(hdev);
486 
487 	hci_send_cmd(hdev, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
488 }
489 
490 static void hci_setup_event_mask(struct hci_dev *hdev)
491 {
492 	/* The second byte is 0xff instead of 0x9f (two reserved bits
493 	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
494 	 * command otherwise */
495 	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
496 
497 	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
498 	 * any event mask for pre-1.2 devices */
499 	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
500 		return;
501 
502 	events[4] |= 0x01; /* Flow Specification Complete */
503 	events[4] |= 0x02; /* Inquiry Result with RSSI */
504 	events[4] |= 0x04; /* Read Remote Extended Features Complete */
505 	events[5] |= 0x08; /* Synchronous Connection Complete */
506 	events[5] |= 0x10; /* Synchronous Connection Changed */
507 
508 	if (hdev->features[3] & LMP_RSSI_INQ)
509 		events[4] |= 0x04; /* Inquiry Result with RSSI */
510 
511 	if (hdev->features[5] & LMP_SNIFF_SUBR)
512 		events[5] |= 0x20; /* Sniff Subrating */
513 
514 	if (hdev->features[5] & LMP_PAUSE_ENC)
515 		events[5] |= 0x80; /* Encryption Key Refresh Complete */
516 
517 	if (hdev->features[6] & LMP_EXT_INQ)
518 		events[5] |= 0x40; /* Extended Inquiry Result */
519 
520 	if (hdev->features[6] & LMP_NO_FLUSH)
521 		events[7] |= 0x01; /* Enhanced Flush Complete */
522 
523 	if (hdev->features[7] & LMP_LSTO)
524 		events[6] |= 0x80; /* Link Supervision Timeout Changed */
525 
526 	if (hdev->features[6] & LMP_SIMPLE_PAIR) {
527 		events[6] |= 0x01;	/* IO Capability Request */
528 		events[6] |= 0x02;	/* IO Capability Response */
529 		events[6] |= 0x04;	/* User Confirmation Request */
530 		events[6] |= 0x08;	/* User Passkey Request */
531 		events[6] |= 0x10;	/* Remote OOB Data Request */
532 		events[6] |= 0x20;	/* Simple Pairing Complete */
533 		events[7] |= 0x04;	/* User Passkey Notification */
534 		events[7] |= 0x08;	/* Keypress Notification */
535 		events[7] |= 0x10;	/* Remote Host Supported
536 					 * Features Notification */
537 	}
538 
539 	if (hdev->features[4] & LMP_LE)
540 		events[7] |= 0x20;	/* LE Meta-Event */
541 
542 	hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
543 }
544 
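/* Initial configuration of a BR/EDR controller, run from
 * hci_cc_read_local_version() while HCI_INIT is set: set the event mask,
 * read the supported commands, enable SSP or clear the EIR, select the
 * inquiry mode, and read extended features and inquiry TX power where the
 * controller supports them.
 */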
545 static void hci_setup(struct hci_dev *hdev)
546 {
547 	if (hdev->dev_type != HCI_BREDR)
548 		return;
549 
550 	hci_setup_event_mask(hdev);
551 
552 	if (hdev->hci_ver > BLUETOOTH_VER_1_1)
553 		hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
554 
555 	if (hdev->features[6] & LMP_SIMPLE_PAIR) {
556 		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
557 			u8 mode = 0x01;
558 			hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE,
559 				     sizeof(mode), &mode);
560 		} else {
561 			struct hci_cp_write_eir cp;
562 
563 			memset(hdev->eir, 0, sizeof(hdev->eir));
564 			memset(&cp, 0, sizeof(cp));
565 
566 			hci_send_cmd(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
567 		}
568 	}
569 
570 	if (hdev->features[3] & LMP_RSSI_INQ)
571 		hci_setup_inquiry_mode(hdev);
572 
573 	if (hdev->features[7] & LMP_INQ_TX_PWR)
574 		hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
575 
576 	if (hdev->features[7] & LMP_EXTFEATURES) {
577 		struct hci_cp_read_local_ext_features cp;
578 
579 		cp.page = 0x01;
580 		hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, sizeof(cp),
581 			     &cp);
582 	}
583 
584 	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
585 		u8 enable = 1;
586 		hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
587 			     &enable);
588 	}
589 }
590 
591 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
592 {
593 	struct hci_rp_read_local_version *rp = (void *) skb->data;
594 
595 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
596 
597 	if (rp->status)
598 		goto done;
599 
600 	hdev->hci_ver = rp->hci_ver;
601 	hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
602 	hdev->lmp_ver = rp->lmp_ver;
603 	hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
604 	hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
605 
606 	BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name,
607 					hdev->manufacturer,
608 					hdev->hci_ver, hdev->hci_rev);
609 
610 	if (test_bit(HCI_INIT, &hdev->flags))
611 		hci_setup(hdev);
612 
613 done:
614 	hci_req_complete(hdev, HCI_OP_READ_LOCAL_VERSION, rp->status);
615 }
616 
617 static void hci_setup_link_policy(struct hci_dev *hdev)
618 {
619 	u16 link_policy = 0;
620 
621 	if (hdev->features[0] & LMP_RSWITCH)
622 		link_policy |= HCI_LP_RSWITCH;
623 	if (hdev->features[0] & LMP_HOLD)
624 		link_policy |= HCI_LP_HOLD;
625 	if (hdev->features[0] & LMP_SNIFF)
626 		link_policy |= HCI_LP_SNIFF;
627 	if (hdev->features[1] & LMP_PARK)
628 		link_policy |= HCI_LP_PARK;
629 
630 	link_policy = cpu_to_le16(link_policy);
631 	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(link_policy),
632 		     &link_policy);
633 }
634 
635 static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb)
636 {
637 	struct hci_rp_read_local_commands *rp = (void *) skb->data;
638 
639 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
640 
641 	if (rp->status)
642 		goto done;
643 
644 	memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
645 
646 	if (test_bit(HCI_INIT, &hdev->flags) && (hdev->commands[5] & 0x10))
647 		hci_setup_link_policy(hdev);
648 
649 done:
650 	hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status);
651 }
652 
653 static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb)
654 {
655 	struct hci_rp_read_local_features *rp = (void *) skb->data;
656 
657 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
658 
659 	if (rp->status)
660 		return;
661 
662 	memcpy(hdev->features, rp->features, 8);
663 
664 	/* Adjust default settings according to features
665 	 * supported by the device. */
666 
667 	if (hdev->features[0] & LMP_3SLOT)
668 		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
669 
670 	if (hdev->features[0] & LMP_5SLOT)
671 		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
672 
673 	if (hdev->features[1] & LMP_HV2) {
674 		hdev->pkt_type  |= (HCI_HV2);
675 		hdev->esco_type |= (ESCO_HV2);
676 	}
677 
678 	if (hdev->features[1] & LMP_HV3) {
679 		hdev->pkt_type  |= (HCI_HV3);
680 		hdev->esco_type |= (ESCO_HV3);
681 	}
682 
683 	if (hdev->features[3] & LMP_ESCO)
684 		hdev->esco_type |= (ESCO_EV3);
685 
686 	if (hdev->features[4] & LMP_EV4)
687 		hdev->esco_type |= (ESCO_EV4);
688 
689 	if (hdev->features[4] & LMP_EV5)
690 		hdev->esco_type |= (ESCO_EV5);
691 
692 	if (hdev->features[5] & LMP_EDR_ESCO_2M)
693 		hdev->esco_type |= (ESCO_2EV3);
694 
695 	if (hdev->features[5] & LMP_EDR_ESCO_3M)
696 		hdev->esco_type |= (ESCO_3EV3);
697 
698 	if (hdev->features[5] & LMP_EDR_3S_ESCO)
699 		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
700 
701 	BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
702 					hdev->features[0], hdev->features[1],
703 					hdev->features[2], hdev->features[3],
704 					hdev->features[4], hdev->features[5],
705 					hdev->features[6], hdev->features[7]);
706 }
707 
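/* Bring the controller's "LE Supported (Host)" setting in line with the
 * enable_le setting and the HCI_LE_ENABLED flag; the command is only sent
 * when the desired value differs from the current host features.
 */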
708 static void hci_set_le_support(struct hci_dev *hdev)
709 {
710 	struct hci_cp_write_le_host_supported cp;
711 
712 	memset(&cp, 0, sizeof(cp));
713 
714 	if (enable_le && test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
715 		cp.le = 1;
716 		cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR);
717 	}
718 
719 	if (cp.le != !!(hdev->host_features[0] & LMP_HOST_LE))
720 		hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
721 			     &cp);
722 }
723 
724 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
725 							struct sk_buff *skb)
726 {
727 	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
728 
729 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
730 
731 	if (rp->status)
732 		goto done;
733 
734 	switch (rp->page) {
735 	case 0:
736 		memcpy(hdev->features, rp->features, 8);
737 		break;
738 	case 1:
739 		memcpy(hdev->host_features, rp->features, 8);
740 		break;
741 	}
742 
743 	if (test_bit(HCI_INIT, &hdev->flags) && hdev->features[4] & LMP_LE)
744 		hci_set_le_support(hdev);
745 
746 done:
747 	hci_req_complete(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, rp->status);
748 }
749 
750 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
751 						struct sk_buff *skb)
752 {
753 	struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
754 
755 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
756 
757 	if (rp->status)
758 		return;
759 
760 	hdev->flow_ctl_mode = rp->mode;
761 
762 	hci_req_complete(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, rp->status);
763 }
764 
765 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
766 {
767 	struct hci_rp_read_buffer_size *rp = (void *) skb->data;
768 
769 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
770 
771 	if (rp->status)
772 		return;
773 
774 	hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
775 	hdev->sco_mtu  = rp->sco_mtu;
776 	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
777 	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
778 
779 	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
780 		hdev->sco_mtu  = 64;
781 		hdev->sco_pkts = 8;
782 	}
783 
784 	hdev->acl_cnt = hdev->acl_pkts;
785 	hdev->sco_cnt = hdev->sco_pkts;
786 
787 	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name,
788 					hdev->acl_mtu, hdev->acl_pkts,
789 					hdev->sco_mtu, hdev->sco_pkts);
790 }
791 
792 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
793 {
794 	struct hci_rp_read_bd_addr *rp = (void *) skb->data;
795 
796 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
797 
798 	if (!rp->status)
799 		bacpy(&hdev->bdaddr, &rp->bdaddr);
800 
801 	hci_req_complete(hdev, HCI_OP_READ_BD_ADDR, rp->status);
802 }
803 
804 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
805 							struct sk_buff *skb)
806 {
807 	struct hci_rp_read_data_block_size *rp = (void *) skb->data;
808 
809 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
810 
811 	if (rp->status)
812 		return;
813 
814 	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
815 	hdev->block_len = __le16_to_cpu(rp->block_len);
816 	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
817 
818 	hdev->block_cnt = hdev->num_blocks;
819 
820 	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
821 					hdev->block_cnt, hdev->block_len);
822 
823 	hci_req_complete(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, rp->status);
824 }
825 
826 static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
827 {
828 	__u8 status = *((__u8 *) skb->data);
829 
830 	BT_DBG("%s status 0x%x", hdev->name, status);
831 
832 	hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
833 }
834 
835 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
836 		struct sk_buff *skb)
837 {
838 	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
839 
840 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
841 
842 	if (rp->status)
843 		return;
844 
845 	hdev->amp_status = rp->amp_status;
846 	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
847 	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
848 	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
849 	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
850 	hdev->amp_type = rp->amp_type;
851 	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
852 	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
853 	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
854 	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
855 
856 	hci_req_complete(hdev, HCI_OP_READ_LOCAL_AMP_INFO, rp->status);
857 }
858 
859 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
860 							struct sk_buff *skb)
861 {
862 	__u8 status = *((__u8 *) skb->data);
863 
864 	BT_DBG("%s status 0x%x", hdev->name, status);
865 
866 	hci_req_complete(hdev, HCI_OP_DELETE_STORED_LINK_KEY, status);
867 }
868 
869 static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
870 {
871 	__u8 status = *((__u8 *) skb->data);
872 
873 	BT_DBG("%s status 0x%x", hdev->name, status);
874 
875 	hci_req_complete(hdev, HCI_OP_SET_EVENT_MASK, status);
876 }
877 
878 static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
879 							struct sk_buff *skb)
880 {
881 	__u8 status = *((__u8 *) skb->data);
882 
883 	BT_DBG("%s status 0x%x", hdev->name, status);
884 
885 	hci_req_complete(hdev, HCI_OP_WRITE_INQUIRY_MODE, status);
886 }
887 
888 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
889 							struct sk_buff *skb)
890 {
891 	__u8 status = *((__u8 *) skb->data);
892 
893 	BT_DBG("%s status 0x%x", hdev->name, status);
894 
895 	hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, status);
896 }
897 
898 static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
899 {
900 	__u8 status = *((__u8 *) skb->data);
901 
902 	BT_DBG("%s status 0x%x", hdev->name, status);
903 
904 	hci_req_complete(hdev, HCI_OP_SET_EVENT_FLT, status);
905 }
906 
907 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
908 {
909 	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
910 	struct hci_cp_pin_code_reply *cp;
911 	struct hci_conn *conn;
912 
913 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
914 
915 	hci_dev_lock(hdev);
916 
917 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
918 		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
919 
920 	if (rp->status != 0)
921 		goto unlock;
922 
923 	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
924 	if (!cp)
925 		goto unlock;
926 
927 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
928 	if (conn)
929 		conn->pin_length = cp->pin_len;
930 
931 unlock:
932 	hci_dev_unlock(hdev);
933 }
934 
935 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
936 {
937 	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
938 
939 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
940 
941 	hci_dev_lock(hdev);
942 
943 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
944 		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
945 								rp->status);
946 
947 	hci_dev_unlock(hdev);
948 }
949 
950 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
951 				       struct sk_buff *skb)
952 {
953 	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
954 
955 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
956 
957 	if (rp->status)
958 		return;
959 
960 	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
961 	hdev->le_pkts = rp->le_max_pkt;
962 
963 	hdev->le_cnt = hdev->le_pkts;
964 
965 	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
966 
967 	hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status);
968 }
969 
970 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
971 {
972 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
973 
974 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
975 
976 	hci_dev_lock(hdev);
977 
978 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
979 		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
980 						 rp->status);
981 
982 	hci_dev_unlock(hdev);
983 }
984 
985 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
986 							struct sk_buff *skb)
987 {
988 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
989 
990 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
991 
992 	hci_dev_lock(hdev);
993 
994 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
995 		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
996 						     ACL_LINK, 0, rp->status);
997 
998 	hci_dev_unlock(hdev);
999 }
1000 
1001 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
1002 {
1003 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1004 
1005 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
1006 
1007 	hci_dev_lock(hdev);
1008 
1009 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
1010 		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1011 						 0, rp->status);
1012 
1013 	hci_dev_unlock(hdev);
1014 }
1015 
1016 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
1017 							struct sk_buff *skb)
1018 {
1019 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1020 
1021 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
1022 
1023 	hci_dev_lock(hdev);
1024 
1025 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
1026 		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1027 						     ACL_LINK, 0, rp->status);
1028 
1029 	hci_dev_unlock(hdev);
1030 }
1031 
1032 static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
1033 							struct sk_buff *skb)
1034 {
1035 	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1036 
1037 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
1038 
1039 	hci_dev_lock(hdev);
1040 	mgmt_read_local_oob_data_reply_complete(hdev, rp->hash,
1041 						rp->randomizer, rp->status);
1042 	hci_dev_unlock(hdev);
1043 }
1044 
1045 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1046 {
1047 	__u8 status = *((__u8 *) skb->data);
1048 
1049 	BT_DBG("%s status 0x%x", hdev->name, status);
1050 
1051 	hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_PARAM, status);
1052 
1053 	if (status) {
1054 		hci_dev_lock(hdev);
1055 		mgmt_start_discovery_failed(hdev, status);
1056 		hci_dev_unlock(hdev);
1057 		return;
1058 	}
1059 }
1060 
1061 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1062 					struct sk_buff *skb)
1063 {
1064 	struct hci_cp_le_set_scan_enable *cp;
1065 	__u8 status = *((__u8 *) skb->data);
1066 
1067 	BT_DBG("%s status 0x%x", hdev->name, status);
1068 
1069 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1070 	if (!cp)
1071 		return;
1072 
1073 	switch (cp->enable) {
1074 	case LE_SCANNING_ENABLED:
1075 		hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_ENABLE, status);
1076 
1077 		if (status) {
1078 			hci_dev_lock(hdev);
1079 			mgmt_start_discovery_failed(hdev, status);
1080 			hci_dev_unlock(hdev);
1081 			return;
1082 		}
1083 
1084 		set_bit(HCI_LE_SCAN, &hdev->dev_flags);
1085 
1086 		cancel_delayed_work_sync(&hdev->adv_work);
1087 
1088 		hci_dev_lock(hdev);
1089 		hci_adv_entries_clear(hdev);
1090 		hci_discovery_set_state(hdev, DISCOVERY_FINDING);
1091 		hci_dev_unlock(hdev);
1092 		break;
1093 
1094 	case LE_SCANNING_DISABLED:
1095 		if (status)
1096 			return;
1097 
1098 		clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
1099 
1100 		schedule_delayed_work(&hdev->adv_work, ADV_CLEAR_TIMEOUT);
1101 
1102 		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED) {
1103 			mgmt_interleaved_discovery(hdev);
1104 		} else {
1105 			hci_dev_lock(hdev);
1106 			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1107 			hci_dev_unlock(hdev);
1108 		}
1109 
1110 		break;
1111 
1112 	default:
1113 		BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
1114 		break;
1115 	}
1116 }
1117 
1118 static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb)
1119 {
1120 	struct hci_rp_le_ltk_reply *rp = (void *) skb->data;
1121 
1122 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
1123 
1124 	if (rp->status)
1125 		return;
1126 
1127 	hci_req_complete(hdev, HCI_OP_LE_LTK_REPLY, rp->status);
1128 }
1129 
1130 static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
1131 {
1132 	struct hci_rp_le_ltk_neg_reply *rp = (void *) skb->data;
1133 
1134 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
1135 
1136 	if (rp->status)
1137 		return;
1138 
1139 	hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status);
1140 }
1141 
1142 static inline void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1143 							struct sk_buff *skb)
1144 {
1145 	struct hci_cp_write_le_host_supported *sent;
1146 	__u8 status = *((__u8 *) skb->data);
1147 
1148 	BT_DBG("%s status 0x%x", hdev->name, status);
1149 
1150 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1151 	if (!sent)
1152 		return;
1153 
1154 	if (!status) {
1155 		if (sent->le)
1156 			hdev->host_features[0] |= LMP_HOST_LE;
1157 		else
1158 			hdev->host_features[0] &= ~LMP_HOST_LE;
1159 	}
1160 
1161 	if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
1162 					!test_bit(HCI_INIT, &hdev->flags))
1163 		mgmt_le_enable_complete(hdev, sent->le, status);
1164 
1165 	hci_req_complete(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, status);
1166 }
1167 
1168 static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1169 {
1170 	BT_DBG("%s status 0x%x", hdev->name, status);
1171 
1172 	if (status) {
1173 		hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1174 		hci_conn_check_pending(hdev);
1175 		hci_dev_lock(hdev);
1176 		if (test_bit(HCI_MGMT, &hdev->dev_flags))
1177 			mgmt_start_discovery_failed(hdev, status);
1178 		hci_dev_unlock(hdev);
1179 		return;
1180 	}
1181 
1182 	set_bit(HCI_INQUIRY, &hdev->flags);
1183 
1184 	hci_dev_lock(hdev);
1185 	hci_discovery_set_state(hdev, DISCOVERY_FINDING);
1186 	hci_dev_unlock(hdev);
1187 }
1188 
1189 static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1190 {
1191 	struct hci_cp_create_conn *cp;
1192 	struct hci_conn *conn;
1193 
1194 	BT_DBG("%s status 0x%x", hdev->name, status);
1195 
1196 	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1197 	if (!cp)
1198 		return;
1199 
1200 	hci_dev_lock(hdev);
1201 
1202 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1203 
1204 	BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->bdaddr), conn);
1205 
1206 	if (status) {
1207 		if (conn && conn->state == BT_CONNECT) {
1208 			if (status != 0x0c || conn->attempt > 2) {
1209 				conn->state = BT_CLOSED;
1210 				hci_proto_connect_cfm(conn, status);
1211 				hci_conn_del(conn);
1212 			} else
1213 				conn->state = BT_CONNECT2;
1214 		}
1215 	} else {
1216 		if (!conn) {
1217 			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
1218 			if (conn) {
1219 				conn->out = true;
1220 				conn->link_mode |= HCI_LM_MASTER;
1221 			} else
1222 				BT_ERR("No memory for new connection");
1223 		}
1224 	}
1225 
1226 	hci_dev_unlock(hdev);
1227 }
1228 
1229 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1230 {
1231 	struct hci_cp_add_sco *cp;
1232 	struct hci_conn *acl, *sco;
1233 	__u16 handle;
1234 
1235 	BT_DBG("%s status 0x%x", hdev->name, status);
1236 
1237 	if (!status)
1238 		return;
1239 
1240 	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1241 	if (!cp)
1242 		return;
1243 
1244 	handle = __le16_to_cpu(cp->handle);
1245 
1246 	BT_DBG("%s handle %d", hdev->name, handle);
1247 
1248 	hci_dev_lock(hdev);
1249 
1250 	acl = hci_conn_hash_lookup_handle(hdev, handle);
1251 	if (acl) {
1252 		sco = acl->link;
1253 		if (sco) {
1254 			sco->state = BT_CLOSED;
1255 
1256 			hci_proto_connect_cfm(sco, status);
1257 			hci_conn_del(sco);
1258 		}
1259 	}
1260 
1261 	hci_dev_unlock(hdev);
1262 }
1263 
1264 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1265 {
1266 	struct hci_cp_auth_requested *cp;
1267 	struct hci_conn *conn;
1268 
1269 	BT_DBG("%s status 0x%x", hdev->name, status);
1270 
1271 	if (!status)
1272 		return;
1273 
1274 	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1275 	if (!cp)
1276 		return;
1277 
1278 	hci_dev_lock(hdev);
1279 
1280 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1281 	if (conn) {
1282 		if (conn->state == BT_CONFIG) {
1283 			hci_proto_connect_cfm(conn, status);
1284 			hci_conn_put(conn);
1285 		}
1286 	}
1287 
1288 	hci_dev_unlock(hdev);
1289 }
1290 
1291 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1292 {
1293 	struct hci_cp_set_conn_encrypt *cp;
1294 	struct hci_conn *conn;
1295 
1296 	BT_DBG("%s status 0x%x", hdev->name, status);
1297 
1298 	if (!status)
1299 		return;
1300 
1301 	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1302 	if (!cp)
1303 		return;
1304 
1305 	hci_dev_lock(hdev);
1306 
1307 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1308 	if (conn) {
1309 		if (conn->state == BT_CONFIG) {
1310 			hci_proto_connect_cfm(conn, status);
1311 			hci_conn_put(conn);
1312 		}
1313 	}
1314 
1315 	hci_dev_unlock(hdev);
1316 }
1317 
1318 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1319 							struct hci_conn *conn)
1320 {
1321 	if (conn->state != BT_CONFIG || !conn->out)
1322 		return 0;
1323 
1324 	if (conn->pending_sec_level == BT_SECURITY_SDP)
1325 		return 0;
1326 
1327 	/* Only request authentication for SSP connections or non-SSP
1328 	 * devices with sec_level HIGH or if MITM protection is requested */
1329 	if (!hci_conn_ssp_enabled(conn) &&
1330 				conn->pending_sec_level != BT_SECURITY_HIGH &&
1331 				!(conn->auth_type & 0x01))
1332 		return 0;
1333 
1334 	return 1;
1335 }
1336 
1337 static inline int hci_resolve_name(struct hci_dev *hdev,
1338 				   struct inquiry_entry *e)
1339 {
1340 	struct hci_cp_remote_name_req cp;
1341 
1342 	memset(&cp, 0, sizeof(cp));
1343 
1344 	bacpy(&cp.bdaddr, &e->data.bdaddr);
1345 	cp.pscan_rep_mode = e->data.pscan_rep_mode;
1346 	cp.pscan_mode = e->data.pscan_mode;
1347 	cp.clock_offset = e->data.clock_offset;
1348 
1349 	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1350 }
1351 
1352 static bool hci_resolve_next_name(struct hci_dev *hdev)
1353 {
1354 	struct discovery_state *discov = &hdev->discovery;
1355 	struct inquiry_entry *e;
1356 
1357 	if (list_empty(&discov->resolve))
1358 		return false;
1359 
1360 	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1361 	if (hci_resolve_name(hdev, e) == 0) {
1362 		e->name_state = NAME_PENDING;
1363 		return true;
1364 	}
1365 
1366 	return false;
1367 }
1368 
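/* Called with a freshly resolved remote name (or NULL on failure): report
 * the device as connected to mgmt if that is still pending, mark the inquiry
 * cache entry as resolved, and either kick off the next name request or move
 * discovery to DISCOVERY_STOPPED.
 */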
1369 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
1370 				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
1371 {
1372 	struct discovery_state *discov = &hdev->discovery;
1373 	struct inquiry_entry *e;
1374 
1375 	if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1376 		mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
1377 				      name_len, conn->dev_class);
1378 
1379 	if (discov->state == DISCOVERY_STOPPED)
1380 		return;
1381 
1382 	if (discov->state == DISCOVERY_STOPPING)
1383 		goto discov_complete;
1384 
1385 	if (discov->state != DISCOVERY_RESOLVING)
1386 		return;
1387 
1388 	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
1389 	if (e) {
1390 		e->name_state = NAME_KNOWN;
1391 		list_del(&e->list);
1392 		if (name)
1393 			mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
1394 					 e->data.rssi, name, name_len);
1395 	}
1396 
1397 	if (hci_resolve_next_name(hdev))
1398 		return;
1399 
1400 discov_complete:
1401 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1402 }
1403 
1404 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1405 {
1406 	struct hci_cp_remote_name_req *cp;
1407 	struct hci_conn *conn;
1408 
1409 	BT_DBG("%s status 0x%x", hdev->name, status);
1410 
1411 	/* If successful, wait for the name req complete event before
1412 	 * checking whether authentication is needed */
1413 	if (!status)
1414 		return;
1415 
1416 	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1417 	if (!cp)
1418 		return;
1419 
1420 	hci_dev_lock(hdev);
1421 
1422 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1423 
1424 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
1425 		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
1426 
1427 	if (!conn)
1428 		goto unlock;
1429 
1430 	if (!hci_outgoing_auth_needed(hdev, conn))
1431 		goto unlock;
1432 
1433 	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1434 		struct hci_cp_auth_requested cp;
1435 		cp.handle = __cpu_to_le16(conn->handle);
1436 		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1437 	}
1438 
1439 unlock:
1440 	hci_dev_unlock(hdev);
1441 }
1442 
1443 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1444 {
1445 	struct hci_cp_read_remote_features *cp;
1446 	struct hci_conn *conn;
1447 
1448 	BT_DBG("%s status 0x%x", hdev->name, status);
1449 
1450 	if (!status)
1451 		return;
1452 
1453 	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1454 	if (!cp)
1455 		return;
1456 
1457 	hci_dev_lock(hdev);
1458 
1459 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1460 	if (conn) {
1461 		if (conn->state == BT_CONFIG) {
1462 			hci_proto_connect_cfm(conn, status);
1463 			hci_conn_put(conn);
1464 		}
1465 	}
1466 
1467 	hci_dev_unlock(hdev);
1468 }
1469 
1470 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1471 {
1472 	struct hci_cp_read_remote_ext_features *cp;
1473 	struct hci_conn *conn;
1474 
1475 	BT_DBG("%s status 0x%x", hdev->name, status);
1476 
1477 	if (!status)
1478 		return;
1479 
1480 	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1481 	if (!cp)
1482 		return;
1483 
1484 	hci_dev_lock(hdev);
1485 
1486 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1487 	if (conn) {
1488 		if (conn->state == BT_CONFIG) {
1489 			hci_proto_connect_cfm(conn, status);
1490 			hci_conn_put(conn);
1491 		}
1492 	}
1493 
1494 	hci_dev_unlock(hdev);
1495 }
1496 
1497 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1498 {
1499 	struct hci_cp_setup_sync_conn *cp;
1500 	struct hci_conn *acl, *sco;
1501 	__u16 handle;
1502 
1503 	BT_DBG("%s status 0x%x", hdev->name, status);
1504 
1505 	if (!status)
1506 		return;
1507 
1508 	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1509 	if (!cp)
1510 		return;
1511 
1512 	handle = __le16_to_cpu(cp->handle);
1513 
1514 	BT_DBG("%s handle %d", hdev->name, handle);
1515 
1516 	hci_dev_lock(hdev);
1517 
1518 	acl = hci_conn_hash_lookup_handle(hdev, handle);
1519 	if (acl) {
1520 		sco = acl->link;
1521 		if (sco) {
1522 			sco->state = BT_CLOSED;
1523 
1524 			hci_proto_connect_cfm(sco, status);
1525 			hci_conn_del(sco);
1526 		}
1527 	}
1528 
1529 	hci_dev_unlock(hdev);
1530 }
1531 
1532 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1533 {
1534 	struct hci_cp_sniff_mode *cp;
1535 	struct hci_conn *conn;
1536 
1537 	BT_DBG("%s status 0x%x", hdev->name, status);
1538 
1539 	if (!status)
1540 		return;
1541 
1542 	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1543 	if (!cp)
1544 		return;
1545 
1546 	hci_dev_lock(hdev);
1547 
1548 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1549 	if (conn) {
1550 		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1551 
1552 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1553 			hci_sco_setup(conn, status);
1554 	}
1555 
1556 	hci_dev_unlock(hdev);
1557 }
1558 
1559 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1560 {
1561 	struct hci_cp_exit_sniff_mode *cp;
1562 	struct hci_conn *conn;
1563 
1564 	BT_DBG("%s status 0x%x", hdev->name, status);
1565 
1566 	if (!status)
1567 		return;
1568 
1569 	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1570 	if (!cp)
1571 		return;
1572 
1573 	hci_dev_lock(hdev);
1574 
1575 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1576 	if (conn) {
1577 		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1578 
1579 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1580 			hci_sco_setup(conn, status);
1581 	}
1582 
1583 	hci_dev_unlock(hdev);
1584 }
1585 
1586 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1587 {
1588 	struct hci_cp_disconnect *cp;
1589 	struct hci_conn *conn;
1590 
1591 	if (!status)
1592 		return;
1593 
1594 	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1595 	if (!cp)
1596 		return;
1597 
1598 	hci_dev_lock(hdev);
1599 
1600 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1601 	if (conn)
1602 		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1603 				       conn->dst_type, status);
1604 
1605 	hci_dev_unlock(hdev);
1606 }
1607 
1608 static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
1609 {
1610 	struct hci_cp_le_create_conn *cp;
1611 	struct hci_conn *conn;
1612 
1613 	BT_DBG("%s status 0x%x", hdev->name, status);
1614 
1615 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
1616 	if (!cp)
1617 		return;
1618 
1619 	hci_dev_lock(hdev);
1620 
1621 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
1622 
1623 	BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->peer_addr),
1624 		conn);
1625 
1626 	if (status) {
1627 		if (conn && conn->state == BT_CONNECT) {
1628 			conn->state = BT_CLOSED;
1629 			hci_proto_connect_cfm(conn, status);
1630 			hci_conn_del(conn);
1631 		}
1632 	} else {
1633 		if (!conn) {
1634 			conn = hci_conn_add(hdev, LE_LINK, &cp->peer_addr);
1635 			if (conn) {
1636 				conn->dst_type = cp->peer_addr_type;
1637 				conn->out = true;
1638 			} else {
1639 				BT_ERR("No memory for new connection");
1640 			}
1641 		}
1642 	}
1643 
1644 	hci_dev_unlock(hdev);
1645 }
1646 
1647 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
1648 {
1649 	BT_DBG("%s status 0x%x", hdev->name, status);
1650 }
1651 
1652 static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1653 {
1654 	__u8 status = *((__u8 *) skb->data);
1655 	struct discovery_state *discov = &hdev->discovery;
1656 	struct inquiry_entry *e;
1657 
1658 	BT_DBG("%s status %d", hdev->name, status);
1659 
1660 	hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1661 
1662 	hci_conn_check_pending(hdev);
1663 
1664 	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
1665 		return;
1666 
1667 	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1668 		return;
1669 
1670 	hci_dev_lock(hdev);
1671 
1672 	if (discov->state != DISCOVERY_FINDING)
1673 		goto unlock;
1674 
1675 	if (list_empty(&discov->resolve)) {
1676 		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1677 		goto unlock;
1678 	}
1679 
1680 	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1681 	if (e && hci_resolve_name(hdev, e) == 0) {
1682 		e->name_state = NAME_PENDING;
1683 		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
1684 	} else {
1685 		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1686 	}
1687 
1688 unlock:
1689 	hci_dev_unlock(hdev);
1690 }
1691 
1692 static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1693 {
1694 	struct inquiry_data data;
1695 	struct inquiry_info *info = (void *) (skb->data + 1);
1696 	int num_rsp = *((__u8 *) skb->data);
1697 
1698 	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1699 
1700 	if (!num_rsp)
1701 		return;
1702 
1703 	hci_dev_lock(hdev);
1704 
1705 	for (; num_rsp; num_rsp--, info++) {
1706 		bool name_known, ssp;
1707 
1708 		bacpy(&data.bdaddr, &info->bdaddr);
1709 		data.pscan_rep_mode	= info->pscan_rep_mode;
1710 		data.pscan_period_mode	= info->pscan_period_mode;
1711 		data.pscan_mode		= info->pscan_mode;
1712 		memcpy(data.dev_class, info->dev_class, 3);
1713 		data.clock_offset	= info->clock_offset;
1714 		data.rssi		= 0x00;
1715 		data.ssp_mode		= 0x00;
1716 
1717 		name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
1718 		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
1719 				  info->dev_class, 0, !name_known, ssp, NULL,
1720 				  0);
1721 	}
1722 
1723 	hci_dev_unlock(hdev);
1724 }
1725 
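/* Connection Complete: find the pending connection (falling back from SCO to
 * the matching eSCO entry), record the handle and initial link mode on
 * success and start remote feature discovery for ACL links; on failure the
 * connection is torn down and, for ACL links, mgmt is told the connect
 * attempt failed.
 */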
1726 static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1727 {
1728 	struct hci_ev_conn_complete *ev = (void *) skb->data;
1729 	struct hci_conn *conn;
1730 
1731 	BT_DBG("%s", hdev->name);
1732 
1733 	hci_dev_lock(hdev);
1734 
1735 	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
1736 	if (!conn) {
1737 		if (ev->link_type != SCO_LINK)
1738 			goto unlock;
1739 
1740 		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
1741 		if (!conn)
1742 			goto unlock;
1743 
1744 		conn->type = SCO_LINK;
1745 	}
1746 
1747 	if (!ev->status) {
1748 		conn->handle = __le16_to_cpu(ev->handle);
1749 
1750 		if (conn->type == ACL_LINK) {
1751 			conn->state = BT_CONFIG;
1752 			hci_conn_hold(conn);
1753 			conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1754 		} else
1755 			conn->state = BT_CONNECTED;
1756 
1757 		hci_conn_hold_device(conn);
1758 		hci_conn_add_sysfs(conn);
1759 
1760 		if (test_bit(HCI_AUTH, &hdev->flags))
1761 			conn->link_mode |= HCI_LM_AUTH;
1762 
1763 		if (test_bit(HCI_ENCRYPT, &hdev->flags))
1764 			conn->link_mode |= HCI_LM_ENCRYPT;
1765 
1766 		/* Get remote features */
1767 		if (conn->type == ACL_LINK) {
1768 			struct hci_cp_read_remote_features cp;
1769 			cp.handle = ev->handle;
1770 			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
1771 				     sizeof(cp), &cp);
1772 		}
1773 
1774 		/* Set packet type for incoming connection */
1775 		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
1776 			struct hci_cp_change_conn_ptype cp;
1777 			cp.handle = ev->handle;
1778 			cp.pkt_type = cpu_to_le16(conn->pkt_type);
1779 			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
1780 				     &cp);
1781 		}
1782 	} else {
1783 		conn->state = BT_CLOSED;
1784 		if (conn->type == ACL_LINK)
1785 			mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
1786 					    conn->dst_type, ev->status);
1787 	}
1788 
1789 	if (conn->type == ACL_LINK)
1790 		hci_sco_setup(conn, ev->status);
1791 
1792 	if (ev->status) {
1793 		hci_proto_connect_cfm(conn, ev->status);
1794 		hci_conn_del(conn);
1795 	} else if (ev->link_type != ACL_LINK)
1796 		hci_proto_connect_cfm(conn, ev->status);
1797 
1798 unlock:
1799 	hci_dev_unlock(hdev);
1800 
1801 	hci_conn_check_pending(hdev);
1802 }
1803 
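/* Incoming connection request: accept it as an ACL or synchronous connection
 * when the link mode and the registered protocols allow it and the peer is
 * not blacklisted, otherwise reject it with HCI_ERROR_REJ_BAD_ADDR.
 */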
1804 static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1805 {
1806 	struct hci_ev_conn_request *ev = (void *) skb->data;
1807 	int mask = hdev->link_mode;
1808 
1809 	BT_DBG("%s bdaddr %s type 0x%x", hdev->name,
1810 					batostr(&ev->bdaddr), ev->link_type);
1811 
1812 	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type);
1813 
1814 	if ((mask & HCI_LM_ACCEPT) &&
1815 			!hci_blacklist_lookup(hdev, &ev->bdaddr)) {
1816 		/* Connection accepted */
1817 		struct inquiry_entry *ie;
1818 		struct hci_conn *conn;
1819 
1820 		hci_dev_lock(hdev);
1821 
1822 		ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
1823 		if (ie)
1824 			memcpy(ie->data.dev_class, ev->dev_class, 3);
1825 
1826 		conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
1827 		if (!conn) {
1828 			conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
1829 			if (!conn) {
1830 				BT_ERR("No memory for new connection");
1831 				hci_dev_unlock(hdev);
1832 				return;
1833 			}
1834 		}
1835 
1836 		memcpy(conn->dev_class, ev->dev_class, 3);
1837 		conn->state = BT_CONNECT;
1838 
1839 		hci_dev_unlock(hdev);
1840 
1841 		if (ev->link_type == ACL_LINK || !lmp_esco_capable(hdev)) {
1842 			struct hci_cp_accept_conn_req cp;
1843 
1844 			bacpy(&cp.bdaddr, &ev->bdaddr);
1845 
1846 			if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
1847 				cp.role = 0x00; /* Become master */
1848 			else
1849 				cp.role = 0x01; /* Remain slave */
1850 
1851 			hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp),
1852 				     &cp);
1853 		} else {
1854 			struct hci_cp_accept_sync_conn_req cp;
1855 
1856 			bacpy(&cp.bdaddr, &ev->bdaddr);
1857 			cp.pkt_type = cpu_to_le16(conn->pkt_type);
1858 
1859 			cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
1860 			cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
1861 			cp.max_latency    = cpu_to_le16(0xffff);
1862 			cp.content_format = cpu_to_le16(hdev->voice_setting);
1863 			cp.retrans_effort = 0xff;
1864 
1865 			hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
1866 				     sizeof(cp), &cp);
1867 		}
1868 	} else {
1869 		/* Connection rejected */
1870 		struct hci_cp_reject_conn_req cp;
1871 
1872 		bacpy(&cp.bdaddr, &ev->bdaddr);
1873 		cp.reason = HCI_ERROR_REJ_BAD_ADDR;
1874 		hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
1875 	}
1876 }
1877 
1878 static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1879 {
1880 	struct hci_ev_disconn_complete *ev = (void *) skb->data;
1881 	struct hci_conn *conn;
1882 
1883 	BT_DBG("%s status %d", hdev->name, ev->status);
1884 
1885 	hci_dev_lock(hdev);
1886 
1887 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1888 	if (!conn)
1889 		goto unlock;
1890 
1891 	if (ev->status == 0)
1892 		conn->state = BT_CLOSED;
1893 
1894 	if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) &&
1895 			(conn->type == ACL_LINK || conn->type == LE_LINK)) {
1896 		if (ev->status != 0)
1897 			mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1898 						conn->dst_type, ev->status);
1899 		else
1900 			mgmt_device_disconnected(hdev, &conn->dst, conn->type,
1901 						 conn->dst_type);
1902 	}
1903 
1904 	if (ev->status == 0) {
1905 		hci_proto_disconn_cfm(conn, ev->reason);
1906 		hci_conn_del(conn);
1907 	}
1908 
1909 unlock:
1910 	hci_dev_unlock(hdev);
1911 }
1912 
1913 static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1914 {
1915 	struct hci_ev_auth_complete *ev = (void *) skb->data;
1916 	struct hci_conn *conn;
1917 
1918 	BT_DBG("%s status %d", hdev->name, ev->status);
1919 
1920 	hci_dev_lock(hdev);
1921 
1922 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1923 	if (!conn)
1924 		goto unlock;
1925 
1926 	if (!ev->status) {
1927 		if (!hci_conn_ssp_enabled(conn) &&
1928 				test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
1929 			BT_INFO("re-auth of legacy device is not possible.");
1930 		} else {
1931 			conn->link_mode |= HCI_LM_AUTH;
1932 			conn->sec_level = conn->pending_sec_level;
1933 		}
1934 	} else {
1935 		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
1936 				 ev->status);
1937 	}
1938 
1939 	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
1940 	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
1941 
1942 	if (conn->state == BT_CONFIG) {
1943 		if (!ev->status && hci_conn_ssp_enabled(conn)) {
1944 			struct hci_cp_set_conn_encrypt cp;
1945 			cp.handle  = ev->handle;
1946 			cp.encrypt = 0x01;
1947 			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1948 									&cp);
1949 		} else {
1950 			conn->state = BT_CONNECTED;
1951 			hci_proto_connect_cfm(conn, ev->status);
1952 			hci_conn_put(conn);
1953 		}
1954 	} else {
1955 		hci_auth_cfm(conn, ev->status);
1956 
1957 		hci_conn_hold(conn);
1958 		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1959 		hci_conn_put(conn);
1960 	}
1961 
1962 	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
1963 		if (!ev->status) {
1964 			struct hci_cp_set_conn_encrypt cp;
1965 			cp.handle  = ev->handle;
1966 			cp.encrypt = 0x01;
1967 			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1968 									&cp);
1969 		} else {
1970 			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
1971 			hci_encrypt_cfm(conn, ev->status, 0x00);
1972 		}
1973 	}
1974 
1975 unlock:
1976 	hci_dev_unlock(hdev);
1977 }
1978 
1979 static inline void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
1980 {
1981 	struct hci_ev_remote_name *ev = (void *) skb->data;
1982 	struct hci_conn *conn;
1983 
1984 	BT_DBG("%s", hdev->name);
1985 
1986 	hci_conn_check_pending(hdev);
1987 
1988 	hci_dev_lock(hdev);
1989 
1990 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
1991 
1992 	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1993 		goto check_auth;
1994 
1995 	if (ev->status == 0)
1996 		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
1997 				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
1998 	else
1999 		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
2000 
2001 check_auth:
2002 	if (!conn)
2003 		goto unlock;
2004 
2005 	if (!hci_outgoing_auth_needed(hdev, conn))
2006 		goto unlock;
2007 
2008 	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2009 		struct hci_cp_auth_requested cp;
2010 		cp.handle = __cpu_to_le16(conn->handle);
2011 		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
2012 	}
2013 
2014 unlock:
2015 	hci_dev_unlock(hdev);
2016 }
2017 
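/* Encryption Change event: update the link mode bits of the affected
 * connection (encryption implies authentication) and confirm the
 * change to the upper layers.
 */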
2018 static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2019 {
2020 	struct hci_ev_encrypt_change *ev = (void *) skb->data;
2021 	struct hci_conn *conn;
2022 
2023 	BT_DBG("%s status %d", hdev->name, ev->status);
2024 
2025 	hci_dev_lock(hdev);
2026 
2027 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2028 	if (conn) {
2029 		if (!ev->status) {
2030 			if (ev->encrypt) {
2031 				/* Encryption implies authentication */
2032 				conn->link_mode |= HCI_LM_AUTH;
2033 				conn->link_mode |= HCI_LM_ENCRYPT;
2034 				conn->sec_level = conn->pending_sec_level;
2035 			} else
2036 				conn->link_mode &= ~HCI_LM_ENCRYPT;
2037 		}
2038 
2039 		clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2040 
2041 		if (conn->state == BT_CONFIG) {
2042 			if (!ev->status)
2043 				conn->state = BT_CONNECTED;
2044 
2045 			hci_proto_connect_cfm(conn, ev->status);
2046 			hci_conn_put(conn);
2047 		} else
2048 			hci_encrypt_cfm(conn, ev->status, ev->encrypt);
2049 	}
2050 
2051 	hci_dev_unlock(hdev);
2052 }
2053 
2054 static inline void hci_change_link_key_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2055 {
2056 	struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2057 	struct hci_conn *conn;
2058 
2059 	BT_DBG("%s status %d", hdev->name, ev->status);
2060 
2061 	hci_dev_lock(hdev);
2062 
2063 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2064 	if (conn) {
2065 		if (!ev->status)
2066 			conn->link_mode |= HCI_LM_SECURE;
2067 
2068 		clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2069 
2070 		hci_key_change_cfm(conn, ev->status);
2071 	}
2072 
2073 	hci_dev_unlock(hdev);
2074 }
2075 
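/* Remote Supported Features event: cache the remote feature mask and,
 * while still in BT_CONFIG, continue connection setup by reading the
 * extended features (when both sides support SSP) or by requesting the
 * remote name.
 */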
2076 static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
2077 {
2078 	struct hci_ev_remote_features *ev = (void *) skb->data;
2079 	struct hci_conn *conn;
2080 
2081 	BT_DBG("%s status %d", hdev->name, ev->status);
2082 
2083 	hci_dev_lock(hdev);
2084 
2085 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2086 	if (!conn)
2087 		goto unlock;
2088 
2089 	if (!ev->status)
2090 		memcpy(conn->features, ev->features, 8);
2091 
2092 	if (conn->state != BT_CONFIG)
2093 		goto unlock;
2094 
2095 	if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
2096 		struct hci_cp_read_remote_ext_features cp;
2097 		cp.handle = ev->handle;
2098 		cp.page = 0x01;
2099 		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
2100 							sizeof(cp), &cp);
2101 		goto unlock;
2102 	}
2103 
2104 	if (!ev->status) {
2105 		struct hci_cp_remote_name_req cp;
2106 		memset(&cp, 0, sizeof(cp));
2107 		bacpy(&cp.bdaddr, &conn->dst);
2108 		cp.pscan_rep_mode = 0x02;
2109 		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2110 	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2111 		mgmt_device_connected(hdev, &conn->dst, conn->type,
2112 				      conn->dst_type, 0, NULL, 0,
2113 				      conn->dev_class);
2114 
2115 	if (!hci_outgoing_auth_needed(hdev, conn)) {
2116 		conn->state = BT_CONNECTED;
2117 		hci_proto_connect_cfm(conn, ev->status);
2118 		hci_conn_put(conn);
2119 	}
2120 
2121 unlock:
2122 	hci_dev_unlock(hdev);
2123 }
2124 
2125 static inline void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb)
2126 {
2127 	BT_DBG("%s", hdev->name);
2128 }
2129 
2130 static inline void hci_qos_setup_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2131 {
2132 	BT_DBG("%s", hdev->name);
2133 }
2134 
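/* Command Complete event: dispatch the returned parameters to the
 * matching hci_cc_* handler based on the opcode, stop the command
 * timer and, if the controller reports free command slots, resume
 * processing of the command queue.
 */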
2135 static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2136 {
2137 	struct hci_ev_cmd_complete *ev = (void *) skb->data;
2138 	__u16 opcode;
2139 
2140 	skb_pull(skb, sizeof(*ev));
2141 
2142 	opcode = __le16_to_cpu(ev->opcode);
2143 
2144 	switch (opcode) {
2145 	case HCI_OP_INQUIRY_CANCEL:
2146 		hci_cc_inquiry_cancel(hdev, skb);
2147 		break;
2148 
2149 	case HCI_OP_EXIT_PERIODIC_INQ:
2150 		hci_cc_exit_periodic_inq(hdev, skb);
2151 		break;
2152 
2153 	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
2154 		hci_cc_remote_name_req_cancel(hdev, skb);
2155 		break;
2156 
2157 	case HCI_OP_ROLE_DISCOVERY:
2158 		hci_cc_role_discovery(hdev, skb);
2159 		break;
2160 
2161 	case HCI_OP_READ_LINK_POLICY:
2162 		hci_cc_read_link_policy(hdev, skb);
2163 		break;
2164 
2165 	case HCI_OP_WRITE_LINK_POLICY:
2166 		hci_cc_write_link_policy(hdev, skb);
2167 		break;
2168 
2169 	case HCI_OP_READ_DEF_LINK_POLICY:
2170 		hci_cc_read_def_link_policy(hdev, skb);
2171 		break;
2172 
2173 	case HCI_OP_WRITE_DEF_LINK_POLICY:
2174 		hci_cc_write_def_link_policy(hdev, skb);
2175 		break;
2176 
2177 	case HCI_OP_RESET:
2178 		hci_cc_reset(hdev, skb);
2179 		break;
2180 
2181 	case HCI_OP_WRITE_LOCAL_NAME:
2182 		hci_cc_write_local_name(hdev, skb);
2183 		break;
2184 
2185 	case HCI_OP_READ_LOCAL_NAME:
2186 		hci_cc_read_local_name(hdev, skb);
2187 		break;
2188 
2189 	case HCI_OP_WRITE_AUTH_ENABLE:
2190 		hci_cc_write_auth_enable(hdev, skb);
2191 		break;
2192 
2193 	case HCI_OP_WRITE_ENCRYPT_MODE:
2194 		hci_cc_write_encrypt_mode(hdev, skb);
2195 		break;
2196 
2197 	case HCI_OP_WRITE_SCAN_ENABLE:
2198 		hci_cc_write_scan_enable(hdev, skb);
2199 		break;
2200 
2201 	case HCI_OP_READ_CLASS_OF_DEV:
2202 		hci_cc_read_class_of_dev(hdev, skb);
2203 		break;
2204 
2205 	case HCI_OP_WRITE_CLASS_OF_DEV:
2206 		hci_cc_write_class_of_dev(hdev, skb);
2207 		break;
2208 
2209 	case HCI_OP_READ_VOICE_SETTING:
2210 		hci_cc_read_voice_setting(hdev, skb);
2211 		break;
2212 
2213 	case HCI_OP_WRITE_VOICE_SETTING:
2214 		hci_cc_write_voice_setting(hdev, skb);
2215 		break;
2216 
2217 	case HCI_OP_HOST_BUFFER_SIZE:
2218 		hci_cc_host_buffer_size(hdev, skb);
2219 		break;
2220 
2221 	case HCI_OP_WRITE_SSP_MODE:
2222 		hci_cc_write_ssp_mode(hdev, skb);
2223 		break;
2224 
2225 	case HCI_OP_READ_LOCAL_VERSION:
2226 		hci_cc_read_local_version(hdev, skb);
2227 		break;
2228 
2229 	case HCI_OP_READ_LOCAL_COMMANDS:
2230 		hci_cc_read_local_commands(hdev, skb);
2231 		break;
2232 
2233 	case HCI_OP_READ_LOCAL_FEATURES:
2234 		hci_cc_read_local_features(hdev, skb);
2235 		break;
2236 
2237 	case HCI_OP_READ_LOCAL_EXT_FEATURES:
2238 		hci_cc_read_local_ext_features(hdev, skb);
2239 		break;
2240 
2241 	case HCI_OP_READ_BUFFER_SIZE:
2242 		hci_cc_read_buffer_size(hdev, skb);
2243 		break;
2244 
2245 	case HCI_OP_READ_BD_ADDR:
2246 		hci_cc_read_bd_addr(hdev, skb);
2247 		break;
2248 
2249 	case HCI_OP_READ_DATA_BLOCK_SIZE:
2250 		hci_cc_read_data_block_size(hdev, skb);
2251 		break;
2252 
2253 	case HCI_OP_WRITE_CA_TIMEOUT:
2254 		hci_cc_write_ca_timeout(hdev, skb);
2255 		break;
2256 
2257 	case HCI_OP_READ_FLOW_CONTROL_MODE:
2258 		hci_cc_read_flow_control_mode(hdev, skb);
2259 		break;
2260 
2261 	case HCI_OP_READ_LOCAL_AMP_INFO:
2262 		hci_cc_read_local_amp_info(hdev, skb);
2263 		break;
2264 
2265 	case HCI_OP_DELETE_STORED_LINK_KEY:
2266 		hci_cc_delete_stored_link_key(hdev, skb);
2267 		break;
2268 
2269 	case HCI_OP_SET_EVENT_MASK:
2270 		hci_cc_set_event_mask(hdev, skb);
2271 		break;
2272 
2273 	case HCI_OP_WRITE_INQUIRY_MODE:
2274 		hci_cc_write_inquiry_mode(hdev, skb);
2275 		break;
2276 
2277 	case HCI_OP_READ_INQ_RSP_TX_POWER:
2278 		hci_cc_read_inq_rsp_tx_power(hdev, skb);
2279 		break;
2280 
2281 	case HCI_OP_SET_EVENT_FLT:
2282 		hci_cc_set_event_flt(hdev, skb);
2283 		break;
2284 
2285 	case HCI_OP_PIN_CODE_REPLY:
2286 		hci_cc_pin_code_reply(hdev, skb);
2287 		break;
2288 
2289 	case HCI_OP_PIN_CODE_NEG_REPLY:
2290 		hci_cc_pin_code_neg_reply(hdev, skb);
2291 		break;
2292 
2293 	case HCI_OP_READ_LOCAL_OOB_DATA:
2294 		hci_cc_read_local_oob_data_reply(hdev, skb);
2295 		break;
2296 
2297 	case HCI_OP_LE_READ_BUFFER_SIZE:
2298 		hci_cc_le_read_buffer_size(hdev, skb);
2299 		break;
2300 
2301 	case HCI_OP_USER_CONFIRM_REPLY:
2302 		hci_cc_user_confirm_reply(hdev, skb);
2303 		break;
2304 
2305 	case HCI_OP_USER_CONFIRM_NEG_REPLY:
2306 		hci_cc_user_confirm_neg_reply(hdev, skb);
2307 		break;
2308 
2309 	case HCI_OP_USER_PASSKEY_REPLY:
2310 		hci_cc_user_passkey_reply(hdev, skb);
2311 		break;
2312 
	case HCI_OP_USER_PASSKEY_NEG_REPLY:
		hci_cc_user_passkey_neg_reply(hdev, skb);
		break;
2315 
2316 	case HCI_OP_LE_SET_SCAN_PARAM:
2317 		hci_cc_le_set_scan_param(hdev, skb);
2318 		break;
2319 
2320 	case HCI_OP_LE_SET_SCAN_ENABLE:
2321 		hci_cc_le_set_scan_enable(hdev, skb);
2322 		break;
2323 
2324 	case HCI_OP_LE_LTK_REPLY:
2325 		hci_cc_le_ltk_reply(hdev, skb);
2326 		break;
2327 
2328 	case HCI_OP_LE_LTK_NEG_REPLY:
2329 		hci_cc_le_ltk_neg_reply(hdev, skb);
2330 		break;
2331 
2332 	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
2333 		hci_cc_write_le_host_supported(hdev, skb);
2334 		break;
2335 
2336 	default:
2337 		BT_DBG("%s opcode 0x%x", hdev->name, opcode);
2338 		break;
2339 	}
2340 
2341 	if (ev->opcode != HCI_OP_NOP)
2342 		del_timer(&hdev->cmd_timer);
2343 
2344 	if (ev->ncmd) {
2345 		atomic_set(&hdev->cmd_cnt, 1);
2346 		if (!skb_queue_empty(&hdev->cmd_q))
2347 			queue_work(hdev->workqueue, &hdev->cmd_work);
2348 	}
2349 }
2350 
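/* Command Status event: forward the status to the matching hci_cs_*
 * handler and restart command queue processing when the controller
 * allows further commands, unless a reset is in progress.
 */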
2351 static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
2352 {
2353 	struct hci_ev_cmd_status *ev = (void *) skb->data;
2354 	__u16 opcode;
2355 
2356 	skb_pull(skb, sizeof(*ev));
2357 
2358 	opcode = __le16_to_cpu(ev->opcode);
2359 
2360 	switch (opcode) {
2361 	case HCI_OP_INQUIRY:
2362 		hci_cs_inquiry(hdev, ev->status);
2363 		break;
2364 
2365 	case HCI_OP_CREATE_CONN:
2366 		hci_cs_create_conn(hdev, ev->status);
2367 		break;
2368 
2369 	case HCI_OP_ADD_SCO:
2370 		hci_cs_add_sco(hdev, ev->status);
2371 		break;
2372 
2373 	case HCI_OP_AUTH_REQUESTED:
2374 		hci_cs_auth_requested(hdev, ev->status);
2375 		break;
2376 
2377 	case HCI_OP_SET_CONN_ENCRYPT:
2378 		hci_cs_set_conn_encrypt(hdev, ev->status);
2379 		break;
2380 
2381 	case HCI_OP_REMOTE_NAME_REQ:
2382 		hci_cs_remote_name_req(hdev, ev->status);
2383 		break;
2384 
2385 	case HCI_OP_READ_REMOTE_FEATURES:
2386 		hci_cs_read_remote_features(hdev, ev->status);
2387 		break;
2388 
2389 	case HCI_OP_READ_REMOTE_EXT_FEATURES:
2390 		hci_cs_read_remote_ext_features(hdev, ev->status);
2391 		break;
2392 
2393 	case HCI_OP_SETUP_SYNC_CONN:
2394 		hci_cs_setup_sync_conn(hdev, ev->status);
2395 		break;
2396 
2397 	case HCI_OP_SNIFF_MODE:
2398 		hci_cs_sniff_mode(hdev, ev->status);
2399 		break;
2400 
2401 	case HCI_OP_EXIT_SNIFF_MODE:
2402 		hci_cs_exit_sniff_mode(hdev, ev->status);
2403 		break;
2404 
2405 	case HCI_OP_DISCONNECT:
2406 		hci_cs_disconnect(hdev, ev->status);
2407 		break;
2408 
2409 	case HCI_OP_LE_CREATE_CONN:
2410 		hci_cs_le_create_conn(hdev, ev->status);
2411 		break;
2412 
2413 	case HCI_OP_LE_START_ENC:
2414 		hci_cs_le_start_enc(hdev, ev->status);
2415 		break;
2416 
2417 	default:
2418 		BT_DBG("%s opcode 0x%x", hdev->name, opcode);
2419 		break;
2420 	}
2421 
2422 	if (ev->opcode != HCI_OP_NOP)
2423 		del_timer(&hdev->cmd_timer);
2424 
2425 	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2426 		atomic_set(&hdev->cmd_cnt, 1);
2427 		if (!skb_queue_empty(&hdev->cmd_q))
2428 			queue_work(hdev->workqueue, &hdev->cmd_work);
2429 	}
2430 }
2431 
2432 static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2433 {
2434 	struct hci_ev_role_change *ev = (void *) skb->data;
2435 	struct hci_conn *conn;
2436 
2437 	BT_DBG("%s status %d", hdev->name, ev->status);
2438 
2439 	hci_dev_lock(hdev);
2440 
2441 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2442 	if (conn) {
2443 		if (!ev->status) {
2444 			if (ev->role)
2445 				conn->link_mode &= ~HCI_LM_MASTER;
2446 			else
2447 				conn->link_mode |= HCI_LM_MASTER;
2448 		}
2449 
2450 		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2451 
2452 		hci_role_switch_cfm(conn, ev->status, ev->role);
2453 	}
2454 
2455 	hci_dev_unlock(hdev);
2456 }
2457 
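/* Number of Completed Packets event (packet-based flow control):
 * credit the per-link-type packet counters for every reported handle,
 * clamped to the controller limits, and reschedule TX work.
 */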
2458 static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
2459 {
2460 	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
2461 	int i;
2462 
2463 	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
2464 		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2465 		return;
2466 	}
2467 
2468 	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2469 			ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
2470 		BT_DBG("%s bad parameters", hdev->name);
2471 		return;
2472 	}
2473 
2474 	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
2475 
2476 	for (i = 0; i < ev->num_hndl; i++) {
2477 		struct hci_comp_pkts_info *info = &ev->handles[i];
2478 		struct hci_conn *conn;
2479 		__u16  handle, count;
2480 
2481 		handle = __le16_to_cpu(info->handle);
2482 		count  = __le16_to_cpu(info->count);
2483 
2484 		conn = hci_conn_hash_lookup_handle(hdev, handle);
2485 		if (!conn)
2486 			continue;
2487 
2488 		conn->sent -= count;
2489 
2490 		switch (conn->type) {
2491 		case ACL_LINK:
2492 			hdev->acl_cnt += count;
2493 			if (hdev->acl_cnt > hdev->acl_pkts)
2494 				hdev->acl_cnt = hdev->acl_pkts;
2495 			break;
2496 
2497 		case LE_LINK:
2498 			if (hdev->le_pkts) {
2499 				hdev->le_cnt += count;
2500 				if (hdev->le_cnt > hdev->le_pkts)
2501 					hdev->le_cnt = hdev->le_pkts;
2502 			} else {
2503 				hdev->acl_cnt += count;
2504 				if (hdev->acl_cnt > hdev->acl_pkts)
2505 					hdev->acl_cnt = hdev->acl_pkts;
2506 			}
2507 			break;
2508 
2509 		case SCO_LINK:
2510 			hdev->sco_cnt += count;
2511 			if (hdev->sco_cnt > hdev->sco_pkts)
2512 				hdev->sco_cnt = hdev->sco_pkts;
2513 			break;
2514 
2515 		default:
2516 			BT_ERR("Unknown type %d conn %p", conn->type, conn);
2517 			break;
2518 		}
2519 	}
2520 
2521 	queue_work(hdev->workqueue, &hdev->tx_work);
2522 }
2523 
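/* Number of Completed Data Blocks event (block-based flow control):
 * return the reported blocks to the shared block counter and
 * reschedule TX work.
 */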
2524 static inline void hci_num_comp_blocks_evt(struct hci_dev *hdev,
2525 					   struct sk_buff *skb)
2526 {
2527 	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
2528 	int i;
2529 
2530 	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
2531 		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2532 		return;
2533 	}
2534 
2535 	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2536 			ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
2537 		BT_DBG("%s bad parameters", hdev->name);
2538 		return;
2539 	}
2540 
2541 	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
2542 								ev->num_hndl);
2543 
2544 	for (i = 0; i < ev->num_hndl; i++) {
2545 		struct hci_comp_blocks_info *info = &ev->handles[i];
2546 		struct hci_conn *conn;
2547 		__u16  handle, block_count;
2548 
2549 		handle = __le16_to_cpu(info->handle);
2550 		block_count = __le16_to_cpu(info->blocks);
2551 
2552 		conn = hci_conn_hash_lookup_handle(hdev, handle);
2553 		if (!conn)
2554 			continue;
2555 
2556 		conn->sent -= block_count;
2557 
2558 		switch (conn->type) {
2559 		case ACL_LINK:
2560 			hdev->block_cnt += block_count;
2561 			if (hdev->block_cnt > hdev->num_blocks)
2562 				hdev->block_cnt = hdev->num_blocks;
2563 			break;
2564 
2565 		default:
2566 			BT_ERR("Unknown type %d conn %p", conn->type, conn);
2567 			break;
2568 		}
2569 	}
2570 
2571 	queue_work(hdev->workqueue, &hdev->tx_work);
2572 }
2573 
2574 static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2575 {
2576 	struct hci_ev_mode_change *ev = (void *) skb->data;
2577 	struct hci_conn *conn;
2578 
2579 	BT_DBG("%s status %d", hdev->name, ev->status);
2580 
2581 	hci_dev_lock(hdev);
2582 
2583 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2584 	if (conn) {
2585 		conn->mode = ev->mode;
2586 		conn->interval = __le16_to_cpu(ev->interval);
2587 
2588 		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
2589 			if (conn->mode == HCI_CM_ACTIVE)
2590 				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2591 			else
2592 				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2593 		}
2594 
2595 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2596 			hci_sco_setup(conn, ev->status);
2597 	}
2598 
2599 	hci_dev_unlock(hdev);
2600 }
2601 
2602 static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2603 {
2604 	struct hci_ev_pin_code_req *ev = (void *) skb->data;
2605 	struct hci_conn *conn;
2606 
2607 	BT_DBG("%s", hdev->name);
2608 
2609 	hci_dev_lock(hdev);
2610 
2611 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2612 	if (!conn)
2613 		goto unlock;
2614 
2615 	if (conn->state == BT_CONNECTED) {
2616 		hci_conn_hold(conn);
2617 		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2618 		hci_conn_put(conn);
2619 	}
2620 
2621 	if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
2622 		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2623 					sizeof(ev->bdaddr), &ev->bdaddr);
2624 	else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
2625 		u8 secure;
2626 
2627 		if (conn->pending_sec_level == BT_SECURITY_HIGH)
2628 			secure = 1;
2629 		else
2630 			secure = 0;
2631 
2632 		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
2633 	}
2634 
2635 unlock:
2636 	hci_dev_unlock(hdev);
2637 }
2638 
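/* Link Key Request event: look up a stored key for the peer and reply
 * with it, unless the key type is too weak for the pending security
 * level, in which case a negative reply is sent so that pairing is
 * performed instead.
 */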
2639 static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2640 {
2641 	struct hci_ev_link_key_req *ev = (void *) skb->data;
2642 	struct hci_cp_link_key_reply cp;
2643 	struct hci_conn *conn;
2644 	struct link_key *key;
2645 
2646 	BT_DBG("%s", hdev->name);
2647 
2648 	if (!test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
2649 		return;
2650 
2651 	hci_dev_lock(hdev);
2652 
2653 	key = hci_find_link_key(hdev, &ev->bdaddr);
2654 	if (!key) {
2655 		BT_DBG("%s link key not found for %s", hdev->name,
2656 							batostr(&ev->bdaddr));
2657 		goto not_found;
2658 	}
2659 
2660 	BT_DBG("%s found key type %u for %s", hdev->name, key->type,
2661 							batostr(&ev->bdaddr));
2662 
2663 	if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
2664 				key->type == HCI_LK_DEBUG_COMBINATION) {
2665 		BT_DBG("%s ignoring debug key", hdev->name);
2666 		goto not_found;
2667 	}
2668 
2669 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2670 	if (conn) {
2671 		if (key->type == HCI_LK_UNAUTH_COMBINATION &&
2672 				conn->auth_type != 0xff &&
2673 				(conn->auth_type & 0x01)) {
2674 			BT_DBG("%s ignoring unauthenticated key", hdev->name);
2675 			goto not_found;
2676 		}
2677 
2678 		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
2679 				conn->pending_sec_level == BT_SECURITY_HIGH) {
			BT_DBG("%s ignoring key unauthenticated for high security",
			       hdev->name);
2682 			goto not_found;
2683 		}
2684 
2685 		conn->key_type = key->type;
2686 		conn->pin_length = key->pin_len;
2687 	}
2688 
2689 	bacpy(&cp.bdaddr, &ev->bdaddr);
2690 	memcpy(cp.link_key, key->val, 16);
2691 
2692 	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
2693 
2694 	hci_dev_unlock(hdev);
2695 
2696 	return;
2697 
2698 not_found:
2699 	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
2700 	hci_dev_unlock(hdev);
2701 }
2702 
2703 static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
2704 {
2705 	struct hci_ev_link_key_notify *ev = (void *) skb->data;
2706 	struct hci_conn *conn;
2707 	u8 pin_len = 0;
2708 
2709 	BT_DBG("%s", hdev->name);
2710 
2711 	hci_dev_lock(hdev);
2712 
2713 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2714 	if (conn) {
2715 		hci_conn_hold(conn);
2716 		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2717 		pin_len = conn->pin_length;
2718 
2719 		if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
2720 			conn->key_type = ev->key_type;
2721 
2722 		hci_conn_put(conn);
2723 	}
2724 
2725 	if (test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
2726 		hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
2727 							ev->key_type, pin_len);
2728 
2729 	hci_dev_unlock(hdev);
2730 }
2731 
2732 static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
2733 {
2734 	struct hci_ev_clock_offset *ev = (void *) skb->data;
2735 	struct hci_conn *conn;
2736 
2737 	BT_DBG("%s status %d", hdev->name, ev->status);
2738 
2739 	hci_dev_lock(hdev);
2740 
2741 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2742 	if (conn && !ev->status) {
2743 		struct inquiry_entry *ie;
2744 
2745 		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2746 		if (ie) {
2747 			ie->data.clock_offset = ev->clock_offset;
2748 			ie->timestamp = jiffies;
2749 		}
2750 	}
2751 
2752 	hci_dev_unlock(hdev);
2753 }
2754 
2755 static inline void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2756 {
2757 	struct hci_ev_pkt_type_change *ev = (void *) skb->data;
2758 	struct hci_conn *conn;
2759 
2760 	BT_DBG("%s status %d", hdev->name, ev->status);
2761 
2762 	hci_dev_lock(hdev);
2763 
2764 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2765 	if (conn && !ev->status)
2766 		conn->pkt_type = __le16_to_cpu(ev->pkt_type);
2767 
2768 	hci_dev_unlock(hdev);
2769 }
2770 
2771 static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
2772 {
2773 	struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
2774 	struct inquiry_entry *ie;
2775 
2776 	BT_DBG("%s", hdev->name);
2777 
2778 	hci_dev_lock(hdev);
2779 
2780 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2781 	if (ie) {
2782 		ie->data.pscan_rep_mode = ev->pscan_rep_mode;
2783 		ie->timestamp = jiffies;
2784 	}
2785 
2786 	hci_dev_unlock(hdev);
2787 }
2788 
2789 static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct sk_buff *skb)
2790 {
2791 	struct inquiry_data data;
2792 	int num_rsp = *((__u8 *) skb->data);
2793 	bool name_known, ssp;
2794 
2795 	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2796 
2797 	if (!num_rsp)
2798 		return;
2799 
2800 	hci_dev_lock(hdev);
2801 
2802 	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
2803 		struct inquiry_info_with_rssi_and_pscan_mode *info;
2804 		info = (void *) (skb->data + 1);
2805 
2806 		for (; num_rsp; num_rsp--, info++) {
2807 			bacpy(&data.bdaddr, &info->bdaddr);
2808 			data.pscan_rep_mode	= info->pscan_rep_mode;
2809 			data.pscan_period_mode	= info->pscan_period_mode;
2810 			data.pscan_mode		= info->pscan_mode;
2811 			memcpy(data.dev_class, info->dev_class, 3);
2812 			data.clock_offset	= info->clock_offset;
2813 			data.rssi		= info->rssi;
2814 			data.ssp_mode		= 0x00;
2815 
2816 			name_known = hci_inquiry_cache_update(hdev, &data,
2817 							      false, &ssp);
2818 			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2819 					  info->dev_class, info->rssi,
2820 					  !name_known, ssp, NULL, 0);
2821 		}
2822 	} else {
2823 		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
2824 
2825 		for (; num_rsp; num_rsp--, info++) {
2826 			bacpy(&data.bdaddr, &info->bdaddr);
2827 			data.pscan_rep_mode	= info->pscan_rep_mode;
2828 			data.pscan_period_mode	= info->pscan_period_mode;
2829 			data.pscan_mode		= 0x00;
2830 			memcpy(data.dev_class, info->dev_class, 3);
2831 			data.clock_offset	= info->clock_offset;
2832 			data.rssi		= info->rssi;
2833 			data.ssp_mode		= 0x00;
2834 			name_known = hci_inquiry_cache_update(hdev, &data,
2835 							      false, &ssp);
2836 			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2837 					  info->dev_class, info->rssi,
2838 					  !name_known, ssp, NULL, 0);
2839 		}
2840 	}
2841 
2842 	hci_dev_unlock(hdev);
2843 }
2844 
2845 static inline void hci_remote_ext_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
2846 {
2847 	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
2848 	struct hci_conn *conn;
2849 
2850 	BT_DBG("%s", hdev->name);
2851 
2852 	hci_dev_lock(hdev);
2853 
2854 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2855 	if (!conn)
2856 		goto unlock;
2857 
2858 	if (!ev->status && ev->page == 0x01) {
2859 		struct inquiry_entry *ie;
2860 
2861 		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2862 		if (ie)
2863 			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
2864 
2865 		if (ev->features[0] & LMP_HOST_SSP)
2866 			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
2867 	}
2868 
2869 	if (conn->state != BT_CONFIG)
2870 		goto unlock;
2871 
2872 	if (!ev->status) {
2873 		struct hci_cp_remote_name_req cp;
2874 		memset(&cp, 0, sizeof(cp));
2875 		bacpy(&cp.bdaddr, &conn->dst);
2876 		cp.pscan_rep_mode = 0x02;
2877 		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2878 	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2879 		mgmt_device_connected(hdev, &conn->dst, conn->type,
2880 				      conn->dst_type, 0, NULL, 0,
2881 				      conn->dev_class);
2882 
2883 	if (!hci_outgoing_auth_needed(hdev, conn)) {
2884 		conn->state = BT_CONNECTED;
2885 		hci_proto_connect_cfm(conn, ev->status);
2886 		hci_conn_put(conn);
2887 	}
2888 
2889 unlock:
2890 	hci_dev_unlock(hdev);
2891 }
2892 
2893 static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2894 {
2895 	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
2896 	struct hci_conn *conn;
2897 
2898 	BT_DBG("%s status %d", hdev->name, ev->status);
2899 
2900 	hci_dev_lock(hdev);
2901 
2902 	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2903 	if (!conn) {
2904 		if (ev->link_type == ESCO_LINK)
2905 			goto unlock;
2906 
2907 		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
2908 		if (!conn)
2909 			goto unlock;
2910 
2911 		conn->type = SCO_LINK;
2912 	}
2913 
2914 	switch (ev->status) {
2915 	case 0x00:
2916 		conn->handle = __le16_to_cpu(ev->handle);
2917 		conn->state  = BT_CONNECTED;
2918 
2919 		hci_conn_hold_device(conn);
2920 		hci_conn_add_sysfs(conn);
2921 		break;
2922 
2923 	case 0x11:	/* Unsupported Feature or Parameter Value */
2924 	case 0x1c:	/* SCO interval rejected */
2925 	case 0x1a:	/* Unsupported Remote Feature */
2926 	case 0x1f:	/* Unspecified error */
2927 		if (conn->out && conn->attempt < 2) {
2928 			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
2929 					(hdev->esco_type & EDR_ESCO_MASK);
2930 			hci_setup_sync(conn, conn->link->handle);
2931 			goto unlock;
2932 		}
2933 		/* fall through */
2934 
2935 	default:
2936 		conn->state = BT_CLOSED;
2937 		break;
2938 	}
2939 
2940 	hci_proto_connect_cfm(conn, ev->status);
2941 	if (ev->status)
2942 		hci_conn_del(conn);
2943 
2944 unlock:
2945 	hci_dev_unlock(hdev);
2946 }
2947 
2948 static inline void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb)
2949 {
2950 	BT_DBG("%s", hdev->name);
2951 }
2952 
2953 static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
2954 {
2955 	struct hci_ev_sniff_subrate *ev = (void *) skb->data;
2956 
2957 	BT_DBG("%s status %d", hdev->name, ev->status);
2958 }
2959 
2960 static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2961 {
2962 	struct inquiry_data data;
2963 	struct extended_inquiry_info *info = (void *) (skb->data + 1);
2964 	int num_rsp = *((__u8 *) skb->data);
2965 
2966 	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2967 
2968 	if (!num_rsp)
2969 		return;
2970 
2971 	hci_dev_lock(hdev);
2972 
2973 	for (; num_rsp; num_rsp--, info++) {
2974 		bool name_known, ssp;
2975 
2976 		bacpy(&data.bdaddr, &info->bdaddr);
2977 		data.pscan_rep_mode	= info->pscan_rep_mode;
2978 		data.pscan_period_mode	= info->pscan_period_mode;
2979 		data.pscan_mode		= 0x00;
2980 		memcpy(data.dev_class, info->dev_class, 3);
2981 		data.clock_offset	= info->clock_offset;
2982 		data.rssi		= info->rssi;
2983 		data.ssp_mode		= 0x01;
2984 
2985 		if (test_bit(HCI_MGMT, &hdev->dev_flags))
2986 			name_known = eir_has_data_type(info->data,
2987 						       sizeof(info->data),
2988 						       EIR_NAME_COMPLETE);
2989 		else
2990 			name_known = true;
2991 
2992 		name_known = hci_inquiry_cache_update(hdev, &data, name_known,
2993 						      &ssp);
2994 		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2995 				  info->dev_class, info->rssi, !name_known,
2996 				  ssp, info->data, sizeof(info->data));
2997 	}
2998 
2999 	hci_dev_unlock(hdev);
3000 }
3001 
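/* Derive the authentication requirements to use in an IO Capability
 * Reply, taking the remote device's stated requirements into account.
 */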
3002 static inline u8 hci_get_auth_req(struct hci_conn *conn)
3003 {
	/* If the remote requests dedicated bonding, follow that lead */
3005 	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
3006 		/* If both remote and local IO capabilities allow MITM
3007 		 * protection then require it, otherwise don't */
3008 		if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
3009 			return 0x02;
3010 		else
3011 			return 0x03;
3012 	}
3013 
	/* If the remote requests no-bonding, follow that lead */
3015 	if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
3016 		return conn->remote_auth | (conn->auth_type & 0x01);
3017 
3018 	return conn->auth_type;
3019 }
3020 
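/* IO Capability Request event: reply with our IO capability, OOB data
 * presence and authentication requirements, or reject pairing when the
 * device is not pairable and the remote side asks for bonding.
 */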
3021 static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3022 {
3023 	struct hci_ev_io_capa_request *ev = (void *) skb->data;
3024 	struct hci_conn *conn;
3025 
3026 	BT_DBG("%s", hdev->name);
3027 
3028 	hci_dev_lock(hdev);
3029 
3030 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3031 	if (!conn)
3032 		goto unlock;
3033 
3034 	hci_conn_hold(conn);
3035 
3036 	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3037 		goto unlock;
3038 
3039 	if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
3040 			(conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
3041 		struct hci_cp_io_capability_reply cp;
3042 
3043 		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo, as KeyboardDisplay is not
		 * supported by the BT spec. */
3046 		cp.capability = (conn->io_capability == 0x04) ?
3047 						0x01 : conn->io_capability;
3048 		conn->auth_type = hci_get_auth_req(conn);
3049 		cp.authentication = conn->auth_type;
3050 
3051 		if ((conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)) &&
3052 				hci_find_remote_oob_data(hdev, &conn->dst))
3053 			cp.oob_data = 0x01;
3054 		else
3055 			cp.oob_data = 0x00;
3056 
3057 		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
3058 							sizeof(cp), &cp);
3059 	} else {
3060 		struct hci_cp_io_capability_neg_reply cp;
3061 
3062 		bacpy(&cp.bdaddr, &ev->bdaddr);
3063 		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
3064 
3065 		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
3066 							sizeof(cp), &cp);
3067 	}
3068 
3069 unlock:
3070 	hci_dev_unlock(hdev);
3071 }
3072 
3073 static inline void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3074 {
3075 	struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3076 	struct hci_conn *conn;
3077 
3078 	BT_DBG("%s", hdev->name);
3079 
3080 	hci_dev_lock(hdev);
3081 
3082 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3083 	if (!conn)
3084 		goto unlock;
3085 
3086 	conn->remote_cap = ev->capability;
3087 	conn->remote_auth = ev->authentication;
3088 	if (ev->oob_data)
3089 		set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3090 
3091 unlock:
3092 	hci_dev_unlock(hdev);
3093 }
3094 
3095 static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
3096 							struct sk_buff *skb)
3097 {
3098 	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
3099 	int loc_mitm, rem_mitm, confirm_hint = 0;
3100 	struct hci_conn *conn;
3101 
3102 	BT_DBG("%s", hdev->name);
3103 
3104 	hci_dev_lock(hdev);
3105 
3106 	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3107 		goto unlock;
3108 
3109 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3110 	if (!conn)
3111 		goto unlock;
3112 
3113 	loc_mitm = (conn->auth_type & 0x01);
3114 	rem_mitm = (conn->remote_auth & 0x01);
3115 
3116 	/* If we require MITM but the remote device can't provide that
3117 	 * (it has NoInputNoOutput) then reject the confirmation
3118 	 * request. The only exception is when we're dedicated bonding
3119 	 * initiators (connect_cfm_cb set) since then we always have the MITM
3120 	 * bit set. */
3121 	if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
3122 		BT_DBG("Rejecting request: remote device can't provide MITM");
3123 		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
3124 					sizeof(ev->bdaddr), &ev->bdaddr);
3125 		goto unlock;
3126 	}
3127 
	/* If neither side requires MITM protection, auto-accept */
3129 	if ((!loc_mitm || conn->remote_cap == 0x03) &&
3130 				(!rem_mitm || conn->io_capability == 0x03)) {
3131 
		/* If we're not the initiator, request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). */
3135 		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3136 			BT_DBG("Confirming auto-accept as acceptor");
3137 			confirm_hint = 1;
3138 			goto confirm;
3139 		}
3140 
3141 		BT_DBG("Auto-accept of user confirmation with %ums delay",
3142 						hdev->auto_accept_delay);
3143 
3144 		if (hdev->auto_accept_delay > 0) {
3145 			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
3146 			mod_timer(&conn->auto_accept_timer, jiffies + delay);
3147 			goto unlock;
3148 		}
3149 
3150 		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
3151 						sizeof(ev->bdaddr), &ev->bdaddr);
3152 		goto unlock;
3153 	}
3154 
3155 confirm:
3156 	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, ev->passkey,
3157 				  confirm_hint);
3158 
3159 unlock:
3160 	hci_dev_unlock(hdev);
3161 }
3162 
3163 static inline void hci_user_passkey_request_evt(struct hci_dev *hdev,
3164 							struct sk_buff *skb)
3165 {
3166 	struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3167 
3168 	BT_DBG("%s", hdev->name);
3169 
3170 	hci_dev_lock(hdev);
3171 
3172 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
3173 		mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3174 
3175 	hci_dev_unlock(hdev);
3176 }
3177 
3178 static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3179 {
3180 	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
3181 	struct hci_conn *conn;
3182 
3183 	BT_DBG("%s", hdev->name);
3184 
3185 	hci_dev_lock(hdev);
3186 
3187 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3188 	if (!conn)
3189 		goto unlock;
3190 
	/* To avoid duplicate auth_failed events to user space we check
	 * the HCI_CONN_AUTH_PEND flag which will be set if we
	 * initiated the authentication. A traditional auth_complete
	 * event is always produced as the initiator and is also mapped
	 * to the mgmt_auth_failed event. */
3196 	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status != 0)
3197 		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
3198 				 ev->status);
3199 
3200 	hci_conn_put(conn);
3201 
3202 unlock:
3203 	hci_dev_unlock(hdev);
3204 }
3205 
3206 static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
3207 {
3208 	struct hci_ev_remote_host_features *ev = (void *) skb->data;
3209 	struct inquiry_entry *ie;
3210 
3211 	BT_DBG("%s", hdev->name);
3212 
3213 	hci_dev_lock(hdev);
3214 
3215 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3216 	if (ie)
3217 		ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3218 
3219 	hci_dev_unlock(hdev);
3220 }
3221 
3222 static inline void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
3223 						   struct sk_buff *skb)
3224 {
3225 	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
3226 	struct oob_data *data;
3227 
3228 	BT_DBG("%s", hdev->name);
3229 
3230 	hci_dev_lock(hdev);
3231 
3232 	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3233 		goto unlock;
3234 
3235 	data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
3236 	if (data) {
3237 		struct hci_cp_remote_oob_data_reply cp;
3238 
3239 		bacpy(&cp.bdaddr, &ev->bdaddr);
3240 		memcpy(cp.hash, data->hash, sizeof(cp.hash));
3241 		memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));
3242 
3243 		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
3244 									&cp);
3245 	} else {
3246 		struct hci_cp_remote_oob_data_neg_reply cp;
3247 
3248 		bacpy(&cp.bdaddr, &ev->bdaddr);
3249 		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
3250 									&cp);
3251 	}
3252 
3253 unlock:
3254 	hci_dev_unlock(hdev);
3255 }
3256 
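/* LE Connection Complete event: create or look up the LE connection,
 * report success or failure to the management interface and move the
 * connection to BT_CONNECTED on success.
 */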
3257 static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3258 {
3259 	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
3260 	struct hci_conn *conn;
3261 
3262 	BT_DBG("%s status %d", hdev->name, ev->status);
3263 
3264 	hci_dev_lock(hdev);
3265 
3266 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr);
3267 	if (!conn) {
3268 		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
3269 		if (!conn) {
3270 			BT_ERR("No memory for new connection");
3271 			hci_dev_unlock(hdev);
3272 			return;
3273 		}
3274 
3275 		conn->dst_type = ev->bdaddr_type;
3276 	}
3277 
3278 	if (ev->status) {
3279 		mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
3280 						conn->dst_type, ev->status);
3281 		hci_proto_connect_cfm(conn, ev->status);
3282 		conn->state = BT_CLOSED;
3283 		hci_conn_del(conn);
3284 		goto unlock;
3285 	}
3286 
3287 	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3288 		mgmt_device_connected(hdev, &ev->bdaddr, conn->type,
3289 				      conn->dst_type, 0, NULL, 0, NULL);
3290 
3291 	conn->sec_level = BT_SECURITY_LOW;
3292 	conn->handle = __le16_to_cpu(ev->handle);
3293 	conn->state = BT_CONNECTED;
3294 
3295 	hci_conn_hold_device(conn);
3296 	hci_conn_add_sysfs(conn);
3297 
3298 	hci_proto_connect_cfm(conn, ev->status);
3299 
3300 unlock:
3301 	hci_dev_unlock(hdev);
3302 }
3303 
3304 static inline void hci_le_adv_report_evt(struct hci_dev *hdev,
3305 						struct sk_buff *skb)
3306 {
3307 	u8 num_reports = skb->data[0];
3308 	void *ptr = &skb->data[1];
3309 	s8 rssi;
3310 
3311 	hci_dev_lock(hdev);
3312 
3313 	while (num_reports--) {
3314 		struct hci_ev_le_advertising_info *ev = ptr;
3315 
3316 		hci_add_adv_entry(hdev, ev);
3317 
3318 		rssi = ev->data[ev->length];
3319 		mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type,
3320 				  NULL, rssi, 0, 1, ev->data, ev->length);
3321 
3322 		ptr += sizeof(*ev) + ev->length + 1;
3323 	}
3324 
3325 	hci_dev_unlock(hdev);
3326 }
3327 
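/* LE Long Term Key Request event: look up the LTK matching the EDiv
 * and Rand values and hand it to the controller; short term keys are
 * discarded after a single use. A negative reply is sent when no key
 * is known.
 */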
3328 static inline void hci_le_ltk_request_evt(struct hci_dev *hdev,
3329 						struct sk_buff *skb)
3330 {
3331 	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
3332 	struct hci_cp_le_ltk_reply cp;
3333 	struct hci_cp_le_ltk_neg_reply neg;
3334 	struct hci_conn *conn;
3335 	struct smp_ltk *ltk;
3336 
	BT_DBG("%s handle %d", hdev->name, __le16_to_cpu(ev->handle));
3338 
3339 	hci_dev_lock(hdev);
3340 
3341 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3342 	if (conn == NULL)
3343 		goto not_found;
3344 
3345 	ltk = hci_find_ltk(hdev, ev->ediv, ev->random);
3346 	if (ltk == NULL)
3347 		goto not_found;
3348 
3349 	memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
3350 	cp.handle = cpu_to_le16(conn->handle);
3351 
3352 	if (ltk->authenticated)
3353 		conn->sec_level = BT_SECURITY_HIGH;
3354 
3355 	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
3356 
3357 	if (ltk->type & HCI_SMP_STK) {
3358 		list_del(&ltk->list);
3359 		kfree(ltk);
3360 	}
3361 
3362 	hci_dev_unlock(hdev);
3363 
3364 	return;
3365 
3366 not_found:
3367 	neg.handle = ev->handle;
3368 	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
3369 	hci_dev_unlock(hdev);
3370 }
3371 
3372 static inline void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
3373 {
3374 	struct hci_ev_le_meta *le_ev = (void *) skb->data;
3375 
3376 	skb_pull(skb, sizeof(*le_ev));
3377 
3378 	switch (le_ev->subevent) {
3379 	case HCI_EV_LE_CONN_COMPLETE:
3380 		hci_le_conn_complete_evt(hdev, skb);
3381 		break;
3382 
3383 	case HCI_EV_LE_ADVERTISING_REPORT:
3384 		hci_le_adv_report_evt(hdev, skb);
3385 		break;
3386 
3387 	case HCI_EV_LE_LTK_REQ:
3388 		hci_le_ltk_request_evt(hdev, skb);
3389 		break;
3390 
3391 	default:
3392 		break;
3393 	}
3394 }
3395 
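/* Entry point for all HCI event packets: strip the event header and
 * dispatch to the handler registered for the event code.
 */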
3396 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
3397 {
3398 	struct hci_event_hdr *hdr = (void *) skb->data;
3399 	__u8 event = hdr->evt;
3400 
3401 	skb_pull(skb, HCI_EVENT_HDR_SIZE);
3402 
3403 	switch (event) {
3404 	case HCI_EV_INQUIRY_COMPLETE:
3405 		hci_inquiry_complete_evt(hdev, skb);
3406 		break;
3407 
3408 	case HCI_EV_INQUIRY_RESULT:
3409 		hci_inquiry_result_evt(hdev, skb);
3410 		break;
3411 
3412 	case HCI_EV_CONN_COMPLETE:
3413 		hci_conn_complete_evt(hdev, skb);
3414 		break;
3415 
3416 	case HCI_EV_CONN_REQUEST:
3417 		hci_conn_request_evt(hdev, skb);
3418 		break;
3419 
3420 	case HCI_EV_DISCONN_COMPLETE:
3421 		hci_disconn_complete_evt(hdev, skb);
3422 		break;
3423 
3424 	case HCI_EV_AUTH_COMPLETE:
3425 		hci_auth_complete_evt(hdev, skb);
3426 		break;
3427 
3428 	case HCI_EV_REMOTE_NAME:
3429 		hci_remote_name_evt(hdev, skb);
3430 		break;
3431 
3432 	case HCI_EV_ENCRYPT_CHANGE:
3433 		hci_encrypt_change_evt(hdev, skb);
3434 		break;
3435 
3436 	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
3437 		hci_change_link_key_complete_evt(hdev, skb);
3438 		break;
3439 
3440 	case HCI_EV_REMOTE_FEATURES:
3441 		hci_remote_features_evt(hdev, skb);
3442 		break;
3443 
3444 	case HCI_EV_REMOTE_VERSION:
3445 		hci_remote_version_evt(hdev, skb);
3446 		break;
3447 
3448 	case HCI_EV_QOS_SETUP_COMPLETE:
3449 		hci_qos_setup_complete_evt(hdev, skb);
3450 		break;
3451 
3452 	case HCI_EV_CMD_COMPLETE:
3453 		hci_cmd_complete_evt(hdev, skb);
3454 		break;
3455 
3456 	case HCI_EV_CMD_STATUS:
3457 		hci_cmd_status_evt(hdev, skb);
3458 		break;
3459 
3460 	case HCI_EV_ROLE_CHANGE:
3461 		hci_role_change_evt(hdev, skb);
3462 		break;
3463 
3464 	case HCI_EV_NUM_COMP_PKTS:
3465 		hci_num_comp_pkts_evt(hdev, skb);
3466 		break;
3467 
3468 	case HCI_EV_MODE_CHANGE:
3469 		hci_mode_change_evt(hdev, skb);
3470 		break;
3471 
3472 	case HCI_EV_PIN_CODE_REQ:
3473 		hci_pin_code_request_evt(hdev, skb);
3474 		break;
3475 
3476 	case HCI_EV_LINK_KEY_REQ:
3477 		hci_link_key_request_evt(hdev, skb);
3478 		break;
3479 
3480 	case HCI_EV_LINK_KEY_NOTIFY:
3481 		hci_link_key_notify_evt(hdev, skb);
3482 		break;
3483 
3484 	case HCI_EV_CLOCK_OFFSET:
3485 		hci_clock_offset_evt(hdev, skb);
3486 		break;
3487 
3488 	case HCI_EV_PKT_TYPE_CHANGE:
3489 		hci_pkt_type_change_evt(hdev, skb);
3490 		break;
3491 
3492 	case HCI_EV_PSCAN_REP_MODE:
3493 		hci_pscan_rep_mode_evt(hdev, skb);
3494 		break;
3495 
3496 	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
3497 		hci_inquiry_result_with_rssi_evt(hdev, skb);
3498 		break;
3499 
3500 	case HCI_EV_REMOTE_EXT_FEATURES:
3501 		hci_remote_ext_features_evt(hdev, skb);
3502 		break;
3503 
3504 	case HCI_EV_SYNC_CONN_COMPLETE:
3505 		hci_sync_conn_complete_evt(hdev, skb);
3506 		break;
3507 
3508 	case HCI_EV_SYNC_CONN_CHANGED:
3509 		hci_sync_conn_changed_evt(hdev, skb);
3510 		break;
3511 
3512 	case HCI_EV_SNIFF_SUBRATE:
3513 		hci_sniff_subrate_evt(hdev, skb);
3514 		break;
3515 
3516 	case HCI_EV_EXTENDED_INQUIRY_RESULT:
3517 		hci_extended_inquiry_result_evt(hdev, skb);
3518 		break;
3519 
3520 	case HCI_EV_IO_CAPA_REQUEST:
3521 		hci_io_capa_request_evt(hdev, skb);
3522 		break;
3523 
3524 	case HCI_EV_IO_CAPA_REPLY:
3525 		hci_io_capa_reply_evt(hdev, skb);
3526 		break;
3527 
3528 	case HCI_EV_USER_CONFIRM_REQUEST:
3529 		hci_user_confirm_request_evt(hdev, skb);
3530 		break;
3531 
3532 	case HCI_EV_USER_PASSKEY_REQUEST:
3533 		hci_user_passkey_request_evt(hdev, skb);
3534 		break;
3535 
3536 	case HCI_EV_SIMPLE_PAIR_COMPLETE:
3537 		hci_simple_pair_complete_evt(hdev, skb);
3538 		break;
3539 
3540 	case HCI_EV_REMOTE_HOST_FEATURES:
3541 		hci_remote_host_features_evt(hdev, skb);
3542 		break;
3543 
3544 	case HCI_EV_LE_META:
3545 		hci_le_meta_evt(hdev, skb);
3546 		break;
3547 
3548 	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
3549 		hci_remote_oob_data_request_evt(hdev, skb);
3550 		break;
3551 
3552 	case HCI_EV_NUM_COMP_BLOCKS:
3553 		hci_num_comp_blocks_evt(hdev, skb);
3554 		break;
3555 
3556 	default:
3557 		BT_DBG("%s event 0x%x", hdev->name, event);
3558 		break;
3559 	}
3560 
3561 	kfree_skb(skb);
3562 	hdev->stat.evt_rx++;
3563 }
3564