xref: /openbmc/linux/net/bluetooth/hci_event.c (revision 63dc02bd)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4 
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI event handling. */
26 
27 #include <linux/module.h>
28 
29 #include <linux/types.h>
30 #include <linux/errno.h>
31 #include <linux/kernel.h>
32 #include <linux/slab.h>
33 #include <linux/poll.h>
34 #include <linux/fcntl.h>
35 #include <linux/init.h>
36 #include <linux/skbuff.h>
37 #include <linux/interrupt.h>
38 #include <net/sock.h>
39 
40 #include <linux/uaccess.h>
41 #include <asm/unaligned.h>
42 
43 #include <net/bluetooth/bluetooth.h>
44 #include <net/bluetooth/hci_core.h>
45 
46 /* Handle HCI Event packets */
47 
48 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
49 {
50 	__u8 status = *((__u8 *) skb->data);
51 
52 	BT_DBG("%s status 0x%x", hdev->name, status);
53 
54 	if (status) {
55 		hci_dev_lock(hdev);
56 		mgmt_stop_discovery_failed(hdev, status);
57 		hci_dev_unlock(hdev);
58 		return;
59 	}
60 
61 	clear_bit(HCI_INQUIRY, &hdev->flags);
62 
63 	hci_dev_lock(hdev);
64 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
65 	hci_dev_unlock(hdev);
66 
67 	hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);
68 
69 	hci_conn_check_pending(hdev);
70 }
71 
72 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
73 {
74 	__u8 status = *((__u8 *) skb->data);
75 
76 	BT_DBG("%s status 0x%x", hdev->name, status);
77 
78 	if (status)
79 		return;
80 
81 	hci_conn_check_pending(hdev);
82 }
83 
84 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev, struct sk_buff *skb)
85 {
86 	BT_DBG("%s", hdev->name);
87 }
88 
89 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
90 {
91 	struct hci_rp_role_discovery *rp = (void *) skb->data;
92 	struct hci_conn *conn;
93 
94 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
95 
96 	if (rp->status)
97 		return;
98 
99 	hci_dev_lock(hdev);
100 
101 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
102 	if (conn) {
103 		if (rp->role)
104 			conn->link_mode &= ~HCI_LM_MASTER;
105 		else
106 			conn->link_mode |= HCI_LM_MASTER;
107 	}
108 
109 	hci_dev_unlock(hdev);
110 }
111 
112 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
113 {
114 	struct hci_rp_read_link_policy *rp = (void *) skb->data;
115 	struct hci_conn *conn;
116 
117 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
118 
119 	if (rp->status)
120 		return;
121 
122 	hci_dev_lock(hdev);
123 
124 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
125 	if (conn)
126 		conn->link_policy = __le16_to_cpu(rp->policy);
127 
128 	hci_dev_unlock(hdev);
129 }
130 
131 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
132 {
133 	struct hci_rp_write_link_policy *rp = (void *) skb->data;
134 	struct hci_conn *conn;
135 	void *sent;
136 
137 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
138 
139 	if (rp->status)
140 		return;
141 
142 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
143 	if (!sent)
144 		return;
145 
146 	hci_dev_lock(hdev);
147 
148 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
149 	if (conn)
150 		conn->link_policy = get_unaligned_le16(sent + 2);
151 
152 	hci_dev_unlock(hdev);
153 }
154 
155 static void hci_cc_read_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
156 {
157 	struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
158 
159 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
160 
161 	if (rp->status)
162 		return;
163 
164 	hdev->link_policy = __le16_to_cpu(rp->policy);
165 }
166 
167 static void hci_cc_write_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
168 {
169 	__u8 status = *((__u8 *) skb->data);
170 	void *sent;
171 
172 	BT_DBG("%s status 0x%x", hdev->name, status);
173 
174 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
175 	if (!sent)
176 		return;
177 
178 	if (!status)
179 		hdev->link_policy = get_unaligned_le16(sent);
180 
181 	hci_req_complete(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, status);
182 }
183 
184 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
185 {
186 	__u8 status = *((__u8 *) skb->data);
187 
188 	BT_DBG("%s status 0x%x", hdev->name, status);
189 
190 	clear_bit(HCI_RESET, &hdev->flags);
191 
192 	hci_req_complete(hdev, HCI_OP_RESET, status);
193 
194 	/* Reset all non-persistent flags */
195 	hdev->dev_flags &= ~(BIT(HCI_LE_SCAN) | BIT(HCI_PENDING_CLASS));
196 
197 	hdev->discovery.state = DISCOVERY_STOPPED;
198 }
199 
200 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
201 {
202 	__u8 status = *((__u8 *) skb->data);
203 	void *sent;
204 
205 	BT_DBG("%s status 0x%x", hdev->name, status);
206 
207 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
208 	if (!sent)
209 		return;
210 
211 	hci_dev_lock(hdev);
212 
213 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
214 		mgmt_set_local_name_complete(hdev, sent, status);
215 	else if (!status)
216 		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
217 
218 	hci_dev_unlock(hdev);
219 
220 	hci_req_complete(hdev, HCI_OP_WRITE_LOCAL_NAME, status);
221 }
222 
223 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
224 {
225 	struct hci_rp_read_local_name *rp = (void *) skb->data;
226 
227 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
228 
229 	if (rp->status)
230 		return;
231 
232 	if (test_bit(HCI_SETUP, &hdev->dev_flags))
233 		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
234 }
235 
236 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
237 {
238 	__u8 status = *((__u8 *) skb->data);
239 	void *sent;
240 
241 	BT_DBG("%s status 0x%x", hdev->name, status);
242 
243 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
244 	if (!sent)
245 		return;
246 
247 	if (!status) {
248 		__u8 param = *((__u8 *) sent);
249 
250 		if (param == AUTH_ENABLED)
251 			set_bit(HCI_AUTH, &hdev->flags);
252 		else
253 			clear_bit(HCI_AUTH, &hdev->flags);
254 	}
255 
256 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
257 		mgmt_auth_enable_complete(hdev, status);
258 
259 	hci_req_complete(hdev, HCI_OP_WRITE_AUTH_ENABLE, status);
260 }
261 
262 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
263 {
264 	__u8 status = *((__u8 *) skb->data);
265 	void *sent;
266 
267 	BT_DBG("%s status 0x%x", hdev->name, status);
268 
269 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
270 	if (!sent)
271 		return;
272 
273 	if (!status) {
274 		__u8 param = *((__u8 *) sent);
275 
276 		if (param)
277 			set_bit(HCI_ENCRYPT, &hdev->flags);
278 		else
279 			clear_bit(HCI_ENCRYPT, &hdev->flags);
280 	}
281 
282 	hci_req_complete(hdev, HCI_OP_WRITE_ENCRYPT_MODE, status);
283 }
284 
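/* Command Complete handler for HCI_Write_Scan_Enable: on success, update the
 * HCI_ISCAN/HCI_PSCAN flags from the parameter that was written and report
 * discoverable/connectable changes to the management interface; on failure,
 * clear the discoverable timeout and notify mgmt of the error. */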
285 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
286 {
287 	__u8 param, status = *((__u8 *) skb->data);
288 	int old_pscan, old_iscan;
289 	void *sent;
290 
291 	BT_DBG("%s status 0x%x", hdev->name, status);
292 
293 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
294 	if (!sent)
295 		return;
296 
297 	param = *((__u8 *) sent);
298 
299 	hci_dev_lock(hdev);
300 
301 	if (status != 0) {
302 		mgmt_write_scan_failed(hdev, param, status);
303 		hdev->discov_timeout = 0;
304 		goto done;
305 	}
306 
307 	old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
308 	old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
309 
310 	if (param & SCAN_INQUIRY) {
311 		set_bit(HCI_ISCAN, &hdev->flags);
312 		if (!old_iscan)
313 			mgmt_discoverable(hdev, 1);
314 		if (hdev->discov_timeout > 0) {
315 			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
316 			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
317 									to);
318 		}
319 	} else if (old_iscan)
320 		mgmt_discoverable(hdev, 0);
321 
322 	if (param & SCAN_PAGE) {
323 		set_bit(HCI_PSCAN, &hdev->flags);
324 		if (!old_pscan)
325 			mgmt_connectable(hdev, 1);
326 	} else if (old_pscan)
327 		mgmt_connectable(hdev, 0);
328 
329 done:
330 	hci_dev_unlock(hdev);
331 	hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status);
332 }
333 
334 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
335 {
336 	struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
337 
338 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
339 
340 	if (rp->status)
341 		return;
342 
343 	memcpy(hdev->dev_class, rp->dev_class, 3);
344 
345 	BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
346 		hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
347 }
348 
349 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
350 {
351 	__u8 status = *((__u8 *) skb->data);
352 	void *sent;
353 
354 	BT_DBG("%s status 0x%x", hdev->name, status);
355 
356 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
357 	if (!sent)
358 		return;
359 
360 	hci_dev_lock(hdev);
361 
362 	if (status == 0)
363 		memcpy(hdev->dev_class, sent, 3);
364 
365 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
366 		mgmt_set_class_of_dev_complete(hdev, sent, status);
367 
368 	hci_dev_unlock(hdev);
369 }
370 
371 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
372 {
373 	struct hci_rp_read_voice_setting *rp = (void *) skb->data;
374 	__u16 setting;
375 
376 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
377 
378 	if (rp->status)
379 		return;
380 
381 	setting = __le16_to_cpu(rp->voice_setting);
382 
383 	if (hdev->voice_setting == setting)
384 		return;
385 
386 	hdev->voice_setting = setting;
387 
388 	BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
389 
390 	if (hdev->notify)
391 		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
392 }
393 
394 static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
395 {
396 	__u8 status = *((__u8 *) skb->data);
397 	__u16 setting;
398 	void *sent;
399 
400 	BT_DBG("%s status 0x%x", hdev->name, status);
401 
402 	if (status)
403 		return;
404 
405 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
406 	if (!sent)
407 		return;
408 
409 	setting = get_unaligned_le16(sent);
410 
411 	if (hdev->voice_setting == setting)
412 		return;
413 
414 	hdev->voice_setting = setting;
415 
416 	BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
417 
418 	if (hdev->notify)
419 		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
420 }
421 
422 static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
423 {
424 	__u8 status = *((__u8 *) skb->data);
425 
426 	BT_DBG("%s status 0x%x", hdev->name, status);
427 
428 	hci_req_complete(hdev, HCI_OP_HOST_BUFFER_SIZE, status);
429 }
430 
431 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
432 {
433 	__u8 status = *((__u8 *) skb->data);
434 	void *sent;
435 
436 	BT_DBG("%s status 0x%x", hdev->name, status);
437 
438 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
439 	if (!sent)
440 		return;
441 
442 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
443 		mgmt_ssp_enable_complete(hdev, *((u8 *) sent), status);
444 	else if (!status) {
445 		if (*((u8 *) sent))
446 			set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
447 		else
448 			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
449 	}
450 }
451 
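/* Pick the best inquiry mode the controller can do: 2 for extended inquiry
 * result, 1 for inquiry result with RSSI (including a few controllers that
 * support it without advertising the LMP feature), 0 for standard mode. */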
452 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
453 {
454 	if (hdev->features[6] & LMP_EXT_INQ)
455 		return 2;
456 
457 	if (hdev->features[3] & LMP_RSSI_INQ)
458 		return 1;
459 
460 	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
461 						hdev->lmp_subver == 0x0757)
462 		return 1;
463 
464 	if (hdev->manufacturer == 15) {
465 		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
466 			return 1;
467 		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
468 			return 1;
469 		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
470 			return 1;
471 	}
472 
473 	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
474 						hdev->lmp_subver == 0x1805)
475 		return 1;
476 
477 	return 0;
478 }
479 
480 static void hci_setup_inquiry_mode(struct hci_dev *hdev)
481 {
482 	u8 mode;
483 
484 	mode = hci_get_inquiry_mode(hdev);
485 
486 	hci_send_cmd(hdev, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
487 }
488 
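/* Build the event mask for HCI_Set_Event_Mask from the controller's LMP
 * features so that only events the controller can actually generate are
 * unmasked. */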
489 static void hci_setup_event_mask(struct hci_dev *hdev)
490 {
491 	/* The second byte is 0xff instead of 0x9f (two reserved bits
492 	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
493 	 * command otherwise */
494 	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
495 
496 	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
497 	 * any event mask for pre-1.2 devices */
498 	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
499 		return;
500 
501 	events[4] |= 0x01; /* Flow Specification Complete */
502 	events[4] |= 0x02; /* Inquiry Result with RSSI */
503 	events[4] |= 0x04; /* Read Remote Extended Features Complete */
504 	events[5] |= 0x08; /* Synchronous Connection Complete */
505 	events[5] |= 0x10; /* Synchronous Connection Changed */
506 
507 	if (hdev->features[3] & LMP_RSSI_INQ)
508 		events[4] |= 0x02; /* Inquiry Result with RSSI */
509 
510 	if (hdev->features[5] & LMP_SNIFF_SUBR)
511 		events[5] |= 0x20; /* Sniff Subrating */
512 
513 	if (hdev->features[5] & LMP_PAUSE_ENC)
514 		events[5] |= 0x80; /* Encryption Key Refresh Complete */
515 
516 	if (hdev->features[6] & LMP_EXT_INQ)
517 		events[5] |= 0x40; /* Extended Inquiry Result */
518 
519 	if (hdev->features[6] & LMP_NO_FLUSH)
520 		events[7] |= 0x01; /* Enhanced Flush Complete */
521 
522 	if (hdev->features[7] & LMP_LSTO)
523 		events[6] |= 0x80; /* Link Supervision Timeout Changed */
524 
525 	if (hdev->features[6] & LMP_SIMPLE_PAIR) {
526 		events[6] |= 0x01;	/* IO Capability Request */
527 		events[6] |= 0x02;	/* IO Capability Response */
528 		events[6] |= 0x04;	/* User Confirmation Request */
529 		events[6] |= 0x08;	/* User Passkey Request */
530 		events[6] |= 0x10;	/* Remote OOB Data Request */
531 		events[6] |= 0x20;	/* Simple Pairing Complete */
532 		events[7] |= 0x04;	/* User Passkey Notification */
533 		events[7] |= 0x08;	/* Keypress Notification */
534 		events[7] |= 0x10;	/* Remote Host Supported
535 					 * Features Notification */
536 	}
537 
538 	if (hdev->features[4] & LMP_LE)
539 		events[7] |= 0x20;	/* LE Meta-Event */
540 
541 	hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
542 }
543 
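/* BR/EDR controller setup, run from the Read Local Version handler while
 * HCI_INIT is set: program the event mask, read the supported commands,
 * enable SSP or clear the EIR data, select the inquiry mode, read the
 * inquiry response TX power and extended features, and optionally enable
 * authentication. */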
544 static void hci_setup(struct hci_dev *hdev)
545 {
546 	if (hdev->dev_type != HCI_BREDR)
547 		return;
548 
549 	hci_setup_event_mask(hdev);
550 
551 	if (hdev->hci_ver > BLUETOOTH_VER_1_1)
552 		hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
553 
554 	if (hdev->features[6] & LMP_SIMPLE_PAIR) {
555 		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
556 			u8 mode = 0x01;
557 			hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE,
558 				     sizeof(mode), &mode);
559 		} else {
560 			struct hci_cp_write_eir cp;
561 
562 			memset(hdev->eir, 0, sizeof(hdev->eir));
563 			memset(&cp, 0, sizeof(cp));
564 
565 			hci_send_cmd(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
566 		}
567 	}
568 
569 	if (hdev->features[3] & LMP_RSSI_INQ)
570 		hci_setup_inquiry_mode(hdev);
571 
572 	if (hdev->features[7] & LMP_INQ_TX_PWR)
573 		hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
574 
575 	if (hdev->features[7] & LMP_EXTFEATURES) {
576 		struct hci_cp_read_local_ext_features cp;
577 
578 		cp.page = 0x01;
579 		hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, sizeof(cp),
580 			     &cp);
581 	}
582 
583 	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
584 		u8 enable = 1;
585 		hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
586 			     &enable);
587 	}
588 }
589 
590 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
591 {
592 	struct hci_rp_read_local_version *rp = (void *) skb->data;
593 
594 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
595 
596 	if (rp->status)
597 		goto done;
598 
599 	hdev->hci_ver = rp->hci_ver;
600 	hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
601 	hdev->lmp_ver = rp->lmp_ver;
602 	hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
603 	hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
604 
605 	BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name,
606 					hdev->manufacturer,
607 					hdev->hci_ver, hdev->hci_rev);
608 
609 	if (test_bit(HCI_INIT, &hdev->flags))
610 		hci_setup(hdev);
611 
612 done:
613 	hci_req_complete(hdev, HCI_OP_READ_LOCAL_VERSION, rp->status);
614 }
615 
616 static void hci_setup_link_policy(struct hci_dev *hdev)
617 {
618 	u16 link_policy = 0;
619 
620 	if (hdev->features[0] & LMP_RSWITCH)
621 		link_policy |= HCI_LP_RSWITCH;
622 	if (hdev->features[0] & LMP_HOLD)
623 		link_policy |= HCI_LP_HOLD;
624 	if (hdev->features[0] & LMP_SNIFF)
625 		link_policy |= HCI_LP_SNIFF;
626 	if (hdev->features[1] & LMP_PARK)
627 		link_policy |= HCI_LP_PARK;
628 
629 	link_policy = cpu_to_le16(link_policy);
630 	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(link_policy),
631 		     &link_policy);
632 }
633 
634 static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb)
635 {
636 	struct hci_rp_read_local_commands *rp = (void *) skb->data;
637 
638 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
639 
640 	if (rp->status)
641 		goto done;
642 
643 	memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
644 
645 	if (test_bit(HCI_INIT, &hdev->flags) && (hdev->commands[5] & 0x10))
646 		hci_setup_link_policy(hdev);
647 
648 done:
649 	hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status);
650 }
651 
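/* Command Complete handler for HCI_Read_Local_Supported_Features: cache the
 * LMP feature bits and derive the ACL packet types and (e)SCO packet types
 * supported by the controller. */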
652 static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb)
653 {
654 	struct hci_rp_read_local_features *rp = (void *) skb->data;
655 
656 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
657 
658 	if (rp->status)
659 		return;
660 
661 	memcpy(hdev->features, rp->features, 8);
662 
663 	/* Adjust default settings according to the features
664 	 * supported by the device. */
665 
666 	if (hdev->features[0] & LMP_3SLOT)
667 		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
668 
669 	if (hdev->features[0] & LMP_5SLOT)
670 		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
671 
672 	if (hdev->features[1] & LMP_HV2) {
673 		hdev->pkt_type  |= (HCI_HV2);
674 		hdev->esco_type |= (ESCO_HV2);
675 	}
676 
677 	if (hdev->features[1] & LMP_HV3) {
678 		hdev->pkt_type  |= (HCI_HV3);
679 		hdev->esco_type |= (ESCO_HV3);
680 	}
681 
682 	if (hdev->features[3] & LMP_ESCO)
683 		hdev->esco_type |= (ESCO_EV3);
684 
685 	if (hdev->features[4] & LMP_EV4)
686 		hdev->esco_type |= (ESCO_EV4);
687 
688 	if (hdev->features[4] & LMP_EV5)
689 		hdev->esco_type |= (ESCO_EV5);
690 
691 	if (hdev->features[5] & LMP_EDR_ESCO_2M)
692 		hdev->esco_type |= (ESCO_2EV3);
693 
694 	if (hdev->features[5] & LMP_EDR_ESCO_3M)
695 		hdev->esco_type |= (ESCO_3EV3);
696 
697 	if (hdev->features[5] & LMP_EDR_3S_ESCO)
698 		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
699 
700 	BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
701 					hdev->features[0], hdev->features[1],
702 					hdev->features[2], hdev->features[3],
703 					hdev->features[4], hdev->features[5],
704 					hdev->features[6], hdev->features[7]);
705 }
706 
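/* Sync the Write_LE_Host_Supported setting with the current LE enable state,
 * sending the command only when the desired value differs from the cached
 * host features. */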
707 static void hci_set_le_support(struct hci_dev *hdev)
708 {
709 	struct hci_cp_write_le_host_supported cp;
710 
711 	memset(&cp, 0, sizeof(cp));
712 
713 	if (enable_le && test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
714 		cp.le = 1;
715 		cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR);
716 	}
717 
718 	if (cp.le != !!(hdev->host_features[0] & LMP_HOST_LE))
719 		hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
720 			     &cp);
721 }
722 
723 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
724 							struct sk_buff *skb)
725 {
726 	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
727 
728 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
729 
730 	if (rp->status)
731 		goto done;
732 
733 	switch (rp->page) {
734 	case 0:
735 		memcpy(hdev->features, rp->features, 8);
736 		break;
737 	case 1:
738 		memcpy(hdev->host_features, rp->features, 8);
739 		break;
740 	}
741 
742 	if (test_bit(HCI_INIT, &hdev->flags) && hdev->features[4] & LMP_LE)
743 		hci_set_le_support(hdev);
744 
745 done:
746 	hci_req_complete(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, rp->status);
747 }
748 
749 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
750 						struct sk_buff *skb)
751 {
752 	struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
753 
754 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
755 
756 	if (rp->status)
757 		return;
758 
759 	hdev->flow_ctl_mode = rp->mode;
760 
761 	hci_req_complete(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, rp->status);
762 }
763 
764 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
765 {
766 	struct hci_rp_read_buffer_size *rp = (void *) skb->data;
767 
768 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
769 
770 	if (rp->status)
771 		return;
772 
773 	hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
774 	hdev->sco_mtu  = rp->sco_mtu;
775 	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
776 	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
777 
778 	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
779 		hdev->sco_mtu  = 64;
780 		hdev->sco_pkts = 8;
781 	}
782 
783 	hdev->acl_cnt = hdev->acl_pkts;
784 	hdev->sco_cnt = hdev->sco_pkts;
785 
786 	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name,
787 					hdev->acl_mtu, hdev->acl_pkts,
788 					hdev->sco_mtu, hdev->sco_pkts);
789 }
790 
791 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
792 {
793 	struct hci_rp_read_bd_addr *rp = (void *) skb->data;
794 
795 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
796 
797 	if (!rp->status)
798 		bacpy(&hdev->bdaddr, &rp->bdaddr);
799 
800 	hci_req_complete(hdev, HCI_OP_READ_BD_ADDR, rp->status);
801 }
802 
803 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
804 							struct sk_buff *skb)
805 {
806 	struct hci_rp_read_data_block_size *rp = (void *) skb->data;
807 
808 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
809 
810 	if (rp->status)
811 		return;
812 
813 	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
814 	hdev->block_len = __le16_to_cpu(rp->block_len);
815 	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
816 
817 	hdev->block_cnt = hdev->num_blocks;
818 
819 	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
820 					hdev->block_cnt, hdev->block_len);
821 
822 	hci_req_complete(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, rp->status);
823 }
824 
825 static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
826 {
827 	__u8 status = *((__u8 *) skb->data);
828 
829 	BT_DBG("%s status 0x%x", hdev->name, status);
830 
831 	hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
832 }
833 
834 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
835 		struct sk_buff *skb)
836 {
837 	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
838 
839 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
840 
841 	if (rp->status)
842 		return;
843 
844 	hdev->amp_status = rp->amp_status;
845 	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
846 	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
847 	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
848 	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
849 	hdev->amp_type = rp->amp_type;
850 	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
851 	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
852 	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
853 	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
854 
855 	hci_req_complete(hdev, HCI_OP_READ_LOCAL_AMP_INFO, rp->status);
856 }
857 
858 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
859 							struct sk_buff *skb)
860 {
861 	__u8 status = *((__u8 *) skb->data);
862 
863 	BT_DBG("%s status 0x%x", hdev->name, status);
864 
865 	hci_req_complete(hdev, HCI_OP_DELETE_STORED_LINK_KEY, status);
866 }
867 
868 static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
869 {
870 	__u8 status = *((__u8 *) skb->data);
871 
872 	BT_DBG("%s status 0x%x", hdev->name, status);
873 
874 	hci_req_complete(hdev, HCI_OP_SET_EVENT_MASK, status);
875 }
876 
877 static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
878 							struct sk_buff *skb)
879 {
880 	__u8 status = *((__u8 *) skb->data);
881 
882 	BT_DBG("%s status 0x%x", hdev->name, status);
883 
884 	hci_req_complete(hdev, HCI_OP_WRITE_INQUIRY_MODE, status);
885 }
886 
887 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
888 							struct sk_buff *skb)
889 {
890 	__u8 status = *((__u8 *) skb->data);
891 
892 	BT_DBG("%s status 0x%x", hdev->name, status);
893 
894 	hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, status);
895 }
896 
897 static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
898 {
899 	__u8 status = *((__u8 *) skb->data);
900 
901 	BT_DBG("%s status 0x%x", hdev->name, status);
902 
903 	hci_req_complete(hdev, HCI_OP_SET_EVENT_FLT, status);
904 }
905 
906 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
907 {
908 	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
909 	struct hci_cp_pin_code_reply *cp;
910 	struct hci_conn *conn;
911 
912 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
913 
914 	hci_dev_lock(hdev);
915 
916 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
917 		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
918 
919 	if (rp->status != 0)
920 		goto unlock;
921 
922 	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
923 	if (!cp)
924 		goto unlock;
925 
926 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
927 	if (conn)
928 		conn->pin_length = cp->pin_len;
929 
930 unlock:
931 	hci_dev_unlock(hdev);
932 }
933 
934 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
935 {
936 	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
937 
938 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
939 
940 	hci_dev_lock(hdev);
941 
942 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
943 		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
944 								rp->status);
945 
946 	hci_dev_unlock(hdev);
947 }
948 
949 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
950 				       struct sk_buff *skb)
951 {
952 	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
953 
954 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
955 
956 	if (rp->status)
957 		return;
958 
959 	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
960 	hdev->le_pkts = rp->le_max_pkt;
961 
962 	hdev->le_cnt = hdev->le_pkts;
963 
964 	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
965 
966 	hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status);
967 }
968 
969 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
970 {
971 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
972 
973 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
974 
975 	hci_dev_lock(hdev);
976 
977 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
978 		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
979 						 rp->status);
980 
981 	hci_dev_unlock(hdev);
982 }
983 
984 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
985 							struct sk_buff *skb)
986 {
987 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
988 
989 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
990 
991 	hci_dev_lock(hdev);
992 
993 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
994 		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
995 						     ACL_LINK, 0, rp->status);
996 
997 	hci_dev_unlock(hdev);
998 }
999 
1000 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
1001 {
1002 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1003 
1004 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
1005 
1006 	hci_dev_lock(hdev);
1007 
1008 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
1009 		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1010 						 0, rp->status);
1011 
1012 	hci_dev_unlock(hdev);
1013 }
1014 
1015 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
1016 							struct sk_buff *skb)
1017 {
1018 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1019 
1020 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
1021 
1022 	hci_dev_lock(hdev);
1023 
1024 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
1025 		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1026 						     ACL_LINK, 0, rp->status);
1027 
1028 	hci_dev_unlock(hdev);
1029 }
1030 
1031 static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
1032 							struct sk_buff *skb)
1033 {
1034 	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1035 
1036 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
1037 
1038 	hci_dev_lock(hdev);
1039 	mgmt_read_local_oob_data_reply_complete(hdev, rp->hash,
1040 						rp->randomizer, rp->status);
1041 	hci_dev_unlock(hdev);
1042 }
1043 
1044 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1045 {
1046 	__u8 status = *((__u8 *) skb->data);
1047 
1048 	BT_DBG("%s status 0x%x", hdev->name, status);
1049 
1050 	hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_PARAM, status);
1051 
1052 	if (status) {
1053 		hci_dev_lock(hdev);
1054 		mgmt_start_discovery_failed(hdev, status);
1055 		hci_dev_unlock(hdev);
1056 		return;
1057 	}
1058 }
1059 
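/* Command Complete handler for LE_Set_Scan_Enable: track the HCI_LE_SCAN flag
 * and move the discovery state machine forward depending on whether scanning
 * was enabled or disabled. */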
1060 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1061 					struct sk_buff *skb)
1062 {
1063 	struct hci_cp_le_set_scan_enable *cp;
1064 	__u8 status = *((__u8 *) skb->data);
1065 
1066 	BT_DBG("%s status 0x%x", hdev->name, status);
1067 
1068 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1069 	if (!cp)
1070 		return;
1071 
1072 	switch (cp->enable) {
1073 	case LE_SCANNING_ENABLED:
1074 		hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_ENABLE, status);
1075 
1076 		if (status) {
1077 			hci_dev_lock(hdev);
1078 			mgmt_start_discovery_failed(hdev, status);
1079 			hci_dev_unlock(hdev);
1080 			return;
1081 		}
1082 
1083 		set_bit(HCI_LE_SCAN, &hdev->dev_flags);
1084 
1085 		cancel_delayed_work_sync(&hdev->adv_work);
1086 
1087 		hci_dev_lock(hdev);
1088 		hci_adv_entries_clear(hdev);
1089 		hci_discovery_set_state(hdev, DISCOVERY_FINDING);
1090 		hci_dev_unlock(hdev);
1091 		break;
1092 
1093 	case LE_SCANNING_DISABLED:
1094 		if (status)
1095 			return;
1096 
1097 		clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
1098 
1099 		schedule_delayed_work(&hdev->adv_work, ADV_CLEAR_TIMEOUT);
1100 
1101 		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED) {
1102 			mgmt_interleaved_discovery(hdev);
1103 		} else {
1104 			hci_dev_lock(hdev);
1105 			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1106 			hci_dev_unlock(hdev);
1107 		}
1108 
1109 		break;
1110 
1111 	default:
1112 		BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
1113 		break;
1114 	}
1115 }
1116 
1117 static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb)
1118 {
1119 	struct hci_rp_le_ltk_reply *rp = (void *) skb->data;
1120 
1121 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
1122 
1123 	if (rp->status)
1124 		return;
1125 
1126 	hci_req_complete(hdev, HCI_OP_LE_LTK_REPLY, rp->status);
1127 }
1128 
1129 static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
1130 {
1131 	struct hci_rp_le_ltk_neg_reply *rp = (void *) skb->data;
1132 
1133 	BT_DBG("%s status 0x%x", hdev->name, rp->status);
1134 
1135 	if (rp->status)
1136 		return;
1137 
1138 	hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status);
1139 }
1140 
1141 static inline void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1142 							struct sk_buff *skb)
1143 {
1144 	struct hci_cp_write_le_host_supported *sent;
1145 	__u8 status = *((__u8 *) skb->data);
1146 
1147 	BT_DBG("%s status 0x%x", hdev->name, status);
1148 
1149 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1150 	if (!sent)
1151 		return;
1152 
1153 	if (!status) {
1154 		if (sent->le)
1155 			hdev->host_features[0] |= LMP_HOST_LE;
1156 		else
1157 			hdev->host_features[0] &= ~LMP_HOST_LE;
1158 	}
1159 
1160 	if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
1161 					!test_bit(HCI_INIT, &hdev->flags))
1162 		mgmt_le_enable_complete(hdev, sent->le, status);
1163 
1164 	hci_req_complete(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, status);
1165 }
1166 
1167 static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1168 {
1169 	BT_DBG("%s status 0x%x", hdev->name, status);
1170 
1171 	if (status) {
1172 		hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1173 		hci_conn_check_pending(hdev);
1174 		hci_dev_lock(hdev);
1175 		if (test_bit(HCI_MGMT, &hdev->dev_flags))
1176 			mgmt_start_discovery_failed(hdev, status);
1177 		hci_dev_unlock(hdev);
1178 		return;
1179 	}
1180 
1181 	set_bit(HCI_INQUIRY, &hdev->flags);
1182 
1183 	hci_dev_lock(hdev);
1184 	hci_discovery_set_state(hdev, DISCOVERY_FINDING);
1185 	hci_dev_unlock(hdev);
1186 }
1187 
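/* Command Status handler for HCI_Create_Connection: on error, retry or tear
 * down the pending connection; on success, make sure a connection object
 * exists for the outgoing ACL link. */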
1188 static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1189 {
1190 	struct hci_cp_create_conn *cp;
1191 	struct hci_conn *conn;
1192 
1193 	BT_DBG("%s status 0x%x", hdev->name, status);
1194 
1195 	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1196 	if (!cp)
1197 		return;
1198 
1199 	hci_dev_lock(hdev);
1200 
1201 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1202 
1203 	BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->bdaddr), conn);
1204 
1205 	if (status) {
1206 		if (conn && conn->state == BT_CONNECT) {
1207 			if (status != 0x0c || conn->attempt > 2) {
1208 				conn->state = BT_CLOSED;
1209 				hci_proto_connect_cfm(conn, status);
1210 				hci_conn_del(conn);
1211 			} else
1212 				conn->state = BT_CONNECT2;
1213 		}
1214 	} else {
1215 		if (!conn) {
1216 			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
1217 			if (conn) {
1218 				conn->out = true;
1219 				conn->link_mode |= HCI_LM_MASTER;
1220 			} else
1221 				BT_ERR("No memory for new connection");
1222 		}
1223 	}
1224 
1225 	hci_dev_unlock(hdev);
1226 }
1227 
1228 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1229 {
1230 	struct hci_cp_add_sco *cp;
1231 	struct hci_conn *acl, *sco;
1232 	__u16 handle;
1233 
1234 	BT_DBG("%s status 0x%x", hdev->name, status);
1235 
1236 	if (!status)
1237 		return;
1238 
1239 	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1240 	if (!cp)
1241 		return;
1242 
1243 	handle = __le16_to_cpu(cp->handle);
1244 
1245 	BT_DBG("%s handle %d", hdev->name, handle);
1246 
1247 	hci_dev_lock(hdev);
1248 
1249 	acl = hci_conn_hash_lookup_handle(hdev, handle);
1250 	if (acl) {
1251 		sco = acl->link;
1252 		if (sco) {
1253 			sco->state = BT_CLOSED;
1254 
1255 			hci_proto_connect_cfm(sco, status);
1256 			hci_conn_del(sco);
1257 		}
1258 	}
1259 
1260 	hci_dev_unlock(hdev);
1261 }
1262 
1263 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1264 {
1265 	struct hci_cp_auth_requested *cp;
1266 	struct hci_conn *conn;
1267 
1268 	BT_DBG("%s status 0x%x", hdev->name, status);
1269 
1270 	if (!status)
1271 		return;
1272 
1273 	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1274 	if (!cp)
1275 		return;
1276 
1277 	hci_dev_lock(hdev);
1278 
1279 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1280 	if (conn) {
1281 		if (conn->state == BT_CONFIG) {
1282 			hci_proto_connect_cfm(conn, status);
1283 			hci_conn_put(conn);
1284 		}
1285 	}
1286 
1287 	hci_dev_unlock(hdev);
1288 }
1289 
1290 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1291 {
1292 	struct hci_cp_set_conn_encrypt *cp;
1293 	struct hci_conn *conn;
1294 
1295 	BT_DBG("%s status 0x%x", hdev->name, status);
1296 
1297 	if (!status)
1298 		return;
1299 
1300 	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1301 	if (!cp)
1302 		return;
1303 
1304 	hci_dev_lock(hdev);
1305 
1306 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1307 	if (conn) {
1308 		if (conn->state == BT_CONFIG) {
1309 			hci_proto_connect_cfm(conn, status);
1310 			hci_conn_put(conn);
1311 		}
1312 	}
1313 
1314 	hci_dev_unlock(hdev);
1315 }
1316 
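/* Decide whether an outgoing connection still in BT_CONFIG needs
 * authentication before being reported as connected: SSP links always do
 * (unless only SDP-level security is required), legacy links only for high
 * security or when MITM protection was requested. */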
1317 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1318 							struct hci_conn *conn)
1319 {
1320 	if (conn->state != BT_CONFIG || !conn->out)
1321 		return 0;
1322 
1323 	if (conn->pending_sec_level == BT_SECURITY_SDP)
1324 		return 0;
1325 
1326 	/* Only request authentication for SSP connections or non-SSP
1327 	 * devices with sec_level HIGH or if MITM protection is requested */
1328 	if (!hci_conn_ssp_enabled(conn) &&
1329 				conn->pending_sec_level != BT_SECURITY_HIGH &&
1330 				!(conn->auth_type & 0x01))
1331 		return 0;
1332 
1333 	return 1;
1334 }
1335 
1336 static inline int hci_resolve_name(struct hci_dev *hdev,
1337 				   struct inquiry_entry *e)
1338 {
1339 	struct hci_cp_remote_name_req cp;
1340 
1341 	memset(&cp, 0, sizeof(cp));
1342 
1343 	bacpy(&cp.bdaddr, &e->data.bdaddr);
1344 	cp.pscan_rep_mode = e->data.pscan_rep_mode;
1345 	cp.pscan_mode = e->data.pscan_mode;
1346 	cp.clock_offset = e->data.clock_offset;
1347 
1348 	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1349 }
1350 
1351 static bool hci_resolve_next_name(struct hci_dev *hdev)
1352 {
1353 	struct discovery_state *discov = &hdev->discovery;
1354 	struct inquiry_entry *e;
1355 
1356 	if (list_empty(&discov->resolve))
1357 		return false;
1358 
1359 	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1360 	if (hci_resolve_name(hdev, e) == 0) {
1361 		e->name_state = NAME_PENDING;
1362 		return true;
1363 	}
1364 
1365 	return false;
1366 }
1367 
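/* Handle a resolved (or failed) remote name: report the device as connected
 * to mgmt if needed, pass the name along, and either start resolving the
 * next pending name or mark discovery as stopped. */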
1368 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
1369 				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
1370 {
1371 	struct discovery_state *discov = &hdev->discovery;
1372 	struct inquiry_entry *e;
1373 
1374 	if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1375 		mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
1376 				      name_len, conn->dev_class);
1377 
1378 	if (discov->state == DISCOVERY_STOPPED)
1379 		return;
1380 
1381 	if (discov->state == DISCOVERY_STOPPING)
1382 		goto discov_complete;
1383 
1384 	if (discov->state != DISCOVERY_RESOLVING)
1385 		return;
1386 
1387 	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
1388 	if (e) {
1389 		e->name_state = NAME_KNOWN;
1390 		list_del(&e->list);
1391 		if (name)
1392 			mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
1393 					 e->data.rssi, name, name_len);
1394 	}
1395 
1396 	if (hci_resolve_next_name(hdev))
1397 		return;
1398 
1399 discov_complete:
1400 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1401 }
1402 
1403 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1404 {
1405 	struct hci_cp_remote_name_req *cp;
1406 	struct hci_conn *conn;
1407 
1408 	BT_DBG("%s status 0x%x", hdev->name, status);
1409 
1410 	/* If successful, wait for the name req complete event before
1411 	 * checking whether authentication is needed */
1412 	if (!status)
1413 		return;
1414 
1415 	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1416 	if (!cp)
1417 		return;
1418 
1419 	hci_dev_lock(hdev);
1420 
1421 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1422 
1423 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
1424 		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
1425 
1426 	if (!conn)
1427 		goto unlock;
1428 
1429 	if (!hci_outgoing_auth_needed(hdev, conn))
1430 		goto unlock;
1431 
1432 	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1433 		struct hci_cp_auth_requested cp;
1434 		cp.handle = __cpu_to_le16(conn->handle);
1435 		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1436 	}
1437 
1438 unlock:
1439 	hci_dev_unlock(hdev);
1440 }
1441 
1442 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1443 {
1444 	struct hci_cp_read_remote_features *cp;
1445 	struct hci_conn *conn;
1446 
1447 	BT_DBG("%s status 0x%x", hdev->name, status);
1448 
1449 	if (!status)
1450 		return;
1451 
1452 	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1453 	if (!cp)
1454 		return;
1455 
1456 	hci_dev_lock(hdev);
1457 
1458 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1459 	if (conn) {
1460 		if (conn->state == BT_CONFIG) {
1461 			hci_proto_connect_cfm(conn, status);
1462 			hci_conn_put(conn);
1463 		}
1464 	}
1465 
1466 	hci_dev_unlock(hdev);
1467 }
1468 
1469 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1470 {
1471 	struct hci_cp_read_remote_ext_features *cp;
1472 	struct hci_conn *conn;
1473 
1474 	BT_DBG("%s status 0x%x", hdev->name, status);
1475 
1476 	if (!status)
1477 		return;
1478 
1479 	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1480 	if (!cp)
1481 		return;
1482 
1483 	hci_dev_lock(hdev);
1484 
1485 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1486 	if (conn) {
1487 		if (conn->state == BT_CONFIG) {
1488 			hci_proto_connect_cfm(conn, status);
1489 			hci_conn_put(conn);
1490 		}
1491 	}
1492 
1493 	hci_dev_unlock(hdev);
1494 }
1495 
1496 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1497 {
1498 	struct hci_cp_setup_sync_conn *cp;
1499 	struct hci_conn *acl, *sco;
1500 	__u16 handle;
1501 
1502 	BT_DBG("%s status 0x%x", hdev->name, status);
1503 
1504 	if (!status)
1505 		return;
1506 
1507 	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1508 	if (!cp)
1509 		return;
1510 
1511 	handle = __le16_to_cpu(cp->handle);
1512 
1513 	BT_DBG("%s handle %d", hdev->name, handle);
1514 
1515 	hci_dev_lock(hdev);
1516 
1517 	acl = hci_conn_hash_lookup_handle(hdev, handle);
1518 	if (acl) {
1519 		sco = acl->link;
1520 		if (sco) {
1521 			sco->state = BT_CLOSED;
1522 
1523 			hci_proto_connect_cfm(sco, status);
1524 			hci_conn_del(sco);
1525 		}
1526 	}
1527 
1528 	hci_dev_unlock(hdev);
1529 }
1530 
1531 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1532 {
1533 	struct hci_cp_sniff_mode *cp;
1534 	struct hci_conn *conn;
1535 
1536 	BT_DBG("%s status 0x%x", hdev->name, status);
1537 
1538 	if (!status)
1539 		return;
1540 
1541 	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1542 	if (!cp)
1543 		return;
1544 
1545 	hci_dev_lock(hdev);
1546 
1547 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1548 	if (conn) {
1549 		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1550 
1551 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1552 			hci_sco_setup(conn, status);
1553 	}
1554 
1555 	hci_dev_unlock(hdev);
1556 }
1557 
1558 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1559 {
1560 	struct hci_cp_exit_sniff_mode *cp;
1561 	struct hci_conn *conn;
1562 
1563 	BT_DBG("%s status 0x%x", hdev->name, status);
1564 
1565 	if (!status)
1566 		return;
1567 
1568 	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1569 	if (!cp)
1570 		return;
1571 
1572 	hci_dev_lock(hdev);
1573 
1574 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1575 	if (conn) {
1576 		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1577 
1578 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1579 			hci_sco_setup(conn, status);
1580 	}
1581 
1582 	hci_dev_unlock(hdev);
1583 }
1584 
1585 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1586 {
1587 	struct hci_cp_disconnect *cp;
1588 	struct hci_conn *conn;
1589 
1590 	if (!status)
1591 		return;
1592 
1593 	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1594 	if (!cp)
1595 		return;
1596 
1597 	hci_dev_lock(hdev);
1598 
1599 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1600 	if (conn)
1601 		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1602 				       conn->dst_type, status);
1603 
1604 	hci_dev_unlock(hdev);
1605 }
1606 
1607 static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
1608 {
1609 	struct hci_cp_le_create_conn *cp;
1610 	struct hci_conn *conn;
1611 
1612 	BT_DBG("%s status 0x%x", hdev->name, status);
1613 
1614 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
1615 	if (!cp)
1616 		return;
1617 
1618 	hci_dev_lock(hdev);
1619 
1620 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
1621 
1622 	BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->peer_addr),
1623 		conn);
1624 
1625 	if (status) {
1626 		if (conn && conn->state == BT_CONNECT) {
1627 			conn->state = BT_CLOSED;
1628 			hci_proto_connect_cfm(conn, status);
1629 			hci_conn_del(conn);
1630 		}
1631 	} else {
1632 		if (!conn) {
1633 			conn = hci_conn_add(hdev, LE_LINK, &cp->peer_addr);
1634 			if (conn) {
1635 				conn->dst_type = cp->peer_addr_type;
1636 				conn->out = true;
1637 			} else {
1638 				BT_ERR("No memory for new connection");
1639 			}
1640 		}
1641 	}
1642 
1643 	hci_dev_unlock(hdev);
1644 }
1645 
1646 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
1647 {
1648 	BT_DBG("%s status 0x%x", hdev->name, status);
1649 }
1650 
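/* Inquiry Complete event: finish the inquiry request and, when mgmt is in
 * use, either start resolving names from the inquiry cache or mark the
 * discovery as stopped. */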
1651 static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1652 {
1653 	__u8 status = *((__u8 *) skb->data);
1654 	struct discovery_state *discov = &hdev->discovery;
1655 	struct inquiry_entry *e;
1656 
1657 	BT_DBG("%s status %d", hdev->name, status);
1658 
1659 	hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1660 
1661 	hci_conn_check_pending(hdev);
1662 
1663 	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
1664 		return;
1665 
1666 	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1667 		return;
1668 
1669 	hci_dev_lock(hdev);
1670 
1671 	if (discov->state != DISCOVERY_FINDING)
1672 		goto unlock;
1673 
1674 	if (list_empty(&discov->resolve)) {
1675 		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1676 		goto unlock;
1677 	}
1678 
1679 	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1680 	if (e && hci_resolve_name(hdev, e) == 0) {
1681 		e->name_state = NAME_PENDING;
1682 		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
1683 	} else {
1684 		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1685 	}
1686 
1687 unlock:
1688 	hci_dev_unlock(hdev);
1689 }
1690 
1691 static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1692 {
1693 	struct inquiry_data data;
1694 	struct inquiry_info *info = (void *) (skb->data + 1);
1695 	int num_rsp = *((__u8 *) skb->data);
1696 
1697 	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1698 
1699 	if (!num_rsp)
1700 		return;
1701 
1702 	hci_dev_lock(hdev);
1703 
1704 	for (; num_rsp; num_rsp--, info++) {
1705 		bool name_known, ssp;
1706 
1707 		bacpy(&data.bdaddr, &info->bdaddr);
1708 		data.pscan_rep_mode	= info->pscan_rep_mode;
1709 		data.pscan_period_mode	= info->pscan_period_mode;
1710 		data.pscan_mode		= info->pscan_mode;
1711 		memcpy(data.dev_class, info->dev_class, 3);
1712 		data.clock_offset	= info->clock_offset;
1713 		data.rssi		= 0x00;
1714 		data.ssp_mode		= 0x00;
1715 
1716 		name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
1717 		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
1718 				  info->dev_class, 0, !name_known, ssp, NULL,
1719 				  0);
1720 	}
1721 
1722 	hci_dev_unlock(hdev);
1723 }
1724 
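/* Connection Complete event: finalize the pending ACL/SCO connection, record
 * handle and link mode, start remote feature discovery for ACL links, or
 * clean up and report the failure if the controller returned an error. */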
1725 static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1726 {
1727 	struct hci_ev_conn_complete *ev = (void *) skb->data;
1728 	struct hci_conn *conn;
1729 
1730 	BT_DBG("%s", hdev->name);
1731 
1732 	hci_dev_lock(hdev);
1733 
1734 	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
1735 	if (!conn) {
1736 		if (ev->link_type != SCO_LINK)
1737 			goto unlock;
1738 
1739 		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
1740 		if (!conn)
1741 			goto unlock;
1742 
1743 		conn->type = SCO_LINK;
1744 	}
1745 
1746 	if (!ev->status) {
1747 		conn->handle = __le16_to_cpu(ev->handle);
1748 
1749 		if (conn->type == ACL_LINK) {
1750 			conn->state = BT_CONFIG;
1751 			hci_conn_hold(conn);
1752 			conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1753 		} else
1754 			conn->state = BT_CONNECTED;
1755 
1756 		hci_conn_hold_device(conn);
1757 		hci_conn_add_sysfs(conn);
1758 
1759 		if (test_bit(HCI_AUTH, &hdev->flags))
1760 			conn->link_mode |= HCI_LM_AUTH;
1761 
1762 		if (test_bit(HCI_ENCRYPT, &hdev->flags))
1763 			conn->link_mode |= HCI_LM_ENCRYPT;
1764 
1765 		/* Get remote features */
1766 		if (conn->type == ACL_LINK) {
1767 			struct hci_cp_read_remote_features cp;
1768 			cp.handle = ev->handle;
1769 			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
1770 				     sizeof(cp), &cp);
1771 		}
1772 
1773 		/* Set packet type for incoming connection */
1774 		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
1775 			struct hci_cp_change_conn_ptype cp;
1776 			cp.handle = ev->handle;
1777 			cp.pkt_type = cpu_to_le16(conn->pkt_type);
1778 			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
1779 				     &cp);
1780 		}
1781 	} else {
1782 		conn->state = BT_CLOSED;
1783 		if (conn->type == ACL_LINK)
1784 			mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
1785 					    conn->dst_type, ev->status);
1786 	}
1787 
1788 	if (conn->type == ACL_LINK)
1789 		hci_sco_setup(conn, ev->status);
1790 
1791 	if (ev->status) {
1792 		hci_proto_connect_cfm(conn, ev->status);
1793 		hci_conn_del(conn);
1794 	} else if (ev->link_type != ACL_LINK)
1795 		hci_proto_connect_cfm(conn, ev->status);
1796 
1797 unlock:
1798 	hci_dev_unlock(hdev);
1799 
1800 	hci_conn_check_pending(hdev);
1801 }
1802 
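/* Connection Request event: accept the incoming ACL/SCO/eSCO connection if
 * the link policy, the protocols and the blacklist allow it, otherwise
 * reject it with HCI_ERROR_REJ_BAD_ADDR. */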
1803 static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1804 {
1805 	struct hci_ev_conn_request *ev = (void *) skb->data;
1806 	int mask = hdev->link_mode;
1807 
1808 	BT_DBG("%s bdaddr %s type 0x%x", hdev->name,
1809 					batostr(&ev->bdaddr), ev->link_type);
1810 
1811 	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type);
1812 
1813 	if ((mask & HCI_LM_ACCEPT) &&
1814 			!hci_blacklist_lookup(hdev, &ev->bdaddr)) {
1815 		/* Connection accepted */
1816 		struct inquiry_entry *ie;
1817 		struct hci_conn *conn;
1818 
1819 		hci_dev_lock(hdev);
1820 
1821 		ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
1822 		if (ie)
1823 			memcpy(ie->data.dev_class, ev->dev_class, 3);
1824 
1825 		conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
1826 		if (!conn) {
1827 			conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
1828 			if (!conn) {
1829 				BT_ERR("No memory for new connection");
1830 				hci_dev_unlock(hdev);
1831 				return;
1832 			}
1833 		}
1834 
1835 		memcpy(conn->dev_class, ev->dev_class, 3);
1836 		conn->state = BT_CONNECT;
1837 
1838 		hci_dev_unlock(hdev);
1839 
1840 		if (ev->link_type == ACL_LINK || !lmp_esco_capable(hdev)) {
1841 			struct hci_cp_accept_conn_req cp;
1842 
1843 			bacpy(&cp.bdaddr, &ev->bdaddr);
1844 
1845 			if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
1846 				cp.role = 0x00; /* Become master */
1847 			else
1848 				cp.role = 0x01; /* Remain slave */
1849 
1850 			hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp),
1851 				     &cp);
1852 		} else {
1853 			struct hci_cp_accept_sync_conn_req cp;
1854 
1855 			bacpy(&cp.bdaddr, &ev->bdaddr);
1856 			cp.pkt_type = cpu_to_le16(conn->pkt_type);
1857 
1858 			cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
1859 			cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
1860 			cp.max_latency    = cpu_to_le16(0xffff);
1861 			cp.content_format = cpu_to_le16(hdev->voice_setting);
1862 			cp.retrans_effort = 0xff;
1863 
1864 			hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
1865 				     sizeof(cp), &cp);
1866 		}
1867 	} else {
1868 		/* Connection rejected */
1869 		struct hci_cp_reject_conn_req cp;
1870 
1871 		bacpy(&cp.bdaddr, &ev->bdaddr);
1872 		cp.reason = HCI_ERROR_REJ_BAD_ADDR;
1873 		hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
1874 	}
1875 }
1876 
1877 static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1878 {
1879 	struct hci_ev_disconn_complete *ev = (void *) skb->data;
1880 	struct hci_conn *conn;
1881 
1882 	BT_DBG("%s status %d", hdev->name, ev->status);
1883 
1884 	hci_dev_lock(hdev);
1885 
1886 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1887 	if (!conn)
1888 		goto unlock;
1889 
1890 	if (ev->status == 0)
1891 		conn->state = BT_CLOSED;
1892 
1893 	if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) &&
1894 			(conn->type == ACL_LINK || conn->type == LE_LINK)) {
1895 		if (ev->status != 0)
1896 			mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1897 						conn->dst_type, ev->status);
1898 		else
1899 			mgmt_device_disconnected(hdev, &conn->dst, conn->type,
1900 						 conn->dst_type);
1901 	}
1902 
1903 	if (ev->status == 0) {
1904 		if (conn->type == ACL_LINK && conn->flush_key)
1905 			hci_remove_link_key(hdev, &conn->dst);
1906 		hci_proto_disconn_cfm(conn, ev->reason);
1907 		hci_conn_del(conn);
1908 	}
1909 
1910 unlock:
1911 	hci_dev_unlock(hdev);
1912 }
1913 
1914 static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1915 {
1916 	struct hci_ev_auth_complete *ev = (void *) skb->data;
1917 	struct hci_conn *conn;
1918 
1919 	BT_DBG("%s status %d", hdev->name, ev->status);
1920 
1921 	hci_dev_lock(hdev);
1922 
1923 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1924 	if (!conn)
1925 		goto unlock;
1926 
1927 	if (!ev->status) {
1928 		if (!hci_conn_ssp_enabled(conn) &&
1929 				test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
1930 			BT_INFO("re-auth of legacy device is not possible.");
1931 		} else {
1932 			conn->link_mode |= HCI_LM_AUTH;
1933 			conn->sec_level = conn->pending_sec_level;
1934 		}
1935 	} else {
1936 		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
1937 				 ev->status);
1938 	}
1939 
1940 	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
1941 	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
1942 
1943 	if (conn->state == BT_CONFIG) {
1944 		if (!ev->status && hci_conn_ssp_enabled(conn)) {
1945 			struct hci_cp_set_conn_encrypt cp;
1946 			cp.handle  = ev->handle;
1947 			cp.encrypt = 0x01;
1948 			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1949 									&cp);
1950 		} else {
1951 			conn->state = BT_CONNECTED;
1952 			hci_proto_connect_cfm(conn, ev->status);
1953 			hci_conn_put(conn);
1954 		}
1955 	} else {
1956 		hci_auth_cfm(conn, ev->status);
1957 
1958 		hci_conn_hold(conn);
1959 		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1960 		hci_conn_put(conn);
1961 	}
1962 
1963 	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
1964 		if (!ev->status) {
1965 			struct hci_cp_set_conn_encrypt cp;
1966 			cp.handle  = ev->handle;
1967 			cp.encrypt = 0x01;
1968 			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1969 									&cp);
1970 		} else {
1971 			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
1972 			hci_encrypt_cfm(conn, ev->status, 0x00);
1973 		}
1974 	}
1975 
1976 unlock:
1977 	hci_dev_unlock(hdev);
1978 }
1979 
1980 static inline void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
1981 {
1982 	struct hci_ev_remote_name *ev = (void *) skb->data;
1983 	struct hci_conn *conn;
1984 
1985 	BT_DBG("%s", hdev->name);
1986 
1987 	hci_conn_check_pending(hdev);
1988 
1989 	hci_dev_lock(hdev);
1990 
1991 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
1992 
1993 	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1994 		goto check_auth;
1995 
1996 	if (ev->status == 0)
1997 		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
1998 				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
1999 	else
2000 		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
2001 
2002 check_auth:
2003 	if (!conn)
2004 		goto unlock;
2005 
2006 	if (!hci_outgoing_auth_needed(hdev, conn))
2007 		goto unlock;
2008 
2009 	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2010 		struct hci_cp_auth_requested cp;
2011 		cp.handle = __cpu_to_le16(conn->handle);
2012 		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
2013 	}
2014 
2015 unlock:
2016 	hci_dev_unlock(hdev);
2017 }
2018 
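/* Encryption Change event: update the link mode bits and either confirm
 * connection setup or notify waiting users; a failure on an established
 * link results in a disconnect. */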
2019 static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2020 {
2021 	struct hci_ev_encrypt_change *ev = (void *) skb->data;
2022 	struct hci_conn *conn;
2023 
2024 	BT_DBG("%s status %d", hdev->name, ev->status);
2025 
2026 	hci_dev_lock(hdev);
2027 
2028 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2029 	if (conn) {
2030 		if (!ev->status) {
2031 			if (ev->encrypt) {
2032 				/* Encryption implies authentication */
2033 				conn->link_mode |= HCI_LM_AUTH;
2034 				conn->link_mode |= HCI_LM_ENCRYPT;
2035 				conn->sec_level = conn->pending_sec_level;
2036 			} else
2037 				conn->link_mode &= ~HCI_LM_ENCRYPT;
2038 		}
2039 
2040 		clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2041 
2042 		if (ev->status && conn->state == BT_CONNECTED) {
2043 			hci_acl_disconn(conn, 0x13);
2044 			hci_conn_put(conn);
2045 			goto unlock;
2046 		}
2047 
2048 		if (conn->state == BT_CONFIG) {
2049 			if (!ev->status)
2050 				conn->state = BT_CONNECTED;
2051 
2052 			hci_proto_connect_cfm(conn, ev->status);
2053 			hci_conn_put(conn);
2054 		} else
2055 			hci_encrypt_cfm(conn, ev->status, ev->encrypt);
2056 	}
2057 
2058 unlock:
2059 	hci_dev_unlock(hdev);
2060 }
2061 
2062 static inline void hci_change_link_key_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2063 {
2064 	struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2065 	struct hci_conn *conn;
2066 
2067 	BT_DBG("%s status %d", hdev->name, ev->status);
2068 
2069 	hci_dev_lock(hdev);
2070 
2071 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2072 	if (conn) {
2073 		if (!ev->status)
2074 			conn->link_mode |= HCI_LM_SECURE;
2075 
2076 		clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2077 
2078 		hci_key_change_cfm(conn, ev->status);
2079 	}
2080 
2081 	hci_dev_unlock(hdev);
2082 }
2083 
2084 static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
2085 {
2086 	struct hci_ev_remote_features *ev = (void *) skb->data;
2087 	struct hci_conn *conn;
2088 
2089 	BT_DBG("%s status %d", hdev->name, ev->status);
2090 
2091 	hci_dev_lock(hdev);
2092 
2093 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2094 	if (!conn)
2095 		goto unlock;
2096 
2097 	if (!ev->status)
2098 		memcpy(conn->features, ev->features, 8);
2099 
2100 	if (conn->state != BT_CONFIG)
2101 		goto unlock;
2102 
2103 	if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
2104 		struct hci_cp_read_remote_ext_features cp;
2105 		cp.handle = ev->handle;
2106 		cp.page = 0x01;
2107 		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
2108 							sizeof(cp), &cp);
2109 		goto unlock;
2110 	}
2111 
2112 	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2113 		struct hci_cp_remote_name_req cp;
2114 		memset(&cp, 0, sizeof(cp));
2115 		bacpy(&cp.bdaddr, &conn->dst);
2116 		cp.pscan_rep_mode = 0x02;
2117 		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2118 	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2119 		mgmt_device_connected(hdev, &conn->dst, conn->type,
2120 				      conn->dst_type, 0, NULL, 0,
2121 				      conn->dev_class);
2122 
2123 	if (!hci_outgoing_auth_needed(hdev, conn)) {
2124 		conn->state = BT_CONNECTED;
2125 		hci_proto_connect_cfm(conn, ev->status);
2126 		hci_conn_put(conn);
2127 	}
2128 
2129 unlock:
2130 	hci_dev_unlock(hdev);
2131 }
2132 
2133 static inline void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb)
2134 {
2135 	BT_DBG("%s", hdev->name);
2136 }
2137 
2138 static inline void hci_qos_setup_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2139 {
2140 	BT_DBG("%s", hdev->name);
2141 }
2142 
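/* Command Complete event: dispatch to the per-opcode hci_cc_* handler,
 * stop the command timer and resume command queue processing when the
 * controller reports free command slots. */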
2143 static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2144 {
2145 	struct hci_ev_cmd_complete *ev = (void *) skb->data;
2146 	__u16 opcode;
2147 
2148 	skb_pull(skb, sizeof(*ev));
2149 
2150 	opcode = __le16_to_cpu(ev->opcode);
2151 
2152 	switch (opcode) {
2153 	case HCI_OP_INQUIRY_CANCEL:
2154 		hci_cc_inquiry_cancel(hdev, skb);
2155 		break;
2156 
2157 	case HCI_OP_EXIT_PERIODIC_INQ:
2158 		hci_cc_exit_periodic_inq(hdev, skb);
2159 		break;
2160 
2161 	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
2162 		hci_cc_remote_name_req_cancel(hdev, skb);
2163 		break;
2164 
2165 	case HCI_OP_ROLE_DISCOVERY:
2166 		hci_cc_role_discovery(hdev, skb);
2167 		break;
2168 
2169 	case HCI_OP_READ_LINK_POLICY:
2170 		hci_cc_read_link_policy(hdev, skb);
2171 		break;
2172 
2173 	case HCI_OP_WRITE_LINK_POLICY:
2174 		hci_cc_write_link_policy(hdev, skb);
2175 		break;
2176 
2177 	case HCI_OP_READ_DEF_LINK_POLICY:
2178 		hci_cc_read_def_link_policy(hdev, skb);
2179 		break;
2180 
2181 	case HCI_OP_WRITE_DEF_LINK_POLICY:
2182 		hci_cc_write_def_link_policy(hdev, skb);
2183 		break;
2184 
2185 	case HCI_OP_RESET:
2186 		hci_cc_reset(hdev, skb);
2187 		break;
2188 
2189 	case HCI_OP_WRITE_LOCAL_NAME:
2190 		hci_cc_write_local_name(hdev, skb);
2191 		break;
2192 
2193 	case HCI_OP_READ_LOCAL_NAME:
2194 		hci_cc_read_local_name(hdev, skb);
2195 		break;
2196 
2197 	case HCI_OP_WRITE_AUTH_ENABLE:
2198 		hci_cc_write_auth_enable(hdev, skb);
2199 		break;
2200 
2201 	case HCI_OP_WRITE_ENCRYPT_MODE:
2202 		hci_cc_write_encrypt_mode(hdev, skb);
2203 		break;
2204 
2205 	case HCI_OP_WRITE_SCAN_ENABLE:
2206 		hci_cc_write_scan_enable(hdev, skb);
2207 		break;
2208 
2209 	case HCI_OP_READ_CLASS_OF_DEV:
2210 		hci_cc_read_class_of_dev(hdev, skb);
2211 		break;
2212 
2213 	case HCI_OP_WRITE_CLASS_OF_DEV:
2214 		hci_cc_write_class_of_dev(hdev, skb);
2215 		break;
2216 
2217 	case HCI_OP_READ_VOICE_SETTING:
2218 		hci_cc_read_voice_setting(hdev, skb);
2219 		break;
2220 
2221 	case HCI_OP_WRITE_VOICE_SETTING:
2222 		hci_cc_write_voice_setting(hdev, skb);
2223 		break;
2224 
2225 	case HCI_OP_HOST_BUFFER_SIZE:
2226 		hci_cc_host_buffer_size(hdev, skb);
2227 		break;
2228 
2229 	case HCI_OP_WRITE_SSP_MODE:
2230 		hci_cc_write_ssp_mode(hdev, skb);
2231 		break;
2232 
2233 	case HCI_OP_READ_LOCAL_VERSION:
2234 		hci_cc_read_local_version(hdev, skb);
2235 		break;
2236 
2237 	case HCI_OP_READ_LOCAL_COMMANDS:
2238 		hci_cc_read_local_commands(hdev, skb);
2239 		break;
2240 
2241 	case HCI_OP_READ_LOCAL_FEATURES:
2242 		hci_cc_read_local_features(hdev, skb);
2243 		break;
2244 
2245 	case HCI_OP_READ_LOCAL_EXT_FEATURES:
2246 		hci_cc_read_local_ext_features(hdev, skb);
2247 		break;
2248 
2249 	case HCI_OP_READ_BUFFER_SIZE:
2250 		hci_cc_read_buffer_size(hdev, skb);
2251 		break;
2252 
2253 	case HCI_OP_READ_BD_ADDR:
2254 		hci_cc_read_bd_addr(hdev, skb);
2255 		break;
2256 
2257 	case HCI_OP_READ_DATA_BLOCK_SIZE:
2258 		hci_cc_read_data_block_size(hdev, skb);
2259 		break;
2260 
2261 	case HCI_OP_WRITE_CA_TIMEOUT:
2262 		hci_cc_write_ca_timeout(hdev, skb);
2263 		break;
2264 
2265 	case HCI_OP_READ_FLOW_CONTROL_MODE:
2266 		hci_cc_read_flow_control_mode(hdev, skb);
2267 		break;
2268 
2269 	case HCI_OP_READ_LOCAL_AMP_INFO:
2270 		hci_cc_read_local_amp_info(hdev, skb);
2271 		break;
2272 
2273 	case HCI_OP_DELETE_STORED_LINK_KEY:
2274 		hci_cc_delete_stored_link_key(hdev, skb);
2275 		break;
2276 
2277 	case HCI_OP_SET_EVENT_MASK:
2278 		hci_cc_set_event_mask(hdev, skb);
2279 		break;
2280 
2281 	case HCI_OP_WRITE_INQUIRY_MODE:
2282 		hci_cc_write_inquiry_mode(hdev, skb);
2283 		break;
2284 
2285 	case HCI_OP_READ_INQ_RSP_TX_POWER:
2286 		hci_cc_read_inq_rsp_tx_power(hdev, skb);
2287 		break;
2288 
2289 	case HCI_OP_SET_EVENT_FLT:
2290 		hci_cc_set_event_flt(hdev, skb);
2291 		break;
2292 
2293 	case HCI_OP_PIN_CODE_REPLY:
2294 		hci_cc_pin_code_reply(hdev, skb);
2295 		break;
2296 
2297 	case HCI_OP_PIN_CODE_NEG_REPLY:
2298 		hci_cc_pin_code_neg_reply(hdev, skb);
2299 		break;
2300 
2301 	case HCI_OP_READ_LOCAL_OOB_DATA:
2302 		hci_cc_read_local_oob_data_reply(hdev, skb);
2303 		break;
2304 
2305 	case HCI_OP_LE_READ_BUFFER_SIZE:
2306 		hci_cc_le_read_buffer_size(hdev, skb);
2307 		break;
2308 
2309 	case HCI_OP_USER_CONFIRM_REPLY:
2310 		hci_cc_user_confirm_reply(hdev, skb);
2311 		break;
2312 
2313 	case HCI_OP_USER_CONFIRM_NEG_REPLY:
2314 		hci_cc_user_confirm_neg_reply(hdev, skb);
2315 		break;
2316 
2317 	case HCI_OP_USER_PASSKEY_REPLY:
2318 		hci_cc_user_passkey_reply(hdev, skb);
2319 		break;
2320 
2321 	case HCI_OP_USER_PASSKEY_NEG_REPLY:
2322 		hci_cc_user_passkey_neg_reply(hdev, skb);
2323 		break;
2324 
2325 	case HCI_OP_LE_SET_SCAN_PARAM:
2326 		hci_cc_le_set_scan_param(hdev, skb);
2327 		break;
2328 
2329 	case HCI_OP_LE_SET_SCAN_ENABLE:
2330 		hci_cc_le_set_scan_enable(hdev, skb);
2331 		break;
2332 
2333 	case HCI_OP_LE_LTK_REPLY:
2334 		hci_cc_le_ltk_reply(hdev, skb);
2335 		break;
2336 
2337 	case HCI_OP_LE_LTK_NEG_REPLY:
2338 		hci_cc_le_ltk_neg_reply(hdev, skb);
2339 		break;
2340 
2341 	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
2342 		hci_cc_write_le_host_supported(hdev, skb);
2343 		break;
2344 
2345 	default:
2346 		BT_DBG("%s opcode 0x%x", hdev->name, opcode);
2347 		break;
2348 	}
2349 
2350 	if (ev->opcode != HCI_OP_NOP)
2351 		del_timer(&hdev->cmd_timer);
2352 
2353 	if (ev->ncmd) {
2354 		atomic_set(&hdev->cmd_cnt, 1);
2355 		if (!skb_queue_empty(&hdev->cmd_q))
2356 			queue_work(hdev->workqueue, &hdev->cmd_work);
2357 	}
2358 }
2359 
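/* Command Status event: dispatch to the per-opcode hci_cs_* handler,
 * stop the command timer and, unless a reset is in progress, restart
 * command queue processing. */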
2360 static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
2361 {
2362 	struct hci_ev_cmd_status *ev = (void *) skb->data;
2363 	__u16 opcode;
2364 
2365 	skb_pull(skb, sizeof(*ev));
2366 
2367 	opcode = __le16_to_cpu(ev->opcode);
2368 
2369 	switch (opcode) {
2370 	case HCI_OP_INQUIRY:
2371 		hci_cs_inquiry(hdev, ev->status);
2372 		break;
2373 
2374 	case HCI_OP_CREATE_CONN:
2375 		hci_cs_create_conn(hdev, ev->status);
2376 		break;
2377 
2378 	case HCI_OP_ADD_SCO:
2379 		hci_cs_add_sco(hdev, ev->status);
2380 		break;
2381 
2382 	case HCI_OP_AUTH_REQUESTED:
2383 		hci_cs_auth_requested(hdev, ev->status);
2384 		break;
2385 
2386 	case HCI_OP_SET_CONN_ENCRYPT:
2387 		hci_cs_set_conn_encrypt(hdev, ev->status);
2388 		break;
2389 
2390 	case HCI_OP_REMOTE_NAME_REQ:
2391 		hci_cs_remote_name_req(hdev, ev->status);
2392 		break;
2393 
2394 	case HCI_OP_READ_REMOTE_FEATURES:
2395 		hci_cs_read_remote_features(hdev, ev->status);
2396 		break;
2397 
2398 	case HCI_OP_READ_REMOTE_EXT_FEATURES:
2399 		hci_cs_read_remote_ext_features(hdev, ev->status);
2400 		break;
2401 
2402 	case HCI_OP_SETUP_SYNC_CONN:
2403 		hci_cs_setup_sync_conn(hdev, ev->status);
2404 		break;
2405 
2406 	case HCI_OP_SNIFF_MODE:
2407 		hci_cs_sniff_mode(hdev, ev->status);
2408 		break;
2409 
2410 	case HCI_OP_EXIT_SNIFF_MODE:
2411 		hci_cs_exit_sniff_mode(hdev, ev->status);
2412 		break;
2413 
2414 	case HCI_OP_DISCONNECT:
2415 		hci_cs_disconnect(hdev, ev->status);
2416 		break;
2417 
2418 	case HCI_OP_LE_CREATE_CONN:
2419 		hci_cs_le_create_conn(hdev, ev->status);
2420 		break;
2421 
2422 	case HCI_OP_LE_START_ENC:
2423 		hci_cs_le_start_enc(hdev, ev->status);
2424 		break;
2425 
2426 	default:
2427 		BT_DBG("%s opcode 0x%x", hdev->name, opcode);
2428 		break;
2429 	}
2430 
2431 	if (ev->opcode != HCI_OP_NOP)
2432 		del_timer(&hdev->cmd_timer);
2433 
2434 	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2435 		atomic_set(&hdev->cmd_cnt, 1);
2436 		if (!skb_queue_empty(&hdev->cmd_q))
2437 			queue_work(hdev->workqueue, &hdev->cmd_work);
2438 	}
2439 }
2440 
2441 static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2442 {
2443 	struct hci_ev_role_change *ev = (void *) skb->data;
2444 	struct hci_conn *conn;
2445 
2446 	BT_DBG("%s status %d", hdev->name, ev->status);
2447 
2448 	hci_dev_lock(hdev);
2449 
2450 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2451 	if (conn) {
2452 		if (!ev->status) {
2453 			if (ev->role)
2454 				conn->link_mode &= ~HCI_LM_MASTER;
2455 			else
2456 				conn->link_mode |= HCI_LM_MASTER;
2457 		}
2458 
2459 		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2460 
2461 		hci_role_switch_cfm(conn, ev->status, ev->role);
2462 	}
2463 
2464 	hci_dev_unlock(hdev);
2465 }
2466 
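/* Number of Completed Packets event (packet-based flow control): return
 * the completed packets to the per-type credit counters for each reported
 * handle and reschedule the TX work. */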
2467 static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
2468 {
2469 	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
2470 	int i;
2471 
2472 	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
2473 		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2474 		return;
2475 	}
2476 
2477 	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2478 			ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
2479 		BT_DBG("%s bad parameters", hdev->name);
2480 		return;
2481 	}
2482 
2483 	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
2484 
2485 	for (i = 0; i < ev->num_hndl; i++) {
2486 		struct hci_comp_pkts_info *info = &ev->handles[i];
2487 		struct hci_conn *conn;
2488 		__u16  handle, count;
2489 
2490 		handle = __le16_to_cpu(info->handle);
2491 		count  = __le16_to_cpu(info->count);
2492 
2493 		conn = hci_conn_hash_lookup_handle(hdev, handle);
2494 		if (!conn)
2495 			continue;
2496 
2497 		conn->sent -= count;
2498 
2499 		switch (conn->type) {
2500 		case ACL_LINK:
2501 			hdev->acl_cnt += count;
2502 			if (hdev->acl_cnt > hdev->acl_pkts)
2503 				hdev->acl_cnt = hdev->acl_pkts;
2504 			break;
2505 
2506 		case LE_LINK:
2507 			if (hdev->le_pkts) {
2508 				hdev->le_cnt += count;
2509 				if (hdev->le_cnt > hdev->le_pkts)
2510 					hdev->le_cnt = hdev->le_pkts;
2511 			} else {
2512 				hdev->acl_cnt += count;
2513 				if (hdev->acl_cnt > hdev->acl_pkts)
2514 					hdev->acl_cnt = hdev->acl_pkts;
2515 			}
2516 			break;
2517 
2518 		case SCO_LINK:
2519 			hdev->sco_cnt += count;
2520 			if (hdev->sco_cnt > hdev->sco_pkts)
2521 				hdev->sco_cnt = hdev->sco_pkts;
2522 			break;
2523 
2524 		default:
2525 			BT_ERR("Unknown type %d conn %p", conn->type, conn);
2526 			break;
2527 		}
2528 	}
2529 
2530 	queue_work(hdev->workqueue, &hdev->tx_work);
2531 }
2532 
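/* Number of Completed Data Blocks event (block-based flow control):
 * return the reported blocks to the shared block pool and reschedule
 * the TX work. */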
2533 static inline void hci_num_comp_blocks_evt(struct hci_dev *hdev,
2534 					   struct sk_buff *skb)
2535 {
2536 	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
2537 	int i;
2538 
2539 	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
2540 		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2541 		return;
2542 	}
2543 
2544 	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2545 			ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
2546 		BT_DBG("%s bad parameters", hdev->name);
2547 		return;
2548 	}
2549 
2550 	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
2551 								ev->num_hndl);
2552 
2553 	for (i = 0; i < ev->num_hndl; i++) {
2554 		struct hci_comp_blocks_info *info = &ev->handles[i];
2555 		struct hci_conn *conn;
2556 		__u16  handle, block_count;
2557 
2558 		handle = __le16_to_cpu(info->handle);
2559 		block_count = __le16_to_cpu(info->blocks);
2560 
2561 		conn = hci_conn_hash_lookup_handle(hdev, handle);
2562 		if (!conn)
2563 			continue;
2564 
2565 		conn->sent -= block_count;
2566 
2567 		switch (conn->type) {
2568 		case ACL_LINK:
2569 			hdev->block_cnt += block_count;
2570 			if (hdev->block_cnt > hdev->num_blocks)
2571 				hdev->block_cnt = hdev->num_blocks;
2572 			break;
2573 
2574 		default:
2575 			BT_ERR("Unknown type %d conn %p", conn->type, conn);
2576 			break;
2577 		}
2578 	}
2579 
2580 	queue_work(hdev->workqueue, &hdev->tx_work);
2581 }
2582 
2583 static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2584 {
2585 	struct hci_ev_mode_change *ev = (void *) skb->data;
2586 	struct hci_conn *conn;
2587 
2588 	BT_DBG("%s status %d", hdev->name, ev->status);
2589 
2590 	hci_dev_lock(hdev);
2591 
2592 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2593 	if (conn) {
2594 		conn->mode = ev->mode;
2595 		conn->interval = __le16_to_cpu(ev->interval);
2596 
2597 		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
2598 			if (conn->mode == HCI_CM_ACTIVE)
2599 				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2600 			else
2601 				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2602 		}
2603 
2604 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2605 			hci_sco_setup(conn, ev->status);
2606 	}
2607 
2608 	hci_dev_unlock(hdev);
2609 }
2610 
2611 static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2612 {
2613 	struct hci_ev_pin_code_req *ev = (void *) skb->data;
2614 	struct hci_conn *conn;
2615 
2616 	BT_DBG("%s", hdev->name);
2617 
2618 	hci_dev_lock(hdev);
2619 
2620 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2621 	if (!conn)
2622 		goto unlock;
2623 
2624 	if (conn->state == BT_CONNECTED) {
2625 		hci_conn_hold(conn);
2626 		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2627 		hci_conn_put(conn);
2628 	}
2629 
2630 	if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
2631 		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2632 					sizeof(ev->bdaddr), &ev->bdaddr);
2633 	else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
2634 		u8 secure;
2635 
2636 		if (conn->pending_sec_level == BT_SECURITY_HIGH)
2637 			secure = 1;
2638 		else
2639 			secure = 0;
2640 
2641 		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
2642 	}
2643 
2644 unlock:
2645 	hci_dev_unlock(hdev);
2646 }
2647 
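/* Link Key Request event: look up a stored key for the peer and reply
 * with it, rejecting debug, unauthenticated or short-PIN keys that do not
 * satisfy the pending security level. */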
2648 static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2649 {
2650 	struct hci_ev_link_key_req *ev = (void *) skb->data;
2651 	struct hci_cp_link_key_reply cp;
2652 	struct hci_conn *conn;
2653 	struct link_key *key;
2654 
2655 	BT_DBG("%s", hdev->name);
2656 
2657 	if (!test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
2658 		return;
2659 
2660 	hci_dev_lock(hdev);
2661 
2662 	key = hci_find_link_key(hdev, &ev->bdaddr);
2663 	if (!key) {
2664 		BT_DBG("%s link key not found for %s", hdev->name,
2665 							batostr(&ev->bdaddr));
2666 		goto not_found;
2667 	}
2668 
2669 	BT_DBG("%s found key type %u for %s", hdev->name, key->type,
2670 							batostr(&ev->bdaddr));
2671 
2672 	if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
2673 				key->type == HCI_LK_DEBUG_COMBINATION) {
2674 		BT_DBG("%s ignoring debug key", hdev->name);
2675 		goto not_found;
2676 	}
2677 
2678 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2679 	if (conn) {
2680 		if (key->type == HCI_LK_UNAUTH_COMBINATION &&
2681 				conn->auth_type != 0xff &&
2682 				(conn->auth_type & 0x01)) {
2683 			BT_DBG("%s ignoring unauthenticated key", hdev->name);
2684 			goto not_found;
2685 		}
2686 
2687 		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
2688 				conn->pending_sec_level == BT_SECURITY_HIGH) {
2689 			BT_DBG("%s ignoring key unauthenticated for high security",
2690 			       hdev->name);
2691 			goto not_found;
2692 		}
2693 
2694 		conn->key_type = key->type;
2695 		conn->pin_length = key->pin_len;
2696 	}
2697 
2698 	bacpy(&cp.bdaddr, &ev->bdaddr);
2699 	memcpy(cp.link_key, key->val, 16);
2700 
2701 	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
2702 
2703 	hci_dev_unlock(hdev);
2704 
2705 	return;
2706 
2707 not_found:
2708 	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
2709 	hci_dev_unlock(hdev);
2710 }
2711 
2712 static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
2713 {
2714 	struct hci_ev_link_key_notify *ev = (void *) skb->data;
2715 	struct hci_conn *conn;
2716 	u8 pin_len = 0;
2717 
2718 	BT_DBG("%s", hdev->name);
2719 
2720 	hci_dev_lock(hdev);
2721 
2722 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2723 	if (conn) {
2724 		hci_conn_hold(conn);
2725 		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2726 		pin_len = conn->pin_length;
2727 
2728 		if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
2729 			conn->key_type = ev->key_type;
2730 
2731 		hci_conn_put(conn);
2732 	}
2733 
2734 	if (test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
2735 		hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
2736 							ev->key_type, pin_len);
2737 
2738 	hci_dev_unlock(hdev);
2739 }
2740 
2741 static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
2742 {
2743 	struct hci_ev_clock_offset *ev = (void *) skb->data;
2744 	struct hci_conn *conn;
2745 
2746 	BT_DBG("%s status %d", hdev->name, ev->status);
2747 
2748 	hci_dev_lock(hdev);
2749 
2750 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2751 	if (conn && !ev->status) {
2752 		struct inquiry_entry *ie;
2753 
2754 		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2755 		if (ie) {
2756 			ie->data.clock_offset = ev->clock_offset;
2757 			ie->timestamp = jiffies;
2758 		}
2759 	}
2760 
2761 	hci_dev_unlock(hdev);
2762 }
2763 
2764 static inline void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2765 {
2766 	struct hci_ev_pkt_type_change *ev = (void *) skb->data;
2767 	struct hci_conn *conn;
2768 
2769 	BT_DBG("%s status %d", hdev->name, ev->status);
2770 
2771 	hci_dev_lock(hdev);
2772 
2773 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2774 	if (conn && !ev->status)
2775 		conn->pkt_type = __le16_to_cpu(ev->pkt_type);
2776 
2777 	hci_dev_unlock(hdev);
2778 }
2779 
2780 static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
2781 {
2782 	struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
2783 	struct inquiry_entry *ie;
2784 
2785 	BT_DBG("%s", hdev->name);
2786 
2787 	hci_dev_lock(hdev);
2788 
2789 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2790 	if (ie) {
2791 		ie->data.pscan_rep_mode = ev->pscan_rep_mode;
2792 		ie->timestamp = jiffies;
2793 	}
2794 
2795 	hci_dev_unlock(hdev);
2796 }
2797 
2798 static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct sk_buff *skb)
2799 {
2800 	struct inquiry_data data;
2801 	int num_rsp = *((__u8 *) skb->data);
2802 	bool name_known, ssp;
2803 
2804 	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2805 
2806 	if (!num_rsp)
2807 		return;
2808 
2809 	hci_dev_lock(hdev);
2810 
2811 	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
2812 		struct inquiry_info_with_rssi_and_pscan_mode *info;
2813 		info = (void *) (skb->data + 1);
2814 
2815 		for (; num_rsp; num_rsp--, info++) {
2816 			bacpy(&data.bdaddr, &info->bdaddr);
2817 			data.pscan_rep_mode	= info->pscan_rep_mode;
2818 			data.pscan_period_mode	= info->pscan_period_mode;
2819 			data.pscan_mode		= info->pscan_mode;
2820 			memcpy(data.dev_class, info->dev_class, 3);
2821 			data.clock_offset	= info->clock_offset;
2822 			data.rssi		= info->rssi;
2823 			data.ssp_mode		= 0x00;
2824 
2825 			name_known = hci_inquiry_cache_update(hdev, &data,
2826 							      false, &ssp);
2827 			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2828 					  info->dev_class, info->rssi,
2829 					  !name_known, ssp, NULL, 0);
2830 		}
2831 	} else {
2832 		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
2833 
2834 		for (; num_rsp; num_rsp--, info++) {
2835 			bacpy(&data.bdaddr, &info->bdaddr);
2836 			data.pscan_rep_mode	= info->pscan_rep_mode;
2837 			data.pscan_period_mode	= info->pscan_period_mode;
2838 			data.pscan_mode		= 0x00;
2839 			memcpy(data.dev_class, info->dev_class, 3);
2840 			data.clock_offset	= info->clock_offset;
2841 			data.rssi		= info->rssi;
2842 			data.ssp_mode		= 0x00;
2843 			name_known = hci_inquiry_cache_update(hdev, &data,
2844 							      false, &ssp);
2845 			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2846 					  info->dev_class, info->rssi,
2847 					  !name_known, ssp, NULL, 0);
2848 		}
2849 	}
2850 
2851 	hci_dev_unlock(hdev);
2852 }
2853 
2854 static inline void hci_remote_ext_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
2855 {
2856 	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
2857 	struct hci_conn *conn;
2858 
2859 	BT_DBG("%s", hdev->name);
2860 
2861 	hci_dev_lock(hdev);
2862 
2863 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2864 	if (!conn)
2865 		goto unlock;
2866 
2867 	if (!ev->status && ev->page == 0x01) {
2868 		struct inquiry_entry *ie;
2869 
2870 		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2871 		if (ie)
2872 			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
2873 
2874 		if (ev->features[0] & LMP_HOST_SSP)
2875 			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
2876 	}
2877 
2878 	if (conn->state != BT_CONFIG)
2879 		goto unlock;
2880 
2881 	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2882 		struct hci_cp_remote_name_req cp;
2883 		memset(&cp, 0, sizeof(cp));
2884 		bacpy(&cp.bdaddr, &conn->dst);
2885 		cp.pscan_rep_mode = 0x02;
2886 		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2887 	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2888 		mgmt_device_connected(hdev, &conn->dst, conn->type,
2889 				      conn->dst_type, 0, NULL, 0,
2890 				      conn->dev_class);
2891 
2892 	if (!hci_outgoing_auth_needed(hdev, conn)) {
2893 		conn->state = BT_CONNECTED;
2894 		hci_proto_connect_cfm(conn, ev->status);
2895 		hci_conn_put(conn);
2896 	}
2897 
2898 unlock:
2899 	hci_dev_unlock(hdev);
2900 }
2901 
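/* Synchronous Connection Complete event: finalize the SCO/eSCO link or,
 * for a few specific failure codes, retry the setup once with a
 * restricted packet type before giving up. */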
2902 static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2903 {
2904 	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
2905 	struct hci_conn *conn;
2906 
2907 	BT_DBG("%s status %d", hdev->name, ev->status);
2908 
2909 	hci_dev_lock(hdev);
2910 
2911 	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2912 	if (!conn) {
2913 		if (ev->link_type == ESCO_LINK)
2914 			goto unlock;
2915 
2916 		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
2917 		if (!conn)
2918 			goto unlock;
2919 
2920 		conn->type = SCO_LINK;
2921 	}
2922 
2923 	switch (ev->status) {
2924 	case 0x00:
2925 		conn->handle = __le16_to_cpu(ev->handle);
2926 		conn->state  = BT_CONNECTED;
2927 
2928 		hci_conn_hold_device(conn);
2929 		hci_conn_add_sysfs(conn);
2930 		break;
2931 
2932 	case 0x11:	/* Unsupported Feature or Parameter Value */
2933 	case 0x1c:	/* SCO interval rejected */
2934 	case 0x1a:	/* Unsupported Remote Feature */
2935 	case 0x1f:	/* Unspecified error */
2936 		if (conn->out && conn->attempt < 2) {
2937 			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
2938 					(hdev->esco_type & EDR_ESCO_MASK);
2939 			hci_setup_sync(conn, conn->link->handle);
2940 			goto unlock;
2941 		}
2942 		/* fall through */
2943 
2944 	default:
2945 		conn->state = BT_CLOSED;
2946 		break;
2947 	}
2948 
2949 	hci_proto_connect_cfm(conn, ev->status);
2950 	if (ev->status)
2951 		hci_conn_del(conn);
2952 
2953 unlock:
2954 	hci_dev_unlock(hdev);
2955 }
2956 
2957 static inline void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb)
2958 {
2959 	BT_DBG("%s", hdev->name);
2960 }
2961 
2962 static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
2963 {
2964 	struct hci_ev_sniff_subrate *ev = (void *) skb->data;
2965 
2966 	BT_DBG("%s status %d", hdev->name, ev->status);
2967 }
2968 
2969 static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2970 {
2971 	struct inquiry_data data;
2972 	struct extended_inquiry_info *info = (void *) (skb->data + 1);
2973 	int num_rsp = *((__u8 *) skb->data);
2974 
2975 	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2976 
2977 	if (!num_rsp)
2978 		return;
2979 
2980 	hci_dev_lock(hdev);
2981 
2982 	for (; num_rsp; num_rsp--, info++) {
2983 		bool name_known, ssp;
2984 
2985 		bacpy(&data.bdaddr, &info->bdaddr);
2986 		data.pscan_rep_mode	= info->pscan_rep_mode;
2987 		data.pscan_period_mode	= info->pscan_period_mode;
2988 		data.pscan_mode		= 0x00;
2989 		memcpy(data.dev_class, info->dev_class, 3);
2990 		data.clock_offset	= info->clock_offset;
2991 		data.rssi		= info->rssi;
2992 		data.ssp_mode		= 0x01;
2993 
2994 		if (test_bit(HCI_MGMT, &hdev->dev_flags))
2995 			name_known = eir_has_data_type(info->data,
2996 						       sizeof(info->data),
2997 						       EIR_NAME_COMPLETE);
2998 		else
2999 			name_known = true;
3000 
3001 		name_known = hci_inquiry_cache_update(hdev, &data, name_known,
3002 						      &ssp);
3003 		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3004 				  info->dev_class, info->rssi, !name_known,
3005 				  ssp, info->data, sizeof(info->data));
3006 	}
3007 
3008 	hci_dev_unlock(hdev);
3009 }
3010 
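/* Derive the authentication requirements for the IO Capability Reply
 * from the remote's stated requirements and both sides' IO capabilities. */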
3011 static inline u8 hci_get_auth_req(struct hci_conn *conn)
3012 {
3013 	/* If remote requests dedicated bonding follow that lead */
3014 	/* If the remote requests dedicated bonding, follow that lead */
3015 		/* If both remote and local IO capabilities allow MITM
3016 		 * protection then require it, otherwise don't */
3017 		if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
3018 			return 0x02;
3019 		else
3020 			return 0x03;
3021 	}
3022 
3023 	/* If remote requests no-bonding follow that lead */
3024 	/* If the remote requests no-bonding, follow that lead */
3025 		return conn->remote_auth | (conn->auth_type & 0x01);
3026 
3027 	return conn->auth_type;
3028 }
3029 
3030 static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3031 {
3032 	struct hci_ev_io_capa_request *ev = (void *) skb->data;
3033 	struct hci_conn *conn;
3034 
3035 	BT_DBG("%s", hdev->name);
3036 
3037 	hci_dev_lock(hdev);
3038 
3039 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3040 	if (!conn)
3041 		goto unlock;
3042 
3043 	hci_conn_hold(conn);
3044 
3045 	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3046 		goto unlock;
3047 
3048 	if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
3049 			(conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
3050 		struct hci_cp_io_capability_reply cp;
3051 
3052 		bacpy(&cp.bdaddr, &ev->bdaddr);
3053 		/* KeyboardDisplay is not a valid IO capability for this
3054 		 * reply in the BT spec, so map it to DisplayYesNo. */
3055 		cp.capability = (conn->io_capability == 0x04) ?
3056 						0x01 : conn->io_capability;
3057 		conn->auth_type = hci_get_auth_req(conn);
3058 		cp.authentication = conn->auth_type;
3059 
3060 		if ((conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)) &&
3061 				hci_find_remote_oob_data(hdev, &conn->dst))
3062 			cp.oob_data = 0x01;
3063 		else
3064 			cp.oob_data = 0x00;
3065 
3066 		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
3067 							sizeof(cp), &cp);
3068 	} else {
3069 		struct hci_cp_io_capability_neg_reply cp;
3070 
3071 		bacpy(&cp.bdaddr, &ev->bdaddr);
3072 		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
3073 
3074 		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
3075 							sizeof(cp), &cp);
3076 	}
3077 
3078 unlock:
3079 	hci_dev_unlock(hdev);
3080 }
3081 
3082 static inline void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3083 {
3084 	struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3085 	struct hci_conn *conn;
3086 
3087 	BT_DBG("%s", hdev->name);
3088 
3089 	hci_dev_lock(hdev);
3090 
3091 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3092 	if (!conn)
3093 		goto unlock;
3094 
3095 	conn->remote_cap = ev->capability;
3096 	conn->remote_auth = ev->authentication;
3097 	if (ev->oob_data)
3098 		set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3099 
3100 unlock:
3101 	hci_dev_unlock(hdev);
3102 }
3103 
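/* User Confirmation Request event: reject, auto-accept (optionally after
 * a configurable delay) or forward the request to user space, depending
 * on the MITM requirements of both sides. */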
3104 static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
3105 							struct sk_buff *skb)
3106 {
3107 	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
3108 	int loc_mitm, rem_mitm, confirm_hint = 0;
3109 	struct hci_conn *conn;
3110 
3111 	BT_DBG("%s", hdev->name);
3112 
3113 	hci_dev_lock(hdev);
3114 
3115 	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3116 		goto unlock;
3117 
3118 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3119 	if (!conn)
3120 		goto unlock;
3121 
3122 	loc_mitm = (conn->auth_type & 0x01);
3123 	rem_mitm = (conn->remote_auth & 0x01);
3124 
3125 	/* If we require MITM but the remote device can't provide that
3126 	 * (it has NoInputNoOutput) then reject the confirmation
3127 	 * request. The only exception is when we're dedicated bonding
3128 	 * initiators (connect_cfm_cb set) since then we always have the MITM
3129 	 * bit set. */
3130 	if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
3131 		BT_DBG("Rejecting request: remote device can't provide MITM");
3132 		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
3133 					sizeof(ev->bdaddr), &ev->bdaddr);
3134 		goto unlock;
3135 	}
3136 
3137 	/* If neither side requires MITM protection, auto-accept */
3138 	if ((!loc_mitm || conn->remote_cap == 0x03) &&
3139 				(!rem_mitm || conn->io_capability == 0x03)) {
3140 
3141 		/* If we're not the initiator, request authorization to
3142 		 * proceed from user space (mgmt_user_confirm with
3143 		 * confirm_hint set to 1). */
3144 		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3145 			BT_DBG("Confirming auto-accept as acceptor");
3146 			confirm_hint = 1;
3147 			goto confirm;
3148 		}
3149 
3150 		BT_DBG("Auto-accept of user confirmation with %ums delay",
3151 						hdev->auto_accept_delay);
3152 
3153 		if (hdev->auto_accept_delay > 0) {
3154 			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
3155 			mod_timer(&conn->auto_accept_timer, jiffies + delay);
3156 			goto unlock;
3157 		}
3158 
3159 		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
3160 						sizeof(ev->bdaddr), &ev->bdaddr);
3161 		goto unlock;
3162 	}
3163 
3164 confirm:
3165 	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, ev->passkey,
3166 				  confirm_hint);
3167 
3168 unlock:
3169 	hci_dev_unlock(hdev);
3170 }
3171 
3172 static inline void hci_user_passkey_request_evt(struct hci_dev *hdev,
3173 							struct sk_buff *skb)
3174 {
3175 	struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3176 
3177 	BT_DBG("%s", hdev->name);
3178 
3179 	hci_dev_lock(hdev);
3180 
3181 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
3182 		mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3183 
3184 	hci_dev_unlock(hdev);
3185 }
3186 
3187 static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3188 {
3189 	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
3190 	struct hci_conn *conn;
3191 
3192 	BT_DBG("%s", hdev->name);
3193 
3194 	hci_dev_lock(hdev);
3195 
3196 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3197 	if (!conn)
3198 		goto unlock;
3199 
3200 	/* To avoid duplicate auth_failed events to user space we check
3201 	 * the HCI_CONN_AUTH_PEND flag which will be set if we
3202 	 * initiated the authentication. A traditional auth_complete
3203 	 * event is always produced when we are the initiator and is also
3204 	 * mapped to the mgmt_auth_failed event. */
3205 	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status != 0)
3206 		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
3207 				 ev->status);
3208 
3209 	hci_conn_put(conn);
3210 
3211 unlock:
3212 	hci_dev_unlock(hdev);
3213 }
3214 
3215 static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
3216 {
3217 	struct hci_ev_remote_host_features *ev = (void *) skb->data;
3218 	struct inquiry_entry *ie;
3219 
3220 	BT_DBG("%s", hdev->name);
3221 
3222 	hci_dev_lock(hdev);
3223 
3224 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3225 	if (ie)
3226 		ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3227 
3228 	hci_dev_unlock(hdev);
3229 }
3230 
3231 static inline void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
3232 						   struct sk_buff *skb)
3233 {
3234 	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
3235 	struct oob_data *data;
3236 
3237 	BT_DBG("%s", hdev->name);
3238 
3239 	hci_dev_lock(hdev);
3240 
3241 	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3242 		goto unlock;
3243 
3244 	data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
3245 	if (data) {
3246 		struct hci_cp_remote_oob_data_reply cp;
3247 
3248 		bacpy(&cp.bdaddr, &ev->bdaddr);
3249 		memcpy(cp.hash, data->hash, sizeof(cp.hash));
3250 		memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));
3251 
3252 		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
3253 									&cp);
3254 	} else {
3255 		struct hci_cp_remote_oob_data_neg_reply cp;
3256 
3257 		bacpy(&cp.bdaddr, &ev->bdaddr);
3258 		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
3259 									&cp);
3260 	}
3261 
3262 unlock:
3263 	hci_dev_unlock(hdev);
3264 }
3265 
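/* LE Connection Complete event: create or look up the LE connection,
 * report success or failure to the management layer and confirm the
 * result to the upper protocol layers. */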
3266 static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3267 {
3268 	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
3269 	struct hci_conn *conn;
3270 
3271 	BT_DBG("%s status %d", hdev->name, ev->status);
3272 
3273 	hci_dev_lock(hdev);
3274 
3275 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr);
3276 	if (!conn) {
3277 		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
3278 		if (!conn) {
3279 			BT_ERR("No memory for new connection");
3280 			hci_dev_unlock(hdev);
3281 			return;
3282 		}
3283 
3284 		conn->dst_type = ev->bdaddr_type;
3285 	}
3286 
3287 	if (ev->status) {
3288 		mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
3289 						conn->dst_type, ev->status);
3290 		hci_proto_connect_cfm(conn, ev->status);
3291 		conn->state = BT_CLOSED;
3292 		hci_conn_del(conn);
3293 		goto unlock;
3294 	}
3295 
3296 	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3297 		mgmt_device_connected(hdev, &ev->bdaddr, conn->type,
3298 				      conn->dst_type, 0, NULL, 0, NULL);
3299 
3300 	conn->sec_level = BT_SECURITY_LOW;
3301 	conn->handle = __le16_to_cpu(ev->handle);
3302 	conn->state = BT_CONNECTED;
3303 
3304 	hci_conn_hold_device(conn);
3305 	hci_conn_add_sysfs(conn);
3306 
3307 	hci_proto_connect_cfm(conn, ev->status);
3308 
3309 unlock:
3310 	hci_dev_unlock(hdev);
3311 }
3312 
3313 static inline void hci_le_adv_report_evt(struct hci_dev *hdev,
3314 						struct sk_buff *skb)
3315 {
3316 	u8 num_reports = skb->data[0];
3317 	void *ptr = &skb->data[1];
3318 	s8 rssi;
3319 
3320 	hci_dev_lock(hdev);
3321 
3322 	while (num_reports--) {
3323 		struct hci_ev_le_advertising_info *ev = ptr;
3324 
3325 		hci_add_adv_entry(hdev, ev);
3326 
3327 		rssi = ev->data[ev->length];
3328 		mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type,
3329 				  NULL, rssi, 0, 1, ev->data, ev->length);
3330 
3331 		ptr += sizeof(*ev) + ev->length + 1;
3332 	}
3333 
3334 	hci_dev_unlock(hdev);
3335 }
3336 
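/* LE Long Term Key Request event: reply with a stored LTK matching the
 * EDiv/Rand values, or send a negative reply if none is found. An STK is
 * discarded after its first use. */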
3337 static inline void hci_le_ltk_request_evt(struct hci_dev *hdev,
3338 						struct sk_buff *skb)
3339 {
3340 	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
3341 	struct hci_cp_le_ltk_reply cp;
3342 	struct hci_cp_le_ltk_neg_reply neg;
3343 	struct hci_conn *conn;
3344 	struct smp_ltk *ltk;
3345 
3346 	BT_DBG("%s handle %d", hdev->name, __le16_to_cpu(ev->handle));
3347 
3348 	hci_dev_lock(hdev);
3349 
3350 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3351 	if (conn == NULL)
3352 		goto not_found;
3353 
3354 	ltk = hci_find_ltk(hdev, ev->ediv, ev->random);
3355 	if (ltk == NULL)
3356 		goto not_found;
3357 
3358 	memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
3359 	cp.handle = cpu_to_le16(conn->handle);
3360 
3361 	if (ltk->authenticated)
3362 		conn->sec_level = BT_SECURITY_HIGH;
3363 
3364 	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
3365 
3366 	if (ltk->type & HCI_SMP_STK) {
3367 		list_del(&ltk->list);
3368 		kfree(ltk);
3369 	}
3370 
3371 	hci_dev_unlock(hdev);
3372 
3373 	return;
3374 
3375 not_found:
3376 	neg.handle = ev->handle;
3377 	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
3378 	hci_dev_unlock(hdev);
3379 }
3380 
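/* LE Meta event: strip the meta header and dispatch on the subevent code. */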
3381 static inline void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
3382 {
3383 	struct hci_ev_le_meta *le_ev = (void *) skb->data;
3384 
3385 	skb_pull(skb, sizeof(*le_ev));
3386 
3387 	switch (le_ev->subevent) {
3388 	case HCI_EV_LE_CONN_COMPLETE:
3389 		hci_le_conn_complete_evt(hdev, skb);
3390 		break;
3391 
3392 	case HCI_EV_LE_ADVERTISING_REPORT:
3393 		hci_le_adv_report_evt(hdev, skb);
3394 		break;
3395 
3396 	case HCI_EV_LE_LTK_REQ:
3397 		hci_le_ltk_request_evt(hdev, skb);
3398 		break;
3399 
3400 	default:
3401 		break;
3402 	}
3403 }
3404 
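/* Main HCI event entry point: strip the event header, dispatch to the
 * matching handler and update the receive statistics. */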
3405 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
3406 {
3407 	struct hci_event_hdr *hdr = (void *) skb->data;
3408 	__u8 event = hdr->evt;
3409 
3410 	skb_pull(skb, HCI_EVENT_HDR_SIZE);
3411 
3412 	switch (event) {
3413 	case HCI_EV_INQUIRY_COMPLETE:
3414 		hci_inquiry_complete_evt(hdev, skb);
3415 		break;
3416 
3417 	case HCI_EV_INQUIRY_RESULT:
3418 		hci_inquiry_result_evt(hdev, skb);
3419 		break;
3420 
3421 	case HCI_EV_CONN_COMPLETE:
3422 		hci_conn_complete_evt(hdev, skb);
3423 		break;
3424 
3425 	case HCI_EV_CONN_REQUEST:
3426 		hci_conn_request_evt(hdev, skb);
3427 		break;
3428 
3429 	case HCI_EV_DISCONN_COMPLETE:
3430 		hci_disconn_complete_evt(hdev, skb);
3431 		break;
3432 
3433 	case HCI_EV_AUTH_COMPLETE:
3434 		hci_auth_complete_evt(hdev, skb);
3435 		break;
3436 
3437 	case HCI_EV_REMOTE_NAME:
3438 		hci_remote_name_evt(hdev, skb);
3439 		break;
3440 
3441 	case HCI_EV_ENCRYPT_CHANGE:
3442 		hci_encrypt_change_evt(hdev, skb);
3443 		break;
3444 
3445 	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
3446 		hci_change_link_key_complete_evt(hdev, skb);
3447 		break;
3448 
3449 	case HCI_EV_REMOTE_FEATURES:
3450 		hci_remote_features_evt(hdev, skb);
3451 		break;
3452 
3453 	case HCI_EV_REMOTE_VERSION:
3454 		hci_remote_version_evt(hdev, skb);
3455 		break;
3456 
3457 	case HCI_EV_QOS_SETUP_COMPLETE:
3458 		hci_qos_setup_complete_evt(hdev, skb);
3459 		break;
3460 
3461 	case HCI_EV_CMD_COMPLETE:
3462 		hci_cmd_complete_evt(hdev, skb);
3463 		break;
3464 
3465 	case HCI_EV_CMD_STATUS:
3466 		hci_cmd_status_evt(hdev, skb);
3467 		break;
3468 
3469 	case HCI_EV_ROLE_CHANGE:
3470 		hci_role_change_evt(hdev, skb);
3471 		break;
3472 
3473 	case HCI_EV_NUM_COMP_PKTS:
3474 		hci_num_comp_pkts_evt(hdev, skb);
3475 		break;
3476 
3477 	case HCI_EV_MODE_CHANGE:
3478 		hci_mode_change_evt(hdev, skb);
3479 		break;
3480 
3481 	case HCI_EV_PIN_CODE_REQ:
3482 		hci_pin_code_request_evt(hdev, skb);
3483 		break;
3484 
3485 	case HCI_EV_LINK_KEY_REQ:
3486 		hci_link_key_request_evt(hdev, skb);
3487 		break;
3488 
3489 	case HCI_EV_LINK_KEY_NOTIFY:
3490 		hci_link_key_notify_evt(hdev, skb);
3491 		break;
3492 
3493 	case HCI_EV_CLOCK_OFFSET:
3494 		hci_clock_offset_evt(hdev, skb);
3495 		break;
3496 
3497 	case HCI_EV_PKT_TYPE_CHANGE:
3498 		hci_pkt_type_change_evt(hdev, skb);
3499 		break;
3500 
3501 	case HCI_EV_PSCAN_REP_MODE:
3502 		hci_pscan_rep_mode_evt(hdev, skb);
3503 		break;
3504 
3505 	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
3506 		hci_inquiry_result_with_rssi_evt(hdev, skb);
3507 		break;
3508 
3509 	case HCI_EV_REMOTE_EXT_FEATURES:
3510 		hci_remote_ext_features_evt(hdev, skb);
3511 		break;
3512 
3513 	case HCI_EV_SYNC_CONN_COMPLETE:
3514 		hci_sync_conn_complete_evt(hdev, skb);
3515 		break;
3516 
3517 	case HCI_EV_SYNC_CONN_CHANGED:
3518 		hci_sync_conn_changed_evt(hdev, skb);
3519 		break;
3520 
3521 	case HCI_EV_SNIFF_SUBRATE:
3522 		hci_sniff_subrate_evt(hdev, skb);
3523 		break;
3524 
3525 	case HCI_EV_EXTENDED_INQUIRY_RESULT:
3526 		hci_extended_inquiry_result_evt(hdev, skb);
3527 		break;
3528 
3529 	case HCI_EV_IO_CAPA_REQUEST:
3530 		hci_io_capa_request_evt(hdev, skb);
3531 		break;
3532 
3533 	case HCI_EV_IO_CAPA_REPLY:
3534 		hci_io_capa_reply_evt(hdev, skb);
3535 		break;
3536 
3537 	case HCI_EV_USER_CONFIRM_REQUEST:
3538 		hci_user_confirm_request_evt(hdev, skb);
3539 		break;
3540 
3541 	case HCI_EV_USER_PASSKEY_REQUEST:
3542 		hci_user_passkey_request_evt(hdev, skb);
3543 		break;
3544 
3545 	case HCI_EV_SIMPLE_PAIR_COMPLETE:
3546 		hci_simple_pair_complete_evt(hdev, skb);
3547 		break;
3548 
3549 	case HCI_EV_REMOTE_HOST_FEATURES:
3550 		hci_remote_host_features_evt(hdev, skb);
3551 		break;
3552 
3553 	case HCI_EV_LE_META:
3554 		hci_le_meta_evt(hdev, skb);
3555 		break;
3556 
3557 	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
3558 		hci_remote_oob_data_request_evt(hdev, skb);
3559 		break;
3560 
3561 	case HCI_EV_NUM_COMP_BLOCKS:
3562 		hci_num_comp_blocks_evt(hdev, skb);
3563 		break;
3564 
3565 	default:
3566 		BT_DBG("%s event 0x%x", hdev->name, event);
3567 		break;
3568 	}
3569 
3570 	kfree_skb(skb);
3571 	hdev->stat.evt_rx++;
3572 }
3573