xref: /openbmc/linux/net/bluetooth/hci_event.c (revision 95e9fd10)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4 
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI event handling. */
26 
27 #include <linux/export.h>
28 #include <asm/unaligned.h>
29 
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 
33 /* Handle HCI Event packets */
34 
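/* Command Complete handler for HCI_Inquiry_Cancel: on failure the stop of
 * discovery is reported to the management interface; on success the inquiry
 * flag is cleared, discovery is marked stopped and pending connection
 * attempts are resumed.
 */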
35 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
36 {
37 	__u8 status = *((__u8 *) skb->data);
38 
39 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
40 
41 	if (status) {
42 		hci_dev_lock(hdev);
43 		mgmt_stop_discovery_failed(hdev, status);
44 		hci_dev_unlock(hdev);
45 		return;
46 	}
47 
48 	clear_bit(HCI_INQUIRY, &hdev->flags);
49 
50 	hci_dev_lock(hdev);
51 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
52 	hci_dev_unlock(hdev);
53 
54 	hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);
55 
56 	hci_conn_check_pending(hdev);
57 }
58 
59 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
60 {
61 	__u8 status = *((__u8 *) skb->data);
62 
63 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
64 
65 	if (status)
66 		return;
67 
68 	set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
69 }
70 
71 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
72 {
73 	__u8 status = *((__u8 *) skb->data);
74 
75 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
76 
77 	if (status)
78 		return;
79 
80 	clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
81 
82 	hci_conn_check_pending(hdev);
83 }
84 
85 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
86 					  struct sk_buff *skb)
87 {
88 	BT_DBG("%s", hdev->name);
89 }
90 
91 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
92 {
93 	struct hci_rp_role_discovery *rp = (void *) skb->data;
94 	struct hci_conn *conn;
95 
96 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
97 
98 	if (rp->status)
99 		return;
100 
101 	hci_dev_lock(hdev);
102 
103 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
104 	if (conn) {
105 		if (rp->role)
106 			conn->link_mode &= ~HCI_LM_MASTER;
107 		else
108 			conn->link_mode |= HCI_LM_MASTER;
109 	}
110 
111 	hci_dev_unlock(hdev);
112 }
113 
114 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
115 {
116 	struct hci_rp_read_link_policy *rp = (void *) skb->data;
117 	struct hci_conn *conn;
118 
119 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
120 
121 	if (rp->status)
122 		return;
123 
124 	hci_dev_lock(hdev);
125 
126 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
127 	if (conn)
128 		conn->link_policy = __le16_to_cpu(rp->policy);
129 
130 	hci_dev_unlock(hdev);
131 }
132 
133 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
134 {
135 	struct hci_rp_write_link_policy *rp = (void *) skb->data;
136 	struct hci_conn *conn;
137 	void *sent;
138 
139 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
140 
141 	if (rp->status)
142 		return;
143 
144 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
145 	if (!sent)
146 		return;
147 
148 	hci_dev_lock(hdev);
149 
150 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
151 	if (conn)
152 		conn->link_policy = get_unaligned_le16(sent + 2);
153 
154 	hci_dev_unlock(hdev);
155 }
156 
157 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
158 					struct sk_buff *skb)
159 {
160 	struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
161 
162 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
163 
164 	if (rp->status)
165 		return;
166 
167 	hdev->link_policy = __le16_to_cpu(rp->policy);
168 }
169 
170 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
171 					 struct sk_buff *skb)
172 {
173 	__u8 status = *((__u8 *) skb->data);
174 	void *sent;
175 
176 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
177 
178 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
179 	if (!sent)
180 		return;
181 
182 	if (!status)
183 		hdev->link_policy = get_unaligned_le16(sent);
184 
185 	hci_req_complete(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, status);
186 }
187 
188 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
189 {
190 	__u8 status = *((__u8 *) skb->data);
191 
192 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
193 
194 	clear_bit(HCI_RESET, &hdev->flags);
195 
196 	hci_req_complete(hdev, HCI_OP_RESET, status);
197 
198 	/* Reset all non-persistent flags */
199 	hdev->dev_flags &= ~(BIT(HCI_LE_SCAN) | BIT(HCI_PENDING_CLASS) |
200 			     BIT(HCI_PERIODIC_INQ));
201 
202 	hdev->discovery.state = DISCOVERY_STOPPED;
203 }
204 
205 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
206 {
207 	__u8 status = *((__u8 *) skb->data);
208 	void *sent;
209 
210 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
211 
212 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
213 	if (!sent)
214 		return;
215 
216 	hci_dev_lock(hdev);
217 
218 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
219 		mgmt_set_local_name_complete(hdev, sent, status);
220 	else if (!status)
221 		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
222 
223 	hci_dev_unlock(hdev);
224 
225 	hci_req_complete(hdev, HCI_OP_WRITE_LOCAL_NAME, status);
226 }
227 
228 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
229 {
230 	struct hci_rp_read_local_name *rp = (void *) skb->data;
231 
232 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
233 
234 	if (rp->status)
235 		return;
236 
237 	if (test_bit(HCI_SETUP, &hdev->dev_flags))
238 		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
239 }
240 
241 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
242 {
243 	__u8 status = *((__u8 *) skb->data);
244 	void *sent;
245 
246 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
247 
248 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
249 	if (!sent)
250 		return;
251 
252 	if (!status) {
253 		__u8 param = *((__u8 *) sent);
254 
255 		if (param == AUTH_ENABLED)
256 			set_bit(HCI_AUTH, &hdev->flags);
257 		else
258 			clear_bit(HCI_AUTH, &hdev->flags);
259 	}
260 
261 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
262 		mgmt_auth_enable_complete(hdev, status);
263 
264 	hci_req_complete(hdev, HCI_OP_WRITE_AUTH_ENABLE, status);
265 }
266 
267 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
268 {
269 	__u8 status = *((__u8 *) skb->data);
270 	void *sent;
271 
272 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
273 
274 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
275 	if (!sent)
276 		return;
277 
278 	if (!status) {
279 		__u8 param = *((__u8 *) sent);
280 
281 		if (param)
282 			set_bit(HCI_ENCRYPT, &hdev->flags);
283 		else
284 			clear_bit(HCI_ENCRYPT, &hdev->flags);
285 	}
286 
287 	hci_req_complete(hdev, HCI_OP_WRITE_ENCRYPT_MODE, status);
288 }
289 
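/* Command Complete handler for HCI_Write_Scan_Enable: the parameter that was
 * written is recovered from the sent command. On error the failure is passed
 * to the management interface; on success the ISCAN/PSCAN flags are updated,
 * discoverable/connectable state changes are signalled and, if a discoverable
 * timeout is configured, the timer to turn it back off is (re)armed.
 */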
290 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
291 {
292 	__u8 param, status = *((__u8 *) skb->data);
293 	int old_pscan, old_iscan;
294 	void *sent;
295 
296 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
297 
298 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
299 	if (!sent)
300 		return;
301 
302 	param = *((__u8 *) sent);
303 
304 	hci_dev_lock(hdev);
305 
306 	if (status != 0) {
307 		mgmt_write_scan_failed(hdev, param, status);
308 		hdev->discov_timeout = 0;
309 		goto done;
310 	}
311 
312 	old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
313 	old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
314 
315 	if (param & SCAN_INQUIRY) {
316 		set_bit(HCI_ISCAN, &hdev->flags);
317 		if (!old_iscan)
318 			mgmt_discoverable(hdev, 1);
319 		if (hdev->discov_timeout > 0) {
320 			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
321 			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
322 					   to);
323 		}
324 	} else if (old_iscan)
325 		mgmt_discoverable(hdev, 0);
326 
327 	if (param & SCAN_PAGE) {
328 		set_bit(HCI_PSCAN, &hdev->flags);
329 		if (!old_pscan)
330 			mgmt_connectable(hdev, 1);
331 	} else if (old_pscan)
332 		mgmt_connectable(hdev, 0);
333 
334 done:
335 	hci_dev_unlock(hdev);
336 	hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status);
337 }
338 
339 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
340 {
341 	struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
342 
343 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
344 
345 	if (rp->status)
346 		return;
347 
348 	memcpy(hdev->dev_class, rp->dev_class, 3);
349 
350 	BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
351 	       hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
352 }
353 
354 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
355 {
356 	__u8 status = *((__u8 *) skb->data);
357 	void *sent;
358 
359 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
360 
361 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
362 	if (!sent)
363 		return;
364 
365 	hci_dev_lock(hdev);
366 
367 	if (status == 0)
368 		memcpy(hdev->dev_class, sent, 3);
369 
370 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
371 		mgmt_set_class_of_dev_complete(hdev, sent, status);
372 
373 	hci_dev_unlock(hdev);
374 }
375 
376 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
377 {
378 	struct hci_rp_read_voice_setting *rp = (void *) skb->data;
379 	__u16 setting;
380 
381 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
382 
383 	if (rp->status)
384 		return;
385 
386 	setting = __le16_to_cpu(rp->voice_setting);
387 
388 	if (hdev->voice_setting == setting)
389 		return;
390 
391 	hdev->voice_setting = setting;
392 
393 	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
394 
395 	if (hdev->notify)
396 		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
397 }
398 
399 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
400 				       struct sk_buff *skb)
401 {
402 	__u8 status = *((__u8 *) skb->data);
403 	__u16 setting;
404 	void *sent;
405 
406 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
407 
408 	if (status)
409 		return;
410 
411 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
412 	if (!sent)
413 		return;
414 
415 	setting = get_unaligned_le16(sent);
416 
417 	if (hdev->voice_setting == setting)
418 		return;
419 
420 	hdev->voice_setting = setting;
421 
422 	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
423 
424 	if (hdev->notify)
425 		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
426 }
427 
428 static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
429 {
430 	__u8 status = *((__u8 *) skb->data);
431 
432 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
433 
434 	hci_req_complete(hdev, HCI_OP_HOST_BUFFER_SIZE, status);
435 }
436 
437 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
438 {
439 	__u8 status = *((__u8 *) skb->data);
440 	void *sent;
441 
442 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
443 
444 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
445 	if (!sent)
446 		return;
447 
448 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
449 		mgmt_ssp_enable_complete(hdev, *((u8 *) sent), status);
450 	else if (!status) {
451 		if (*((u8 *) sent))
452 			set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
453 		else
454 			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
455 	}
456 }
457 
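/* Pick the best supported inquiry mode: 2 (extended inquiry result) when the
 * controller advertises it, 1 (inquiry result with RSSI) when that feature is
 * set or for a few known controllers (matched by manufacturer, HCI revision
 * and LMP subversion) that support it without advertising it, otherwise 0
 * (standard inquiry result).
 */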
458 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
459 {
460 	if (hdev->features[6] & LMP_EXT_INQ)
461 		return 2;
462 
463 	if (hdev->features[3] & LMP_RSSI_INQ)
464 		return 1;
465 
466 	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
467 	    hdev->lmp_subver == 0x0757)
468 		return 1;
469 
470 	if (hdev->manufacturer == 15) {
471 		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
472 			return 1;
473 		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
474 			return 1;
475 		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
476 			return 1;
477 	}
478 
479 	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
480 	    hdev->lmp_subver == 0x1805)
481 		return 1;
482 
483 	return 0;
484 }
485 
486 static void hci_setup_inquiry_mode(struct hci_dev *hdev)
487 {
488 	u8 mode;
489 
490 	mode = hci_get_inquiry_mode(hdev);
491 
492 	hci_send_cmd(hdev, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
493 }
494 
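/* Build the 8-byte event mask sent with HCI_Set_Event_Mask: a fixed baseline
 * plus optional events enabled according to the local LMP features. Pre-1.2
 * controllers are left at their default mask.
 */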
495 static void hci_setup_event_mask(struct hci_dev *hdev)
496 {
497 	/* The second byte is 0xff instead of 0x9f (two reserved bits
498 	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
499 	 * command otherwise */
500 	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
501 
502 	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
503 	 * any event mask for pre-1.2 devices */
504 	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
505 		return;
506 
507 	events[4] |= 0x01; /* Flow Specification Complete */
508 	events[4] |= 0x02; /* Inquiry Result with RSSI */
509 	events[4] |= 0x04; /* Read Remote Extended Features Complete */
510 	events[5] |= 0x08; /* Synchronous Connection Complete */
511 	events[5] |= 0x10; /* Synchronous Connection Changed */
512 
513 	if (hdev->features[3] & LMP_RSSI_INQ)
514 		events[4] |= 0x02; /* Inquiry Result with RSSI */
515 
516 	if (hdev->features[5] & LMP_SNIFF_SUBR)
517 		events[5] |= 0x20; /* Sniff Subrating */
518 
519 	if (hdev->features[5] & LMP_PAUSE_ENC)
520 		events[5] |= 0x80; /* Encryption Key Refresh Complete */
521 
522 	if (hdev->features[6] & LMP_EXT_INQ)
523 		events[5] |= 0x40; /* Extended Inquiry Result */
524 
525 	if (hdev->features[6] & LMP_NO_FLUSH)
526 		events[7] |= 0x01; /* Enhanced Flush Complete */
527 
528 	if (hdev->features[7] & LMP_LSTO)
529 		events[6] |= 0x80; /* Link Supervision Timeout Changed */
530 
531 	if (hdev->features[6] & LMP_SIMPLE_PAIR) {
532 		events[6] |= 0x01;	/* IO Capability Request */
533 		events[6] |= 0x02;	/* IO Capability Response */
534 		events[6] |= 0x04;	/* User Confirmation Request */
535 		events[6] |= 0x08;	/* User Passkey Request */
536 		events[6] |= 0x10;	/* Remote OOB Data Request */
537 		events[6] |= 0x20;	/* Simple Pairing Complete */
538 		events[7] |= 0x04;	/* User Passkey Notification */
539 		events[7] |= 0x08;	/* Keypress Notification */
540 		events[7] |= 0x10;	/* Remote Host Supported
541 					 * Features Notification */
542 	}
543 
544 	if (hdev->features[4] & LMP_LE)
545 		events[7] |= 0x20;	/* LE Meta-Event */
546 
547 	hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
548 }
549 
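/* BR/EDR controller init sequence, run from the Read Local Version handler
 * while HCI_INIT is set: program the event mask, read the supported commands
 * (1.2+), enable SSP or clear the EIR data, select the inquiry mode, read the
 * inquiry response TX power, read extended features page 1 and, if link
 * security is requested, enable authentication.
 */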
550 static void hci_setup(struct hci_dev *hdev)
551 {
552 	if (hdev->dev_type != HCI_BREDR)
553 		return;
554 
555 	hci_setup_event_mask(hdev);
556 
557 	if (hdev->hci_ver > BLUETOOTH_VER_1_1)
558 		hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
559 
560 	if (lmp_ssp_capable(hdev)) {
561 		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
562 			u8 mode = 0x01;
563 			hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE,
564 				     sizeof(mode), &mode);
565 		} else {
566 			struct hci_cp_write_eir cp;
567 
568 			memset(hdev->eir, 0, sizeof(hdev->eir));
569 			memset(&cp, 0, sizeof(cp));
570 
571 			hci_send_cmd(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
572 		}
573 	}
574 
575 	if (hdev->features[3] & LMP_RSSI_INQ)
576 		hci_setup_inquiry_mode(hdev);
577 
578 	if (hdev->features[7] & LMP_INQ_TX_PWR)
579 		hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
580 
581 	if (hdev->features[7] & LMP_EXTFEATURES) {
582 		struct hci_cp_read_local_ext_features cp;
583 
584 		cp.page = 0x01;
585 		hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, sizeof(cp),
586 			     &cp);
587 	}
588 
589 	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
590 		u8 enable = 1;
591 		hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
592 			     &enable);
593 	}
594 }
595 
596 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
597 {
598 	struct hci_rp_read_local_version *rp = (void *) skb->data;
599 
600 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
601 
602 	if (rp->status)
603 		goto done;
604 
605 	hdev->hci_ver = rp->hci_ver;
606 	hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
607 	hdev->lmp_ver = rp->lmp_ver;
608 	hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
609 	hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
610 
611 	BT_DBG("%s manufacturer 0x%4.4x hci ver %d:%d", hdev->name,
612 	       hdev->manufacturer, hdev->hci_ver, hdev->hci_rev);
613 
614 	if (test_bit(HCI_INIT, &hdev->flags))
615 		hci_setup(hdev);
616 
617 done:
618 	hci_req_complete(hdev, HCI_OP_READ_LOCAL_VERSION, rp->status);
619 }
620 
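/* Derive the default link policy from the supported LMP features (role
 * switch, hold, sniff, park) and write it to the controller.
 */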
621 static void hci_setup_link_policy(struct hci_dev *hdev)
622 {
623 	struct hci_cp_write_def_link_policy cp;
624 	u16 link_policy = 0;
625 
626 	if (hdev->features[0] & LMP_RSWITCH)
627 		link_policy |= HCI_LP_RSWITCH;
628 	if (hdev->features[0] & LMP_HOLD)
629 		link_policy |= HCI_LP_HOLD;
630 	if (hdev->features[0] & LMP_SNIFF)
631 		link_policy |= HCI_LP_SNIFF;
632 	if (hdev->features[1] & LMP_PARK)
633 		link_policy |= HCI_LP_PARK;
634 
635 	cp.policy = cpu_to_le16(link_policy);
636 	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
637 }
638 
639 static void hci_cc_read_local_commands(struct hci_dev *hdev,
640 				       struct sk_buff *skb)
641 {
642 	struct hci_rp_read_local_commands *rp = (void *) skb->data;
643 
644 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
645 
646 	if (rp->status)
647 		goto done;
648 
649 	memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
650 
651 	if (test_bit(HCI_INIT, &hdev->flags) && (hdev->commands[5] & 0x10))
652 		hci_setup_link_policy(hdev);
653 
654 done:
655 	hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status);
656 }
657 
658 static void hci_cc_read_local_features(struct hci_dev *hdev,
659 				       struct sk_buff *skb)
660 {
661 	struct hci_rp_read_local_features *rp = (void *) skb->data;
662 
663 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
664 
665 	if (rp->status)
666 		return;
667 
668 	memcpy(hdev->features, rp->features, 8);
669 
670 	/* Adjust default settings according to features
671 	 * supported by the device. */
672 
673 	if (hdev->features[0] & LMP_3SLOT)
674 		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
675 
676 	if (hdev->features[0] & LMP_5SLOT)
677 		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
678 
679 	if (hdev->features[1] & LMP_HV2) {
680 		hdev->pkt_type  |= (HCI_HV2);
681 		hdev->esco_type |= (ESCO_HV2);
682 	}
683 
684 	if (hdev->features[1] & LMP_HV3) {
685 		hdev->pkt_type  |= (HCI_HV3);
686 		hdev->esco_type |= (ESCO_HV3);
687 	}
688 
689 	if (hdev->features[3] & LMP_ESCO)
690 		hdev->esco_type |= (ESCO_EV3);
691 
692 	if (hdev->features[4] & LMP_EV4)
693 		hdev->esco_type |= (ESCO_EV4);
694 
695 	if (hdev->features[4] & LMP_EV5)
696 		hdev->esco_type |= (ESCO_EV5);
697 
698 	if (hdev->features[5] & LMP_EDR_ESCO_2M)
699 		hdev->esco_type |= (ESCO_2EV3);
700 
701 	if (hdev->features[5] & LMP_EDR_ESCO_3M)
702 		hdev->esco_type |= (ESCO_3EV3);
703 
704 	if (hdev->features[5] & LMP_EDR_3S_ESCO)
705 		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
706 
707 	BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
708 	       hdev->features[0], hdev->features[1],
709 	       hdev->features[2], hdev->features[3],
710 	       hdev->features[4], hdev->features[5],
711 	       hdev->features[6], hdev->features[7]);
712 }
713 
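/* Write LE Host Supported, but only when the desired value (derived from the
 * HCI_LE_ENABLED flag) differs from what the controller currently reports in
 * host_features.
 */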
714 static void hci_set_le_support(struct hci_dev *hdev)
715 {
716 	struct hci_cp_write_le_host_supported cp;
717 
718 	memset(&cp, 0, sizeof(cp));
719 
720 	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
721 		cp.le = 1;
722 		cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR);
723 	}
724 
725 	if (cp.le != !!(hdev->host_features[0] & LMP_HOST_LE))
726 		hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
727 			     &cp);
728 }
729 
730 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
731 					   struct sk_buff *skb)
732 {
733 	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
734 
735 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
736 
737 	if (rp->status)
738 		goto done;
739 
740 	switch (rp->page) {
741 	case 0:
742 		memcpy(hdev->features, rp->features, 8);
743 		break;
744 	case 1:
745 		memcpy(hdev->host_features, rp->features, 8);
746 		break;
747 	}
748 
749 	if (test_bit(HCI_INIT, &hdev->flags) && hdev->features[4] & LMP_LE)
750 		hci_set_le_support(hdev);
751 
752 done:
753 	hci_req_complete(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, rp->status);
754 }
755 
756 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
757 					  struct sk_buff *skb)
758 {
759 	struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
760 
761 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
762 
763 	if (rp->status)
764 		return;
765 
766 	hdev->flow_ctl_mode = rp->mode;
767 
768 	hci_req_complete(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, rp->status);
769 }
770 
771 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
772 {
773 	struct hci_rp_read_buffer_size *rp = (void *) skb->data;
774 
775 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
776 
777 	if (rp->status)
778 		return;
779 
780 	hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
781 	hdev->sco_mtu  = rp->sco_mtu;
782 	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
783 	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
784 
785 	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
786 		hdev->sco_mtu  = 64;
787 		hdev->sco_pkts = 8;
788 	}
789 
790 	hdev->acl_cnt = hdev->acl_pkts;
791 	hdev->sco_cnt = hdev->sco_pkts;
792 
793 	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
794 	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
795 }
796 
797 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
798 {
799 	struct hci_rp_read_bd_addr *rp = (void *) skb->data;
800 
801 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
802 
803 	if (!rp->status)
804 		bacpy(&hdev->bdaddr, &rp->bdaddr);
805 
806 	hci_req_complete(hdev, HCI_OP_READ_BD_ADDR, rp->status);
807 }
808 
809 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
810 					struct sk_buff *skb)
811 {
812 	struct hci_rp_read_data_block_size *rp = (void *) skb->data;
813 
814 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
815 
816 	if (rp->status)
817 		return;
818 
819 	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
820 	hdev->block_len = __le16_to_cpu(rp->block_len);
821 	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
822 
823 	hdev->block_cnt = hdev->num_blocks;
824 
825 	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
826 	       hdev->block_cnt, hdev->block_len);
827 
828 	hci_req_complete(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, rp->status);
829 }
830 
831 static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
832 {
833 	__u8 status = *((__u8 *) skb->data);
834 
835 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
836 
837 	hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
838 }
839 
840 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
841 				       struct sk_buff *skb)
842 {
843 	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
844 
845 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
846 
847 	if (rp->status)
848 		return;
849 
850 	hdev->amp_status = rp->amp_status;
851 	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
852 	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
853 	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
854 	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
855 	hdev->amp_type = rp->amp_type;
856 	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
857 	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
858 	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
859 	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
860 
861 	hci_req_complete(hdev, HCI_OP_READ_LOCAL_AMP_INFO, rp->status);
862 }
863 
864 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
865 					  struct sk_buff *skb)
866 {
867 	__u8 status = *((__u8 *) skb->data);
868 
869 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
870 
871 	hci_req_complete(hdev, HCI_OP_DELETE_STORED_LINK_KEY, status);
872 }
873 
874 static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
875 {
876 	__u8 status = *((__u8 *) skb->data);
877 
878 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
879 
880 	hci_req_complete(hdev, HCI_OP_SET_EVENT_MASK, status);
881 }
882 
883 static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
884 				      struct sk_buff *skb)
885 {
886 	__u8 status = *((__u8 *) skb->data);
887 
888 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
889 
890 	hci_req_complete(hdev, HCI_OP_WRITE_INQUIRY_MODE, status);
891 }
892 
893 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
894 					 struct sk_buff *skb)
895 {
896 	struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
897 
898 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
899 
900 	if (!rp->status)
901 		hdev->inq_tx_power = rp->tx_power;
902 
903 	hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, rp->status);
904 }
905 
906 static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
907 {
908 	__u8 status = *((__u8 *) skb->data);
909 
910 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
911 
912 	hci_req_complete(hdev, HCI_OP_SET_EVENT_FLT, status);
913 }
914 
915 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
916 {
917 	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
918 	struct hci_cp_pin_code_reply *cp;
919 	struct hci_conn *conn;
920 
921 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
922 
923 	hci_dev_lock(hdev);
924 
925 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
926 		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
927 
928 	if (rp->status != 0)
929 		goto unlock;
930 
931 	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
932 	if (!cp)
933 		goto unlock;
934 
935 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
936 	if (conn)
937 		conn->pin_length = cp->pin_len;
938 
939 unlock:
940 	hci_dev_unlock(hdev);
941 }
942 
943 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
944 {
945 	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
946 
947 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
948 
949 	hci_dev_lock(hdev);
950 
951 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
952 		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
953 						 rp->status);
954 
955 	hci_dev_unlock(hdev);
956 }
957 
958 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
959 				       struct sk_buff *skb)
960 {
961 	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
962 
963 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
964 
965 	if (rp->status)
966 		return;
967 
968 	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
969 	hdev->le_pkts = rp->le_max_pkt;
970 
971 	hdev->le_cnt = hdev->le_pkts;
972 
973 	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
974 
975 	hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status);
976 }
977 
978 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
979 {
980 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
981 
982 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
983 
984 	hci_dev_lock(hdev);
985 
986 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
987 		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
988 						 rp->status);
989 
990 	hci_dev_unlock(hdev);
991 }
992 
993 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
994 					  struct sk_buff *skb)
995 {
996 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
997 
998 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
999 
1000 	hci_dev_lock(hdev);
1001 
1002 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
1003 		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
1004 						     ACL_LINK, 0, rp->status);
1005 
1006 	hci_dev_unlock(hdev);
1007 }
1008 
1009 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
1010 {
1011 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1012 
1013 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1014 
1015 	hci_dev_lock(hdev);
1016 
1017 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
1018 		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1019 						 0, rp->status);
1020 
1021 	hci_dev_unlock(hdev);
1022 }
1023 
1024 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
1025 					  struct sk_buff *skb)
1026 {
1027 	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1028 
1029 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1030 
1031 	hci_dev_lock(hdev);
1032 
1033 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
1034 		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1035 						     ACL_LINK, 0, rp->status);
1036 
1037 	hci_dev_unlock(hdev);
1038 }
1039 
1040 static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
1041 					     struct sk_buff *skb)
1042 {
1043 	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1044 
1045 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1046 
1047 	hci_dev_lock(hdev);
1048 	mgmt_read_local_oob_data_reply_complete(hdev, rp->hash,
1049 						rp->randomizer, rp->status);
1050 	hci_dev_unlock(hdev);
1051 }
1052 
1053 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1054 {
1055 	__u8 status = *((__u8 *) skb->data);
1056 
1057 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1058 
1059 	hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_PARAM, status);
1060 
1061 	if (status) {
1062 		hci_dev_lock(hdev);
1063 		mgmt_start_discovery_failed(hdev, status);
1064 		hci_dev_unlock(hdev);
1065 		return;
1066 	}
1067 }
1068 
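/* Command Complete handler for LE Set Scan Enable: on a successful enable,
 * mark LE scanning active and discovery as FINDING; on a successful disable,
 * clear the flag and either hand over to the BR/EDR phase of an interleaved
 * discovery or mark discovery stopped. Failures are reported to the
 * management interface.
 */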
1069 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1070 				      struct sk_buff *skb)
1071 {
1072 	struct hci_cp_le_set_scan_enable *cp;
1073 	__u8 status = *((__u8 *) skb->data);
1074 
1075 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1076 
1077 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1078 	if (!cp)
1079 		return;
1080 
1081 	switch (cp->enable) {
1082 	case LE_SCANNING_ENABLED:
1083 		hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_ENABLE, status);
1084 
1085 		if (status) {
1086 			hci_dev_lock(hdev);
1087 			mgmt_start_discovery_failed(hdev, status);
1088 			hci_dev_unlock(hdev);
1089 			return;
1090 		}
1091 
1092 		set_bit(HCI_LE_SCAN, &hdev->dev_flags);
1093 
1094 		hci_dev_lock(hdev);
1095 		hci_discovery_set_state(hdev, DISCOVERY_FINDING);
1096 		hci_dev_unlock(hdev);
1097 		break;
1098 
1099 	case LE_SCANNING_DISABLED:
1100 		if (status) {
1101 			hci_dev_lock(hdev);
1102 			mgmt_stop_discovery_failed(hdev, status);
1103 			hci_dev_unlock(hdev);
1104 			return;
1105 		}
1106 
1107 		clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
1108 
1109 		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
1110 		    hdev->discovery.state == DISCOVERY_FINDING) {
1111 			mgmt_interleaved_discovery(hdev);
1112 		} else {
1113 			hci_dev_lock(hdev);
1114 			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1115 			hci_dev_unlock(hdev);
1116 		}
1117 
1118 		break;
1119 
1120 	default:
1121 		BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
1122 		break;
1123 	}
1124 }
1125 
1126 static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb)
1127 {
1128 	struct hci_rp_le_ltk_reply *rp = (void *) skb->data;
1129 
1130 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1131 
1132 	if (rp->status)
1133 		return;
1134 
1135 	hci_req_complete(hdev, HCI_OP_LE_LTK_REPLY, rp->status);
1136 }
1137 
1138 static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
1139 {
1140 	struct hci_rp_le_ltk_neg_reply *rp = (void *) skb->data;
1141 
1142 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1143 
1144 	if (rp->status)
1145 		return;
1146 
1147 	hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status);
1148 }
1149 
1150 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1151 					   struct sk_buff *skb)
1152 {
1153 	struct hci_cp_write_le_host_supported *sent;
1154 	__u8 status = *((__u8 *) skb->data);
1155 
1156 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1157 
1158 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1159 	if (!sent)
1160 		return;
1161 
1162 	if (!status) {
1163 		if (sent->le)
1164 			hdev->host_features[0] |= LMP_HOST_LE;
1165 		else
1166 			hdev->host_features[0] &= ~LMP_HOST_LE;
1167 	}
1168 
1169 	if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
1170 	    !test_bit(HCI_INIT, &hdev->flags))
1171 		mgmt_le_enable_complete(hdev, sent->le, status);
1172 
1173 	hci_req_complete(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, status);
1174 }
1175 
1176 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1177 {
1178 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1179 
1180 	if (status) {
1181 		hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1182 		hci_conn_check_pending(hdev);
1183 		hci_dev_lock(hdev);
1184 		if (test_bit(HCI_MGMT, &hdev->dev_flags))
1185 			mgmt_start_discovery_failed(hdev, status);
1186 		hci_dev_unlock(hdev);
1187 		return;
1188 	}
1189 
1190 	set_bit(HCI_INQUIRY, &hdev->flags);
1191 
1192 	hci_dev_lock(hdev);
1193 	hci_discovery_set_state(hdev, DISCOVERY_FINDING);
1194 	hci_dev_unlock(hdev);
1195 }
1196 
1197 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1198 {
1199 	struct hci_cp_create_conn *cp;
1200 	struct hci_conn *conn;
1201 
1202 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1203 
1204 	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1205 	if (!cp)
1206 		return;
1207 
1208 	hci_dev_lock(hdev);
1209 
1210 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1211 
1212 	BT_DBG("%s bdaddr %s hcon %p", hdev->name, batostr(&cp->bdaddr), conn);
1213 
1214 	if (status) {
1215 		if (conn && conn->state == BT_CONNECT) {
1216 			if (status != 0x0c || conn->attempt > 2) {
1217 				conn->state = BT_CLOSED;
1218 				hci_proto_connect_cfm(conn, status);
1219 				hci_conn_del(conn);
1220 			} else
1221 				conn->state = BT_CONNECT2;
1222 		}
1223 	} else {
1224 		if (!conn) {
1225 			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
1226 			if (conn) {
1227 				conn->out = true;
1228 				conn->link_mode |= HCI_LM_MASTER;
1229 			} else
1230 				BT_ERR("No memory for new connection");
1231 		}
1232 	}
1233 
1234 	hci_dev_unlock(hdev);
1235 }
1236 
1237 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1238 {
1239 	struct hci_cp_add_sco *cp;
1240 	struct hci_conn *acl, *sco;
1241 	__u16 handle;
1242 
1243 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1244 
1245 	if (!status)
1246 		return;
1247 
1248 	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1249 	if (!cp)
1250 		return;
1251 
1252 	handle = __le16_to_cpu(cp->handle);
1253 
1254 	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1255 
1256 	hci_dev_lock(hdev);
1257 
1258 	acl = hci_conn_hash_lookup_handle(hdev, handle);
1259 	if (acl) {
1260 		sco = acl->link;
1261 		if (sco) {
1262 			sco->state = BT_CLOSED;
1263 
1264 			hci_proto_connect_cfm(sco, status);
1265 			hci_conn_del(sco);
1266 		}
1267 	}
1268 
1269 	hci_dev_unlock(hdev);
1270 }
1271 
1272 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1273 {
1274 	struct hci_cp_auth_requested *cp;
1275 	struct hci_conn *conn;
1276 
1277 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1278 
1279 	if (!status)
1280 		return;
1281 
1282 	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1283 	if (!cp)
1284 		return;
1285 
1286 	hci_dev_lock(hdev);
1287 
1288 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1289 	if (conn) {
1290 		if (conn->state == BT_CONFIG) {
1291 			hci_proto_connect_cfm(conn, status);
1292 			hci_conn_put(conn);
1293 		}
1294 	}
1295 
1296 	hci_dev_unlock(hdev);
1297 }
1298 
1299 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1300 {
1301 	struct hci_cp_set_conn_encrypt *cp;
1302 	struct hci_conn *conn;
1303 
1304 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1305 
1306 	if (!status)
1307 		return;
1308 
1309 	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1310 	if (!cp)
1311 		return;
1312 
1313 	hci_dev_lock(hdev);
1314 
1315 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1316 	if (conn) {
1317 		if (conn->state == BT_CONFIG) {
1318 			hci_proto_connect_cfm(conn, status);
1319 			hci_conn_put(conn);
1320 		}
1321 	}
1322 
1323 	hci_dev_unlock(hdev);
1324 }
1325 
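/* Decide whether an outgoing connection still in BT_CONFIG needs
 * authentication: never for SDP-level security, and for legacy (non-SSP)
 * links only when high security or MITM protection was requested.
 */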
1326 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1327 				    struct hci_conn *conn)
1328 {
1329 	if (conn->state != BT_CONFIG || !conn->out)
1330 		return 0;
1331 
1332 	if (conn->pending_sec_level == BT_SECURITY_SDP)
1333 		return 0;
1334 
1335 	/* Only request authentication for SSP connections or non-SSP
1336 	 * devices with sec_level HIGH or if MITM protection is requested */
1337 	if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1338 	    conn->pending_sec_level != BT_SECURITY_HIGH)
1339 		return 0;
1340 
1341 	return 1;
1342 }
1343 
1344 static int hci_resolve_name(struct hci_dev *hdev,
1345 				   struct inquiry_entry *e)
1346 {
1347 	struct hci_cp_remote_name_req cp;
1348 
1349 	memset(&cp, 0, sizeof(cp));
1350 
1351 	bacpy(&cp.bdaddr, &e->data.bdaddr);
1352 	cp.pscan_rep_mode = e->data.pscan_rep_mode;
1353 	cp.pscan_mode = e->data.pscan_mode;
1354 	cp.clock_offset = e->data.clock_offset;
1355 
1356 	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1357 }
1358 
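/* Pick the next entry from the inquiry cache that still needs its name
 * resolved and issue a Remote Name Request for it; returns true if a request
 * was sent.
 */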
1359 static bool hci_resolve_next_name(struct hci_dev *hdev)
1360 {
1361 	struct discovery_state *discov = &hdev->discovery;
1362 	struct inquiry_entry *e;
1363 
1364 	if (list_empty(&discov->resolve))
1365 		return false;
1366 
1367 	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1368 	if (!e)
1369 		return false;
1370 
1371 	if (hci_resolve_name(hdev, e) == 0) {
1372 		e->name_state = NAME_PENDING;
1373 		return true;
1374 	}
1375 
1376 	return false;
1377 }
1378 
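/* Handle a (possibly failed) remote name result during discovery: report the
 * connection and name to the management interface, update the matching
 * inquiry cache entry and either continue with the next pending name or mark
 * discovery stopped.
 */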
1379 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
1380 				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
1381 {
1382 	struct discovery_state *discov = &hdev->discovery;
1383 	struct inquiry_entry *e;
1384 
1385 	if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1386 		mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
1387 				      name_len, conn->dev_class);
1388 
1389 	if (discov->state == DISCOVERY_STOPPED)
1390 		return;
1391 
1392 	if (discov->state == DISCOVERY_STOPPING)
1393 		goto discov_complete;
1394 
1395 	if (discov->state != DISCOVERY_RESOLVING)
1396 		return;
1397 
1398 	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
1399 	/* If the device was not found in the list of devices whose names are
1400 	 * pending, there is no need to continue resolving the next name, as that
1401 	 * will be done upon receiving another Remote Name Request Complete
1402 	 * event */
1403 	if (!e)
1404 		return;
1405 
1406 	list_del(&e->list);
1407 	if (name) {
1408 		e->name_state = NAME_KNOWN;
1409 		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
1410 				 e->data.rssi, name, name_len);
1411 	} else {
1412 		e->name_state = NAME_NOT_KNOWN;
1413 	}
1414 
1415 	if (hci_resolve_next_name(hdev))
1416 		return;
1417 
1418 discov_complete:
1419 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1420 }
1421 
1422 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1423 {
1424 	struct hci_cp_remote_name_req *cp;
1425 	struct hci_conn *conn;
1426 
1427 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1428 
1429 	/* If successful, wait for the Remote Name Request Complete event
1430 	 * before checking whether authentication is needed */
1431 	if (!status)
1432 		return;
1433 
1434 	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1435 	if (!cp)
1436 		return;
1437 
1438 	hci_dev_lock(hdev);
1439 
1440 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1441 
1442 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
1443 		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
1444 
1445 	if (!conn)
1446 		goto unlock;
1447 
1448 	if (!hci_outgoing_auth_needed(hdev, conn))
1449 		goto unlock;
1450 
1451 	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1452 		struct hci_cp_auth_requested cp;
1453 		cp.handle = __cpu_to_le16(conn->handle);
1454 		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1455 	}
1456 
1457 unlock:
1458 	hci_dev_unlock(hdev);
1459 }
1460 
1461 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1462 {
1463 	struct hci_cp_read_remote_features *cp;
1464 	struct hci_conn *conn;
1465 
1466 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1467 
1468 	if (!status)
1469 		return;
1470 
1471 	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1472 	if (!cp)
1473 		return;
1474 
1475 	hci_dev_lock(hdev);
1476 
1477 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1478 	if (conn) {
1479 		if (conn->state == BT_CONFIG) {
1480 			hci_proto_connect_cfm(conn, status);
1481 			hci_conn_put(conn);
1482 		}
1483 	}
1484 
1485 	hci_dev_unlock(hdev);
1486 }
1487 
1488 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1489 {
1490 	struct hci_cp_read_remote_ext_features *cp;
1491 	struct hci_conn *conn;
1492 
1493 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1494 
1495 	if (!status)
1496 		return;
1497 
1498 	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1499 	if (!cp)
1500 		return;
1501 
1502 	hci_dev_lock(hdev);
1503 
1504 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1505 	if (conn) {
1506 		if (conn->state == BT_CONFIG) {
1507 			hci_proto_connect_cfm(conn, status);
1508 			hci_conn_put(conn);
1509 		}
1510 	}
1511 
1512 	hci_dev_unlock(hdev);
1513 }
1514 
1515 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1516 {
1517 	struct hci_cp_setup_sync_conn *cp;
1518 	struct hci_conn *acl, *sco;
1519 	__u16 handle;
1520 
1521 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1522 
1523 	if (!status)
1524 		return;
1525 
1526 	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1527 	if (!cp)
1528 		return;
1529 
1530 	handle = __le16_to_cpu(cp->handle);
1531 
1532 	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1533 
1534 	hci_dev_lock(hdev);
1535 
1536 	acl = hci_conn_hash_lookup_handle(hdev, handle);
1537 	if (acl) {
1538 		sco = acl->link;
1539 		if (sco) {
1540 			sco->state = BT_CLOSED;
1541 
1542 			hci_proto_connect_cfm(sco, status);
1543 			hci_conn_del(sco);
1544 		}
1545 	}
1546 
1547 	hci_dev_unlock(hdev);
1548 }
1549 
1550 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1551 {
1552 	struct hci_cp_sniff_mode *cp;
1553 	struct hci_conn *conn;
1554 
1555 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1556 
1557 	if (!status)
1558 		return;
1559 
1560 	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1561 	if (!cp)
1562 		return;
1563 
1564 	hci_dev_lock(hdev);
1565 
1566 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1567 	if (conn) {
1568 		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1569 
1570 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1571 			hci_sco_setup(conn, status);
1572 	}
1573 
1574 	hci_dev_unlock(hdev);
1575 }
1576 
1577 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1578 {
1579 	struct hci_cp_exit_sniff_mode *cp;
1580 	struct hci_conn *conn;
1581 
1582 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1583 
1584 	if (!status)
1585 		return;
1586 
1587 	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1588 	if (!cp)
1589 		return;
1590 
1591 	hci_dev_lock(hdev);
1592 
1593 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1594 	if (conn) {
1595 		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1596 
1597 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1598 			hci_sco_setup(conn, status);
1599 	}
1600 
1601 	hci_dev_unlock(hdev);
1602 }
1603 
1604 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1605 {
1606 	struct hci_cp_disconnect *cp;
1607 	struct hci_conn *conn;
1608 
1609 	if (!status)
1610 		return;
1611 
1612 	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1613 	if (!cp)
1614 		return;
1615 
1616 	hci_dev_lock(hdev);
1617 
1618 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1619 	if (conn)
1620 		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1621 				       conn->dst_type, status);
1622 
1623 	hci_dev_unlock(hdev);
1624 }
1625 
1626 static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
1627 {
1628 	struct hci_cp_le_create_conn *cp;
1629 	struct hci_conn *conn;
1630 
1631 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1632 
1633 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
1634 	if (!cp)
1635 		return;
1636 
1637 	hci_dev_lock(hdev);
1638 
1639 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
1640 
1641 	BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->peer_addr),
1642 	       conn);
1643 
1644 	if (status) {
1645 		if (conn && conn->state == BT_CONNECT) {
1646 			conn->state = BT_CLOSED;
1647 			mgmt_connect_failed(hdev, &cp->peer_addr, conn->type,
1648 					    conn->dst_type, status);
1649 			hci_proto_connect_cfm(conn, status);
1650 			hci_conn_del(conn);
1651 		}
1652 	} else {
1653 		if (!conn) {
1654 			conn = hci_conn_add(hdev, LE_LINK, &cp->peer_addr);
1655 			if (conn) {
1656 				conn->dst_type = cp->peer_addr_type;
1657 				conn->out = true;
1658 			} else {
1659 				BT_ERR("No memory for new connection");
1660 			}
1661 		}
1662 	}
1663 
1664 	hci_dev_unlock(hdev);
1665 }
1666 
1667 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
1668 {
1669 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1670 }
1671 
1672 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1673 {
1674 	__u8 status = *((__u8 *) skb->data);
1675 	struct discovery_state *discov = &hdev->discovery;
1676 	struct inquiry_entry *e;
1677 
1678 	BT_DBG("%s status 0x%2.2x", hdev->name, status);
1679 
1680 	hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1681 
1682 	hci_conn_check_pending(hdev);
1683 
1684 	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
1685 		return;
1686 
1687 	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1688 		return;
1689 
1690 	hci_dev_lock(hdev);
1691 
1692 	if (discov->state != DISCOVERY_FINDING)
1693 		goto unlock;
1694 
1695 	if (list_empty(&discov->resolve)) {
1696 		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1697 		goto unlock;
1698 	}
1699 
1700 	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1701 	if (e && hci_resolve_name(hdev, e) == 0) {
1702 		e->name_state = NAME_PENDING;
1703 		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
1704 	} else {
1705 		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1706 	}
1707 
1708 unlock:
1709 	hci_dev_unlock(hdev);
1710 }
1711 
1712 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1713 {
1714 	struct inquiry_data data;
1715 	struct inquiry_info *info = (void *) (skb->data + 1);
1716 	int num_rsp = *((__u8 *) skb->data);
1717 
1718 	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1719 
1720 	if (!num_rsp)
1721 		return;
1722 
1723 	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
1724 		return;
1725 
1726 	hci_dev_lock(hdev);
1727 
1728 	for (; num_rsp; num_rsp--, info++) {
1729 		bool name_known, ssp;
1730 
1731 		bacpy(&data.bdaddr, &info->bdaddr);
1732 		data.pscan_rep_mode	= info->pscan_rep_mode;
1733 		data.pscan_period_mode	= info->pscan_period_mode;
1734 		data.pscan_mode		= info->pscan_mode;
1735 		memcpy(data.dev_class, info->dev_class, 3);
1736 		data.clock_offset	= info->clock_offset;
1737 		data.rssi		= 0x00;
1738 		data.ssp_mode		= 0x00;
1739 
1740 		name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
1741 		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
1742 				  info->dev_class, 0, !name_known, ssp, NULL,
1743 				  0);
1744 	}
1745 
1746 	hci_dev_unlock(hdev);
1747 }
1748 
1749 static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1750 {
1751 	struct hci_ev_conn_complete *ev = (void *) skb->data;
1752 	struct hci_conn *conn;
1753 
1754 	BT_DBG("%s", hdev->name);
1755 
1756 	hci_dev_lock(hdev);
1757 
1758 	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
1759 	if (!conn) {
1760 		if (ev->link_type != SCO_LINK)
1761 			goto unlock;
1762 
1763 		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
1764 		if (!conn)
1765 			goto unlock;
1766 
1767 		conn->type = SCO_LINK;
1768 	}
1769 
1770 	if (!ev->status) {
1771 		conn->handle = __le16_to_cpu(ev->handle);
1772 
1773 		if (conn->type == ACL_LINK) {
1774 			conn->state = BT_CONFIG;
1775 			hci_conn_hold(conn);
1776 
1777 			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
1778 			    !hci_find_link_key(hdev, &ev->bdaddr))
1779 				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
1780 			else
1781 				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1782 		} else
1783 			conn->state = BT_CONNECTED;
1784 
1785 		hci_conn_hold_device(conn);
1786 		hci_conn_add_sysfs(conn);
1787 
1788 		if (test_bit(HCI_AUTH, &hdev->flags))
1789 			conn->link_mode |= HCI_LM_AUTH;
1790 
1791 		if (test_bit(HCI_ENCRYPT, &hdev->flags))
1792 			conn->link_mode |= HCI_LM_ENCRYPT;
1793 
1794 		/* Get remote features */
1795 		if (conn->type == ACL_LINK) {
1796 			struct hci_cp_read_remote_features cp;
1797 			cp.handle = ev->handle;
1798 			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
1799 				     sizeof(cp), &cp);
1800 		}
1801 
1802 		/* Set packet type for incoming connection */
1803 		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
1804 			struct hci_cp_change_conn_ptype cp;
1805 			cp.handle = ev->handle;
1806 			cp.pkt_type = cpu_to_le16(conn->pkt_type);
1807 			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
1808 				     &cp);
1809 		}
1810 	} else {
1811 		conn->state = BT_CLOSED;
1812 		if (conn->type == ACL_LINK)
1813 			mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
1814 					    conn->dst_type, ev->status);
1815 	}
1816 
1817 	if (conn->type == ACL_LINK)
1818 		hci_sco_setup(conn, ev->status);
1819 
1820 	if (ev->status) {
1821 		hci_proto_connect_cfm(conn, ev->status);
1822 		hci_conn_del(conn);
1823 	} else if (ev->link_type != ACL_LINK)
1824 		hci_proto_connect_cfm(conn, ev->status);
1825 
1826 unlock:
1827 	hci_dev_unlock(hdev);
1828 
1829 	hci_conn_check_pending(hdev);
1830 }
1831 
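/* Incoming connection request: if the link type is accepted and the peer is
 * not blacklisted, the request is accepted, either via Accept Connection
 * Request (ACL, or SCO on controllers without eSCO support) or via Accept
 * Synchronous Connection Request with default parameters; otherwise it is
 * rejected.
 */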
1832 static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1833 {
1834 	struct hci_ev_conn_request *ev = (void *) skb->data;
1835 	int mask = hdev->link_mode;
1836 
1837 	BT_DBG("%s bdaddr %s type 0x%x", hdev->name, batostr(&ev->bdaddr),
1838 	       ev->link_type);
1839 
1840 	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type);
1841 
1842 	if ((mask & HCI_LM_ACCEPT) &&
1843 	    !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
1844 		/* Connection accepted */
1845 		struct inquiry_entry *ie;
1846 		struct hci_conn *conn;
1847 
1848 		hci_dev_lock(hdev);
1849 
1850 		ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
1851 		if (ie)
1852 			memcpy(ie->data.dev_class, ev->dev_class, 3);
1853 
1854 		conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
1855 					       &ev->bdaddr);
1856 		if (!conn) {
1857 			conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
1858 			if (!conn) {
1859 				BT_ERR("No memory for new connection");
1860 				hci_dev_unlock(hdev);
1861 				return;
1862 			}
1863 		}
1864 
1865 		memcpy(conn->dev_class, ev->dev_class, 3);
1866 		conn->state = BT_CONNECT;
1867 
1868 		hci_dev_unlock(hdev);
1869 
1870 		if (ev->link_type == ACL_LINK || !lmp_esco_capable(hdev)) {
1871 			struct hci_cp_accept_conn_req cp;
1872 
1873 			bacpy(&cp.bdaddr, &ev->bdaddr);
1874 
1875 			if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
1876 				cp.role = 0x00; /* Become master */
1877 			else
1878 				cp.role = 0x01; /* Remain slave */
1879 
1880 			hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp),
1881 				     &cp);
1882 		} else {
1883 			struct hci_cp_accept_sync_conn_req cp;
1884 
1885 			bacpy(&cp.bdaddr, &ev->bdaddr);
1886 			cp.pkt_type = cpu_to_le16(conn->pkt_type);
1887 
1888 			cp.tx_bandwidth   = __constant_cpu_to_le32(0x00001f40);
1889 			cp.rx_bandwidth   = __constant_cpu_to_le32(0x00001f40);
1890 			cp.max_latency    = __constant_cpu_to_le16(0xffff);
1891 			cp.content_format = cpu_to_le16(hdev->voice_setting);
1892 			cp.retrans_effort = 0xff;
1893 
1894 			hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
1895 				     sizeof(cp), &cp);
1896 		}
1897 	} else {
1898 		/* Connection rejected */
1899 		struct hci_cp_reject_conn_req cp;
1900 
1901 		bacpy(&cp.bdaddr, &ev->bdaddr);
1902 		cp.reason = HCI_ERROR_REJ_BAD_ADDR;
1903 		hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
1904 	}
1905 }
1906 
1907 static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1908 {
1909 	struct hci_ev_disconn_complete *ev = (void *) skb->data;
1910 	struct hci_conn *conn;
1911 
1912 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
1913 
1914 	hci_dev_lock(hdev);
1915 
1916 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1917 	if (!conn)
1918 		goto unlock;
1919 
1920 	if (ev->status == 0)
1921 		conn->state = BT_CLOSED;
1922 
1923 	if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) &&
1924 	    (conn->type == ACL_LINK || conn->type == LE_LINK)) {
1925 		if (ev->status != 0)
1926 			mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1927 					       conn->dst_type, ev->status);
1928 		else
1929 			mgmt_device_disconnected(hdev, &conn->dst, conn->type,
1930 						 conn->dst_type);
1931 	}
1932 
1933 	if (ev->status == 0) {
1934 		if (conn->type == ACL_LINK && conn->flush_key)
1935 			hci_remove_link_key(hdev, &conn->dst);
1936 		hci_proto_disconn_cfm(conn, ev->reason);
1937 		hci_conn_del(conn);
1938 	}
1939 
1940 unlock:
1941 	hci_dev_unlock(hdev);
1942 }
1943 
1944 static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1945 {
1946 	struct hci_ev_auth_complete *ev = (void *) skb->data;
1947 	struct hci_conn *conn;
1948 
1949 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
1950 
1951 	hci_dev_lock(hdev);
1952 
1953 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1954 	if (!conn)
1955 		goto unlock;
1956 
1957 	if (!ev->status) {
1958 		if (!hci_conn_ssp_enabled(conn) &&
1959 		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
1960 			BT_INFO("re-auth of legacy device is not possible.");
1961 		} else {
1962 			conn->link_mode |= HCI_LM_AUTH;
1963 			conn->sec_level = conn->pending_sec_level;
1964 		}
1965 	} else {
1966 		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
1967 				 ev->status);
1968 	}
1969 
1970 	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
1971 	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
1972 
1973 	if (conn->state == BT_CONFIG) {
1974 		if (!ev->status && hci_conn_ssp_enabled(conn)) {
1975 			struct hci_cp_set_conn_encrypt cp;
1976 			cp.handle  = ev->handle;
1977 			cp.encrypt = 0x01;
1978 			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1979 				     &cp);
1980 		} else {
1981 			conn->state = BT_CONNECTED;
1982 			hci_proto_connect_cfm(conn, ev->status);
1983 			hci_conn_put(conn);
1984 		}
1985 	} else {
1986 		hci_auth_cfm(conn, ev->status);
1987 
1988 		hci_conn_hold(conn);
1989 		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1990 		hci_conn_put(conn);
1991 	}
1992 
1993 	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
1994 		if (!ev->status) {
1995 			struct hci_cp_set_conn_encrypt cp;
1996 			cp.handle  = ev->handle;
1997 			cp.encrypt = 0x01;
1998 			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1999 				     &cp);
2000 		} else {
2001 			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2002 			hci_encrypt_cfm(conn, ev->status, 0x00);
2003 		}
2004 	}
2005 
2006 unlock:
2007 	hci_dev_unlock(hdev);
2008 }
2009 
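/* Remote Name Request Complete event: hand the resolved name (or the
 * failure) to the management core and, for outgoing connections that
 * still need authentication, send an Authentication Requested command. */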
2010 static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
2011 {
2012 	struct hci_ev_remote_name *ev = (void *) skb->data;
2013 	struct hci_conn *conn;
2014 
2015 	BT_DBG("%s", hdev->name);
2016 
2017 	hci_conn_check_pending(hdev);
2018 
2019 	hci_dev_lock(hdev);
2020 
2021 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2022 
2023 	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2024 		goto check_auth;
2025 
2026 	if (ev->status == 0)
2027 		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
2028 				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
2029 	else
2030 		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
2031 
2032 check_auth:
2033 	if (!conn)
2034 		goto unlock;
2035 
2036 	if (!hci_outgoing_auth_needed(hdev, conn))
2037 		goto unlock;
2038 
2039 	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2040 		struct hci_cp_auth_requested cp;
2041 		cp.handle = __cpu_to_le16(conn->handle);
2042 		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
2043 	}
2044 
2045 unlock:
2046 	hci_dev_unlock(hdev);
2047 }
2048 
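/* Encryption Change event: update the link mode (encryption implies
 * authentication), disconnect an established link when the change
 * failed, and otherwise complete connection setup or report the new
 * encryption state to the upper layers. */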
2049 static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2050 {
2051 	struct hci_ev_encrypt_change *ev = (void *) skb->data;
2052 	struct hci_conn *conn;
2053 
2054 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2055 
2056 	hci_dev_lock(hdev);
2057 
2058 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2059 	if (conn) {
2060 		if (!ev->status) {
2061 			if (ev->encrypt) {
2062 				/* Encryption implies authentication */
2063 				conn->link_mode |= HCI_LM_AUTH;
2064 				conn->link_mode |= HCI_LM_ENCRYPT;
2065 				conn->sec_level = conn->pending_sec_level;
2066 			} else
2067 				conn->link_mode &= ~HCI_LM_ENCRYPT;
2068 		}
2069 
2070 		clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2071 
2072 		if (ev->status && conn->state == BT_CONNECTED) {
2073 			hci_acl_disconn(conn, HCI_ERROR_AUTH_FAILURE);
2074 			hci_conn_put(conn);
2075 			goto unlock;
2076 		}
2077 
2078 		if (conn->state == BT_CONFIG) {
2079 			if (!ev->status)
2080 				conn->state = BT_CONNECTED;
2081 
2082 			hci_proto_connect_cfm(conn, ev->status);
2083 			hci_conn_put(conn);
2084 		} else
2085 			hci_encrypt_cfm(conn, ev->status, ev->encrypt);
2086 	}
2087 
2088 unlock:
2089 	hci_dev_unlock(hdev);
2090 }
2091 
2092 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2093 					     struct sk_buff *skb)
2094 {
2095 	struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2096 	struct hci_conn *conn;
2097 
2098 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2099 
2100 	hci_dev_lock(hdev);
2101 
2102 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2103 	if (conn) {
2104 		if (!ev->status)
2105 			conn->link_mode |= HCI_LM_SECURE;
2106 
2107 		clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2108 
2109 		hci_key_change_cfm(conn, ev->status);
2110 	}
2111 
2112 	hci_dev_unlock(hdev);
2113 }
2114 
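/* Remote Supported Features event: cache the remote LMP features and,
 * while still in BT_CONFIG, continue setup by reading the extended
 * features page (when both sides are SSP capable) or by requesting the
 * remote name; when no outgoing authentication is needed the connection
 * setup is completed. */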
2115 static void hci_remote_features_evt(struct hci_dev *hdev,
2116 				    struct sk_buff *skb)
2117 {
2118 	struct hci_ev_remote_features *ev = (void *) skb->data;
2119 	struct hci_conn *conn;
2120 
2121 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2122 
2123 	hci_dev_lock(hdev);
2124 
2125 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2126 	if (!conn)
2127 		goto unlock;
2128 
2129 	if (!ev->status)
2130 		memcpy(conn->features, ev->features, 8);
2131 
2132 	if (conn->state != BT_CONFIG)
2133 		goto unlock;
2134 
2135 	if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
2136 		struct hci_cp_read_remote_ext_features cp;
2137 		cp.handle = ev->handle;
2138 		cp.page = 0x01;
2139 		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
2140 			     sizeof(cp), &cp);
2141 		goto unlock;
2142 	}
2143 
2144 	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2145 		struct hci_cp_remote_name_req cp;
2146 		memset(&cp, 0, sizeof(cp));
2147 		bacpy(&cp.bdaddr, &conn->dst);
2148 		cp.pscan_rep_mode = 0x02;
2149 		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2150 	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2151 		mgmt_device_connected(hdev, &conn->dst, conn->type,
2152 				      conn->dst_type, 0, NULL, 0,
2153 				      conn->dev_class);
2154 
2155 	if (!hci_outgoing_auth_needed(hdev, conn)) {
2156 		conn->state = BT_CONNECTED;
2157 		hci_proto_connect_cfm(conn, ev->status);
2158 		hci_conn_put(conn);
2159 	}
2160 
2161 unlock:
2162 	hci_dev_unlock(hdev);
2163 }
2164 
2165 static void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb)
2166 {
2167 	BT_DBG("%s", hdev->name);
2168 }
2169 
2170 static void hci_qos_setup_complete_evt(struct hci_dev *hdev,
2171 				       struct sk_buff *skb)
2172 {
2173 	BT_DBG("%s", hdev->name);
2174 }
2175 
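/* Command Complete event: dispatch to the matching hci_cc_* handler
 * based on the opcode, stop the command timer and, when the controller
 * signals that it can accept more commands, restart the command queue. */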
2176 static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2177 {
2178 	struct hci_ev_cmd_complete *ev = (void *) skb->data;
2179 	__u16 opcode;
2180 
2181 	skb_pull(skb, sizeof(*ev));
2182 
2183 	opcode = __le16_to_cpu(ev->opcode);
2184 
2185 	switch (opcode) {
2186 	case HCI_OP_INQUIRY_CANCEL:
2187 		hci_cc_inquiry_cancel(hdev, skb);
2188 		break;
2189 
2190 	case HCI_OP_PERIODIC_INQ:
2191 		hci_cc_periodic_inq(hdev, skb);
2192 		break;
2193 
2194 	case HCI_OP_EXIT_PERIODIC_INQ:
2195 		hci_cc_exit_periodic_inq(hdev, skb);
2196 		break;
2197 
2198 	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
2199 		hci_cc_remote_name_req_cancel(hdev, skb);
2200 		break;
2201 
2202 	case HCI_OP_ROLE_DISCOVERY:
2203 		hci_cc_role_discovery(hdev, skb);
2204 		break;
2205 
2206 	case HCI_OP_READ_LINK_POLICY:
2207 		hci_cc_read_link_policy(hdev, skb);
2208 		break;
2209 
2210 	case HCI_OP_WRITE_LINK_POLICY:
2211 		hci_cc_write_link_policy(hdev, skb);
2212 		break;
2213 
2214 	case HCI_OP_READ_DEF_LINK_POLICY:
2215 		hci_cc_read_def_link_policy(hdev, skb);
2216 		break;
2217 
2218 	case HCI_OP_WRITE_DEF_LINK_POLICY:
2219 		hci_cc_write_def_link_policy(hdev, skb);
2220 		break;
2221 
2222 	case HCI_OP_RESET:
2223 		hci_cc_reset(hdev, skb);
2224 		break;
2225 
2226 	case HCI_OP_WRITE_LOCAL_NAME:
2227 		hci_cc_write_local_name(hdev, skb);
2228 		break;
2229 
2230 	case HCI_OP_READ_LOCAL_NAME:
2231 		hci_cc_read_local_name(hdev, skb);
2232 		break;
2233 
2234 	case HCI_OP_WRITE_AUTH_ENABLE:
2235 		hci_cc_write_auth_enable(hdev, skb);
2236 		break;
2237 
2238 	case HCI_OP_WRITE_ENCRYPT_MODE:
2239 		hci_cc_write_encrypt_mode(hdev, skb);
2240 		break;
2241 
2242 	case HCI_OP_WRITE_SCAN_ENABLE:
2243 		hci_cc_write_scan_enable(hdev, skb);
2244 		break;
2245 
2246 	case HCI_OP_READ_CLASS_OF_DEV:
2247 		hci_cc_read_class_of_dev(hdev, skb);
2248 		break;
2249 
2250 	case HCI_OP_WRITE_CLASS_OF_DEV:
2251 		hci_cc_write_class_of_dev(hdev, skb);
2252 		break;
2253 
2254 	case HCI_OP_READ_VOICE_SETTING:
2255 		hci_cc_read_voice_setting(hdev, skb);
2256 		break;
2257 
2258 	case HCI_OP_WRITE_VOICE_SETTING:
2259 		hci_cc_write_voice_setting(hdev, skb);
2260 		break;
2261 
2262 	case HCI_OP_HOST_BUFFER_SIZE:
2263 		hci_cc_host_buffer_size(hdev, skb);
2264 		break;
2265 
2266 	case HCI_OP_WRITE_SSP_MODE:
2267 		hci_cc_write_ssp_mode(hdev, skb);
2268 		break;
2269 
2270 	case HCI_OP_READ_LOCAL_VERSION:
2271 		hci_cc_read_local_version(hdev, skb);
2272 		break;
2273 
2274 	case HCI_OP_READ_LOCAL_COMMANDS:
2275 		hci_cc_read_local_commands(hdev, skb);
2276 		break;
2277 
2278 	case HCI_OP_READ_LOCAL_FEATURES:
2279 		hci_cc_read_local_features(hdev, skb);
2280 		break;
2281 
2282 	case HCI_OP_READ_LOCAL_EXT_FEATURES:
2283 		hci_cc_read_local_ext_features(hdev, skb);
2284 		break;
2285 
2286 	case HCI_OP_READ_BUFFER_SIZE:
2287 		hci_cc_read_buffer_size(hdev, skb);
2288 		break;
2289 
2290 	case HCI_OP_READ_BD_ADDR:
2291 		hci_cc_read_bd_addr(hdev, skb);
2292 		break;
2293 
2294 	case HCI_OP_READ_DATA_BLOCK_SIZE:
2295 		hci_cc_read_data_block_size(hdev, skb);
2296 		break;
2297 
2298 	case HCI_OP_WRITE_CA_TIMEOUT:
2299 		hci_cc_write_ca_timeout(hdev, skb);
2300 		break;
2301 
2302 	case HCI_OP_READ_FLOW_CONTROL_MODE:
2303 		hci_cc_read_flow_control_mode(hdev, skb);
2304 		break;
2305 
2306 	case HCI_OP_READ_LOCAL_AMP_INFO:
2307 		hci_cc_read_local_amp_info(hdev, skb);
2308 		break;
2309 
2310 	case HCI_OP_DELETE_STORED_LINK_KEY:
2311 		hci_cc_delete_stored_link_key(hdev, skb);
2312 		break;
2313 
2314 	case HCI_OP_SET_EVENT_MASK:
2315 		hci_cc_set_event_mask(hdev, skb);
2316 		break;
2317 
2318 	case HCI_OP_WRITE_INQUIRY_MODE:
2319 		hci_cc_write_inquiry_mode(hdev, skb);
2320 		break;
2321 
2322 	case HCI_OP_READ_INQ_RSP_TX_POWER:
2323 		hci_cc_read_inq_rsp_tx_power(hdev, skb);
2324 		break;
2325 
2326 	case HCI_OP_SET_EVENT_FLT:
2327 		hci_cc_set_event_flt(hdev, skb);
2328 		break;
2329 
2330 	case HCI_OP_PIN_CODE_REPLY:
2331 		hci_cc_pin_code_reply(hdev, skb);
2332 		break;
2333 
2334 	case HCI_OP_PIN_CODE_NEG_REPLY:
2335 		hci_cc_pin_code_neg_reply(hdev, skb);
2336 		break;
2337 
2338 	case HCI_OP_READ_LOCAL_OOB_DATA:
2339 		hci_cc_read_local_oob_data_reply(hdev, skb);
2340 		break;
2341 
2342 	case HCI_OP_LE_READ_BUFFER_SIZE:
2343 		hci_cc_le_read_buffer_size(hdev, skb);
2344 		break;
2345 
2346 	case HCI_OP_USER_CONFIRM_REPLY:
2347 		hci_cc_user_confirm_reply(hdev, skb);
2348 		break;
2349 
2350 	case HCI_OP_USER_CONFIRM_NEG_REPLY:
2351 		hci_cc_user_confirm_neg_reply(hdev, skb);
2352 		break;
2353 
2354 	case HCI_OP_USER_PASSKEY_REPLY:
2355 		hci_cc_user_passkey_reply(hdev, skb);
2356 		break;
2357 
2358 	case HCI_OP_USER_PASSKEY_NEG_REPLY:
2359 		hci_cc_user_passkey_neg_reply(hdev, skb);
2360 		break;
2361 
2362 	case HCI_OP_LE_SET_SCAN_PARAM:
2363 		hci_cc_le_set_scan_param(hdev, skb);
2364 		break;
2365 
2366 	case HCI_OP_LE_SET_SCAN_ENABLE:
2367 		hci_cc_le_set_scan_enable(hdev, skb);
2368 		break;
2369 
2370 	case HCI_OP_LE_LTK_REPLY:
2371 		hci_cc_le_ltk_reply(hdev, skb);
2372 		break;
2373 
2374 	case HCI_OP_LE_LTK_NEG_REPLY:
2375 		hci_cc_le_ltk_neg_reply(hdev, skb);
2376 		break;
2377 
2378 	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
2379 		hci_cc_write_le_host_supported(hdev, skb);
2380 		break;
2381 
2382 	default:
2383 		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2384 		break;
2385 	}
2386 
2387 	if (ev->opcode != HCI_OP_NOP)
2388 		del_timer(&hdev->cmd_timer);
2389 
2390 	if (ev->ncmd) {
2391 		atomic_set(&hdev->cmd_cnt, 1);
2392 		if (!skb_queue_empty(&hdev->cmd_q))
2393 			queue_work(hdev->workqueue, &hdev->cmd_work);
2394 	}
2395 }
2396 
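/* Command Status event: dispatch to the matching hci_cs_* handler based
 * on the opcode, stop the command timer and, unless a reset is in
 * progress, restart the command queue when the controller can accept
 * more commands. */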
2397 static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
2398 {
2399 	struct hci_ev_cmd_status *ev = (void *) skb->data;
2400 	__u16 opcode;
2401 
2402 	skb_pull(skb, sizeof(*ev));
2403 
2404 	opcode = __le16_to_cpu(ev->opcode);
2405 
2406 	switch (opcode) {
2407 	case HCI_OP_INQUIRY:
2408 		hci_cs_inquiry(hdev, ev->status);
2409 		break;
2410 
2411 	case HCI_OP_CREATE_CONN:
2412 		hci_cs_create_conn(hdev, ev->status);
2413 		break;
2414 
2415 	case HCI_OP_ADD_SCO:
2416 		hci_cs_add_sco(hdev, ev->status);
2417 		break;
2418 
2419 	case HCI_OP_AUTH_REQUESTED:
2420 		hci_cs_auth_requested(hdev, ev->status);
2421 		break;
2422 
2423 	case HCI_OP_SET_CONN_ENCRYPT:
2424 		hci_cs_set_conn_encrypt(hdev, ev->status);
2425 		break;
2426 
2427 	case HCI_OP_REMOTE_NAME_REQ:
2428 		hci_cs_remote_name_req(hdev, ev->status);
2429 		break;
2430 
2431 	case HCI_OP_READ_REMOTE_FEATURES:
2432 		hci_cs_read_remote_features(hdev, ev->status);
2433 		break;
2434 
2435 	case HCI_OP_READ_REMOTE_EXT_FEATURES:
2436 		hci_cs_read_remote_ext_features(hdev, ev->status);
2437 		break;
2438 
2439 	case HCI_OP_SETUP_SYNC_CONN:
2440 		hci_cs_setup_sync_conn(hdev, ev->status);
2441 		break;
2442 
2443 	case HCI_OP_SNIFF_MODE:
2444 		hci_cs_sniff_mode(hdev, ev->status);
2445 		break;
2446 
2447 	case HCI_OP_EXIT_SNIFF_MODE:
2448 		hci_cs_exit_sniff_mode(hdev, ev->status);
2449 		break;
2450 
2451 	case HCI_OP_DISCONNECT:
2452 		hci_cs_disconnect(hdev, ev->status);
2453 		break;
2454 
2455 	case HCI_OP_LE_CREATE_CONN:
2456 		hci_cs_le_create_conn(hdev, ev->status);
2457 		break;
2458 
2459 	case HCI_OP_LE_START_ENC:
2460 		hci_cs_le_start_enc(hdev, ev->status);
2461 		break;
2462 
2463 	default:
2464 		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2465 		break;
2466 	}
2467 
2468 	if (ev->opcode != HCI_OP_NOP)
2469 		del_timer(&hdev->cmd_timer);
2470 
2471 	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2472 		atomic_set(&hdev->cmd_cnt, 1);
2473 		if (!skb_queue_empty(&hdev->cmd_q))
2474 			queue_work(hdev->workqueue, &hdev->cmd_work);
2475 	}
2476 }
2477 
2478 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2479 {
2480 	struct hci_ev_role_change *ev = (void *) skb->data;
2481 	struct hci_conn *conn;
2482 
2483 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2484 
2485 	hci_dev_lock(hdev);
2486 
2487 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2488 	if (conn) {
2489 		if (!ev->status) {
2490 			if (ev->role)
2491 				conn->link_mode &= ~HCI_LM_MASTER;
2492 			else
2493 				conn->link_mode |= HCI_LM_MASTER;
2494 		}
2495 
2496 		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2497 
2498 		hci_role_switch_cfm(conn, ev->status, ev->role);
2499 	}
2500 
2501 	hci_dev_unlock(hdev);
2502 }
2503 
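/* Number of Completed Packets event (packet based flow control only):
 * return the completed packets to the ACL, LE or SCO credit counters,
 * clamped to the controller limits, and reschedule the TX work. */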
2504 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
2505 {
2506 	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
2507 	int i;
2508 
2509 	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
2510 		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2511 		return;
2512 	}
2513 
2514 	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2515 	    ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
2516 		BT_DBG("%s bad parameters", hdev->name);
2517 		return;
2518 	}
2519 
2520 	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
2521 
2522 	for (i = 0; i < ev->num_hndl; i++) {
2523 		struct hci_comp_pkts_info *info = &ev->handles[i];
2524 		struct hci_conn *conn;
2525 		__u16  handle, count;
2526 
2527 		handle = __le16_to_cpu(info->handle);
2528 		count  = __le16_to_cpu(info->count);
2529 
2530 		conn = hci_conn_hash_lookup_handle(hdev, handle);
2531 		if (!conn)
2532 			continue;
2533 
2534 		conn->sent -= count;
2535 
2536 		switch (conn->type) {
2537 		case ACL_LINK:
2538 			hdev->acl_cnt += count;
2539 			if (hdev->acl_cnt > hdev->acl_pkts)
2540 				hdev->acl_cnt = hdev->acl_pkts;
2541 			break;
2542 
2543 		case LE_LINK:
2544 			if (hdev->le_pkts) {
2545 				hdev->le_cnt += count;
2546 				if (hdev->le_cnt > hdev->le_pkts)
2547 					hdev->le_cnt = hdev->le_pkts;
2548 			} else {
2549 				hdev->acl_cnt += count;
2550 				if (hdev->acl_cnt > hdev->acl_pkts)
2551 					hdev->acl_cnt = hdev->acl_pkts;
2552 			}
2553 			break;
2554 
2555 		case SCO_LINK:
2556 			hdev->sco_cnt += count;
2557 			if (hdev->sco_cnt > hdev->sco_pkts)
2558 				hdev->sco_cnt = hdev->sco_pkts;
2559 			break;
2560 
2561 		default:
2562 			BT_ERR("Unknown type %d conn %p", conn->type, conn);
2563 			break;
2564 		}
2565 	}
2566 
2567 	queue_work(hdev->workqueue, &hdev->tx_work);
2568 }
2569 
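/* Number of Completed Data Blocks event (block based flow control
 * only): return the completed blocks to the block credit counter,
 * clamped to the controller limit, and reschedule the TX work. */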
2570 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
2571 {
2572 	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
2573 	int i;
2574 
2575 	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
2576 		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2577 		return;
2578 	}
2579 
2580 	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2581 	    ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
2582 		BT_DBG("%s bad parameters", hdev->name);
2583 		return;
2584 	}
2585 
2586 	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
2587 	       ev->num_hndl);
2588 
2589 	for (i = 0; i < ev->num_hndl; i++) {
2590 		struct hci_comp_blocks_info *info = &ev->handles[i];
2591 		struct hci_conn *conn;
2592 		__u16  handle, block_count;
2593 
2594 		handle = __le16_to_cpu(info->handle);
2595 		block_count = __le16_to_cpu(info->blocks);
2596 
2597 		conn = hci_conn_hash_lookup_handle(hdev, handle);
2598 		if (!conn)
2599 			continue;
2600 
2601 		conn->sent -= block_count;
2602 
2603 		switch (conn->type) {
2604 		case ACL_LINK:
2605 			hdev->block_cnt += block_count;
2606 			if (hdev->block_cnt > hdev->num_blocks)
2607 				hdev->block_cnt = hdev->num_blocks;
2608 			break;
2609 
2610 		default:
2611 			BT_ERR("Unknown type %d conn %p", conn->type, conn);
2612 			break;
2613 		}
2614 	}
2615 
2616 	queue_work(hdev->workqueue, &hdev->tx_work);
2617 }
2618 
2619 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2620 {
2621 	struct hci_ev_mode_change *ev = (void *) skb->data;
2622 	struct hci_conn *conn;
2623 
2624 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2625 
2626 	hci_dev_lock(hdev);
2627 
2628 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2629 	if (conn) {
2630 		conn->mode = ev->mode;
2631 		conn->interval = __le16_to_cpu(ev->interval);
2632 
2633 		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
2634 					&conn->flags)) {
2635 			if (conn->mode == HCI_CM_ACTIVE)
2636 				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2637 			else
2638 				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2639 		}
2640 
2641 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2642 			hci_sco_setup(conn, ev->status);
2643 	}
2644 
2645 	hci_dev_unlock(hdev);
2646 }
2647 
2648 static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2649 {
2650 	struct hci_ev_pin_code_req *ev = (void *) skb->data;
2651 	struct hci_conn *conn;
2652 
2653 	BT_DBG("%s", hdev->name);
2654 
2655 	hci_dev_lock(hdev);
2656 
2657 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2658 	if (!conn)
2659 		goto unlock;
2660 
2661 	if (conn->state == BT_CONNECTED) {
2662 		hci_conn_hold(conn);
2663 		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2664 		hci_conn_put(conn);
2665 	}
2666 
2667 	if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
2668 		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2669 			     sizeof(ev->bdaddr), &ev->bdaddr);
2670 	else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
2671 		u8 secure;
2672 
2673 		if (conn->pending_sec_level == BT_SECURITY_HIGH)
2674 			secure = 1;
2675 		else
2676 			secure = 0;
2677 
2678 		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
2679 	}
2680 
2681 unlock:
2682 	hci_dev_unlock(hdev);
2683 }
2684 
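/* Link Key Request event: look up a stored key for the peer and answer
 * with a Link Key Request Reply; debug keys and keys that are too weak
 * for the pending security level are ignored and a negative reply is
 * sent instead. */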
2685 static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2686 {
2687 	struct hci_ev_link_key_req *ev = (void *) skb->data;
2688 	struct hci_cp_link_key_reply cp;
2689 	struct hci_conn *conn;
2690 	struct link_key *key;
2691 
2692 	BT_DBG("%s", hdev->name);
2693 
2694 	if (!test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
2695 		return;
2696 
2697 	hci_dev_lock(hdev);
2698 
2699 	key = hci_find_link_key(hdev, &ev->bdaddr);
2700 	if (!key) {
2701 		BT_DBG("%s link key not found for %s", hdev->name,
2702 		       batostr(&ev->bdaddr));
2703 		goto not_found;
2704 	}
2705 
2706 	BT_DBG("%s found key type %u for %s", hdev->name, key->type,
2707 	       batostr(&ev->bdaddr));
2708 
2709 	if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
2710 	    key->type == HCI_LK_DEBUG_COMBINATION) {
2711 		BT_DBG("%s ignoring debug key", hdev->name);
2712 		goto not_found;
2713 	}
2714 
2715 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2716 	if (conn) {
2717 		if (key->type == HCI_LK_UNAUTH_COMBINATION &&
2718 		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
2719 			BT_DBG("%s ignoring unauthenticated key", hdev->name);
2720 			goto not_found;
2721 		}
2722 
2723 		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
2724 		    conn->pending_sec_level == BT_SECURITY_HIGH) {
2725 			BT_DBG("%s ignoring key unauthenticated for high security",
2726 			       hdev->name);
2727 			goto not_found;
2728 		}
2729 
2730 		conn->key_type = key->type;
2731 		conn->pin_length = key->pin_len;
2732 	}
2733 
2734 	bacpy(&cp.bdaddr, &ev->bdaddr);
2735 	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
2736 
2737 	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
2738 
2739 	hci_dev_unlock(hdev);
2740 
2741 	return;
2742 
2743 not_found:
2744 	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
2745 	hci_dev_unlock(hdev);
2746 }
2747 
2748 static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
2749 {
2750 	struct hci_ev_link_key_notify *ev = (void *) skb->data;
2751 	struct hci_conn *conn;
2752 	u8 pin_len = 0;
2753 
2754 	BT_DBG("%s", hdev->name);
2755 
2756 	hci_dev_lock(hdev);
2757 
2758 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2759 	if (conn) {
2760 		hci_conn_hold(conn);
2761 		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2762 		pin_len = conn->pin_length;
2763 
2764 		if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
2765 			conn->key_type = ev->key_type;
2766 
2767 		hci_conn_put(conn);
2768 	}
2769 
2770 	if (test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
2771 		hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
2772 				 ev->key_type, pin_len);
2773 
2774 	hci_dev_unlock(hdev);
2775 }
2776 
2777 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
2778 {
2779 	struct hci_ev_clock_offset *ev = (void *) skb->data;
2780 	struct hci_conn *conn;
2781 
2782 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2783 
2784 	hci_dev_lock(hdev);
2785 
2786 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2787 	if (conn && !ev->status) {
2788 		struct inquiry_entry *ie;
2789 
2790 		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2791 		if (ie) {
2792 			ie->data.clock_offset = ev->clock_offset;
2793 			ie->timestamp = jiffies;
2794 		}
2795 	}
2796 
2797 	hci_dev_unlock(hdev);
2798 }
2799 
2800 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2801 {
2802 	struct hci_ev_pkt_type_change *ev = (void *) skb->data;
2803 	struct hci_conn *conn;
2804 
2805 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2806 
2807 	hci_dev_lock(hdev);
2808 
2809 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2810 	if (conn && !ev->status)
2811 		conn->pkt_type = __le16_to_cpu(ev->pkt_type);
2812 
2813 	hci_dev_unlock(hdev);
2814 }
2815 
2816 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
2817 {
2818 	struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
2819 	struct inquiry_entry *ie;
2820 
2821 	BT_DBG("%s", hdev->name);
2822 
2823 	hci_dev_lock(hdev);
2824 
2825 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2826 	if (ie) {
2827 		ie->data.pscan_rep_mode = ev->pscan_rep_mode;
2828 		ie->timestamp = jiffies;
2829 	}
2830 
2831 	hci_dev_unlock(hdev);
2832 }
2833 
2834 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
2835 					     struct sk_buff *skb)
2836 {
2837 	struct inquiry_data data;
2838 	int num_rsp = *((__u8 *) skb->data);
2839 	bool name_known, ssp;
2840 
2841 	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2842 
2843 	if (!num_rsp)
2844 		return;
2845 
2846 	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
2847 		return;
2848 
2849 	hci_dev_lock(hdev);
2850 
2851 	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
2852 		struct inquiry_info_with_rssi_and_pscan_mode *info;
2853 		info = (void *) (skb->data + 1);
2854 
2855 		for (; num_rsp; num_rsp--, info++) {
2856 			bacpy(&data.bdaddr, &info->bdaddr);
2857 			data.pscan_rep_mode	= info->pscan_rep_mode;
2858 			data.pscan_period_mode	= info->pscan_period_mode;
2859 			data.pscan_mode		= info->pscan_mode;
2860 			memcpy(data.dev_class, info->dev_class, 3);
2861 			data.clock_offset	= info->clock_offset;
2862 			data.rssi		= info->rssi;
2863 			data.ssp_mode		= 0x00;
2864 
2865 			name_known = hci_inquiry_cache_update(hdev, &data,
2866 							      false, &ssp);
2867 			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2868 					  info->dev_class, info->rssi,
2869 					  !name_known, ssp, NULL, 0);
2870 		}
2871 	} else {
2872 		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
2873 
2874 		for (; num_rsp; num_rsp--, info++) {
2875 			bacpy(&data.bdaddr, &info->bdaddr);
2876 			data.pscan_rep_mode	= info->pscan_rep_mode;
2877 			data.pscan_period_mode	= info->pscan_period_mode;
2878 			data.pscan_mode		= 0x00;
2879 			memcpy(data.dev_class, info->dev_class, 3);
2880 			data.clock_offset	= info->clock_offset;
2881 			data.rssi		= info->rssi;
2882 			data.ssp_mode		= 0x00;
2883 			name_known = hci_inquiry_cache_update(hdev, &data,
2884 							      false, &ssp);
2885 			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2886 					  info->dev_class, info->rssi,
2887 					  !name_known, ssp, NULL, 0);
2888 		}
2889 	}
2890 
2891 	hci_dev_unlock(hdev);
2892 }
2893 
2894 static void hci_remote_ext_features_evt(struct hci_dev *hdev,
2895 					struct sk_buff *skb)
2896 {
2897 	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
2898 	struct hci_conn *conn;
2899 
2900 	BT_DBG("%s", hdev->name);
2901 
2902 	hci_dev_lock(hdev);
2903 
2904 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2905 	if (!conn)
2906 		goto unlock;
2907 
2908 	if (!ev->status && ev->page == 0x01) {
2909 		struct inquiry_entry *ie;
2910 
2911 		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2912 		if (ie)
2913 			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
2914 
2915 		if (ev->features[0] & LMP_HOST_SSP)
2916 			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
2917 	}
2918 
2919 	if (conn->state != BT_CONFIG)
2920 		goto unlock;
2921 
2922 	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2923 		struct hci_cp_remote_name_req cp;
2924 		memset(&cp, 0, sizeof(cp));
2925 		bacpy(&cp.bdaddr, &conn->dst);
2926 		cp.pscan_rep_mode = 0x02;
2927 		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2928 	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2929 		mgmt_device_connected(hdev, &conn->dst, conn->type,
2930 				      conn->dst_type, 0, NULL, 0,
2931 				      conn->dev_class);
2932 
2933 	if (!hci_outgoing_auth_needed(hdev, conn)) {
2934 		conn->state = BT_CONNECTED;
2935 		hci_proto_connect_cfm(conn, ev->status);
2936 		hci_conn_put(conn);
2937 	}
2938 
2939 unlock:
2940 	hci_dev_unlock(hdev);
2941 }
2942 
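/* Synchronous Connection Complete event: finish SCO/eSCO setup on
 * success; for a handful of eSCO failure codes retry the outgoing
 * connection once with a restricted packet type, otherwise close and
 * delete the connection. */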
2943 static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
2944 				       struct sk_buff *skb)
2945 {
2946 	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
2947 	struct hci_conn *conn;
2948 
2949 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2950 
2951 	hci_dev_lock(hdev);
2952 
2953 	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2954 	if (!conn) {
2955 		if (ev->link_type == ESCO_LINK)
2956 			goto unlock;
2957 
2958 		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
2959 		if (!conn)
2960 			goto unlock;
2961 
2962 		conn->type = SCO_LINK;
2963 	}
2964 
2965 	switch (ev->status) {
2966 	case 0x00:
2967 		conn->handle = __le16_to_cpu(ev->handle);
2968 		conn->state  = BT_CONNECTED;
2969 
2970 		hci_conn_hold_device(conn);
2971 		hci_conn_add_sysfs(conn);
2972 		break;
2973 
2974 	case 0x11:	/* Unsupported Feature or Parameter Value */
2975 	case 0x1c:	/* SCO interval rejected */
2976 	case 0x1a:	/* Unsupported Remote Feature */
2977 	case 0x1f:	/* Unspecified error */
2978 		if (conn->out && conn->attempt < 2) {
2979 			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
2980 					(hdev->esco_type & EDR_ESCO_MASK);
2981 			hci_setup_sync(conn, conn->link->handle);
2982 			goto unlock;
2983 		}
2984 		/* fall through */
2985 
2986 	default:
2987 		conn->state = BT_CLOSED;
2988 		break;
2989 	}
2990 
2991 	hci_proto_connect_cfm(conn, ev->status);
2992 	if (ev->status)
2993 		hci_conn_del(conn);
2994 
2995 unlock:
2996 	hci_dev_unlock(hdev);
2997 }
2998 
2999 static void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb)
3000 {
3001 	BT_DBG("%s", hdev->name);
3002 }
3003 
3004 static void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
3005 {
3006 	struct hci_ev_sniff_subrate *ev = (void *) skb->data;
3007 
3008 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3009 }
3010 
3011 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
3012 					    struct sk_buff *skb)
3013 {
3014 	struct inquiry_data data;
3015 	struct extended_inquiry_info *info = (void *) (skb->data + 1);
3016 	int num_rsp = *((__u8 *) skb->data);
3017 	size_t eir_len;
3018 
3019 	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3020 
3021 	if (!num_rsp)
3022 		return;
3023 
3024 	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
3025 		return;
3026 
3027 	hci_dev_lock(hdev);
3028 
3029 	for (; num_rsp; num_rsp--, info++) {
3030 		bool name_known, ssp;
3031 
3032 		bacpy(&data.bdaddr, &info->bdaddr);
3033 		data.pscan_rep_mode	= info->pscan_rep_mode;
3034 		data.pscan_period_mode	= info->pscan_period_mode;
3035 		data.pscan_mode		= 0x00;
3036 		memcpy(data.dev_class, info->dev_class, 3);
3037 		data.clock_offset	= info->clock_offset;
3038 		data.rssi		= info->rssi;
3039 		data.ssp_mode		= 0x01;
3040 
3041 		if (test_bit(HCI_MGMT, &hdev->dev_flags))
3042 			name_known = eir_has_data_type(info->data,
3043 						       sizeof(info->data),
3044 						       EIR_NAME_COMPLETE);
3045 		else
3046 			name_known = true;
3047 
3048 		name_known = hci_inquiry_cache_update(hdev, &data, name_known,
3049 						      &ssp);
3050 		eir_len = eir_get_length(info->data, sizeof(info->data));
3051 		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3052 				  info->dev_class, info->rssi, !name_known,
3053 				  ssp, info->data, eir_len);
3054 	}
3055 
3056 	hci_dev_unlock(hdev);
3057 }
3058 
3059 static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
3060 					 struct sk_buff *skb)
3061 {
3062 	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
3063 	struct hci_conn *conn;
3064 
3065 	BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
3066 	       __le16_to_cpu(ev->handle));
3067 
3068 	hci_dev_lock(hdev);
3069 
3070 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3071 	if (!conn)
3072 		goto unlock;
3073 
3074 	if (!ev->status)
3075 		conn->sec_level = conn->pending_sec_level;
3076 
3077 	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3078 
3079 	if (ev->status && conn->state == BT_CONNECTED) {
3080 		hci_acl_disconn(conn, HCI_ERROR_AUTH_FAILURE);
3081 		hci_conn_put(conn);
3082 		goto unlock;
3083 	}
3084 
3085 	if (conn->state == BT_CONFIG) {
3086 		if (!ev->status)
3087 			conn->state = BT_CONNECTED;
3088 
3089 		hci_proto_connect_cfm(conn, ev->status);
3090 		hci_conn_put(conn);
3091 	} else {
3092 		hci_auth_cfm(conn, ev->status);
3093 
3094 		hci_conn_hold(conn);
3095 		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3096 		hci_conn_put(conn);
3097 	}
3098 
3099 unlock:
3100 	hci_dev_unlock(hdev);
3101 }
3102 
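/* Derive the authentication requirements for an IO Capability Reply
 * from the remote side's request, both sides' IO capabilities and the
 * locally requested authentication type. */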
3103 static u8 hci_get_auth_req(struct hci_conn *conn)
3104 {
3105 	/* If the remote side requests dedicated bonding, follow that lead */
3106 	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
3107 		/* If both the remote and local IO capabilities allow MITM
3108 		 * protection, require it; otherwise don't */
3109 		if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
3110 			return 0x02;
3111 		else
3112 			return 0x03;
3113 	}
3114 
3115 	/* If the remote side requests no-bonding, follow that lead */
3116 	if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
3117 		return conn->remote_auth | (conn->auth_type & 0x01);
3118 
3119 	return conn->auth_type;
3120 }
3121 
3122 static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3123 {
3124 	struct hci_ev_io_capa_request *ev = (void *) skb->data;
3125 	struct hci_conn *conn;
3126 
3127 	BT_DBG("%s", hdev->name);
3128 
3129 	hci_dev_lock(hdev);
3130 
3131 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3132 	if (!conn)
3133 		goto unlock;
3134 
3135 	hci_conn_hold(conn);
3136 
3137 	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3138 		goto unlock;
3139 
3140 	if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
3141 	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
3142 		struct hci_cp_io_capability_reply cp;
3143 
3144 		bacpy(&cp.bdaddr, &ev->bdaddr);
3145 		/* Change the IO capability from KeyboardDisplay
3146 		 * to DisplayYesNo, as KeyboardDisplay is not supported by the BT spec. */
3147 		cp.capability = (conn->io_capability == 0x04) ?
3148 						0x01 : conn->io_capability;
3149 		conn->auth_type = hci_get_auth_req(conn);
3150 		cp.authentication = conn->auth_type;
3151 
3152 		if (hci_find_remote_oob_data(hdev, &conn->dst) &&
3153 		    (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
3154 			cp.oob_data = 0x01;
3155 		else
3156 			cp.oob_data = 0x00;
3157 
3158 		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
3159 			     sizeof(cp), &cp);
3160 	} else {
3161 		struct hci_cp_io_capability_neg_reply cp;
3162 
3163 		bacpy(&cp.bdaddr, &ev->bdaddr);
3164 		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
3165 
3166 		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
3167 			     sizeof(cp), &cp);
3168 	}
3169 
3170 unlock:
3171 	hci_dev_unlock(hdev);
3172 }
3173 
3174 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3175 {
3176 	struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3177 	struct hci_conn *conn;
3178 
3179 	BT_DBG("%s", hdev->name);
3180 
3181 	hci_dev_lock(hdev);
3182 
3183 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3184 	if (!conn)
3185 		goto unlock;
3186 
3187 	conn->remote_cap = ev->capability;
3188 	conn->remote_auth = ev->authentication;
3189 	if (ev->oob_data)
3190 		set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3191 
3192 unlock:
3193 	hci_dev_unlock(hdev);
3194 }
3195 
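/* User Confirmation Request event: reject the request when we need
 * MITM protection that the remote side cannot provide; when neither
 * side needs MITM, auto-accept as initiator (optionally after a delay)
 * or ask user space to authorize as acceptor; otherwise forward the
 * confirmation request to user space via mgmt. */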
3196 static void hci_user_confirm_request_evt(struct hci_dev *hdev,
3197 					 struct sk_buff *skb)
3198 {
3199 	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
3200 	int loc_mitm, rem_mitm, confirm_hint = 0;
3201 	struct hci_conn *conn;
3202 
3203 	BT_DBG("%s", hdev->name);
3204 
3205 	hci_dev_lock(hdev);
3206 
3207 	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3208 		goto unlock;
3209 
3210 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3211 	if (!conn)
3212 		goto unlock;
3213 
3214 	loc_mitm = (conn->auth_type & 0x01);
3215 	rem_mitm = (conn->remote_auth & 0x01);
3216 
3217 	/* If we require MITM but the remote device can't provide that
3218 	 * (it has NoInputNoOutput) then reject the confirmation
3219 	 * request. The only exception is when we're dedicated bonding
3220 	 * initiators (connect_cfm_cb set) since then we always have the MITM
3221 	 * bit set. */
3222 	if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
3223 		BT_DBG("Rejecting request: remote device can't provide MITM");
3224 		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
3225 			     sizeof(ev->bdaddr), &ev->bdaddr);
3226 		goto unlock;
3227 	}
3228 
3229 	/* If no side requires MITM protection, auto-accept */
3230 	if ((!loc_mitm || conn->remote_cap == 0x03) &&
3231 	    (!rem_mitm || conn->io_capability == 0x03)) {
3232 
3233 		/* If we're not the initiator, request authorization to
3234 		 * proceed from user space (mgmt_user_confirm with
3235 		 * confirm_hint set to 1). */
3236 		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3237 			BT_DBG("Confirming auto-accept as acceptor");
3238 			confirm_hint = 1;
3239 			goto confirm;
3240 		}
3241 
3242 		BT_DBG("Auto-accept of user confirmation with %ums delay",
3243 		       hdev->auto_accept_delay);
3244 
3245 		if (hdev->auto_accept_delay > 0) {
3246 			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
3247 			mod_timer(&conn->auto_accept_timer, jiffies + delay);
3248 			goto unlock;
3249 		}
3250 
3251 		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
3252 			     sizeof(ev->bdaddr), &ev->bdaddr);
3253 		goto unlock;
3254 	}
3255 
3256 confirm:
3257 	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, ev->passkey,
3258 				  confirm_hint);
3259 
3260 unlock:
3261 	hci_dev_unlock(hdev);
3262 }
3263 
3264 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
3265 					 struct sk_buff *skb)
3266 {
3267 	struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3268 
3269 	BT_DBG("%s", hdev->name);
3270 
3271 	hci_dev_lock(hdev);
3272 
3273 	if (test_bit(HCI_MGMT, &hdev->dev_flags))
3274 		mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3275 
3276 	hci_dev_unlock(hdev);
3277 }
3278 
3279 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
3280 					 struct sk_buff *skb)
3281 {
3282 	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
3283 	struct hci_conn *conn;
3284 
3285 	BT_DBG("%s", hdev->name);
3286 
3287 	hci_dev_lock(hdev);
3288 
3289 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3290 	if (!conn)
3291 		goto unlock;
3292 
3293 	/* To avoid duplicate auth_failed events to user space we check
3294 	 * the HCI_CONN_AUTH_PEND flag, which will be set if we
3295 	 * initiated the authentication. A traditional auth_complete
3296 	 * event is always produced when we are the initiator and is also
3297 	 * mapped to the mgmt_auth_failed event */
3298 	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status != 0)
3299 		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
3300 				 ev->status);
3301 
3302 	hci_conn_put(conn);
3303 
3304 unlock:
3305 	hci_dev_unlock(hdev);
3306 }
3307 
3308 static void hci_remote_host_features_evt(struct hci_dev *hdev,
3309 					 struct sk_buff *skb)
3310 {
3311 	struct hci_ev_remote_host_features *ev = (void *) skb->data;
3312 	struct inquiry_entry *ie;
3313 
3314 	BT_DBG("%s", hdev->name);
3315 
3316 	hci_dev_lock(hdev);
3317 
3318 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3319 	if (ie)
3320 		ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3321 
3322 	hci_dev_unlock(hdev);
3323 }
3324 
3325 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
3326 					    struct sk_buff *skb)
3327 {
3328 	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
3329 	struct oob_data *data;
3330 
3331 	BT_DBG("%s", hdev->name);
3332 
3333 	hci_dev_lock(hdev);
3334 
3335 	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3336 		goto unlock;
3337 
3338 	data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
3339 	if (data) {
3340 		struct hci_cp_remote_oob_data_reply cp;
3341 
3342 		bacpy(&cp.bdaddr, &ev->bdaddr);
3343 		memcpy(cp.hash, data->hash, sizeof(cp.hash));
3344 		memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));
3345 
3346 		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
3347 			     &cp);
3348 	} else {
3349 		struct hci_cp_remote_oob_data_neg_reply cp;
3350 
3351 		bacpy(&cp.bdaddr, &ev->bdaddr);
3352 		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
3353 			     &cp);
3354 	}
3355 
3356 unlock:
3357 	hci_dev_unlock(hdev);
3358 }
3359 
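/* LE Connection Complete event: on failure report the error for the
 * pending connection and delete it; on success create or complete the
 * hci_conn, mark it connected and notify the upper layers. */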
3360 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3361 {
3362 	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
3363 	struct hci_conn *conn;
3364 
3365 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3366 
3367 	hci_dev_lock(hdev);
3368 
3369 	if (ev->status) {
3370 		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
3371 		if (!conn)
3372 			goto unlock;
3373 
3374 		mgmt_connect_failed(hdev, &conn->dst, conn->type,
3375 				    conn->dst_type, ev->status);
3376 		hci_proto_connect_cfm(conn, ev->status);
3377 		conn->state = BT_CLOSED;
3378 		hci_conn_del(conn);
3379 		goto unlock;
3380 	}
3381 
3382 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr);
3383 	if (!conn) {
3384 		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
3385 		if (!conn) {
3386 			BT_ERR("No memory for new connection");
3387 			hci_dev_unlock(hdev);
3388 			return;
3389 		}
3390 
3391 		conn->dst_type = ev->bdaddr_type;
3392 	}
3393 
3394 	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3395 		mgmt_device_connected(hdev, &ev->bdaddr, conn->type,
3396 				      conn->dst_type, 0, NULL, 0, NULL);
3397 
3398 	conn->sec_level = BT_SECURITY_LOW;
3399 	conn->handle = __le16_to_cpu(ev->handle);
3400 	conn->state = BT_CONNECTED;
3401 
3402 	hci_conn_hold_device(conn);
3403 	hci_conn_add_sysfs(conn);
3404 
3405 	hci_proto_connect_cfm(conn, ev->status);
3406 
3407 unlock:
3408 	hci_dev_unlock(hdev);
3409 }
3410 
3411 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
3412 {
3413 	u8 num_reports = skb->data[0];
3414 	void *ptr = &skb->data[1];
3415 	s8 rssi;
3416 
3417 	hci_dev_lock(hdev);
3418 
3419 	while (num_reports--) {
3420 		struct hci_ev_le_advertising_info *ev = ptr;
3421 
3422 		rssi = ev->data[ev->length];
3423 		mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type,
3424 				  NULL, rssi, 0, 1, ev->data, ev->length);
3425 
3426 		ptr += sizeof(*ev) + ev->length + 1;
3427 	}
3428 
3429 	hci_dev_unlock(hdev);
3430 }
3431 
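/* LE Long Term Key Request event: look up the LTK by EDIV/Rand and
 * reply with it (one-time STKs are removed after use), or send a
 * negative reply when no matching key exists. */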
3432 static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3433 {
3434 	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
3435 	struct hci_cp_le_ltk_reply cp;
3436 	struct hci_cp_le_ltk_neg_reply neg;
3437 	struct hci_conn *conn;
3438 	struct smp_ltk *ltk;
3439 
3440 	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
3441 
3442 	hci_dev_lock(hdev);
3443 
3444 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3445 	if (conn == NULL)
3446 		goto not_found;
3447 
3448 	ltk = hci_find_ltk(hdev, ev->ediv, ev->random);
3449 	if (ltk == NULL)
3450 		goto not_found;
3451 
3452 	memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
3453 	cp.handle = cpu_to_le16(conn->handle);
3454 
3455 	if (ltk->authenticated)
3456 		conn->sec_level = BT_SECURITY_HIGH;
3457 
3458 	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
3459 
3460 	if (ltk->type & HCI_SMP_STK) {
3461 		list_del(&ltk->list);
3462 		kfree(ltk);
3463 	}
3464 
3465 	hci_dev_unlock(hdev);
3466 
3467 	return;
3468 
3469 not_found:
3470 	neg.handle = ev->handle;
3471 	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
3472 	hci_dev_unlock(hdev);
3473 }
3474 
3475 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
3476 {
3477 	struct hci_ev_le_meta *le_ev = (void *) skb->data;
3478 
3479 	skb_pull(skb, sizeof(*le_ev));
3480 
3481 	switch (le_ev->subevent) {
3482 	case HCI_EV_LE_CONN_COMPLETE:
3483 		hci_le_conn_complete_evt(hdev, skb);
3484 		break;
3485 
3486 	case HCI_EV_LE_ADVERTISING_REPORT:
3487 		hci_le_adv_report_evt(hdev, skb);
3488 		break;
3489 
3490 	case HCI_EV_LE_LTK_REQ:
3491 		hci_le_ltk_request_evt(hdev, skb);
3492 		break;
3493 
3494 	default:
3495 		break;
3496 	}
3497 }
3498 
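/* Main HCI event dispatcher: strip the event header, route the packet
 * to the matching event handler, then free the skb and update the RX
 * statistics. */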
3499 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
3500 {
3501 	struct hci_event_hdr *hdr = (void *) skb->data;
3502 	__u8 event = hdr->evt;
3503 
3504 	skb_pull(skb, HCI_EVENT_HDR_SIZE);
3505 
3506 	switch (event) {
3507 	case HCI_EV_INQUIRY_COMPLETE:
3508 		hci_inquiry_complete_evt(hdev, skb);
3509 		break;
3510 
3511 	case HCI_EV_INQUIRY_RESULT:
3512 		hci_inquiry_result_evt(hdev, skb);
3513 		break;
3514 
3515 	case HCI_EV_CONN_COMPLETE:
3516 		hci_conn_complete_evt(hdev, skb);
3517 		break;
3518 
3519 	case HCI_EV_CONN_REQUEST:
3520 		hci_conn_request_evt(hdev, skb);
3521 		break;
3522 
3523 	case HCI_EV_DISCONN_COMPLETE:
3524 		hci_disconn_complete_evt(hdev, skb);
3525 		break;
3526 
3527 	case HCI_EV_AUTH_COMPLETE:
3528 		hci_auth_complete_evt(hdev, skb);
3529 		break;
3530 
3531 	case HCI_EV_REMOTE_NAME:
3532 		hci_remote_name_evt(hdev, skb);
3533 		break;
3534 
3535 	case HCI_EV_ENCRYPT_CHANGE:
3536 		hci_encrypt_change_evt(hdev, skb);
3537 		break;
3538 
3539 	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
3540 		hci_change_link_key_complete_evt(hdev, skb);
3541 		break;
3542 
3543 	case HCI_EV_REMOTE_FEATURES:
3544 		hci_remote_features_evt(hdev, skb);
3545 		break;
3546 
3547 	case HCI_EV_REMOTE_VERSION:
3548 		hci_remote_version_evt(hdev, skb);
3549 		break;
3550 
3551 	case HCI_EV_QOS_SETUP_COMPLETE:
3552 		hci_qos_setup_complete_evt(hdev, skb);
3553 		break;
3554 
3555 	case HCI_EV_CMD_COMPLETE:
3556 		hci_cmd_complete_evt(hdev, skb);
3557 		break;
3558 
3559 	case HCI_EV_CMD_STATUS:
3560 		hci_cmd_status_evt(hdev, skb);
3561 		break;
3562 
3563 	case HCI_EV_ROLE_CHANGE:
3564 		hci_role_change_evt(hdev, skb);
3565 		break;
3566 
3567 	case HCI_EV_NUM_COMP_PKTS:
3568 		hci_num_comp_pkts_evt(hdev, skb);
3569 		break;
3570 
3571 	case HCI_EV_MODE_CHANGE:
3572 		hci_mode_change_evt(hdev, skb);
3573 		break;
3574 
3575 	case HCI_EV_PIN_CODE_REQ:
3576 		hci_pin_code_request_evt(hdev, skb);
3577 		break;
3578 
3579 	case HCI_EV_LINK_KEY_REQ:
3580 		hci_link_key_request_evt(hdev, skb);
3581 		break;
3582 
3583 	case HCI_EV_LINK_KEY_NOTIFY:
3584 		hci_link_key_notify_evt(hdev, skb);
3585 		break;
3586 
3587 	case HCI_EV_CLOCK_OFFSET:
3588 		hci_clock_offset_evt(hdev, skb);
3589 		break;
3590 
3591 	case HCI_EV_PKT_TYPE_CHANGE:
3592 		hci_pkt_type_change_evt(hdev, skb);
3593 		break;
3594 
3595 	case HCI_EV_PSCAN_REP_MODE:
3596 		hci_pscan_rep_mode_evt(hdev, skb);
3597 		break;
3598 
3599 	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
3600 		hci_inquiry_result_with_rssi_evt(hdev, skb);
3601 		break;
3602 
3603 	case HCI_EV_REMOTE_EXT_FEATURES:
3604 		hci_remote_ext_features_evt(hdev, skb);
3605 		break;
3606 
3607 	case HCI_EV_SYNC_CONN_COMPLETE:
3608 		hci_sync_conn_complete_evt(hdev, skb);
3609 		break;
3610 
3611 	case HCI_EV_SYNC_CONN_CHANGED:
3612 		hci_sync_conn_changed_evt(hdev, skb);
3613 		break;
3614 
3615 	case HCI_EV_SNIFF_SUBRATE:
3616 		hci_sniff_subrate_evt(hdev, skb);
3617 		break;
3618 
3619 	case HCI_EV_EXTENDED_INQUIRY_RESULT:
3620 		hci_extended_inquiry_result_evt(hdev, skb);
3621 		break;
3622 
3623 	case HCI_EV_KEY_REFRESH_COMPLETE:
3624 		hci_key_refresh_complete_evt(hdev, skb);
3625 		break;
3626 
3627 	case HCI_EV_IO_CAPA_REQUEST:
3628 		hci_io_capa_request_evt(hdev, skb);
3629 		break;
3630 
3631 	case HCI_EV_IO_CAPA_REPLY:
3632 		hci_io_capa_reply_evt(hdev, skb);
3633 		break;
3634 
3635 	case HCI_EV_USER_CONFIRM_REQUEST:
3636 		hci_user_confirm_request_evt(hdev, skb);
3637 		break;
3638 
3639 	case HCI_EV_USER_PASSKEY_REQUEST:
3640 		hci_user_passkey_request_evt(hdev, skb);
3641 		break;
3642 
3643 	case HCI_EV_SIMPLE_PAIR_COMPLETE:
3644 		hci_simple_pair_complete_evt(hdev, skb);
3645 		break;
3646 
3647 	case HCI_EV_REMOTE_HOST_FEATURES:
3648 		hci_remote_host_features_evt(hdev, skb);
3649 		break;
3650 
3651 	case HCI_EV_LE_META:
3652 		hci_le_meta_evt(hdev, skb);
3653 		break;
3654 
3655 	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
3656 		hci_remote_oob_data_request_evt(hdev, skb);
3657 		break;
3658 
3659 	case HCI_EV_NUM_COMP_BLOCKS:
3660 		hci_num_comp_blocks_evt(hdev, skb);
3661 		break;
3662 
3663 	default:
3664 		BT_DBG("%s event 0x%2.2x", hdev->name, event);
3665 		break;
3666 	}
3667 
3668 	kfree_skb(skb);
3669 	hdev->stat.evt_rx++;
3670 }
3671