xref: /openbmc/linux/net/bluetooth/hci_core.c (revision a080a92a)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2011 ProFUSION Embedded Systems
5 
6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License version 2 as
10    published by the Free Software Foundation;
11 
12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 
21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23    SOFTWARE IS DISCLAIMED.
24 */
25 
26 /* Bluetooth HCI core. */
27 
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <linux/crypto.h>
33 #include <linux/property.h>
34 #include <asm/unaligned.h>
35 
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39 #include <net/bluetooth/mgmt.h>
40 
41 #include "hci_request.h"
42 #include "hci_debugfs.h"
43 #include "smp.h"
44 #include "leds.h"
45 
46 static void hci_rx_work(struct work_struct *work);
47 static void hci_cmd_work(struct work_struct *work);
48 static void hci_tx_work(struct work_struct *work);
49 
50 /* HCI device list */
51 LIST_HEAD(hci_dev_list);
52 DEFINE_RWLOCK(hci_dev_list_lock);
53 
54 /* HCI callback list */
55 LIST_HEAD(hci_cb_list);
56 DEFINE_MUTEX(hci_cb_list_lock);
57 
58 /* HCI ID Numbering */
59 static DEFINE_IDA(hci_index_ida);
60 
61 /* ---- HCI debugfs entries ---- */
62 
63 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
64 			     size_t count, loff_t *ppos)
65 {
66 	struct hci_dev *hdev = file->private_data;
67 	char buf[3];
68 
69 	buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
70 	buf[1] = '\n';
71 	buf[2] = '\0';
72 	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
73 }
74 
75 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
76 			      size_t count, loff_t *ppos)
77 {
78 	struct hci_dev *hdev = file->private_data;
79 	struct sk_buff *skb;
80 	bool enable;
81 	int err;
82 
83 	if (!test_bit(HCI_UP, &hdev->flags))
84 		return -ENETDOWN;
85 
86 	err = kstrtobool_from_user(user_buf, count, &enable);
87 	if (err)
88 		return err;
89 
90 	if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
91 		return -EALREADY;
92 
93 	hci_req_sync_lock(hdev);
94 	if (enable)
95 		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
96 				     HCI_CMD_TIMEOUT);
97 	else
98 		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
99 				     HCI_CMD_TIMEOUT);
100 	hci_req_sync_unlock(hdev);
101 
102 	if (IS_ERR(skb))
103 		return PTR_ERR(skb);
104 
105 	kfree_skb(skb);
106 
107 	hci_dev_change_flag(hdev, HCI_DUT_MODE);
108 
109 	return count;
110 }
111 
112 static const struct file_operations dut_mode_fops = {
113 	.open		= simple_open,
114 	.read		= dut_mode_read,
115 	.write		= dut_mode_write,
116 	.llseek		= default_llseek,
117 };
118 
119 static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
120 				size_t count, loff_t *ppos)
121 {
122 	struct hci_dev *hdev = file->private_data;
123 	char buf[3];
124 
125 	buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
126 	buf[1] = '\n';
127 	buf[2] = '\0';
128 	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
129 }
130 
131 static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
132 				 size_t count, loff_t *ppos)
133 {
134 	struct hci_dev *hdev = file->private_data;
135 	bool enable;
136 	int err;
137 
138 	err = kstrtobool_from_user(user_buf, count, &enable);
139 	if (err)
140 		return err;
141 
142 	/* When the diagnostic flags are not persistent and the transport
143 	 * is not active or in user channel operation, then there is no need
144 	 * for the vendor callback. Instead just store the desired value and
145 	 * the setting will be programmed when the controller gets powered on.
146 	 */
147 	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
148 	    (!test_bit(HCI_RUNNING, &hdev->flags) ||
149 	     hci_dev_test_flag(hdev, HCI_USER_CHANNEL)))
150 		goto done;
151 
152 	hci_req_sync_lock(hdev);
153 	err = hdev->set_diag(hdev, enable);
154 	hci_req_sync_unlock(hdev);
155 
156 	if (err < 0)
157 		return err;
158 
159 done:
160 	if (enable)
161 		hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
162 	else
163 		hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);
164 
165 	return count;
166 }
167 
168 static const struct file_operations vendor_diag_fops = {
169 	.open		= simple_open,
170 	.read		= vendor_diag_read,
171 	.write		= vendor_diag_write,
172 	.llseek		= default_llseek,
173 };
174 
175 static void hci_debugfs_create_basic(struct hci_dev *hdev)
176 {
177 	debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
178 			    &dut_mode_fops);
179 
180 	if (hdev->set_diag)
181 		debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
182 				    &vendor_diag_fops);
183 }
184 
185 static int hci_reset_req(struct hci_request *req, unsigned long opt)
186 {
187 	BT_DBG("%s %ld", req->hdev->name, opt);
188 
189 	/* Reset device */
190 	set_bit(HCI_RESET, &req->hdev->flags);
191 	hci_req_add(req, HCI_OP_RESET, 0, NULL);
192 	return 0;
193 }
194 
195 static void bredr_init(struct hci_request *req)
196 {
197 	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
198 
199 	/* Read Local Supported Features */
200 	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
201 
202 	/* Read Local Version */
203 	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
204 
205 	/* Read BD Address */
206 	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
207 }
208 
209 static void amp_init1(struct hci_request *req)
210 {
211 	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
212 
213 	/* Read Local Version */
214 	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
215 
216 	/* Read Local Supported Commands */
217 	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
218 
219 	/* Read Local AMP Info */
220 	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
221 
222 	/* Read Data Blk size */
223 	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
224 
225 	/* Read Flow Control Mode */
226 	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
227 
228 	/* Read Location Data */
229 	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
230 }
231 
232 static int amp_init2(struct hci_request *req)
233 {
234 	/* Read Local Supported Features. Not all AMP controllers
235 	 * support this so it's placed conditionally in the second
236 	 * stage init.
237 	 */
238 	if (req->hdev->commands[14] & 0x20)
239 		hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
240 
241 	return 0;
242 }
243 
244 static int hci_init1_req(struct hci_request *req, unsigned long opt)
245 {
246 	struct hci_dev *hdev = req->hdev;
247 
248 	BT_DBG("%s %ld", hdev->name, opt);
249 
250 	/* Reset */
251 	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
252 		hci_reset_req(req, 0);
253 
254 	switch (hdev->dev_type) {
255 	case HCI_PRIMARY:
256 		bredr_init(req);
257 		break;
258 	case HCI_AMP:
259 		amp_init1(req);
260 		break;
261 	default:
262 		bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type);
263 		break;
264 	}
265 
266 	return 0;
267 }
268 
269 static void bredr_setup(struct hci_request *req)
270 {
271 	__le16 param;
272 	__u8 flt_type;
273 
274 	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
275 	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
276 
277 	/* Read Class of Device */
278 	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
279 
280 	/* Read Local Name */
281 	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
282 
283 	/* Read Voice Setting */
284 	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
285 
286 	/* Read Number of Supported IAC */
287 	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
288 
289 	/* Read Current IAC LAP */
290 	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
291 
292 	/* Clear Event Filters */
293 	flt_type = HCI_FLT_CLEAR_ALL;
294 	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
295 
296 	/* Connection accept timeout ~20 secs */
297 	param = cpu_to_le16(0x7d00);
298 	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
299 }
300 
301 static void le_setup(struct hci_request *req)
302 {
303 	struct hci_dev *hdev = req->hdev;
304 
305 	/* Read LE Buffer Size */
306 	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
307 
308 	/* Read LE Local Supported Features */
309 	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
310 
311 	/* Read LE Supported States */
312 	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
313 
314 	/* LE-only controllers have LE implicitly enabled */
315 	if (!lmp_bredr_capable(hdev))
316 		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
317 }
318 
319 static void hci_setup_event_mask(struct hci_request *req)
320 {
321 	struct hci_dev *hdev = req->hdev;
322 
323 	/* The second byte is 0xff instead of 0x9f (two reserved bits
324 	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
325 	 * command otherwise.
326 	 */
327 	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
328 
329 	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
330 	 * any event mask for pre 1.2 devices.
331 	 */
332 	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
333 		return;
334 
335 	if (lmp_bredr_capable(hdev)) {
336 		events[4] |= 0x01; /* Flow Specification Complete */
337 	} else {
338 		/* Use a different default for LE-only devices */
339 		memset(events, 0, sizeof(events));
340 		events[1] |= 0x20; /* Command Complete */
341 		events[1] |= 0x40; /* Command Status */
342 		events[1] |= 0x80; /* Hardware Error */
343 
344 		/* If the controller supports the Disconnect command, enable
345 		 * the corresponding event. In addition enable packet flow
346 		 * control related events.
347 		 */
348 		if (hdev->commands[0] & 0x20) {
349 			events[0] |= 0x10; /* Disconnection Complete */
350 			events[2] |= 0x04; /* Number of Completed Packets */
351 			events[3] |= 0x02; /* Data Buffer Overflow */
352 		}
353 
354 		/* If the controller supports the Read Remote Version
355 		 * Information command, enable the corresponding event.
356 		 */
357 		if (hdev->commands[2] & 0x80)
358 			events[1] |= 0x08; /* Read Remote Version Information
359 					    * Complete
360 					    */
361 
362 		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
363 			events[0] |= 0x80; /* Encryption Change */
364 			events[5] |= 0x80; /* Encryption Key Refresh Complete */
365 		}
366 	}
367 
368 	if (lmp_inq_rssi_capable(hdev) ||
369 	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
370 		events[4] |= 0x02; /* Inquiry Result with RSSI */
371 
372 	if (lmp_ext_feat_capable(hdev))
373 		events[4] |= 0x04; /* Read Remote Extended Features Complete */
374 
375 	if (lmp_esco_capable(hdev)) {
376 		events[5] |= 0x08; /* Synchronous Connection Complete */
377 		events[5] |= 0x10; /* Synchronous Connection Changed */
378 	}
379 
380 	if (lmp_sniffsubr_capable(hdev))
381 		events[5] |= 0x20; /* Sniff Subrating */
382 
383 	if (lmp_pause_enc_capable(hdev))
384 		events[5] |= 0x80; /* Encryption Key Refresh Complete */
385 
386 	if (lmp_ext_inq_capable(hdev))
387 		events[5] |= 0x40; /* Extended Inquiry Result */
388 
389 	if (lmp_no_flush_capable(hdev))
390 		events[7] |= 0x01; /* Enhanced Flush Complete */
391 
392 	if (lmp_lsto_capable(hdev))
393 		events[6] |= 0x80; /* Link Supervision Timeout Changed */
394 
395 	if (lmp_ssp_capable(hdev)) {
396 		events[6] |= 0x01;	/* IO Capability Request */
397 		events[6] |= 0x02;	/* IO Capability Response */
398 		events[6] |= 0x04;	/* User Confirmation Request */
399 		events[6] |= 0x08;	/* User Passkey Request */
400 		events[6] |= 0x10;	/* Remote OOB Data Request */
401 		events[6] |= 0x20;	/* Simple Pairing Complete */
402 		events[7] |= 0x04;	/* User Passkey Notification */
403 		events[7] |= 0x08;	/* Keypress Notification */
404 		events[7] |= 0x10;	/* Remote Host Supported
405 					 * Features Notification
406 					 */
407 	}
408 
409 	if (lmp_le_capable(hdev))
410 		events[7] |= 0x20;	/* LE Meta-Event */
411 
412 	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
413 }
414 
415 static int hci_init2_req(struct hci_request *req, unsigned long opt)
416 {
417 	struct hci_dev *hdev = req->hdev;
418 
419 	if (hdev->dev_type == HCI_AMP)
420 		return amp_init2(req);
421 
422 	if (lmp_bredr_capable(hdev))
423 		bredr_setup(req);
424 	else
425 		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
426 
427 	if (lmp_le_capable(hdev))
428 		le_setup(req);
429 
430 	/* All Bluetooth 1.2 and later controllers should support the
431 	 * HCI command for reading the local supported commands.
432 	 *
433 	 * Unfortunately some controllers indicate Bluetooth 1.2 support,
434 	 * but do not have support for this command. If that is the case,
435 	 * the driver can quirk the behavior and skip reading the local
436 	 * supported commands.
437 	 */
438 	if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
439 	    !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
440 		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
441 
442 	if (lmp_ssp_capable(hdev)) {
443 		/* When SSP is available, then the host features page
444 		 * should also be available as well. However some
445 		 * controllers list the max_page as 0 as long as SSP
446 		 * has not been enabled. To achieve proper debugging
447 		 * output, force the minimum max_page to 1 at least.
448 		 */
449 		hdev->max_page = 0x01;
450 
451 		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
452 			u8 mode = 0x01;
453 
454 			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
455 				    sizeof(mode), &mode);
456 		} else {
457 			struct hci_cp_write_eir cp;
458 
459 			memset(hdev->eir, 0, sizeof(hdev->eir));
460 			memset(&cp, 0, sizeof(cp));
461 
462 			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
463 		}
464 	}
465 
466 	if (lmp_inq_rssi_capable(hdev) ||
467 	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
468 		u8 mode;
469 
470 		/* If Extended Inquiry Result events are supported, then
471 		 * they are clearly preferred over Inquiry Result with RSSI
472 		 * events.
473 		 */
474 		mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;
475 
476 		hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
477 	}
478 
479 	if (lmp_inq_tx_pwr_capable(hdev))
480 		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
481 
482 	if (lmp_ext_feat_capable(hdev)) {
483 		struct hci_cp_read_local_ext_features cp;
484 
485 		cp.page = 0x01;
486 		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
487 			    sizeof(cp), &cp);
488 	}
489 
490 	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
491 		u8 enable = 1;
492 		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
493 			    &enable);
494 	}
495 
496 	return 0;
497 }
498 
499 static void hci_setup_link_policy(struct hci_request *req)
500 {
501 	struct hci_dev *hdev = req->hdev;
502 	struct hci_cp_write_def_link_policy cp;
503 	u16 link_policy = 0;
504 
505 	if (lmp_rswitch_capable(hdev))
506 		link_policy |= HCI_LP_RSWITCH;
507 	if (lmp_hold_capable(hdev))
508 		link_policy |= HCI_LP_HOLD;
509 	if (lmp_sniff_capable(hdev))
510 		link_policy |= HCI_LP_SNIFF;
511 	if (lmp_park_capable(hdev))
512 		link_policy |= HCI_LP_PARK;
513 
514 	cp.policy = cpu_to_le16(link_policy);
515 	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
516 }
517 
518 static void hci_set_le_support(struct hci_request *req)
519 {
520 	struct hci_dev *hdev = req->hdev;
521 	struct hci_cp_write_le_host_supported cp;
522 
523 	/* LE-only devices do not support explicit enablement */
524 	if (!lmp_bredr_capable(hdev))
525 		return;
526 
527 	memset(&cp, 0, sizeof(cp));
528 
529 	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
530 		cp.le = 0x01;
531 		cp.simul = 0x00;
532 	}
533 
534 	if (cp.le != lmp_host_le_capable(hdev))
535 		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
536 			    &cp);
537 }
538 
539 static void hci_set_event_mask_page_2(struct hci_request *req)
540 {
541 	struct hci_dev *hdev = req->hdev;
542 	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
543 	bool changed = false;
544 
545 	/* If Connectionless Slave Broadcast master role is supported
546 	 * enable all necessary events for it.
547 	 */
548 	if (lmp_csb_master_capable(hdev)) {
549 		events[1] |= 0x40;	/* Triggered Clock Capture */
550 		events[1] |= 0x80;	/* Synchronization Train Complete */
551 		events[2] |= 0x10;	/* Slave Page Response Timeout */
552 		events[2] |= 0x20;	/* CSB Channel Map Change */
553 		changed = true;
554 	}
555 
556 	/* If Connectionless Slave Broadcast slave role is supported
557 	 * enable all necessary events for it.
558 	 */
559 	if (lmp_csb_slave_capable(hdev)) {
560 		events[2] |= 0x01;	/* Synchronization Train Received */
561 		events[2] |= 0x02;	/* CSB Receive */
562 		events[2] |= 0x04;	/* CSB Timeout */
563 		events[2] |= 0x08;	/* Truncated Page Complete */
564 		changed = true;
565 	}
566 
567 	/* Enable Authenticated Payload Timeout Expired event if supported */
568 	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
569 		events[2] |= 0x80;
570 		changed = true;
571 	}
572 
573 	/* Some Broadcom based controllers indicate support for Set Event
574 	 * Mask Page 2 command, but then actually do not support it. Since
575 	 * the default value is all bits set to zero, the command is only
576 	 * required if the event mask has to be changed. In case no change
577 	 * to the event mask is needed, skip this command.
578 	 */
579 	if (changed)
580 		hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
581 			    sizeof(events), events);
582 }
583 
584 static int hci_init3_req(struct hci_request *req, unsigned long opt)
585 {
586 	struct hci_dev *hdev = req->hdev;
587 	u8 p;
588 
589 	hci_setup_event_mask(req);
590 
591 	if (hdev->commands[6] & 0x20 &&
592 	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
593 		struct hci_cp_read_stored_link_key cp;
594 
595 		bacpy(&cp.bdaddr, BDADDR_ANY);
596 		cp.read_all = 0x01;
597 		hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
598 	}
599 
600 	if (hdev->commands[5] & 0x10)
601 		hci_setup_link_policy(req);
602 
603 	if (hdev->commands[8] & 0x01)
604 		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
605 
606 	/* Some older Broadcom based Bluetooth 1.2 controllers do not
607 	 * support the Read Page Scan Type command. Check support for
608 	 * this command in the bit mask of supported commands.
609 	 */
610 	if (hdev->commands[13] & 0x01)
611 		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
612 
613 	if (lmp_le_capable(hdev)) {
614 		u8 events[8];
615 
616 		memset(events, 0, sizeof(events));
617 
618 		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
619 			events[0] |= 0x10;	/* LE Long Term Key Request */
620 
621 		/* If controller supports the Connection Parameters Request
622 		 * Link Layer Procedure, enable the corresponding event.
623 		 */
624 		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
625 			events[0] |= 0x20;	/* LE Remote Connection
626 						 * Parameter Request
627 						 */
628 
629 		/* If the controller supports the Data Length Extension
630 		 * feature, enable the corresponding event.
631 		 */
632 		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
633 			events[0] |= 0x40;	/* LE Data Length Change */
634 
635 		/* If the controller supports Extended Scanner Filter
636 		 * Policies, enable the correspondig event.
637 		 */
638 		if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
639 			events[1] |= 0x04;	/* LE Direct Advertising
640 						 * Report
641 						 */
642 
643 		/* If the controller supports Channel Selection Algorithm #2
644 		 * feature, enable the corresponding event.
645 		 */
646 		if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
647 			events[2] |= 0x08;	/* LE Channel Selection
648 						 * Algorithm
649 						 */
650 
651 		/* If the controller supports the LE Set Scan Enable command,
652 		 * enable the corresponding advertising report event.
653 		 */
654 		if (hdev->commands[26] & 0x08)
655 			events[0] |= 0x02;	/* LE Advertising Report */
656 
657 		/* If the controller supports the LE Create Connection
658 		 * command, enable the corresponding event.
659 		 */
660 		if (hdev->commands[26] & 0x10)
661 			events[0] |= 0x01;	/* LE Connection Complete */
662 
663 		/* If the controller supports the LE Connection Update
664 		 * command, enable the corresponding event.
665 		 */
666 		if (hdev->commands[27] & 0x04)
667 			events[0] |= 0x04;	/* LE Connection Update
668 						 * Complete
669 						 */
670 
671 		/* If the controller supports the LE Read Remote Used Features
672 		 * command, enable the corresponding event.
673 		 */
674 		if (hdev->commands[27] & 0x20)
675 			events[0] |= 0x08;	/* LE Read Remote Used
676 						 * Features Complete
677 						 */
678 
679 		/* If the controller supports the LE Read Local P-256
680 		 * Public Key command, enable the corresponding event.
681 		 */
682 		if (hdev->commands[34] & 0x02)
683 			events[0] |= 0x80;	/* LE Read Local P-256
684 						 * Public Key Complete
685 						 */
686 
687 		/* If the controller supports the LE Generate DHKey
688 		 * command, enable the corresponding event.
689 		 */
690 		if (hdev->commands[34] & 0x04)
691 			events[1] |= 0x01;	/* LE Generate DHKey Complete */
692 
693 		/* If the controller supports the LE Set Default PHY or
694 		 * LE Set PHY commands, enable the corresponding event.
695 		 */
696 		if (hdev->commands[35] & (0x20 | 0x40))
697 			events[1] |= 0x08;        /* LE PHY Update Complete */
698 
699 		/* If the controller supports LE Set Extended Scan Parameters
700 		 * and LE Set Extended Scan Enable commands, enable the
701 		 * corresponding event.
702 		 */
703 		if (use_ext_scan(hdev))
704 			events[1] |= 0x10;	/* LE Extended Advertising
705 						 * Report
706 						 */
707 
708 		/* If the controller supports the LE Extended Create Connection
709 		 * command, enable the corresponding event.
710 		 */
711 		if (use_ext_conn(hdev))
712 			events[1] |= 0x02;      /* LE Enhanced Connection
713 						 * Complete
714 						 */
715 
716 		/* If the controller supports the LE Extended Advertising
717 		 * command, enable the corresponding event.
718 		 */
719 		if (ext_adv_capable(hdev))
720 			events[2] |= 0x02;	/* LE Advertising Set
721 						 * Terminated
722 						 */
723 
724 		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
725 			    events);
726 
727 		/* Read LE Advertising Channel TX Power */
728 		if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) {
729 			/* HCI TS spec forbids mixing of legacy and extended
730 			 * advertising commands wherein READ_ADV_TX_POWER is
731 			 * also included. So do not call it if extended adv
732 			 * is supported otherwise controller will return
733 			 * COMMAND_DISALLOWED for extended commands.
734 			 */
735 			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
736 		}
737 
738 		if (hdev->commands[26] & 0x40) {
739 			/* Read LE White List Size */
740 			hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
741 				    0, NULL);
742 		}
743 
744 		if (hdev->commands[26] & 0x80) {
745 			/* Clear LE White List */
746 			hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
747 		}
748 
749 		if (hdev->commands[34] & 0x40) {
750 			/* Read LE Resolving List Size */
751 			hci_req_add(req, HCI_OP_LE_READ_RESOLV_LIST_SIZE,
752 				    0, NULL);
753 		}
754 
755 		if (hdev->commands[34] & 0x20) {
756 			/* Clear LE Resolving List */
757 			hci_req_add(req, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL);
758 		}
759 
760 		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
761 			/* Read LE Maximum Data Length */
762 			hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);
763 
764 			/* Read LE Suggested Default Data Length */
765 			hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
766 		}
767 
768 		if (ext_adv_capable(hdev)) {
769 			/* Read LE Number of Supported Advertising Sets */
770 			hci_req_add(req, HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
771 				    0, NULL);
772 		}
773 
774 		hci_set_le_support(req);
775 	}
776 
777 	/* Read features beyond page 1 if available */
778 	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
779 		struct hci_cp_read_local_ext_features cp;
780 
781 		cp.page = p;
782 		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
783 			    sizeof(cp), &cp);
784 	}
785 
786 	return 0;
787 }
788 
789 static int hci_init4_req(struct hci_request *req, unsigned long opt)
790 {
791 	struct hci_dev *hdev = req->hdev;
792 
793 	/* Some Broadcom based Bluetooth controllers do not support the
794 	 * Delete Stored Link Key command. They are clearly indicating its
795 	 * absence in the bit mask of supported commands.
796 	 *
797 	 * Check the supported commands and only if the the command is marked
798 	 * as supported send it. If not supported assume that the controller
799 	 * does not have actual support for stored link keys which makes this
800 	 * command redundant anyway.
801 	 *
802 	 * Some controllers indicate that they support handling deleting
803 	 * stored link keys, but they don't. The quirk lets a driver
804 	 * just disable this command.
805 	 */
806 	if (hdev->commands[6] & 0x80 &&
807 	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
808 		struct hci_cp_delete_stored_link_key cp;
809 
810 		bacpy(&cp.bdaddr, BDADDR_ANY);
811 		cp.delete_all = 0x01;
812 		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
813 			    sizeof(cp), &cp);
814 	}
815 
816 	/* Set event mask page 2 if the HCI command for it is supported */
817 	if (hdev->commands[22] & 0x04)
818 		hci_set_event_mask_page_2(req);
819 
820 	/* Read local codec list if the HCI command is supported */
821 	if (hdev->commands[29] & 0x20)
822 		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
823 
824 	/* Get MWS transport configuration if the HCI command is supported */
825 	if (hdev->commands[30] & 0x08)
826 		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
827 
828 	/* Check for Synchronization Train support */
829 	if (lmp_sync_train_capable(hdev))
830 		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
831 
832 	/* Enable Secure Connections if supported and configured */
833 	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
834 	    bredr_sc_enabled(hdev)) {
835 		u8 support = 0x01;
836 
837 		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
838 			    sizeof(support), &support);
839 	}
840 
841 	/* Set Suggested Default Data Length to maximum if supported */
842 	if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
843 		struct hci_cp_le_write_def_data_len cp;
844 
845 		cp.tx_len = cpu_to_le16(hdev->le_max_tx_len);
846 		cp.tx_time = cpu_to_le16(hdev->le_max_tx_time);
847 		hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp);
848 	}
849 
850 	/* Set Default PHY parameters if command is supported */
851 	if (hdev->commands[35] & 0x20) {
852 		struct hci_cp_le_set_default_phy cp;
853 
854 		cp.all_phys = 0x00;
855 		cp.tx_phys = hdev->le_tx_def_phys;
856 		cp.rx_phys = hdev->le_rx_def_phys;
857 
858 		hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp);
859 	}
860 
861 	return 0;
862 }
863 
864 static int __hci_init(struct hci_dev *hdev)
865 {
866 	int err;
867 
868 	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
869 	if (err < 0)
870 		return err;
871 
872 	if (hci_dev_test_flag(hdev, HCI_SETUP))
873 		hci_debugfs_create_basic(hdev);
874 
875 	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
876 	if (err < 0)
877 		return err;
878 
879 	/* HCI_PRIMARY covers both single-mode LE, BR/EDR and dual-mode
880 	 * BR/EDR/LE type controllers. AMP controllers only need the
881 	 * first two stages of init.
882 	 */
883 	if (hdev->dev_type != HCI_PRIMARY)
884 		return 0;
885 
886 	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
887 	if (err < 0)
888 		return err;
889 
890 	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
891 	if (err < 0)
892 		return err;
893 
894 	/* This function is only called when the controller is actually in
895 	 * configured state. When the controller is marked as unconfigured,
896 	 * this initialization procedure is not run.
897 	 *
898 	 * It means that it is possible that a controller runs through its
899 	 * setup phase and then discovers missing settings. If that is the
900 	 * case, then this function will not be called. It then will only
901 	 * be called during the config phase.
902 	 *
903 	 * So only when in setup phase or config phase, create the debugfs
904 	 * entries and register the SMP channels.
905 	 */
906 	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
907 	    !hci_dev_test_flag(hdev, HCI_CONFIG))
908 		return 0;
909 
910 	hci_debugfs_create_common(hdev);
911 
912 	if (lmp_bredr_capable(hdev))
913 		hci_debugfs_create_bredr(hdev);
914 
915 	if (lmp_le_capable(hdev))
916 		hci_debugfs_create_le(hdev);
917 
918 	return 0;
919 }
920 
921 static int hci_init0_req(struct hci_request *req, unsigned long opt)
922 {
923 	struct hci_dev *hdev = req->hdev;
924 
925 	BT_DBG("%s %ld", hdev->name, opt);
926 
927 	/* Reset */
928 	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
929 		hci_reset_req(req, 0);
930 
931 	/* Read Local Version */
932 	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
933 
934 	/* Read BD Address */
935 	if (hdev->set_bdaddr)
936 		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
937 
938 	return 0;
939 }
940 
941 static int __hci_unconf_init(struct hci_dev *hdev)
942 {
943 	int err;
944 
945 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
946 		return 0;
947 
948 	err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
949 	if (err < 0)
950 		return err;
951 
952 	if (hci_dev_test_flag(hdev, HCI_SETUP))
953 		hci_debugfs_create_basic(hdev);
954 
955 	return 0;
956 }
957 
958 static int hci_scan_req(struct hci_request *req, unsigned long opt)
959 {
960 	__u8 scan = opt;
961 
962 	BT_DBG("%s %x", req->hdev->name, scan);
963 
964 	/* Inquiry and Page scans */
965 	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
966 	return 0;
967 }
968 
969 static int hci_auth_req(struct hci_request *req, unsigned long opt)
970 {
971 	__u8 auth = opt;
972 
973 	BT_DBG("%s %x", req->hdev->name, auth);
974 
975 	/* Authentication */
976 	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
977 	return 0;
978 }
979 
980 static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
981 {
982 	__u8 encrypt = opt;
983 
984 	BT_DBG("%s %x", req->hdev->name, encrypt);
985 
986 	/* Encryption */
987 	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
988 	return 0;
989 }
990 
991 static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
992 {
993 	__le16 policy = cpu_to_le16(opt);
994 
995 	BT_DBG("%s %x", req->hdev->name, policy);
996 
997 	/* Default link policy */
998 	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
999 	return 0;
1000 }
1001 
1002 /* Get HCI device by index.
1003  * Device is held on return. */
1004 struct hci_dev *hci_dev_get(int index)
1005 {
1006 	struct hci_dev *hdev = NULL, *d;
1007 
1008 	BT_DBG("%d", index);
1009 
1010 	if (index < 0)
1011 		return NULL;
1012 
1013 	read_lock(&hci_dev_list_lock);
1014 	list_for_each_entry(d, &hci_dev_list, list) {
1015 		if (d->id == index) {
1016 			hdev = hci_dev_hold(d);
1017 			break;
1018 		}
1019 	}
1020 	read_unlock(&hci_dev_list_lock);
1021 	return hdev;
1022 }
1023 
1024 /* ---- Inquiry support ---- */
1025 
1026 bool hci_discovery_active(struct hci_dev *hdev)
1027 {
1028 	struct discovery_state *discov = &hdev->discovery;
1029 
1030 	switch (discov->state) {
1031 	case DISCOVERY_FINDING:
1032 	case DISCOVERY_RESOLVING:
1033 		return true;
1034 
1035 	default:
1036 		return false;
1037 	}
1038 }
1039 
1040 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1041 {
1042 	int old_state = hdev->discovery.state;
1043 
1044 	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1045 
1046 	if (old_state == state)
1047 		return;
1048 
1049 	hdev->discovery.state = state;
1050 
1051 	switch (state) {
1052 	case DISCOVERY_STOPPED:
1053 		hci_update_background_scan(hdev);
1054 
1055 		if (old_state != DISCOVERY_STARTING)
1056 			mgmt_discovering(hdev, 0);
1057 		break;
1058 	case DISCOVERY_STARTING:
1059 		break;
1060 	case DISCOVERY_FINDING:
1061 		mgmt_discovering(hdev, 1);
1062 		break;
1063 	case DISCOVERY_RESOLVING:
1064 		break;
1065 	case DISCOVERY_STOPPING:
1066 		break;
1067 	}
1068 }
1069 
1070 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1071 {
1072 	struct discovery_state *cache = &hdev->discovery;
1073 	struct inquiry_entry *p, *n;
1074 
1075 	list_for_each_entry_safe(p, n, &cache->all, all) {
1076 		list_del(&p->all);
1077 		kfree(p);
1078 	}
1079 
1080 	INIT_LIST_HEAD(&cache->unknown);
1081 	INIT_LIST_HEAD(&cache->resolve);
1082 }
1083 
1084 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1085 					       bdaddr_t *bdaddr)
1086 {
1087 	struct discovery_state *cache = &hdev->discovery;
1088 	struct inquiry_entry *e;
1089 
1090 	BT_DBG("cache %p, %pMR", cache, bdaddr);
1091 
1092 	list_for_each_entry(e, &cache->all, all) {
1093 		if (!bacmp(&e->data.bdaddr, bdaddr))
1094 			return e;
1095 	}
1096 
1097 	return NULL;
1098 }
1099 
1100 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1101 						       bdaddr_t *bdaddr)
1102 {
1103 	struct discovery_state *cache = &hdev->discovery;
1104 	struct inquiry_entry *e;
1105 
1106 	BT_DBG("cache %p, %pMR", cache, bdaddr);
1107 
1108 	list_for_each_entry(e, &cache->unknown, list) {
1109 		if (!bacmp(&e->data.bdaddr, bdaddr))
1110 			return e;
1111 	}
1112 
1113 	return NULL;
1114 }
1115 
1116 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1117 						       bdaddr_t *bdaddr,
1118 						       int state)
1119 {
1120 	struct discovery_state *cache = &hdev->discovery;
1121 	struct inquiry_entry *e;
1122 
1123 	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1124 
1125 	list_for_each_entry(e, &cache->resolve, list) {
1126 		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1127 			return e;
1128 		if (!bacmp(&e->data.bdaddr, bdaddr))
1129 			return e;
1130 	}
1131 
1132 	return NULL;
1133 }
1134 
1135 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
1136 				      struct inquiry_entry *ie)
1137 {
1138 	struct discovery_state *cache = &hdev->discovery;
1139 	struct list_head *pos = &cache->resolve;
1140 	struct inquiry_entry *p;
1141 
1142 	list_del(&ie->list);
1143 
1144 	list_for_each_entry(p, &cache->resolve, list) {
1145 		if (p->name_state != NAME_PENDING &&
1146 		    abs(p->data.rssi) >= abs(ie->data.rssi))
1147 			break;
1148 		pos = &p->list;
1149 	}
1150 
1151 	list_add(&ie->list, pos);
1152 }
1153 
1154 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1155 			     bool name_known)
1156 {
1157 	struct discovery_state *cache = &hdev->discovery;
1158 	struct inquiry_entry *ie;
1159 	u32 flags = 0;
1160 
1161 	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1162 
1163 	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
1164 
1165 	if (!data->ssp_mode)
1166 		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1167 
1168 	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
1169 	if (ie) {
1170 		if (!ie->data.ssp_mode)
1171 			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1172 
1173 		if (ie->name_state == NAME_NEEDED &&
1174 		    data->rssi != ie->data.rssi) {
1175 			ie->data.rssi = data->rssi;
1176 			hci_inquiry_cache_update_resolve(hdev, ie);
1177 		}
1178 
1179 		goto update;
1180 	}
1181 
1182 	/* Entry not in the cache. Add new one. */
1183 	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
1184 	if (!ie) {
1185 		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1186 		goto done;
1187 	}
1188 
1189 	list_add(&ie->all, &cache->all);
1190 
1191 	if (name_known) {
1192 		ie->name_state = NAME_KNOWN;
1193 	} else {
1194 		ie->name_state = NAME_NOT_KNOWN;
1195 		list_add(&ie->list, &cache->unknown);
1196 	}
1197 
1198 update:
1199 	if (name_known && ie->name_state != NAME_KNOWN &&
1200 	    ie->name_state != NAME_PENDING) {
1201 		ie->name_state = NAME_KNOWN;
1202 		list_del(&ie->list);
1203 	}
1204 
1205 	memcpy(&ie->data, data, sizeof(*data));
1206 	ie->timestamp = jiffies;
1207 	cache->timestamp = jiffies;
1208 
1209 	if (ie->name_state == NAME_NOT_KNOWN)
1210 		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1211 
1212 done:
1213 	return flags;
1214 }
1215 
1216 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1217 {
1218 	struct discovery_state *cache = &hdev->discovery;
1219 	struct inquiry_info *info = (struct inquiry_info *) buf;
1220 	struct inquiry_entry *e;
1221 	int copied = 0;
1222 
1223 	list_for_each_entry(e, &cache->all, all) {
1224 		struct inquiry_data *data = &e->data;
1225 
1226 		if (copied >= num)
1227 			break;
1228 
1229 		bacpy(&info->bdaddr, &data->bdaddr);
1230 		info->pscan_rep_mode	= data->pscan_rep_mode;
1231 		info->pscan_period_mode	= data->pscan_period_mode;
1232 		info->pscan_mode	= data->pscan_mode;
1233 		memcpy(info->dev_class, data->dev_class, 3);
1234 		info->clock_offset	= data->clock_offset;
1235 
1236 		info++;
1237 		copied++;
1238 	}
1239 
1240 	BT_DBG("cache %p, copied %d", cache, copied);
1241 	return copied;
1242 }
1243 
1244 static int hci_inq_req(struct hci_request *req, unsigned long opt)
1245 {
1246 	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
1247 	struct hci_dev *hdev = req->hdev;
1248 	struct hci_cp_inquiry cp;
1249 
1250 	BT_DBG("%s", hdev->name);
1251 
1252 	if (test_bit(HCI_INQUIRY, &hdev->flags))
1253 		return 0;
1254 
1255 	/* Start Inquiry */
1256 	memcpy(&cp.lap, &ir->lap, 3);
1257 	cp.length  = ir->length;
1258 	cp.num_rsp = ir->num_rsp;
1259 	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1260 
1261 	return 0;
1262 }
1263 
1264 int hci_inquiry(void __user *arg)
1265 {
1266 	__u8 __user *ptr = arg;
1267 	struct hci_inquiry_req ir;
1268 	struct hci_dev *hdev;
1269 	int err = 0, do_inquiry = 0, max_rsp;
1270 	long timeo;
1271 	__u8 *buf;
1272 
1273 	if (copy_from_user(&ir, ptr, sizeof(ir)))
1274 		return -EFAULT;
1275 
1276 	hdev = hci_dev_get(ir.dev_id);
1277 	if (!hdev)
1278 		return -ENODEV;
1279 
1280 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1281 		err = -EBUSY;
1282 		goto done;
1283 	}
1284 
1285 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1286 		err = -EOPNOTSUPP;
1287 		goto done;
1288 	}
1289 
1290 	if (hdev->dev_type != HCI_PRIMARY) {
1291 		err = -EOPNOTSUPP;
1292 		goto done;
1293 	}
1294 
1295 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1296 		err = -EOPNOTSUPP;
1297 		goto done;
1298 	}
1299 
1300 	hci_dev_lock(hdev);
1301 	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
1302 	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1303 		hci_inquiry_cache_flush(hdev);
1304 		do_inquiry = 1;
1305 	}
1306 	hci_dev_unlock(hdev);
1307 
1308 	timeo = ir.length * msecs_to_jiffies(2000);
1309 
1310 	if (do_inquiry) {
1311 		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1312 				   timeo, NULL);
1313 		if (err < 0)
1314 			goto done;
1315 
1316 		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1317 		 * cleared). If it is interrupted by a signal, return -EINTR.
1318 		 */
1319 		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
1320 				TASK_INTERRUPTIBLE))
1321 			return -EINTR;
1322 	}
1323 
1324 	/* for unlimited number of responses we will use buffer with
1325 	 * 255 entries
1326 	 */
1327 	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1328 
1329 	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
1330 	 * copy it to the user space.
1331 	 */
1332 	buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
1333 	if (!buf) {
1334 		err = -ENOMEM;
1335 		goto done;
1336 	}
1337 
1338 	hci_dev_lock(hdev);
1339 	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
1340 	hci_dev_unlock(hdev);
1341 
1342 	BT_DBG("num_rsp %d", ir.num_rsp);
1343 
1344 	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1345 		ptr += sizeof(ir);
1346 		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
1347 				 ir.num_rsp))
1348 			err = -EFAULT;
1349 	} else
1350 		err = -EFAULT;
1351 
1352 	kfree(buf);
1353 
1354 done:
1355 	hci_dev_put(hdev);
1356 	return err;
1357 }
1358 
1359 /**
1360  * hci_dev_get_bd_addr_from_property - Get the Bluetooth Device Address
1361  *				       (BD_ADDR) for a HCI device from
1362  *				       a firmware node property.
1363  * @hdev:	The HCI device
1364  *
1365  * Search the firmware node for 'local-bd-address'.
1366  *
1367  * All-zero BD addresses are rejected, because those could be properties
1368  * that exist in the firmware tables, but were not updated by the firmware. For
1369  * example, the DTS could define 'local-bd-address', with zero BD addresses.
1370  */
1371 static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev)
1372 {
1373 	struct fwnode_handle *fwnode = dev_fwnode(hdev->dev.parent);
1374 	bdaddr_t ba;
1375 	int ret;
1376 
1377 	ret = fwnode_property_read_u8_array(fwnode, "local-bd-address",
1378 					    (u8 *)&ba, sizeof(ba));
1379 	if (ret < 0 || !bacmp(&ba, BDADDR_ANY))
1380 		return;
1381 
1382 	bacpy(&hdev->public_addr, &ba);
1383 }
1384 
1385 static int hci_dev_do_open(struct hci_dev *hdev)
1386 {
1387 	int ret = 0;
1388 
1389 	BT_DBG("%s %p", hdev->name, hdev);
1390 
1391 	hci_req_sync_lock(hdev);
1392 
1393 	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
1394 		ret = -ENODEV;
1395 		goto done;
1396 	}
1397 
1398 	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1399 	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
1400 		/* Check for rfkill but allow the HCI setup stage to
1401 		 * proceed (which in itself doesn't cause any RF activity).
1402 		 */
1403 		if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
1404 			ret = -ERFKILL;
1405 			goto done;
1406 		}
1407 
1408 		/* Check for valid public address or a configured static
1409 		 * random adddress, but let the HCI setup proceed to
1410 		 * be able to determine if there is a public address
1411 		 * or not.
1412 		 *
1413 		 * In case of user channel usage, it is not important
1414 		 * if a public address or static random address is
1415 		 * available.
1416 		 *
1417 		 * This check is only valid for BR/EDR controllers
1418 		 * since AMP controllers do not have an address.
1419 		 */
1420 		if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1421 		    hdev->dev_type == HCI_PRIMARY &&
1422 		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1423 		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1424 			ret = -EADDRNOTAVAIL;
1425 			goto done;
1426 		}
1427 	}
1428 
1429 	if (test_bit(HCI_UP, &hdev->flags)) {
1430 		ret = -EALREADY;
1431 		goto done;
1432 	}
1433 
1434 	if (hdev->open(hdev)) {
1435 		ret = -EIO;
1436 		goto done;
1437 	}
1438 
1439 	set_bit(HCI_RUNNING, &hdev->flags);
1440 	hci_sock_dev_event(hdev, HCI_DEV_OPEN);
1441 
1442 	atomic_set(&hdev->cmd_cnt, 1);
1443 	set_bit(HCI_INIT, &hdev->flags);
1444 
1445 	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
1446 	    test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) {
1447 		bool invalid_bdaddr;
1448 
1449 		hci_sock_dev_event(hdev, HCI_DEV_SETUP);
1450 
1451 		if (hdev->setup)
1452 			ret = hdev->setup(hdev);
1453 
1454 		/* The transport driver can set the quirk to mark the
1455 		 * BD_ADDR invalid before creating the HCI device or in
1456 		 * its setup callback.
1457 		 */
1458 		invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR,
1459 					  &hdev->quirks);
1460 
1461 		if (ret)
1462 			goto setup_failed;
1463 
1464 		if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) {
1465 			if (!bacmp(&hdev->public_addr, BDADDR_ANY))
1466 				hci_dev_get_bd_addr_from_property(hdev);
1467 
1468 			if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1469 			    hdev->set_bdaddr) {
1470 				ret = hdev->set_bdaddr(hdev,
1471 						       &hdev->public_addr);
1472 
1473 				/* If setting of the BD_ADDR from the device
1474 				 * property succeeds, then treat the address
1475 				 * as valid even if the invalid BD_ADDR
1476 				 * quirk indicates otherwise.
1477 				 */
1478 				if (!ret)
1479 					invalid_bdaddr = false;
1480 			}
1481 		}
1482 
1483 setup_failed:
1484 		/* The transport driver can set these quirks before
1485 		 * creating the HCI device or in its setup callback.
1486 		 *
1487 		 * For the invalid BD_ADDR quirk it is possible that
1488 		 * it becomes a valid address if the bootloader does
1489 		 * provide it (see above).
1490 		 *
1491 		 * In case any of them is set, the controller has to
1492 		 * start up as unconfigured.
1493 		 */
1494 		if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
1495 		    invalid_bdaddr)
1496 			hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
1497 
1498 		/* For an unconfigured controller it is required to
1499 		 * read at least the version information provided by
1500 		 * the Read Local Version Information command.
1501 		 *
1502 		 * If the set_bdaddr driver callback is provided, then
1503 		 * also the original Bluetooth public device address
1504 		 * will be read using the Read BD Address command.
1505 		 */
1506 		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1507 			ret = __hci_unconf_init(hdev);
1508 	}
1509 
1510 	if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
1511 		/* If public address change is configured, ensure that
1512 		 * the address gets programmed. If the driver does not
1513 		 * support changing the public address, fail the power
1514 		 * on procedure.
1515 		 */
1516 		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1517 		    hdev->set_bdaddr)
1518 			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1519 		else
1520 			ret = -EADDRNOTAVAIL;
1521 	}
1522 
1523 	if (!ret) {
1524 		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1525 		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1526 			ret = __hci_init(hdev);
1527 			if (!ret && hdev->post_init)
1528 				ret = hdev->post_init(hdev);
1529 		}
1530 	}
1531 
1532 	/* If the HCI Reset command is clearing all diagnostic settings,
1533 	 * then they need to be reprogrammed after the init procedure
1534 	 * completed.
1535 	 */
1536 	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
1537 	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1538 	    hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
1539 		ret = hdev->set_diag(hdev, true);
1540 
1541 	clear_bit(HCI_INIT, &hdev->flags);
1542 
1543 	if (!ret) {
1544 		hci_dev_hold(hdev);
1545 		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1546 		hci_adv_instances_set_rpa_expired(hdev, true);
1547 		set_bit(HCI_UP, &hdev->flags);
1548 		hci_sock_dev_event(hdev, HCI_DEV_UP);
1549 		hci_leds_update_powered(hdev, true);
1550 		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1551 		    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
1552 		    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1553 		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1554 		    hci_dev_test_flag(hdev, HCI_MGMT) &&
1555 		    hdev->dev_type == HCI_PRIMARY) {
1556 			ret = __hci_req_hci_power_on(hdev);
1557 			mgmt_power_on(hdev, ret);
1558 		}
1559 	} else {
1560 		/* Init failed, cleanup */
1561 		flush_work(&hdev->tx_work);
1562 		flush_work(&hdev->cmd_work);
1563 		flush_work(&hdev->rx_work);
1564 
1565 		skb_queue_purge(&hdev->cmd_q);
1566 		skb_queue_purge(&hdev->rx_q);
1567 
1568 		if (hdev->flush)
1569 			hdev->flush(hdev);
1570 
1571 		if (hdev->sent_cmd) {
1572 			kfree_skb(hdev->sent_cmd);
1573 			hdev->sent_cmd = NULL;
1574 		}
1575 
1576 		clear_bit(HCI_RUNNING, &hdev->flags);
1577 		hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1578 
1579 		hdev->close(hdev);
1580 		hdev->flags &= BIT(HCI_RAW);
1581 	}
1582 
1583 done:
1584 	hci_req_sync_unlock(hdev);
1585 	return ret;
1586 }
1587 
1588 /* ---- HCI ioctl helpers ---- */
1589 
1590 int hci_dev_open(__u16 dev)
1591 {
1592 	struct hci_dev *hdev;
1593 	int err;
1594 
1595 	hdev = hci_dev_get(dev);
1596 	if (!hdev)
1597 		return -ENODEV;
1598 
1599 	/* Devices that are marked as unconfigured can only be powered
1600 	 * up as user channel. Trying to bring them up as normal devices
1601 	 * will result into a failure. Only user channel operation is
1602 	 * possible.
1603 	 *
1604 	 * When this function is called for a user channel, the flag
1605 	 * HCI_USER_CHANNEL will be set first before attempting to
1606 	 * open the device.
1607 	 */
1608 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1609 	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1610 		err = -EOPNOTSUPP;
1611 		goto done;
1612 	}
1613 
1614 	/* We need to ensure that no other power on/off work is pending
1615 	 * before proceeding to call hci_dev_do_open. This is
1616 	 * particularly important if the setup procedure has not yet
1617 	 * completed.
1618 	 */
1619 	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1620 		cancel_delayed_work(&hdev->power_off);
1621 
1622 	/* After this call it is guaranteed that the setup procedure
1623 	 * has finished. This means that error conditions like RFKILL
1624 	 * or no valid public or static random address apply.
1625 	 */
1626 	flush_workqueue(hdev->req_workqueue);
1627 
1628 	/* For controllers not using the management interface and that
1629 	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1630 	 * so that pairing works for them. Once the management interface
1631 	 * is in use this bit will be cleared again and userspace has
1632 	 * to explicitly enable it.
1633 	 */
1634 	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1635 	    !hci_dev_test_flag(hdev, HCI_MGMT))
1636 		hci_dev_set_flag(hdev, HCI_BONDABLE);
1637 
1638 	err = hci_dev_do_open(hdev);
1639 
1640 done:
1641 	hci_dev_put(hdev);
1642 	return err;
1643 }
1644 
1645 /* This function requires the caller holds hdev->lock */
1646 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1647 {
1648 	struct hci_conn_params *p;
1649 
1650 	list_for_each_entry(p, &hdev->le_conn_params, list) {
1651 		if (p->conn) {
1652 			hci_conn_drop(p->conn);
1653 			hci_conn_put(p->conn);
1654 			p->conn = NULL;
1655 		}
1656 		list_del_init(&p->action);
1657 	}
1658 
1659 	BT_DBG("All LE pending actions cleared");
1660 }
1661 
1662 int hci_dev_do_close(struct hci_dev *hdev)
1663 {
1664 	bool auto_off;
1665 
1666 	BT_DBG("%s %p", hdev->name, hdev);
1667 
1668 	if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1669 	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1670 	    test_bit(HCI_UP, &hdev->flags)) {
1671 		/* Execute vendor specific shutdown routine */
1672 		if (hdev->shutdown)
1673 			hdev->shutdown(hdev);
1674 	}
1675 
1676 	cancel_delayed_work(&hdev->power_off);
1677 
1678 	hci_request_cancel_all(hdev);
1679 	hci_req_sync_lock(hdev);
1680 
1681 	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1682 		cancel_delayed_work_sync(&hdev->cmd_timer);
1683 		hci_req_sync_unlock(hdev);
1684 		return 0;
1685 	}
1686 
1687 	hci_leds_update_powered(hdev, false);
1688 
1689 	/* Flush RX and TX works */
1690 	flush_work(&hdev->tx_work);
1691 	flush_work(&hdev->rx_work);
1692 
1693 	if (hdev->discov_timeout > 0) {
1694 		hdev->discov_timeout = 0;
1695 		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1696 		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1697 	}
1698 
1699 	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1700 		cancel_delayed_work(&hdev->service_cache);
1701 
1702 	if (hci_dev_test_flag(hdev, HCI_MGMT)) {
1703 		struct adv_info *adv_instance;
1704 
1705 		cancel_delayed_work_sync(&hdev->rpa_expired);
1706 
1707 		list_for_each_entry(adv_instance, &hdev->adv_instances, list)
1708 			cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1709 	}
1710 
1711 	/* Avoid potential lockdep warnings from the *_flush() calls by
1712 	 * ensuring the workqueue is empty up front.
1713 	 */
1714 	drain_workqueue(hdev->workqueue);
1715 
1716 	hci_dev_lock(hdev);
1717 
1718 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1719 
1720 	auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
1721 
1722 	if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
1723 	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1724 	    hci_dev_test_flag(hdev, HCI_MGMT))
1725 		__mgmt_power_off(hdev);
1726 
1727 	hci_inquiry_cache_flush(hdev);
1728 	hci_pend_le_actions_clear(hdev);
1729 	hci_conn_hash_flush(hdev);
1730 	hci_dev_unlock(hdev);
1731 
1732 	smp_unregister(hdev);
1733 
1734 	hci_sock_dev_event(hdev, HCI_DEV_DOWN);
1735 
1736 	if (hdev->flush)
1737 		hdev->flush(hdev);
1738 
1739 	/* Reset device */
1740 	skb_queue_purge(&hdev->cmd_q);
1741 	atomic_set(&hdev->cmd_cnt, 1);
1742 	if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
1743 	    !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1744 		set_bit(HCI_INIT, &hdev->flags);
1745 		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
1746 		clear_bit(HCI_INIT, &hdev->flags);
1747 	}
1748 
1749 	/* flush cmd  work */
1750 	flush_work(&hdev->cmd_work);
1751 
1752 	/* Drop queues */
1753 	skb_queue_purge(&hdev->rx_q);
1754 	skb_queue_purge(&hdev->cmd_q);
1755 	skb_queue_purge(&hdev->raw_q);
1756 
1757 	/* Drop last sent command */
1758 	if (hdev->sent_cmd) {
1759 		cancel_delayed_work_sync(&hdev->cmd_timer);
1760 		kfree_skb(hdev->sent_cmd);
1761 		hdev->sent_cmd = NULL;
1762 	}
1763 
1764 	clear_bit(HCI_RUNNING, &hdev->flags);
1765 	hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1766 
1767 	/* After this point our queues are empty
1768 	 * and no tasks are scheduled. */
1769 	hdev->close(hdev);
1770 
1771 	/* Clear flags */
1772 	hdev->flags &= BIT(HCI_RAW);
1773 	hci_dev_clear_volatile_flags(hdev);
1774 
1775 	/* Controller radio is available but is currently powered down */
1776 	hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1777 
1778 	memset(hdev->eir, 0, sizeof(hdev->eir));
1779 	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1780 	bacpy(&hdev->random_addr, BDADDR_ANY);
1781 
1782 	hci_req_sync_unlock(hdev);
1783 
1784 	hci_dev_put(hdev);
1785 	return 0;
1786 }
1787 
1788 int hci_dev_close(__u16 dev)
1789 {
1790 	struct hci_dev *hdev;
1791 	int err;
1792 
1793 	hdev = hci_dev_get(dev);
1794 	if (!hdev)
1795 		return -ENODEV;
1796 
1797 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1798 		err = -EBUSY;
1799 		goto done;
1800 	}
1801 
1802 	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1803 		cancel_delayed_work(&hdev->power_off);
1804 
1805 	err = hci_dev_do_close(hdev);
1806 
1807 done:
1808 	hci_dev_put(hdev);
1809 	return err;
1810 }
1811 
1812 static int hci_dev_do_reset(struct hci_dev *hdev)
1813 {
1814 	int ret;
1815 
1816 	BT_DBG("%s %p", hdev->name, hdev);
1817 
1818 	hci_req_sync_lock(hdev);
1819 
1820 	/* Drop queues */
1821 	skb_queue_purge(&hdev->rx_q);
1822 	skb_queue_purge(&hdev->cmd_q);
1823 
1824 	/* Avoid potential lockdep warnings from the *_flush() calls by
1825 	 * ensuring the workqueue is empty up front.
1826 	 */
1827 	drain_workqueue(hdev->workqueue);
1828 
1829 	hci_dev_lock(hdev);
1830 	hci_inquiry_cache_flush(hdev);
1831 	hci_conn_hash_flush(hdev);
1832 	hci_dev_unlock(hdev);
1833 
1834 	if (hdev->flush)
1835 		hdev->flush(hdev);
1836 
1837 	atomic_set(&hdev->cmd_cnt, 1);
1838 	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1839 
1840 	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);
1841 
1842 	hci_req_sync_unlock(hdev);
1843 	return ret;
1844 }
1845 
1846 int hci_dev_reset(__u16 dev)
1847 {
1848 	struct hci_dev *hdev;
1849 	int err;
1850 
1851 	hdev = hci_dev_get(dev);
1852 	if (!hdev)
1853 		return -ENODEV;
1854 
1855 	if (!test_bit(HCI_UP, &hdev->flags)) {
1856 		err = -ENETDOWN;
1857 		goto done;
1858 	}
1859 
1860 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1861 		err = -EBUSY;
1862 		goto done;
1863 	}
1864 
1865 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1866 		err = -EOPNOTSUPP;
1867 		goto done;
1868 	}
1869 
1870 	err = hci_dev_do_reset(hdev);
1871 
1872 done:
1873 	hci_dev_put(hdev);
1874 	return err;
1875 }
1876 
1877 int hci_dev_reset_stat(__u16 dev)
1878 {
1879 	struct hci_dev *hdev;
1880 	int ret = 0;
1881 
1882 	hdev = hci_dev_get(dev);
1883 	if (!hdev)
1884 		return -ENODEV;
1885 
1886 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1887 		ret = -EBUSY;
1888 		goto done;
1889 	}
1890 
1891 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1892 		ret = -EOPNOTSUPP;
1893 		goto done;
1894 	}
1895 
1896 	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1897 
1898 done:
1899 	hci_dev_put(hdev);
1900 	return ret;
1901 }
1902 
1903 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1904 {
1905 	bool conn_changed, discov_changed;
1906 
1907 	BT_DBG("%s scan 0x%02x", hdev->name, scan);
1908 
1909 	if ((scan & SCAN_PAGE))
1910 		conn_changed = !hci_dev_test_and_set_flag(hdev,
1911 							  HCI_CONNECTABLE);
1912 	else
1913 		conn_changed = hci_dev_test_and_clear_flag(hdev,
1914 							   HCI_CONNECTABLE);
1915 
1916 	if ((scan & SCAN_INQUIRY)) {
1917 		discov_changed = !hci_dev_test_and_set_flag(hdev,
1918 							    HCI_DISCOVERABLE);
1919 	} else {
1920 		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1921 		discov_changed = hci_dev_test_and_clear_flag(hdev,
1922 							     HCI_DISCOVERABLE);
1923 	}
1924 
1925 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
1926 		return;
1927 
1928 	if (conn_changed || discov_changed) {
1929 		/* In case this was disabled through mgmt */
1930 		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
1931 
1932 		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1933 			hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
1934 
1935 		mgmt_new_settings(hdev);
1936 	}
1937 }
1938 
1939 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1940 {
1941 	struct hci_dev *hdev;
1942 	struct hci_dev_req dr;
1943 	int err = 0;
1944 
1945 	if (copy_from_user(&dr, arg, sizeof(dr)))
1946 		return -EFAULT;
1947 
1948 	hdev = hci_dev_get(dr.dev_id);
1949 	if (!hdev)
1950 		return -ENODEV;
1951 
1952 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1953 		err = -EBUSY;
1954 		goto done;
1955 	}
1956 
1957 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1958 		err = -EOPNOTSUPP;
1959 		goto done;
1960 	}
1961 
1962 	if (hdev->dev_type != HCI_PRIMARY) {
1963 		err = -EOPNOTSUPP;
1964 		goto done;
1965 	}
1966 
1967 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1968 		err = -EOPNOTSUPP;
1969 		goto done;
1970 	}
1971 
1972 	switch (cmd) {
1973 	case HCISETAUTH:
1974 		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1975 				   HCI_INIT_TIMEOUT, NULL);
1976 		break;
1977 
1978 	case HCISETENCRYPT:
1979 		if (!lmp_encrypt_capable(hdev)) {
1980 			err = -EOPNOTSUPP;
1981 			break;
1982 		}
1983 
1984 		if (!test_bit(HCI_AUTH, &hdev->flags)) {
1985 			/* Auth must be enabled first */
1986 			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1987 					   HCI_INIT_TIMEOUT, NULL);
1988 			if (err)
1989 				break;
1990 		}
1991 
1992 		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1993 				   HCI_INIT_TIMEOUT, NULL);
1994 		break;
1995 
1996 	case HCISETSCAN:
1997 		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1998 				   HCI_INIT_TIMEOUT, NULL);
1999 
2000 		/* Ensure that the connectable and discoverable states
2001 		 * get correctly modified as this was a non-mgmt change.
2002 		 */
2003 		if (!err)
2004 			hci_update_scan_state(hdev, dr.dev_opt);
2005 		break;
2006 
2007 	case HCISETLINKPOL:
2008 		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2009 				   HCI_INIT_TIMEOUT, NULL);
2010 		break;
2011 
2012 	case HCISETLINKMODE:
2013 		hdev->link_mode = ((__u16) dr.dev_opt) &
2014 					(HCI_LM_MASTER | HCI_LM_ACCEPT);
2015 		break;
2016 
2017 	case HCISETPTYPE:
2018 		if (hdev->pkt_type == (__u16) dr.dev_opt)
2019 			break;
2020 
2021 		hdev->pkt_type = (__u16) dr.dev_opt;
2022 		mgmt_phy_configuration_changed(hdev, NULL);
2023 		break;
2024 
2025 	case HCISETACLMTU:
2026 		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
2027 		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2028 		break;
2029 
2030 	case HCISETSCOMTU:
2031 		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
2032 		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2033 		break;
2034 
2035 	default:
2036 		err = -EINVAL;
2037 		break;
2038 	}
2039 
2040 done:
2041 	hci_dev_put(hdev);
2042 	return err;
2043 }
2044 
2045 int hci_get_dev_list(void __user *arg)
2046 {
2047 	struct hci_dev *hdev;
2048 	struct hci_dev_list_req *dl;
2049 	struct hci_dev_req *dr;
2050 	int n = 0, size, err;
2051 	__u16 dev_num;
2052 
2053 	if (get_user(dev_num, (__u16 __user *) arg))
2054 		return -EFAULT;
2055 
2056 	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2057 		return -EINVAL;
2058 
2059 	size = sizeof(*dl) + dev_num * sizeof(*dr);
2060 
2061 	dl = kzalloc(size, GFP_KERNEL);
2062 	if (!dl)
2063 		return -ENOMEM;
2064 
2065 	dr = dl->dev_req;
2066 
2067 	read_lock(&hci_dev_list_lock);
2068 	list_for_each_entry(hdev, &hci_dev_list, list) {
2069 		unsigned long flags = hdev->flags;
2070 
2071 		/* When the auto-off is configured it means the transport
2072 		 * is running, but in that case still indicate that the
2073 		 * device is actually down.
2074 		 */
2075 		if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2076 			flags &= ~BIT(HCI_UP);
2077 
2078 		(dr + n)->dev_id  = hdev->id;
2079 		(dr + n)->dev_opt = flags;
2080 
2081 		if (++n >= dev_num)
2082 			break;
2083 	}
2084 	read_unlock(&hci_dev_list_lock);
2085 
2086 	dl->dev_num = n;
2087 	size = sizeof(*dl) + n * sizeof(*dr);
2088 
2089 	err = copy_to_user(arg, dl, size);
2090 	kfree(dl);
2091 
2092 	return err ? -EFAULT : 0;
2093 }
2094 
2095 int hci_get_dev_info(void __user *arg)
2096 {
2097 	struct hci_dev *hdev;
2098 	struct hci_dev_info di;
2099 	unsigned long flags;
2100 	int err = 0;
2101 
2102 	if (copy_from_user(&di, arg, sizeof(di)))
2103 		return -EFAULT;
2104 
2105 	hdev = hci_dev_get(di.dev_id);
2106 	if (!hdev)
2107 		return -ENODEV;
2108 
2109 	/* When the auto-off is configured it means the transport
2110 	 * is running, but in that case still indicate that the
2111 	 * device is actually down.
2112 	 */
2113 	if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2114 		flags = hdev->flags & ~BIT(HCI_UP);
2115 	else
2116 		flags = hdev->flags;
2117 
2118 	strcpy(di.name, hdev->name);
2119 	di.bdaddr   = hdev->bdaddr;
2120 	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2121 	di.flags    = flags;
2122 	di.pkt_type = hdev->pkt_type;
2123 	if (lmp_bredr_capable(hdev)) {
2124 		di.acl_mtu  = hdev->acl_mtu;
2125 		di.acl_pkts = hdev->acl_pkts;
2126 		di.sco_mtu  = hdev->sco_mtu;
2127 		di.sco_pkts = hdev->sco_pkts;
2128 	} else {
2129 		di.acl_mtu  = hdev->le_mtu;
2130 		di.acl_pkts = hdev->le_pkts;
2131 		di.sco_mtu  = 0;
2132 		di.sco_pkts = 0;
2133 	}
2134 	di.link_policy = hdev->link_policy;
2135 	di.link_mode   = hdev->link_mode;
2136 
2137 	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2138 	memcpy(&di.features, &hdev->features, sizeof(di.features));
2139 
2140 	if (copy_to_user(arg, &di, sizeof(di)))
2141 		err = -EFAULT;
2142 
2143 	hci_dev_put(hdev);
2144 
2145 	return err;
2146 }
2147 
2148 /* ---- Interface to HCI drivers ---- */
2149 
2150 static int hci_rfkill_set_block(void *data, bool blocked)
2151 {
2152 	struct hci_dev *hdev = data;
2153 
2154 	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2155 
2156 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2157 		return -EBUSY;
2158 
2159 	if (blocked) {
2160 		hci_dev_set_flag(hdev, HCI_RFKILLED);
2161 		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2162 		    !hci_dev_test_flag(hdev, HCI_CONFIG))
2163 			hci_dev_do_close(hdev);
2164 	} else {
2165 		hci_dev_clear_flag(hdev, HCI_RFKILLED);
2166 	}
2167 
2168 	return 0;
2169 }
2170 
2171 static const struct rfkill_ops hci_rfkill_ops = {
2172 	.set_block = hci_rfkill_set_block,
2173 };
2174 
2175 static void hci_power_on(struct work_struct *work)
2176 {
2177 	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2178 	int err;
2179 
2180 	BT_DBG("%s", hdev->name);
2181 
2182 	if (test_bit(HCI_UP, &hdev->flags) &&
2183 	    hci_dev_test_flag(hdev, HCI_MGMT) &&
2184 	    hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
2185 		cancel_delayed_work(&hdev->power_off);
2186 		hci_req_sync_lock(hdev);
2187 		err = __hci_req_hci_power_on(hdev);
2188 		hci_req_sync_unlock(hdev);
2189 		mgmt_power_on(hdev, err);
2190 		return;
2191 	}
2192 
2193 	err = hci_dev_do_open(hdev);
2194 	if (err < 0) {
2195 		hci_dev_lock(hdev);
2196 		mgmt_set_powered_failed(hdev, err);
2197 		hci_dev_unlock(hdev);
2198 		return;
2199 	}
2200 
2201 	/* During the HCI setup phase, a few error conditions are
2202 	 * ignored and they need to be checked now. If they are still
2203 	 * valid, it is important to turn the device back off.
2204 	 */
2205 	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2206 	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2207 	    (hdev->dev_type == HCI_PRIMARY &&
2208 	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2209 	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2210 		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2211 		hci_dev_do_close(hdev);
2212 	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2213 		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2214 				   HCI_AUTO_OFF_TIMEOUT);
2215 	}
2216 
2217 	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2218 		/* For unconfigured devices, set the HCI_RAW flag
2219 		 * so that userspace can easily identify them.
2220 		 */
2221 		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2222 			set_bit(HCI_RAW, &hdev->flags);
2223 
2224 		/* For fully configured devices, this will send
2225 		 * the Index Added event. For unconfigured devices,
2226 		 * it will send Unconfigued Index Added event.
2227 		 *
2228 		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2229 		 * and no event will be send.
2230 		 */
2231 		mgmt_index_added(hdev);
2232 	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2233 		/* When the controller is now configured, then it
2234 		 * is important to clear the HCI_RAW flag.
2235 		 */
2236 		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2237 			clear_bit(HCI_RAW, &hdev->flags);
2238 
2239 		/* Powering on the controller with HCI_CONFIG set only
2240 		 * happens with the transition from unconfigured to
2241 		 * configured. This will send the Index Added event.
2242 		 */
2243 		mgmt_index_added(hdev);
2244 	}
2245 }
2246 
2247 static void hci_power_off(struct work_struct *work)
2248 {
2249 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2250 					    power_off.work);
2251 
2252 	BT_DBG("%s", hdev->name);
2253 
2254 	hci_dev_do_close(hdev);
2255 }
2256 
2257 static void hci_error_reset(struct work_struct *work)
2258 {
2259 	struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2260 
2261 	BT_DBG("%s", hdev->name);
2262 
2263 	if (hdev->hw_error)
2264 		hdev->hw_error(hdev, hdev->hw_error_code);
2265 	else
2266 		bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
2267 
2268 	if (hci_dev_do_close(hdev))
2269 		return;
2270 
2271 	hci_dev_do_open(hdev);
2272 }
2273 
2274 void hci_uuids_clear(struct hci_dev *hdev)
2275 {
2276 	struct bt_uuid *uuid, *tmp;
2277 
2278 	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2279 		list_del(&uuid->list);
2280 		kfree(uuid);
2281 	}
2282 }
2283 
2284 void hci_link_keys_clear(struct hci_dev *hdev)
2285 {
2286 	struct link_key *key;
2287 
2288 	list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2289 		list_del_rcu(&key->list);
2290 		kfree_rcu(key, rcu);
2291 	}
2292 }
2293 
2294 void hci_smp_ltks_clear(struct hci_dev *hdev)
2295 {
2296 	struct smp_ltk *k;
2297 
2298 	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2299 		list_del_rcu(&k->list);
2300 		kfree_rcu(k, rcu);
2301 	}
2302 }
2303 
2304 void hci_smp_irks_clear(struct hci_dev *hdev)
2305 {
2306 	struct smp_irk *k;
2307 
2308 	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2309 		list_del_rcu(&k->list);
2310 		kfree_rcu(k, rcu);
2311 	}
2312 }
2313 
2314 void hci_blocked_keys_clear(struct hci_dev *hdev)
2315 {
2316 	struct blocked_key *b;
2317 
2318 	list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
2319 		list_del_rcu(&b->list);
2320 		kfree_rcu(b, rcu);
2321 	}
2322 }
2323 
2324 bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
2325 {
2326 	bool blocked = false;
2327 	struct blocked_key *b;
2328 
2329 	rcu_read_lock();
2330 	list_for_each_entry(b, &hdev->blocked_keys, list) {
2331 		if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
2332 			blocked = true;
2333 			break;
2334 		}
2335 	}
2336 
2337 	rcu_read_unlock();
2338 	return blocked;
2339 }
2340 
2341 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2342 {
2343 	struct link_key *k;
2344 
2345 	rcu_read_lock();
2346 	list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2347 		if (bacmp(bdaddr, &k->bdaddr) == 0) {
2348 			rcu_read_unlock();
2349 
2350 			if (hci_is_blocked_key(hdev,
2351 					       HCI_BLOCKED_KEY_TYPE_LINKKEY,
2352 					       k->val)) {
2353 				bt_dev_warn_ratelimited(hdev,
2354 							"Link key blocked for %pMR",
2355 							&k->bdaddr);
2356 				return NULL;
2357 			}
2358 
2359 			return k;
2360 		}
2361 	}
2362 	rcu_read_unlock();
2363 
2364 	return NULL;
2365 }
2366 
2367 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2368 			       u8 key_type, u8 old_key_type)
2369 {
2370 	/* Legacy key */
2371 	if (key_type < 0x03)
2372 		return true;
2373 
2374 	/* Debug keys are insecure so don't store them persistently */
2375 	if (key_type == HCI_LK_DEBUG_COMBINATION)
2376 		return false;
2377 
2378 	/* Changed combination key and there's no previous one */
2379 	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2380 		return false;
2381 
2382 	/* Security mode 3 case */
2383 	if (!conn)
2384 		return true;
2385 
2386 	/* BR/EDR key derived using SC from an LE link */
2387 	if (conn->type == LE_LINK)
2388 		return true;
2389 
2390 	/* Neither local nor remote side had no-bonding as requirement */
2391 	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2392 		return true;
2393 
2394 	/* Local side had dedicated bonding as requirement */
2395 	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2396 		return true;
2397 
2398 	/* Remote side had dedicated bonding as requirement */
2399 	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2400 		return true;
2401 
2402 	/* If none of the above criteria match, then don't store the key
2403 	 * persistently */
2404 	return false;
2405 }
2406 
2407 static u8 ltk_role(u8 type)
2408 {
2409 	if (type == SMP_LTK)
2410 		return HCI_ROLE_MASTER;
2411 
2412 	return HCI_ROLE_SLAVE;
2413 }
2414 
2415 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2416 			     u8 addr_type, u8 role)
2417 {
2418 	struct smp_ltk *k;
2419 
2420 	rcu_read_lock();
2421 	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2422 		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2423 			continue;
2424 
2425 		if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2426 			rcu_read_unlock();
2427 
2428 			if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
2429 					       k->val)) {
2430 				bt_dev_warn_ratelimited(hdev,
2431 							"LTK blocked for %pMR",
2432 							&k->bdaddr);
2433 				return NULL;
2434 			}
2435 
2436 			return k;
2437 		}
2438 	}
2439 	rcu_read_unlock();
2440 
2441 	return NULL;
2442 }
2443 
2444 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2445 {
2446 	struct smp_irk *irk_to_return = NULL;
2447 	struct smp_irk *irk;
2448 
2449 	rcu_read_lock();
2450 	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2451 		if (!bacmp(&irk->rpa, rpa)) {
2452 			irk_to_return = irk;
2453 			goto done;
2454 		}
2455 	}
2456 
2457 	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2458 		if (smp_irk_matches(hdev, irk->val, rpa)) {
2459 			bacpy(&irk->rpa, rpa);
2460 			irk_to_return = irk;
2461 			goto done;
2462 		}
2463 	}
2464 
2465 done:
2466 	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
2467 						irk_to_return->val)) {
2468 		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
2469 					&irk_to_return->bdaddr);
2470 		irk_to_return = NULL;
2471 	}
2472 
2473 	rcu_read_unlock();
2474 
2475 	return irk_to_return;
2476 }
2477 
2478 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2479 				     u8 addr_type)
2480 {
2481 	struct smp_irk *irk_to_return = NULL;
2482 	struct smp_irk *irk;
2483 
2484 	/* Identity Address must be public or static random */
2485 	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2486 		return NULL;
2487 
2488 	rcu_read_lock();
2489 	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2490 		if (addr_type == irk->addr_type &&
2491 		    bacmp(bdaddr, &irk->bdaddr) == 0) {
2492 			irk_to_return = irk;
2493 			goto done;
2494 		}
2495 	}
2496 
2497 done:
2498 
2499 	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
2500 						irk_to_return->val)) {
2501 		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
2502 					&irk_to_return->bdaddr);
2503 		irk_to_return = NULL;
2504 	}
2505 
2506 	rcu_read_unlock();
2507 
2508 	return irk_to_return;
2509 }
2510 
2511 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2512 				  bdaddr_t *bdaddr, u8 *val, u8 type,
2513 				  u8 pin_len, bool *persistent)
2514 {
2515 	struct link_key *key, *old_key;
2516 	u8 old_key_type;
2517 
2518 	old_key = hci_find_link_key(hdev, bdaddr);
2519 	if (old_key) {
2520 		old_key_type = old_key->type;
2521 		key = old_key;
2522 	} else {
2523 		old_key_type = conn ? conn->key_type : 0xff;
2524 		key = kzalloc(sizeof(*key), GFP_KERNEL);
2525 		if (!key)
2526 			return NULL;
2527 		list_add_rcu(&key->list, &hdev->link_keys);
2528 	}
2529 
2530 	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2531 
2532 	/* Some buggy controller combinations generate a changed
2533 	 * combination key for legacy pairing even when there's no
2534 	 * previous key */
2535 	if (type == HCI_LK_CHANGED_COMBINATION &&
2536 	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2537 		type = HCI_LK_COMBINATION;
2538 		if (conn)
2539 			conn->key_type = type;
2540 	}
2541 
2542 	bacpy(&key->bdaddr, bdaddr);
2543 	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2544 	key->pin_len = pin_len;
2545 
2546 	if (type == HCI_LK_CHANGED_COMBINATION)
2547 		key->type = old_key_type;
2548 	else
2549 		key->type = type;
2550 
2551 	if (persistent)
2552 		*persistent = hci_persistent_key(hdev, conn, type,
2553 						 old_key_type);
2554 
2555 	return key;
2556 }
2557 
2558 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2559 			    u8 addr_type, u8 type, u8 authenticated,
2560 			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2561 {
2562 	struct smp_ltk *key, *old_key;
2563 	u8 role = ltk_role(type);
2564 
2565 	old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2566 	if (old_key)
2567 		key = old_key;
2568 	else {
2569 		key = kzalloc(sizeof(*key), GFP_KERNEL);
2570 		if (!key)
2571 			return NULL;
2572 		list_add_rcu(&key->list, &hdev->long_term_keys);
2573 	}
2574 
2575 	bacpy(&key->bdaddr, bdaddr);
2576 	key->bdaddr_type = addr_type;
2577 	memcpy(key->val, tk, sizeof(key->val));
2578 	key->authenticated = authenticated;
2579 	key->ediv = ediv;
2580 	key->rand = rand;
2581 	key->enc_size = enc_size;
2582 	key->type = type;
2583 
2584 	return key;
2585 }
2586 
2587 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2588 			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
2589 {
2590 	struct smp_irk *irk;
2591 
2592 	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2593 	if (!irk) {
2594 		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2595 		if (!irk)
2596 			return NULL;
2597 
2598 		bacpy(&irk->bdaddr, bdaddr);
2599 		irk->addr_type = addr_type;
2600 
2601 		list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2602 	}
2603 
2604 	memcpy(irk->val, val, 16);
2605 	bacpy(&irk->rpa, rpa);
2606 
2607 	return irk;
2608 }
2609 
2610 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2611 {
2612 	struct link_key *key;
2613 
2614 	key = hci_find_link_key(hdev, bdaddr);
2615 	if (!key)
2616 		return -ENOENT;
2617 
2618 	BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2619 
2620 	list_del_rcu(&key->list);
2621 	kfree_rcu(key, rcu);
2622 
2623 	return 0;
2624 }
2625 
2626 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2627 {
2628 	struct smp_ltk *k;
2629 	int removed = 0;
2630 
2631 	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2632 		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2633 			continue;
2634 
2635 		BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2636 
2637 		list_del_rcu(&k->list);
2638 		kfree_rcu(k, rcu);
2639 		removed++;
2640 	}
2641 
2642 	return removed ? 0 : -ENOENT;
2643 }
2644 
2645 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2646 {
2647 	struct smp_irk *k;
2648 
2649 	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2650 		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2651 			continue;
2652 
2653 		BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2654 
2655 		list_del_rcu(&k->list);
2656 		kfree_rcu(k, rcu);
2657 	}
2658 }
2659 
2660 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2661 {
2662 	struct smp_ltk *k;
2663 	struct smp_irk *irk;
2664 	u8 addr_type;
2665 
2666 	if (type == BDADDR_BREDR) {
2667 		if (hci_find_link_key(hdev, bdaddr))
2668 			return true;
2669 		return false;
2670 	}
2671 
2672 	/* Convert to HCI addr type which struct smp_ltk uses */
2673 	if (type == BDADDR_LE_PUBLIC)
2674 		addr_type = ADDR_LE_DEV_PUBLIC;
2675 	else
2676 		addr_type = ADDR_LE_DEV_RANDOM;
2677 
2678 	irk = hci_get_irk(hdev, bdaddr, addr_type);
2679 	if (irk) {
2680 		bdaddr = &irk->bdaddr;
2681 		addr_type = irk->addr_type;
2682 	}
2683 
2684 	rcu_read_lock();
2685 	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2686 		if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2687 			rcu_read_unlock();
2688 			return true;
2689 		}
2690 	}
2691 	rcu_read_unlock();
2692 
2693 	return false;
2694 }
2695 
2696 /* HCI command timer function */
2697 static void hci_cmd_timeout(struct work_struct *work)
2698 {
2699 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2700 					    cmd_timer.work);
2701 
2702 	if (hdev->sent_cmd) {
2703 		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2704 		u16 opcode = __le16_to_cpu(sent->opcode);
2705 
2706 		bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
2707 	} else {
2708 		bt_dev_err(hdev, "command tx timeout");
2709 	}
2710 
2711 	if (hdev->cmd_timeout)
2712 		hdev->cmd_timeout(hdev);
2713 
2714 	atomic_set(&hdev->cmd_cnt, 1);
2715 	queue_work(hdev->workqueue, &hdev->cmd_work);
2716 }
2717 
2718 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2719 					  bdaddr_t *bdaddr, u8 bdaddr_type)
2720 {
2721 	struct oob_data *data;
2722 
2723 	list_for_each_entry(data, &hdev->remote_oob_data, list) {
2724 		if (bacmp(bdaddr, &data->bdaddr) != 0)
2725 			continue;
2726 		if (data->bdaddr_type != bdaddr_type)
2727 			continue;
2728 		return data;
2729 	}
2730 
2731 	return NULL;
2732 }
2733 
2734 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2735 			       u8 bdaddr_type)
2736 {
2737 	struct oob_data *data;
2738 
2739 	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2740 	if (!data)
2741 		return -ENOENT;
2742 
2743 	BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2744 
2745 	list_del(&data->list);
2746 	kfree(data);
2747 
2748 	return 0;
2749 }
2750 
2751 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2752 {
2753 	struct oob_data *data, *n;
2754 
2755 	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2756 		list_del(&data->list);
2757 		kfree(data);
2758 	}
2759 }
2760 
2761 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2762 			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
2763 			    u8 *hash256, u8 *rand256)
2764 {
2765 	struct oob_data *data;
2766 
2767 	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2768 	if (!data) {
2769 		data = kmalloc(sizeof(*data), GFP_KERNEL);
2770 		if (!data)
2771 			return -ENOMEM;
2772 
2773 		bacpy(&data->bdaddr, bdaddr);
2774 		data->bdaddr_type = bdaddr_type;
2775 		list_add(&data->list, &hdev->remote_oob_data);
2776 	}
2777 
2778 	if (hash192 && rand192) {
2779 		memcpy(data->hash192, hash192, sizeof(data->hash192));
2780 		memcpy(data->rand192, rand192, sizeof(data->rand192));
2781 		if (hash256 && rand256)
2782 			data->present = 0x03;
2783 	} else {
2784 		memset(data->hash192, 0, sizeof(data->hash192));
2785 		memset(data->rand192, 0, sizeof(data->rand192));
2786 		if (hash256 && rand256)
2787 			data->present = 0x02;
2788 		else
2789 			data->present = 0x00;
2790 	}
2791 
2792 	if (hash256 && rand256) {
2793 		memcpy(data->hash256, hash256, sizeof(data->hash256));
2794 		memcpy(data->rand256, rand256, sizeof(data->rand256));
2795 	} else {
2796 		memset(data->hash256, 0, sizeof(data->hash256));
2797 		memset(data->rand256, 0, sizeof(data->rand256));
2798 		if (hash192 && rand192)
2799 			data->present = 0x01;
2800 	}
2801 
2802 	BT_DBG("%s for %pMR", hdev->name, bdaddr);
2803 
2804 	return 0;
2805 }
2806 
2807 /* This function requires the caller holds hdev->lock */
2808 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2809 {
2810 	struct adv_info *adv_instance;
2811 
2812 	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2813 		if (adv_instance->instance == instance)
2814 			return adv_instance;
2815 	}
2816 
2817 	return NULL;
2818 }
2819 
2820 /* This function requires the caller holds hdev->lock */
2821 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
2822 {
2823 	struct adv_info *cur_instance;
2824 
2825 	cur_instance = hci_find_adv_instance(hdev, instance);
2826 	if (!cur_instance)
2827 		return NULL;
2828 
2829 	if (cur_instance == list_last_entry(&hdev->adv_instances,
2830 					    struct adv_info, list))
2831 		return list_first_entry(&hdev->adv_instances,
2832 						 struct adv_info, list);
2833 	else
2834 		return list_next_entry(cur_instance, list);
2835 }
2836 
2837 /* This function requires the caller holds hdev->lock */
2838 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2839 {
2840 	struct adv_info *adv_instance;
2841 
2842 	adv_instance = hci_find_adv_instance(hdev, instance);
2843 	if (!adv_instance)
2844 		return -ENOENT;
2845 
2846 	BT_DBG("%s removing %dMR", hdev->name, instance);
2847 
2848 	if (hdev->cur_adv_instance == instance) {
2849 		if (hdev->adv_instance_timeout) {
2850 			cancel_delayed_work(&hdev->adv_instance_expire);
2851 			hdev->adv_instance_timeout = 0;
2852 		}
2853 		hdev->cur_adv_instance = 0x00;
2854 	}
2855 
2856 	cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2857 
2858 	list_del(&adv_instance->list);
2859 	kfree(adv_instance);
2860 
2861 	hdev->adv_instance_cnt--;
2862 
2863 	return 0;
2864 }
2865 
2866 void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
2867 {
2868 	struct adv_info *adv_instance, *n;
2869 
2870 	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
2871 		adv_instance->rpa_expired = rpa_expired;
2872 }
2873 
2874 /* This function requires the caller holds hdev->lock */
2875 void hci_adv_instances_clear(struct hci_dev *hdev)
2876 {
2877 	struct adv_info *adv_instance, *n;
2878 
2879 	if (hdev->adv_instance_timeout) {
2880 		cancel_delayed_work(&hdev->adv_instance_expire);
2881 		hdev->adv_instance_timeout = 0;
2882 	}
2883 
2884 	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2885 		cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2886 		list_del(&adv_instance->list);
2887 		kfree(adv_instance);
2888 	}
2889 
2890 	hdev->adv_instance_cnt = 0;
2891 	hdev->cur_adv_instance = 0x00;
2892 }
2893 
2894 static void adv_instance_rpa_expired(struct work_struct *work)
2895 {
2896 	struct adv_info *adv_instance = container_of(work, struct adv_info,
2897 						     rpa_expired_cb.work);
2898 
2899 	BT_DBG("");
2900 
2901 	adv_instance->rpa_expired = true;
2902 }
2903 
2904 /* This function requires the caller holds hdev->lock */
2905 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2906 			 u16 adv_data_len, u8 *adv_data,
2907 			 u16 scan_rsp_len, u8 *scan_rsp_data,
2908 			 u16 timeout, u16 duration)
2909 {
2910 	struct adv_info *adv_instance;
2911 
2912 	adv_instance = hci_find_adv_instance(hdev, instance);
2913 	if (adv_instance) {
2914 		memset(adv_instance->adv_data, 0,
2915 		       sizeof(adv_instance->adv_data));
2916 		memset(adv_instance->scan_rsp_data, 0,
2917 		       sizeof(adv_instance->scan_rsp_data));
2918 	} else {
2919 		if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
2920 		    instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2921 			return -EOVERFLOW;
2922 
2923 		adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
2924 		if (!adv_instance)
2925 			return -ENOMEM;
2926 
2927 		adv_instance->pending = true;
2928 		adv_instance->instance = instance;
2929 		list_add(&adv_instance->list, &hdev->adv_instances);
2930 		hdev->adv_instance_cnt++;
2931 	}
2932 
2933 	adv_instance->flags = flags;
2934 	adv_instance->adv_data_len = adv_data_len;
2935 	adv_instance->scan_rsp_len = scan_rsp_len;
2936 
2937 	if (adv_data_len)
2938 		memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2939 
2940 	if (scan_rsp_len)
2941 		memcpy(adv_instance->scan_rsp_data,
2942 		       scan_rsp_data, scan_rsp_len);
2943 
2944 	adv_instance->timeout = timeout;
2945 	adv_instance->remaining_time = timeout;
2946 
2947 	if (duration == 0)
2948 		adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2949 	else
2950 		adv_instance->duration = duration;
2951 
2952 	adv_instance->tx_power = HCI_TX_POWER_INVALID;
2953 
2954 	INIT_DELAYED_WORK(&adv_instance->rpa_expired_cb,
2955 			  adv_instance_rpa_expired);
2956 
2957 	BT_DBG("%s for %dMR", hdev->name, instance);
2958 
2959 	return 0;
2960 }
2961 
2962 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2963 					 bdaddr_t *bdaddr, u8 type)
2964 {
2965 	struct bdaddr_list *b;
2966 
2967 	list_for_each_entry(b, bdaddr_list, list) {
2968 		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2969 			return b;
2970 	}
2971 
2972 	return NULL;
2973 }
2974 
2975 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
2976 				struct list_head *bdaddr_list, bdaddr_t *bdaddr,
2977 				u8 type)
2978 {
2979 	struct bdaddr_list_with_irk *b;
2980 
2981 	list_for_each_entry(b, bdaddr_list, list) {
2982 		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2983 			return b;
2984 	}
2985 
2986 	return NULL;
2987 }
2988 
2989 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2990 {
2991 	struct bdaddr_list *b, *n;
2992 
2993 	list_for_each_entry_safe(b, n, bdaddr_list, list) {
2994 		list_del(&b->list);
2995 		kfree(b);
2996 	}
2997 }
2998 
2999 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3000 {
3001 	struct bdaddr_list *entry;
3002 
3003 	if (!bacmp(bdaddr, BDADDR_ANY))
3004 		return -EBADF;
3005 
3006 	if (hci_bdaddr_list_lookup(list, bdaddr, type))
3007 		return -EEXIST;
3008 
3009 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3010 	if (!entry)
3011 		return -ENOMEM;
3012 
3013 	bacpy(&entry->bdaddr, bdaddr);
3014 	entry->bdaddr_type = type;
3015 
3016 	list_add(&entry->list, list);
3017 
3018 	return 0;
3019 }
3020 
3021 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
3022 					u8 type, u8 *peer_irk, u8 *local_irk)
3023 {
3024 	struct bdaddr_list_with_irk *entry;
3025 
3026 	if (!bacmp(bdaddr, BDADDR_ANY))
3027 		return -EBADF;
3028 
3029 	if (hci_bdaddr_list_lookup(list, bdaddr, type))
3030 		return -EEXIST;
3031 
3032 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3033 	if (!entry)
3034 		return -ENOMEM;
3035 
3036 	bacpy(&entry->bdaddr, bdaddr);
3037 	entry->bdaddr_type = type;
3038 
3039 	if (peer_irk)
3040 		memcpy(entry->peer_irk, peer_irk, 16);
3041 
3042 	if (local_irk)
3043 		memcpy(entry->local_irk, local_irk, 16);
3044 
3045 	list_add(&entry->list, list);
3046 
3047 	return 0;
3048 }
3049 
3050 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3051 {
3052 	struct bdaddr_list *entry;
3053 
3054 	if (!bacmp(bdaddr, BDADDR_ANY)) {
3055 		hci_bdaddr_list_clear(list);
3056 		return 0;
3057 	}
3058 
3059 	entry = hci_bdaddr_list_lookup(list, bdaddr, type);
3060 	if (!entry)
3061 		return -ENOENT;
3062 
3063 	list_del(&entry->list);
3064 	kfree(entry);
3065 
3066 	return 0;
3067 }
3068 
3069 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
3070 							u8 type)
3071 {
3072 	struct bdaddr_list_with_irk *entry;
3073 
3074 	if (!bacmp(bdaddr, BDADDR_ANY)) {
3075 		hci_bdaddr_list_clear(list);
3076 		return 0;
3077 	}
3078 
3079 	entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
3080 	if (!entry)
3081 		return -ENOENT;
3082 
3083 	list_del(&entry->list);
3084 	kfree(entry);
3085 
3086 	return 0;
3087 }
3088 
3089 /* This function requires the caller holds hdev->lock */
3090 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3091 					       bdaddr_t *addr, u8 addr_type)
3092 {
3093 	struct hci_conn_params *params;
3094 
3095 	list_for_each_entry(params, &hdev->le_conn_params, list) {
3096 		if (bacmp(&params->addr, addr) == 0 &&
3097 		    params->addr_type == addr_type) {
3098 			return params;
3099 		}
3100 	}
3101 
3102 	return NULL;
3103 }
3104 
3105 /* This function requires the caller holds hdev->lock */
3106 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3107 						  bdaddr_t *addr, u8 addr_type)
3108 {
3109 	struct hci_conn_params *param;
3110 
3111 	list_for_each_entry(param, list, action) {
3112 		if (bacmp(&param->addr, addr) == 0 &&
3113 		    param->addr_type == addr_type)
3114 			return param;
3115 	}
3116 
3117 	return NULL;
3118 }
3119 
3120 /* This function requires the caller holds hdev->lock */
3121 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3122 					    bdaddr_t *addr, u8 addr_type)
3123 {
3124 	struct hci_conn_params *params;
3125 
3126 	params = hci_conn_params_lookup(hdev, addr, addr_type);
3127 	if (params)
3128 		return params;
3129 
3130 	params = kzalloc(sizeof(*params), GFP_KERNEL);
3131 	if (!params) {
3132 		bt_dev_err(hdev, "out of memory");
3133 		return NULL;
3134 	}
3135 
3136 	bacpy(&params->addr, addr);
3137 	params->addr_type = addr_type;
3138 
3139 	list_add(&params->list, &hdev->le_conn_params);
3140 	INIT_LIST_HEAD(&params->action);
3141 
3142 	params->conn_min_interval = hdev->le_conn_min_interval;
3143 	params->conn_max_interval = hdev->le_conn_max_interval;
3144 	params->conn_latency = hdev->le_conn_latency;
3145 	params->supervision_timeout = hdev->le_supv_timeout;
3146 	params->auto_connect = HCI_AUTO_CONN_DISABLED;
3147 
3148 	BT_DBG("addr %pMR (type %u)", addr, addr_type);
3149 
3150 	return params;
3151 }
3152 
3153 static void hci_conn_params_free(struct hci_conn_params *params)
3154 {
3155 	if (params->conn) {
3156 		hci_conn_drop(params->conn);
3157 		hci_conn_put(params->conn);
3158 	}
3159 
3160 	list_del(&params->action);
3161 	list_del(&params->list);
3162 	kfree(params);
3163 }
3164 
3165 /* This function requires the caller holds hdev->lock */
3166 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3167 {
3168 	struct hci_conn_params *params;
3169 
3170 	params = hci_conn_params_lookup(hdev, addr, addr_type);
3171 	if (!params)
3172 		return;
3173 
3174 	hci_conn_params_free(params);
3175 
3176 	hci_update_background_scan(hdev);
3177 
3178 	BT_DBG("addr %pMR (type %u)", addr, addr_type);
3179 }
3180 
3181 /* This function requires the caller holds hdev->lock */
3182 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3183 {
3184 	struct hci_conn_params *params, *tmp;
3185 
3186 	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3187 		if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3188 			continue;
3189 
3190 		/* If trying to estabilish one time connection to disabled
3191 		 * device, leave the params, but mark them as just once.
3192 		 */
3193 		if (params->explicit_connect) {
3194 			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3195 			continue;
3196 		}
3197 
3198 		list_del(&params->list);
3199 		kfree(params);
3200 	}
3201 
3202 	BT_DBG("All LE disabled connection parameters were removed");
3203 }
3204 
3205 /* This function requires the caller holds hdev->lock */
3206 static void hci_conn_params_clear_all(struct hci_dev *hdev)
3207 {
3208 	struct hci_conn_params *params, *tmp;
3209 
3210 	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3211 		hci_conn_params_free(params);
3212 
3213 	BT_DBG("All LE connection parameters were removed");
3214 }
3215 
3216 /* Copy the Identity Address of the controller.
3217  *
3218  * If the controller has a public BD_ADDR, then by default use that one.
3219  * If this is a LE only controller without a public address, default to
3220  * the static random address.
3221  *
3222  * For debugging purposes it is possible to force controllers with a
3223  * public address to use the static random address instead.
3224  *
3225  * In case BR/EDR has been disabled on a dual-mode controller and
3226  * userspace has configured a static address, then that address
3227  * becomes the identity address instead of the public BR/EDR address.
3228  */
3229 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3230 			       u8 *bdaddr_type)
3231 {
3232 	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
3233 	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
3234 	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
3235 	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
3236 		bacpy(bdaddr, &hdev->static_addr);
3237 		*bdaddr_type = ADDR_LE_DEV_RANDOM;
3238 	} else {
3239 		bacpy(bdaddr, &hdev->bdaddr);
3240 		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
3241 	}
3242 }
3243 
3244 /* Alloc HCI device */
3245 struct hci_dev *hci_alloc_dev(void)
3246 {
3247 	struct hci_dev *hdev;
3248 
3249 	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3250 	if (!hdev)
3251 		return NULL;
3252 
3253 	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3254 	hdev->esco_type = (ESCO_HV1);
3255 	hdev->link_mode = (HCI_LM_ACCEPT);
3256 	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
3257 	hdev->io_capability = 0x03;	/* No Input No Output */
3258 	hdev->manufacturer = 0xffff;	/* Default to internal use */
3259 	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3260 	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3261 	hdev->adv_instance_cnt = 0;
3262 	hdev->cur_adv_instance = 0x00;
3263 	hdev->adv_instance_timeout = 0;
3264 
3265 	hdev->sniff_max_interval = 800;
3266 	hdev->sniff_min_interval = 80;
3267 
3268 	hdev->le_adv_channel_map = 0x07;
3269 	hdev->le_adv_min_interval = 0x0800;
3270 	hdev->le_adv_max_interval = 0x0800;
3271 	hdev->le_scan_interval = 0x0060;
3272 	hdev->le_scan_window = 0x0030;
3273 	hdev->le_conn_min_interval = 0x0018;
3274 	hdev->le_conn_max_interval = 0x0028;
3275 	hdev->le_conn_latency = 0x0000;
3276 	hdev->le_supv_timeout = 0x002a;
3277 	hdev->le_def_tx_len = 0x001b;
3278 	hdev->le_def_tx_time = 0x0148;
3279 	hdev->le_max_tx_len = 0x001b;
3280 	hdev->le_max_tx_time = 0x0148;
3281 	hdev->le_max_rx_len = 0x001b;
3282 	hdev->le_max_rx_time = 0x0148;
3283 	hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
3284 	hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
3285 	hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
3286 	hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
3287 	hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
3288 
3289 	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3290 	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3291 	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3292 	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3293 	hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
3294 	hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
3295 
3296 	mutex_init(&hdev->lock);
3297 	mutex_init(&hdev->req_lock);
3298 
3299 	INIT_LIST_HEAD(&hdev->mgmt_pending);
3300 	INIT_LIST_HEAD(&hdev->blacklist);
3301 	INIT_LIST_HEAD(&hdev->whitelist);
3302 	INIT_LIST_HEAD(&hdev->uuids);
3303 	INIT_LIST_HEAD(&hdev->link_keys);
3304 	INIT_LIST_HEAD(&hdev->long_term_keys);
3305 	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3306 	INIT_LIST_HEAD(&hdev->remote_oob_data);
3307 	INIT_LIST_HEAD(&hdev->le_white_list);
3308 	INIT_LIST_HEAD(&hdev->le_resolv_list);
3309 	INIT_LIST_HEAD(&hdev->le_conn_params);
3310 	INIT_LIST_HEAD(&hdev->pend_le_conns);
3311 	INIT_LIST_HEAD(&hdev->pend_le_reports);
3312 	INIT_LIST_HEAD(&hdev->conn_hash.list);
3313 	INIT_LIST_HEAD(&hdev->adv_instances);
3314 	INIT_LIST_HEAD(&hdev->blocked_keys);
3315 
3316 	INIT_WORK(&hdev->rx_work, hci_rx_work);
3317 	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3318 	INIT_WORK(&hdev->tx_work, hci_tx_work);
3319 	INIT_WORK(&hdev->power_on, hci_power_on);
3320 	INIT_WORK(&hdev->error_reset, hci_error_reset);
3321 
3322 	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3323 
3324 	skb_queue_head_init(&hdev->rx_q);
3325 	skb_queue_head_init(&hdev->cmd_q);
3326 	skb_queue_head_init(&hdev->raw_q);
3327 
3328 	init_waitqueue_head(&hdev->req_wait_q);
3329 
3330 	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3331 
3332 	hci_request_setup(hdev);
3333 
3334 	hci_init_sysfs(hdev);
3335 	discovery_init(hdev);
3336 
3337 	return hdev;
3338 }
3339 EXPORT_SYMBOL(hci_alloc_dev);
3340 
3341 /* Free HCI device */
3342 void hci_free_dev(struct hci_dev *hdev)
3343 {
3344 	/* will free via device release */
3345 	put_device(&hdev->dev);
3346 }
3347 EXPORT_SYMBOL(hci_free_dev);
3348 
3349 /* Register HCI device */
3350 int hci_register_dev(struct hci_dev *hdev)
3351 {
3352 	int id, error;
3353 
3354 	if (!hdev->open || !hdev->close || !hdev->send)
3355 		return -EINVAL;
3356 
3357 	/* Do not allow HCI_AMP devices to register at index 0,
3358 	 * so the index can be used as the AMP controller ID.
3359 	 */
3360 	switch (hdev->dev_type) {
3361 	case HCI_PRIMARY:
3362 		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3363 		break;
3364 	case HCI_AMP:
3365 		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3366 		break;
3367 	default:
3368 		return -EINVAL;
3369 	}
3370 
3371 	if (id < 0)
3372 		return id;
3373 
3374 	sprintf(hdev->name, "hci%d", id);
3375 	hdev->id = id;
3376 
3377 	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3378 
3379 	hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
3380 	if (!hdev->workqueue) {
3381 		error = -ENOMEM;
3382 		goto err;
3383 	}
3384 
3385 	hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
3386 						      hdev->name);
3387 	if (!hdev->req_workqueue) {
3388 		destroy_workqueue(hdev->workqueue);
3389 		error = -ENOMEM;
3390 		goto err;
3391 	}
3392 
3393 	if (!IS_ERR_OR_NULL(bt_debugfs))
3394 		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3395 
3396 	dev_set_name(&hdev->dev, "%s", hdev->name);
3397 
3398 	error = device_add(&hdev->dev);
3399 	if (error < 0)
3400 		goto err_wqueue;
3401 
3402 	hci_leds_init(hdev);
3403 
3404 	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3405 				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3406 				    hdev);
3407 	if (hdev->rfkill) {
3408 		if (rfkill_register(hdev->rfkill) < 0) {
3409 			rfkill_destroy(hdev->rfkill);
3410 			hdev->rfkill = NULL;
3411 		}
3412 	}
3413 
3414 	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3415 		hci_dev_set_flag(hdev, HCI_RFKILLED);
3416 
3417 	hci_dev_set_flag(hdev, HCI_SETUP);
3418 	hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3419 
3420 	if (hdev->dev_type == HCI_PRIMARY) {
3421 		/* Assume BR/EDR support until proven otherwise (such as
3422 		 * through reading supported features during init.
3423 		 */
3424 		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3425 	}
3426 
3427 	write_lock(&hci_dev_list_lock);
3428 	list_add(&hdev->list, &hci_dev_list);
3429 	write_unlock(&hci_dev_list_lock);
3430 
3431 	/* Devices that are marked for raw-only usage are unconfigured
3432 	 * and should not be included in normal operation.
3433 	 */
3434 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3435 		hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3436 
3437 	hci_sock_dev_event(hdev, HCI_DEV_REG);
3438 	hci_dev_hold(hdev);
3439 
3440 	queue_work(hdev->req_workqueue, &hdev->power_on);
3441 
3442 	return id;
3443 
3444 err_wqueue:
3445 	destroy_workqueue(hdev->workqueue);
3446 	destroy_workqueue(hdev->req_workqueue);
3447 err:
3448 	ida_simple_remove(&hci_index_ida, hdev->id);
3449 
3450 	return error;
3451 }
3452 EXPORT_SYMBOL(hci_register_dev);
3453 
3454 /* Unregister HCI device */
3455 void hci_unregister_dev(struct hci_dev *hdev)
3456 {
3457 	int id;
3458 
3459 	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3460 
3461 	hci_dev_set_flag(hdev, HCI_UNREGISTER);
3462 
3463 	id = hdev->id;
3464 
3465 	write_lock(&hci_dev_list_lock);
3466 	list_del(&hdev->list);
3467 	write_unlock(&hci_dev_list_lock);
3468 
3469 	cancel_work_sync(&hdev->power_on);
3470 
3471 	hci_dev_do_close(hdev);
3472 
3473 	if (!test_bit(HCI_INIT, &hdev->flags) &&
3474 	    !hci_dev_test_flag(hdev, HCI_SETUP) &&
3475 	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
3476 		hci_dev_lock(hdev);
3477 		mgmt_index_removed(hdev);
3478 		hci_dev_unlock(hdev);
3479 	}
3480 
3481 	/* mgmt_index_removed should take care of emptying the
3482 	 * pending list */
3483 	BUG_ON(!list_empty(&hdev->mgmt_pending));
3484 
3485 	hci_sock_dev_event(hdev, HCI_DEV_UNREG);
3486 
3487 	if (hdev->rfkill) {
3488 		rfkill_unregister(hdev->rfkill);
3489 		rfkill_destroy(hdev->rfkill);
3490 	}
3491 
3492 	device_del(&hdev->dev);
3493 
3494 	debugfs_remove_recursive(hdev->debugfs);
3495 	kfree_const(hdev->hw_info);
3496 	kfree_const(hdev->fw_info);
3497 
3498 	destroy_workqueue(hdev->workqueue);
3499 	destroy_workqueue(hdev->req_workqueue);
3500 
3501 	hci_dev_lock(hdev);
3502 	hci_bdaddr_list_clear(&hdev->blacklist);
3503 	hci_bdaddr_list_clear(&hdev->whitelist);
3504 	hci_uuids_clear(hdev);
3505 	hci_link_keys_clear(hdev);
3506 	hci_smp_ltks_clear(hdev);
3507 	hci_smp_irks_clear(hdev);
3508 	hci_remote_oob_data_clear(hdev);
3509 	hci_adv_instances_clear(hdev);
3510 	hci_bdaddr_list_clear(&hdev->le_white_list);
3511 	hci_bdaddr_list_clear(&hdev->le_resolv_list);
3512 	hci_conn_params_clear_all(hdev);
3513 	hci_discovery_filter_clear(hdev);
3514 	hci_blocked_keys_clear(hdev);
3515 	hci_dev_unlock(hdev);
3516 
3517 	hci_dev_put(hdev);
3518 
3519 	ida_simple_remove(&hci_index_ida, id);
3520 }
3521 EXPORT_SYMBOL(hci_unregister_dev);
3522 
3523 /* Suspend HCI device */
3524 int hci_suspend_dev(struct hci_dev *hdev)
3525 {
3526 	hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
3527 	return 0;
3528 }
3529 EXPORT_SYMBOL(hci_suspend_dev);
3530 
3531 /* Resume HCI device */
3532 int hci_resume_dev(struct hci_dev *hdev)
3533 {
3534 	hci_sock_dev_event(hdev, HCI_DEV_RESUME);
3535 	return 0;
3536 }
3537 EXPORT_SYMBOL(hci_resume_dev);
3538 
3539 /* Reset HCI device */
3540 int hci_reset_dev(struct hci_dev *hdev)
3541 {
3542 	static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3543 	struct sk_buff *skb;
3544 
3545 	skb = bt_skb_alloc(3, GFP_ATOMIC);
3546 	if (!skb)
3547 		return -ENOMEM;
3548 
3549 	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
3550 	skb_put_data(skb, hw_err, 3);
3551 
3552 	/* Send Hardware Error to upper stack */
3553 	return hci_recv_frame(hdev, skb);
3554 }
3555 EXPORT_SYMBOL(hci_reset_dev);
3556 
3557 /* Receive frame from HCI drivers */
3558 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3559 {
3560 	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
3561 		      && !test_bit(HCI_INIT, &hdev->flags))) {
3562 		kfree_skb(skb);
3563 		return -ENXIO;
3564 	}
3565 
3566 	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
3567 	    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
3568 	    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
3569 	    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
3570 		kfree_skb(skb);
3571 		return -EINVAL;
3572 	}
3573 
3574 	/* Incoming skb */
3575 	bt_cb(skb)->incoming = 1;
3576 
3577 	/* Time stamp */
3578 	__net_timestamp(skb);
3579 
3580 	skb_queue_tail(&hdev->rx_q, skb);
3581 	queue_work(hdev->workqueue, &hdev->rx_work);
3582 
3583 	return 0;
3584 }
3585 EXPORT_SYMBOL(hci_recv_frame);
3586 
3587 /* Receive diagnostic message from HCI drivers */
3588 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
3589 {
3590 	/* Mark as diagnostic packet */
3591 	hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
3592 
3593 	/* Time stamp */
3594 	__net_timestamp(skb);
3595 
3596 	skb_queue_tail(&hdev->rx_q, skb);
3597 	queue_work(hdev->workqueue, &hdev->rx_work);
3598 
3599 	return 0;
3600 }
3601 EXPORT_SYMBOL(hci_recv_diag);
3602 
3603 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
3604 {
3605 	va_list vargs;
3606 
3607 	va_start(vargs, fmt);
3608 	kfree_const(hdev->hw_info);
3609 	hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3610 	va_end(vargs);
3611 }
3612 EXPORT_SYMBOL(hci_set_hw_info);
3613 
3614 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
3615 {
3616 	va_list vargs;
3617 
3618 	va_start(vargs, fmt);
3619 	kfree_const(hdev->fw_info);
3620 	hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3621 	va_end(vargs);
3622 }
3623 EXPORT_SYMBOL(hci_set_fw_info);
3624 
3625 /* ---- Interface to upper protocols ---- */
3626 
3627 int hci_register_cb(struct hci_cb *cb)
3628 {
3629 	BT_DBG("%p name %s", cb, cb->name);
3630 
3631 	mutex_lock(&hci_cb_list_lock);
3632 	list_add_tail(&cb->list, &hci_cb_list);
3633 	mutex_unlock(&hci_cb_list_lock);
3634 
3635 	return 0;
3636 }
3637 EXPORT_SYMBOL(hci_register_cb);
3638 
3639 int hci_unregister_cb(struct hci_cb *cb)
3640 {
3641 	BT_DBG("%p name %s", cb, cb->name);
3642 
3643 	mutex_lock(&hci_cb_list_lock);
3644 	list_del(&cb->list);
3645 	mutex_unlock(&hci_cb_list_lock);
3646 
3647 	return 0;
3648 }
3649 EXPORT_SYMBOL(hci_unregister_cb);
3650 
3651 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3652 {
3653 	int err;
3654 
3655 	BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
3656 	       skb->len);
3657 
3658 	/* Time stamp */
3659 	__net_timestamp(skb);
3660 
3661 	/* Send copy to monitor */
3662 	hci_send_to_monitor(hdev, skb);
3663 
3664 	if (atomic_read(&hdev->promisc)) {
3665 		/* Send copy to the sockets */
3666 		hci_send_to_sock(hdev, skb);
3667 	}
3668 
3669 	/* Get rid of skb owner, prior to sending to the driver. */
3670 	skb_orphan(skb);
3671 
3672 	if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3673 		kfree_skb(skb);
3674 		return;
3675 	}
3676 
3677 	err = hdev->send(hdev, skb);
3678 	if (err < 0) {
3679 		bt_dev_err(hdev, "sending frame failed (%d)", err);
3680 		kfree_skb(skb);
3681 	}
3682 }
3683 
3684 /* Send HCI command */
3685 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3686 		 const void *param)
3687 {
3688 	struct sk_buff *skb;
3689 
3690 	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3691 
3692 	skb = hci_prepare_cmd(hdev, opcode, plen, param);
3693 	if (!skb) {
3694 		bt_dev_err(hdev, "no memory for command");
3695 		return -ENOMEM;
3696 	}
3697 
3698 	/* Stand-alone HCI commands must be flagged as
3699 	 * single-command requests.
3700 	 */
3701 	bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
3702 
3703 	skb_queue_tail(&hdev->cmd_q, skb);
3704 	queue_work(hdev->workqueue, &hdev->cmd_work);
3705 
3706 	return 0;
3707 }
3708 
3709 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
3710 		   const void *param)
3711 {
3712 	struct sk_buff *skb;
3713 
3714 	if (hci_opcode_ogf(opcode) != 0x3f) {
3715 		/* A controller receiving a command shall respond with either
3716 		 * a Command Status Event or a Command Complete Event.
3717 		 * Therefore, all standard HCI commands must be sent via the
3718 		 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
3719 		 * Some vendors do not comply with this rule for vendor-specific
3720 		 * commands and do not return any event. We want to support
3721 		 * unresponded commands for such cases only.
3722 		 */
3723 		bt_dev_err(hdev, "unresponded command not supported");
3724 		return -EINVAL;
3725 	}
3726 
3727 	skb = hci_prepare_cmd(hdev, opcode, plen, param);
3728 	if (!skb) {
3729 		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
3730 			   opcode);
3731 		return -ENOMEM;
3732 	}
3733 
3734 	hci_send_frame(hdev, skb);
3735 
3736 	return 0;
3737 }
3738 EXPORT_SYMBOL(__hci_cmd_send);
3739 
3740 /* Get data from the previously sent command */
3741 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3742 {
3743 	struct hci_command_hdr *hdr;
3744 
3745 	if (!hdev->sent_cmd)
3746 		return NULL;
3747 
3748 	hdr = (void *) hdev->sent_cmd->data;
3749 
3750 	if (hdr->opcode != cpu_to_le16(opcode))
3751 		return NULL;
3752 
3753 	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3754 
3755 	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3756 }
3757 
3758 /* Send HCI command and wait for command commplete event */
3759 struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
3760 			     const void *param, u32 timeout)
3761 {
3762 	struct sk_buff *skb;
3763 
3764 	if (!test_bit(HCI_UP, &hdev->flags))
3765 		return ERR_PTR(-ENETDOWN);
3766 
3767 	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
3768 
3769 	hci_req_sync_lock(hdev);
3770 	skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
3771 	hci_req_sync_unlock(hdev);
3772 
3773 	return skb;
3774 }
3775 EXPORT_SYMBOL(hci_cmd_sync);
3776 
3777 /* Send ACL data */
3778 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3779 {
3780 	struct hci_acl_hdr *hdr;
3781 	int len = skb->len;
3782 
3783 	skb_push(skb, HCI_ACL_HDR_SIZE);
3784 	skb_reset_transport_header(skb);
3785 	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3786 	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3787 	hdr->dlen   = cpu_to_le16(len);
3788 }
3789 
3790 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3791 			  struct sk_buff *skb, __u16 flags)
3792 {
3793 	struct hci_conn *conn = chan->conn;
3794 	struct hci_dev *hdev = conn->hdev;
3795 	struct sk_buff *list;
3796 
3797 	skb->len = skb_headlen(skb);
3798 	skb->data_len = 0;
3799 
3800 	hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3801 
3802 	switch (hdev->dev_type) {
3803 	case HCI_PRIMARY:
3804 		hci_add_acl_hdr(skb, conn->handle, flags);
3805 		break;
3806 	case HCI_AMP:
3807 		hci_add_acl_hdr(skb, chan->handle, flags);
3808 		break;
3809 	default:
3810 		bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
3811 		return;
3812 	}
3813 
3814 	list = skb_shinfo(skb)->frag_list;
3815 	if (!list) {
3816 		/* Non fragmented */
3817 		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3818 
3819 		skb_queue_tail(queue, skb);
3820 	} else {
3821 		/* Fragmented */
3822 		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3823 
3824 		skb_shinfo(skb)->frag_list = NULL;
3825 
3826 		/* Queue all fragments atomically. We need to use spin_lock_bh
3827 		 * here because of 6LoWPAN links, as there this function is
3828 		 * called from softirq and using normal spin lock could cause
3829 		 * deadlocks.
3830 		 */
3831 		spin_lock_bh(&queue->lock);
3832 
3833 		__skb_queue_tail(queue, skb);
3834 
3835 		flags &= ~ACL_START;
3836 		flags |= ACL_CONT;
3837 		do {
3838 			skb = list; list = list->next;
3839 
3840 			hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3841 			hci_add_acl_hdr(skb, conn->handle, flags);
3842 
3843 			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3844 
3845 			__skb_queue_tail(queue, skb);
3846 		} while (list);
3847 
3848 		spin_unlock_bh(&queue->lock);
3849 	}
3850 }
3851 
3852 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3853 {
3854 	struct hci_dev *hdev = chan->conn->hdev;
3855 
3856 	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3857 
3858 	hci_queue_acl(chan, &chan->data_q, skb, flags);
3859 
3860 	queue_work(hdev->workqueue, &hdev->tx_work);
3861 }
3862 
3863 /* Send SCO data */
3864 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3865 {
3866 	struct hci_dev *hdev = conn->hdev;
3867 	struct hci_sco_hdr hdr;
3868 
3869 	BT_DBG("%s len %d", hdev->name, skb->len);
3870 
3871 	hdr.handle = cpu_to_le16(conn->handle);
3872 	hdr.dlen   = skb->len;
3873 
3874 	skb_push(skb, HCI_SCO_HDR_SIZE);
3875 	skb_reset_transport_header(skb);
3876 	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3877 
3878 	hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
3879 
3880 	skb_queue_tail(&conn->data_q, skb);
3881 	queue_work(hdev->workqueue, &hdev->tx_work);
3882 }
3883 
3884 /* ---- HCI TX task (outgoing data) ---- */
3885 
3886 /* HCI Connection scheduler */
3887 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3888 				     int *quote)
3889 {
3890 	struct hci_conn_hash *h = &hdev->conn_hash;
3891 	struct hci_conn *conn = NULL, *c;
3892 	unsigned int num = 0, min = ~0;
3893 
3894 	/* We don't have to lock device here. Connections are always
3895 	 * added and removed with TX task disabled. */
3896 
3897 	rcu_read_lock();
3898 
3899 	list_for_each_entry_rcu(c, &h->list, list) {
3900 		if (c->type != type || skb_queue_empty(&c->data_q))
3901 			continue;
3902 
3903 		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3904 			continue;
3905 
3906 		num++;
3907 
3908 		if (c->sent < min) {
3909 			min  = c->sent;
3910 			conn = c;
3911 		}
3912 
3913 		if (hci_conn_num(hdev, type) == num)
3914 			break;
3915 	}
3916 
3917 	rcu_read_unlock();
3918 
3919 	if (conn) {
3920 		int cnt, q;
3921 
3922 		switch (conn->type) {
3923 		case ACL_LINK:
3924 			cnt = hdev->acl_cnt;
3925 			break;
3926 		case SCO_LINK:
3927 		case ESCO_LINK:
3928 			cnt = hdev->sco_cnt;
3929 			break;
3930 		case LE_LINK:
3931 			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3932 			break;
3933 		default:
3934 			cnt = 0;
3935 			bt_dev_err(hdev, "unknown link type %d", conn->type);
3936 		}
3937 
3938 		q = cnt / num;
3939 		*quote = q ? q : 1;
3940 	} else
3941 		*quote = 0;
3942 
3943 	BT_DBG("conn %p quote %d", conn, *quote);
3944 	return conn;
3945 }
3946 
3947 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3948 {
3949 	struct hci_conn_hash *h = &hdev->conn_hash;
3950 	struct hci_conn *c;
3951 
3952 	bt_dev_err(hdev, "link tx timeout");
3953 
3954 	rcu_read_lock();
3955 
3956 	/* Kill stalled connections */
3957 	list_for_each_entry_rcu(c, &h->list, list) {
3958 		if (c->type == type && c->sent) {
3959 			bt_dev_err(hdev, "killing stalled connection %pMR",
3960 				   &c->dst);
3961 			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3962 		}
3963 	}
3964 
3965 	rcu_read_unlock();
3966 }
3967 
3968 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3969 				      int *quote)
3970 {
3971 	struct hci_conn_hash *h = &hdev->conn_hash;
3972 	struct hci_chan *chan = NULL;
3973 	unsigned int num = 0, min = ~0, cur_prio = 0;
3974 	struct hci_conn *conn;
3975 	int cnt, q, conn_num = 0;
3976 
3977 	BT_DBG("%s", hdev->name);
3978 
3979 	rcu_read_lock();
3980 
3981 	list_for_each_entry_rcu(conn, &h->list, list) {
3982 		struct hci_chan *tmp;
3983 
3984 		if (conn->type != type)
3985 			continue;
3986 
3987 		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3988 			continue;
3989 
3990 		conn_num++;
3991 
3992 		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3993 			struct sk_buff *skb;
3994 
3995 			if (skb_queue_empty(&tmp->data_q))
3996 				continue;
3997 
3998 			skb = skb_peek(&tmp->data_q);
3999 			if (skb->priority < cur_prio)
4000 				continue;
4001 
4002 			if (skb->priority > cur_prio) {
4003 				num = 0;
4004 				min = ~0;
4005 				cur_prio = skb->priority;
4006 			}
4007 
4008 			num++;
4009 
4010 			if (conn->sent < min) {
4011 				min  = conn->sent;
4012 				chan = tmp;
4013 			}
4014 		}
4015 
4016 		if (hci_conn_num(hdev, type) == conn_num)
4017 			break;
4018 	}
4019 
4020 	rcu_read_unlock();
4021 
4022 	if (!chan)
4023 		return NULL;
4024 
4025 	switch (chan->conn->type) {
4026 	case ACL_LINK:
4027 		cnt = hdev->acl_cnt;
4028 		break;
4029 	case AMP_LINK:
4030 		cnt = hdev->block_cnt;
4031 		break;
4032 	case SCO_LINK:
4033 	case ESCO_LINK:
4034 		cnt = hdev->sco_cnt;
4035 		break;
4036 	case LE_LINK:
4037 		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4038 		break;
4039 	default:
4040 		cnt = 0;
4041 		bt_dev_err(hdev, "unknown link type %d", chan->conn->type);
4042 	}
4043 
4044 	q = cnt / num;
4045 	*quote = q ? q : 1;
4046 	BT_DBG("chan %p quote %d", chan, *quote);
4047 	return chan;
4048 }
4049 
4050 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4051 {
4052 	struct hci_conn_hash *h = &hdev->conn_hash;
4053 	struct hci_conn *conn;
4054 	int num = 0;
4055 
4056 	BT_DBG("%s", hdev->name);
4057 
4058 	rcu_read_lock();
4059 
4060 	list_for_each_entry_rcu(conn, &h->list, list) {
4061 		struct hci_chan *chan;
4062 
4063 		if (conn->type != type)
4064 			continue;
4065 
4066 		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4067 			continue;
4068 
4069 		num++;
4070 
4071 		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4072 			struct sk_buff *skb;
4073 
4074 			if (chan->sent) {
4075 				chan->sent = 0;
4076 				continue;
4077 			}
4078 
4079 			if (skb_queue_empty(&chan->data_q))
4080 				continue;
4081 
4082 			skb = skb_peek(&chan->data_q);
4083 			if (skb->priority >= HCI_PRIO_MAX - 1)
4084 				continue;
4085 
4086 			skb->priority = HCI_PRIO_MAX - 1;
4087 
4088 			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4089 			       skb->priority);
4090 		}
4091 
4092 		if (hci_conn_num(hdev, type) == num)
4093 			break;
4094 	}
4095 
4096 	rcu_read_unlock();
4097 
4098 }
4099 
4100 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4101 {
4102 	/* Calculate count of blocks used by this packet */
4103 	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4104 }
4105 
4106 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4107 {
4108 	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
4109 		/* ACL tx timeout must be longer than maximum
4110 		 * link supervision timeout (40.9 seconds) */
4111 		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4112 				       HCI_ACL_TX_TIMEOUT))
4113 			hci_link_tx_to(hdev, ACL_LINK);
4114 	}
4115 }
4116 
4117 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4118 {
4119 	unsigned int cnt = hdev->acl_cnt;
4120 	struct hci_chan *chan;
4121 	struct sk_buff *skb;
4122 	int quote;
4123 
4124 	__check_timeout(hdev, cnt);
4125 
4126 	while (hdev->acl_cnt &&
4127 	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4128 		u32 priority = (skb_peek(&chan->data_q))->priority;
4129 		while (quote-- && (skb = skb_peek(&chan->data_q))) {
4130 			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4131 			       skb->len, skb->priority);
4132 
4133 			/* Stop if priority has changed */
4134 			if (skb->priority < priority)
4135 				break;
4136 
4137 			skb = skb_dequeue(&chan->data_q);
4138 
4139 			hci_conn_enter_active_mode(chan->conn,
4140 						   bt_cb(skb)->force_active);
4141 
4142 			hci_send_frame(hdev, skb);
4143 			hdev->acl_last_tx = jiffies;
4144 
4145 			hdev->acl_cnt--;
4146 			chan->sent++;
4147 			chan->conn->sent++;
4148 		}
4149 	}
4150 
4151 	if (cnt != hdev->acl_cnt)
4152 		hci_prio_recalculate(hdev, ACL_LINK);
4153 }
4154 
4155 static void hci_sched_acl_blk(struct hci_dev *hdev)
4156 {
4157 	unsigned int cnt = hdev->block_cnt;
4158 	struct hci_chan *chan;
4159 	struct sk_buff *skb;
4160 	int quote;
4161 	u8 type;
4162 
4163 	__check_timeout(hdev, cnt);
4164 
4165 	BT_DBG("%s", hdev->name);
4166 
4167 	if (hdev->dev_type == HCI_AMP)
4168 		type = AMP_LINK;
4169 	else
4170 		type = ACL_LINK;
4171 
4172 	while (hdev->block_cnt > 0 &&
4173 	       (chan = hci_chan_sent(hdev, type, &quote))) {
4174 		u32 priority = (skb_peek(&chan->data_q))->priority;
4175 		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4176 			int blocks;
4177 
4178 			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4179 			       skb->len, skb->priority);
4180 
4181 			/* Stop if priority has changed */
4182 			if (skb->priority < priority)
4183 				break;
4184 
4185 			skb = skb_dequeue(&chan->data_q);
4186 
4187 			blocks = __get_blocks(hdev, skb);
4188 			if (blocks > hdev->block_cnt)
4189 				return;
4190 
4191 			hci_conn_enter_active_mode(chan->conn,
4192 						   bt_cb(skb)->force_active);
4193 
4194 			hci_send_frame(hdev, skb);
4195 			hdev->acl_last_tx = jiffies;
4196 
4197 			hdev->block_cnt -= blocks;
4198 			quote -= blocks;
4199 
4200 			chan->sent += blocks;
4201 			chan->conn->sent += blocks;
4202 		}
4203 	}
4204 
4205 	if (cnt != hdev->block_cnt)
4206 		hci_prio_recalculate(hdev, type);
4207 }
4208 
4209 static void hci_sched_acl(struct hci_dev *hdev)
4210 {
4211 	BT_DBG("%s", hdev->name);
4212 
4213 	/* No ACL link over BR/EDR controller */
4214 	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
4215 		return;
4216 
4217 	/* No AMP link over AMP controller */
4218 	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4219 		return;
4220 
4221 	switch (hdev->flow_ctl_mode) {
4222 	case HCI_FLOW_CTL_MODE_PACKET_BASED:
4223 		hci_sched_acl_pkt(hdev);
4224 		break;
4225 
4226 	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4227 		hci_sched_acl_blk(hdev);
4228 		break;
4229 	}
4230 }
4231 
4232 /* Schedule SCO */
4233 static void hci_sched_sco(struct hci_dev *hdev)
4234 {
4235 	struct hci_conn *conn;
4236 	struct sk_buff *skb;
4237 	int quote;
4238 
4239 	BT_DBG("%s", hdev->name);
4240 
4241 	if (!hci_conn_num(hdev, SCO_LINK))
4242 		return;
4243 
4244 	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4245 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4246 			BT_DBG("skb %p len %d", skb, skb->len);
4247 			hci_send_frame(hdev, skb);
4248 
4249 			conn->sent++;
4250 			if (conn->sent == ~0)
4251 				conn->sent = 0;
4252 		}
4253 	}
4254 }
4255 
4256 static void hci_sched_esco(struct hci_dev *hdev)
4257 {
4258 	struct hci_conn *conn;
4259 	struct sk_buff *skb;
4260 	int quote;
4261 
4262 	BT_DBG("%s", hdev->name);
4263 
4264 	if (!hci_conn_num(hdev, ESCO_LINK))
4265 		return;
4266 
4267 	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4268 						     &quote))) {
4269 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4270 			BT_DBG("skb %p len %d", skb, skb->len);
4271 			hci_send_frame(hdev, skb);
4272 
4273 			conn->sent++;
4274 			if (conn->sent == ~0)
4275 				conn->sent = 0;
4276 		}
4277 	}
4278 }
4279 
4280 static void hci_sched_le(struct hci_dev *hdev)
4281 {
4282 	struct hci_chan *chan;
4283 	struct sk_buff *skb;
4284 	int quote, cnt, tmp;
4285 
4286 	BT_DBG("%s", hdev->name);
4287 
4288 	if (!hci_conn_num(hdev, LE_LINK))
4289 		return;
4290 
4291 	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4292 
4293 	__check_timeout(hdev, cnt);
4294 
4295 	tmp = cnt;
4296 	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4297 		u32 priority = (skb_peek(&chan->data_q))->priority;
4298 		while (quote-- && (skb = skb_peek(&chan->data_q))) {
4299 			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4300 			       skb->len, skb->priority);
4301 
4302 			/* Stop if priority has changed */
4303 			if (skb->priority < priority)
4304 				break;
4305 
4306 			skb = skb_dequeue(&chan->data_q);
4307 
4308 			hci_send_frame(hdev, skb);
4309 			hdev->le_last_tx = jiffies;
4310 
4311 			cnt--;
4312 			chan->sent++;
4313 			chan->conn->sent++;
4314 		}
4315 	}
4316 
4317 	if (hdev->le_pkts)
4318 		hdev->le_cnt = cnt;
4319 	else
4320 		hdev->acl_cnt = cnt;
4321 
4322 	if (cnt != tmp)
4323 		hci_prio_recalculate(hdev, LE_LINK);
4324 }
4325 
4326 static void hci_tx_work(struct work_struct *work)
4327 {
4328 	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4329 	struct sk_buff *skb;
4330 
4331 	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4332 	       hdev->sco_cnt, hdev->le_cnt);
4333 
4334 	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4335 		/* Schedule queues and send stuff to HCI driver */
4336 		hci_sched_acl(hdev);
4337 		hci_sched_sco(hdev);
4338 		hci_sched_esco(hdev);
4339 		hci_sched_le(hdev);
4340 	}
4341 
4342 	/* Send next queued raw (unknown type) packet */
4343 	while ((skb = skb_dequeue(&hdev->raw_q)))
4344 		hci_send_frame(hdev, skb);
4345 }
4346 
4347 /* ----- HCI RX task (incoming data processing) ----- */
4348 
4349 /* ACL data packet */
4350 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4351 {
4352 	struct hci_acl_hdr *hdr = (void *) skb->data;
4353 	struct hci_conn *conn;
4354 	__u16 handle, flags;
4355 
4356 	skb_pull(skb, HCI_ACL_HDR_SIZE);
4357 
4358 	handle = __le16_to_cpu(hdr->handle);
4359 	flags  = hci_flags(handle);
4360 	handle = hci_handle(handle);
4361 
4362 	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4363 	       handle, flags);
4364 
4365 	hdev->stat.acl_rx++;
4366 
4367 	hci_dev_lock(hdev);
4368 	conn = hci_conn_hash_lookup_handle(hdev, handle);
4369 	hci_dev_unlock(hdev);
4370 
4371 	if (conn) {
4372 		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4373 
4374 		/* Send to upper protocol */
4375 		l2cap_recv_acldata(conn, skb, flags);
4376 		return;
4377 	} else {
4378 		bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
4379 			   handle);
4380 	}
4381 
4382 	kfree_skb(skb);
4383 }
4384 
4385 /* SCO data packet */
4386 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4387 {
4388 	struct hci_sco_hdr *hdr = (void *) skb->data;
4389 	struct hci_conn *conn;
4390 	__u16 handle;
4391 
4392 	skb_pull(skb, HCI_SCO_HDR_SIZE);
4393 
4394 	handle = __le16_to_cpu(hdr->handle);
4395 
4396 	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4397 
4398 	hdev->stat.sco_rx++;
4399 
4400 	hci_dev_lock(hdev);
4401 	conn = hci_conn_hash_lookup_handle(hdev, handle);
4402 	hci_dev_unlock(hdev);
4403 
4404 	if (conn) {
4405 		/* Send to upper protocol */
4406 		sco_recv_scodata(conn, skb);
4407 		return;
4408 	} else {
4409 		bt_dev_err(hdev, "SCO packet for unknown connection handle %d",
4410 			   handle);
4411 	}
4412 
4413 	kfree_skb(skb);
4414 }
4415 
4416 static bool hci_req_is_complete(struct hci_dev *hdev)
4417 {
4418 	struct sk_buff *skb;
4419 
4420 	skb = skb_peek(&hdev->cmd_q);
4421 	if (!skb)
4422 		return true;
4423 
4424 	return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
4425 }
4426 
4427 static void hci_resend_last(struct hci_dev *hdev)
4428 {
4429 	struct hci_command_hdr *sent;
4430 	struct sk_buff *skb;
4431 	u16 opcode;
4432 
4433 	if (!hdev->sent_cmd)
4434 		return;
4435 
4436 	sent = (void *) hdev->sent_cmd->data;
4437 	opcode = __le16_to_cpu(sent->opcode);
4438 	if (opcode == HCI_OP_RESET)
4439 		return;
4440 
4441 	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4442 	if (!skb)
4443 		return;
4444 
4445 	skb_queue_head(&hdev->cmd_q, skb);
4446 	queue_work(hdev->workqueue, &hdev->cmd_work);
4447 }
4448 
4449 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4450 			  hci_req_complete_t *req_complete,
4451 			  hci_req_complete_skb_t *req_complete_skb)
4452 {
4453 	struct sk_buff *skb;
4454 	unsigned long flags;
4455 
4456 	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4457 
4458 	/* If the completed command doesn't match the last one that was
4459 	 * sent we need to do special handling of it.
4460 	 */
4461 	if (!hci_sent_cmd_data(hdev, opcode)) {
4462 		/* Some CSR based controllers generate a spontaneous
4463 		 * reset complete event during init and any pending
4464 		 * command will never be completed. In such a case we
4465 		 * need to resend whatever was the last sent
4466 		 * command.
4467 		 */
4468 		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4469 			hci_resend_last(hdev);
4470 
4471 		return;
4472 	}
4473 
4474 	/* If we reach this point this event matches the last command sent */
4475 	hci_dev_clear_flag(hdev, HCI_CMD_PENDING);
4476 
4477 	/* If the command succeeded and there's still more commands in
4478 	 * this request the request is not yet complete.
4479 	 */
4480 	if (!status && !hci_req_is_complete(hdev))
4481 		return;
4482 
4483 	/* If this was the last command in a request the complete
4484 	 * callback would be found in hdev->sent_cmd instead of the
4485 	 * command queue (hdev->cmd_q).
4486 	 */
4487 	if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
4488 		*req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
4489 		return;
4490 	}
4491 
4492 	if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
4493 		*req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
4494 		return;
4495 	}
4496 
4497 	/* Remove all pending commands belonging to this request */
4498 	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4499 	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4500 		if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
4501 			__skb_queue_head(&hdev->cmd_q, skb);
4502 			break;
4503 		}
4504 
4505 		if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
4506 			*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
4507 		else
4508 			*req_complete = bt_cb(skb)->hci.req_complete;
4509 		kfree_skb(skb);
4510 	}
4511 	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4512 }
4513 
4514 static void hci_rx_work(struct work_struct *work)
4515 {
4516 	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4517 	struct sk_buff *skb;
4518 
4519 	BT_DBG("%s", hdev->name);
4520 
4521 	while ((skb = skb_dequeue(&hdev->rx_q))) {
4522 		/* Send copy to monitor */
4523 		hci_send_to_monitor(hdev, skb);
4524 
4525 		if (atomic_read(&hdev->promisc)) {
4526 			/* Send copy to the sockets */
4527 			hci_send_to_sock(hdev, skb);
4528 		}
4529 
4530 		/* If the device has been opened in HCI_USER_CHANNEL,
4531 		 * the userspace has exclusive access to device.
4532 		 * When device is HCI_INIT, we still need to process
4533 		 * the data packets to the driver in order
4534 		 * to complete its setup().
4535 		 */
4536 		if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
4537 		    !test_bit(HCI_INIT, &hdev->flags)) {
4538 			kfree_skb(skb);
4539 			continue;
4540 		}
4541 
4542 		if (test_bit(HCI_INIT, &hdev->flags)) {
4543 			/* Don't process data packets in this states. */
4544 			switch (hci_skb_pkt_type(skb)) {
4545 			case HCI_ACLDATA_PKT:
4546 			case HCI_SCODATA_PKT:
4547 			case HCI_ISODATA_PKT:
4548 				kfree_skb(skb);
4549 				continue;
4550 			}
4551 		}
4552 
4553 		/* Process frame */
4554 		switch (hci_skb_pkt_type(skb)) {
4555 		case HCI_EVENT_PKT:
4556 			BT_DBG("%s Event packet", hdev->name);
4557 			hci_event_packet(hdev, skb);
4558 			break;
4559 
4560 		case HCI_ACLDATA_PKT:
4561 			BT_DBG("%s ACL data packet", hdev->name);
4562 			hci_acldata_packet(hdev, skb);
4563 			break;
4564 
4565 		case HCI_SCODATA_PKT:
4566 			BT_DBG("%s SCO data packet", hdev->name);
4567 			hci_scodata_packet(hdev, skb);
4568 			break;
4569 
4570 		default:
4571 			kfree_skb(skb);
4572 			break;
4573 		}
4574 	}
4575 }
4576 
4577 static void hci_cmd_work(struct work_struct *work)
4578 {
4579 	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4580 	struct sk_buff *skb;
4581 
4582 	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4583 	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4584 
4585 	/* Send queued commands */
4586 	if (atomic_read(&hdev->cmd_cnt)) {
4587 		skb = skb_dequeue(&hdev->cmd_q);
4588 		if (!skb)
4589 			return;
4590 
4591 		kfree_skb(hdev->sent_cmd);
4592 
4593 		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4594 		if (hdev->sent_cmd) {
4595 			if (hci_req_status_pend(hdev))
4596 				hci_dev_set_flag(hdev, HCI_CMD_PENDING);
4597 			atomic_dec(&hdev->cmd_cnt);
4598 			hci_send_frame(hdev, skb);
4599 			if (test_bit(HCI_RESET, &hdev->flags))
4600 				cancel_delayed_work(&hdev->cmd_timer);
4601 			else
4602 				schedule_delayed_work(&hdev->cmd_timer,
4603 						      HCI_CMD_TIMEOUT);
4604 		} else {
4605 			skb_queue_head(&hdev->cmd_q, skb);
4606 			queue_work(hdev->workqueue, &hdev->cmd_work);
4607 		}
4608 	}
4609 }
4610