xref: /openbmc/linux/net/bluetooth/hci_core.c (revision 3b27d139)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2011 ProFUSION Embedded Systems
5 
6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License version 2 as
10    published by the Free Software Foundation;
11 
12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 
21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23    SOFTWARE IS DISCLAIMED.
24 */
25 
26 /* Bluetooth HCI core. */
27 
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <linux/crypto.h>
33 #include <asm/unaligned.h>
34 
35 #include <net/bluetooth/bluetooth.h>
36 #include <net/bluetooth/hci_core.h>
37 #include <net/bluetooth/l2cap.h>
38 #include <net/bluetooth/mgmt.h>
39 
40 #include "hci_request.h"
41 #include "hci_debugfs.h"
42 #include "smp.h"
43 
44 static void hci_rx_work(struct work_struct *work);
45 static void hci_cmd_work(struct work_struct *work);
46 static void hci_tx_work(struct work_struct *work);
47 
48 /* HCI device list */
49 LIST_HEAD(hci_dev_list);
50 DEFINE_RWLOCK(hci_dev_list_lock);
51 
52 /* HCI callback list */
53 LIST_HEAD(hci_cb_list);
54 DEFINE_MUTEX(hci_cb_list_lock);
55 
56 /* HCI ID Numbering */
57 static DEFINE_IDA(hci_index_ida);
58 
59 /* ----- HCI requests ----- */
60 
61 #define HCI_REQ_DONE	  0
62 #define HCI_REQ_PEND	  1
63 #define HCI_REQ_CANCELED  2
64 
65 #define hci_req_lock(d)		mutex_lock(&d->req_lock)
66 #define hci_req_unlock(d)	mutex_unlock(&d->req_lock)
67 
68 /* ---- HCI notifications ---- */
69 
70 static void hci_notify(struct hci_dev *hdev, int event)
71 {
72 	hci_sock_dev_event(hdev, event);
73 }
74 
75 /* ---- HCI debugfs entries ---- */
76 
77 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
78 			     size_t count, loff_t *ppos)
79 {
80 	struct hci_dev *hdev = file->private_data;
81 	char buf[3];
82 
83 	buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y': 'N';
84 	buf[1] = '\n';
85 	buf[2] = '\0';
86 	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
87 }
88 
89 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
90 			      size_t count, loff_t *ppos)
91 {
92 	struct hci_dev *hdev = file->private_data;
93 	struct sk_buff *skb;
94 	char buf[32];
95 	size_t buf_size = min(count, (sizeof(buf)-1));
96 	bool enable;
97 
98 	if (!test_bit(HCI_UP, &hdev->flags))
99 		return -ENETDOWN;
100 
101 	if (copy_from_user(buf, user_buf, buf_size))
102 		return -EFAULT;
103 
104 	buf[buf_size] = '\0';
105 	if (strtobool(buf, &enable))
106 		return -EINVAL;
107 
108 	if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
109 		return -EALREADY;
110 
111 	hci_req_lock(hdev);
112 	if (enable)
113 		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
114 				     HCI_CMD_TIMEOUT);
115 	else
116 		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
117 				     HCI_CMD_TIMEOUT);
118 	hci_req_unlock(hdev);
119 
120 	if (IS_ERR(skb))
121 		return PTR_ERR(skb);
122 
123 	kfree_skb(skb);
124 
125 	hci_dev_change_flag(hdev, HCI_DUT_MODE);
126 
127 	return count;
128 }
129 
130 static const struct file_operations dut_mode_fops = {
131 	.open		= simple_open,
132 	.read		= dut_mode_read,
133 	.write		= dut_mode_write,
134 	.llseek		= default_llseek,
135 };
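
/* The entry created for this fops in __hci_init() below typically appears
 * as <debugfs>/bluetooth/hci0/dut_mode (commonly under /sys/kernel/debug).
 * Writing any value accepted by strtobool() ("Y"/"N", "1"/"0") toggles
 * Device Under Test mode; reads return "Y\n" or "N\n" as produced by
 * dut_mode_read() above.
 */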
136 
137 /* ---- HCI requests ---- */
138 
139 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
140 				  struct sk_buff *skb)
141 {
142 	BT_DBG("%s result 0x%2.2x", hdev->name, result);
143 
144 	if (hdev->req_status == HCI_REQ_PEND) {
145 		hdev->req_result = result;
146 		hdev->req_status = HCI_REQ_DONE;
147 		if (skb)
148 			hdev->req_skb = skb_get(skb);
149 		wake_up_interruptible(&hdev->req_wait_q);
150 	}
151 }
152 
153 static void hci_req_cancel(struct hci_dev *hdev, int err)
154 {
155 	BT_DBG("%s err 0x%2.2x", hdev->name, err);
156 
157 	if (hdev->req_status == HCI_REQ_PEND) {
158 		hdev->req_result = err;
159 		hdev->req_status = HCI_REQ_CANCELED;
160 		wake_up_interruptible(&hdev->req_wait_q);
161 	}
162 }
163 
164 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
165 				  const void *param, u8 event, u32 timeout)
166 {
167 	DECLARE_WAITQUEUE(wait, current);
168 	struct hci_request req;
169 	struct sk_buff *skb;
170 	int err = 0;
171 
172 	BT_DBG("%s", hdev->name);
173 
174 	hci_req_init(&req, hdev);
175 
176 	hci_req_add_ev(&req, opcode, plen, param, event);
177 
178 	hdev->req_status = HCI_REQ_PEND;
179 
180 	add_wait_queue(&hdev->req_wait_q, &wait);
181 	set_current_state(TASK_INTERRUPTIBLE);
182 
183 	err = hci_req_run_skb(&req, hci_req_sync_complete);
184 	if (err < 0) {
185 		remove_wait_queue(&hdev->req_wait_q, &wait);
186 		set_current_state(TASK_RUNNING);
187 		return ERR_PTR(err);
188 	}
189 
190 	schedule_timeout(timeout);
191 
192 	remove_wait_queue(&hdev->req_wait_q, &wait);
193 
194 	if (signal_pending(current))
195 		return ERR_PTR(-EINTR);
196 
197 	switch (hdev->req_status) {
198 	case HCI_REQ_DONE:
199 		err = -bt_to_errno(hdev->req_result);
200 		break;
201 
202 	case HCI_REQ_CANCELED:
203 		err = -hdev->req_result;
204 		break;
205 
206 	default:
207 		err = -ETIMEDOUT;
208 		break;
209 	}
210 
211 	hdev->req_status = hdev->req_result = 0;
212 	skb = hdev->req_skb;
213 	hdev->req_skb = NULL;
214 
215 	BT_DBG("%s end: err %d", hdev->name, err);
216 
217 	if (err < 0) {
218 		kfree_skb(skb);
219 		return ERR_PTR(err);
220 	}
221 
222 	if (!skb)
223 		return ERR_PTR(-ENODATA);
224 
225 	return skb;
226 }
227 EXPORT_SYMBOL(__hci_cmd_sync_ev);
228 
229 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
230 			       const void *param, u32 timeout)
231 {
232 	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
233 }
234 EXPORT_SYMBOL(__hci_cmd_sync);
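
/* A minimal calling sketch (mirroring dut_mode_write() above): the request
 * runs synchronously and the returned skb, which carries the Command
 * Complete parameters, must be checked and freed by the caller.
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	kfree_skb(skb);
 */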
235 
236 /* Execute request and wait for completion. */
237 static int __hci_req_sync(struct hci_dev *hdev,
238 			  void (*func)(struct hci_request *req,
239 				      unsigned long opt),
240 			  unsigned long opt, __u32 timeout)
241 {
242 	struct hci_request req;
243 	DECLARE_WAITQUEUE(wait, current);
244 	int err = 0;
245 
246 	BT_DBG("%s start", hdev->name);
247 
248 	hci_req_init(&req, hdev);
249 
250 	hdev->req_status = HCI_REQ_PEND;
251 
252 	func(&req, opt);
253 
254 	add_wait_queue(&hdev->req_wait_q, &wait);
255 	set_current_state(TASK_INTERRUPTIBLE);
256 
257 	err = hci_req_run_skb(&req, hci_req_sync_complete);
258 	if (err < 0) {
259 		hdev->req_status = 0;
260 
261 		remove_wait_queue(&hdev->req_wait_q, &wait);
262 		set_current_state(TASK_RUNNING);
263 
264 		/* ENODATA means the HCI request command queue is empty.
265 		 * This can happen when a request with conditionals doesn't
266 		 * trigger any commands to be sent. This is normal behavior
267 		 * and should not trigger an error return.
268 		 */
269 		if (err == -ENODATA)
270 			return 0;
271 
272 		return err;
273 	}
274 
275 	schedule_timeout(timeout);
276 
277 	remove_wait_queue(&hdev->req_wait_q, &wait);
278 
279 	if (signal_pending(current))
280 		return -EINTR;
281 
282 	switch (hdev->req_status) {
283 	case HCI_REQ_DONE:
284 		err = -bt_to_errno(hdev->req_result);
285 		break;
286 
287 	case HCI_REQ_CANCELED:
288 		err = -hdev->req_result;
289 		break;
290 
291 	default:
292 		err = -ETIMEDOUT;
293 		break;
294 	}
295 
296 	hdev->req_status = hdev->req_result = 0;
297 
298 	BT_DBG("%s end: err %d", hdev->name, err);
299 
300 	return err;
301 }
302 
303 static int hci_req_sync(struct hci_dev *hdev,
304 			void (*req)(struct hci_request *req,
305 				    unsigned long opt),
306 			unsigned long opt, __u32 timeout)
307 {
308 	int ret;
309 
310 	if (!test_bit(HCI_UP, &hdev->flags))
311 		return -ENETDOWN;
312 
313 	/* Serialize all requests */
314 	hci_req_lock(hdev);
315 	ret = __hci_req_sync(hdev, req, opt, timeout);
316 	hci_req_unlock(hdev);
317 
318 	return ret;
319 }
320 
321 static void hci_reset_req(struct hci_request *req, unsigned long opt)
322 {
323 	BT_DBG("%s %ld", req->hdev->name, opt);
324 
325 	/* Reset device */
326 	set_bit(HCI_RESET, &req->hdev->flags);
327 	hci_req_add(req, HCI_OP_RESET, 0, NULL);
328 }
329 
330 static void bredr_init(struct hci_request *req)
331 {
332 	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
333 
334 	/* Read Local Supported Features */
335 	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
336 
337 	/* Read Local Version */
338 	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
339 
340 	/* Read BD Address */
341 	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
342 }
343 
344 static void amp_init1(struct hci_request *req)
345 {
346 	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
347 
348 	/* Read Local Version */
349 	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
350 
351 	/* Read Local Supported Commands */
352 	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
353 
354 	/* Read Local AMP Info */
355 	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
356 
357 	/* Read Data Block Size */
358 	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
359 
360 	/* Read Flow Control Mode */
361 	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
362 
363 	/* Read Location Data */
364 	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
365 }
366 
367 static void amp_init2(struct hci_request *req)
368 {
369 	/* Read Local Supported Features. Not all AMP controllers
370 	 * support this so it's placed conditionally in the second
371 	 * stage init.
372 	 */
373 	if (req->hdev->commands[14] & 0x20)
374 		hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
375 }
376 
377 static void hci_init1_req(struct hci_request *req, unsigned long opt)
378 {
379 	struct hci_dev *hdev = req->hdev;
380 
381 	BT_DBG("%s %ld", hdev->name, opt);
382 
383 	/* Reset */
384 	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
385 		hci_reset_req(req, 0);
386 
387 	switch (hdev->dev_type) {
388 	case HCI_BREDR:
389 		bredr_init(req);
390 		break;
391 
392 	case HCI_AMP:
393 		amp_init1(req);
394 		break;
395 
396 	default:
397 		BT_ERR("Unknown device type %d", hdev->dev_type);
398 		break;
399 	}
400 }
401 
402 static void bredr_setup(struct hci_request *req)
403 {
404 	__le16 param;
405 	__u8 flt_type;
406 
407 	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
408 	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
409 
410 	/* Read Class of Device */
411 	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
412 
413 	/* Read Local Name */
414 	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
415 
416 	/* Read Voice Setting */
417 	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
418 
419 	/* Read Number of Supported IAC */
420 	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
421 
422 	/* Read Current IAC LAP */
423 	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
424 
425 	/* Clear Event Filters */
426 	flt_type = HCI_FLT_CLEAR_ALL;
427 	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
428 
429 	/* Connection accept timeout ~20 secs (0x7d00 slots of 0.625 ms) */
430 	param = cpu_to_le16(0x7d00);
431 	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
432 }
433 
434 static void le_setup(struct hci_request *req)
435 {
436 	struct hci_dev *hdev = req->hdev;
437 
438 	/* Read LE Buffer Size */
439 	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
440 
441 	/* Read LE Local Supported Features */
442 	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
443 
444 	/* Read LE Supported States */
445 	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
446 
447 	/* Read LE White List Size */
448 	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
449 
450 	/* Clear LE White List */
451 	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
452 
453 	/* LE-only controllers have LE implicitly enabled */
454 	if (!lmp_bredr_capable(hdev))
455 		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
456 }
457 
458 static void hci_setup_event_mask(struct hci_request *req)
459 {
460 	struct hci_dev *hdev = req->hdev;
461 
462 	/* The second byte is 0xff instead of 0x9f (two reserved bits
463 	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
464 	 * command otherwise.
465 	 */
466 	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
467 
468 	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
469 	 * any event mask for pre-1.2 devices.
470 	 */
471 	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
472 		return;
473 
474 	if (lmp_bredr_capable(hdev)) {
475 		events[4] |= 0x01; /* Flow Specification Complete */
476 		events[4] |= 0x02; /* Inquiry Result with RSSI */
477 		events[4] |= 0x04; /* Read Remote Extended Features Complete */
478 		events[5] |= 0x08; /* Synchronous Connection Complete */
479 		events[5] |= 0x10; /* Synchronous Connection Changed */
480 	} else {
481 		/* Use a different default for LE-only devices */
482 		memset(events, 0, sizeof(events));
483 		events[0] |= 0x10; /* Disconnection Complete */
484 		events[1] |= 0x08; /* Read Remote Version Information Complete */
485 		events[1] |= 0x20; /* Command Complete */
486 		events[1] |= 0x40; /* Command Status */
487 		events[1] |= 0x80; /* Hardware Error */
488 		events[2] |= 0x04; /* Number of Completed Packets */
489 		events[3] |= 0x02; /* Data Buffer Overflow */
490 
491 		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
492 			events[0] |= 0x80; /* Encryption Change */
493 			events[5] |= 0x80; /* Encryption Key Refresh Complete */
494 		}
495 	}
496 
497 	if (lmp_inq_rssi_capable(hdev))
498 		events[4] |= 0x02; /* Inquiry Result with RSSI */
499 
500 	if (lmp_sniffsubr_capable(hdev))
501 		events[5] |= 0x20; /* Sniff Subrating */
502 
503 	if (lmp_pause_enc_capable(hdev))
504 		events[5] |= 0x80; /* Encryption Key Refresh Complete */
505 
506 	if (lmp_ext_inq_capable(hdev))
507 		events[5] |= 0x40; /* Extended Inquiry Result */
508 
509 	if (lmp_no_flush_capable(hdev))
510 		events[7] |= 0x01; /* Enhanced Flush Complete */
511 
512 	if (lmp_lsto_capable(hdev))
513 		events[6] |= 0x80; /* Link Supervision Timeout Changed */
514 
515 	if (lmp_ssp_capable(hdev)) {
516 		events[6] |= 0x01;	/* IO Capability Request */
517 		events[6] |= 0x02;	/* IO Capability Response */
518 		events[6] |= 0x04;	/* User Confirmation Request */
519 		events[6] |= 0x08;	/* User Passkey Request */
520 		events[6] |= 0x10;	/* Remote OOB Data Request */
521 		events[6] |= 0x20;	/* Simple Pairing Complete */
522 		events[7] |= 0x04;	/* User Passkey Notification */
523 		events[7] |= 0x08;	/* Keypress Notification */
524 		events[7] |= 0x10;	/* Remote Host Supported
525 					 * Features Notification
526 					 */
527 	}
528 
529 	if (lmp_le_capable(hdev))
530 		events[7] |= 0x20;	/* LE Meta-Event */
531 
532 	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
533 }
534 
535 static void hci_init2_req(struct hci_request *req, unsigned long opt)
536 {
537 	struct hci_dev *hdev = req->hdev;
538 
539 	if (hdev->dev_type == HCI_AMP)
540 		return amp_init2(req);
541 
542 	if (lmp_bredr_capable(hdev))
543 		bredr_setup(req);
544 	else
545 		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
546 
547 	if (lmp_le_capable(hdev))
548 		le_setup(req);
549 
550 	/* All Bluetooth 1.2 and later controllers should support the
551 	 * HCI command for reading the local supported commands.
552 	 *
553 	 * Unfortunately some controllers indicate Bluetooth 1.2 support,
554 	 * but do not have support for this command. If that is the case,
555 	 * the driver can quirk the behavior and skip reading the local
556 	 * supported commands.
557 	 */
558 	if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
559 	    !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
560 		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
561 
562 	if (lmp_ssp_capable(hdev)) {
563 		/* When SSP is available, the host features page should
564 		 * also be available. However, some controllers report
565 		 * max_page as 0 as long as SSP has not been enabled. To
566 		 * achieve proper debugging output, force max_page to at
567 		 * least 1.
568 		 */
569 		hdev->max_page = 0x01;
570 
571 		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
572 			u8 mode = 0x01;
573 
574 			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
575 				    sizeof(mode), &mode);
576 		} else {
577 			struct hci_cp_write_eir cp;
578 
579 			memset(hdev->eir, 0, sizeof(hdev->eir));
580 			memset(&cp, 0, sizeof(cp));
581 
582 			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
583 		}
584 	}
585 
586 	if (lmp_inq_rssi_capable(hdev) ||
587 	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
588 		u8 mode;
589 
590 		/* If Extended Inquiry Result events are supported, then
591 		 * they are clearly preferred over Inquiry Result with RSSI
592 		 * events.
593 		 */
594 		mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;
595 
596 		hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
597 	}
598 
599 	if (lmp_inq_tx_pwr_capable(hdev))
600 		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
601 
602 	if (lmp_ext_feat_capable(hdev)) {
603 		struct hci_cp_read_local_ext_features cp;
604 
605 		cp.page = 0x01;
606 		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
607 			    sizeof(cp), &cp);
608 	}
609 
610 	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
611 		u8 enable = 1;
612 		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
613 			    &enable);
614 	}
615 }
616 
617 static void hci_setup_link_policy(struct hci_request *req)
618 {
619 	struct hci_dev *hdev = req->hdev;
620 	struct hci_cp_write_def_link_policy cp;
621 	u16 link_policy = 0;
622 
623 	if (lmp_rswitch_capable(hdev))
624 		link_policy |= HCI_LP_RSWITCH;
625 	if (lmp_hold_capable(hdev))
626 		link_policy |= HCI_LP_HOLD;
627 	if (lmp_sniff_capable(hdev))
628 		link_policy |= HCI_LP_SNIFF;
629 	if (lmp_park_capable(hdev))
630 		link_policy |= HCI_LP_PARK;
631 
632 	cp.policy = cpu_to_le16(link_policy);
633 	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
634 }
635 
636 static void hci_set_le_support(struct hci_request *req)
637 {
638 	struct hci_dev *hdev = req->hdev;
639 	struct hci_cp_write_le_host_supported cp;
640 
641 	/* LE-only devices do not support explicit enablement */
642 	if (!lmp_bredr_capable(hdev))
643 		return;
644 
645 	memset(&cp, 0, sizeof(cp));
646 
647 	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
648 		cp.le = 0x01;
649 		cp.simul = 0x00;
650 	}
651 
652 	if (cp.le != lmp_host_le_capable(hdev))
653 		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
654 			    &cp);
655 }
656 
657 static void hci_set_event_mask_page_2(struct hci_request *req)
658 {
659 	struct hci_dev *hdev = req->hdev;
660 	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
661 
662 	/* If Connectionless Slave Broadcast master role is supported,
663 	 * enable all necessary events for it.
664 	 */
665 	if (lmp_csb_master_capable(hdev)) {
666 		events[1] |= 0x40;	/* Triggered Clock Capture */
667 		events[1] |= 0x80;	/* Synchronization Train Complete */
668 		events[2] |= 0x10;	/* Slave Page Response Timeout */
669 		events[2] |= 0x20;	/* CSB Channel Map Change */
670 	}
671 
672 	/* If Connectionless Slave Broadcast slave role is supported,
673 	 * enable all necessary events for it.
674 	 */
675 	if (lmp_csb_slave_capable(hdev)) {
676 		events[2] |= 0x01;	/* Synchronization Train Received */
677 		events[2] |= 0x02;	/* CSB Receive */
678 		events[2] |= 0x04;	/* CSB Timeout */
679 		events[2] |= 0x08;	/* Truncated Page Complete */
680 	}
681 
682 	/* Enable Authenticated Payload Timeout Expired event if supported */
683 	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
684 		events[2] |= 0x80;
685 
686 	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
687 }
688 
689 static void hci_init3_req(struct hci_request *req, unsigned long opt)
690 {
691 	struct hci_dev *hdev = req->hdev;
692 	u8 p;
693 
694 	hci_setup_event_mask(req);
695 
696 	if (hdev->commands[6] & 0x20) {
697 		struct hci_cp_read_stored_link_key cp;
698 
699 		bacpy(&cp.bdaddr, BDADDR_ANY);
700 		cp.read_all = 0x01;
701 		hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
702 	}
703 
704 	if (hdev->commands[5] & 0x10)
705 		hci_setup_link_policy(req);
706 
707 	if (hdev->commands[8] & 0x01)
708 		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
709 
710 	/* Some older Broadcom based Bluetooth 1.2 controllers do not
711 	 * support the Read Page Scan Type command. Check support for
712 	 * this command in the bit mask of supported commands.
713 	 */
714 	if (hdev->commands[13] & 0x01)
715 		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
716 
717 	if (lmp_le_capable(hdev)) {
718 		u8 events[8];
719 
720 		memset(events, 0, sizeof(events));
721 		events[0] = 0x0f;
722 
723 		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
724 			events[0] |= 0x10;	/* LE Long Term Key Request */
725 
726 		/* If controller supports the Connection Parameters Request
727 		 * Link Layer Procedure, enable the corresponding event.
728 		 */
729 		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
730 			events[0] |= 0x20;	/* LE Remote Connection
731 						 * Parameter Request
732 						 */
733 
734 		/* If the controller supports the Data Length Extension
735 		 * feature, enable the corresponding event.
736 		 */
737 		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
738 			events[0] |= 0x40;	/* LE Data Length Change */
739 
740 		/* If the controller supports Extended Scanner Filter
741 		 * Policies, enable the corresponding event.
742 		 */
743 		if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
744 			events[1] |= 0x04;	/* LE Direct Advertising
745 						 * Report
746 						 */
747 
748 		/* If the controller supports the LE Read Local P-256
749 		 * Public Key command, enable the corresponding event.
750 		 */
751 		if (hdev->commands[34] & 0x02)
752 			events[0] |= 0x80;	/* LE Read Local P-256
753 						 * Public Key Complete
754 						 */
755 
756 		/* If the controller supports the LE Generate DHKey
757 		 * command, enable the corresponding event.
758 		 */
759 		if (hdev->commands[34] & 0x04)
760 			events[1] |= 0x01;	/* LE Generate DHKey Complete */
761 
762 		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
763 			    events);
764 
765 		if (hdev->commands[25] & 0x40) {
766 			/* Read LE Advertising Channel TX Power */
767 			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
768 		}
769 
770 		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
771 			/* Read LE Maximum Data Length */
772 			hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);
773 
774 			/* Read LE Suggested Default Data Length */
775 			hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
776 		}
777 
778 		hci_set_le_support(req);
779 	}
780 
781 	/* Read features beyond page 1 if available */
782 	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
783 		struct hci_cp_read_local_ext_features cp;
784 
785 		cp.page = p;
786 		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
787 			    sizeof(cp), &cp);
788 	}
789 }
790 
791 static void hci_init4_req(struct hci_request *req, unsigned long opt)
792 {
793 	struct hci_dev *hdev = req->hdev;
794 
795 	/* Some Broadcom based Bluetooth controllers do not support the
796 	 * Delete Stored Link Key command. They are clearly indicating its
797 	 * absence in the bit mask of supported commands.
798 	 *
799 	 * Check the supported commands and send this command only if it is
800 	 * marked as supported. If not supported, assume that the controller
801 	 * does not have actual support for stored link keys, which makes this
802 	 * command redundant anyway.
803 	 *
804 	 * Some controllers indicate that they support deleting stored
805 	 * link keys, but they don't. The quirk lets a driver
806 	 * just disable this command.
807 	 */
808 	if (hdev->commands[6] & 0x80 &&
809 	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
810 		struct hci_cp_delete_stored_link_key cp;
811 
812 		bacpy(&cp.bdaddr, BDADDR_ANY);
813 		cp.delete_all = 0x01;
814 		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
815 			    sizeof(cp), &cp);
816 	}
817 
818 	/* Set event mask page 2 if the HCI command for it is supported */
819 	if (hdev->commands[22] & 0x04)
820 		hci_set_event_mask_page_2(req);
821 
822 	/* Read local codec list if the HCI command is supported */
823 	if (hdev->commands[29] & 0x20)
824 		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
825 
826 	/* Get MWS transport configuration if the HCI command is supported */
827 	if (hdev->commands[30] & 0x08)
828 		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
829 
830 	/* Check for Synchronization Train support */
831 	if (lmp_sync_train_capable(hdev))
832 		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
833 
834 	/* Enable Secure Connections if supported and configured */
835 	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
836 	    bredr_sc_enabled(hdev)) {
837 		u8 support = 0x01;
838 
839 		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
840 			    sizeof(support), &support);
841 	}
842 }
843 
844 static int __hci_init(struct hci_dev *hdev)
845 {
846 	int err;
847 
848 	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
849 	if (err < 0)
850 		return err;
851 
852 	/* The Device Under Test (DUT) mode is special and available for
853 	 * all controller types. So just create it early on.
854 	 */
855 	if (hci_dev_test_flag(hdev, HCI_SETUP)) {
856 		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
857 				    &dut_mode_fops);
858 	}
859 
860 	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
861 	if (err < 0)
862 		return err;
863 
864 	/* The HCI_BREDR device type covers single-mode LE, single-mode
865 	 * BR/EDR and dual-mode BR/EDR/LE controllers. AMP controllers
866 	 * only need the first two stages of init.
867 	 */
868 	if (hdev->dev_type != HCI_BREDR)
869 		return 0;
870 
871 	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
872 	if (err < 0)
873 		return err;
874 
875 	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
876 	if (err < 0)
877 		return err;
878 
879 	/* This function is only called when the controller is actually in
880 	 * configured state. When the controller is marked as unconfigured,
881 	 * this initialization procedure is not run.
882 	 *
883 	 * It means that it is possible that a controller runs through its
884 	 * setup phase and then discovers missing settings. If that is the
885 	 * case, then this function will not be called. It then will only
886 	 * be called during the config phase.
887 	 *
888 	 * So only when in setup phase or config phase, create the debugfs
889 	 * entries and register the SMP channels.
890 	 */
891 	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
892 	    !hci_dev_test_flag(hdev, HCI_CONFIG))
893 		return 0;
894 
895 	hci_debugfs_create_common(hdev);
896 
897 	if (lmp_bredr_capable(hdev))
898 		hci_debugfs_create_bredr(hdev);
899 
900 	if (lmp_le_capable(hdev))
901 		hci_debugfs_create_le(hdev);
902 
903 	return 0;
904 }
905 
906 static void hci_init0_req(struct hci_request *req, unsigned long opt)
907 {
908 	struct hci_dev *hdev = req->hdev;
909 
910 	BT_DBG("%s %ld", hdev->name, opt);
911 
912 	/* Reset */
913 	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
914 		hci_reset_req(req, 0);
915 
916 	/* Read Local Version */
917 	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
918 
919 	/* Read BD Address */
920 	if (hdev->set_bdaddr)
921 		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
922 }
923 
924 static int __hci_unconf_init(struct hci_dev *hdev)
925 {
926 	int err;
927 
928 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
929 		return 0;
930 
931 	err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
932 	if (err < 0)
933 		return err;
934 
935 	return 0;
936 }
937 
938 static void hci_scan_req(struct hci_request *req, unsigned long opt)
939 {
940 	__u8 scan = opt;
941 
942 	BT_DBG("%s %x", req->hdev->name, scan);
943 
944 	/* Inquiry and Page scans */
945 	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
946 }
947 
948 static void hci_auth_req(struct hci_request *req, unsigned long opt)
949 {
950 	__u8 auth = opt;
951 
952 	BT_DBG("%s %x", req->hdev->name, auth);
953 
954 	/* Authentication */
955 	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
956 }
957 
958 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
959 {
960 	__u8 encrypt = opt;
961 
962 	BT_DBG("%s %x", req->hdev->name, encrypt);
963 
964 	/* Encryption */
965 	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
966 }
967 
968 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
969 {
970 	__le16 policy = cpu_to_le16(opt);
971 
972 	BT_DBG("%s %x", req->hdev->name, policy);
973 
974 	/* Default link policy */
975 	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
976 }
977 
978 /* Get HCI device by index.
979  * Device is held on return. */
980 struct hci_dev *hci_dev_get(int index)
981 {
982 	struct hci_dev *hdev = NULL, *d;
983 
984 	BT_DBG("%d", index);
985 
986 	if (index < 0)
987 		return NULL;
988 
989 	read_lock(&hci_dev_list_lock);
990 	list_for_each_entry(d, &hci_dev_list, list) {
991 		if (d->id == index) {
992 			hdev = hci_dev_hold(d);
993 			break;
994 		}
995 	}
996 	read_unlock(&hci_dev_list_lock);
997 	return hdev;
998 }
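
/* A sketch of the expected calling pattern: the reference taken here via
 * hci_dev_hold() must be balanced by the caller with hci_dev_put(), as the
 * ioctl helpers below do.
 *
 *	struct hci_dev *hdev = hci_dev_get(index);
 *	if (!hdev)
 *		return -ENODEV;
 *	...
 *	hci_dev_put(hdev);
 */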
999 
1000 /* ---- Inquiry support ---- */
1001 
1002 bool hci_discovery_active(struct hci_dev *hdev)
1003 {
1004 	struct discovery_state *discov = &hdev->discovery;
1005 
1006 	switch (discov->state) {
1007 	case DISCOVERY_FINDING:
1008 	case DISCOVERY_RESOLVING:
1009 		return true;
1010 
1011 	default:
1012 		return false;
1013 	}
1014 }
1015 
1016 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1017 {
1018 	int old_state = hdev->discovery.state;
1019 
1020 	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1021 
1022 	if (old_state == state)
1023 		return;
1024 
1025 	hdev->discovery.state = state;
1026 
1027 	switch (state) {
1028 	case DISCOVERY_STOPPED:
1029 		hci_update_background_scan(hdev);
1030 
1031 		if (old_state != DISCOVERY_STARTING)
1032 			mgmt_discovering(hdev, 0);
1033 		break;
1034 	case DISCOVERY_STARTING:
1035 		break;
1036 	case DISCOVERY_FINDING:
1037 		mgmt_discovering(hdev, 1);
1038 		break;
1039 	case DISCOVERY_RESOLVING:
1040 		break;
1041 	case DISCOVERY_STOPPING:
1042 		break;
1043 	}
1044 }
1045 
1046 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1047 {
1048 	struct discovery_state *cache = &hdev->discovery;
1049 	struct inquiry_entry *p, *n;
1050 
1051 	list_for_each_entry_safe(p, n, &cache->all, all) {
1052 		list_del(&p->all);
1053 		kfree(p);
1054 	}
1055 
1056 	INIT_LIST_HEAD(&cache->unknown);
1057 	INIT_LIST_HEAD(&cache->resolve);
1058 }
1059 
1060 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1061 					       bdaddr_t *bdaddr)
1062 {
1063 	struct discovery_state *cache = &hdev->discovery;
1064 	struct inquiry_entry *e;
1065 
1066 	BT_DBG("cache %p, %pMR", cache, bdaddr);
1067 
1068 	list_for_each_entry(e, &cache->all, all) {
1069 		if (!bacmp(&e->data.bdaddr, bdaddr))
1070 			return e;
1071 	}
1072 
1073 	return NULL;
1074 }
1075 
1076 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1077 						       bdaddr_t *bdaddr)
1078 {
1079 	struct discovery_state *cache = &hdev->discovery;
1080 	struct inquiry_entry *e;
1081 
1082 	BT_DBG("cache %p, %pMR", cache, bdaddr);
1083 
1084 	list_for_each_entry(e, &cache->unknown, list) {
1085 		if (!bacmp(&e->data.bdaddr, bdaddr))
1086 			return e;
1087 	}
1088 
1089 	return NULL;
1090 }
1091 
1092 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1093 						       bdaddr_t *bdaddr,
1094 						       int state)
1095 {
1096 	struct discovery_state *cache = &hdev->discovery;
1097 	struct inquiry_entry *e;
1098 
1099 	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1100 
1101 	list_for_each_entry(e, &cache->resolve, list) {
1102 		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1103 			return e;
1104 		if (!bacmp(&e->data.bdaddr, bdaddr))
1105 			return e;
1106 	}
1107 
1108 	return NULL;
1109 }
1110 
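/* Re-insert the entry into the resolve list, keeping it ordered by signal
 * strength (smallest |RSSI|, i.e. strongest signal, first) so that nearby
 * devices get their names resolved first.  Entries whose name resolution is
 * already pending are skipped over when picking the insertion point.
 */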
1111 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
1112 				      struct inquiry_entry *ie)
1113 {
1114 	struct discovery_state *cache = &hdev->discovery;
1115 	struct list_head *pos = &cache->resolve;
1116 	struct inquiry_entry *p;
1117 
1118 	list_del(&ie->list);
1119 
1120 	list_for_each_entry(p, &cache->resolve, list) {
1121 		if (p->name_state != NAME_PENDING &&
1122 		    abs(p->data.rssi) >= abs(ie->data.rssi))
1123 			break;
1124 		pos = &p->list;
1125 	}
1126 
1127 	list_add(&ie->list, pos);
1128 }
1129 
1130 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1131 			     bool name_known)
1132 {
1133 	struct discovery_state *cache = &hdev->discovery;
1134 	struct inquiry_entry *ie;
1135 	u32 flags = 0;
1136 
1137 	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1138 
1139 	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
1140 
1141 	if (!data->ssp_mode)
1142 		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1143 
1144 	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
1145 	if (ie) {
1146 		if (!ie->data.ssp_mode)
1147 			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1148 
1149 		if (ie->name_state == NAME_NEEDED &&
1150 		    data->rssi != ie->data.rssi) {
1151 			ie->data.rssi = data->rssi;
1152 			hci_inquiry_cache_update_resolve(hdev, ie);
1153 		}
1154 
1155 		goto update;
1156 	}
1157 
1158 	/* Entry not in the cache. Add new one. */
1159 	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
1160 	if (!ie) {
1161 		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1162 		goto done;
1163 	}
1164 
1165 	list_add(&ie->all, &cache->all);
1166 
1167 	if (name_known) {
1168 		ie->name_state = NAME_KNOWN;
1169 	} else {
1170 		ie->name_state = NAME_NOT_KNOWN;
1171 		list_add(&ie->list, &cache->unknown);
1172 	}
1173 
1174 update:
1175 	if (name_known && ie->name_state != NAME_KNOWN &&
1176 	    ie->name_state != NAME_PENDING) {
1177 		ie->name_state = NAME_KNOWN;
1178 		list_del(&ie->list);
1179 	}
1180 
1181 	memcpy(&ie->data, data, sizeof(*data));
1182 	ie->timestamp = jiffies;
1183 	cache->timestamp = jiffies;
1184 
1185 	if (ie->name_state == NAME_NOT_KNOWN)
1186 		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1187 
1188 done:
1189 	return flags;
1190 }
1191 
1192 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1193 {
1194 	struct discovery_state *cache = &hdev->discovery;
1195 	struct inquiry_info *info = (struct inquiry_info *) buf;
1196 	struct inquiry_entry *e;
1197 	int copied = 0;
1198 
1199 	list_for_each_entry(e, &cache->all, all) {
1200 		struct inquiry_data *data = &e->data;
1201 
1202 		if (copied >= num)
1203 			break;
1204 
1205 		bacpy(&info->bdaddr, &data->bdaddr);
1206 		info->pscan_rep_mode	= data->pscan_rep_mode;
1207 		info->pscan_period_mode	= data->pscan_period_mode;
1208 		info->pscan_mode	= data->pscan_mode;
1209 		memcpy(info->dev_class, data->dev_class, 3);
1210 		info->clock_offset	= data->clock_offset;
1211 
1212 		info++;
1213 		copied++;
1214 	}
1215 
1216 	BT_DBG("cache %p, copied %d", cache, copied);
1217 	return copied;
1218 }
1219 
1220 static void hci_inq_req(struct hci_request *req, unsigned long opt)
1221 {
1222 	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
1223 	struct hci_dev *hdev = req->hdev;
1224 	struct hci_cp_inquiry cp;
1225 
1226 	BT_DBG("%s", hdev->name);
1227 
1228 	if (test_bit(HCI_INQUIRY, &hdev->flags))
1229 		return;
1230 
1231 	/* Start Inquiry */
1232 	memcpy(&cp.lap, &ir->lap, 3);
1233 	cp.length  = ir->length;
1234 	cp.num_rsp = ir->num_rsp;
1235 	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1236 }
1237 
1238 int hci_inquiry(void __user *arg)
1239 {
1240 	__u8 __user *ptr = arg;
1241 	struct hci_inquiry_req ir;
1242 	struct hci_dev *hdev;
1243 	int err = 0, do_inquiry = 0, max_rsp;
1244 	long timeo;
1245 	__u8 *buf;
1246 
1247 	if (copy_from_user(&ir, ptr, sizeof(ir)))
1248 		return -EFAULT;
1249 
1250 	hdev = hci_dev_get(ir.dev_id);
1251 	if (!hdev)
1252 		return -ENODEV;
1253 
1254 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1255 		err = -EBUSY;
1256 		goto done;
1257 	}
1258 
1259 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1260 		err = -EOPNOTSUPP;
1261 		goto done;
1262 	}
1263 
1264 	if (hdev->dev_type != HCI_BREDR) {
1265 		err = -EOPNOTSUPP;
1266 		goto done;
1267 	}
1268 
1269 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1270 		err = -EOPNOTSUPP;
1271 		goto done;
1272 	}
1273 
1274 	hci_dev_lock(hdev);
1275 	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
1276 	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1277 		hci_inquiry_cache_flush(hdev);
1278 		do_inquiry = 1;
1279 	}
1280 	hci_dev_unlock(hdev);
1281 
1282 	timeo = ir.length * msecs_to_jiffies(2000);
1283 
1284 	if (do_inquiry) {
1285 		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1286 				   timeo);
1287 		if (err < 0)
1288 			goto done;
1289 
1290 		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1291 		 * cleared). If it is interrupted by a signal, return -EINTR.
1292 		 */
1293 		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
1294 				TASK_INTERRUPTIBLE))
1295 			return -EINTR;
1296 	}
1297 
1298 	/* For an unlimited number of responses, use a buffer with
1299 	 * 255 entries.
1300 	 */
1301 	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1302 
1303 	/* cache_dump can't sleep. Therefore we allocate a temporary buffer
1304 	 * and then copy it to user space.
1305 	 */
1306 	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
1307 	if (!buf) {
1308 		err = -ENOMEM;
1309 		goto done;
1310 	}
1311 
1312 	hci_dev_lock(hdev);
1313 	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
1314 	hci_dev_unlock(hdev);
1315 
1316 	BT_DBG("num_rsp %d", ir.num_rsp);
1317 
1318 	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1319 		ptr += sizeof(ir);
1320 		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
1321 				 ir.num_rsp))
1322 			err = -EFAULT;
1323 	} else
1324 		err = -EFAULT;
1325 
1326 	kfree(buf);
1327 
1328 done:
1329 	hci_dev_put(hdev);
1330 	return err;
1331 }
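
/* hci_inquiry() backs the HCIINQUIRY ioctl on HCI sockets.  A rough
 * userspace sketch (illustrative only, error handling omitted; the LAP is
 * the General Inquiry Access Code 0x9e8b33 in little-endian order):
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[8];
 *	} buf = {
 *		.ir = {
 *			.dev_id  = 0,
 *			.lap     = { 0x33, 0x8b, 0x9e },
 *			.length  = 8,
 *			.num_rsp = 8,
 *		},
 *	};
 *
 *	ioctl(hci_sock, HCIINQUIRY, &buf);
 *
 * On return, buf.ir.num_rsp holds how many inquiry_info entries were copied
 * back from the cache dumped above.
 */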
1332 
1333 static int hci_dev_do_open(struct hci_dev *hdev)
1334 {
1335 	int ret = 0;
1336 
1337 	BT_DBG("%s %p", hdev->name, hdev);
1338 
1339 	hci_req_lock(hdev);
1340 
1341 	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
1342 		ret = -ENODEV;
1343 		goto done;
1344 	}
1345 
1346 	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1347 	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
1348 		/* Check for rfkill but allow the HCI setup stage to
1349 		 * proceed (which in itself doesn't cause any RF activity).
1350 		 */
1351 		if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
1352 			ret = -ERFKILL;
1353 			goto done;
1354 		}
1355 
1356 		/* Check for valid public address or a configured static
1357 		 * random address, but let the HCI setup proceed to
1358 		 * be able to determine if there is a public address
1359 		 * or not.
1360 		 *
1361 		 * In case of user channel usage, it is not important
1362 		 * if a public address or static random address is
1363 		 * available.
1364 		 *
1365 		 * This check is only valid for BR/EDR controllers
1366 		 * since AMP controllers do not have an address.
1367 		 */
1368 		if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1369 		    hdev->dev_type == HCI_BREDR &&
1370 		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1371 		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1372 			ret = -EADDRNOTAVAIL;
1373 			goto done;
1374 		}
1375 	}
1376 
1377 	if (test_bit(HCI_UP, &hdev->flags)) {
1378 		ret = -EALREADY;
1379 		goto done;
1380 	}
1381 
1382 	if (hdev->open(hdev)) {
1383 		ret = -EIO;
1384 		goto done;
1385 	}
1386 
1387 	atomic_set(&hdev->cmd_cnt, 1);
1388 	set_bit(HCI_INIT, &hdev->flags);
1389 
1390 	if (hci_dev_test_flag(hdev, HCI_SETUP)) {
1391 		if (hdev->setup)
1392 			ret = hdev->setup(hdev);
1393 
1394 		/* The transport driver can set these quirks before
1395 		 * creating the HCI device or in its setup callback.
1396 		 *
1397 		 * In case any of them is set, the controller has to
1398 		 * start up as unconfigured.
1399 		 */
1400 		if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
1401 		    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
1402 			hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
1403 
1404 		/* For an unconfigured controller it is required to
1405 		 * read at least the version information provided by
1406 		 * the Read Local Version Information command.
1407 		 *
1408 		 * If the set_bdaddr driver callback is provided, then
1409 		 * also the original Bluetooth public device address
1410 		 * will be read using the Read BD Address command.
1411 		 */
1412 		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1413 			ret = __hci_unconf_init(hdev);
1414 	}
1415 
1416 	if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
1417 		/* If public address change is configured, ensure that
1418 		 * the address gets programmed. If the driver does not
1419 		 * support changing the public address, fail the power
1420 		 * on procedure.
1421 		 */
1422 		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1423 		    hdev->set_bdaddr)
1424 			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1425 		else
1426 			ret = -EADDRNOTAVAIL;
1427 	}
1428 
1429 	if (!ret) {
1430 		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1431 		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
1432 			ret = __hci_init(hdev);
1433 	}
1434 
1435 	clear_bit(HCI_INIT, &hdev->flags);
1436 
1437 	if (!ret) {
1438 		hci_dev_hold(hdev);
1439 		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1440 		set_bit(HCI_UP, &hdev->flags);
1441 		hci_notify(hdev, HCI_DEV_UP);
1442 		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1443 		    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
1444 		    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1445 		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1446 		    hdev->dev_type == HCI_BREDR) {
1447 			hci_dev_lock(hdev);
1448 			mgmt_powered(hdev, 1);
1449 			hci_dev_unlock(hdev);
1450 		}
1451 	} else {
1452 		/* Init failed, cleanup */
1453 		flush_work(&hdev->tx_work);
1454 		flush_work(&hdev->cmd_work);
1455 		flush_work(&hdev->rx_work);
1456 
1457 		skb_queue_purge(&hdev->cmd_q);
1458 		skb_queue_purge(&hdev->rx_q);
1459 
1460 		if (hdev->flush)
1461 			hdev->flush(hdev);
1462 
1463 		if (hdev->sent_cmd) {
1464 			kfree_skb(hdev->sent_cmd);
1465 			hdev->sent_cmd = NULL;
1466 		}
1467 
1468 		hdev->close(hdev);
1469 		hdev->flags &= BIT(HCI_RAW);
1470 	}
1471 
1472 done:
1473 	hci_req_unlock(hdev);
1474 	return ret;
1475 }
1476 
1477 /* ---- HCI ioctl helpers ---- */
1478 
1479 int hci_dev_open(__u16 dev)
1480 {
1481 	struct hci_dev *hdev;
1482 	int err;
1483 
1484 	hdev = hci_dev_get(dev);
1485 	if (!hdev)
1486 		return -ENODEV;
1487 
1488 	/* Devices that are marked as unconfigured can only be powered
1489 	 * up as user channel. Trying to bring them up as normal devices
1490 	 * will result in a failure. Only user channel operation is
1491 	 * possible.
1492 	 *
1493 	 * When this function is called for a user channel, the flag
1494 	 * HCI_USER_CHANNEL will be set first before attempting to
1495 	 * open the device.
1496 	 */
1497 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1498 	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1499 		err = -EOPNOTSUPP;
1500 		goto done;
1501 	}
1502 
1503 	/* We need to ensure that no other power on/off work is pending
1504 	 * before proceeding to call hci_dev_do_open. This is
1505 	 * particularly important if the setup procedure has not yet
1506 	 * completed.
1507 	 */
1508 	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1509 		cancel_delayed_work(&hdev->power_off);
1510 
1511 	/* After this call it is guaranteed that the setup procedure
1512 	 * has finished. This means that error conditions like RFKILL
1513 	 * or no valid public or static random address apply.
1514 	 */
1515 	flush_workqueue(hdev->req_workqueue);
1516 
1517 	/* For controllers not using the management interface and that
1518 	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1519 	 * so that pairing works for them. Once the management interface
1520 	 * is in use this bit will be cleared again and userspace has
1521 	 * to explicitly enable it.
1522 	 */
1523 	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1524 	    !hci_dev_test_flag(hdev, HCI_MGMT))
1525 		hci_dev_set_flag(hdev, HCI_BONDABLE);
1526 
1527 	err = hci_dev_do_open(hdev);
1528 
1529 done:
1530 	hci_dev_put(hdev);
1531 	return err;
1532 }
1533 
1534 /* This function requires the caller holds hdev->lock */
1535 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1536 {
1537 	struct hci_conn_params *p;
1538 
1539 	list_for_each_entry(p, &hdev->le_conn_params, list) {
1540 		if (p->conn) {
1541 			hci_conn_drop(p->conn);
1542 			hci_conn_put(p->conn);
1543 			p->conn = NULL;
1544 		}
1545 		list_del_init(&p->action);
1546 	}
1547 
1548 	BT_DBG("All LE pending actions cleared");
1549 }
1550 
1551 static int hci_dev_do_close(struct hci_dev *hdev)
1552 {
1553 	BT_DBG("%s %p", hdev->name, hdev);
1554 
1555 	if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1556 	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1557 	    test_bit(HCI_UP, &hdev->flags)) {
1558 		/* Execute vendor specific shutdown routine */
1559 		if (hdev->shutdown)
1560 			hdev->shutdown(hdev);
1561 	}
1562 
1563 	cancel_delayed_work(&hdev->power_off);
1564 
1565 	hci_req_cancel(hdev, ENODEV);
1566 	hci_req_lock(hdev);
1567 
1568 	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1569 		cancel_delayed_work_sync(&hdev->cmd_timer);
1570 		hci_req_unlock(hdev);
1571 		return 0;
1572 	}
1573 
1574 	/* Flush RX and TX works */
1575 	flush_work(&hdev->tx_work);
1576 	flush_work(&hdev->rx_work);
1577 
1578 	if (hdev->discov_timeout > 0) {
1579 		cancel_delayed_work(&hdev->discov_off);
1580 		hdev->discov_timeout = 0;
1581 		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1582 		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1583 	}
1584 
1585 	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1586 		cancel_delayed_work(&hdev->service_cache);
1587 
1588 	cancel_delayed_work_sync(&hdev->le_scan_disable);
1589 	cancel_delayed_work_sync(&hdev->le_scan_restart);
1590 
1591 	if (hci_dev_test_flag(hdev, HCI_MGMT))
1592 		cancel_delayed_work_sync(&hdev->rpa_expired);
1593 
1594 	if (hdev->adv_instance_timeout) {
1595 		cancel_delayed_work_sync(&hdev->adv_instance_expire);
1596 		hdev->adv_instance_timeout = 0;
1597 	}
1598 
1599 	/* Avoid potential lockdep warnings from the *_flush() calls by
1600 	 * ensuring the workqueue is empty up front.
1601 	 */
1602 	drain_workqueue(hdev->workqueue);
1603 
1604 	hci_dev_lock(hdev);
1605 
1606 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1607 
1608 	if (!hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
1609 		if (hdev->dev_type == HCI_BREDR)
1610 			mgmt_powered(hdev, 0);
1611 	}
1612 
1613 	hci_inquiry_cache_flush(hdev);
1614 	hci_pend_le_actions_clear(hdev);
1615 	hci_conn_hash_flush(hdev);
1616 	hci_dev_unlock(hdev);
1617 
1618 	smp_unregister(hdev);
1619 
1620 	hci_notify(hdev, HCI_DEV_DOWN);
1621 
1622 	if (hdev->flush)
1623 		hdev->flush(hdev);
1624 
1625 	/* Reset device */
1626 	skb_queue_purge(&hdev->cmd_q);
1627 	atomic_set(&hdev->cmd_cnt, 1);
1628 	if (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
1629 	    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1630 	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1631 		set_bit(HCI_INIT, &hdev->flags);
1632 		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1633 		clear_bit(HCI_INIT, &hdev->flags);
1634 	}
1635 
1636 	/* Flush cmd work */
1637 	flush_work(&hdev->cmd_work);
1638 
1639 	/* Drop queues */
1640 	skb_queue_purge(&hdev->rx_q);
1641 	skb_queue_purge(&hdev->cmd_q);
1642 	skb_queue_purge(&hdev->raw_q);
1643 
1644 	/* Drop last sent command */
1645 	if (hdev->sent_cmd) {
1646 		cancel_delayed_work_sync(&hdev->cmd_timer);
1647 		kfree_skb(hdev->sent_cmd);
1648 		hdev->sent_cmd = NULL;
1649 	}
1650 
1651 	/* After this point our queues are empty
1652 	 * and no tasks are scheduled. */
1653 	hdev->close(hdev);
1654 
1655 	/* Clear flags */
1656 	hdev->flags &= BIT(HCI_RAW);
1657 	hci_dev_clear_volatile_flags(hdev);
1658 
1659 	/* Controller radio is available but is currently powered down */
1660 	hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1661 
1662 	memset(hdev->eir, 0, sizeof(hdev->eir));
1663 	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1664 	bacpy(&hdev->random_addr, BDADDR_ANY);
1665 
1666 	hci_req_unlock(hdev);
1667 
1668 	hci_dev_put(hdev);
1669 	return 0;
1670 }
1671 
1672 int hci_dev_close(__u16 dev)
1673 {
1674 	struct hci_dev *hdev;
1675 	int err;
1676 
1677 	hdev = hci_dev_get(dev);
1678 	if (!hdev)
1679 		return -ENODEV;
1680 
1681 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1682 		err = -EBUSY;
1683 		goto done;
1684 	}
1685 
1686 	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1687 		cancel_delayed_work(&hdev->power_off);
1688 
1689 	err = hci_dev_do_close(hdev);
1690 
1691 done:
1692 	hci_dev_put(hdev);
1693 	return err;
1694 }
1695 
1696 static int hci_dev_do_reset(struct hci_dev *hdev)
1697 {
1698 	int ret;
1699 
1700 	BT_DBG("%s %p", hdev->name, hdev);
1701 
1702 	hci_req_lock(hdev);
1703 
1704 	/* Drop queues */
1705 	skb_queue_purge(&hdev->rx_q);
1706 	skb_queue_purge(&hdev->cmd_q);
1707 
1708 	/* Avoid potential lockdep warnings from the *_flush() calls by
1709 	 * ensuring the workqueue is empty up front.
1710 	 */
1711 	drain_workqueue(hdev->workqueue);
1712 
1713 	hci_dev_lock(hdev);
1714 	hci_inquiry_cache_flush(hdev);
1715 	hci_conn_hash_flush(hdev);
1716 	hci_dev_unlock(hdev);
1717 
1718 	if (hdev->flush)
1719 		hdev->flush(hdev);
1720 
1721 	atomic_set(&hdev->cmd_cnt, 1);
1722 	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1723 
1724 	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1725 
1726 	hci_req_unlock(hdev);
1727 	return ret;
1728 }
1729 
1730 int hci_dev_reset(__u16 dev)
1731 {
1732 	struct hci_dev *hdev;
1733 	int err;
1734 
1735 	hdev = hci_dev_get(dev);
1736 	if (!hdev)
1737 		return -ENODEV;
1738 
1739 	if (!test_bit(HCI_UP, &hdev->flags)) {
1740 		err = -ENETDOWN;
1741 		goto done;
1742 	}
1743 
1744 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1745 		err = -EBUSY;
1746 		goto done;
1747 	}
1748 
1749 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1750 		err = -EOPNOTSUPP;
1751 		goto done;
1752 	}
1753 
1754 	err = hci_dev_do_reset(hdev);
1755 
1756 done:
1757 	hci_dev_put(hdev);
1758 	return err;
1759 }
1760 
1761 int hci_dev_reset_stat(__u16 dev)
1762 {
1763 	struct hci_dev *hdev;
1764 	int ret = 0;
1765 
1766 	hdev = hci_dev_get(dev);
1767 	if (!hdev)
1768 		return -ENODEV;
1769 
1770 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1771 		ret = -EBUSY;
1772 		goto done;
1773 	}
1774 
1775 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1776 		ret = -EOPNOTSUPP;
1777 		goto done;
1778 	}
1779 
1780 	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1781 
1782 done:
1783 	hci_dev_put(hdev);
1784 	return ret;
1785 }
1786 
1787 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1788 {
1789 	bool conn_changed, discov_changed;
1790 
1791 	BT_DBG("%s scan 0x%02x", hdev->name, scan);
1792 
1793 	if ((scan & SCAN_PAGE))
1794 		conn_changed = !hci_dev_test_and_set_flag(hdev,
1795 							  HCI_CONNECTABLE);
1796 	else
1797 		conn_changed = hci_dev_test_and_clear_flag(hdev,
1798 							   HCI_CONNECTABLE);
1799 
1800 	if ((scan & SCAN_INQUIRY)) {
1801 		discov_changed = !hci_dev_test_and_set_flag(hdev,
1802 							    HCI_DISCOVERABLE);
1803 	} else {
1804 		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1805 		discov_changed = hci_dev_test_and_clear_flag(hdev,
1806 							     HCI_DISCOVERABLE);
1807 	}
1808 
1809 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
1810 		return;
1811 
1812 	if (conn_changed || discov_changed) {
1813 		/* In case this was disabled through mgmt */
1814 		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
1815 
1816 		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1817 			mgmt_update_adv_data(hdev);
1818 
1819 		mgmt_new_settings(hdev);
1820 	}
1821 }
1822 
1823 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1824 {
1825 	struct hci_dev *hdev;
1826 	struct hci_dev_req dr;
1827 	int err = 0;
1828 
1829 	if (copy_from_user(&dr, arg, sizeof(dr)))
1830 		return -EFAULT;
1831 
1832 	hdev = hci_dev_get(dr.dev_id);
1833 	if (!hdev)
1834 		return -ENODEV;
1835 
1836 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1837 		err = -EBUSY;
1838 		goto done;
1839 	}
1840 
1841 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1842 		err = -EOPNOTSUPP;
1843 		goto done;
1844 	}
1845 
1846 	if (hdev->dev_type != HCI_BREDR) {
1847 		err = -EOPNOTSUPP;
1848 		goto done;
1849 	}
1850 
1851 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1852 		err = -EOPNOTSUPP;
1853 		goto done;
1854 	}
1855 
1856 	switch (cmd) {
1857 	case HCISETAUTH:
1858 		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1859 				   HCI_INIT_TIMEOUT);
1860 		break;
1861 
1862 	case HCISETENCRYPT:
1863 		if (!lmp_encrypt_capable(hdev)) {
1864 			err = -EOPNOTSUPP;
1865 			break;
1866 		}
1867 
1868 		if (!test_bit(HCI_AUTH, &hdev->flags)) {
1869 			/* Auth must be enabled first */
1870 			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1871 					   HCI_INIT_TIMEOUT);
1872 			if (err)
1873 				break;
1874 		}
1875 
1876 		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1877 				   HCI_INIT_TIMEOUT);
1878 		break;
1879 
1880 	case HCISETSCAN:
1881 		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1882 				   HCI_INIT_TIMEOUT);
1883 
1884 		/* Ensure that the connectable and discoverable states
1885 		 * get correctly modified as this was a non-mgmt change.
1886 		 */
1887 		if (!err)
1888 			hci_update_scan_state(hdev, dr.dev_opt);
1889 		break;
1890 
1891 	case HCISETLINKPOL:
1892 		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1893 				   HCI_INIT_TIMEOUT);
1894 		break;
1895 
1896 	case HCISETLINKMODE:
1897 		hdev->link_mode = ((__u16) dr.dev_opt) &
1898 					(HCI_LM_MASTER | HCI_LM_ACCEPT);
1899 		break;
1900 
1901 	case HCISETPTYPE:
1902 		hdev->pkt_type = (__u16) dr.dev_opt;
1903 		break;
1904 
1905 	case HCISETACLMTU:
1906 		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
1907 		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1908 		break;
1909 
1910 	case HCISETSCOMTU:
1911 		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
1912 		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1913 		break;
1914 
1915 	default:
1916 		err = -EINVAL;
1917 		break;
1918 	}
1919 
1920 done:
1921 	hci_dev_put(hdev);
1922 	return err;
1923 }
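
/* These ioctls back the legacy hciconfig-style interface.  For example,
 * "hciconfig hci0 piscan" reaches this function as HCISETSCAN with dev_opt
 * set to SCAN_PAGE | SCAN_INQUIRY, which hci_scan_req() then sends out as
 * an HCI_OP_WRITE_SCAN_ENABLE command.
 */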
1924 
1925 int hci_get_dev_list(void __user *arg)
1926 {
1927 	struct hci_dev *hdev;
1928 	struct hci_dev_list_req *dl;
1929 	struct hci_dev_req *dr;
1930 	int n = 0, size, err;
1931 	__u16 dev_num;
1932 
1933 	if (get_user(dev_num, (__u16 __user *) arg))
1934 		return -EFAULT;
1935 
1936 	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1937 		return -EINVAL;
1938 
1939 	size = sizeof(*dl) + dev_num * sizeof(*dr);
1940 
1941 	dl = kzalloc(size, GFP_KERNEL);
1942 	if (!dl)
1943 		return -ENOMEM;
1944 
1945 	dr = dl->dev_req;
1946 
1947 	read_lock(&hci_dev_list_lock);
1948 	list_for_each_entry(hdev, &hci_dev_list, list) {
1949 		unsigned long flags = hdev->flags;
1950 
1951 		/* When the auto-off is configured, it means the transport
1952 		 * is running, but in that case still indicate that the
1953 		 * device is actually down.
1954 		 */
1955 		if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
1956 			flags &= ~BIT(HCI_UP);
1957 
1958 		(dr + n)->dev_id  = hdev->id;
1959 		(dr + n)->dev_opt = flags;
1960 
1961 		if (++n >= dev_num)
1962 			break;
1963 	}
1964 	read_unlock(&hci_dev_list_lock);
1965 
1966 	dl->dev_num = n;
1967 	size = sizeof(*dl) + n * sizeof(*dr);
1968 
1969 	err = copy_to_user(arg, dl, size);
1970 	kfree(dl);
1971 
1972 	return err ? -EFAULT : 0;
1973 }
1974 
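/* Fill in a struct hci_dev_info snapshot for the HCIGETDEVINFO ioctl.
 *
 * Illustrative userspace sketch (not part of this file; it assumes a
 * raw HCI socket fd and the hci_dev_info definition from the uapi
 * headers):
 *
 *	struct hci_dev_info di = { .dev_id = 0 };
 *
 *	if (ioctl(fd, HCIGETDEVINFO, (void *)&di) == 0)
 *		printf("%s acl_mtu %u\n", di.name, (unsigned int)di.acl_mtu);
 *
 * Note that LE-only controllers report their LE buffer settings in the
 * ACL fields and zero in the SCO fields, as done below.
 */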
1975 int hci_get_dev_info(void __user *arg)
1976 {
1977 	struct hci_dev *hdev;
1978 	struct hci_dev_info di;
1979 	unsigned long flags;
1980 	int err = 0;
1981 
1982 	if (copy_from_user(&di, arg, sizeof(di)))
1983 		return -EFAULT;
1984 
1985 	hdev = hci_dev_get(di.dev_id);
1986 	if (!hdev)
1987 		return -ENODEV;
1988 
1989 	/* When auto-off is configured, it means the transport
1990 	 * is running, but in that case still indicate that the
1991 	 * device is actually down.
1992 	 */
1993 	if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
1994 		flags = hdev->flags & ~BIT(HCI_UP);
1995 	else
1996 		flags = hdev->flags;
1997 
1998 	strcpy(di.name, hdev->name);
1999 	di.bdaddr   = hdev->bdaddr;
2000 	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2001 	di.flags    = flags;
2002 	di.pkt_type = hdev->pkt_type;
2003 	if (lmp_bredr_capable(hdev)) {
2004 		di.acl_mtu  = hdev->acl_mtu;
2005 		di.acl_pkts = hdev->acl_pkts;
2006 		di.sco_mtu  = hdev->sco_mtu;
2007 		di.sco_pkts = hdev->sco_pkts;
2008 	} else {
2009 		di.acl_mtu  = hdev->le_mtu;
2010 		di.acl_pkts = hdev->le_pkts;
2011 		di.sco_mtu  = 0;
2012 		di.sco_pkts = 0;
2013 	}
2014 	di.link_policy = hdev->link_policy;
2015 	di.link_mode   = hdev->link_mode;
2016 
2017 	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2018 	memcpy(&di.features, &hdev->features, sizeof(di.features));
2019 
2020 	if (copy_to_user(arg, &di, sizeof(di)))
2021 		err = -EFAULT;
2022 
2023 	hci_dev_put(hdev);
2024 
2025 	return err;
2026 }
2027 
2028 /* ---- Interface to HCI drivers ---- */
2029 
2030 static int hci_rfkill_set_block(void *data, bool blocked)
2031 {
2032 	struct hci_dev *hdev = data;
2033 
2034 	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2035 
2036 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2037 		return -EBUSY;
2038 
2039 	if (blocked) {
2040 		hci_dev_set_flag(hdev, HCI_RFKILLED);
2041 		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2042 		    !hci_dev_test_flag(hdev, HCI_CONFIG))
2043 			hci_dev_do_close(hdev);
2044 	} else {
2045 		hci_dev_clear_flag(hdev, HCI_RFKILLED);
2046 	}
2047 
2048 	return 0;
2049 }
2050 
2051 static const struct rfkill_ops hci_rfkill_ops = {
2052 	.set_block = hci_rfkill_set_block,
2053 };
2054 
2055 static void hci_power_on(struct work_struct *work)
2056 {
2057 	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2058 	int err;
2059 
2060 	BT_DBG("%s", hdev->name);
2061 
2062 	err = hci_dev_do_open(hdev);
2063 	if (err < 0) {
2064 		hci_dev_lock(hdev);
2065 		mgmt_set_powered_failed(hdev, err);
2066 		hci_dev_unlock(hdev);
2067 		return;
2068 	}
2069 
2070 	/* During the HCI setup phase, a few error conditions are
2071 	 * ignored and they need to be checked now. If they are still
2072 	 * valid, it is important to turn the device back off.
2073 	 */
2074 	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2075 	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2076 	    (hdev->dev_type == HCI_BREDR &&
2077 	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2078 	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2079 		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2080 		hci_dev_do_close(hdev);
2081 	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2082 		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2083 				   HCI_AUTO_OFF_TIMEOUT);
2084 	}
2085 
2086 	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2087 		/* For unconfigured devices, set the HCI_RAW flag
2088 		 * so that userspace can easily identify them.
2089 		 */
2090 		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2091 			set_bit(HCI_RAW, &hdev->flags);
2092 
2093 		/* For fully configured devices, this will send
2094 		 * the Index Added event. For unconfigured devices,
2095 		 * it will send an Unconfigured Index Added event.
2096 		 *
2097 		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2098 		 * and no event will be sent.
2099 		 */
2100 		mgmt_index_added(hdev);
2101 	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2102 		/* Once the controller is configured, it is
2103 		 * important to clear the HCI_RAW flag.
2104 		 */
2105 		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2106 			clear_bit(HCI_RAW, &hdev->flags);
2107 
2108 		/* Powering on the controller with HCI_CONFIG set only
2109 		 * happens with the transition from unconfigured to
2110 		 * configured. This will send the Index Added event.
2111 		 */
2112 		mgmt_index_added(hdev);
2113 	}
2114 }
2115 
2116 static void hci_power_off(struct work_struct *work)
2117 {
2118 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2119 					    power_off.work);
2120 
2121 	BT_DBG("%s", hdev->name);
2122 
2123 	hci_dev_do_close(hdev);
2124 }
2125 
2126 static void hci_error_reset(struct work_struct *work)
2127 {
2128 	struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2129 
2130 	BT_DBG("%s", hdev->name);
2131 
2132 	if (hdev->hw_error)
2133 		hdev->hw_error(hdev, hdev->hw_error_code);
2134 	else
2135 		BT_ERR("%s hardware error 0x%2.2x", hdev->name,
2136 		       hdev->hw_error_code);
2137 
2138 	if (hci_dev_do_close(hdev))
2139 		return;
2140 
2141 	hci_dev_do_open(hdev);
2142 }
2143 
2144 static void hci_discov_off(struct work_struct *work)
2145 {
2146 	struct hci_dev *hdev;
2147 
2148 	hdev = container_of(work, struct hci_dev, discov_off.work);
2149 
2150 	BT_DBG("%s", hdev->name);
2151 
2152 	mgmt_discoverable_timeout(hdev);
2153 }
2154 
2155 static void hci_adv_timeout_expire(struct work_struct *work)
2156 {
2157 	struct hci_dev *hdev;
2158 
2159 	hdev = container_of(work, struct hci_dev, adv_instance_expire.work);
2160 
2161 	BT_DBG("%s", hdev->name);
2162 
2163 	mgmt_adv_timeout_expired(hdev);
2164 }
2165 
2166 void hci_uuids_clear(struct hci_dev *hdev)
2167 {
2168 	struct bt_uuid *uuid, *tmp;
2169 
2170 	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2171 		list_del(&uuid->list);
2172 		kfree(uuid);
2173 	}
2174 }
2175 
2176 void hci_link_keys_clear(struct hci_dev *hdev)
2177 {
2178 	struct link_key *key;
2179 
2180 	list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2181 		list_del_rcu(&key->list);
2182 		kfree_rcu(key, rcu);
2183 	}
2184 }
2185 
2186 void hci_smp_ltks_clear(struct hci_dev *hdev)
2187 {
2188 	struct smp_ltk *k;
2189 
2190 	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2191 		list_del_rcu(&k->list);
2192 		kfree_rcu(k, rcu);
2193 	}
2194 }
2195 
2196 void hci_smp_irks_clear(struct hci_dev *hdev)
2197 {
2198 	struct smp_irk *k;
2199 
2200 	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2201 		list_del_rcu(&k->list);
2202 		kfree_rcu(k, rcu);
2203 	}
2204 }
2205 
2206 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2207 {
2208 	struct link_key *k;
2209 
2210 	rcu_read_lock();
2211 	list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2212 		if (bacmp(bdaddr, &k->bdaddr) == 0) {
2213 			rcu_read_unlock();
2214 			return k;
2215 		}
2216 	}
2217 	rcu_read_unlock();
2218 
2219 	return NULL;
2220 }
2221 
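/* Decide whether a newly created BR/EDR link key should be stored
 * persistently. Legacy (pre-SSP) keys and keys derived from an LE link
 * via Secure Connections are always kept, debug keys never are, and
 * for the remaining cases the decision depends on the authentication
 * requirements both sides used during pairing.
 */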
2222 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2223 			       u8 key_type, u8 old_key_type)
2224 {
2225 	/* Legacy key */
2226 	if (key_type < 0x03)
2227 		return true;
2228 
2229 	/* Debug keys are insecure so don't store them persistently */
2230 	if (key_type == HCI_LK_DEBUG_COMBINATION)
2231 		return false;
2232 
2233 	/* Changed combination key and there's no previous one */
2234 	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2235 		return false;
2236 
2237 	/* Security mode 3 case */
2238 	if (!conn)
2239 		return true;
2240 
2241 	/* BR/EDR key derived using SC from an LE link */
2242 	if (conn->type == LE_LINK)
2243 		return true;
2244 
2245 	/* Neither local nor remote side had no-bonding as requirement */
2246 	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2247 		return true;
2248 
2249 	/* Local side had dedicated bonding as requirement */
2250 	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2251 		return true;
2252 
2253 	/* Remote side had dedicated bonding as requirement */
2254 	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2255 		return true;
2256 
2257 	/* If none of the above criteria match, then don't store the key
2258 	 * persistently */
2259 	return false;
2260 }
2261 
2262 static u8 ltk_role(u8 type)
2263 {
2264 	if (type == SMP_LTK)
2265 		return HCI_ROLE_MASTER;
2266 
2267 	return HCI_ROLE_SLAVE;
2268 }
2269 
2270 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2271 			     u8 addr_type, u8 role)
2272 {
2273 	struct smp_ltk *k;
2274 
2275 	rcu_read_lock();
2276 	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2277 		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2278 			continue;
2279 
2280 		if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2281 			rcu_read_unlock();
2282 			return k;
2283 		}
2284 	}
2285 	rcu_read_unlock();
2286 
2287 	return NULL;
2288 }
2289 
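/* Resolve a Resolvable Private Address to a stored IRK. The fast path
 * checks the last RPA cached in each IRK entry; if that fails, every
 * IRK is tried cryptographically via smp_irk_matches() and, on a hit,
 * the RPA is cached in the entry so the next lookup takes the fast
 * path.
 */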
2290 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2291 {
2292 	struct smp_irk *irk;
2293 
2294 	rcu_read_lock();
2295 	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2296 		if (!bacmp(&irk->rpa, rpa)) {
2297 			rcu_read_unlock();
2298 			return irk;
2299 		}
2300 	}
2301 
2302 	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2303 		if (smp_irk_matches(hdev, irk->val, rpa)) {
2304 			bacpy(&irk->rpa, rpa);
2305 			rcu_read_unlock();
2306 			return irk;
2307 		}
2308 	}
2309 	rcu_read_unlock();
2310 
2311 	return NULL;
2312 }
2313 
2314 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2315 				     u8 addr_type)
2316 {
2317 	struct smp_irk *irk;
2318 
2319 	/* Identity Address must be public or static random */
2320 	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2321 		return NULL;
2322 
2323 	rcu_read_lock();
2324 	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2325 		if (addr_type == irk->addr_type &&
2326 		    bacmp(bdaddr, &irk->bdaddr) == 0) {
2327 			rcu_read_unlock();
2328 			return irk;
2329 		}
2330 	}
2331 	rcu_read_unlock();
2332 
2333 	return NULL;
2334 }
2335 
2336 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2337 				  bdaddr_t *bdaddr, u8 *val, u8 type,
2338 				  u8 pin_len, bool *persistent)
2339 {
2340 	struct link_key *key, *old_key;
2341 	u8 old_key_type;
2342 
2343 	old_key = hci_find_link_key(hdev, bdaddr);
2344 	if (old_key) {
2345 		old_key_type = old_key->type;
2346 		key = old_key;
2347 	} else {
2348 		old_key_type = conn ? conn->key_type : 0xff;
2349 		key = kzalloc(sizeof(*key), GFP_KERNEL);
2350 		if (!key)
2351 			return NULL;
2352 		list_add_rcu(&key->list, &hdev->link_keys);
2353 	}
2354 
2355 	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2356 
2357 	/* Some buggy controller combinations generate a changed
2358 	 * combination key for legacy pairing even when there's no
2359 	 * previous key */
2360 	if (type == HCI_LK_CHANGED_COMBINATION &&
2361 	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2362 		type = HCI_LK_COMBINATION;
2363 		if (conn)
2364 			conn->key_type = type;
2365 	}
2366 
2367 	bacpy(&key->bdaddr, bdaddr);
2368 	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2369 	key->pin_len = pin_len;
2370 
2371 	if (type == HCI_LK_CHANGED_COMBINATION)
2372 		key->type = old_key_type;
2373 	else
2374 		key->type = type;
2375 
2376 	if (persistent)
2377 		*persistent = hci_persistent_key(hdev, conn, type,
2378 						 old_key_type);
2379 
2380 	return key;
2381 }
2382 
2383 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2384 			    u8 addr_type, u8 type, u8 authenticated,
2385 			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2386 {
2387 	struct smp_ltk *key, *old_key;
2388 	u8 role = ltk_role(type);
2389 
2390 	old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2391 	if (old_key) {
2392 		key = old_key;
2393 	} else {
2394 		key = kzalloc(sizeof(*key), GFP_KERNEL);
2395 		if (!key)
2396 			return NULL;
2397 		list_add_rcu(&key->list, &hdev->long_term_keys);
2398 	}
2399 
2400 	bacpy(&key->bdaddr, bdaddr);
2401 	key->bdaddr_type = addr_type;
2402 	memcpy(key->val, tk, sizeof(key->val));
2403 	key->authenticated = authenticated;
2404 	key->ediv = ediv;
2405 	key->rand = rand;
2406 	key->enc_size = enc_size;
2407 	key->type = type;
2408 
2409 	return key;
2410 }
2411 
2412 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2413 			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
2414 {
2415 	struct smp_irk *irk;
2416 
2417 	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2418 	if (!irk) {
2419 		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2420 		if (!irk)
2421 			return NULL;
2422 
2423 		bacpy(&irk->bdaddr, bdaddr);
2424 		irk->addr_type = addr_type;
2425 
2426 		list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2427 	}
2428 
2429 	memcpy(irk->val, val, 16);
2430 	bacpy(&irk->rpa, rpa);
2431 
2432 	return irk;
2433 }
2434 
2435 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2436 {
2437 	struct link_key *key;
2438 
2439 	key = hci_find_link_key(hdev, bdaddr);
2440 	if (!key)
2441 		return -ENOENT;
2442 
2443 	BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2444 
2445 	list_del_rcu(&key->list);
2446 	kfree_rcu(key, rcu);
2447 
2448 	return 0;
2449 }
2450 
2451 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2452 {
2453 	struct smp_ltk *k;
2454 	int removed = 0;
2455 
2456 	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2457 		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2458 			continue;
2459 
2460 		BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2461 
2462 		list_del_rcu(&k->list);
2463 		kfree_rcu(k, rcu);
2464 		removed++;
2465 	}
2466 
2467 	return removed ? 0 : -ENOENT;
2468 }
2469 
2470 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2471 {
2472 	struct smp_irk *k;
2473 
2474 	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2475 		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2476 			continue;
2477 
2478 		BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2479 
2480 		list_del_rcu(&k->list);
2481 		kfree_rcu(k, rcu);
2482 	}
2483 }
2484 
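/* Check whether stored pairing information exists for the given
 * address: a link key for BR/EDR addresses, or a long term key for LE
 * addresses. For LE the address is first translated to the identity
 * address when a matching IRK is known, so a resolvable private
 * address of a paired device is recognized as well.
 */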
2485 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2486 {
2487 	struct smp_ltk *k;
2488 	struct smp_irk *irk;
2489 	u8 addr_type;
2490 
2491 	if (type == BDADDR_BREDR) {
2492 		if (hci_find_link_key(hdev, bdaddr))
2493 			return true;
2494 		return false;
2495 	}
2496 
2497 	/* Convert to HCI addr type which struct smp_ltk uses */
2498 	if (type == BDADDR_LE_PUBLIC)
2499 		addr_type = ADDR_LE_DEV_PUBLIC;
2500 	else
2501 		addr_type = ADDR_LE_DEV_RANDOM;
2502 
2503 	irk = hci_get_irk(hdev, bdaddr, addr_type);
2504 	if (irk) {
2505 		bdaddr = &irk->bdaddr;
2506 		addr_type = irk->addr_type;
2507 	}
2508 
2509 	rcu_read_lock();
2510 	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2511 		if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2512 			rcu_read_unlock();
2513 			return true;
2514 		}
2515 	}
2516 	rcu_read_unlock();
2517 
2518 	return false;
2519 }
2520 
2521 /* HCI command timer function */
2522 static void hci_cmd_timeout(struct work_struct *work)
2523 {
2524 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2525 					    cmd_timer.work);
2526 
2527 	if (hdev->sent_cmd) {
2528 		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2529 		u16 opcode = __le16_to_cpu(sent->opcode);
2530 
2531 		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2532 	} else {
2533 		BT_ERR("%s command tx timeout", hdev->name);
2534 	}
2535 
2536 	atomic_set(&hdev->cmd_cnt, 1);
2537 	queue_work(hdev->workqueue, &hdev->cmd_work);
2538 }
2539 
2540 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2541 					  bdaddr_t *bdaddr, u8 bdaddr_type)
2542 {
2543 	struct oob_data *data;
2544 
2545 	list_for_each_entry(data, &hdev->remote_oob_data, list) {
2546 		if (bacmp(bdaddr, &data->bdaddr) != 0)
2547 			continue;
2548 		if (data->bdaddr_type != bdaddr_type)
2549 			continue;
2550 		return data;
2551 	}
2552 
2553 	return NULL;
2554 }
2555 
2556 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2557 			       u8 bdaddr_type)
2558 {
2559 	struct oob_data *data;
2560 
2561 	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2562 	if (!data)
2563 		return -ENOENT;
2564 
2565 	BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2566 
2567 	list_del(&data->list);
2568 	kfree(data);
2569 
2570 	return 0;
2571 }
2572 
2573 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2574 {
2575 	struct oob_data *data, *n;
2576 
2577 	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2578 		list_del(&data->list);
2579 		kfree(data);
2580 	}
2581 }
2582 
2583 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2584 			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
2585 			    u8 *hash256, u8 *rand256)
2586 {
2587 	struct oob_data *data;
2588 
2589 	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2590 	if (!data) {
2591 		data = kmalloc(sizeof(*data), GFP_KERNEL);
2592 		if (!data)
2593 			return -ENOMEM;
2594 
2595 		bacpy(&data->bdaddr, bdaddr);
2596 		data->bdaddr_type = bdaddr_type;
2597 		list_add(&data->list, &hdev->remote_oob_data);
2598 	}
2599 
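	/* data->present encodes which OOB values are valid after this
	 * update: 0x01 means only the 192-bit hash/randomizer pair,
	 * 0x02 means only the 256-bit pair, and 0x03 means both.
	 */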
2600 	if (hash192 && rand192) {
2601 		memcpy(data->hash192, hash192, sizeof(data->hash192));
2602 		memcpy(data->rand192, rand192, sizeof(data->rand192));
2603 		if (hash256 && rand256)
2604 			data->present = 0x03;
2605 	} else {
2606 		memset(data->hash192, 0, sizeof(data->hash192));
2607 		memset(data->rand192, 0, sizeof(data->rand192));
2608 		if (hash256 && rand256)
2609 			data->present = 0x02;
2610 		else
2611 			data->present = 0x00;
2612 	}
2613 
2614 	if (hash256 && rand256) {
2615 		memcpy(data->hash256, hash256, sizeof(data->hash256));
2616 		memcpy(data->rand256, rand256, sizeof(data->rand256));
2617 	} else {
2618 		memset(data->hash256, 0, sizeof(data->hash256));
2619 		memset(data->rand256, 0, sizeof(data->rand256));
2620 		if (hash192 && rand192)
2621 			data->present = 0x01;
2622 	}
2623 
2624 	BT_DBG("%s for %pMR", hdev->name, bdaddr);
2625 
2626 	return 0;
2627 }
2628 
2629 /* This function requires the caller holds hdev->lock */
2630 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2631 {
2632 	struct adv_info *adv_instance;
2633 
2634 	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2635 		if (adv_instance->instance == instance)
2636 			return adv_instance;
2637 	}
2638 
2639 	return NULL;
2640 }
2641 
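/* Return the advertising instance that follows the given one in the
 * adv_instances list, wrapping around to the first entry after the
 * last, or NULL if the given instance does not exist.
 */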
2642 /* This function requires the caller holds hdev->lock */
2643 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
{
2644 	struct adv_info *cur_instance;
2645 
2646 	cur_instance = hci_find_adv_instance(hdev, instance);
2647 	if (!cur_instance)
2648 		return NULL;
2649 
2650 	if (cur_instance == list_last_entry(&hdev->adv_instances,
2651 					    struct adv_info, list))
2652 		return list_first_entry(&hdev->adv_instances,
2653 						 struct adv_info, list);
2654 	else
2655 		return list_next_entry(cur_instance, list);
2656 }
2657 
2658 /* This function requires the caller holds hdev->lock */
2659 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2660 {
2661 	struct adv_info *adv_instance;
2662 
2663 	adv_instance = hci_find_adv_instance(hdev, instance);
2664 	if (!adv_instance)
2665 		return -ENOENT;
2666 
2667 	BT_DBG("%s removing instance %d", hdev->name, instance);
2668 
2669 	if (hdev->cur_adv_instance == instance && hdev->adv_instance_timeout) {
2670 		cancel_delayed_work(&hdev->adv_instance_expire);
2671 		hdev->adv_instance_timeout = 0;
2672 	}
2673 
2674 	list_del(&adv_instance->list);
2675 	kfree(adv_instance);
2676 
2677 	hdev->adv_instance_cnt--;
2678 
2679 	return 0;
2680 }
2681 
2682 /* This function requires the caller holds hdev->lock */
2683 void hci_adv_instances_clear(struct hci_dev *hdev)
2684 {
2685 	struct adv_info *adv_instance, *n;
2686 
2687 	if (hdev->adv_instance_timeout) {
2688 		cancel_delayed_work(&hdev->adv_instance_expire);
2689 		hdev->adv_instance_timeout = 0;
2690 	}
2691 
2692 	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2693 		list_del(&adv_instance->list);
2694 		kfree(adv_instance);
2695 	}
2696 
2697 	hdev->adv_instance_cnt = 0;
2698 }
2699 
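/* Create or update an advertising instance. New instances must use an
 * instance number in the range 1..HCI_MAX_ADV_INSTANCES and at most
 * HCI_MAX_ADV_INSTANCES instances can exist at a time; a duration of 0
 * selects HCI_DEFAULT_ADV_DURATION.
 */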
2700 /* This function requires the caller holds hdev->lock */
2701 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2702 			 u16 adv_data_len, u8 *adv_data,
2703 			 u16 scan_rsp_len, u8 *scan_rsp_data,
2704 			 u16 timeout, u16 duration)
2705 {
2706 	struct adv_info *adv_instance;
2707 
2708 	adv_instance = hci_find_adv_instance(hdev, instance);
2709 	if (adv_instance) {
2710 		memset(adv_instance->adv_data, 0,
2711 		       sizeof(adv_instance->adv_data));
2712 		memset(adv_instance->scan_rsp_data, 0,
2713 		       sizeof(adv_instance->scan_rsp_data));
2714 	} else {
2715 		if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
2716 		    instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2717 			return -EOVERFLOW;
2718 
2719 		adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
2720 		if (!adv_instance)
2721 			return -ENOMEM;
2722 
2723 		adv_instance->pending = true;
2724 		adv_instance->instance = instance;
2725 		list_add(&adv_instance->list, &hdev->adv_instances);
2726 		hdev->adv_instance_cnt++;
2727 	}
2728 
2729 	adv_instance->flags = flags;
2730 	adv_instance->adv_data_len = adv_data_len;
2731 	adv_instance->scan_rsp_len = scan_rsp_len;
2732 
2733 	if (adv_data_len)
2734 		memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2735 
2736 	if (scan_rsp_len)
2737 		memcpy(adv_instance->scan_rsp_data,
2738 		       scan_rsp_data, scan_rsp_len);
2739 
2740 	adv_instance->timeout = timeout;
2741 	adv_instance->remaining_time = timeout;
2742 
2743 	if (duration == 0)
2744 		adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2745 	else
2746 		adv_instance->duration = duration;
2747 
2748 	BT_DBG("%s for instance %d", hdev->name, instance);
2749 
2750 	return 0;
2751 }
2752 
2753 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2754 					 bdaddr_t *bdaddr, u8 type)
2755 {
2756 	struct bdaddr_list *b;
2757 
2758 	list_for_each_entry(b, bdaddr_list, list) {
2759 		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2760 			return b;
2761 	}
2762 
2763 	return NULL;
2764 }
2765 
2766 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2767 {
2768 	struct list_head *p, *n;
2769 
2770 	list_for_each_safe(p, n, bdaddr_list) {
2771 		struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
2772 
2773 		list_del(p);
2774 		kfree(b);
2775 	}
2776 }
2777 
2778 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2779 {
2780 	struct bdaddr_list *entry;
2781 
2782 	if (!bacmp(bdaddr, BDADDR_ANY))
2783 		return -EBADF;
2784 
2785 	if (hci_bdaddr_list_lookup(list, bdaddr, type))
2786 		return -EEXIST;
2787 
2788 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2789 	if (!entry)
2790 		return -ENOMEM;
2791 
2792 	bacpy(&entry->bdaddr, bdaddr);
2793 	entry->bdaddr_type = type;
2794 
2795 	list_add(&entry->list, list);
2796 
2797 	return 0;
2798 }
2799 
2800 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2801 {
2802 	struct bdaddr_list *entry;
2803 
2804 	if (!bacmp(bdaddr, BDADDR_ANY)) {
2805 		hci_bdaddr_list_clear(list);
2806 		return 0;
2807 	}
2808 
2809 	entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2810 	if (!entry)
2811 		return -ENOENT;
2812 
2813 	list_del(&entry->list);
2814 	kfree(entry);
2815 
2816 	return 0;
2817 }
2818 
2819 /* This function requires the caller holds hdev->lock */
2820 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2821 					       bdaddr_t *addr, u8 addr_type)
2822 {
2823 	struct hci_conn_params *params;
2824 
2825 	/* The conn params list only contains identity addresses */
2826 	if (!hci_is_identity_address(addr, addr_type))
2827 		return NULL;
2828 
2829 	list_for_each_entry(params, &hdev->le_conn_params, list) {
2830 		if (bacmp(&params->addr, addr) == 0 &&
2831 		    params->addr_type == addr_type) {
2832 			return params;
2833 		}
2834 	}
2835 
2836 	return NULL;
2837 }
2838 
2839 /* This function requires the caller holds hdev->lock */
2840 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2841 						  bdaddr_t *addr, u8 addr_type)
2842 {
2843 	struct hci_conn_params *param;
2844 
2845 	/* The list only contains identity addresses */
2846 	if (!hci_is_identity_address(addr, addr_type))
2847 		return NULL;
2848 
2849 	list_for_each_entry(param, list, action) {
2850 		if (bacmp(&param->addr, addr) == 0 &&
2851 		    param->addr_type == addr_type)
2852 			return param;
2853 	}
2854 
2855 	return NULL;
2856 }
2857 
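/* Look up the LE connection parameters for an identity address and
 * create a new entry if none exists yet. New entries start with the
 * controller-wide default connection interval, latency and supervision
 * timeout, and with auto-connect disabled.
 */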
2858 /* This function requires the caller holds hdev->lock */
2859 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2860 					    bdaddr_t *addr, u8 addr_type)
2861 {
2862 	struct hci_conn_params *params;
2863 
2864 	if (!hci_is_identity_address(addr, addr_type))
2865 		return NULL;
2866 
2867 	params = hci_conn_params_lookup(hdev, addr, addr_type);
2868 	if (params)
2869 		return params;
2870 
2871 	params = kzalloc(sizeof(*params), GFP_KERNEL);
2872 	if (!params) {
2873 		BT_ERR("Out of memory");
2874 		return NULL;
2875 	}
2876 
2877 	bacpy(&params->addr, addr);
2878 	params->addr_type = addr_type;
2879 
2880 	list_add(&params->list, &hdev->le_conn_params);
2881 	INIT_LIST_HEAD(&params->action);
2882 
2883 	params->conn_min_interval = hdev->le_conn_min_interval;
2884 	params->conn_max_interval = hdev->le_conn_max_interval;
2885 	params->conn_latency = hdev->le_conn_latency;
2886 	params->supervision_timeout = hdev->le_supv_timeout;
2887 	params->auto_connect = HCI_AUTO_CONN_DISABLED;
2888 
2889 	BT_DBG("addr %pMR (type %u)", addr, addr_type);
2890 
2891 	return params;
2892 }
2893 
2894 static void hci_conn_params_free(struct hci_conn_params *params)
2895 {
2896 	if (params->conn) {
2897 		hci_conn_drop(params->conn);
2898 		hci_conn_put(params->conn);
2899 	}
2900 
2901 	list_del(&params->action);
2902 	list_del(&params->list);
2903 	kfree(params);
2904 }
2905 
2906 /* This function requires the caller holds hdev->lock */
2907 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2908 {
2909 	struct hci_conn_params *params;
2910 
2911 	params = hci_conn_params_lookup(hdev, addr, addr_type);
2912 	if (!params)
2913 		return;
2914 
2915 	hci_conn_params_free(params);
2916 
2917 	hci_update_background_scan(hdev);
2918 
2919 	BT_DBG("addr %pMR (type %u)", addr, addr_type);
2920 }
2921 
2922 /* This function requires the caller holds hdev->lock */
2923 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2924 {
2925 	struct hci_conn_params *params, *tmp;
2926 
2927 	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2928 		if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2929 			continue;
2930 		list_del(&params->list);
2931 		kfree(params);
2932 	}
2933 
2934 	BT_DBG("All LE disabled connection parameters were removed");
2935 }
2936 
2937 /* This function requires the caller holds hdev->lock */
2938 void hci_conn_params_clear_all(struct hci_dev *hdev)
2939 {
2940 	struct hci_conn_params *params, *tmp;
2941 
2942 	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2943 		hci_conn_params_free(params);
2944 
2945 	hci_update_background_scan(hdev);
2946 
2947 	BT_DBG("All LE connection parameters were removed");
2948 }
2949 
2950 static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2951 {
2952 	if (status) {
2953 		BT_ERR("Failed to start inquiry: status %d", status);
2954 
2955 		hci_dev_lock(hdev);
2956 		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2957 		hci_dev_unlock(hdev);
2958 		return;
2959 	}
2960 }
2961 
2962 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
2963 					  u16 opcode)
2964 {
2965 	/* General inquiry access code (GIAC) */
2966 	u8 lap[3] = { 0x33, 0x8b, 0x9e };
2967 	struct hci_cp_inquiry cp;
2968 	int err;
2969 
2970 	if (status) {
2971 		BT_ERR("Failed to disable LE scanning: status %d", status);
2972 		return;
2973 	}
2974 
2975 	hdev->discovery.scan_start = 0;
2976 
2977 	switch (hdev->discovery.type) {
2978 	case DISCOV_TYPE_LE:
2979 		hci_dev_lock(hdev);
2980 		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2981 		hci_dev_unlock(hdev);
2982 		break;
2983 
2984 	case DISCOV_TYPE_INTERLEAVED:
2985 		hci_dev_lock(hdev);
2986 
2987 		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
2988 			     &hdev->quirks)) {
2989 			/* If we were running LE only scan, change discovery
2990 			 * state. If we were running both LE and BR/EDR inquiry
2991 			 * simultaneously, and BR/EDR inquiry is already
2992 			 * finished, stop discovery, otherwise BR/EDR inquiry
2993 			 * will stop discovery when finished. If we are resolving
2994 			 * a remote device name, do not change the discovery state.
2995 			 */
2996 			if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
2997 			    hdev->discovery.state != DISCOVERY_RESOLVING)
2998 				hci_discovery_set_state(hdev,
2999 							DISCOVERY_STOPPED);
3000 		} else {
3001 			struct hci_request req;
3002 
3003 			hci_inquiry_cache_flush(hdev);
3004 
3005 			hci_req_init(&req, hdev);
3006 
3007 			memset(&cp, 0, sizeof(cp));
3008 			memcpy(&cp.lap, lap, sizeof(cp.lap));
3009 			cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3010 			hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3011 
3012 			err = hci_req_run(&req, inquiry_complete);
3013 			if (err) {
3014 				BT_ERR("Inquiry request failed: err %d", err);
3015 				hci_discovery_set_state(hdev,
3016 							DISCOVERY_STOPPED);
3017 			}
3018 		}
3019 
3020 		hci_dev_unlock(hdev);
3021 		break;
3022 	}
3023 }
3024 
3025 static void le_scan_disable_work(struct work_struct *work)
3026 {
3027 	struct hci_dev *hdev = container_of(work, struct hci_dev,
3028 					    le_scan_disable.work);
3029 	struct hci_request req;
3030 	int err;
3031 
3032 	BT_DBG("%s", hdev->name);
3033 
3034 	cancel_delayed_work_sync(&hdev->le_scan_restart);
3035 
3036 	hci_req_init(&req, hdev);
3037 
3038 	hci_req_add_le_scan_disable(&req);
3039 
3040 	err = hci_req_run(&req, le_scan_disable_work_complete);
3041 	if (err)
3042 		BT_ERR("Disable LE scanning request failed: err %d", err);
3043 }
3044 
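/* Completion handler for the LE scan restart request. If the
 * controller needs the HCI_QUIRK_STRICT_DUPLICATE_FILTER workaround,
 * re-arm the le_scan_disable work with whatever is left of the
 * original scan duration. For example (illustrative numbers only), if
 * the discovery duration is 10 s and the restart happens 3 s after
 * scan_start, the disable work is queued again with a 7 s timeout; if
 * more than the full duration has already elapsed, it is queued
 * immediately.
 */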
3045 static void le_scan_restart_work_complete(struct hci_dev *hdev, u8 status,
3046 					  u16 opcode)
3047 {
3048 	unsigned long timeout, duration, scan_start, now;
3049 
3050 	BT_DBG("%s", hdev->name);
3051 
3052 	if (status) {
3053 		BT_ERR("Failed to restart LE scan: status %d", status);
3054 		return;
3055 	}
3056 
3057 	if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
3058 	    !hdev->discovery.scan_start)
3059 		return;
3060 
3061 	/* When the scan was started, the le_scan_disable work was queued
3062 	 * to run 'duration' after scan_start. During the scan restart that
3063 	 * work was canceled, so queue it again with the remaining timeout
3064 	 * to make sure the scan does not run indefinitely.
3065 	 */
3065 	 */
3066 	duration = hdev->discovery.scan_duration;
3067 	scan_start = hdev->discovery.scan_start;
3068 	now = jiffies;
3069 	if (now - scan_start <= duration) {
3070 		int elapsed;
3071 
3072 		if (now >= scan_start)
3073 			elapsed = now - scan_start;
3074 		else
3075 			elapsed = ULONG_MAX - scan_start + now;
3076 
3077 		timeout = duration - elapsed;
3078 	} else {
3079 		timeout = 0;
3080 	}
3081 	queue_delayed_work(hdev->workqueue,
3082 			   &hdev->le_scan_disable, timeout);
3083 }
3084 
3085 static void le_scan_restart_work(struct work_struct *work)
3086 {
3087 	struct hci_dev *hdev = container_of(work, struct hci_dev,
3088 					    le_scan_restart.work);
3089 	struct hci_request req;
3090 	struct hci_cp_le_set_scan_enable cp;
3091 	int err;
3092 
3093 	BT_DBG("%s", hdev->name);
3094 
3095 	/* If controller is not scanning we are done. */
3096 	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
3097 		return;
3098 
3099 	hci_req_init(&req, hdev);
3100 
3101 	hci_req_add_le_scan_disable(&req);
3102 
3103 	memset(&cp, 0, sizeof(cp));
3104 	cp.enable = LE_SCAN_ENABLE;
3105 	cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3106 	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
3107 
3108 	err = hci_req_run(&req, le_scan_restart_work_complete);
3109 	if (err)
3110 		BT_ERR("Restart LE scan request failed: err %d", err);
3111 }
3112 
3113 /* Copy the Identity Address of the controller.
3114  *
3115  * If the controller has a public BD_ADDR, then by default use that one.
3116  * If this is a LE only controller without a public address, default to
3117  * the static random address.
3118  *
3119  * For debugging purposes it is possible to force controllers with a
3120  * public address to use the static random address instead.
3121  *
3122  * In case BR/EDR has been disabled on a dual-mode controller and
3123  * userspace has configured a static address, then that address
3124  * becomes the identity address instead of the public BR/EDR address.
3125  */
3126 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3127 			       u8 *bdaddr_type)
3128 {
3129 	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
3130 	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
3131 	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
3132 	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
3133 		bacpy(bdaddr, &hdev->static_addr);
3134 		*bdaddr_type = ADDR_LE_DEV_RANDOM;
3135 	} else {
3136 		bacpy(bdaddr, &hdev->bdaddr);
3137 		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
3138 	}
3139 }
3140 
3141 /* Alloc HCI device */
3142 struct hci_dev *hci_alloc_dev(void)
3143 {
3144 	struct hci_dev *hdev;
3145 
3146 	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3147 	if (!hdev)
3148 		return NULL;
3149 
3150 	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3151 	hdev->esco_type = (ESCO_HV1);
3152 	hdev->link_mode = (HCI_LM_ACCEPT);
3153 	hdev->num_iac = 0x01;		/* Support for one IAC is mandatory */
3154 	hdev->io_capability = 0x03;	/* No Input No Output */
3155 	hdev->manufacturer = 0xffff;	/* Default to internal use */
3156 	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3157 	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3158 	hdev->adv_instance_cnt = 0;
3159 	hdev->cur_adv_instance = 0x00;
3160 	hdev->adv_instance_timeout = 0;
3161 
3162 	hdev->sniff_max_interval = 800;
3163 	hdev->sniff_min_interval = 80;
3164 
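	/* LE defaults below are in controller units: channel map 0x07
	 * enables all three advertising channels, advertising interval
	 * 0x0800 is 1.28 s and scan interval/window 0x0060/0x0030 are
	 * 60 ms/30 ms (0.625 ms units), connection interval
	 * 0x0028-0x0038 is 50-70 ms (1.25 ms units) and supervision
	 * timeout 0x002a is 420 ms (10 ms units).
	 */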
3165 	hdev->le_adv_channel_map = 0x07;
3166 	hdev->le_adv_min_interval = 0x0800;
3167 	hdev->le_adv_max_interval = 0x0800;
3168 	hdev->le_scan_interval = 0x0060;
3169 	hdev->le_scan_window = 0x0030;
3170 	hdev->le_conn_min_interval = 0x0028;
3171 	hdev->le_conn_max_interval = 0x0038;
3172 	hdev->le_conn_latency = 0x0000;
3173 	hdev->le_supv_timeout = 0x002a;
3174 	hdev->le_def_tx_len = 0x001b;
3175 	hdev->le_def_tx_time = 0x0148;
3176 	hdev->le_max_tx_len = 0x001b;
3177 	hdev->le_max_tx_time = 0x0148;
3178 	hdev->le_max_rx_len = 0x001b;
3179 	hdev->le_max_rx_time = 0x0148;
3180 
3181 	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3182 	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3183 	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3184 	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3185 
3186 	mutex_init(&hdev->lock);
3187 	mutex_init(&hdev->req_lock);
3188 
3189 	INIT_LIST_HEAD(&hdev->mgmt_pending);
3190 	INIT_LIST_HEAD(&hdev->blacklist);
3191 	INIT_LIST_HEAD(&hdev->whitelist);
3192 	INIT_LIST_HEAD(&hdev->uuids);
3193 	INIT_LIST_HEAD(&hdev->link_keys);
3194 	INIT_LIST_HEAD(&hdev->long_term_keys);
3195 	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3196 	INIT_LIST_HEAD(&hdev->remote_oob_data);
3197 	INIT_LIST_HEAD(&hdev->le_white_list);
3198 	INIT_LIST_HEAD(&hdev->le_conn_params);
3199 	INIT_LIST_HEAD(&hdev->pend_le_conns);
3200 	INIT_LIST_HEAD(&hdev->pend_le_reports);
3201 	INIT_LIST_HEAD(&hdev->conn_hash.list);
3202 	INIT_LIST_HEAD(&hdev->adv_instances);
3203 
3204 	INIT_WORK(&hdev->rx_work, hci_rx_work);
3205 	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3206 	INIT_WORK(&hdev->tx_work, hci_tx_work);
3207 	INIT_WORK(&hdev->power_on, hci_power_on);
3208 	INIT_WORK(&hdev->error_reset, hci_error_reset);
3209 
3210 	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3211 	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3212 	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3213 	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
3214 	INIT_DELAYED_WORK(&hdev->adv_instance_expire, hci_adv_timeout_expire);
3215 
3216 	skb_queue_head_init(&hdev->rx_q);
3217 	skb_queue_head_init(&hdev->cmd_q);
3218 	skb_queue_head_init(&hdev->raw_q);
3219 
3220 	init_waitqueue_head(&hdev->req_wait_q);
3221 
3222 	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3223 
3224 	hci_init_sysfs(hdev);
3225 	discovery_init(hdev);
3226 
3227 	return hdev;
3228 }
3229 EXPORT_SYMBOL(hci_alloc_dev);
3230 
3231 /* Free HCI device */
3232 void hci_free_dev(struct hci_dev *hdev)
3233 {
3234 	/* will free via device release */
3235 	put_device(&hdev->dev);
3236 }
3237 EXPORT_SYMBOL(hci_free_dev);
3238 
3239 /* Register HCI device */
3240 int hci_register_dev(struct hci_dev *hdev)
3241 {
3242 	int id, error;
3243 
3244 	if (!hdev->open || !hdev->close || !hdev->send)
3245 		return -EINVAL;
3246 
3247 	/* Do not allow HCI_AMP devices to register at index 0,
3248 	 * so the index can be used as the AMP controller ID.
3249 	 */
3250 	switch (hdev->dev_type) {
3251 	case HCI_BREDR:
3252 		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3253 		break;
3254 	case HCI_AMP:
3255 		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3256 		break;
3257 	default:
3258 		return -EINVAL;
3259 	}
3260 
3261 	if (id < 0)
3262 		return id;
3263 
3264 	sprintf(hdev->name, "hci%d", id);
3265 	hdev->id = id;
3266 
3267 	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3268 
3269 	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3270 					  WQ_MEM_RECLAIM, 1, hdev->name);
3271 	if (!hdev->workqueue) {
3272 		error = -ENOMEM;
3273 		goto err;
3274 	}
3275 
3276 	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3277 					      WQ_MEM_RECLAIM, 1, hdev->name);
3278 	if (!hdev->req_workqueue) {
3279 		destroy_workqueue(hdev->workqueue);
3280 		error = -ENOMEM;
3281 		goto err;
3282 	}
3283 
3284 	if (!IS_ERR_OR_NULL(bt_debugfs))
3285 		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3286 
3287 	dev_set_name(&hdev->dev, "%s", hdev->name);
3288 
3289 	error = device_add(&hdev->dev);
3290 	if (error < 0)
3291 		goto err_wqueue;
3292 
3293 	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3294 				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3295 				    hdev);
3296 	if (hdev->rfkill) {
3297 		if (rfkill_register(hdev->rfkill) < 0) {
3298 			rfkill_destroy(hdev->rfkill);
3299 			hdev->rfkill = NULL;
3300 		}
3301 	}
3302 
3303 	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3304 		hci_dev_set_flag(hdev, HCI_RFKILLED);
3305 
3306 	hci_dev_set_flag(hdev, HCI_SETUP);
3307 	hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3308 
3309 	if (hdev->dev_type == HCI_BREDR) {
3310 		/* Assume BR/EDR support until proven otherwise (such as
3311 		 * through reading supported features during init).
3312 		 */
3313 		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3314 	}
3315 
3316 	write_lock(&hci_dev_list_lock);
3317 	list_add(&hdev->list, &hci_dev_list);
3318 	write_unlock(&hci_dev_list_lock);
3319 
3320 	/* Devices that are marked for raw-only usage are unconfigured
3321 	 * and should not be included in normal operation.
3322 	 */
3323 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3324 		hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3325 
3326 	hci_notify(hdev, HCI_DEV_REG);
3327 	hci_dev_hold(hdev);
3328 
3329 	queue_work(hdev->req_workqueue, &hdev->power_on);
3330 
3331 	return id;
3332 
3333 err_wqueue:
3334 	destroy_workqueue(hdev->workqueue);
3335 	destroy_workqueue(hdev->req_workqueue);
3336 err:
3337 	ida_simple_remove(&hci_index_ida, hdev->id);
3338 
3339 	return error;
3340 }
3341 EXPORT_SYMBOL(hci_register_dev);
3342 
3343 /* Unregister HCI device */
3344 void hci_unregister_dev(struct hci_dev *hdev)
3345 {
3346 	int id;
3347 
3348 	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3349 
3350 	hci_dev_set_flag(hdev, HCI_UNREGISTER);
3351 
3352 	id = hdev->id;
3353 
3354 	write_lock(&hci_dev_list_lock);
3355 	list_del(&hdev->list);
3356 	write_unlock(&hci_dev_list_lock);
3357 
3358 	hci_dev_do_close(hdev);
3359 
3360 	cancel_work_sync(&hdev->power_on);
3361 
3362 	if (!test_bit(HCI_INIT, &hdev->flags) &&
3363 	    !hci_dev_test_flag(hdev, HCI_SETUP) &&
3364 	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
3365 		hci_dev_lock(hdev);
3366 		mgmt_index_removed(hdev);
3367 		hci_dev_unlock(hdev);
3368 	}
3369 
3370 	/* mgmt_index_removed should take care of emptying the
3371 	 * pending list */
3372 	BUG_ON(!list_empty(&hdev->mgmt_pending));
3373 
3374 	hci_notify(hdev, HCI_DEV_UNREG);
3375 
3376 	if (hdev->rfkill) {
3377 		rfkill_unregister(hdev->rfkill);
3378 		rfkill_destroy(hdev->rfkill);
3379 	}
3380 
3381 	device_del(&hdev->dev);
3382 
3383 	debugfs_remove_recursive(hdev->debugfs);
3384 
3385 	destroy_workqueue(hdev->workqueue);
3386 	destroy_workqueue(hdev->req_workqueue);
3387 
3388 	hci_dev_lock(hdev);
3389 	hci_bdaddr_list_clear(&hdev->blacklist);
3390 	hci_bdaddr_list_clear(&hdev->whitelist);
3391 	hci_uuids_clear(hdev);
3392 	hci_link_keys_clear(hdev);
3393 	hci_smp_ltks_clear(hdev);
3394 	hci_smp_irks_clear(hdev);
3395 	hci_remote_oob_data_clear(hdev);
3396 	hci_adv_instances_clear(hdev);
3397 	hci_bdaddr_list_clear(&hdev->le_white_list);
3398 	hci_conn_params_clear_all(hdev);
3399 	hci_discovery_filter_clear(hdev);
3400 	hci_dev_unlock(hdev);
3401 
3402 	hci_dev_put(hdev);
3403 
3404 	ida_simple_remove(&hci_index_ida, id);
3405 }
3406 EXPORT_SYMBOL(hci_unregister_dev);
3407 
3408 /* Suspend HCI device */
3409 int hci_suspend_dev(struct hci_dev *hdev)
3410 {
3411 	hci_notify(hdev, HCI_DEV_SUSPEND);
3412 	return 0;
3413 }
3414 EXPORT_SYMBOL(hci_suspend_dev);
3415 
3416 /* Resume HCI device */
3417 int hci_resume_dev(struct hci_dev *hdev)
3418 {
3419 	hci_notify(hdev, HCI_DEV_RESUME);
3420 	return 0;
3421 }
3422 EXPORT_SYMBOL(hci_resume_dev);
3423 
3424 /* Reset HCI device */
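/* Ask the core to restart the controller by injecting a synthetic HCI
 * Hardware Error event (event code HCI_EV_HARDWARE_ERROR, parameter
 * length 1, error code 0x00) into the receive path; processing of that
 * event is expected to end up in hci_error_reset() above, which closes
 * and reopens the device.
 */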
3425 int hci_reset_dev(struct hci_dev *hdev)
3426 {
3427 	const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3428 	struct sk_buff *skb;
3429 
3430 	skb = bt_skb_alloc(3, GFP_ATOMIC);
3431 	if (!skb)
3432 		return -ENOMEM;
3433 
3434 	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
3435 	memcpy(skb_put(skb, 3), hw_err, 3);
3436 
3437 	/* Send Hardware Error to upper stack */
3438 	return hci_recv_frame(hdev, skb);
3439 }
3440 EXPORT_SYMBOL(hci_reset_dev);
3441 
3442 /* Receive frame from HCI drivers */
3443 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3444 {
3445 	if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
3446 		      !test_bit(HCI_INIT, &hdev->flags))) {
3447 		kfree_skb(skb);
3448 		return -ENXIO;
3449 	}
3450 
3451 	/* Incoming skb */
3452 	bt_cb(skb)->incoming = 1;
3453 
3454 	/* Time stamp */
3455 	__net_timestamp(skb);
3456 
3457 	skb_queue_tail(&hdev->rx_q, skb);
3458 	queue_work(hdev->workqueue, &hdev->rx_work);
3459 
3460 	return 0;
3461 }
3462 EXPORT_SYMBOL(hci_recv_frame);
3463 
3464 /* ---- Interface to upper protocols ---- */
3465 
3466 int hci_register_cb(struct hci_cb *cb)
3467 {
3468 	BT_DBG("%p name %s", cb, cb->name);
3469 
3470 	mutex_lock(&hci_cb_list_lock);
3471 	list_add_tail(&cb->list, &hci_cb_list);
3472 	mutex_unlock(&hci_cb_list_lock);
3473 
3474 	return 0;
3475 }
3476 EXPORT_SYMBOL(hci_register_cb);
3477 
3478 int hci_unregister_cb(struct hci_cb *cb)
3479 {
3480 	BT_DBG("%p name %s", cb, cb->name);
3481 
3482 	mutex_lock(&hci_cb_list_lock);
3483 	list_del(&cb->list);
3484 	mutex_unlock(&hci_cb_list_lock);
3485 
3486 	return 0;
3487 }
3488 EXPORT_SYMBOL(hci_unregister_cb);
3489 
3490 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3491 {
3492 	int err;
3493 
3494 	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
3495 
3496 	/* Time stamp */
3497 	__net_timestamp(skb);
3498 
3499 	/* Send copy to monitor */
3500 	hci_send_to_monitor(hdev, skb);
3501 
3502 	if (atomic_read(&hdev->promisc)) {
3503 		/* Send copy to the sockets */
3504 		hci_send_to_sock(hdev, skb);
3505 	}
3506 
3507 	/* Get rid of skb owner, prior to sending to the driver. */
3508 	skb_orphan(skb);
3509 
3510 	err = hdev->send(hdev, skb);
3511 	if (err < 0) {
3512 		BT_ERR("%s sending frame failed (%d)", hdev->name, err);
3513 		kfree_skb(skb);
3514 	}
3515 }
3516 
3517 /* Send HCI command */
3518 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3519 		 const void *param)
3520 {
3521 	struct sk_buff *skb;
3522 
3523 	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3524 
3525 	skb = hci_prepare_cmd(hdev, opcode, plen, param);
3526 	if (!skb) {
3527 		BT_ERR("%s no memory for command", hdev->name);
3528 		return -ENOMEM;
3529 	}
3530 
3531 	/* Stand-alone HCI commands must be flagged as
3532 	 * single-command requests.
3533 	 */
3534 	bt_cb(skb)->req.start = true;
3535 
3536 	skb_queue_tail(&hdev->cmd_q, skb);
3537 	queue_work(hdev->workqueue, &hdev->cmd_work);
3538 
3539 	return 0;
3540 }
3541 
3542 /* Get data from the previously sent command */
3543 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3544 {
3545 	struct hci_command_hdr *hdr;
3546 
3547 	if (!hdev->sent_cmd)
3548 		return NULL;
3549 
3550 	hdr = (void *) hdev->sent_cmd->data;
3551 
3552 	if (hdr->opcode != cpu_to_le16(opcode))
3553 		return NULL;
3554 
3555 	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3556 
3557 	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3558 }
3559 
3560 /* Send ACL data */
3561 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3562 {
3563 	struct hci_acl_hdr *hdr;
3564 	int len = skb->len;
3565 
3566 	skb_push(skb, HCI_ACL_HDR_SIZE);
3567 	skb_reset_transport_header(skb);
3568 	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3569 	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3570 	hdr->dlen   = cpu_to_le16(len);
3571 }
3572 
3573 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3574 			  struct sk_buff *skb, __u16 flags)
3575 {
3576 	struct hci_conn *conn = chan->conn;
3577 	struct hci_dev *hdev = conn->hdev;
3578 	struct sk_buff *list;
3579 
3580 	skb->len = skb_headlen(skb);
3581 	skb->data_len = 0;
3582 
3583 	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3584 
3585 	switch (hdev->dev_type) {
3586 	case HCI_BREDR:
3587 		hci_add_acl_hdr(skb, conn->handle, flags);
3588 		break;
3589 	case HCI_AMP:
3590 		hci_add_acl_hdr(skb, chan->handle, flags);
3591 		break;
3592 	default:
3593 		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3594 		return;
3595 	}
3596 
3597 	list = skb_shinfo(skb)->frag_list;
3598 	if (!list) {
3599 		/* Non fragmented */
3600 		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3601 
3602 		skb_queue_tail(queue, skb);
3603 	} else {
3604 		/* Fragmented */
3605 		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3606 
3607 		skb_shinfo(skb)->frag_list = NULL;
3608 
3609 		/* Queue all fragments atomically. We need to use spin_lock_bh
3610 		 * here because of 6LoWPAN links: there this function is
3611 		 * called from softirq context, and using a normal spin
3612 		 * lock could cause deadlocks.
3613 		 */
3614 		spin_lock_bh(&queue->lock);
3615 
3616 		__skb_queue_tail(queue, skb);
3617 
3618 		flags &= ~ACL_START;
3619 		flags |= ACL_CONT;
3620 		do {
3621 			skb = list; list = list->next;
3622 
3623 			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3624 			hci_add_acl_hdr(skb, conn->handle, flags);
3625 
3626 			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3627 
3628 			__skb_queue_tail(queue, skb);
3629 		} while (list);
3630 
3631 		spin_unlock_bh(&queue->lock);
3632 	}
3633 }
3634 
3635 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3636 {
3637 	struct hci_dev *hdev = chan->conn->hdev;
3638 
3639 	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3640 
3641 	hci_queue_acl(chan, &chan->data_q, skb, flags);
3642 
3643 	queue_work(hdev->workqueue, &hdev->tx_work);
3644 }
3645 
3646 /* Send SCO data */
3647 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3648 {
3649 	struct hci_dev *hdev = conn->hdev;
3650 	struct hci_sco_hdr hdr;
3651 
3652 	BT_DBG("%s len %d", hdev->name, skb->len);
3653 
3654 	hdr.handle = cpu_to_le16(conn->handle);
3655 	hdr.dlen   = skb->len;
3656 
3657 	skb_push(skb, HCI_SCO_HDR_SIZE);
3658 	skb_reset_transport_header(skb);
3659 	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3660 
3661 	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
3662 
3663 	skb_queue_tail(&conn->data_q, skb);
3664 	queue_work(hdev->workqueue, &hdev->tx_work);
3665 }
3666 
3667 /* ---- HCI TX task (outgoing data) ---- */
3668 
3669 /* HCI Connection scheduler */
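/* Pick the connected link of the given type that has queued data and
 * the fewest packets already in flight, and grant it a quote of
 * controller buffers: the free buffer count divided by the number of
 * active connections of that type, but at least one. With e.g. 8 free
 * ACL buffers and 3 ACL connections with pending data, the selected
 * connection may send up to 2 packets per scheduling round.
 */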
3670 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3671 				     int *quote)
3672 {
3673 	struct hci_conn_hash *h = &hdev->conn_hash;
3674 	struct hci_conn *conn = NULL, *c;
3675 	unsigned int num = 0, min = ~0;
3676 
3677 	/* We don't have to lock device here. Connections are always
3678 	 * added and removed with TX task disabled. */
3679 
3680 	rcu_read_lock();
3681 
3682 	list_for_each_entry_rcu(c, &h->list, list) {
3683 		if (c->type != type || skb_queue_empty(&c->data_q))
3684 			continue;
3685 
3686 		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3687 			continue;
3688 
3689 		num++;
3690 
3691 		if (c->sent < min) {
3692 			min  = c->sent;
3693 			conn = c;
3694 		}
3695 
3696 		if (hci_conn_num(hdev, type) == num)
3697 			break;
3698 	}
3699 
3700 	rcu_read_unlock();
3701 
3702 	if (conn) {
3703 		int cnt, q;
3704 
3705 		switch (conn->type) {
3706 		case ACL_LINK:
3707 			cnt = hdev->acl_cnt;
3708 			break;
3709 		case SCO_LINK:
3710 		case ESCO_LINK:
3711 			cnt = hdev->sco_cnt;
3712 			break;
3713 		case LE_LINK:
3714 			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3715 			break;
3716 		default:
3717 			cnt = 0;
3718 			BT_ERR("Unknown link type");
3719 		}
3720 
3721 		q = cnt / num;
3722 		*quote = q ? q : 1;
3723 	} else
3724 		*quote = 0;
3725 
3726 	BT_DBG("conn %p quote %d", conn, *quote);
3727 	return conn;
3728 }
3729 
3730 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3731 {
3732 	struct hci_conn_hash *h = &hdev->conn_hash;
3733 	struct hci_conn *c;
3734 
3735 	BT_ERR("%s link tx timeout", hdev->name);
3736 
3737 	rcu_read_lock();
3738 
3739 	/* Kill stalled connections */
3740 	list_for_each_entry_rcu(c, &h->list, list) {
3741 		if (c->type == type && c->sent) {
3742 			BT_ERR("%s killing stalled connection %pMR",
3743 			       hdev->name, &c->dst);
3744 			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3745 		}
3746 	}
3747 
3748 	rcu_read_unlock();
3749 }
3750 
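/* Channel-level variant of the scheduler above: only channels holding
 * data at the highest currently queued skb priority are considered,
 * and among those the one on the connection with the fewest packets in
 * flight wins. The quote is computed the same way as in
 * hci_low_sent().
 */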
3751 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3752 				      int *quote)
3753 {
3754 	struct hci_conn_hash *h = &hdev->conn_hash;
3755 	struct hci_chan *chan = NULL;
3756 	unsigned int num = 0, min = ~0, cur_prio = 0;
3757 	struct hci_conn *conn;
3758 	int cnt, q, conn_num = 0;
3759 
3760 	BT_DBG("%s", hdev->name);
3761 
3762 	rcu_read_lock();
3763 
3764 	list_for_each_entry_rcu(conn, &h->list, list) {
3765 		struct hci_chan *tmp;
3766 
3767 		if (conn->type != type)
3768 			continue;
3769 
3770 		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3771 			continue;
3772 
3773 		conn_num++;
3774 
3775 		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3776 			struct sk_buff *skb;
3777 
3778 			if (skb_queue_empty(&tmp->data_q))
3779 				continue;
3780 
3781 			skb = skb_peek(&tmp->data_q);
3782 			if (skb->priority < cur_prio)
3783 				continue;
3784 
3785 			if (skb->priority > cur_prio) {
3786 				num = 0;
3787 				min = ~0;
3788 				cur_prio = skb->priority;
3789 			}
3790 
3791 			num++;
3792 
3793 			if (conn->sent < min) {
3794 				min  = conn->sent;
3795 				chan = tmp;
3796 			}
3797 		}
3798 
3799 		if (hci_conn_num(hdev, type) == conn_num)
3800 			break;
3801 	}
3802 
3803 	rcu_read_unlock();
3804 
3805 	if (!chan)
3806 		return NULL;
3807 
3808 	switch (chan->conn->type) {
3809 	case ACL_LINK:
3810 		cnt = hdev->acl_cnt;
3811 		break;
3812 	case AMP_LINK:
3813 		cnt = hdev->block_cnt;
3814 		break;
3815 	case SCO_LINK:
3816 	case ESCO_LINK:
3817 		cnt = hdev->sco_cnt;
3818 		break;
3819 	case LE_LINK:
3820 		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3821 		break;
3822 	default:
3823 		cnt = 0;
3824 		BT_ERR("Unknown link type");
3825 	}
3826 
3827 	q = cnt / num;
3828 	*quote = q ? q : 1;
3829 	BT_DBG("chan %p quote %d", chan, *quote);
3830 	return chan;
3831 }
3832 
3833 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3834 {
3835 	struct hci_conn_hash *h = &hdev->conn_hash;
3836 	struct hci_conn *conn;
3837 	int num = 0;
3838 
3839 	BT_DBG("%s", hdev->name);
3840 
3841 	rcu_read_lock();
3842 
3843 	list_for_each_entry_rcu(conn, &h->list, list) {
3844 		struct hci_chan *chan;
3845 
3846 		if (conn->type != type)
3847 			continue;
3848 
3849 		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3850 			continue;
3851 
3852 		num++;
3853 
3854 		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3855 			struct sk_buff *skb;
3856 
3857 			if (chan->sent) {
3858 				chan->sent = 0;
3859 				continue;
3860 			}
3861 
3862 			if (skb_queue_empty(&chan->data_q))
3863 				continue;
3864 
3865 			skb = skb_peek(&chan->data_q);
3866 			if (skb->priority >= HCI_PRIO_MAX - 1)
3867 				continue;
3868 
3869 			skb->priority = HCI_PRIO_MAX - 1;
3870 
3871 			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3872 			       skb->priority);
3873 		}
3874 
3875 		if (hci_conn_num(hdev, type) == num)
3876 			break;
3877 	}
3878 
3879 	rcu_read_unlock();
3880 
3881 }
3882 
3883 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3884 {
3885 	/* Calculate count of blocks used by this packet */
3886 	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3887 }
3888 
3889 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3890 {
3891 	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
3892 		/* ACL tx timeout must be longer than maximum
3893 		 * link supervision timeout (40.9 seconds) */
3894 		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3895 				       HCI_ACL_TX_TIMEOUT))
3896 			hci_link_tx_to(hdev, ACL_LINK);
3897 	}
3898 }
3899 
3900 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3901 {
3902 	unsigned int cnt = hdev->acl_cnt;
3903 	struct hci_chan *chan;
3904 	struct sk_buff *skb;
3905 	int quote;
3906 
3907 	__check_timeout(hdev, cnt);
3908 
3909 	while (hdev->acl_cnt &&
3910 	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3911 		u32 priority = (skb_peek(&chan->data_q))->priority;
3912 		while (quote-- && (skb = skb_peek(&chan->data_q))) {
3913 			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3914 			       skb->len, skb->priority);
3915 
3916 			/* Stop if priority has changed */
3917 			if (skb->priority < priority)
3918 				break;
3919 
3920 			skb = skb_dequeue(&chan->data_q);
3921 
3922 			hci_conn_enter_active_mode(chan->conn,
3923 						   bt_cb(skb)->force_active);
3924 
3925 			hci_send_frame(hdev, skb);
3926 			hdev->acl_last_tx = jiffies;
3927 
3928 			hdev->acl_cnt--;
3929 			chan->sent++;
3930 			chan->conn->sent++;
3931 		}
3932 	}
3933 
3934 	if (cnt != hdev->acl_cnt)
3935 		hci_prio_recalculate(hdev, ACL_LINK);
3936 }
3937 
3938 static void hci_sched_acl_blk(struct hci_dev *hdev)
3939 {
3940 	unsigned int cnt = hdev->block_cnt;
3941 	struct hci_chan *chan;
3942 	struct sk_buff *skb;
3943 	int quote;
3944 	u8 type;
3945 
3946 	__check_timeout(hdev, cnt);
3947 
3948 	BT_DBG("%s", hdev->name);
3949 
3950 	if (hdev->dev_type == HCI_AMP)
3951 		type = AMP_LINK;
3952 	else
3953 		type = ACL_LINK;
3954 
3955 	while (hdev->block_cnt > 0 &&
3956 	       (chan = hci_chan_sent(hdev, type, &quote))) {
3957 		u32 priority = (skb_peek(&chan->data_q))->priority;
3958 		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3959 			int blocks;
3960 
3961 			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3962 			       skb->len, skb->priority);
3963 
3964 			/* Stop if priority has changed */
3965 			if (skb->priority < priority)
3966 				break;
3967 
3968 			skb = skb_dequeue(&chan->data_q);
3969 
3970 			blocks = __get_blocks(hdev, skb);
3971 			if (blocks > hdev->block_cnt)
3972 				return;
3973 
3974 			hci_conn_enter_active_mode(chan->conn,
3975 						   bt_cb(skb)->force_active);
3976 
3977 			hci_send_frame(hdev, skb);
3978 			hdev->acl_last_tx = jiffies;
3979 
3980 			hdev->block_cnt -= blocks;
3981 			quote -= blocks;
3982 
3983 			chan->sent += blocks;
3984 			chan->conn->sent += blocks;
3985 		}
3986 	}
3987 
3988 	if (cnt != hdev->block_cnt)
3989 		hci_prio_recalculate(hdev, type);
3990 }
3991 
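/* Entry point for ACL scheduling: bail out when there is nothing to send
 * for this controller type, then dispatch to the packet-based or
 * block-based scheduler according to hdev->flow_ctl_mode.
 */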
3992 static void hci_sched_acl(struct hci_dev *hdev)
3993 {
3994 	BT_DBG("%s", hdev->name);
3995 
3996 	/* No ACL link over BR/EDR controller */
3997 	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3998 		return;
3999 
4000 	/* No AMP link over AMP controller */
4001 	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4002 		return;
4003 
4004 	switch (hdev->flow_ctl_mode) {
4005 	case HCI_FLOW_CTL_MODE_PACKET_BASED:
4006 		hci_sched_acl_pkt(hdev);
4007 		break;
4008 
4009 	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4010 		hci_sched_acl_blk(hdev);
4011 		break;
4012 	}
4013 }
4014 
4015 /* Schedule SCO */
4016 static void hci_sched_sco(struct hci_dev *hdev)
4017 {
4018 	struct hci_conn *conn;
4019 	struct sk_buff *skb;
4020 	int quote;
4021 
4022 	BT_DBG("%s", hdev->name);
4023 
4024 	if (!hci_conn_num(hdev, SCO_LINK))
4025 		return;
4026 
4027 	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4028 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4029 			BT_DBG("skb %p len %d", skb, skb->len);
4030 			hci_send_frame(hdev, skb);
4031 
4032 			conn->sent++;
4033 			if (conn->sent == ~0)
4034 				conn->sent = 0;
4035 		}
4036 	}
4037 }
4038 
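/* Schedule eSCO; draws from the same credit pool (sco_cnt) as SCO */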
4039 static void hci_sched_esco(struct hci_dev *hdev)
4040 {
4041 	struct hci_conn *conn;
4042 	struct sk_buff *skb;
4043 	int quote;
4044 
4045 	BT_DBG("%s", hdev->name);
4046 
4047 	if (!hci_conn_num(hdev, ESCO_LINK))
4048 		return;
4049 
4050 	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4051 						     &quote))) {
4052 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4053 			BT_DBG("skb %p len %d", skb, skb->len);
4054 			hci_send_frame(hdev, skb);
4055 
4056 			conn->sent++;
4057 			if (conn->sent == ~0)
4058 				conn->sent = 0;
4059 		}
4060 	}
4061 }
4062 
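/* LE scheduler: controllers with a dedicated LE buffer pool (le_pkts != 0)
 * consume le_cnt credits; otherwise LE traffic shares the ACL credits in
 * acl_cnt. The stall check mirrors the ACL one, using a fixed 45 second
 * timeout.
 */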
4063 static void hci_sched_le(struct hci_dev *hdev)
4064 {
4065 	struct hci_chan *chan;
4066 	struct sk_buff *skb;
4067 	int quote, cnt, tmp;
4068 
4069 	BT_DBG("%s", hdev->name);
4070 
4071 	if (!hci_conn_num(hdev, LE_LINK))
4072 		return;
4073 
4074 	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
4075 		/* LE tx timeout must be longer than maximum
4076 		 * link supervision timeout (40.9 seconds) */
4077 		if (!hdev->le_cnt && hdev->le_pkts &&
4078 		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
4079 			hci_link_tx_to(hdev, LE_LINK);
4080 	}
4081 
4082 	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4083 	tmp = cnt;
4084 	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4085 		u32 priority = (skb_peek(&chan->data_q))->priority;
4086 		while (quote-- && (skb = skb_peek(&chan->data_q))) {
4087 			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4088 			       skb->len, skb->priority);
4089 
4090 			/* Stop if priority has changed */
4091 			if (skb->priority < priority)
4092 				break;
4093 
4094 			skb = skb_dequeue(&chan->data_q);
4095 
4096 			hci_send_frame(hdev, skb);
4097 			hdev->le_last_tx = jiffies;
4098 
4099 			cnt--;
4100 			chan->sent++;
4101 			chan->conn->sent++;
4102 		}
4103 	}
4104 
4105 	if (hdev->le_pkts)
4106 		hdev->le_cnt = cnt;
4107 	else
4108 		hdev->acl_cnt = cnt;
4109 
4110 	if (cnt != tmp)
4111 		hci_prio_recalculate(hdev, LE_LINK);
4112 }
4113 
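/* TX work callback: run the per-link schedulers unless the device is bound
 * to a user channel, then flush any raw packets queued on raw_q straight
 * to the driver.
 */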
4114 static void hci_tx_work(struct work_struct *work)
4115 {
4116 	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4117 	struct sk_buff *skb;
4118 
4119 	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4120 	       hdev->sco_cnt, hdev->le_cnt);
4121 
4122 	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4123 		/* Schedule queues and send pending frames to the HCI driver */
4124 		hci_sched_acl(hdev);
4125 		hci_sched_sco(hdev);
4126 		hci_sched_esco(hdev);
4127 		hci_sched_le(hdev);
4128 	}
4129 
4130 	/* Send next queued raw (unknown type) packet */
4131 	while ((skb = skb_dequeue(&hdev->raw_q)))
4132 		hci_send_frame(hdev, skb);
4133 }
4134 
4135 /* ----- HCI RX task (incoming data processing) ----- */
4136 
4137 /* ACL data packet */
4138 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4139 {
4140 	struct hci_acl_hdr *hdr = (void *) skb->data;
4141 	struct hci_conn *conn;
4142 	__u16 handle, flags;
4143 
4144 	skb_pull(skb, HCI_ACL_HDR_SIZE);
4145 
4146 	handle = __le16_to_cpu(hdr->handle);
4147 	flags  = hci_flags(handle);
4148 	handle = hci_handle(handle);
4149 
4150 	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4151 	       handle, flags);
4152 
4153 	hdev->stat.acl_rx++;
4154 
4155 	hci_dev_lock(hdev);
4156 	conn = hci_conn_hash_lookup_handle(hdev, handle);
4157 	hci_dev_unlock(hdev);
4158 
4159 	if (conn) {
4160 		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4161 
4162 		/* Send to upper protocol */
4163 		l2cap_recv_acldata(conn, skb, flags);
4164 		return;
4165 	} else {
4166 		BT_ERR("%s ACL packet for unknown connection handle %d",
4167 		       hdev->name, handle);
4168 	}
4169 
4170 	kfree_skb(skb);
4171 }
4172 
4173 /* SCO data packet */
4174 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4175 {
4176 	struct hci_sco_hdr *hdr = (void *) skb->data;
4177 	struct hci_conn *conn;
4178 	__u16 handle;
4179 
4180 	skb_pull(skb, HCI_SCO_HDR_SIZE);
4181 
4182 	handle = __le16_to_cpu(hdr->handle);
4183 
4184 	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4185 
4186 	hdev->stat.sco_rx++;
4187 
4188 	hci_dev_lock(hdev);
4189 	conn = hci_conn_hash_lookup_handle(hdev, handle);
4190 	hci_dev_unlock(hdev);
4191 
4192 	if (conn) {
4193 		/* Send to upper protocol */
4194 		sco_recv_scodata(conn, skb);
4195 		return;
4196 	} else {
4197 		BT_ERR("%s SCO packet for unknown connection handle %d",
4198 		       hdev->name, handle);
4199 	}
4200 
4201 	kfree_skb(skb);
4202 }
4203 
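/* The current request is complete when the command queue is empty or the
 * next queued command starts a new request.
 */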
4204 static bool hci_req_is_complete(struct hci_dev *hdev)
4205 {
4206 	struct sk_buff *skb;
4207 
4208 	skb = skb_peek(&hdev->cmd_q);
4209 	if (!skb)
4210 		return true;
4211 
4212 	return bt_cb(skb)->req.start;
4213 }
4214 
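/* Requeue a clone of the last transmitted command (unless it was
 * HCI_Reset) and kick the command work so it is sent again.
 */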
4215 static void hci_resend_last(struct hci_dev *hdev)
4216 {
4217 	struct hci_command_hdr *sent;
4218 	struct sk_buff *skb;
4219 	u16 opcode;
4220 
4221 	if (!hdev->sent_cmd)
4222 		return;
4223 
4224 	sent = (void *) hdev->sent_cmd->data;
4225 	opcode = __le16_to_cpu(sent->opcode);
4226 	if (opcode == HCI_OP_RESET)
4227 		return;
4228 
4229 	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4230 	if (!skb)
4231 		return;
4232 
4233 	skb_queue_head(&hdev->cmd_q, skb);
4234 	queue_work(hdev->workqueue, &hdev->cmd_work);
4235 }
4236 
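/* Match a completed command against the current request: resend the last
 * command for controllers that send spontaneous resets during init, return
 * early while the request still has queued commands, and otherwise hand
 * back the request's completion callback and drop the remaining queued
 * commands that belong to the same request.
 */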
4237 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4238 			  hci_req_complete_t *req_complete,
4239 			  hci_req_complete_skb_t *req_complete_skb)
4240 {
4241 	struct sk_buff *skb;
4242 	unsigned long flags;
4243 
4244 	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4245 
4246 	/* If the completed command doesn't match the last one that was
4247 	 * sent, we need to do special handling of it.
4248 	 */
4249 	if (!hci_sent_cmd_data(hdev, opcode)) {
4250 		/* Some CSR-based controllers generate a spontaneous
4251 		 * reset complete event during init and any pending
4252 		 * command will never be completed. In such a case we
4253 		 * need to resend whatever was the last sent
4254 		 * command.
4255 		 */
4256 		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4257 			hci_resend_last(hdev);
4258 
4259 		return;
4260 	}
4261 
4262 	/* If the command succeeded and there are still more commands in
4263 	 * this request, the request is not yet complete.
4264 	 */
4265 	if (!status && !hci_req_is_complete(hdev))
4266 		return;
4267 
4268 	/* If this was the last command in a request, the complete
4269 	 * callback would be found in hdev->sent_cmd instead of the
4270 	 * command queue (hdev->cmd_q).
4271 	 */
4272 	if (bt_cb(hdev->sent_cmd)->req.complete) {
4273 		*req_complete = bt_cb(hdev->sent_cmd)->req.complete;
4274 		return;
4275 	}
4276 
4277 	if (bt_cb(hdev->sent_cmd)->req.complete_skb) {
4278 		*req_complete_skb = bt_cb(hdev->sent_cmd)->req.complete_skb;
4279 		return;
4280 	}
4281 
4282 	/* Remove all pending commands belonging to this request */
4283 	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4284 	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4285 		if (bt_cb(skb)->req.start) {
4286 			__skb_queue_head(&hdev->cmd_q, skb);
4287 			break;
4288 		}
4289 
4290 		*req_complete = bt_cb(skb)->req.complete;
4291 		*req_complete_skb = bt_cb(skb)->req.complete_skb;
4292 		kfree_skb(skb);
4293 	}
4294 	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4295 }
4296 
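/* RX work callback: drain rx_q, mirroring every packet to the monitor and,
 * in promiscuous mode, to HCI sockets, then dispatch it to the event, ACL
 * or SCO handler. Packets for user-channel devices and data packets that
 * arrive during init are dropped.
 */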
4297 static void hci_rx_work(struct work_struct *work)
4298 {
4299 	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4300 	struct sk_buff *skb;
4301 
4302 	BT_DBG("%s", hdev->name);
4303 
4304 	while ((skb = skb_dequeue(&hdev->rx_q))) {
4305 		/* Send copy to monitor */
4306 		hci_send_to_monitor(hdev, skb);
4307 
4308 		if (atomic_read(&hdev->promisc)) {
4309 			/* Send copy to the sockets */
4310 			hci_send_to_sock(hdev, skb);
4311 		}
4312 
4313 		if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4314 			kfree_skb(skb);
4315 			continue;
4316 		}
4317 
4318 		if (test_bit(HCI_INIT, &hdev->flags)) {
4319 			/* Don't process data packets in this state. */
4320 			switch (bt_cb(skb)->pkt_type) {
4321 			case HCI_ACLDATA_PKT:
4322 			case HCI_SCODATA_PKT:
4323 				kfree_skb(skb);
4324 				continue;
4325 			}
4326 		}
4327 
4328 		/* Process frame */
4329 		switch (bt_cb(skb)->pkt_type) {
4330 		case HCI_EVENT_PKT:
4331 			BT_DBG("%s Event packet", hdev->name);
4332 			hci_event_packet(hdev, skb);
4333 			break;
4334 
4335 		case HCI_ACLDATA_PKT:
4336 			BT_DBG("%s ACL data packet", hdev->name);
4337 			hci_acldata_packet(hdev, skb);
4338 			break;
4339 
4340 		case HCI_SCODATA_PKT:
4341 			BT_DBG("%s SCO data packet", hdev->name);
4342 			hci_scodata_packet(hdev, skb);
4343 			break;
4344 
4345 		default:
4346 			kfree_skb(skb);
4347 			break;
4348 		}
4349 	}
4350 }
4351 
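/* Command work callback: if the controller has a command credit, dequeue
 * the next command, keep a clone in sent_cmd for completion matching and
 * send the original, arming the command timeout (cancelled while HCI_RESET
 * is in flight). If the clone fails, requeue the command and retry.
 */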
4352 static void hci_cmd_work(struct work_struct *work)
4353 {
4354 	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4355 	struct sk_buff *skb;
4356 
4357 	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4358 	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4359 
4360 	/* Send queued commands */
4361 	if (atomic_read(&hdev->cmd_cnt)) {
4362 		skb = skb_dequeue(&hdev->cmd_q);
4363 		if (!skb)
4364 			return;
4365 
4366 		kfree_skb(hdev->sent_cmd);
4367 
4368 		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4369 		if (hdev->sent_cmd) {
4370 			atomic_dec(&hdev->cmd_cnt);
4371 			hci_send_frame(hdev, skb);
4372 			if (test_bit(HCI_RESET, &hdev->flags))
4373 				cancel_delayed_work(&hdev->cmd_timer);
4374 			else
4375 				schedule_delayed_work(&hdev->cmd_timer,
4376 						      HCI_CMD_TIMEOUT);
4377 		} else {
4378 			skb_queue_head(&hdev->cmd_q, skb);
4379 			queue_work(hdev->workqueue, &hdev->cmd_work);
4380 		}
4381 	}
4382 }
4383