xref: /openbmc/linux/net/bluetooth/hci_core.c (revision b85d4594)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2011 ProFUSION Embedded Systems
5 
6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License version 2 as
10    published by the Free Software Foundation;
11 
12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 
21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23    SOFTWARE IS DISCLAIMED.
24 */
25 
26 /* Bluetooth HCI core. */
27 
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <linux/crypto.h>
33 #include <asm/unaligned.h>
34 
35 #include <net/bluetooth/bluetooth.h>
36 #include <net/bluetooth/hci_core.h>
37 #include <net/bluetooth/l2cap.h>
38 #include <net/bluetooth/mgmt.h>
39 
40 #include "hci_request.h"
41 #include "hci_debugfs.h"
42 #include "smp.h"
43 
44 static void hci_rx_work(struct work_struct *work);
45 static void hci_cmd_work(struct work_struct *work);
46 static void hci_tx_work(struct work_struct *work);
47 
48 /* HCI device list */
49 LIST_HEAD(hci_dev_list);
50 DEFINE_RWLOCK(hci_dev_list_lock);
51 
52 /* HCI callback list */
53 LIST_HEAD(hci_cb_list);
54 DEFINE_MUTEX(hci_cb_list_lock);
55 
56 /* HCI ID Numbering */
57 static DEFINE_IDA(hci_index_ida);
58 
59 /* ----- HCI requests ----- */
60 
61 #define HCI_REQ_DONE	  0
62 #define HCI_REQ_PEND	  1
63 #define HCI_REQ_CANCELED  2
64 
65 #define hci_req_lock(d)		mutex_lock(&d->req_lock)
66 #define hci_req_unlock(d)	mutex_unlock(&d->req_lock)
67 
68 /* ---- HCI notifications ---- */
69 
70 static void hci_notify(struct hci_dev *hdev, int event)
71 {
72 	hci_sock_dev_event(hdev, event);
73 }
74 
75 /* ---- HCI debugfs entries ---- */
76 
77 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
78 			     size_t count, loff_t *ppos)
79 {
80 	struct hci_dev *hdev = file->private_data;
81 	char buf[3];
82 
83 	buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y': 'N';
84 	buf[1] = '\n';
85 	buf[2] = '\0';
86 	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
87 }
88 
89 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
90 			      size_t count, loff_t *ppos)
91 {
92 	struct hci_dev *hdev = file->private_data;
93 	struct sk_buff *skb;
94 	char buf[32];
95 	size_t buf_size = min(count, (sizeof(buf)-1));
96 	bool enable;
97 
98 	if (!test_bit(HCI_UP, &hdev->flags))
99 		return -ENETDOWN;
100 
101 	if (copy_from_user(buf, user_buf, buf_size))
102 		return -EFAULT;
103 
104 	buf[buf_size] = '\0';
105 	if (strtobool(buf, &enable))
106 		return -EINVAL;
107 
108 	if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
109 		return -EALREADY;
110 
111 	hci_req_lock(hdev);
112 	if (enable)
113 		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
114 				     HCI_CMD_TIMEOUT);
115 	else
116 		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
117 				     HCI_CMD_TIMEOUT);
118 	hci_req_unlock(hdev);
119 
120 	if (IS_ERR(skb))
121 		return PTR_ERR(skb);
122 
123 	kfree_skb(skb);
124 
125 	hci_dev_change_flag(hdev, HCI_DUT_MODE);
126 
127 	return count;
128 }
129 
130 static const struct file_operations dut_mode_fops = {
131 	.open		= simple_open,
132 	.read		= dut_mode_read,
133 	.write		= dut_mode_write,
134 	.llseek		= default_llseek,
135 };
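
/* Usage note: once __hci_init() below registers it, this entry typically
 * shows up as /sys/kernel/debug/bluetooth/hci<N>/dut_mode. Writing "Y"
 * (or "1") issues HCI_OP_ENABLE_DUT_MODE, writing "N" (or "0") resets
 * the controller to leave Device Under Test mode, and reading it
 * reports the current HCI_DUT_MODE flag.
 */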
136 
137 /* ---- HCI requests ---- */
138 
139 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
140 				  struct sk_buff *skb)
141 {
142 	BT_DBG("%s result 0x%2.2x", hdev->name, result);
143 
144 	if (hdev->req_status == HCI_REQ_PEND) {
145 		hdev->req_result = result;
146 		hdev->req_status = HCI_REQ_DONE;
147 		if (skb)
148 			hdev->req_skb = skb_get(skb);
149 		wake_up_interruptible(&hdev->req_wait_q);
150 	}
151 }
152 
153 static void hci_req_cancel(struct hci_dev *hdev, int err)
154 {
155 	BT_DBG("%s err 0x%2.2x", hdev->name, err);
156 
157 	if (hdev->req_status == HCI_REQ_PEND) {
158 		hdev->req_result = err;
159 		hdev->req_status = HCI_REQ_CANCELED;
160 		wake_up_interruptible(&hdev->req_wait_q);
161 	}
162 }
163 
164 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
165 				  const void *param, u8 event, u32 timeout)
166 {
167 	DECLARE_WAITQUEUE(wait, current);
168 	struct hci_request req;
169 	struct sk_buff *skb;
170 	int err = 0;
171 
172 	BT_DBG("%s", hdev->name);
173 
174 	hci_req_init(&req, hdev);
175 
176 	hci_req_add_ev(&req, opcode, plen, param, event);
177 
178 	hdev->req_status = HCI_REQ_PEND;
179 
180 	add_wait_queue(&hdev->req_wait_q, &wait);
181 	set_current_state(TASK_INTERRUPTIBLE);
182 
183 	err = hci_req_run_skb(&req, hci_req_sync_complete);
184 	if (err < 0) {
185 		remove_wait_queue(&hdev->req_wait_q, &wait);
186 		set_current_state(TASK_RUNNING);
187 		return ERR_PTR(err);
188 	}
189 
190 	schedule_timeout(timeout);
191 
192 	remove_wait_queue(&hdev->req_wait_q, &wait);
193 
194 	if (signal_pending(current))
195 		return ERR_PTR(-EINTR);
196 
197 	switch (hdev->req_status) {
198 	case HCI_REQ_DONE:
199 		err = -bt_to_errno(hdev->req_result);
200 		break;
201 
202 	case HCI_REQ_CANCELED:
203 		err = -hdev->req_result;
204 		break;
205 
206 	default:
207 		err = -ETIMEDOUT;
208 		break;
209 	}
210 
211 	hdev->req_status = hdev->req_result = 0;
212 	skb = hdev->req_skb;
213 	hdev->req_skb = NULL;
214 
215 	BT_DBG("%s end: err %d", hdev->name, err);
216 
217 	if (err < 0) {
218 		kfree_skb(skb);
219 		return ERR_PTR(err);
220 	}
221 
222 	if (!skb)
223 		return ERR_PTR(-ENODATA);
224 
225 	return skb;
226 }
227 EXPORT_SYMBOL(__hci_cmd_sync_ev);
228 
229 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
230 			       const void *param, u32 timeout)
231 {
232 	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
233 }
234 EXPORT_SYMBOL(__hci_cmd_sync);
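
/* Illustrative sketch (hypothetical helper, not part of the original
 * file and not called anywhere): a typical synchronous command issue,
 * mirroring dut_mode_write() above. On success the returned skb holds
 * the Command Complete parameters and must be freed by the caller.
 */
static int __maybe_unused example_sync_reset(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	hci_req_lock(hdev);
	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	kfree_skb(skb);
	return 0;
}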
235 
236 /* Execute request and wait for completion. */
237 static int __hci_req_sync(struct hci_dev *hdev,
238 			  void (*func)(struct hci_request *req,
239 				      unsigned long opt),
240 			  unsigned long opt, __u32 timeout)
241 {
242 	struct hci_request req;
243 	DECLARE_WAITQUEUE(wait, current);
244 	int err = 0;
245 
246 	BT_DBG("%s start", hdev->name);
247 
248 	hci_req_init(&req, hdev);
249 
250 	hdev->req_status = HCI_REQ_PEND;
251 
252 	func(&req, opt);
253 
254 	add_wait_queue(&hdev->req_wait_q, &wait);
255 	set_current_state(TASK_INTERRUPTIBLE);
256 
257 	err = hci_req_run_skb(&req, hci_req_sync_complete);
258 	if (err < 0) {
259 		hdev->req_status = 0;
260 
261 		remove_wait_queue(&hdev->req_wait_q, &wait);
262 		set_current_state(TASK_RUNNING);
263 
264 		/* ENODATA means the HCI request command queue is empty.
265 		 * This can happen when a request with conditionals doesn't
266 		 * trigger any commands to be sent. This is normal behavior
267 		 * and should not trigger an error return.
268 		 */
269 		if (err == -ENODATA)
270 			return 0;
271 
272 		return err;
273 	}
274 
275 	schedule_timeout(timeout);
276 
277 	remove_wait_queue(&hdev->req_wait_q, &wait);
278 
279 	if (signal_pending(current))
280 		return -EINTR;
281 
282 	switch (hdev->req_status) {
283 	case HCI_REQ_DONE:
284 		err = -bt_to_errno(hdev->req_result);
285 		break;
286 
287 	case HCI_REQ_CANCELED:
288 		err = -hdev->req_result;
289 		break;
290 
291 	default:
292 		err = -ETIMEDOUT;
293 		break;
294 	}
295 
296 	hdev->req_status = hdev->req_result = 0;
297 
298 	BT_DBG("%s end: err %d", hdev->name, err);
299 
300 	return err;
301 }
302 
303 static int hci_req_sync(struct hci_dev *hdev,
304 			void (*req)(struct hci_request *req,
305 				    unsigned long opt),
306 			unsigned long opt, __u32 timeout)
307 {
308 	int ret;
309 
310 	if (!test_bit(HCI_UP, &hdev->flags))
311 		return -ENETDOWN;
312 
313 	/* Serialize all requests */
314 	hci_req_lock(hdev);
315 	ret = __hci_req_sync(hdev, req, opt, timeout);
316 	hci_req_unlock(hdev);
317 
318 	return ret;
319 }
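
/* Usage note: the request builders below (hci_reset_req, hci_scan_req,
 * hci_auth_req, ...) only queue commands on the struct hci_request they
 * are given; they are driven through hci_req_sync()/__hci_req_sync(),
 * for example (illustrative call):
 *
 *	err = hci_req_sync(hdev, hci_scan_req, SCAN_PAGE | SCAN_INQUIRY,
 *			   HCI_INIT_TIMEOUT);
 *
 * which serializes on req_lock and blocks until the request completes,
 * is canceled or times out.
 */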
320 
321 static void hci_reset_req(struct hci_request *req, unsigned long opt)
322 {
323 	BT_DBG("%s %ld", req->hdev->name, opt);
324 
325 	/* Reset device */
326 	set_bit(HCI_RESET, &req->hdev->flags);
327 	hci_req_add(req, HCI_OP_RESET, 0, NULL);
328 }
329 
330 static void bredr_init(struct hci_request *req)
331 {
332 	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
333 
334 	/* Read Local Supported Features */
335 	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
336 
337 	/* Read Local Version */
338 	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
339 
340 	/* Read BD Address */
341 	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
342 }
343 
344 static void amp_init1(struct hci_request *req)
345 {
346 	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
347 
348 	/* Read Local Version */
349 	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
350 
351 	/* Read Local Supported Commands */
352 	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
353 
354 	/* Read Local AMP Info */
355 	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
356 
357 	/* Read Data Blk size */
358 	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
359 
360 	/* Read Flow Control Mode */
361 	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
362 
363 	/* Read Location Data */
364 	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
365 }
366 
367 static void amp_init2(struct hci_request *req)
368 {
369 	/* Read Local Supported Features. Not all AMP controllers
370 	 * support this so it's placed conditionally in the second
371 	 * stage init.
372 	 */
373 	if (req->hdev->commands[14] & 0x20)
374 		hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
375 }
376 
377 static void hci_init1_req(struct hci_request *req, unsigned long opt)
378 {
379 	struct hci_dev *hdev = req->hdev;
380 
381 	BT_DBG("%s %ld", hdev->name, opt);
382 
383 	/* Reset */
384 	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
385 		hci_reset_req(req, 0);
386 
387 	switch (hdev->dev_type) {
388 	case HCI_BREDR:
389 		bredr_init(req);
390 		break;
391 
392 	case HCI_AMP:
393 		amp_init1(req);
394 		break;
395 
396 	default:
397 		BT_ERR("Unknown device type %d", hdev->dev_type);
398 		break;
399 	}
400 }
401 
402 static void bredr_setup(struct hci_request *req)
403 {
404 	__le16 param;
405 	__u8 flt_type;
406 
407 	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
408 	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
409 
410 	/* Read Class of Device */
411 	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
412 
413 	/* Read Local Name */
414 	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
415 
416 	/* Read Voice Setting */
417 	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
418 
419 	/* Read Number of Supported IAC */
420 	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
421 
422 	/* Read Current IAC LAP */
423 	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
424 
425 	/* Clear Event Filters */
426 	flt_type = HCI_FLT_CLEAR_ALL;
427 	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
428 
429 	/* Connection accept timeout ~20 secs (0x7d00 * 0.625 ms = 20 s) */
430 	param = cpu_to_le16(0x7d00);
431 	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
432 }
433 
434 static void le_setup(struct hci_request *req)
435 {
436 	struct hci_dev *hdev = req->hdev;
437 
438 	/* Read LE Buffer Size */
439 	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
440 
441 	/* Read LE Local Supported Features */
442 	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
443 
444 	/* Read LE Supported States */
445 	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
446 
447 	/* Read LE White List Size */
448 	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
449 
450 	/* Clear LE White List */
451 	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
452 
453 	/* LE-only controllers have LE implicitly enabled */
454 	if (!lmp_bredr_capable(hdev))
455 		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
456 }
457 
458 static void hci_setup_event_mask(struct hci_request *req)
459 {
460 	struct hci_dev *hdev = req->hdev;
461 
462 	/* The second byte is 0xff instead of 0x9f (two reserved bits
463 	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
464 	 * command otherwise.
465 	 */
466 	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
467 
468 	/* CSR 1.1 dongles do not accept any bitfield so don't try to set
469 	 * any event mask for pre 1.2 devices.
470 	 */
471 	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
472 		return;
473 
474 	if (lmp_bredr_capable(hdev)) {
475 		events[4] |= 0x01; /* Flow Specification Complete */
476 		events[4] |= 0x02; /* Inquiry Result with RSSI */
477 		events[4] |= 0x04; /* Read Remote Extended Features Complete */
478 		events[5] |= 0x08; /* Synchronous Connection Complete */
479 		events[5] |= 0x10; /* Synchronous Connection Changed */
480 	} else {
481 		/* Use a different default for LE-only devices */
482 		memset(events, 0, sizeof(events));
483 		events[0] |= 0x10; /* Disconnection Complete */
484 		events[1] |= 0x08; /* Read Remote Version Information Complete */
485 		events[1] |= 0x20; /* Command Complete */
486 		events[1] |= 0x40; /* Command Status */
487 		events[1] |= 0x80; /* Hardware Error */
488 		events[2] |= 0x04; /* Number of Completed Packets */
489 		events[3] |= 0x02; /* Data Buffer Overflow */
490 
491 		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
492 			events[0] |= 0x80; /* Encryption Change */
493 			events[5] |= 0x80; /* Encryption Key Refresh Complete */
494 		}
495 	}
496 
497 	if (lmp_inq_rssi_capable(hdev))
498 		events[4] |= 0x02; /* Inquiry Result with RSSI */
499 
500 	if (lmp_sniffsubr_capable(hdev))
501 		events[5] |= 0x20; /* Sniff Subrating */
502 
503 	if (lmp_pause_enc_capable(hdev))
504 		events[5] |= 0x80; /* Encryption Key Refresh Complete */
505 
506 	if (lmp_ext_inq_capable(hdev))
507 		events[5] |= 0x40; /* Extended Inquiry Result */
508 
509 	if (lmp_no_flush_capable(hdev))
510 		events[7] |= 0x01; /* Enhanced Flush Complete */
511 
512 	if (lmp_lsto_capable(hdev))
513 		events[6] |= 0x80; /* Link Supervision Timeout Changed */
514 
515 	if (lmp_ssp_capable(hdev)) {
516 		events[6] |= 0x01;	/* IO Capability Request */
517 		events[6] |= 0x02;	/* IO Capability Response */
518 		events[6] |= 0x04;	/* User Confirmation Request */
519 		events[6] |= 0x08;	/* User Passkey Request */
520 		events[6] |= 0x10;	/* Remote OOB Data Request */
521 		events[6] |= 0x20;	/* Simple Pairing Complete */
522 		events[7] |= 0x04;	/* User Passkey Notification */
523 		events[7] |= 0x08;	/* Keypress Notification */
524 		events[7] |= 0x10;	/* Remote Host Supported
525 					 * Features Notification
526 					 */
527 	}
528 
529 	if (lmp_le_capable(hdev))
530 		events[7] |= 0x20;	/* LE Meta-Event */
531 
532 	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
533 }
534 
535 static void hci_init2_req(struct hci_request *req, unsigned long opt)
536 {
537 	struct hci_dev *hdev = req->hdev;
538 
539 	if (hdev->dev_type == HCI_AMP)
540 		return amp_init2(req);
541 
542 	if (lmp_bredr_capable(hdev))
543 		bredr_setup(req);
544 	else
545 		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
546 
547 	if (lmp_le_capable(hdev))
548 		le_setup(req);
549 
550 	/* All Bluetooth 1.2 and later controllers should support the
551 	 * HCI command for reading the local supported commands.
552 	 *
553 	 * Unfortunately some controllers indicate Bluetooth 1.2 support,
554 	 * but do not have support for this command. If that is the case,
555 	 * the driver can quirk the behavior and skip reading the local
556 	 * supported commands.
557 	 */
558 	if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
559 	    !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
560 		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
561 
562 	if (lmp_ssp_capable(hdev)) {
563 		/* When SSP is available, then the host features page
564 		 * should be available as well. However some controllers
565 		 * list max_page as 0 as long as SSP has not been enabled.
566 		 * To achieve proper debugging output, force max_page to
567 		 * at least 1.
568 		 */
569 		hdev->max_page = 0x01;
570 
571 		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
572 			u8 mode = 0x01;
573 
574 			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
575 				    sizeof(mode), &mode);
576 		} else {
577 			struct hci_cp_write_eir cp;
578 
579 			memset(hdev->eir, 0, sizeof(hdev->eir));
580 			memset(&cp, 0, sizeof(cp));
581 
582 			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
583 		}
584 	}
585 
586 	if (lmp_inq_rssi_capable(hdev) ||
587 	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
588 		u8 mode;
589 
590 		/* If Extended Inquiry Result events are supported, then
591 		 * they are clearly preferred over Inquiry Result with RSSI
592 		 * events.
593 		 */
594 		mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;
595 
596 		hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
597 	}
598 
599 	if (lmp_inq_tx_pwr_capable(hdev))
600 		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
601 
602 	if (lmp_ext_feat_capable(hdev)) {
603 		struct hci_cp_read_local_ext_features cp;
604 
605 		cp.page = 0x01;
606 		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
607 			    sizeof(cp), &cp);
608 	}
609 
610 	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
611 		u8 enable = 1;
612 		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
613 			    &enable);
614 	}
615 }
616 
617 static void hci_setup_link_policy(struct hci_request *req)
618 {
619 	struct hci_dev *hdev = req->hdev;
620 	struct hci_cp_write_def_link_policy cp;
621 	u16 link_policy = 0;
622 
623 	if (lmp_rswitch_capable(hdev))
624 		link_policy |= HCI_LP_RSWITCH;
625 	if (lmp_hold_capable(hdev))
626 		link_policy |= HCI_LP_HOLD;
627 	if (lmp_sniff_capable(hdev))
628 		link_policy |= HCI_LP_SNIFF;
629 	if (lmp_park_capable(hdev))
630 		link_policy |= HCI_LP_PARK;
631 
632 	cp.policy = cpu_to_le16(link_policy);
633 	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
634 }
635 
636 static void hci_set_le_support(struct hci_request *req)
637 {
638 	struct hci_dev *hdev = req->hdev;
639 	struct hci_cp_write_le_host_supported cp;
640 
641 	/* LE-only devices do not support explicit enablement */
642 	if (!lmp_bredr_capable(hdev))
643 		return;
644 
645 	memset(&cp, 0, sizeof(cp));
646 
647 	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
648 		cp.le = 0x01;
649 		cp.simul = 0x00;
650 	}
651 
652 	if (cp.le != lmp_host_le_capable(hdev))
653 		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
654 			    &cp);
655 }
656 
657 static void hci_set_event_mask_page_2(struct hci_request *req)
658 {
659 	struct hci_dev *hdev = req->hdev;
660 	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
661 
662 	/* If Connectionless Slave Broadcast master role is supported,
663 	 * enable all necessary events for it.
664 	 */
665 	if (lmp_csb_master_capable(hdev)) {
666 		events[1] |= 0x40;	/* Triggered Clock Capture */
667 		events[1] |= 0x80;	/* Synchronization Train Complete */
668 		events[2] |= 0x10;	/* Slave Page Response Timeout */
669 		events[2] |= 0x20;	/* CSB Channel Map Change */
670 	}
671 
672 	/* If Connectionless Slave Broadcast slave role is supported,
673 	 * enable all necessary events for it.
674 	 */
675 	if (lmp_csb_slave_capable(hdev)) {
676 		events[2] |= 0x01;	/* Synchronization Train Received */
677 		events[2] |= 0x02;	/* CSB Receive */
678 		events[2] |= 0x04;	/* CSB Timeout */
679 		events[2] |= 0x08;	/* Truncated Page Complete */
680 	}
681 
682 	/* Enable Authenticated Payload Timeout Expired event if supported */
683 	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
684 		events[2] |= 0x80;
685 
686 	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
687 }
688 
689 static void hci_init3_req(struct hci_request *req, unsigned long opt)
690 {
691 	struct hci_dev *hdev = req->hdev;
692 	u8 p;
693 
694 	hci_setup_event_mask(req);
695 
696 	if (hdev->commands[6] & 0x20) {
697 		struct hci_cp_read_stored_link_key cp;
698 
699 		bacpy(&cp.bdaddr, BDADDR_ANY);
700 		cp.read_all = 0x01;
701 		hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
702 	}
703 
704 	if (hdev->commands[5] & 0x10)
705 		hci_setup_link_policy(req);
706 
707 	if (hdev->commands[8] & 0x01)
708 		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
709 
710 	/* Some older Broadcom based Bluetooth 1.2 controllers do not
711 	 * support the Read Page Scan Type command. Check support for
712 	 * this command in the bit mask of supported commands.
713 	 */
714 	if (hdev->commands[13] & 0x01)
715 		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
716 
717 	if (lmp_le_capable(hdev)) {
718 		u8 events[8];
719 
720 		memset(events, 0, sizeof(events));
721 		events[0] = 0x0f;
722 
723 		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
724 			events[0] |= 0x10;	/* LE Long Term Key Request */
725 
726 		/* If the controller supports the Connection Parameters Request
727 		 * Link Layer Procedure, enable the corresponding event.
728 		 */
729 		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
730 			events[0] |= 0x20;	/* LE Remote Connection
731 						 * Parameter Request
732 						 */
733 
734 		/* If the controller supports the Data Length Extension
735 		 * feature, enable the corresponding event.
736 		 */
737 		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
738 			events[0] |= 0x40;	/* LE Data Length Change */
739 
740 		/* If the controller supports Extended Scanner Filter
741 		 * Policies, enable the corresponding event.
742 		 */
743 		if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
744 			events[1] |= 0x04;	/* LE Direct Advertising
745 						 * Report
746 						 */
747 
748 		/* If the controller supports the LE Read Local P-256
749 		 * Public Key command, enable the corresponding event.
750 		 */
751 		if (hdev->commands[34] & 0x02)
752 			events[0] |= 0x80;	/* LE Read Local P-256
753 						 * Public Key Complete
754 						 */
755 
756 		/* If the controller supports the LE Generate DHKey
757 		 * command, enable the corresponding event.
758 		 */
759 		if (hdev->commands[34] & 0x04)
760 			events[1] |= 0x01;	/* LE Generate DHKey Complete */
761 
762 		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
763 			    events);
764 
765 		if (hdev->commands[25] & 0x40) {
766 			/* Read LE Advertising Channel TX Power */
767 			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
768 		}
769 
770 		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
771 			/* Read LE Maximum Data Length */
772 			hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);
773 
774 			/* Read LE Suggested Default Data Length */
775 			hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
776 		}
777 
778 		hci_set_le_support(req);
779 	}
780 
781 	/* Read features beyond page 1 if available */
782 	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
783 		struct hci_cp_read_local_ext_features cp;
784 
785 		cp.page = p;
786 		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
787 			    sizeof(cp), &cp);
788 	}
789 }
790 
791 static void hci_init4_req(struct hci_request *req, unsigned long opt)
792 {
793 	struct hci_dev *hdev = req->hdev;
794 
795 	/* Some Broadcom based Bluetooth controllers do not support the
796 	 * Delete Stored Link Key command. They are clearly indicating its
797 	 * absence in the bit mask of supported commands.
798 	 *
799 	 * Check the supported commands and only if the command is marked
800 	 * as supported send it. If not supported assume that the controller
801 	 * does not have actual support for stored link keys which makes this
802 	 * command redundant anyway.
803 	 *
804 	 * Some controllers indicate that they support handling deleting
805 	 * stored link keys, but they don't. The quirk lets a driver
806 	 * just disable this command.
807 	 */
808 	if (hdev->commands[6] & 0x80 &&
809 	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
810 		struct hci_cp_delete_stored_link_key cp;
811 
812 		bacpy(&cp.bdaddr, BDADDR_ANY);
813 		cp.delete_all = 0x01;
814 		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
815 			    sizeof(cp), &cp);
816 	}
817 
818 	/* Set event mask page 2 if the HCI command for it is supported */
819 	if (hdev->commands[22] & 0x04)
820 		hci_set_event_mask_page_2(req);
821 
822 	/* Read local codec list if the HCI command is supported */
823 	if (hdev->commands[29] & 0x20)
824 		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
825 
826 	/* Get MWS transport configuration if the HCI command is supported */
827 	if (hdev->commands[30] & 0x08)
828 		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
829 
830 	/* Check for Synchronization Train support */
831 	if (lmp_sync_train_capable(hdev))
832 		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
833 
834 	/* Enable Secure Connections if supported and configured */
835 	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
836 	    bredr_sc_enabled(hdev)) {
837 		u8 support = 0x01;
838 
839 		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
840 			    sizeof(support), &support);
841 	}
842 }
843 
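/* Overview (added for exposition): __hci_init() below runs from
 * hci_dev_do_open() with HCI_INIT set. Stage 1 resets the controller and
 * reads basic information, stage 2 performs BR/EDR and LE base setup,
 * stage 3 configures event masks and LE settings, and stage 4 issues the
 * remaining optional commands. AMP controllers stop after stage 2.
 */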
844 static int __hci_init(struct hci_dev *hdev)
845 {
846 	int err;
847 
848 	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
849 	if (err < 0)
850 		return err;
851 
852 	/* The Device Under Test (DUT) mode is special and available for
853 	 * all controller types. So just create it early on.
854 	 */
855 	if (hci_dev_test_flag(hdev, HCI_SETUP)) {
856 		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
857 				    &dut_mode_fops);
858 	}
859 
860 	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
861 	if (err < 0)
862 		return err;
863 
864 	/* HCI_BREDR covers single-mode LE, single-mode BR/EDR and dual-mode
865 	 * BR/EDR/LE type controllers. AMP controllers only need the
866 	 * first two stages of init.
867 	 */
868 	if (hdev->dev_type != HCI_BREDR)
869 		return 0;
870 
871 	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
872 	if (err < 0)
873 		return err;
874 
875 	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
876 	if (err < 0)
877 		return err;
878 
879 	/* This function is only called when the controller is actually in
880 	 * configured state. When the controller is marked as unconfigured,
881 	 * this initialization procedure is not run.
882 	 *
883 	 * This means it is possible for a controller to run through its
884 	 * setup phase and then discover missing settings. If that is the
885 	 * case, this function will not be called here; it will only be
886 	 * called later, during the config phase.
887 	 *
888 	 * So only when in setup phase or config phase, create the debugfs
889 	 * entries and register the SMP channels.
890 	 */
891 	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
892 	    !hci_dev_test_flag(hdev, HCI_CONFIG))
893 		return 0;
894 
895 	hci_debugfs_create_common(hdev);
896 
897 	if (lmp_bredr_capable(hdev))
898 		hci_debugfs_create_bredr(hdev);
899 
900 	if (lmp_le_capable(hdev))
901 		hci_debugfs_create_le(hdev);
902 
903 	return 0;
904 }
905 
906 static void hci_init0_req(struct hci_request *req, unsigned long opt)
907 {
908 	struct hci_dev *hdev = req->hdev;
909 
910 	BT_DBG("%s %ld", hdev->name, opt);
911 
912 	/* Reset */
913 	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
914 		hci_reset_req(req, 0);
915 
916 	/* Read Local Version */
917 	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
918 
919 	/* Read BD Address */
920 	if (hdev->set_bdaddr)
921 		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
922 }
923 
924 static int __hci_unconf_init(struct hci_dev *hdev)
925 {
926 	int err;
927 
928 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
929 		return 0;
930 
931 	err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
932 	if (err < 0)
933 		return err;
934 
935 	return 0;
936 }
937 
938 static void hci_scan_req(struct hci_request *req, unsigned long opt)
939 {
940 	__u8 scan = opt;
941 
942 	BT_DBG("%s %x", req->hdev->name, scan);
943 
944 	/* Inquiry and Page scans */
945 	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
946 }
947 
948 static void hci_auth_req(struct hci_request *req, unsigned long opt)
949 {
950 	__u8 auth = opt;
951 
952 	BT_DBG("%s %x", req->hdev->name, auth);
953 
954 	/* Authentication */
955 	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
956 }
957 
958 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
959 {
960 	__u8 encrypt = opt;
961 
962 	BT_DBG("%s %x", req->hdev->name, encrypt);
963 
964 	/* Encryption */
965 	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
966 }
967 
968 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
969 {
970 	__le16 policy = cpu_to_le16(opt);
971 
972 	BT_DBG("%s %x", req->hdev->name, policy);
973 
974 	/* Default link policy */
975 	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
976 }
977 
978 /* Get HCI device by index.
979  * Device is held on return. */
980 struct hci_dev *hci_dev_get(int index)
981 {
982 	struct hci_dev *hdev = NULL, *d;
983 
984 	BT_DBG("%d", index);
985 
986 	if (index < 0)
987 		return NULL;
988 
989 	read_lock(&hci_dev_list_lock);
990 	list_for_each_entry(d, &hci_dev_list, list) {
991 		if (d->id == index) {
992 			hdev = hci_dev_hold(d);
993 			break;
994 		}
995 	}
996 	read_unlock(&hci_dev_list_lock);
997 	return hdev;
998 }
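
/* Illustrative sketch (hypothetical helper, not part of the original
 * file and not called anywhere): every successful hci_dev_get() must be
 * balanced with hci_dev_put(), as the ioctl helpers below do. Index 0
 * is just an example value.
 */
static void __maybe_unused example_hold_hdev(void)
{
	struct hci_dev *hdev = hci_dev_get(0);

	if (!hdev)
		return;

	BT_DBG("%s is %s", hdev->name,
	       test_bit(HCI_UP, &hdev->flags) ? "up" : "down");

	hci_dev_put(hdev);
}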
999 
1000 /* ---- Inquiry support ---- */
1001 
1002 bool hci_discovery_active(struct hci_dev *hdev)
1003 {
1004 	struct discovery_state *discov = &hdev->discovery;
1005 
1006 	switch (discov->state) {
1007 	case DISCOVERY_FINDING:
1008 	case DISCOVERY_RESOLVING:
1009 		return true;
1010 
1011 	default:
1012 		return false;
1013 	}
1014 }
1015 
1016 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1017 {
1018 	int old_state = hdev->discovery.state;
1019 
1020 	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1021 
1022 	if (old_state == state)
1023 		return;
1024 
1025 	hdev->discovery.state = state;
1026 
1027 	switch (state) {
1028 	case DISCOVERY_STOPPED:
1029 		hci_update_background_scan(hdev);
1030 
1031 		if (old_state != DISCOVERY_STARTING)
1032 			mgmt_discovering(hdev, 0);
1033 		break;
1034 	case DISCOVERY_STARTING:
1035 		break;
1036 	case DISCOVERY_FINDING:
1037 		mgmt_discovering(hdev, 1);
1038 		break;
1039 	case DISCOVERY_RESOLVING:
1040 		break;
1041 	case DISCOVERY_STOPPING:
1042 		break;
1043 	}
1044 }
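
/* Usage note: a full discovery cycle typically moves STOPPED ->
 * STARTING -> FINDING (optionally RESOLVING for remote name lookups) ->
 * STOPPING -> STOPPED, with mgmt_discovering() notifying userspace on
 * the transitions into and out of the active states.
 */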
1045 
1046 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1047 {
1048 	struct discovery_state *cache = &hdev->discovery;
1049 	struct inquiry_entry *p, *n;
1050 
1051 	list_for_each_entry_safe(p, n, &cache->all, all) {
1052 		list_del(&p->all);
1053 		kfree(p);
1054 	}
1055 
1056 	INIT_LIST_HEAD(&cache->unknown);
1057 	INIT_LIST_HEAD(&cache->resolve);
1058 }
1059 
1060 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1061 					       bdaddr_t *bdaddr)
1062 {
1063 	struct discovery_state *cache = &hdev->discovery;
1064 	struct inquiry_entry *e;
1065 
1066 	BT_DBG("cache %p, %pMR", cache, bdaddr);
1067 
1068 	list_for_each_entry(e, &cache->all, all) {
1069 		if (!bacmp(&e->data.bdaddr, bdaddr))
1070 			return e;
1071 	}
1072 
1073 	return NULL;
1074 }
1075 
1076 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1077 						       bdaddr_t *bdaddr)
1078 {
1079 	struct discovery_state *cache = &hdev->discovery;
1080 	struct inquiry_entry *e;
1081 
1082 	BT_DBG("cache %p, %pMR", cache, bdaddr);
1083 
1084 	list_for_each_entry(e, &cache->unknown, list) {
1085 		if (!bacmp(&e->data.bdaddr, bdaddr))
1086 			return e;
1087 	}
1088 
1089 	return NULL;
1090 }
1091 
1092 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1093 						       bdaddr_t *bdaddr,
1094 						       int state)
1095 {
1096 	struct discovery_state *cache = &hdev->discovery;
1097 	struct inquiry_entry *e;
1098 
1099 	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1100 
1101 	list_for_each_entry(e, &cache->resolve, list) {
1102 		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1103 			return e;
1104 		if (!bacmp(&e->data.bdaddr, bdaddr))
1105 			return e;
1106 	}
1107 
1108 	return NULL;
1109 }
1110 
1111 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
1112 				      struct inquiry_entry *ie)
1113 {
1114 	struct discovery_state *cache = &hdev->discovery;
1115 	struct list_head *pos = &cache->resolve;
1116 	struct inquiry_entry *p;
1117 
1118 	list_del(&ie->list);
1119 
1120 	list_for_each_entry(p, &cache->resolve, list) {
1121 		if (p->name_state != NAME_PENDING &&
1122 		    abs(p->data.rssi) >= abs(ie->data.rssi))
1123 			break;
1124 		pos = &p->list;
1125 	}
1126 
1127 	list_add(&ie->list, pos);
1128 }
1129 
1130 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1131 			     bool name_known)
1132 {
1133 	struct discovery_state *cache = &hdev->discovery;
1134 	struct inquiry_entry *ie;
1135 	u32 flags = 0;
1136 
1137 	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1138 
1139 	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
1140 
1141 	if (!data->ssp_mode)
1142 		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1143 
1144 	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
1145 	if (ie) {
1146 		if (!ie->data.ssp_mode)
1147 			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1148 
1149 		if (ie->name_state == NAME_NEEDED &&
1150 		    data->rssi != ie->data.rssi) {
1151 			ie->data.rssi = data->rssi;
1152 			hci_inquiry_cache_update_resolve(hdev, ie);
1153 		}
1154 
1155 		goto update;
1156 	}
1157 
1158 	/* Entry not in the cache. Add new one. */
1159 	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
1160 	if (!ie) {
1161 		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1162 		goto done;
1163 	}
1164 
1165 	list_add(&ie->all, &cache->all);
1166 
1167 	if (name_known) {
1168 		ie->name_state = NAME_KNOWN;
1169 	} else {
1170 		ie->name_state = NAME_NOT_KNOWN;
1171 		list_add(&ie->list, &cache->unknown);
1172 	}
1173 
1174 update:
1175 	if (name_known && ie->name_state != NAME_KNOWN &&
1176 	    ie->name_state != NAME_PENDING) {
1177 		ie->name_state = NAME_KNOWN;
1178 		list_del(&ie->list);
1179 	}
1180 
1181 	memcpy(&ie->data, data, sizeof(*data));
1182 	ie->timestamp = jiffies;
1183 	cache->timestamp = jiffies;
1184 
1185 	if (ie->name_state == NAME_NOT_KNOWN)
1186 		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1187 
1188 done:
1189 	return flags;
1190 }
1191 
1192 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1193 {
1194 	struct discovery_state *cache = &hdev->discovery;
1195 	struct inquiry_info *info = (struct inquiry_info *) buf;
1196 	struct inquiry_entry *e;
1197 	int copied = 0;
1198 
1199 	list_for_each_entry(e, &cache->all, all) {
1200 		struct inquiry_data *data = &e->data;
1201 
1202 		if (copied >= num)
1203 			break;
1204 
1205 		bacpy(&info->bdaddr, &data->bdaddr);
1206 		info->pscan_rep_mode	= data->pscan_rep_mode;
1207 		info->pscan_period_mode	= data->pscan_period_mode;
1208 		info->pscan_mode	= data->pscan_mode;
1209 		memcpy(info->dev_class, data->dev_class, 3);
1210 		info->clock_offset	= data->clock_offset;
1211 
1212 		info++;
1213 		copied++;
1214 	}
1215 
1216 	BT_DBG("cache %p, copied %d", cache, copied);
1217 	return copied;
1218 }
1219 
1220 static void hci_inq_req(struct hci_request *req, unsigned long opt)
1221 {
1222 	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
1223 	struct hci_dev *hdev = req->hdev;
1224 	struct hci_cp_inquiry cp;
1225 
1226 	BT_DBG("%s", hdev->name);
1227 
1228 	if (test_bit(HCI_INQUIRY, &hdev->flags))
1229 		return;
1230 
1231 	/* Start Inquiry */
1232 	memcpy(&cp.lap, &ir->lap, 3);
1233 	cp.length  = ir->length;
1234 	cp.num_rsp = ir->num_rsp;
1235 	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1236 }
1237 
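/* Usage note: hci_inquiry() below backs the HCIINQUIRY ioctl issued on a
 * raw HCI socket (this is what userspace helpers such as BlueZ's
 * hci_inquiry() use). The argument is a struct hci_inquiry_req
 * immediately followed by room for up to num_rsp (or 255 when num_rsp
 * is 0) struct inquiry_info entries, filled from the inquiry cache on
 * return.
 */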
1238 int hci_inquiry(void __user *arg)
1239 {
1240 	__u8 __user *ptr = arg;
1241 	struct hci_inquiry_req ir;
1242 	struct hci_dev *hdev;
1243 	int err = 0, do_inquiry = 0, max_rsp;
1244 	long timeo;
1245 	__u8 *buf;
1246 
1247 	if (copy_from_user(&ir, ptr, sizeof(ir)))
1248 		return -EFAULT;
1249 
1250 	hdev = hci_dev_get(ir.dev_id);
1251 	if (!hdev)
1252 		return -ENODEV;
1253 
1254 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1255 		err = -EBUSY;
1256 		goto done;
1257 	}
1258 
1259 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1260 		err = -EOPNOTSUPP;
1261 		goto done;
1262 	}
1263 
1264 	if (hdev->dev_type != HCI_BREDR) {
1265 		err = -EOPNOTSUPP;
1266 		goto done;
1267 	}
1268 
1269 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1270 		err = -EOPNOTSUPP;
1271 		goto done;
1272 	}
1273 
1274 	hci_dev_lock(hdev);
1275 	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
1276 	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1277 		hci_inquiry_cache_flush(hdev);
1278 		do_inquiry = 1;
1279 	}
1280 	hci_dev_unlock(hdev);
1281 
1282 	timeo = ir.length * msecs_to_jiffies(2000);
1283 
1284 	if (do_inquiry) {
1285 		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1286 				   timeo);
1287 		if (err < 0)
1288 			goto done;
1289 
1290 		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1291 		 * cleared). If it is interrupted by a signal, return -EINTR.
1292 		 */
1293 		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
1294 				TASK_INTERRUPTIBLE))
1295 			return -EINTR;
1296 	}
1297 
1298 	/* For an unlimited number of responses we will use a buffer with
1299 	 * 255 entries
1300 	 */
1301 	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1302 
1303 	/* cache_dump can't sleep. Therefore we allocate a temporary buffer
1304 	 * and then copy it to user space.
1305 	 */
1306 	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
1307 	if (!buf) {
1308 		err = -ENOMEM;
1309 		goto done;
1310 	}
1311 
1312 	hci_dev_lock(hdev);
1313 	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
1314 	hci_dev_unlock(hdev);
1315 
1316 	BT_DBG("num_rsp %d", ir.num_rsp);
1317 
1318 	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1319 		ptr += sizeof(ir);
1320 		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
1321 				 ir.num_rsp))
1322 			err = -EFAULT;
1323 	} else
1324 		err = -EFAULT;
1325 
1326 	kfree(buf);
1327 
1328 done:
1329 	hci_dev_put(hdev);
1330 	return err;
1331 }
1332 
1333 static int hci_dev_do_open(struct hci_dev *hdev)
1334 {
1335 	int ret = 0;
1336 
1337 	BT_DBG("%s %p", hdev->name, hdev);
1338 
1339 	hci_req_lock(hdev);
1340 
1341 	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
1342 		ret = -ENODEV;
1343 		goto done;
1344 	}
1345 
1346 	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1347 	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
1348 		/* Check for rfkill but allow the HCI setup stage to
1349 		 * proceed (which in itself doesn't cause any RF activity).
1350 		 */
1351 		if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
1352 			ret = -ERFKILL;
1353 			goto done;
1354 		}
1355 
1356 		/* Check for valid public address or a configured static
1357 		 * random address, but let the HCI setup proceed to
1358 		 * be able to determine if there is a public address
1359 		 * or not.
1360 		 *
1361 		 * In case of user channel usage, it is not important
1362 		 * if a public address or static random address is
1363 		 * available.
1364 		 *
1365 		 * This check is only valid for BR/EDR controllers
1366 		 * since AMP controllers do not have an address.
1367 		 */
1368 		if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1369 		    hdev->dev_type == HCI_BREDR &&
1370 		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1371 		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1372 			ret = -EADDRNOTAVAIL;
1373 			goto done;
1374 		}
1375 	}
1376 
1377 	if (test_bit(HCI_UP, &hdev->flags)) {
1378 		ret = -EALREADY;
1379 		goto done;
1380 	}
1381 
1382 	if (hdev->open(hdev)) {
1383 		ret = -EIO;
1384 		goto done;
1385 	}
1386 
1387 	atomic_set(&hdev->cmd_cnt, 1);
1388 	set_bit(HCI_INIT, &hdev->flags);
1389 
1390 	if (hci_dev_test_flag(hdev, HCI_SETUP)) {
1391 		if (hdev->setup)
1392 			ret = hdev->setup(hdev);
1393 
1394 		/* The transport driver can set these quirks before
1395 		 * creating the HCI device or in its setup callback.
1396 		 *
1397 		 * In case any of them is set, the controller has to
1398 		 * start up as unconfigured.
1399 		 */
1400 		if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
1401 		    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
1402 			hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
1403 
1404 		/* For an unconfigured controller it is required to
1405 		 * read at least the version information provided by
1406 		 * the Read Local Version Information command.
1407 		 *
1408 		 * If the set_bdaddr driver callback is provided, then
1409 		 * also the original Bluetooth public device address
1410 		 * will be read using the Read BD Address command.
1411 		 */
1412 		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1413 			ret = __hci_unconf_init(hdev);
1414 	}
1415 
1416 	if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
1417 		/* If public address change is configured, ensure that
1418 		 * the address gets programmed. If the driver does not
1419 		 * support changing the public address, fail the power
1420 		 * on procedure.
1421 		 */
1422 		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1423 		    hdev->set_bdaddr)
1424 			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1425 		else
1426 			ret = -EADDRNOTAVAIL;
1427 	}
1428 
1429 	if (!ret) {
1430 		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1431 		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
1432 			ret = __hci_init(hdev);
1433 	}
1434 
1435 	clear_bit(HCI_INIT, &hdev->flags);
1436 
1437 	if (!ret) {
1438 		hci_dev_hold(hdev);
1439 		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1440 		set_bit(HCI_UP, &hdev->flags);
1441 		hci_notify(hdev, HCI_DEV_UP);
1442 		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1443 		    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
1444 		    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1445 		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1446 		    hdev->dev_type == HCI_BREDR) {
1447 			hci_dev_lock(hdev);
1448 			mgmt_powered(hdev, 1);
1449 			hci_dev_unlock(hdev);
1450 		}
1451 	} else {
1452 		/* Init failed, cleanup */
1453 		flush_work(&hdev->tx_work);
1454 		flush_work(&hdev->cmd_work);
1455 		flush_work(&hdev->rx_work);
1456 
1457 		skb_queue_purge(&hdev->cmd_q);
1458 		skb_queue_purge(&hdev->rx_q);
1459 
1460 		if (hdev->flush)
1461 			hdev->flush(hdev);
1462 
1463 		if (hdev->sent_cmd) {
1464 			kfree_skb(hdev->sent_cmd);
1465 			hdev->sent_cmd = NULL;
1466 		}
1467 
1468 		hdev->close(hdev);
1469 		hdev->flags &= BIT(HCI_RAW);
1470 	}
1471 
1472 done:
1473 	hci_req_unlock(hdev);
1474 	return ret;
1475 }
1476 
1477 /* ---- HCI ioctl helpers ---- */
1478 
1479 int hci_dev_open(__u16 dev)
1480 {
1481 	struct hci_dev *hdev;
1482 	int err;
1483 
1484 	hdev = hci_dev_get(dev);
1485 	if (!hdev)
1486 		return -ENODEV;
1487 
1488 	/* Devices that are marked as unconfigured can only be powered
1489 	 * up as user channel. Trying to bring them up as normal devices
1490 	 * will result in a failure. Only user channel operation is
1491 	 * possible.
1492 	 *
1493 	 * When this function is called for a user channel, the flag
1494 	 * HCI_USER_CHANNEL will be set first before attempting to
1495 	 * open the device.
1496 	 */
1497 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1498 	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1499 		err = -EOPNOTSUPP;
1500 		goto done;
1501 	}
1502 
1503 	/* We need to ensure that no other power on/off work is pending
1504 	 * before proceeding to call hci_dev_do_open. This is
1505 	 * particularly important if the setup procedure has not yet
1506 	 * completed.
1507 	 */
1508 	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1509 		cancel_delayed_work(&hdev->power_off);
1510 
1511 	/* After this call it is guaranteed that the setup procedure
1512 	 * has finished. This means that error conditions like RFKILL
1513 	 * or no valid public or static random address apply.
1514 	 */
1515 	flush_workqueue(hdev->req_workqueue);
1516 
1517 	/* For controllers not using the management interface and that
1518 	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1519 	 * so that pairing works for them. Once the management interface
1520 	 * is in use this bit will be cleared again and userspace has
1521 	 * to explicitly enable it.
1522 	 */
1523 	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1524 	    !hci_dev_test_flag(hdev, HCI_MGMT))
1525 		hci_dev_set_flag(hdev, HCI_BONDABLE);
1526 
1527 	err = hci_dev_do_open(hdev);
1528 
1529 done:
1530 	hci_dev_put(hdev);
1531 	return err;
1532 }
1533 
1534 /* This function requires the caller holds hdev->lock */
1535 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1536 {
1537 	struct hci_conn_params *p;
1538 
1539 	list_for_each_entry(p, &hdev->le_conn_params, list) {
1540 		if (p->conn) {
1541 			hci_conn_drop(p->conn);
1542 			hci_conn_put(p->conn);
1543 			p->conn = NULL;
1544 		}
1545 		list_del_init(&p->action);
1546 	}
1547 
1548 	BT_DBG("All LE pending actions cleared");
1549 }
1550 
1551 static int hci_dev_do_close(struct hci_dev *hdev)
1552 {
1553 	BT_DBG("%s %p", hdev->name, hdev);
1554 
1555 	if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1556 	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1557 	    test_bit(HCI_UP, &hdev->flags)) {
1558 		/* Execute vendor specific shutdown routine */
1559 		if (hdev->shutdown)
1560 			hdev->shutdown(hdev);
1561 	}
1562 
1563 	cancel_delayed_work(&hdev->power_off);
1564 
1565 	hci_req_cancel(hdev, ENODEV);
1566 	hci_req_lock(hdev);
1567 
1568 	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1569 		cancel_delayed_work_sync(&hdev->cmd_timer);
1570 		hci_req_unlock(hdev);
1571 		return 0;
1572 	}
1573 
1574 	/* Flush RX and TX works */
1575 	flush_work(&hdev->tx_work);
1576 	flush_work(&hdev->rx_work);
1577 
1578 	if (hdev->discov_timeout > 0) {
1579 		cancel_delayed_work(&hdev->discov_off);
1580 		hdev->discov_timeout = 0;
1581 		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1582 		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1583 	}
1584 
1585 	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1586 		cancel_delayed_work(&hdev->service_cache);
1587 
1588 	cancel_delayed_work_sync(&hdev->le_scan_disable);
1589 	cancel_delayed_work_sync(&hdev->le_scan_restart);
1590 
1591 	if (hci_dev_test_flag(hdev, HCI_MGMT))
1592 		cancel_delayed_work_sync(&hdev->rpa_expired);
1593 
1594 	if (hdev->adv_instance_timeout) {
1595 		cancel_delayed_work_sync(&hdev->adv_instance_expire);
1596 		hdev->adv_instance_timeout = 0;
1597 	}
1598 
1599 	/* Avoid potential lockdep warnings from the *_flush() calls by
1600 	 * ensuring the workqueue is empty up front.
1601 	 */
1602 	drain_workqueue(hdev->workqueue);
1603 
1604 	hci_dev_lock(hdev);
1605 
1606 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1607 
1608 	if (!hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
1609 		if (hdev->dev_type == HCI_BREDR)
1610 			mgmt_powered(hdev, 0);
1611 	}
1612 
1613 	hci_inquiry_cache_flush(hdev);
1614 	hci_pend_le_actions_clear(hdev);
1615 	hci_conn_hash_flush(hdev);
1616 	hci_dev_unlock(hdev);
1617 
1618 	smp_unregister(hdev);
1619 
1620 	hci_notify(hdev, HCI_DEV_DOWN);
1621 
1622 	if (hdev->flush)
1623 		hdev->flush(hdev);
1624 
1625 	/* Reset device */
1626 	skb_queue_purge(&hdev->cmd_q);
1627 	atomic_set(&hdev->cmd_cnt, 1);
1628 	if (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
1629 	    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1630 	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1631 		set_bit(HCI_INIT, &hdev->flags);
1632 		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1633 		clear_bit(HCI_INIT, &hdev->flags);
1634 	}
1635 
1636 	/* Flush cmd work */
1637 	flush_work(&hdev->cmd_work);
1638 
1639 	/* Drop queues */
1640 	skb_queue_purge(&hdev->rx_q);
1641 	skb_queue_purge(&hdev->cmd_q);
1642 	skb_queue_purge(&hdev->raw_q);
1643 
1644 	/* Drop last sent command */
1645 	if (hdev->sent_cmd) {
1646 		cancel_delayed_work_sync(&hdev->cmd_timer);
1647 		kfree_skb(hdev->sent_cmd);
1648 		hdev->sent_cmd = NULL;
1649 	}
1650 
1651 	/* After this point our queues are empty
1652 	 * and no tasks are scheduled. */
1653 	hdev->close(hdev);
1654 
1655 	/* Clear flags */
1656 	hdev->flags &= BIT(HCI_RAW);
1657 	hci_dev_clear_volatile_flags(hdev);
1658 
1659 	/* Controller radio is available but is currently powered down */
1660 	hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1661 
1662 	memset(hdev->eir, 0, sizeof(hdev->eir));
1663 	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1664 	bacpy(&hdev->random_addr, BDADDR_ANY);
1665 
1666 	hci_req_unlock(hdev);
1667 
1668 	hci_dev_put(hdev);
1669 	return 0;
1670 }
1671 
1672 int hci_dev_close(__u16 dev)
1673 {
1674 	struct hci_dev *hdev;
1675 	int err;
1676 
1677 	hdev = hci_dev_get(dev);
1678 	if (!hdev)
1679 		return -ENODEV;
1680 
1681 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1682 		err = -EBUSY;
1683 		goto done;
1684 	}
1685 
1686 	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1687 		cancel_delayed_work(&hdev->power_off);
1688 
1689 	err = hci_dev_do_close(hdev);
1690 
1691 done:
1692 	hci_dev_put(hdev);
1693 	return err;
1694 }
1695 
1696 static int hci_dev_do_reset(struct hci_dev *hdev)
1697 {
1698 	int ret;
1699 
1700 	BT_DBG("%s %p", hdev->name, hdev);
1701 
1702 	hci_req_lock(hdev);
1703 
1704 	/* Drop queues */
1705 	skb_queue_purge(&hdev->rx_q);
1706 	skb_queue_purge(&hdev->cmd_q);
1707 
1708 	/* Avoid potential lockdep warnings from the *_flush() calls by
1709 	 * ensuring the workqueue is empty up front.
1710 	 */
1711 	drain_workqueue(hdev->workqueue);
1712 
1713 	hci_dev_lock(hdev);
1714 	hci_inquiry_cache_flush(hdev);
1715 	hci_conn_hash_flush(hdev);
1716 	hci_dev_unlock(hdev);
1717 
1718 	if (hdev->flush)
1719 		hdev->flush(hdev);
1720 
1721 	atomic_set(&hdev->cmd_cnt, 1);
1722 	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1723 
1724 	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1725 
1726 	hci_req_unlock(hdev);
1727 	return ret;
1728 }
1729 
1730 int hci_dev_reset(__u16 dev)
1731 {
1732 	struct hci_dev *hdev;
1733 	int err;
1734 
1735 	hdev = hci_dev_get(dev);
1736 	if (!hdev)
1737 		return -ENODEV;
1738 
1739 	if (!test_bit(HCI_UP, &hdev->flags)) {
1740 		err = -ENETDOWN;
1741 		goto done;
1742 	}
1743 
1744 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1745 		err = -EBUSY;
1746 		goto done;
1747 	}
1748 
1749 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1750 		err = -EOPNOTSUPP;
1751 		goto done;
1752 	}
1753 
1754 	err = hci_dev_do_reset(hdev);
1755 
1756 done:
1757 	hci_dev_put(hdev);
1758 	return err;
1759 }
1760 
1761 int hci_dev_reset_stat(__u16 dev)
1762 {
1763 	struct hci_dev *hdev;
1764 	int ret = 0;
1765 
1766 	hdev = hci_dev_get(dev);
1767 	if (!hdev)
1768 		return -ENODEV;
1769 
1770 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1771 		ret = -EBUSY;
1772 		goto done;
1773 	}
1774 
1775 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1776 		ret = -EOPNOTSUPP;
1777 		goto done;
1778 	}
1779 
1780 	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1781 
1782 done:
1783 	hci_dev_put(hdev);
1784 	return ret;
1785 }
1786 
1787 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1788 {
1789 	bool conn_changed, discov_changed;
1790 
1791 	BT_DBG("%s scan 0x%02x", hdev->name, scan);
1792 
1793 	if ((scan & SCAN_PAGE))
1794 		conn_changed = !hci_dev_test_and_set_flag(hdev,
1795 							  HCI_CONNECTABLE);
1796 	else
1797 		conn_changed = hci_dev_test_and_clear_flag(hdev,
1798 							   HCI_CONNECTABLE);
1799 
1800 	if ((scan & SCAN_INQUIRY)) {
1801 		discov_changed = !hci_dev_test_and_set_flag(hdev,
1802 							    HCI_DISCOVERABLE);
1803 	} else {
1804 		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1805 		discov_changed = hci_dev_test_and_clear_flag(hdev,
1806 							     HCI_DISCOVERABLE);
1807 	}
1808 
1809 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
1810 		return;
1811 
1812 	if (conn_changed || discov_changed) {
1813 		/* In case this was disabled through mgmt */
1814 		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
1815 
1816 		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1817 			mgmt_update_adv_data(hdev);
1818 
1819 		mgmt_new_settings(hdev);
1820 	}
1821 }
1822 
1823 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1824 {
1825 	struct hci_dev *hdev;
1826 	struct hci_dev_req dr;
1827 	int err = 0;
1828 
1829 	if (copy_from_user(&dr, arg, sizeof(dr)))
1830 		return -EFAULT;
1831 
1832 	hdev = hci_dev_get(dr.dev_id);
1833 	if (!hdev)
1834 		return -ENODEV;
1835 
1836 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1837 		err = -EBUSY;
1838 		goto done;
1839 	}
1840 
1841 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1842 		err = -EOPNOTSUPP;
1843 		goto done;
1844 	}
1845 
1846 	if (hdev->dev_type != HCI_BREDR) {
1847 		err = -EOPNOTSUPP;
1848 		goto done;
1849 	}
1850 
1851 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1852 		err = -EOPNOTSUPP;
1853 		goto done;
1854 	}
1855 
1856 	switch (cmd) {
1857 	case HCISETAUTH:
1858 		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1859 				   HCI_INIT_TIMEOUT);
1860 		break;
1861 
1862 	case HCISETENCRYPT:
1863 		if (!lmp_encrypt_capable(hdev)) {
1864 			err = -EOPNOTSUPP;
1865 			break;
1866 		}
1867 
1868 		if (!test_bit(HCI_AUTH, &hdev->flags)) {
1869 			/* Auth must be enabled first */
1870 			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1871 					   HCI_INIT_TIMEOUT);
1872 			if (err)
1873 				break;
1874 		}
1875 
1876 		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1877 				   HCI_INIT_TIMEOUT);
1878 		break;
1879 
1880 	case HCISETSCAN:
1881 		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1882 				   HCI_INIT_TIMEOUT);
1883 
1884 		/* Ensure that the connectable and discoverable states
1885 		 * get correctly modified as this was a non-mgmt change.
1886 		 */
1887 		if (!err)
1888 			hci_update_scan_state(hdev, dr.dev_opt);
1889 		break;
1890 
1891 	case HCISETLINKPOL:
1892 		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1893 				   HCI_INIT_TIMEOUT);
1894 		break;
1895 
1896 	case HCISETLINKMODE:
1897 		hdev->link_mode = ((__u16) dr.dev_opt) &
1898 					(HCI_LM_MASTER | HCI_LM_ACCEPT);
1899 		break;
1900 
1901 	case HCISETPTYPE:
1902 		hdev->pkt_type = (__u16) dr.dev_opt;
1903 		break;
1904 
1905 	case HCISETACLMTU:
1906 		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
1907 		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1908 		break;
1909 
1910 	case HCISETSCOMTU:
1911 		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
1912 		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1913 		break;
1914 
1915 	default:
1916 		err = -EINVAL;
1917 		break;
1918 	}
1919 
1920 done:
1921 	hci_dev_put(hdev);
1922 	return err;
1923 }
1924 
1925 int hci_get_dev_list(void __user *arg)
1926 {
1927 	struct hci_dev *hdev;
1928 	struct hci_dev_list_req *dl;
1929 	struct hci_dev_req *dr;
1930 	int n = 0, size, err;
1931 	__u16 dev_num;
1932 
1933 	if (get_user(dev_num, (__u16 __user *) arg))
1934 		return -EFAULT;
1935 
1936 	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1937 		return -EINVAL;
1938 
1939 	size = sizeof(*dl) + dev_num * sizeof(*dr);
1940 
1941 	dl = kzalloc(size, GFP_KERNEL);
1942 	if (!dl)
1943 		return -ENOMEM;
1944 
1945 	dr = dl->dev_req;
1946 
1947 	read_lock(&hci_dev_list_lock);
1948 	list_for_each_entry(hdev, &hci_dev_list, list) {
1949 		unsigned long flags = hdev->flags;
1950 
1951 		/* When the auto-off is configured it means the transport
1952 		 * is running, but in that case still indicate that the
1953 		 * device is actually down.
1954 		 */
1955 		if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
1956 			flags &= ~BIT(HCI_UP);
1957 
1958 		(dr + n)->dev_id  = hdev->id;
1959 		(dr + n)->dev_opt = flags;
1960 
1961 		if (++n >= dev_num)
1962 			break;
1963 	}
1964 	read_unlock(&hci_dev_list_lock);
1965 
1966 	dl->dev_num = n;
1967 	size = sizeof(*dl) + n * sizeof(*dr);
1968 
1969 	err = copy_to_user(arg, dl, size);
1970 	kfree(dl);
1971 
1972 	return err ? -EFAULT : 0;
1973 }
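
/* Illustrative user-space sketch (not part of this file): how the
 * HCIGETDEVLIST ioctl served by hci_get_dev_list() is typically used.
 * Assumes the BlueZ user-space headers <bluetooth/bluetooth.h> and
 * <bluetooth/hci.h>, plus <sys/ioctl.h>, <stdio.h> and <stdlib.h>.
 *
 *	struct hci_dev_list_req *dl;
 *	struct hci_dev_req *dr;
 *	int i, sk = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	dl = calloc(1, sizeof(*dl) + HCI_MAX_DEV * sizeof(*dr));
 *	if (sk < 0 || !dl)
 *		return -1;
 *
 *	dl->dev_num = HCI_MAX_DEV;
 *	dr = dl->dev_req;
 *
 *	if (!ioctl(sk, HCIGETDEVLIST, (void *) dl))
 *		for (i = 0; i < dl->dev_num; i++)
 *			printf("hci%u flags 0x%x\n", dr[i].dev_id,
 *			       dr[i].dev_opt);
 */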
1974 
1975 int hci_get_dev_info(void __user *arg)
1976 {
1977 	struct hci_dev *hdev;
1978 	struct hci_dev_info di;
1979 	unsigned long flags;
1980 	int err = 0;
1981 
1982 	if (copy_from_user(&di, arg, sizeof(di)))
1983 		return -EFAULT;
1984 
1985 	hdev = hci_dev_get(di.dev_id);
1986 	if (!hdev)
1987 		return -ENODEV;
1988 
1989 	/* When auto-off is configured, it means the transport
1990 	 * is running, but in that case still indicate that the
1991 	 * device is actually down.
1992 	 */
1993 	if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
1994 		flags = hdev->flags & ~BIT(HCI_UP);
1995 	else
1996 		flags = hdev->flags;
1997 
1998 	strcpy(di.name, hdev->name);
1999 	di.bdaddr   = hdev->bdaddr;
2000 	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2001 	di.flags    = flags;
2002 	di.pkt_type = hdev->pkt_type;
2003 	if (lmp_bredr_capable(hdev)) {
2004 		di.acl_mtu  = hdev->acl_mtu;
2005 		di.acl_pkts = hdev->acl_pkts;
2006 		di.sco_mtu  = hdev->sco_mtu;
2007 		di.sco_pkts = hdev->sco_pkts;
2008 	} else {
2009 		di.acl_mtu  = hdev->le_mtu;
2010 		di.acl_pkts = hdev->le_pkts;
2011 		di.sco_mtu  = 0;
2012 		di.sco_pkts = 0;
2013 	}
2014 	di.link_policy = hdev->link_policy;
2015 	di.link_mode   = hdev->link_mode;
2016 
2017 	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2018 	memcpy(&di.features, &hdev->features, sizeof(di.features));
2019 
2020 	if (copy_to_user(arg, &di, sizeof(di)))
2021 		err = -EFAULT;
2022 
2023 	hci_dev_put(hdev);
2024 
2025 	return err;
2026 }
2027 
2028 /* ---- Interface to HCI drivers ---- */
2029 
2030 static int hci_rfkill_set_block(void *data, bool blocked)
2031 {
2032 	struct hci_dev *hdev = data;
2033 
2034 	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2035 
2036 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2037 		return -EBUSY;
2038 
2039 	if (blocked) {
2040 		hci_dev_set_flag(hdev, HCI_RFKILLED);
2041 		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2042 		    !hci_dev_test_flag(hdev, HCI_CONFIG))
2043 			hci_dev_do_close(hdev);
2044 	} else {
2045 		hci_dev_clear_flag(hdev, HCI_RFKILLED);
2046 	}
2047 
2048 	return 0;
2049 }
2050 
2051 static const struct rfkill_ops hci_rfkill_ops = {
2052 	.set_block = hci_rfkill_set_block,
2053 };
2054 
2055 static void hci_power_on(struct work_struct *work)
2056 {
2057 	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2058 	int err;
2059 
2060 	BT_DBG("%s", hdev->name);
2061 
2062 	err = hci_dev_do_open(hdev);
2063 	if (err < 0) {
2064 		hci_dev_lock(hdev);
2065 		mgmt_set_powered_failed(hdev, err);
2066 		hci_dev_unlock(hdev);
2067 		return;
2068 	}
2069 
2070 	/* During the HCI setup phase, a few error conditions are
2071 	 * ignored and they need to be checked now. If they are still
2072 	 * valid, it is important to turn the device back off.
2073 	 */
2074 	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2075 	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2076 	    (hdev->dev_type == HCI_BREDR &&
2077 	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2078 	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2079 		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2080 		hci_dev_do_close(hdev);
2081 	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2082 		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2083 				   HCI_AUTO_OFF_TIMEOUT);
2084 	}
2085 
2086 	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2087 		/* For unconfigured devices, set the HCI_RAW flag
2088 		 * so that userspace can easily identify them.
2089 		 */
2090 		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2091 			set_bit(HCI_RAW, &hdev->flags);
2092 
2093 		/* For fully configured devices, this will send
2094 		 * the Index Added event. For unconfigured devices,
2095 		 * it will send the Unconfigured Index Added event.
2096 		 *
2097 		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2098 		 * and no event will be sent.
2099 		 */
2100 		mgmt_index_added(hdev);
2101 	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2102 		/* Now that the controller is configured, it is
2103 		 * important to clear the HCI_RAW flag.
2104 		 */
2105 		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2106 			clear_bit(HCI_RAW, &hdev->flags);
2107 
2108 		/* Powering on the controller with HCI_CONFIG set only
2109 		 * happens with the transition from unconfigured to
2110 		 * configured. This will send the Index Added event.
2111 		 */
2112 		mgmt_index_added(hdev);
2113 	}
2114 }
2115 
2116 static void hci_power_off(struct work_struct *work)
2117 {
2118 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2119 					    power_off.work);
2120 
2121 	BT_DBG("%s", hdev->name);
2122 
2123 	hci_dev_do_close(hdev);
2124 }
2125 
2126 static void hci_error_reset(struct work_struct *work)
2127 {
2128 	struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2129 
2130 	BT_DBG("%s", hdev->name);
2131 
2132 	if (hdev->hw_error)
2133 		hdev->hw_error(hdev, hdev->hw_error_code);
2134 	else
2135 		BT_ERR("%s hardware error 0x%2.2x", hdev->name,
2136 		       hdev->hw_error_code);
2137 
2138 	if (hci_dev_do_close(hdev))
2139 		return;
2140 
2141 	hci_dev_do_open(hdev);
2142 }
2143 
2144 static void hci_discov_off(struct work_struct *work)
2145 {
2146 	struct hci_dev *hdev;
2147 
2148 	hdev = container_of(work, struct hci_dev, discov_off.work);
2149 
2150 	BT_DBG("%s", hdev->name);
2151 
2152 	mgmt_discoverable_timeout(hdev);
2153 }
2154 
2155 static void hci_adv_timeout_expire(struct work_struct *work)
2156 {
2157 	struct hci_dev *hdev;
2158 
2159 	hdev = container_of(work, struct hci_dev, adv_instance_expire.work);
2160 
2161 	BT_DBG("%s", hdev->name);
2162 
2163 	mgmt_adv_timeout_expired(hdev);
2164 }
2165 
2166 void hci_uuids_clear(struct hci_dev *hdev)
2167 {
2168 	struct bt_uuid *uuid, *tmp;
2169 
2170 	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2171 		list_del(&uuid->list);
2172 		kfree(uuid);
2173 	}
2174 }
2175 
2176 void hci_link_keys_clear(struct hci_dev *hdev)
2177 {
2178 	struct link_key *key;
2179 
2180 	list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2181 		list_del_rcu(&key->list);
2182 		kfree_rcu(key, rcu);
2183 	}
2184 }
2185 
2186 void hci_smp_ltks_clear(struct hci_dev *hdev)
2187 {
2188 	struct smp_ltk *k;
2189 
2190 	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2191 		list_del_rcu(&k->list);
2192 		kfree_rcu(k, rcu);
2193 	}
2194 }
2195 
2196 void hci_smp_irks_clear(struct hci_dev *hdev)
2197 {
2198 	struct smp_irk *k;
2199 
2200 	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2201 		list_del_rcu(&k->list);
2202 		kfree_rcu(k, rcu);
2203 	}
2204 }
2205 
2206 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2207 {
2208 	struct link_key *k;
2209 
2210 	rcu_read_lock();
2211 	list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2212 		if (bacmp(bdaddr, &k->bdaddr) == 0) {
2213 			rcu_read_unlock();
2214 			return k;
2215 		}
2216 	}
2217 	rcu_read_unlock();
2218 
2219 	return NULL;
2220 }
2221 
2222 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2223 			       u8 key_type, u8 old_key_type)
2224 {
2225 	/* Legacy key */
2226 	if (key_type < 0x03)
2227 		return true;
2228 
2229 	/* Debug keys are insecure so don't store them persistently */
2230 	if (key_type == HCI_LK_DEBUG_COMBINATION)
2231 		return false;
2232 
2233 	/* Changed combination key and there's no previous one */
2234 	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2235 		return false;
2236 
2237 	/* Security mode 3 case */
2238 	if (!conn)
2239 		return true;
2240 
2241 	/* BR/EDR key derived using SC from an LE link */
2242 	if (conn->type == LE_LINK)
2243 		return true;
2244 
2245 	/* Neither local nor remote side had no-bonding as requirement */
2246 	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2247 		return true;
2248 
2249 	/* Local side had dedicated bonding as requirement */
2250 	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2251 		return true;
2252 
2253 	/* Remote side had dedicated bonding as requirement */
2254 	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2255 		return true;
2256 
2257 	/* If none of the above criteria match, then don't store the key
2258 	 * persistently */
2259 	return false;
2260 }
2261 
2262 static u8 ltk_role(u8 type)
2263 {
2264 	if (type == SMP_LTK)
2265 		return HCI_ROLE_MASTER;
2266 
2267 	return HCI_ROLE_SLAVE;
2268 }
2269 
2270 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2271 			     u8 addr_type, u8 role)
2272 {
2273 	struct smp_ltk *k;
2274 
2275 	rcu_read_lock();
2276 	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2277 		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2278 			continue;
2279 
2280 		if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2281 			rcu_read_unlock();
2282 			return k;
2283 		}
2284 	}
2285 	rcu_read_unlock();
2286 
2287 	return NULL;
2288 }
2289 
2290 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2291 {
2292 	struct smp_irk *irk;
2293 
2294 	rcu_read_lock();
2295 	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2296 		if (!bacmp(&irk->rpa, rpa)) {
2297 			rcu_read_unlock();
2298 			return irk;
2299 		}
2300 	}
2301 
2302 	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2303 		if (smp_irk_matches(hdev, irk->val, rpa)) {
2304 			bacpy(&irk->rpa, rpa);
2305 			rcu_read_unlock();
2306 			return irk;
2307 		}
2308 	}
2309 	rcu_read_unlock();
2310 
2311 	return NULL;
2312 }
2313 
2314 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2315 				     u8 addr_type)
2316 {
2317 	struct smp_irk *irk;
2318 
2319 	/* Identity Address must be public or static random */
2320 	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2321 		return NULL;
2322 
2323 	rcu_read_lock();
2324 	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2325 		if (addr_type == irk->addr_type &&
2326 		    bacmp(bdaddr, &irk->bdaddr) == 0) {
2327 			rcu_read_unlock();
2328 			return irk;
2329 		}
2330 	}
2331 	rcu_read_unlock();
2332 
2333 	return NULL;
2334 }
2335 
2336 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2337 				  bdaddr_t *bdaddr, u8 *val, u8 type,
2338 				  u8 pin_len, bool *persistent)
2339 {
2340 	struct link_key *key, *old_key;
2341 	u8 old_key_type;
2342 
2343 	old_key = hci_find_link_key(hdev, bdaddr);
2344 	if (old_key) {
2345 		old_key_type = old_key->type;
2346 		key = old_key;
2347 	} else {
2348 		old_key_type = conn ? conn->key_type : 0xff;
2349 		key = kzalloc(sizeof(*key), GFP_KERNEL);
2350 		if (!key)
2351 			return NULL;
2352 		list_add_rcu(&key->list, &hdev->link_keys);
2353 	}
2354 
2355 	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2356 
2357 	/* Some buggy controller combinations generate a changed
2358 	 * combination key for legacy pairing even when there's no
2359 	 * previous key */
2360 	if (type == HCI_LK_CHANGED_COMBINATION &&
2361 	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2362 		type = HCI_LK_COMBINATION;
2363 		if (conn)
2364 			conn->key_type = type;
2365 	}
2366 
2367 	bacpy(&key->bdaddr, bdaddr);
2368 	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2369 	key->pin_len = pin_len;
2370 
2371 	if (type == HCI_LK_CHANGED_COMBINATION)
2372 		key->type = old_key_type;
2373 	else
2374 		key->type = type;
2375 
2376 	if (persistent)
2377 		*persistent = hci_persistent_key(hdev, conn, type,
2378 						 old_key_type);
2379 
2380 	return key;
2381 }
2382 
2383 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2384 			    u8 addr_type, u8 type, u8 authenticated,
2385 			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2386 {
2387 	struct smp_ltk *key, *old_key;
2388 	u8 role = ltk_role(type);
2389 
2390 	old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2391 	if (old_key)
2392 		key = old_key;
2393 	else {
2394 		key = kzalloc(sizeof(*key), GFP_KERNEL);
2395 		if (!key)
2396 			return NULL;
2397 		list_add_rcu(&key->list, &hdev->long_term_keys);
2398 	}
2399 
2400 	bacpy(&key->bdaddr, bdaddr);
2401 	key->bdaddr_type = addr_type;
2402 	memcpy(key->val, tk, sizeof(key->val));
2403 	key->authenticated = authenticated;
2404 	key->ediv = ediv;
2405 	key->rand = rand;
2406 	key->enc_size = enc_size;
2407 	key->type = type;
2408 
2409 	return key;
2410 }
2411 
2412 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2413 			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
2414 {
2415 	struct smp_irk *irk;
2416 
2417 	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2418 	if (!irk) {
2419 		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2420 		if (!irk)
2421 			return NULL;
2422 
2423 		bacpy(&irk->bdaddr, bdaddr);
2424 		irk->addr_type = addr_type;
2425 
2426 		list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2427 	}
2428 
2429 	memcpy(irk->val, val, 16);
2430 	bacpy(&irk->rpa, rpa);
2431 
2432 	return irk;
2433 }
2434 
2435 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2436 {
2437 	struct link_key *key;
2438 
2439 	key = hci_find_link_key(hdev, bdaddr);
2440 	if (!key)
2441 		return -ENOENT;
2442 
2443 	BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2444 
2445 	list_del_rcu(&key->list);
2446 	kfree_rcu(key, rcu);
2447 
2448 	return 0;
2449 }
2450 
2451 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2452 {
2453 	struct smp_ltk *k;
2454 	int removed = 0;
2455 
2456 	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2457 		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2458 			continue;
2459 
2460 		BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2461 
2462 		list_del_rcu(&k->list);
2463 		kfree_rcu(k, rcu);
2464 		removed++;
2465 	}
2466 
2467 	return removed ? 0 : -ENOENT;
2468 }
2469 
2470 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2471 {
2472 	struct smp_irk *k;
2473 
2474 	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2475 		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2476 			continue;
2477 
2478 		BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2479 
2480 		list_del_rcu(&k->list);
2481 		kfree_rcu(k, rcu);
2482 	}
2483 }
2484 
2485 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2486 {
2487 	struct smp_ltk *k;
2488 	struct smp_irk *irk;
2489 	u8 addr_type;
2490 
2491 	if (type == BDADDR_BREDR) {
2492 		if (hci_find_link_key(hdev, bdaddr))
2493 			return true;
2494 		return false;
2495 	}
2496 
2497 	/* Convert to HCI addr type which struct smp_ltk uses */
2498 	if (type == BDADDR_LE_PUBLIC)
2499 		addr_type = ADDR_LE_DEV_PUBLIC;
2500 	else
2501 		addr_type = ADDR_LE_DEV_RANDOM;
2502 
2503 	irk = hci_get_irk(hdev, bdaddr, addr_type);
2504 	if (irk) {
2505 		bdaddr = &irk->bdaddr;
2506 		addr_type = irk->addr_type;
2507 	}
2508 
2509 	rcu_read_lock();
2510 	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2511 		if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2512 			rcu_read_unlock();
2513 			return true;
2514 		}
2515 	}
2516 	rcu_read_unlock();
2517 
2518 	return false;
2519 }
2520 
2521 /* HCI command timer function */
2522 static void hci_cmd_timeout(struct work_struct *work)
2523 {
2524 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2525 					    cmd_timer.work);
2526 
2527 	if (hdev->sent_cmd) {
2528 		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2529 		u16 opcode = __le16_to_cpu(sent->opcode);
2530 
2531 		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2532 	} else {
2533 		BT_ERR("%s command tx timeout", hdev->name);
2534 	}
2535 
2536 	atomic_set(&hdev->cmd_cnt, 1);
2537 	queue_work(hdev->workqueue, &hdev->cmd_work);
2538 }
2539 
2540 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2541 					  bdaddr_t *bdaddr, u8 bdaddr_type)
2542 {
2543 	struct oob_data *data;
2544 
2545 	list_for_each_entry(data, &hdev->remote_oob_data, list) {
2546 		if (bacmp(bdaddr, &data->bdaddr) != 0)
2547 			continue;
2548 		if (data->bdaddr_type != bdaddr_type)
2549 			continue;
2550 		return data;
2551 	}
2552 
2553 	return NULL;
2554 }
2555 
2556 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2557 			       u8 bdaddr_type)
2558 {
2559 	struct oob_data *data;
2560 
2561 	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2562 	if (!data)
2563 		return -ENOENT;
2564 
2565 	BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2566 
2567 	list_del(&data->list);
2568 	kfree(data);
2569 
2570 	return 0;
2571 }
2572 
2573 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2574 {
2575 	struct oob_data *data, *n;
2576 
2577 	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2578 		list_del(&data->list);
2579 		kfree(data);
2580 	}
2581 }
2582 
2583 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2584 			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
2585 			    u8 *hash256, u8 *rand256)
2586 {
2587 	struct oob_data *data;
2588 
2589 	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2590 	if (!data) {
2591 		data = kmalloc(sizeof(*data), GFP_KERNEL);
2592 		if (!data)
2593 			return -ENOMEM;
2594 
2595 		bacpy(&data->bdaddr, bdaddr);
2596 		data->bdaddr_type = bdaddr_type;
2597 		list_add(&data->list, &hdev->remote_oob_data);
2598 	}
2599 
2600 	if (hash192 && rand192) {
2601 		memcpy(data->hash192, hash192, sizeof(data->hash192));
2602 		memcpy(data->rand192, rand192, sizeof(data->rand192));
2603 		if (hash256 && rand256)
2604 			data->present = 0x03;
2605 	} else {
2606 		memset(data->hash192, 0, sizeof(data->hash192));
2607 		memset(data->rand192, 0, sizeof(data->rand192));
2608 		if (hash256 && rand256)
2609 			data->present = 0x02;
2610 		else
2611 			data->present = 0x00;
2612 	}
2613 
2614 	if (hash256 && rand256) {
2615 		memcpy(data->hash256, hash256, sizeof(data->hash256));
2616 		memcpy(data->rand256, rand256, sizeof(data->rand256));
2617 	} else {
2618 		memset(data->hash256, 0, sizeof(data->hash256));
2619 		memset(data->rand256, 0, sizeof(data->rand256));
2620 		if (hash192 && rand192)
2621 			data->present = 0x01;
2622 	}
2623 
2624 	BT_DBG("%s for %pMR", hdev->name, bdaddr);
2625 
2626 	return 0;
2627 }
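
/* For reference, the data->present encoding produced above:
 * 0x00 = no values, 0x01 = P-192 values only, 0x02 = P-256 values only,
 * 0x03 = both P-192 and P-256 values present.
 */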
2628 
2629 /* This function requires the caller holds hdev->lock */
2630 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2631 {
2632 	struct adv_info *adv_instance;
2633 
2634 	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2635 		if (adv_instance->instance == instance)
2636 			return adv_instance;
2637 	}
2638 
2639 	return NULL;
2640 }
2641 
2642 /* This function requires the caller holds hdev->lock */
2643 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
{
2644 	struct adv_info *cur_instance;
2645 
2646 	cur_instance = hci_find_adv_instance(hdev, instance);
2647 	if (!cur_instance)
2648 		return NULL;
2649 
2650 	if (cur_instance == list_last_entry(&hdev->adv_instances,
2651 					    struct adv_info, list))
2652 		return list_first_entry(&hdev->adv_instances,
2653 						 struct adv_info, list);
2654 	else
2655 		return list_next_entry(cur_instance, list);
2656 }
2657 
2658 /* This function requires the caller holds hdev->lock */
2659 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2660 {
2661 	struct adv_info *adv_instance;
2662 
2663 	adv_instance = hci_find_adv_instance(hdev, instance);
2664 	if (!adv_instance)
2665 		return -ENOENT;
2666 
2667 	BT_DBG("%s removing %d", hdev->name, instance);
2668 
2669 	if (hdev->cur_adv_instance == instance && hdev->adv_instance_timeout) {
2670 		cancel_delayed_work(&hdev->adv_instance_expire);
2671 		hdev->adv_instance_timeout = 0;
2672 	}
2673 
2674 	list_del(&adv_instance->list);
2675 	kfree(adv_instance);
2676 
2677 	hdev->adv_instance_cnt--;
2678 
2679 	return 0;
2680 }
2681 
2682 /* This function requires the caller holds hdev->lock */
2683 void hci_adv_instances_clear(struct hci_dev *hdev)
2684 {
2685 	struct adv_info *adv_instance, *n;
2686 
2687 	if (hdev->adv_instance_timeout) {
2688 		cancel_delayed_work(&hdev->adv_instance_expire);
2689 		hdev->adv_instance_timeout = 0;
2690 	}
2691 
2692 	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2693 		list_del(&adv_instance->list);
2694 		kfree(adv_instance);
2695 	}
2696 
2697 	hdev->adv_instance_cnt = 0;
2698 }
2699 
2700 /* This function requires the caller holds hdev->lock */
2701 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2702 			 u16 adv_data_len, u8 *adv_data,
2703 			 u16 scan_rsp_len, u8 *scan_rsp_data,
2704 			 u16 timeout, u16 duration)
2705 {
2706 	struct adv_info *adv_instance;
2707 
2708 	adv_instance = hci_find_adv_instance(hdev, instance);
2709 	if (adv_instance) {
2710 		memset(adv_instance->adv_data, 0,
2711 		       sizeof(adv_instance->adv_data));
2712 		memset(adv_instance->scan_rsp_data, 0,
2713 		       sizeof(adv_instance->scan_rsp_data));
2714 	} else {
2715 		if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
2716 		    instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2717 			return -EOVERFLOW;
2718 
2719 		adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
2720 		if (!adv_instance)
2721 			return -ENOMEM;
2722 
2723 		adv_instance->pending = true;
2724 		adv_instance->instance = instance;
2725 		list_add(&adv_instance->list, &hdev->adv_instances);
2726 		hdev->adv_instance_cnt++;
2727 	}
2728 
2729 	adv_instance->flags = flags;
2730 	adv_instance->adv_data_len = adv_data_len;
2731 	adv_instance->scan_rsp_len = scan_rsp_len;
2732 
2733 	if (adv_data_len)
2734 		memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2735 
2736 	if (scan_rsp_len)
2737 		memcpy(adv_instance->scan_rsp_data,
2738 		       scan_rsp_data, scan_rsp_len);
2739 
2740 	adv_instance->timeout = timeout;
2741 	adv_instance->remaining_time = timeout;
2742 
2743 	if (duration == 0)
2744 		adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2745 	else
2746 		adv_instance->duration = duration;
2747 
2748 	BT_DBG("%s for %d", hdev->name, instance);
2749 
2750 	return 0;
2751 }
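
/* Illustrative sketch (hypothetical caller, not part of this file):
 * registering advertising instance 1 with a minimal AD payload, no
 * overall timeout and the default per-rotation duration. Assumes
 * hdev->lock is held, as required above.
 *
 *	u8 ad[] = { 0x02, 0x01, 0x06 };	(Flags: LE General Discoverable)
 *
 *	err = hci_add_adv_instance(hdev, 0x01, 0, sizeof(ad), ad,
 *				   0, NULL, 0, 0);
 */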
2752 
2753 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2754 					 bdaddr_t *bdaddr, u8 type)
2755 {
2756 	struct bdaddr_list *b;
2757 
2758 	list_for_each_entry(b, bdaddr_list, list) {
2759 		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2760 			return b;
2761 	}
2762 
2763 	return NULL;
2764 }
2765 
2766 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2767 {
2768 	struct list_head *p, *n;
2769 
2770 	list_for_each_safe(p, n, bdaddr_list) {
2771 		struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
2772 
2773 		list_del(p);
2774 		kfree(b);
2775 	}
2776 }
2777 
2778 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2779 {
2780 	struct bdaddr_list *entry;
2781 
2782 	if (!bacmp(bdaddr, BDADDR_ANY))
2783 		return -EBADF;
2784 
2785 	if (hci_bdaddr_list_lookup(list, bdaddr, type))
2786 		return -EEXIST;
2787 
2788 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2789 	if (!entry)
2790 		return -ENOMEM;
2791 
2792 	bacpy(&entry->bdaddr, bdaddr);
2793 	entry->bdaddr_type = type;
2794 
2795 	list_add(&entry->list, list);
2796 
2797 	return 0;
2798 }
2799 
2800 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2801 {
2802 	struct bdaddr_list *entry;
2803 
2804 	if (!bacmp(bdaddr, BDADDR_ANY)) {
2805 		hci_bdaddr_list_clear(list);
2806 		return 0;
2807 	}
2808 
2809 	entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2810 	if (!entry)
2811 		return -ENOENT;
2812 
2813 	list_del(&entry->list);
2814 	kfree(entry);
2815 
2816 	return 0;
2817 }
2818 
2819 /* This function requires the caller holds hdev->lock */
2820 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2821 					       bdaddr_t *addr, u8 addr_type)
2822 {
2823 	struct hci_conn_params *params;
2824 
2825 	list_for_each_entry(params, &hdev->le_conn_params, list) {
2826 		if (bacmp(&params->addr, addr) == 0 &&
2827 		    params->addr_type == addr_type) {
2828 			return params;
2829 		}
2830 	}
2831 
2832 	return NULL;
2833 }
2834 
2835 /* This function requires the caller holds hdev->lock */
2836 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2837 						  bdaddr_t *addr, u8 addr_type)
2838 {
2839 	struct hci_conn_params *param;
2840 
2841 	list_for_each_entry(param, list, action) {
2842 		if (bacmp(&param->addr, addr) == 0 &&
2843 		    param->addr_type == addr_type)
2844 			return param;
2845 	}
2846 
2847 	return NULL;
2848 }
2849 
2850 /* This function requires the caller holds hdev->lock */
2851 struct hci_conn_params *hci_explicit_connect_lookup(struct hci_dev *hdev,
2852 						    bdaddr_t *addr,
2853 						    u8 addr_type)
2854 {
2855 	struct hci_conn_params *param;
2856 
2857 	list_for_each_entry(param, &hdev->pend_le_conns, action) {
2858 		if (bacmp(&param->addr, addr) == 0 &&
2859 		    param->addr_type == addr_type &&
2860 		    param->explicit_connect)
2861 			return param;
2862 	}
2863 
2864 	return NULL;
2865 }
2866 
2867 /* This function requires the caller holds hdev->lock */
2868 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2869 					    bdaddr_t *addr, u8 addr_type)
2870 {
2871 	struct hci_conn_params *params;
2872 
2873 	params = hci_conn_params_lookup(hdev, addr, addr_type);
2874 	if (params)
2875 		return params;
2876 
2877 	params = kzalloc(sizeof(*params), GFP_KERNEL);
2878 	if (!params) {
2879 		BT_ERR("Out of memory");
2880 		return NULL;
2881 	}
2882 
2883 	bacpy(&params->addr, addr);
2884 	params->addr_type = addr_type;
2885 
2886 	list_add(&params->list, &hdev->le_conn_params);
2887 	INIT_LIST_HEAD(&params->action);
2888 
2889 	params->conn_min_interval = hdev->le_conn_min_interval;
2890 	params->conn_max_interval = hdev->le_conn_max_interval;
2891 	params->conn_latency = hdev->le_conn_latency;
2892 	params->supervision_timeout = hdev->le_supv_timeout;
2893 	params->auto_connect = HCI_AUTO_CONN_DISABLED;
2894 
2895 	BT_DBG("addr %pMR (type %u)", addr, addr_type);
2896 
2897 	return params;
2898 }
2899 
2900 static void hci_conn_params_free(struct hci_conn_params *params)
2901 {
2902 	if (params->conn) {
2903 		hci_conn_drop(params->conn);
2904 		hci_conn_put(params->conn);
2905 	}
2906 
2907 	list_del(&params->action);
2908 	list_del(&params->list);
2909 	kfree(params);
2910 }
2911 
2912 /* This function requires the caller holds hdev->lock */
2913 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2914 {
2915 	struct hci_conn_params *params;
2916 
2917 	params = hci_conn_params_lookup(hdev, addr, addr_type);
2918 	if (!params)
2919 		return;
2920 
2921 	hci_conn_params_free(params);
2922 
2923 	hci_update_background_scan(hdev);
2924 
2925 	BT_DBG("addr %pMR (type %u)", addr, addr_type);
2926 }
2927 
2928 /* This function requires the caller holds hdev->lock */
2929 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2930 {
2931 	struct hci_conn_params *params, *tmp;
2932 
2933 	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2934 		if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2935 			continue;
2936 
2937 		/* If trying to establish a one-time connection to a disabled
2938 		 * device, leave the params, but mark them as just once.
2939 		 */
2940 		if (params->explicit_connect) {
2941 			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2942 			continue;
2943 		}
2944 
2945 		list_del(&params->list);
2946 		kfree(params);
2947 	}
2948 
2949 	BT_DBG("All LE disabled connection parameters were removed");
2950 }
2951 
2952 /* This function requires the caller holds hdev->lock */
2953 void hci_conn_params_clear_all(struct hci_dev *hdev)
2954 {
2955 	struct hci_conn_params *params, *tmp;
2956 
2957 	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2958 		hci_conn_params_free(params);
2959 
2960 	hci_update_background_scan(hdev);
2961 
2962 	BT_DBG("All LE connection parameters were removed");
2963 }
2964 
2965 static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2966 {
2967 	if (status) {
2968 		BT_ERR("Failed to start inquiry: status %d", status);
2969 
2970 		hci_dev_lock(hdev);
2971 		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2972 		hci_dev_unlock(hdev);
2973 		return;
2974 	}
2975 }
2976 
2977 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
2978 					  u16 opcode)
2979 {
2980 	/* General inquiry access code (GIAC) */
2981 	u8 lap[3] = { 0x33, 0x8b, 0x9e };
2982 	struct hci_cp_inquiry cp;
2983 	int err;
2984 
2985 	if (status) {
2986 		BT_ERR("Failed to disable LE scanning: status %d", status);
2987 		return;
2988 	}
2989 
2990 	hdev->discovery.scan_start = 0;
2991 
2992 	switch (hdev->discovery.type) {
2993 	case DISCOV_TYPE_LE:
2994 		hci_dev_lock(hdev);
2995 		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2996 		hci_dev_unlock(hdev);
2997 		break;
2998 
2999 	case DISCOV_TYPE_INTERLEAVED:
3000 		hci_dev_lock(hdev);
3001 
3002 		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
3003 			     &hdev->quirks)) {
3004 			/* If we were running LE only scan, change discovery
3005 			 * state. If we were running both LE and BR/EDR inquiry
3006 			 * simultaneously, and BR/EDR inquiry is already
3007 			 * finished, stop discovery, otherwise BR/EDR inquiry
3008 			 * will stop discovery when finished. If we are resolving
3009 			 * a remote device name, do not change the discovery state.
3010 			 */
3011 			if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
3012 			    hdev->discovery.state != DISCOVERY_RESOLVING)
3013 				hci_discovery_set_state(hdev,
3014 							DISCOVERY_STOPPED);
3015 		} else {
3016 			struct hci_request req;
3017 
3018 			hci_inquiry_cache_flush(hdev);
3019 
3020 			hci_req_init(&req, hdev);
3021 
3022 			memset(&cp, 0, sizeof(cp));
3023 			memcpy(&cp.lap, lap, sizeof(cp.lap));
3024 			cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3025 			hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3026 
3027 			err = hci_req_run(&req, inquiry_complete);
3028 			if (err) {
3029 				BT_ERR("Inquiry request failed: err %d", err);
3030 				hci_discovery_set_state(hdev,
3031 							DISCOVERY_STOPPED);
3032 			}
3033 		}
3034 
3035 		hci_dev_unlock(hdev);
3036 		break;
3037 	}
3038 }
3039 
3040 static void le_scan_disable_work(struct work_struct *work)
3041 {
3042 	struct hci_dev *hdev = container_of(work, struct hci_dev,
3043 					    le_scan_disable.work);
3044 	struct hci_request req;
3045 	int err;
3046 
3047 	BT_DBG("%s", hdev->name);
3048 
3049 	cancel_delayed_work_sync(&hdev->le_scan_restart);
3050 
3051 	hci_req_init(&req, hdev);
3052 
3053 	hci_req_add_le_scan_disable(&req);
3054 
3055 	err = hci_req_run(&req, le_scan_disable_work_complete);
3056 	if (err)
3057 		BT_ERR("Disable LE scanning request failed: err %d", err);
3058 }
3059 
3060 static void le_scan_restart_work_complete(struct hci_dev *hdev, u8 status,
3061 					  u16 opcode)
3062 {
3063 	unsigned long timeout, duration, scan_start, now;
3064 
3065 	BT_DBG("%s", hdev->name);
3066 
3067 	if (status) {
3068 		BT_ERR("Failed to restart LE scan: status %d", status);
3069 		return;
3070 	}
3071 
3072 	if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
3073 	    !hdev->discovery.scan_start)
3074 		return;
3075 
3076 	/* When the scan was started, hdev->le_scan_disable has been queued
3077 	 * after duration from scan_start. During scan restart this job
3078 	 * has been canceled, and we need to queue it again after proper
3079 	 * timeout, to make sure that scan does not run indefinitely.
3080 	 */
3081 	duration = hdev->discovery.scan_duration;
3082 	scan_start = hdev->discovery.scan_start;
3083 	now = jiffies;
3084 	if (now - scan_start <= duration) {
3085 		int elapsed;
3086 
3087 		if (now >= scan_start)
3088 			elapsed = now - scan_start;
3089 		else
3090 			elapsed = ULONG_MAX - scan_start + now;
3091 
3092 		timeout = duration - elapsed;
3093 	} else {
3094 		timeout = 0;
3095 	}
3096 	queue_delayed_work(hdev->workqueue,
3097 			   &hdev->le_scan_disable, timeout);
3098 }
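
/* Worked example of the re-queue above: with a discovery scan duration
 * of 10 seconds and 4 seconds elapsed since scan_start, the disable
 * work is re-queued with a 6 second timeout; if the full duration has
 * already passed, the timeout is 0 and the scan is disabled right away.
 */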
3099 
3100 static void le_scan_restart_work(struct work_struct *work)
3101 {
3102 	struct hci_dev *hdev = container_of(work, struct hci_dev,
3103 					    le_scan_restart.work);
3104 	struct hci_request req;
3105 	struct hci_cp_le_set_scan_enable cp;
3106 	int err;
3107 
3108 	BT_DBG("%s", hdev->name);
3109 
3110 	/* If controller is not scanning we are done. */
3111 	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
3112 		return;
3113 
3114 	hci_req_init(&req, hdev);
3115 
3116 	hci_req_add_le_scan_disable(&req);
3117 
3118 	memset(&cp, 0, sizeof(cp));
3119 	cp.enable = LE_SCAN_ENABLE;
3120 	cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3121 	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
3122 
3123 	err = hci_req_run(&req, le_scan_restart_work_complete);
3124 	if (err)
3125 		BT_ERR("Restart LE scan request failed: err %d", err);
3126 }
3127 
3128 /* Copy the Identity Address of the controller.
3129  *
3130  * If the controller has a public BD_ADDR, then by default use that one.
3131  * If this is an LE-only controller without a public address, default to
3132  * the static random address.
3133  *
3134  * For debugging purposes it is possible to force controllers with a
3135  * public address to use the static random address instead.
3136  *
3137  * In case BR/EDR has been disabled on a dual-mode controller and
3138  * userspace has configured a static address, then that address
3139  * becomes the identity address instead of the public BR/EDR address.
3140  */
3141 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3142 			       u8 *bdaddr_type)
3143 {
3144 	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
3145 	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
3146 	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
3147 	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
3148 		bacpy(bdaddr, &hdev->static_addr);
3149 		*bdaddr_type = ADDR_LE_DEV_RANDOM;
3150 	} else {
3151 		bacpy(bdaddr, &hdev->bdaddr);
3152 		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
3153 	}
3154 }
3155 
3156 /* Alloc HCI device */
3157 struct hci_dev *hci_alloc_dev(void)
3158 {
3159 	struct hci_dev *hdev;
3160 
3161 	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3162 	if (!hdev)
3163 		return NULL;
3164 
3165 	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3166 	hdev->esco_type = (ESCO_HV1);
3167 	hdev->link_mode = (HCI_LM_ACCEPT);
3168 	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
3169 	hdev->io_capability = 0x03;	/* No Input No Output */
3170 	hdev->manufacturer = 0xffff;	/* Default to internal use */
3171 	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3172 	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3173 	hdev->adv_instance_cnt = 0;
3174 	hdev->cur_adv_instance = 0x00;
3175 	hdev->adv_instance_timeout = 0;
3176 
3177 	hdev->sniff_max_interval = 800;
3178 	hdev->sniff_min_interval = 80;
3179 
3180 	hdev->le_adv_channel_map = 0x07;
3181 	hdev->le_adv_min_interval = 0x0800;
3182 	hdev->le_adv_max_interval = 0x0800;
3183 	hdev->le_scan_interval = 0x0060;
3184 	hdev->le_scan_window = 0x0030;
3185 	hdev->le_conn_min_interval = 0x0028;
3186 	hdev->le_conn_max_interval = 0x0038;
3187 	hdev->le_conn_latency = 0x0000;
3188 	hdev->le_supv_timeout = 0x002a;
3189 	hdev->le_def_tx_len = 0x001b;
3190 	hdev->le_def_tx_time = 0x0148;
3191 	hdev->le_max_tx_len = 0x001b;
3192 	hdev->le_max_tx_time = 0x0148;
3193 	hdev->le_max_rx_len = 0x001b;
3194 	hdev->le_max_rx_time = 0x0148;
3195 
3196 	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3197 	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3198 	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3199 	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3200 
3201 	mutex_init(&hdev->lock);
3202 	mutex_init(&hdev->req_lock);
3203 
3204 	INIT_LIST_HEAD(&hdev->mgmt_pending);
3205 	INIT_LIST_HEAD(&hdev->blacklist);
3206 	INIT_LIST_HEAD(&hdev->whitelist);
3207 	INIT_LIST_HEAD(&hdev->uuids);
3208 	INIT_LIST_HEAD(&hdev->link_keys);
3209 	INIT_LIST_HEAD(&hdev->long_term_keys);
3210 	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3211 	INIT_LIST_HEAD(&hdev->remote_oob_data);
3212 	INIT_LIST_HEAD(&hdev->le_white_list);
3213 	INIT_LIST_HEAD(&hdev->le_conn_params);
3214 	INIT_LIST_HEAD(&hdev->pend_le_conns);
3215 	INIT_LIST_HEAD(&hdev->pend_le_reports);
3216 	INIT_LIST_HEAD(&hdev->conn_hash.list);
3217 	INIT_LIST_HEAD(&hdev->adv_instances);
3218 
3219 	INIT_WORK(&hdev->rx_work, hci_rx_work);
3220 	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3221 	INIT_WORK(&hdev->tx_work, hci_tx_work);
3222 	INIT_WORK(&hdev->power_on, hci_power_on);
3223 	INIT_WORK(&hdev->error_reset, hci_error_reset);
3224 
3225 	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3226 	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3227 	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3228 	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
3229 	INIT_DELAYED_WORK(&hdev->adv_instance_expire, hci_adv_timeout_expire);
3230 
3231 	skb_queue_head_init(&hdev->rx_q);
3232 	skb_queue_head_init(&hdev->cmd_q);
3233 	skb_queue_head_init(&hdev->raw_q);
3234 
3235 	init_waitqueue_head(&hdev->req_wait_q);
3236 
3237 	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3238 
3239 	hci_init_sysfs(hdev);
3240 	discovery_init(hdev);
3241 
3242 	return hdev;
3243 }
3244 EXPORT_SYMBOL(hci_alloc_dev);
3245 
3246 /* Free HCI device */
3247 void hci_free_dev(struct hci_dev *hdev)
3248 {
3249 	/* will free via device release */
3250 	put_device(&hdev->dev);
3251 }
3252 EXPORT_SYMBOL(hci_free_dev);
3253 
3254 /* Register HCI device */
3255 int hci_register_dev(struct hci_dev *hdev)
3256 {
3257 	int id, error;
3258 
3259 	if (!hdev->open || !hdev->close || !hdev->send)
3260 		return -EINVAL;
3261 
3262 	/* Do not allow HCI_AMP devices to register at index 0,
3263 	 * so the index can be used as the AMP controller ID.
3264 	 */
3265 	switch (hdev->dev_type) {
3266 	case HCI_BREDR:
3267 		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3268 		break;
3269 	case HCI_AMP:
3270 		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3271 		break;
3272 	default:
3273 		return -EINVAL;
3274 	}
3275 
3276 	if (id < 0)
3277 		return id;
3278 
3279 	sprintf(hdev->name, "hci%d", id);
3280 	hdev->id = id;
3281 
3282 	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3283 
3284 	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3285 					  WQ_MEM_RECLAIM, 1, hdev->name);
3286 	if (!hdev->workqueue) {
3287 		error = -ENOMEM;
3288 		goto err;
3289 	}
3290 
3291 	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3292 					      WQ_MEM_RECLAIM, 1, hdev->name);
3293 	if (!hdev->req_workqueue) {
3294 		destroy_workqueue(hdev->workqueue);
3295 		error = -ENOMEM;
3296 		goto err;
3297 	}
3298 
3299 	if (!IS_ERR_OR_NULL(bt_debugfs))
3300 		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3301 
3302 	dev_set_name(&hdev->dev, "%s", hdev->name);
3303 
3304 	error = device_add(&hdev->dev);
3305 	if (error < 0)
3306 		goto err_wqueue;
3307 
3308 	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3309 				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3310 				    hdev);
3311 	if (hdev->rfkill) {
3312 		if (rfkill_register(hdev->rfkill) < 0) {
3313 			rfkill_destroy(hdev->rfkill);
3314 			hdev->rfkill = NULL;
3315 		}
3316 	}
3317 
3318 	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3319 		hci_dev_set_flag(hdev, HCI_RFKILLED);
3320 
3321 	hci_dev_set_flag(hdev, HCI_SETUP);
3322 	hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3323 
3324 	if (hdev->dev_type == HCI_BREDR) {
3325 		/* Assume BR/EDR support until proven otherwise (such as
3326 		 * through reading supported features during init).
3327 		 */
3328 		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3329 	}
3330 
3331 	write_lock(&hci_dev_list_lock);
3332 	list_add(&hdev->list, &hci_dev_list);
3333 	write_unlock(&hci_dev_list_lock);
3334 
3335 	/* Devices that are marked for raw-only usage are unconfigured
3336 	 * and should not be included in normal operation.
3337 	 */
3338 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3339 		hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3340 
3341 	hci_notify(hdev, HCI_DEV_REG);
3342 	hci_dev_hold(hdev);
3343 
3344 	queue_work(hdev->req_workqueue, &hdev->power_on);
3345 
3346 	return id;
3347 
3348 err_wqueue:
3349 	destroy_workqueue(hdev->workqueue);
3350 	destroy_workqueue(hdev->req_workqueue);
3351 err:
3352 	ida_simple_remove(&hci_index_ida, hdev->id);
3353 
3354 	return error;
3355 }
3356 EXPORT_SYMBOL(hci_register_dev);
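
/* Illustrative driver-side sketch (hypothetical, not part of this file):
 * the minimum a transport driver provides before registering. The
 * my_open/my_close/my_send callbacks and the priv pointer are assumed
 * placeholders for the driver's own transport glue.
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_VIRTUAL;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *	hci_set_drvdata(hdev, priv);
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0) {
 *		hci_free_dev(hdev);
 *		return err;
 *	}
 */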
3357 
3358 /* Unregister HCI device */
3359 void hci_unregister_dev(struct hci_dev *hdev)
3360 {
3361 	int id;
3362 
3363 	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3364 
3365 	hci_dev_set_flag(hdev, HCI_UNREGISTER);
3366 
3367 	id = hdev->id;
3368 
3369 	write_lock(&hci_dev_list_lock);
3370 	list_del(&hdev->list);
3371 	write_unlock(&hci_dev_list_lock);
3372 
3373 	hci_dev_do_close(hdev);
3374 
3375 	cancel_work_sync(&hdev->power_on);
3376 
3377 	if (!test_bit(HCI_INIT, &hdev->flags) &&
3378 	    !hci_dev_test_flag(hdev, HCI_SETUP) &&
3379 	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
3380 		hci_dev_lock(hdev);
3381 		mgmt_index_removed(hdev);
3382 		hci_dev_unlock(hdev);
3383 	}
3384 
3385 	/* mgmt_index_removed should take care of emptying the
3386 	 * pending list */
3387 	BUG_ON(!list_empty(&hdev->mgmt_pending));
3388 
3389 	hci_notify(hdev, HCI_DEV_UNREG);
3390 
3391 	if (hdev->rfkill) {
3392 		rfkill_unregister(hdev->rfkill);
3393 		rfkill_destroy(hdev->rfkill);
3394 	}
3395 
3396 	device_del(&hdev->dev);
3397 
3398 	debugfs_remove_recursive(hdev->debugfs);
3399 
3400 	destroy_workqueue(hdev->workqueue);
3401 	destroy_workqueue(hdev->req_workqueue);
3402 
3403 	hci_dev_lock(hdev);
3404 	hci_bdaddr_list_clear(&hdev->blacklist);
3405 	hci_bdaddr_list_clear(&hdev->whitelist);
3406 	hci_uuids_clear(hdev);
3407 	hci_link_keys_clear(hdev);
3408 	hci_smp_ltks_clear(hdev);
3409 	hci_smp_irks_clear(hdev);
3410 	hci_remote_oob_data_clear(hdev);
3411 	hci_adv_instances_clear(hdev);
3412 	hci_bdaddr_list_clear(&hdev->le_white_list);
3413 	hci_conn_params_clear_all(hdev);
3414 	hci_discovery_filter_clear(hdev);
3415 	hci_dev_unlock(hdev);
3416 
3417 	hci_dev_put(hdev);
3418 
3419 	ida_simple_remove(&hci_index_ida, id);
3420 }
3421 EXPORT_SYMBOL(hci_unregister_dev);
3422 
3423 /* Suspend HCI device */
3424 int hci_suspend_dev(struct hci_dev *hdev)
3425 {
3426 	hci_notify(hdev, HCI_DEV_SUSPEND);
3427 	return 0;
3428 }
3429 EXPORT_SYMBOL(hci_suspend_dev);
3430 
3431 /* Resume HCI device */
3432 int hci_resume_dev(struct hci_dev *hdev)
3433 {
3434 	hci_notify(hdev, HCI_DEV_RESUME);
3435 	return 0;
3436 }
3437 EXPORT_SYMBOL(hci_resume_dev);
3438 
3439 /* Reset HCI device */
3440 int hci_reset_dev(struct hci_dev *hdev)
3441 {
3442 	const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3443 	struct sk_buff *skb;
3444 
3445 	skb = bt_skb_alloc(3, GFP_ATOMIC);
3446 	if (!skb)
3447 		return -ENOMEM;
3448 
3449 	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
3450 	memcpy(skb_put(skb, 3), hw_err, 3);
3451 
3452 	/* Send Hardware Error to upper stack */
3453 	return hci_recv_frame(hdev, skb);
3454 }
3455 EXPORT_SYMBOL(hci_reset_dev);
3456 
3457 /* Receive frame from HCI drivers */
3458 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3459 {
3460 	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
3461 		      && !test_bit(HCI_INIT, &hdev->flags))) {
3462 		kfree_skb(skb);
3463 		return -ENXIO;
3464 	}
3465 
3466 	/* Incoming skb */
3467 	bt_cb(skb)->incoming = 1;
3468 
3469 	/* Time stamp */
3470 	__net_timestamp(skb);
3471 
3472 	skb_queue_tail(&hdev->rx_q, skb);
3473 	queue_work(hdev->workqueue, &hdev->rx_work);
3474 
3475 	return 0;
3476 }
3477 EXPORT_SYMBOL(hci_recv_frame);
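
/* Illustrative sketch (hypothetical driver receive path, not part of
 * this file): handing a received HCI event up the stack, mirroring the
 * pattern used by hci_reset_dev() above. The buf/len values are assumed
 * to come from the driver's transport.
 *
 *	skb = bt_skb_alloc(len, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	memcpy(skb_put(skb, len), buf, len);
 *
 *	return hci_recv_frame(hdev, skb);
 */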
3478 
3479 /* ---- Interface to upper protocols ---- */
3480 
3481 int hci_register_cb(struct hci_cb *cb)
3482 {
3483 	BT_DBG("%p name %s", cb, cb->name);
3484 
3485 	mutex_lock(&hci_cb_list_lock);
3486 	list_add_tail(&cb->list, &hci_cb_list);
3487 	mutex_unlock(&hci_cb_list_lock);
3488 
3489 	return 0;
3490 }
3491 EXPORT_SYMBOL(hci_register_cb);
3492 
3493 int hci_unregister_cb(struct hci_cb *cb)
3494 {
3495 	BT_DBG("%p name %s", cb, cb->name);
3496 
3497 	mutex_lock(&hci_cb_list_lock);
3498 	list_del(&cb->list);
3499 	mutex_unlock(&hci_cb_list_lock);
3500 
3501 	return 0;
3502 }
3503 EXPORT_SYMBOL(hci_unregister_cb);
3504 
3505 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3506 {
3507 	int err;
3508 
3509 	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
3510 
3511 	/* Time stamp */
3512 	__net_timestamp(skb);
3513 
3514 	/* Send copy to monitor */
3515 	hci_send_to_monitor(hdev, skb);
3516 
3517 	if (atomic_read(&hdev->promisc)) {
3518 		/* Send copy to the sockets */
3519 		hci_send_to_sock(hdev, skb);
3520 	}
3521 
3522 	/* Get rid of skb owner, prior to sending to the driver. */
3523 	skb_orphan(skb);
3524 
3525 	err = hdev->send(hdev, skb);
3526 	if (err < 0) {
3527 		BT_ERR("%s sending frame failed (%d)", hdev->name, err);
3528 		kfree_skb(skb);
3529 	}
3530 }
3531 
3532 /* Send HCI command */
3533 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3534 		 const void *param)
3535 {
3536 	struct sk_buff *skb;
3537 
3538 	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3539 
3540 	skb = hci_prepare_cmd(hdev, opcode, plen, param);
3541 	if (!skb) {
3542 		BT_ERR("%s no memory for command", hdev->name);
3543 		return -ENOMEM;
3544 	}
3545 
3546 	/* Stand-alone HCI commands must be flagged as
3547 	 * single-command requests.
3548 	 */
3549 	bt_cb(skb)->req.start = true;
3550 
3551 	skb_queue_tail(&hdev->cmd_q, skb);
3552 	queue_work(hdev->workqueue, &hdev->cmd_work);
3553 
3554 	return 0;
3555 }
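
/* Illustrative sketch (not part of this file): queueing stand-alone
 * commands with hci_send_cmd(). HCI_OP_RESET takes no parameters;
 * HCI_OP_WRITE_SCAN_ENABLE takes a single scan-enable byte.
 *
 *	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
 *
 *	u8 scan = SCAN_PAGE | SCAN_INQUIRY;
 *	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 */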
3556 
3557 /* Get data from the previously sent command */
3558 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3559 {
3560 	struct hci_command_hdr *hdr;
3561 
3562 	if (!hdev->sent_cmd)
3563 		return NULL;
3564 
3565 	hdr = (void *) hdev->sent_cmd->data;
3566 
3567 	if (hdr->opcode != cpu_to_le16(opcode))
3568 		return NULL;
3569 
3570 	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3571 
3572 	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3573 }
3574 
3575 /* Send ACL data */
3576 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3577 {
3578 	struct hci_acl_hdr *hdr;
3579 	int len = skb->len;
3580 
3581 	skb_push(skb, HCI_ACL_HDR_SIZE);
3582 	skb_reset_transport_header(skb);
3583 	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3584 	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3585 	hdr->dlen   = cpu_to_le16(len);
3586 }
3587 
3588 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3589 			  struct sk_buff *skb, __u16 flags)
3590 {
3591 	struct hci_conn *conn = chan->conn;
3592 	struct hci_dev *hdev = conn->hdev;
3593 	struct sk_buff *list;
3594 
3595 	skb->len = skb_headlen(skb);
3596 	skb->data_len = 0;
3597 
3598 	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3599 
3600 	switch (hdev->dev_type) {
3601 	case HCI_BREDR:
3602 		hci_add_acl_hdr(skb, conn->handle, flags);
3603 		break;
3604 	case HCI_AMP:
3605 		hci_add_acl_hdr(skb, chan->handle, flags);
3606 		break;
3607 	default:
3608 		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3609 		return;
3610 	}
3611 
3612 	list = skb_shinfo(skb)->frag_list;
3613 	if (!list) {
3614 		/* Non fragmented */
3615 		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3616 
3617 		skb_queue_tail(queue, skb);
3618 	} else {
3619 		/* Fragmented */
3620 		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3621 
3622 		skb_shinfo(skb)->frag_list = NULL;
3623 
3624 		/* Queue all fragments atomically. We need to use spin_lock_bh
3625 		 * here because with 6LoWPAN links this function can be called
3626 		 * from softirq context, and using a normal spin lock could
3627 		 * cause deadlocks.
3628 		 */
3629 		spin_lock_bh(&queue->lock);
3630 
3631 		__skb_queue_tail(queue, skb);
3632 
3633 		flags &= ~ACL_START;
3634 		flags |= ACL_CONT;
3635 		do {
3636 			skb = list; list = list->next;
3637 
3638 			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3639 			hci_add_acl_hdr(skb, conn->handle, flags);
3640 
3641 			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3642 
3643 			__skb_queue_tail(queue, skb);
3644 		} while (list);
3645 
3646 		spin_unlock_bh(&queue->lock);
3647 	}
3648 }
3649 
3650 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3651 {
3652 	struct hci_dev *hdev = chan->conn->hdev;
3653 
3654 	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3655 
3656 	hci_queue_acl(chan, &chan->data_q, skb, flags);
3657 
3658 	queue_work(hdev->workqueue, &hdev->tx_work);
3659 }
3660 
3661 /* Send SCO data */
3662 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3663 {
3664 	struct hci_dev *hdev = conn->hdev;
3665 	struct hci_sco_hdr hdr;
3666 
3667 	BT_DBG("%s len %d", hdev->name, skb->len);
3668 
3669 	hdr.handle = cpu_to_le16(conn->handle);
3670 	hdr.dlen   = skb->len;
3671 
3672 	skb_push(skb, HCI_SCO_HDR_SIZE);
3673 	skb_reset_transport_header(skb);
3674 	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3675 
3676 	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
3677 
3678 	skb_queue_tail(&conn->data_q, skb);
3679 	queue_work(hdev->workqueue, &hdev->tx_work);
3680 }
3681 
3682 /* ---- HCI TX task (outgoing data) ---- */
3683 
3684 /* HCI Connection scheduler */
3685 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3686 				     int *quote)
3687 {
3688 	struct hci_conn_hash *h = &hdev->conn_hash;
3689 	struct hci_conn *conn = NULL, *c;
3690 	unsigned int num = 0, min = ~0;
3691 
3692 	/* We don't have to lock device here. Connections are always
3693 	 * added and removed with TX task disabled. */
3694 
3695 	rcu_read_lock();
3696 
3697 	list_for_each_entry_rcu(c, &h->list, list) {
3698 		if (c->type != type || skb_queue_empty(&c->data_q))
3699 			continue;
3700 
3701 		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3702 			continue;
3703 
3704 		num++;
3705 
3706 		if (c->sent < min) {
3707 			min  = c->sent;
3708 			conn = c;
3709 		}
3710 
3711 		if (hci_conn_num(hdev, type) == num)
3712 			break;
3713 	}
3714 
3715 	rcu_read_unlock();
3716 
3717 	if (conn) {
3718 		int cnt, q;
3719 
3720 		switch (conn->type) {
3721 		case ACL_LINK:
3722 			cnt = hdev->acl_cnt;
3723 			break;
3724 		case SCO_LINK:
3725 		case ESCO_LINK:
3726 			cnt = hdev->sco_cnt;
3727 			break;
3728 		case LE_LINK:
3729 			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3730 			break;
3731 		default:
3732 			cnt = 0;
3733 			BT_ERR("Unknown link type");
3734 		}
3735 
3736 		q = cnt / num;
3737 		*quote = q ? q : 1;
3738 	} else
3739 		*quote = 0;
3740 
3741 	BT_DBG("conn %p quote %d", conn, *quote);
3742 	return conn;
3743 }
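
/* Worked example of the quota computed above: with hdev->acl_cnt == 5
 * and two ACL connections holding queued data, q = 5 / 2 = 2, so the
 * selected (least-busy) connection may send up to 2 packets this round;
 * if cnt drops below num, the quote still floors at 1.
 */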
3744 
3745 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3746 {
3747 	struct hci_conn_hash *h = &hdev->conn_hash;
3748 	struct hci_conn *c;
3749 
3750 	BT_ERR("%s link tx timeout", hdev->name);
3751 
3752 	rcu_read_lock();
3753 
3754 	/* Kill stalled connections */
3755 	list_for_each_entry_rcu(c, &h->list, list) {
3756 		if (c->type == type && c->sent) {
3757 			BT_ERR("%s killing stalled connection %pMR",
3758 			       hdev->name, &c->dst);
3759 			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3760 		}
3761 	}
3762 
3763 	rcu_read_unlock();
3764 }
3765 
3766 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3767 				      int *quote)
3768 {
3769 	struct hci_conn_hash *h = &hdev->conn_hash;
3770 	struct hci_chan *chan = NULL;
3771 	unsigned int num = 0, min = ~0, cur_prio = 0;
3772 	struct hci_conn *conn;
3773 	int cnt, q, conn_num = 0;
3774 
3775 	BT_DBG("%s", hdev->name);
3776 
3777 	rcu_read_lock();
3778 
3779 	list_for_each_entry_rcu(conn, &h->list, list) {
3780 		struct hci_chan *tmp;
3781 
3782 		if (conn->type != type)
3783 			continue;
3784 
3785 		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3786 			continue;
3787 
3788 		conn_num++;
3789 
3790 		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3791 			struct sk_buff *skb;
3792 
3793 			if (skb_queue_empty(&tmp->data_q))
3794 				continue;
3795 
3796 			skb = skb_peek(&tmp->data_q);
3797 			if (skb->priority < cur_prio)
3798 				continue;
3799 
3800 			if (skb->priority > cur_prio) {
3801 				num = 0;
3802 				min = ~0;
3803 				cur_prio = skb->priority;
3804 			}
3805 
3806 			num++;
3807 
3808 			if (conn->sent < min) {
3809 				min  = conn->sent;
3810 				chan = tmp;
3811 			}
3812 		}
3813 
3814 		if (hci_conn_num(hdev, type) == conn_num)
3815 			break;
3816 	}
3817 
3818 	rcu_read_unlock();
3819 
3820 	if (!chan)
3821 		return NULL;
3822 
3823 	switch (chan->conn->type) {
3824 	case ACL_LINK:
3825 		cnt = hdev->acl_cnt;
3826 		break;
3827 	case AMP_LINK:
3828 		cnt = hdev->block_cnt;
3829 		break;
3830 	case SCO_LINK:
3831 	case ESCO_LINK:
3832 		cnt = hdev->sco_cnt;
3833 		break;
3834 	case LE_LINK:
3835 		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3836 		break;
3837 	default:
3838 		cnt = 0;
3839 		BT_ERR("Unknown link type");
3840 	}
3841 
3842 	q = cnt / num;
3843 	*quote = q ? q : 1;
3844 	BT_DBG("chan %p quote %d", chan, *quote);
3845 	return chan;
3846 }
3847 
3848 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3849 {
3850 	struct hci_conn_hash *h = &hdev->conn_hash;
3851 	struct hci_conn *conn;
3852 	int num = 0;
3853 
3854 	BT_DBG("%s", hdev->name);
3855 
3856 	rcu_read_lock();
3857 
3858 	list_for_each_entry_rcu(conn, &h->list, list) {
3859 		struct hci_chan *chan;
3860 
3861 		if (conn->type != type)
3862 			continue;
3863 
3864 		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3865 			continue;
3866 
3867 		num++;
3868 
3869 		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3870 			struct sk_buff *skb;
3871 
3872 			if (chan->sent) {
3873 				chan->sent = 0;
3874 				continue;
3875 			}
3876 
3877 			if (skb_queue_empty(&chan->data_q))
3878 				continue;
3879 
3880 			skb = skb_peek(&chan->data_q);
3881 			if (skb->priority >= HCI_PRIO_MAX - 1)
3882 				continue;
3883 
3884 			skb->priority = HCI_PRIO_MAX - 1;
3885 
3886 			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3887 			       skb->priority);
3888 		}
3889 
3890 		if (hci_conn_num(hdev, type) == num)
3891 			break;
3892 	}
3893 
3894 	rcu_read_unlock();
3895 
3896 }
3897 
3898 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3899 {
3900 	/* Calculate count of blocks used by this packet */
3901 	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3902 }
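
/* Worked example (hypothetical numbers): with hdev->block_len == 256
 * and an skb carrying HCI_ACL_HDR_SIZE + 1000 bytes, the packet
 * occupies DIV_ROUND_UP(1000, 256) == 4 data blocks.
 */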
3903 
3904 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3905 {
3906 	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
3907 		/* ACL tx timeout must be longer than maximum
3908 		 * link supervision timeout (40.9 seconds) */
3909 		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3910 				       HCI_ACL_TX_TIMEOUT))
3911 			hci_link_tx_to(hdev, ACL_LINK);
3912 	}
3913 }
3914 
3915 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3916 {
3917 	unsigned int cnt = hdev->acl_cnt;
3918 	struct hci_chan *chan;
3919 	struct sk_buff *skb;
3920 	int quote;
3921 
3922 	__check_timeout(hdev, cnt);
3923 
3924 	while (hdev->acl_cnt &&
3925 	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3926 		u32 priority = (skb_peek(&chan->data_q))->priority;
3927 		while (quote-- && (skb = skb_peek(&chan->data_q))) {
3928 			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3929 			       skb->len, skb->priority);
3930 
3931 			/* Stop if priority has changed */
3932 			if (skb->priority < priority)
3933 				break;
3934 
3935 			skb = skb_dequeue(&chan->data_q);
3936 
3937 			hci_conn_enter_active_mode(chan->conn,
3938 						   bt_cb(skb)->force_active);
3939 
3940 			hci_send_frame(hdev, skb);
3941 			hdev->acl_last_tx = jiffies;
3942 
3943 			hdev->acl_cnt--;
3944 			chan->sent++;
3945 			chan->conn->sent++;
3946 		}
3947 	}
3948 
3949 	if (cnt != hdev->acl_cnt)
3950 		hci_prio_recalculate(hdev, ACL_LINK);
3951 }
3952 
3953 static void hci_sched_acl_blk(struct hci_dev *hdev)
3954 {
3955 	unsigned int cnt = hdev->block_cnt;
3956 	struct hci_chan *chan;
3957 	struct sk_buff *skb;
3958 	int quote;
3959 	u8 type;
3960 
3961 	__check_timeout(hdev, cnt);
3962 
3963 	BT_DBG("%s", hdev->name);
3964 
3965 	if (hdev->dev_type == HCI_AMP)
3966 		type = AMP_LINK;
3967 	else
3968 		type = ACL_LINK;
3969 
3970 	while (hdev->block_cnt > 0 &&
3971 	       (chan = hci_chan_sent(hdev, type, &quote))) {
3972 		u32 priority = (skb_peek(&chan->data_q))->priority;
3973 		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3974 			int blocks;
3975 
3976 			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3977 			       skb->len, skb->priority);
3978 
3979 			/* Stop if priority has changed */
3980 			if (skb->priority < priority)
3981 				break;
3982 
3983 			skb = skb_dequeue(&chan->data_q);
3984 
3985 			blocks = __get_blocks(hdev, skb);
3986 			if (blocks > hdev->block_cnt)
3987 				return;
3988 
3989 			hci_conn_enter_active_mode(chan->conn,
3990 						   bt_cb(skb)->force_active);
3991 
3992 			hci_send_frame(hdev, skb);
3993 			hdev->acl_last_tx = jiffies;
3994 
3995 			hdev->block_cnt -= blocks;
3996 			quote -= blocks;
3997 
3998 			chan->sent += blocks;
3999 			chan->conn->sent += blocks;
4000 		}
4001 	}
4002 
4003 	if (cnt != hdev->block_cnt)
4004 		hci_prio_recalculate(hdev, type);
4005 }
4006 
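/* Schedule ACL: dispatch to the packet-based or block-based scheduler
 * depending on the controller's flow control mode.
 */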
4007 static void hci_sched_acl(struct hci_dev *hdev)
4008 {
4009 	BT_DBG("%s", hdev->name);
4010 
4011 	/* No ACL links on a BR/EDR controller, nothing to schedule */
4012 	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4013 		return;
4014 
4015 	/* No AMP links on an AMP controller, nothing to schedule */
4016 	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4017 		return;
4018 
4019 	switch (hdev->flow_ctl_mode) {
4020 	case HCI_FLOW_CTL_MODE_PACKET_BASED:
4021 		hci_sched_acl_pkt(hdev);
4022 		break;
4023 
4024 	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4025 		hci_sched_acl_blk(hdev);
4026 		break;
4027 	}
4028 }
4029 
4030 /* Schedule SCO */
4031 static void hci_sched_sco(struct hci_dev *hdev)
4032 {
4033 	struct hci_conn *conn;
4034 	struct sk_buff *skb;
4035 	int quote;
4036 
4037 	BT_DBG("%s", hdev->name);
4038 
4039 	if (!hci_conn_num(hdev, SCO_LINK))
4040 		return;
4041 
4042 	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4043 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4044 			BT_DBG("skb %p len %d", skb, skb->len);
4045 			hci_send_frame(hdev, skb);
4046 
4047 			conn->sent++;
4048 			if (conn->sent == ~0)
4049 				conn->sent = 0;
4050 		}
4051 	}
4052 }
4053 
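/* Schedule eSCO */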
4054 static void hci_sched_esco(struct hci_dev *hdev)
4055 {
4056 	struct hci_conn *conn;
4057 	struct sk_buff *skb;
4058 	int quote;
4059 
4060 	BT_DBG("%s", hdev->name);
4061 
4062 	if (!hci_conn_num(hdev, ESCO_LINK))
4063 		return;
4064 
4065 	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4066 						     &quote))) {
4067 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4068 			BT_DBG("skb %p len %d", skb, skb->len);
4069 			hci_send_frame(hdev, skb);
4070 
4071 			conn->sent++;
4072 			if (conn->sent == ~0)
4073 				conn->sent = 0;
4074 		}
4075 	}
4076 }
4077 
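/* Schedule LE: like packet-based ACL scheduling, but the credit pool is
 * le_cnt when the controller has a dedicated LE buffer pool (le_pkts) and
 * the shared acl_cnt otherwise.
 */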
4078 static void hci_sched_le(struct hci_dev *hdev)
4079 {
4080 	struct hci_chan *chan;
4081 	struct sk_buff *skb;
4082 	int quote, cnt, tmp;
4083 
4084 	BT_DBG("%s", hdev->name);
4085 
4086 	if (!hci_conn_num(hdev, LE_LINK))
4087 		return;
4088 
4089 	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
4090 		/* LE tx timeout must be longer than maximum
4091 		 * link supervision timeout (40.9 seconds) */
4092 		if (!hdev->le_cnt && hdev->le_pkts &&
4093 		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
4094 			hci_link_tx_to(hdev, LE_LINK);
4095 	}
4096 
4097 	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4098 	tmp = cnt;
4099 	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4100 		u32 priority = (skb_peek(&chan->data_q))->priority;
4101 		while (quote-- && (skb = skb_peek(&chan->data_q))) {
4102 			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4103 			       skb->len, skb->priority);
4104 
4105 			/* Stop if priority has changed */
4106 			if (skb->priority < priority)
4107 				break;
4108 
4109 			skb = skb_dequeue(&chan->data_q);
4110 
4111 			hci_send_frame(hdev, skb);
4112 			hdev->le_last_tx = jiffies;
4113 
4114 			cnt--;
4115 			chan->sent++;
4116 			chan->conn->sent++;
4117 		}
4118 	}
4119 
4120 	if (hdev->le_pkts)
4121 		hdev->le_cnt = cnt;
4122 	else
4123 		hdev->acl_cnt = cnt;
4124 
4125 	if (cnt != tmp)
4126 		hci_prio_recalculate(hdev, LE_LINK);
4127 }
4128 
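/* TX work handler: run the per-link-type schedulers (unless the device is
 * in user channel mode) and then push any raw packets queued on raw_q
 * straight to the driver.
 */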
4129 static void hci_tx_work(struct work_struct *work)
4130 {
4131 	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4132 	struct sk_buff *skb;
4133 
4134 	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4135 	       hdev->sco_cnt, hdev->le_cnt);
4136 
4137 	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4138 		/* Schedule queues and send pending frames to the HCI driver */
4139 		hci_sched_acl(hdev);
4140 		hci_sched_sco(hdev);
4141 		hci_sched_esco(hdev);
4142 		hci_sched_le(hdev);
4143 	}
4144 
4145 	/* Send next queued raw (unknown type) packet */
4146 	while ((skb = skb_dequeue(&hdev->raw_q)))
4147 		hci_send_frame(hdev, skb);
4148 }
4149 
4150 /* ----- HCI RX task (incoming data processing) ----- */
4151 
4152 /* ACL data packet */
4153 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4154 {
4155 	struct hci_acl_hdr *hdr = (void *) skb->data;
4156 	struct hci_conn *conn;
4157 	__u16 handle, flags;
4158 
4159 	skb_pull(skb, HCI_ACL_HDR_SIZE);
4160 
4161 	handle = __le16_to_cpu(hdr->handle);
4162 	flags  = hci_flags(handle);
4163 	handle = hci_handle(handle);
4164 
4165 	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4166 	       handle, flags);
4167 
4168 	hdev->stat.acl_rx++;
4169 
4170 	hci_dev_lock(hdev);
4171 	conn = hci_conn_hash_lookup_handle(hdev, handle);
4172 	hci_dev_unlock(hdev);
4173 
4174 	if (conn) {
4175 		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4176 
4177 		/* Send to upper protocol */
4178 		l2cap_recv_acldata(conn, skb, flags);
4179 		return;
4180 	} else {
4181 		BT_ERR("%s ACL packet for unknown connection handle %d",
4182 		       hdev->name, handle);
4183 	}
4184 
4185 	kfree_skb(skb);
4186 }
4187 
4188 /* SCO data packet */
4189 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4190 {
4191 	struct hci_sco_hdr *hdr = (void *) skb->data;
4192 	struct hci_conn *conn;
4193 	__u16 handle;
4194 
4195 	skb_pull(skb, HCI_SCO_HDR_SIZE);
4196 
4197 	handle = __le16_to_cpu(hdr->handle);
4198 
4199 	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4200 
4201 	hdev->stat.sco_rx++;
4202 
4203 	hci_dev_lock(hdev);
4204 	conn = hci_conn_hash_lookup_handle(hdev, handle);
4205 	hci_dev_unlock(hdev);
4206 
4207 	if (conn) {
4208 		/* Send to upper protocol */
4209 		sco_recv_scodata(conn, skb);
4210 		return;
4211 	} else {
4212 		BT_ERR("%s SCO packet for unknown connection handle %d",
4213 		       hdev->name, handle);
4214 	}
4215 
4216 	kfree_skb(skb);
4217 }
4218 
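/* A request is complete when the command at the head of cmd_q starts a new
 * request, or when the queue is empty.
 */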
4219 static bool hci_req_is_complete(struct hci_dev *hdev)
4220 {
4221 	struct sk_buff *skb;
4222 
4223 	skb = skb_peek(&hdev->cmd_q);
4224 	if (!skb)
4225 		return true;
4226 
4227 	return bt_cb(skb)->req.start;
4228 }
4229 
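/* Re-queue a copy of the last sent command at the head of cmd_q so that it
 * gets retransmitted.  HCI_Reset is never resent.
 */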
4230 static void hci_resend_last(struct hci_dev *hdev)
4231 {
4232 	struct hci_command_hdr *sent;
4233 	struct sk_buff *skb;
4234 	u16 opcode;
4235 
4236 	if (!hdev->sent_cmd)
4237 		return;
4238 
4239 	sent = (void *) hdev->sent_cmd->data;
4240 	opcode = __le16_to_cpu(sent->opcode);
4241 	if (opcode == HCI_OP_RESET)
4242 		return;
4243 
4244 	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4245 	if (!skb)
4246 		return;
4247 
4248 	skb_queue_head(&hdev->cmd_q, skb);
4249 	queue_work(hdev->workqueue, &hdev->cmd_work);
4250 }
4251 
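/* Find the completion callbacks for the request that the command with this
 * opcode belongs to.  The callback either lives in hdev->sent_cmd (when the
 * completed command was the last one of the request) or is picked up while
 * the remaining queued commands of the request are removed from cmd_q.
 */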
4252 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4253 			  hci_req_complete_t *req_complete,
4254 			  hci_req_complete_skb_t *req_complete_skb)
4255 {
4256 	struct sk_buff *skb;
4257 	unsigned long flags;
4258 
4259 	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4260 
4261 	/* If the completed command doesn't match the last one that was
4262 	 * sent, we need to do special handling of it.
4263 	 */
4264 	if (!hci_sent_cmd_data(hdev, opcode)) {
4265 		/* Some CSR-based controllers generate a spontaneous
4266 		 * reset complete event during init and any pending
4267 		 * command will never be completed. In such a case we
4268 		 * need to resend whatever was the last sent
4269 		 * command.
4270 		 */
4271 		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4272 			hci_resend_last(hdev);
4273 
4274 		return;
4275 	}
4276 
4277 	/* If the command succeeded and there are still more commands in
4278 	 * this request, the request is not yet complete.
4279 	 */
4280 	if (!status && !hci_req_is_complete(hdev))
4281 		return;
4282 
4283 	/* If this was the last command in a request, the complete
4284 	 * callback would be found in hdev->sent_cmd instead of the
4285 	 * command queue (hdev->cmd_q).
4286 	 */
4287 	if (bt_cb(hdev->sent_cmd)->req.complete) {
4288 		*req_complete = bt_cb(hdev->sent_cmd)->req.complete;
4289 		return;
4290 	}
4291 
4292 	if (bt_cb(hdev->sent_cmd)->req.complete_skb) {
4293 		*req_complete_skb = bt_cb(hdev->sent_cmd)->req.complete_skb;
4294 		return;
4295 	}
4296 
4297 	/* Remove all pending commands belonging to this request */
4298 	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4299 	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4300 		if (bt_cb(skb)->req.start) {
4301 			__skb_queue_head(&hdev->cmd_q, skb);
4302 			break;
4303 		}
4304 
4305 		*req_complete = bt_cb(skb)->req.complete;
4306 		*req_complete_skb = bt_cb(skb)->req.complete_skb;
4307 		kfree_skb(skb);
4308 	}
4309 	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4310 }
4311 
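/* RX work handler: drain rx_q, mirror each frame to the monitor and to
 * promiscuous sockets, then dispatch event, ACL and SCO packets to their
 * handlers.  Everything is dropped in user channel mode, and data packets
 * are dropped while the device is still initializing.
 */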
4312 static void hci_rx_work(struct work_struct *work)
4313 {
4314 	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4315 	struct sk_buff *skb;
4316 
4317 	BT_DBG("%s", hdev->name);
4318 
4319 	while ((skb = skb_dequeue(&hdev->rx_q))) {
4320 		/* Send copy to monitor */
4321 		hci_send_to_monitor(hdev, skb);
4322 
4323 		if (atomic_read(&hdev->promisc)) {
4324 			/* Send copy to the sockets */
4325 			hci_send_to_sock(hdev, skb);
4326 		}
4327 
4328 		if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4329 			kfree_skb(skb);
4330 			continue;
4331 		}
4332 
4333 		if (test_bit(HCI_INIT, &hdev->flags)) {
4334 			/* Don't process data packets in this state. */
4335 			switch (bt_cb(skb)->pkt_type) {
4336 			case HCI_ACLDATA_PKT:
4337 			case HCI_SCODATA_PKT:
4338 				kfree_skb(skb);
4339 				continue;
4340 			}
4341 		}
4342 
4343 		/* Process frame */
4344 		switch (bt_cb(skb)->pkt_type) {
4345 		case HCI_EVENT_PKT:
4346 			BT_DBG("%s Event packet", hdev->name);
4347 			hci_event_packet(hdev, skb);
4348 			break;
4349 
4350 		case HCI_ACLDATA_PKT:
4351 			BT_DBG("%s ACL data packet", hdev->name);
4352 			hci_acldata_packet(hdev, skb);
4353 			break;
4354 
4355 		case HCI_SCODATA_PKT:
4356 			BT_DBG("%s SCO data packet", hdev->name);
4357 			hci_scodata_packet(hdev, skb);
4358 			break;
4359 
4360 		default:
4361 			kfree_skb(skb);
4362 			break;
4363 		}
4364 	}
4365 }
4366 
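/* Command work handler: if the controller has a command credit, take the
 * next command off cmd_q, keep a clone in hdev->sent_cmd for completion
 * matching, send it and arm the command timeout (unless a reset is in
 * flight, which cancels the timer instead).
 */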
4367 static void hci_cmd_work(struct work_struct *work)
4368 {
4369 	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4370 	struct sk_buff *skb;
4371 
4372 	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4373 	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4374 
4375 	/* Send queued commands */
4376 	if (atomic_read(&hdev->cmd_cnt)) {
4377 		skb = skb_dequeue(&hdev->cmd_q);
4378 		if (!skb)
4379 			return;
4380 
4381 		kfree_skb(hdev->sent_cmd);
4382 
4383 		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4384 		if (hdev->sent_cmd) {
4385 			atomic_dec(&hdev->cmd_cnt);
4386 			hci_send_frame(hdev, skb);
4387 			if (test_bit(HCI_RESET, &hdev->flags))
4388 				cancel_delayed_work(&hdev->cmd_timer);
4389 			else
4390 				schedule_delayed_work(&hdev->cmd_timer,
4391 						      HCI_CMD_TIMEOUT);
4392 		} else {
4393 			skb_queue_head(&hdev->cmd_q, skb);
4394 			queue_work(hdev->workqueue, &hdev->cmd_work);
4395 		}
4396 	}
4397 }
4398