xref: /openbmc/linux/net/bluetooth/hci_request.c (revision f5029f62)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3 
4    Copyright (C) 2014 Intel Corporation
5 
6    This program is free software; you can redistribute it and/or modify
7    it under the terms of the GNU General Public License version 2 as
8    published by the Free Software Foundation;
9 
10    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
11    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
12    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
13    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
14    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
15    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 
19    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
20    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
21    SOFTWARE IS DISCLAIMED.
22 */
23 
24 #include <linux/sched/signal.h>
25 
26 #include <net/bluetooth/bluetooth.h>
27 #include <net/bluetooth/hci_core.h>
28 #include <net/bluetooth/mgmt.h>
29 
30 #include "smp.h"
31 #include "hci_request.h"
32 #include "msft.h"
33 #include "eir.h"
34 
35 #define HCI_REQ_DONE	  0
36 #define HCI_REQ_PEND	  1
37 #define HCI_REQ_CANCELED  2
38 
39 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
40 {
41 	skb_queue_head_init(&req->cmd_q);
42 	req->hdev = hdev;
43 	req->err = 0;
44 }
45 
46 void hci_req_purge(struct hci_request *req)
47 {
48 	skb_queue_purge(&req->cmd_q);
49 }
50 
51 bool hci_req_status_pend(struct hci_dev *hdev)
52 {
53 	return hdev->req_status == HCI_REQ_PEND;
54 }
55 
56 static int req_run(struct hci_request *req, hci_req_complete_t complete,
57 		   hci_req_complete_skb_t complete_skb)
58 {
59 	struct hci_dev *hdev = req->hdev;
60 	struct sk_buff *skb;
61 	unsigned long flags;
62 
63 	bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q));
64 
65 	/* If an error occurred during request building, remove all HCI
66 	 * commands queued on the HCI request queue.
67 	 */
68 	if (req->err) {
69 		skb_queue_purge(&req->cmd_q);
70 		return req->err;
71 	}
72 
73 	/* Do not allow empty requests */
74 	if (skb_queue_empty(&req->cmd_q))
75 		return -ENODATA;
76 
77 	skb = skb_peek_tail(&req->cmd_q);
78 	if (complete) {
79 		bt_cb(skb)->hci.req_complete = complete;
80 	} else if (complete_skb) {
81 		bt_cb(skb)->hci.req_complete_skb = complete_skb;
82 		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
83 	}
84 
85 	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
86 	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
87 	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
88 
89 	queue_work(hdev->workqueue, &hdev->cmd_work);
90 
91 	return 0;
92 }
93 
94 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
95 {
96 	return req_run(req, complete, NULL);
97 }
98 
99 int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
100 {
101 	return req_run(req, NULL, complete);
102 }
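
/* Example usage (illustrative sketch only; example_complete() and
 * example_run() are hypothetical names): a caller builds a request on the
 * stack, queues one or more commands and runs it asynchronously. Only the
 * last queued command carries the completion callback, and running an empty
 * request returns -ENODATA.
 *
 *	static void example_complete(struct hci_dev *hdev, u8 status, u16 opcode)
 *	{
 *		bt_dev_dbg(hdev, "opcode 0x%4.4x status 0x%2.2x", opcode, status);
 *	}
 *
 *	static int example_run(struct hci_dev *hdev)
 *	{
 *		struct hci_request req;
 *		u8 scan = SCAN_PAGE;
 *
 *		hci_req_init(&req, hdev);
 *		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *		return hci_req_run(&req, example_complete);
 *	}
 */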
103 
104 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
105 				  struct sk_buff *skb)
106 {
107 	bt_dev_dbg(hdev, "result 0x%2.2x", result);
108 
109 	if (hdev->req_status == HCI_REQ_PEND) {
110 		hdev->req_result = result;
111 		hdev->req_status = HCI_REQ_DONE;
112 		if (skb)
113 			hdev->req_skb = skb_get(skb);
114 		wake_up_interruptible(&hdev->req_wait_q);
115 	}
116 }
117 
118 void hci_req_sync_cancel(struct hci_dev *hdev, int err)
119 {
120 	bt_dev_dbg(hdev, "err 0x%2.2x", err);
121 
122 	if (hdev->req_status == HCI_REQ_PEND) {
123 		hdev->req_result = err;
124 		hdev->req_status = HCI_REQ_CANCELED;
125 		wake_up_interruptible(&hdev->req_wait_q);
126 	}
127 }
128 
129 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
130 				  const void *param, u8 event, u32 timeout)
131 {
132 	struct hci_request req;
133 	struct sk_buff *skb;
134 	int err = 0;
135 
136 	bt_dev_dbg(hdev, "");
137 
138 	hci_req_init(&req, hdev);
139 
140 	hci_req_add_ev(&req, opcode, plen, param, event);
141 
142 	hdev->req_status = HCI_REQ_PEND;
143 
144 	err = hci_req_run_skb(&req, hci_req_sync_complete);
145 	if (err < 0)
146 		return ERR_PTR(err);
147 
148 	err = wait_event_interruptible_timeout(hdev->req_wait_q,
149 			hdev->req_status != HCI_REQ_PEND, timeout);
150 
151 	if (err == -ERESTARTSYS)
152 		return ERR_PTR(-EINTR);
153 
154 	switch (hdev->req_status) {
155 	case HCI_REQ_DONE:
156 		err = -bt_to_errno(hdev->req_result);
157 		break;
158 
159 	case HCI_REQ_CANCELED:
160 		err = -hdev->req_result;
161 		break;
162 
163 	default:
164 		err = -ETIMEDOUT;
165 		break;
166 	}
167 
168 	hdev->req_status = hdev->req_result = 0;
169 	skb = hdev->req_skb;
170 	hdev->req_skb = NULL;
171 
172 	bt_dev_dbg(hdev, "end: err %d", err);
173 
174 	if (err < 0) {
175 		kfree_skb(skb);
176 		return ERR_PTR(err);
177 	}
178 
179 	if (!skb)
180 		return ERR_PTR(-ENODATA);
181 
182 	return skb;
183 }
184 EXPORT_SYMBOL(__hci_cmd_sync_ev);
185 
186 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
187 			       const void *param, u32 timeout)
188 {
189 	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
190 }
191 EXPORT_SYMBOL(__hci_cmd_sync);
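
/* Example usage (illustrative sketch only): __hci_cmd_sync() sends a single
 * command and waits for its completion. The returned skb holds the Command
 * Complete return parameters, or an ERR_PTR() on failure.
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL,
 *			     HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	kfree_skb(skb);
 */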
192 
193 /* Execute request and wait for completion. */
194 int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
195 						     unsigned long opt),
196 		   unsigned long opt, u32 timeout, u8 *hci_status)
197 {
198 	struct hci_request req;
199 	int err = 0;
200 
201 	bt_dev_dbg(hdev, "start");
202 
203 	hci_req_init(&req, hdev);
204 
205 	hdev->req_status = HCI_REQ_PEND;
206 
207 	err = func(&req, opt);
208 	if (err) {
209 		if (hci_status)
210 			*hci_status = HCI_ERROR_UNSPECIFIED;
211 		return err;
212 	}
213 
214 	err = hci_req_run_skb(&req, hci_req_sync_complete);
215 	if (err < 0) {
216 		hdev->req_status = 0;
217 
218 		/* ENODATA means the HCI request command queue is empty.
219 		 * This can happen when a request with conditionals doesn't
220 		 * trigger any commands to be sent. This is normal behavior
221 		 * and should not trigger an error return.
222 		 */
223 		if (err == -ENODATA) {
224 			if (hci_status)
225 				*hci_status = 0;
226 			return 0;
227 		}
228 
229 		if (hci_status)
230 			*hci_status = HCI_ERROR_UNSPECIFIED;
231 
232 		return err;
233 	}
234 
235 	err = wait_event_interruptible_timeout(hdev->req_wait_q,
236 			hdev->req_status != HCI_REQ_PEND, timeout);
237 
238 	if (err == -ERESTARTSYS)
239 		return -EINTR;
240 
241 	switch (hdev->req_status) {
242 	case HCI_REQ_DONE:
243 		err = -bt_to_errno(hdev->req_result);
244 		if (hci_status)
245 			*hci_status = hdev->req_result;
246 		break;
247 
248 	case HCI_REQ_CANCELED:
249 		err = -hdev->req_result;
250 		if (hci_status)
251 			*hci_status = HCI_ERROR_UNSPECIFIED;
252 		break;
253 
254 	default:
255 		err = -ETIMEDOUT;
256 		if (hci_status)
257 			*hci_status = HCI_ERROR_UNSPECIFIED;
258 		break;
259 	}
260 
261 	kfree_skb(hdev->req_skb);
262 	hdev->req_skb = NULL;
263 	hdev->req_status = hdev->req_result = 0;
264 
265 	bt_dev_dbg(hdev, "end: err %d", err);
266 
267 	return err;
268 }
269 
270 int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
271 						  unsigned long opt),
272 		 unsigned long opt, u32 timeout, u8 *hci_status)
273 {
274 	int ret;
275 
276 	/* Serialize all requests */
277 	hci_req_sync_lock(hdev);
278 	/* Check the state after obtaining the lock to protect the HCI_UP
279 	 * against any races from hci_dev_do_close when the controller
280 	 * gets removed.
281 	 */
282 	if (test_bit(HCI_UP, &hdev->flags))
283 		ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
284 	else
285 		ret = -ENETDOWN;
286 	hci_req_sync_unlock(hdev);
287 
288 	return ret;
289 }
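
/* Example usage (illustrative sketch only; example_build() is a hypothetical
 * name): hci_req_sync() takes a request-builder callback, runs the resulting
 * request and waits for completion while holding the request sync lock.
 * interleave_scan_work() below uses the same pattern.
 *
 *	static int example_build(struct hci_request *req, unsigned long opt)
 *	{
 *		__hci_req_update_adv_data(req, 0x00);
 *		return 0;
 *	}
 *
 *	...
 *		u8 status;
 *
 *		err = hci_req_sync(hdev, example_build, 0, HCI_CMD_TIMEOUT,
 *				   &status);
 */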
290 
291 struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
292 				const void *param)
293 {
294 	int len = HCI_COMMAND_HDR_SIZE + plen;
295 	struct hci_command_hdr *hdr;
296 	struct sk_buff *skb;
297 
298 	skb = bt_skb_alloc(len, GFP_ATOMIC);
299 	if (!skb)
300 		return NULL;
301 
302 	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
303 	hdr->opcode = cpu_to_le16(opcode);
304 	hdr->plen   = plen;
305 
306 	if (plen)
307 		skb_put_data(skb, param, plen);
308 
309 	bt_dev_dbg(hdev, "skb len %d", skb->len);
310 
311 	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
312 	hci_skb_opcode(skb) = opcode;
313 
314 	return skb;
315 }
316 
317 /* Queue a command to an asynchronous HCI request */
318 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
319 		    const void *param, u8 event)
320 {
321 	struct hci_dev *hdev = req->hdev;
322 	struct sk_buff *skb;
323 
324 	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
325 
326 	/* If an error occurred during request building, there is no point in
327 	 * queueing the HCI command. We can simply return.
328 	 */
329 	if (req->err)
330 		return;
331 
332 	skb = hci_prepare_cmd(hdev, opcode, plen, param);
333 	if (!skb) {
334 		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
335 			   opcode);
336 		req->err = -ENOMEM;
337 		return;
338 	}
339 
340 	if (skb_queue_empty(&req->cmd_q))
341 		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
342 
343 	bt_cb(skb)->hci.req_event = event;
344 
345 	skb_queue_tail(&req->cmd_q, skb);
346 }
347 
348 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
349 		 const void *param)
350 {
351 	hci_req_add_ev(req, opcode, plen, param, 0);
352 }
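
/* Example usage (illustrative sketch only; the interval/window values are
 * arbitrary): commands that take a parameter structure are queued by passing
 * the structure and its size. An allocation failure is latched in req->err,
 * so a later hci_req_run() fails instead of sending a partial sequence.
 *
 *	struct hci_cp_write_page_scan_activity acp;
 *
 *	acp.interval = cpu_to_le16(0x0100);
 *	acp.window   = cpu_to_le16(0x0012);
 *	hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY, sizeof(acp), &acp);
 */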
353 
354 void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
355 {
356 	struct hci_dev *hdev = req->hdev;
357 	struct hci_cp_write_page_scan_activity acp;
358 	u8 type;
359 
360 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
361 		return;
362 
363 	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
364 		return;
365 
366 	if (enable) {
367 		type = PAGE_SCAN_TYPE_INTERLACED;
368 
369 		/* 160 msec page scan interval */
370 		acp.interval = cpu_to_le16(0x0100);
371 	} else {
372 		type = hdev->def_page_scan_type;
373 		acp.interval = cpu_to_le16(hdev->def_page_scan_int);
374 	}
375 
376 	acp.window = cpu_to_le16(hdev->def_page_scan_window);
377 
378 	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
379 	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
380 		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
381 			    sizeof(acp), &acp);
382 
383 	if (hdev->page_scan_type != type)
384 		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
385 }
386 
387 static void start_interleave_scan(struct hci_dev *hdev)
388 {
389 	hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
390 	queue_delayed_work(hdev->req_workqueue,
391 			   &hdev->interleave_scan, 0);
392 }
393 
394 static bool is_interleave_scanning(struct hci_dev *hdev)
395 {
396 	return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
397 }
398 
399 static void cancel_interleave_scan(struct hci_dev *hdev)
400 {
401 	bt_dev_dbg(hdev, "cancelling interleave scan");
402 
403 	cancel_delayed_work_sync(&hdev->interleave_scan);
404 
405 	hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
406 }
407 
408 /* Return true if this call started interleave scanning (i.e. it was not
409  * already running), otherwise return false.
410  */
411 static bool __hci_update_interleaved_scan(struct hci_dev *hdev)
412 {
413 	/* Do interleaved scan only if all of the following are true:
414 	 * - There is at least one ADV monitor
415 	 * - At least one pending LE connection or one device to be scanned for
416 	 * - Monitor offloading is not supported
417 	 * If so, we should alternate between allowlist scan and one without
418 	 * any filters to save power.
419 	 */
420 	bool use_interleaving = hci_is_adv_monitoring(hdev) &&
421 				!(list_empty(&hdev->pend_le_conns) &&
422 				  list_empty(&hdev->pend_le_reports)) &&
423 				hci_get_adv_monitor_offload_ext(hdev) ==
424 				    HCI_ADV_MONITOR_EXT_NONE;
425 	bool is_interleaving = is_interleave_scanning(hdev);
426 
427 	if (use_interleaving && !is_interleaving) {
428 		start_interleave_scan(hdev);
429 		bt_dev_dbg(hdev, "starting interleave scan");
430 		return true;
431 	}
432 
433 	if (!use_interleaving && is_interleaving)
434 		cancel_interleave_scan(hdev);
435 
436 	return false;
437 }
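
/* Summary (added for illustration): when interleaving is active,
 * interleave_scan_work() alternates between the two scan states, spending a
 * configurable duration in each:
 *
 *	INTERLEAVE_SCAN_ALLOWLIST --(advmon_allowlist_duration ms)-->
 *	INTERLEAVE_SCAN_NO_FILTER --(advmon_no_filter_duration ms)--> repeat
 */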
438 
439 /* This function controls the background scanning based on hdev->pend_le_conns
440  * list. If there are pending LE connections we start the background scanning,
441  * otherwise we stop it.
442  *
443  * This function requires the caller holds hdev->lock.
444  */
445 static void __hci_update_background_scan(struct hci_request *req)
446 {
447 	struct hci_dev *hdev = req->hdev;
448 
449 	if (!test_bit(HCI_UP, &hdev->flags) ||
450 	    test_bit(HCI_INIT, &hdev->flags) ||
451 	    hci_dev_test_flag(hdev, HCI_SETUP) ||
452 	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
453 	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
454 	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
455 		return;
456 
457 	/* No point in doing scanning if LE support hasn't been enabled */
458 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
459 		return;
460 
461 	/* If discovery is active don't interfere with it */
462 	if (hdev->discovery.state != DISCOVERY_STOPPED)
463 		return;
464 
465 	/* Reset RSSI and UUID filters when starting background scanning
466 	 * since these filters are meant for service discovery only.
467 	 *
468 	 * The Start Discovery and Start Service Discovery operations
469 	 * ensure that proper values are set for the RSSI threshold and
470 	 * UUID filter list. So it is safe to just reset them here.
471 	 */
472 	hci_discovery_filter_clear(hdev);
473 
474 	bt_dev_dbg(hdev, "ADV monitoring is %s",
475 		   hci_is_adv_monitoring(hdev) ? "on" : "off");
476 
477 	if (list_empty(&hdev->pend_le_conns) &&
478 	    list_empty(&hdev->pend_le_reports) &&
479 	    !hci_is_adv_monitoring(hdev)) {
480 		/* If there are no pending LE connections, no devices
481 		 * to be scanned for and no ADV monitors, we should stop
482 		 * the background scanning.
483 		 */
484 
485 		/* If controller is not scanning we are done. */
486 		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
487 			return;
488 
489 		hci_req_add_le_scan_disable(req, false);
490 
491 		bt_dev_dbg(hdev, "stopping background scanning");
492 	} else {
493 		/* If there is at least one pending LE connection, we should
494 		 * keep the background scan running.
495 		 */
496 
497 		/* If controller is connecting, we should not start scanning
498 		 * since some controllers are not able to scan and connect at
499 		 * the same time.
500 		 */
501 		if (hci_lookup_le_connect(hdev))
502 			return;
503 
504 		/* If controller is currently scanning, we stop it to ensure we
505 		 * don't miss any advertising (due to duplicates filter).
506 		 */
507 		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
508 			hci_req_add_le_scan_disable(req, false);
509 
510 		hci_req_add_le_passive_scan(req);
511 		bt_dev_dbg(hdev, "starting background scanning");
512 	}
513 }
514 
515 void __hci_req_update_name(struct hci_request *req)
516 {
517 	struct hci_dev *hdev = req->hdev;
518 	struct hci_cp_write_local_name cp;
519 
520 	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
521 
522 	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
523 }
524 
525 void __hci_req_update_eir(struct hci_request *req)
526 {
527 	struct hci_dev *hdev = req->hdev;
528 	struct hci_cp_write_eir cp;
529 
530 	if (!hdev_is_powered(hdev))
531 		return;
532 
533 	if (!lmp_ext_inq_capable(hdev))
534 		return;
535 
536 	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
537 		return;
538 
539 	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
540 		return;
541 
542 	memset(&cp, 0, sizeof(cp));
543 
544 	eir_create(hdev, cp.data);
545 
546 	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
547 		return;
548 
549 	memcpy(hdev->eir, cp.data, sizeof(cp.data));
550 
551 	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
552 }
553 
554 void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)
555 {
556 	struct hci_dev *hdev = req->hdev;
557 
558 	if (hdev->scanning_paused) {
559 		bt_dev_dbg(hdev, "Scanning is paused for suspend");
560 		return;
561 	}
562 
563 	if (hdev->suspended)
564 		set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
565 
566 	if (use_ext_scan(hdev)) {
567 		struct hci_cp_le_set_ext_scan_enable cp;
568 
569 		memset(&cp, 0, sizeof(cp));
570 		cp.enable = LE_SCAN_DISABLE;
571 		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
572 			    &cp);
573 	} else {
574 		struct hci_cp_le_set_scan_enable cp;
575 
576 		memset(&cp, 0, sizeof(cp));
577 		cp.enable = LE_SCAN_DISABLE;
578 		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
579 	}
580 
581 	/* Disable address resolution */
582 	if (use_ll_privacy(hdev) &&
583 	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
584 	    hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) {
585 		__u8 enable = 0x00;
586 
587 		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
588 	}
589 }
590 
591 static void del_from_accept_list(struct hci_request *req, bdaddr_t *bdaddr,
592 				 u8 bdaddr_type)
593 {
594 	struct hci_cp_le_del_from_accept_list cp;
595 
596 	cp.bdaddr_type = bdaddr_type;
597 	bacpy(&cp.bdaddr, bdaddr);
598 
599 	bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from accept list", &cp.bdaddr,
600 		   cp.bdaddr_type);
601 	hci_req_add(req, HCI_OP_LE_DEL_FROM_ACCEPT_LIST, sizeof(cp), &cp);
602 
603 	if (use_ll_privacy(req->hdev) &&
604 	    hci_dev_test_flag(req->hdev, HCI_ENABLE_LL_PRIVACY)) {
605 		struct smp_irk *irk;
606 
607 		irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type);
608 		if (irk) {
609 			struct hci_cp_le_del_from_resolv_list cp;
610 
611 			cp.bdaddr_type = bdaddr_type;
612 			bacpy(&cp.bdaddr, bdaddr);
613 
614 			hci_req_add(req, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
615 				    sizeof(cp), &cp);
616 		}
617 	}
618 }
619 
620 /* Adds a connection to the accept list if needed. On error, returns -1. */
621 static int add_to_accept_list(struct hci_request *req,
622 			      struct hci_conn_params *params, u8 *num_entries,
623 			      bool allow_rpa)
624 {
625 	struct hci_cp_le_add_to_accept_list cp;
626 	struct hci_dev *hdev = req->hdev;
627 
628 	/* Already in accept list */
629 	if (hci_bdaddr_list_lookup(&hdev->le_accept_list, &params->addr,
630 				   params->addr_type))
631 		return 0;
632 
633 	/* Select filter policy to accept all advertising */
634 	if (*num_entries >= hdev->le_accept_list_size)
635 		return -1;
636 
637 	/* Accept list can not be used with RPAs */
638 	if (!allow_rpa &&
639 	    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
640 	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) {
641 		return -1;
642 	}
643 
644 	/* During suspend, only wakeable devices can be in accept list */
645 	if (hdev->suspended && !hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
646 						   params->current_flags))
647 		return 0;
648 
649 	*num_entries += 1;
650 	cp.bdaddr_type = params->addr_type;
651 	bacpy(&cp.bdaddr, &params->addr);
652 
653 	bt_dev_dbg(hdev, "Add %pMR (0x%x) to accept list", &cp.bdaddr,
654 		   cp.bdaddr_type);
655 	hci_req_add(req, HCI_OP_LE_ADD_TO_ACCEPT_LIST, sizeof(cp), &cp);
656 
657 	if (use_ll_privacy(hdev) &&
658 	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) {
659 		struct smp_irk *irk;
660 
661 		irk = hci_find_irk_by_addr(hdev, &params->addr,
662 					   params->addr_type);
663 		if (irk) {
664 			struct hci_cp_le_add_to_resolv_list cp;
665 
666 			cp.bdaddr_type = params->addr_type;
667 			bacpy(&cp.bdaddr, &params->addr);
668 			memcpy(cp.peer_irk, irk->val, 16);
669 
670 			if (hci_dev_test_flag(hdev, HCI_PRIVACY))
671 				memcpy(cp.local_irk, hdev->irk, 16);
672 			else
673 				memset(cp.local_irk, 0, 16);
674 
675 			hci_req_add(req, HCI_OP_LE_ADD_TO_RESOLV_LIST,
676 				    sizeof(cp), &cp);
677 		}
678 	}
679 
680 	return 0;
681 }
682 
683 static u8 update_accept_list(struct hci_request *req)
684 {
685 	struct hci_dev *hdev = req->hdev;
686 	struct hci_conn_params *params;
687 	struct bdaddr_list *b;
688 	u8 num_entries = 0;
689 	bool pend_conn, pend_report;
690 	/* We allow usage of the accept list even with RPAs in suspend. In the
691 	 * worst case, we won't be able to wake from devices that use the
692 	 * Privacy 1.2 features. Additionally, once we support Privacy 1.2 and
693 	 * IRK offloading, we can update this to also check for those conditions.
694 	 */
695 	bool allow_rpa = hdev->suspended;
696 
697 	if (use_ll_privacy(hdev) &&
698 	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
699 		allow_rpa = true;
700 
701 	/* Go through the current accept list programmed into the
702 	 * controller one by one and check if that address is still
703 	 * in the list of pending connections or list of devices to
704 	 * report. If not present in either list, then queue the
705 	 * command to remove it from the controller.
706 	 */
707 	list_for_each_entry(b, &hdev->le_accept_list, list) {
708 		pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
709 						      &b->bdaddr,
710 						      b->bdaddr_type);
711 		pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
712 							&b->bdaddr,
713 							b->bdaddr_type);
714 
715 		/* If the device is not likely to connect or report,
716 		 * remove it from the accept list.
717 		 */
718 		if (!pend_conn && !pend_report) {
719 			del_from_accept_list(req, &b->bdaddr, b->bdaddr_type);
720 			continue;
721 		}
722 
723 		/* Accept list can not be used with RPAs */
724 		if (!allow_rpa &&
725 		    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
726 		    hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
727 			return 0x00;
728 		}
729 
730 		num_entries++;
731 	}
732 
733 	/* Since all accept list entries that are no longer valid have
734 	 * been removed, walk through the list of pending connections
735 	 * and ensure that any new device gets programmed into
736 	 * the controller.
737 	 *
738 	 * If the list of devices is larger than the list of
739 	 * available accept list entries in the controller, then
740 	 * just abort and return the filter policy value to not use
741 	 * the accept list.
742 	 */
743 	list_for_each_entry(params, &hdev->pend_le_conns, action) {
744 		if (add_to_accept_list(req, params, &num_entries, allow_rpa))
745 			return 0x00;
746 	}
747 
748 	/* After adding all new pending connections, walk through
749 	 * the list of pending reports and also add these to the
750 	 * accept list if there is still space. Abort if space runs out.
751 	 */
752 	list_for_each_entry(params, &hdev->pend_le_reports, action) {
753 		if (add_to_accept_list(req, params, &num_entries, allow_rpa))
754 			return 0x00;
755 	}
756 
757 	/* Use the allowlist unless the following conditions are all true:
758 	 * - We are not currently suspending
759 	 * - At least one ADV monitor is registered and monitoring is not offloaded
760 	 * - Interleaved scanning is not currently using the allowlist
761 	 */
762 	if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
763 	    hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE &&
764 	    hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
765 		return 0x00;
766 
767 	/* Select filter policy to use accept list */
768 	return 0x01;
769 }
770 
771 static bool scan_use_rpa(struct hci_dev *hdev)
772 {
773 	return hci_dev_test_flag(hdev, HCI_PRIVACY);
774 }
775 
776 static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
777 			       u16 window, u8 own_addr_type, u8 filter_policy,
778 			       bool filter_dup, bool addr_resolv)
779 {
780 	struct hci_dev *hdev = req->hdev;
781 
782 	if (hdev->scanning_paused) {
783 		bt_dev_dbg(hdev, "Scanning is paused for suspend");
784 		return;
785 	}
786 
787 	if (use_ll_privacy(hdev) &&
788 	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
789 	    addr_resolv) {
790 		u8 enable = 0x01;
791 
792 		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
793 	}
794 
795 	/* Use extended scanning if the Set Extended Scan Parameters and
796 	 * Set Extended Scan Enable commands are supported
797 	 */
798 	if (use_ext_scan(hdev)) {
799 		struct hci_cp_le_set_ext_scan_params *ext_param_cp;
800 		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
801 		struct hci_cp_le_scan_phy_params *phy_params;
802 		u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
803 		u32 plen;
804 
805 		ext_param_cp = (void *)data;
806 		phy_params = (void *)ext_param_cp->data;
807 
808 		memset(ext_param_cp, 0, sizeof(*ext_param_cp));
809 		ext_param_cp->own_addr_type = own_addr_type;
810 		ext_param_cp->filter_policy = filter_policy;
811 
812 		plen = sizeof(*ext_param_cp);
813 
814 		if (scan_1m(hdev) || scan_2m(hdev)) {
815 			ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;
816 
817 			memset(phy_params, 0, sizeof(*phy_params));
818 			phy_params->type = type;
819 			phy_params->interval = cpu_to_le16(interval);
820 			phy_params->window = cpu_to_le16(window);
821 
822 			plen += sizeof(*phy_params);
823 			phy_params++;
824 		}
825 
826 		if (scan_coded(hdev)) {
827 			ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;
828 
829 			memset(phy_params, 0, sizeof(*phy_params));
830 			phy_params->type = type;
831 			phy_params->interval = cpu_to_le16(interval);
832 			phy_params->window = cpu_to_le16(window);
833 
834 			plen += sizeof(*phy_params);
835 			phy_params++;
836 		}
837 
838 		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
839 			    plen, ext_param_cp);
840 
841 		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
842 		ext_enable_cp.enable = LE_SCAN_ENABLE;
843 		ext_enable_cp.filter_dup = filter_dup;
844 
845 		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
846 			    sizeof(ext_enable_cp), &ext_enable_cp);
847 	} else {
848 		struct hci_cp_le_set_scan_param param_cp;
849 		struct hci_cp_le_set_scan_enable enable_cp;
850 
851 		memset(&param_cp, 0, sizeof(param_cp));
852 		param_cp.type = type;
853 		param_cp.interval = cpu_to_le16(interval);
854 		param_cp.window = cpu_to_le16(window);
855 		param_cp.own_address_type = own_addr_type;
856 		param_cp.filter_policy = filter_policy;
857 		hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
858 			    &param_cp);
859 
860 		memset(&enable_cp, 0, sizeof(enable_cp));
861 		enable_cp.enable = LE_SCAN_ENABLE;
862 		enable_cp.filter_dup = filter_dup;
863 		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
864 			    &enable_cp);
865 	}
866 }
867 
868 /* Returns true if an LE connection is in the scanning state */
869 static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev)
870 {
871 	struct hci_conn_hash *h = &hdev->conn_hash;
872 	struct hci_conn  *c;
873 
874 	rcu_read_lock();
875 
876 	list_for_each_entry_rcu(c, &h->list, list) {
877 		if (c->type == LE_LINK && c->state == BT_CONNECT &&
878 		    test_bit(HCI_CONN_SCANNING, &c->flags)) {
879 			rcu_read_unlock();
880 			return true;
881 		}
882 	}
883 
884 	rcu_read_unlock();
885 
886 	return false;
887 }
888 
889 /* Make sure to call hci_req_add_le_scan_disable() first to disable the
890  * controller-based address resolution, so that the resolving list can
891  * be reconfigured.
892  */
893 void hci_req_add_le_passive_scan(struct hci_request *req)
894 {
895 	struct hci_dev *hdev = req->hdev;
896 	u8 own_addr_type;
897 	u8 filter_policy;
898 	u16 window, interval;
899 	/* Default is to enable duplicates filter */
900 	u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
901 	/* Background scanning should run with address resolution */
902 	bool addr_resolv = true;
903 
904 	if (hdev->scanning_paused) {
905 		bt_dev_dbg(hdev, "Scanning is paused for suspend");
906 		return;
907 	}
908 
909 	/* Set require_privacy to false since no SCAN_REQ are sent
910 	 * during passive scanning. Not using a non-resolvable address
911 	 * here is important so that peer devices using direct
912 	 * advertising with our address will be correctly reported
913 	 * by the controller.
914 	 */
915 	if (hci_update_random_address(req, false, scan_use_rpa(hdev),
916 				      &own_addr_type))
917 		return;
918 
919 	if (hdev->enable_advmon_interleave_scan &&
920 	    __hci_update_interleaved_scan(hdev))
921 		return;
922 
923 	bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);
924 	/* Adding or removing entries from the accept list must
925 	 * happen before enabling scanning. The controller does
926 	 * not allow accept list modification while scanning.
927 	 */
928 	filter_policy = update_accept_list(req);
929 
930 	/* When the controller is using random resolvable addresses and
931 	 * LE privacy is enabled on top of that, controllers that support
932 	 * Extended Scanner Filter Policies can enable support for
933 	 * handling directed advertising.
934 	 *
935 	 * So instead of using filter policies 0x00 (no accept list)
936 	 * and 0x01 (accept list enabled) use the new filter policies
937 	 * 0x02 (no accept list) and 0x03 (accept list enabled).
938 	 */
939 	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
940 	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
941 		filter_policy |= 0x02;
942 
943 	if (hdev->suspended) {
944 		window = hdev->le_scan_window_suspend;
945 		interval = hdev->le_scan_int_suspend;
946 
947 		set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
948 	} else if (hci_is_le_conn_scanning(hdev)) {
949 		window = hdev->le_scan_window_connect;
950 		interval = hdev->le_scan_int_connect;
951 	} else if (hci_is_adv_monitoring(hdev)) {
952 		window = hdev->le_scan_window_adv_monitor;
953 		interval = hdev->le_scan_int_adv_monitor;
954 
955 		/* Disable the duplicates filter when scanning for an
956 		 * advertisement monitor, for the following reasons.
957 		 *
958 		 * For HW pattern filtering (e.g. MSFT), Realtek and Qualcomm
959 		 * controllers ignore RSSI_Sampling_Period when the duplicates
960 		 * filter is enabled.
961 		 *
962 		 * For SW pattern filtering, when we're not doing interleaved
963 		 * scanning, it is necessary to disable the duplicates filter,
964 		 * otherwise hosts can only receive one advertisement and it is
965 		 * impossible to know if a peer is still in range.
966 		 */
967 		filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
968 	} else {
969 		window = hdev->le_scan_window;
970 		interval = hdev->le_scan_interval;
971 	}
972 
973 	bt_dev_dbg(hdev, "LE passive scan with accept list = %d",
974 		   filter_policy);
975 	hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
976 			   own_addr_type, filter_policy, filter_dup,
977 			   addr_resolv);
978 }
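
/* Example ordering (illustrative only): callers such as
 * __hci_update_background_scan() first disable scanning (which also turns
 * off address resolution when LL privacy is in use), then let
 * hci_req_add_le_passive_scan() reprogram the accept/resolving lists and
 * re-enable passive scanning:
 *
 *	if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
 *		hci_req_add_le_scan_disable(req, false);
 *	hci_req_add_le_passive_scan(req);
 */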
979 
980 static void hci_req_clear_event_filter(struct hci_request *req)
981 {
982 	struct hci_cp_set_event_filter f;
983 
984 	if (!hci_dev_test_flag(req->hdev, HCI_BREDR_ENABLED))
985 		return;
986 
987 	if (hci_dev_test_flag(req->hdev, HCI_EVENT_FILTER_CONFIGURED)) {
988 		memset(&f, 0, sizeof(f));
989 		f.flt_type = HCI_FLT_CLEAR_ALL;
990 		hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &f);
991 	}
992 }
993 
994 static void hci_req_set_event_filter(struct hci_request *req)
995 {
996 	struct bdaddr_list_with_flags *b;
997 	struct hci_cp_set_event_filter f;
998 	struct hci_dev *hdev = req->hdev;
999 	u8 scan = SCAN_DISABLED;
1000 	bool scanning = test_bit(HCI_PSCAN, &hdev->flags);
1001 
1002 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1003 		return;
1004 
1005 	/* Always clear event filter when starting */
1006 	hci_req_clear_event_filter(req);
1007 
1008 	list_for_each_entry(b, &hdev->accept_list, list) {
1009 		if (!hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
1010 					b->current_flags))
1011 			continue;
1012 
1013 		memset(&f, 0, sizeof(f));
1014 		bacpy(&f.addr_conn_flt.bdaddr, &b->bdaddr);
1015 		f.flt_type = HCI_FLT_CONN_SETUP;
1016 		f.cond_type = HCI_CONN_SETUP_ALLOW_BDADDR;
1017 		f.addr_conn_flt.auto_accept = HCI_CONN_SETUP_AUTO_ON;
1018 
1019 		bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);
1020 		hci_req_add(req, HCI_OP_SET_EVENT_FLT, sizeof(f), &f);
1021 		scan = SCAN_PAGE;
1022 	}
1023 
1024 	if (scan && !scanning) {
1025 		set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
1026 		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1027 	} else if (!scan && scanning) {
1028 		set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
1029 		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1030 	}
1031 }
1032 
1033 static void cancel_adv_timeout(struct hci_dev *hdev)
1034 {
1035 	if (hdev->adv_instance_timeout) {
1036 		hdev->adv_instance_timeout = 0;
1037 		cancel_delayed_work(&hdev->adv_instance_expire);
1038 	}
1039 }
1040 
1041 /* This function requires the caller holds hdev->lock */
1042 void __hci_req_pause_adv_instances(struct hci_request *req)
1043 {
1044 	bt_dev_dbg(req->hdev, "Pausing advertising instances");
1045 
1046 	/* Call to disable any advertisements active on the controller.
1047 	 * This will succeed even if no advertisements are configured.
1048 	 */
1049 	__hci_req_disable_advertising(req);
1050 
1051 	/* If we are using software rotation, pause the loop */
1052 	if (!ext_adv_capable(req->hdev))
1053 		cancel_adv_timeout(req->hdev);
1054 }
1055 
1056 /* This function requires the caller holds hdev->lock */
1057 static void __hci_req_resume_adv_instances(struct hci_request *req)
1058 {
1059 	struct adv_info *adv;
1060 
1061 	bt_dev_dbg(req->hdev, "Resuming advertising instances");
1062 
1063 	if (ext_adv_capable(req->hdev)) {
1064 		/* Call for each tracked instance to be re-enabled */
1065 		list_for_each_entry(adv, &req->hdev->adv_instances, list) {
1066 			__hci_req_enable_ext_advertising(req,
1067 							 adv->instance);
1068 		}
1069 
1070 	} else {
1071 		/* Schedule for most recent instance to be restarted and begin
1072 		 * the software rotation loop
1073 		 */
1074 		__hci_req_schedule_adv_instance(req,
1075 						req->hdev->cur_adv_instance,
1076 						true);
1077 	}
1078 }
1079 
1080 /* This function requires the caller holds hdev->lock */
1081 int hci_req_resume_adv_instances(struct hci_dev *hdev)
1082 {
1083 	struct hci_request req;
1084 
1085 	hci_req_init(&req, hdev);
1086 	__hci_req_resume_adv_instances(&req);
1087 
1088 	return hci_req_run(&req, NULL);
1089 }
1090 
1091 static void suspend_req_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1092 {
1093 	bt_dev_dbg(hdev, "Request complete opcode=0x%x, status=0x%x", opcode,
1094 		   status);
1095 	if (test_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) ||
1096 	    test_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) {
1097 		clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
1098 		clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
1099 		wake_up(&hdev->suspend_wait_q);
1100 	}
1101 
1102 	if (test_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks)) {
1103 		clear_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks);
1104 		wake_up(&hdev->suspend_wait_q);
1105 	}
1106 }
1107 
1108 static void hci_req_prepare_adv_monitor_suspend(struct hci_request *req,
1109 						bool suspending)
1110 {
1111 	struct hci_dev *hdev = req->hdev;
1112 
1113 	switch (hci_get_adv_monitor_offload_ext(hdev)) {
1114 	case HCI_ADV_MONITOR_EXT_MSFT:
1115 		if (suspending)
1116 			msft_suspend(hdev);
1117 		else
1118 			msft_resume(hdev);
1119 		break;
1120 	default:
1121 		return;
1122 	}
1123 
1124 	/* No need to block when enabling since it's on resume path */
1125 	if (hdev->suspended && suspending)
1126 		set_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks);
1127 }
1128 
1129 /* Call with hci_dev_lock */
1130 void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
1131 {
1132 	int old_state;
1133 	struct hci_conn *conn;
1134 	struct hci_request req;
1135 	u8 page_scan;
1136 	int disconnect_counter;
1137 
1138 	if (next == hdev->suspend_state) {
1139 		bt_dev_dbg(hdev, "Same state before and after: %d", next);
1140 		goto done;
1141 	}
1142 
1143 	hdev->suspend_state = next;
1144 	hci_req_init(&req, hdev);
1145 
1146 	if (next == BT_SUSPEND_DISCONNECT) {
1147 		/* Mark device as suspended */
1148 		hdev->suspended = true;
1149 
1150 		/* Pause discovery if not already stopped */
1151 		old_state = hdev->discovery.state;
1152 		if (old_state != DISCOVERY_STOPPED) {
1153 			set_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks);
1154 			hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
1155 			queue_work(hdev->req_workqueue, &hdev->discov_update);
1156 		}
1157 
1158 		hdev->discovery_paused = true;
1159 		hdev->discovery_old_state = old_state;
1160 
1161 		/* Stop directed advertising */
1162 		old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
1163 		if (old_state) {
1164 			set_bit(SUSPEND_PAUSE_ADVERTISING, hdev->suspend_tasks);
1165 			cancel_delayed_work(&hdev->discov_off);
1166 			queue_delayed_work(hdev->req_workqueue,
1167 					   &hdev->discov_off, 0);
1168 		}
1169 
1170 		/* Pause other advertisements */
1171 		if (hdev->adv_instance_cnt)
1172 			__hci_req_pause_adv_instances(&req);
1173 
1174 		hdev->advertising_paused = true;
1175 		hdev->advertising_old_state = old_state;
1176 
1177 		/* Disable page scan if enabled */
1178 		if (test_bit(HCI_PSCAN, &hdev->flags)) {
1179 			page_scan = SCAN_DISABLED;
1180 			hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1,
1181 				    &page_scan);
1182 			set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
1183 		}
1184 
1185 		/* Disable LE passive scan if enabled */
1186 		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
1187 			cancel_interleave_scan(hdev);
1188 			hci_req_add_le_scan_disable(&req, false);
1189 		}
1190 
1191 		/* Disable advertisement filters */
1192 		hci_req_prepare_adv_monitor_suspend(&req, true);
1193 
1194 		/* Prevent disconnects from causing scanning to be re-enabled */
1195 		hdev->scanning_paused = true;
1196 
1197 		/* Run commands before disconnecting */
1198 		hci_req_run(&req, suspend_req_complete);
1199 
1200 		disconnect_counter = 0;
1201 		/* Soft disconnect everything (power off) */
1202 		list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1203 			hci_disconnect(conn, HCI_ERROR_REMOTE_POWER_OFF);
1204 			disconnect_counter++;
1205 		}
1206 
1207 		if (disconnect_counter > 0) {
1208 			bt_dev_dbg(hdev,
1209 				   "Had %d disconnects. Will wait on them",
1210 				   disconnect_counter);
1211 			set_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks);
1212 		}
1213 	} else if (next == BT_SUSPEND_CONFIGURE_WAKE) {
1214 		/* Unpause to take care of updating scanning params */
1215 		hdev->scanning_paused = false;
1216 		/* Enable event filter for paired devices */
1217 		hci_req_set_event_filter(&req);
1218 		/* Enable passive scan at lower duty cycle */
1219 		__hci_update_background_scan(&req);
1220 		/* Pause scan changes again. */
1221 		hdev->scanning_paused = true;
1222 		hci_req_run(&req, suspend_req_complete);
1223 	} else {
1224 		hdev->suspended = false;
1225 		hdev->scanning_paused = false;
1226 
1227 		/* Clear any event filters and restore scan state */
1228 		hci_req_clear_event_filter(&req);
1229 		__hci_req_update_scan(&req);
1230 
1231 		/* Reset passive/background scanning to normal */
1232 		__hci_update_background_scan(&req);
1233 		/* Enable all of the advertisement filters */
1234 		hci_req_prepare_adv_monitor_suspend(&req, false);
1235 
1236 		/* Unpause directed advertising */
1237 		hdev->advertising_paused = false;
1238 		if (hdev->advertising_old_state) {
1239 			set_bit(SUSPEND_UNPAUSE_ADVERTISING,
1240 				hdev->suspend_tasks);
1241 			hci_dev_set_flag(hdev, HCI_ADVERTISING);
1242 			queue_work(hdev->req_workqueue,
1243 				   &hdev->discoverable_update);
1244 			hdev->advertising_old_state = 0;
1245 		}
1246 
1247 		/* Resume other advertisements */
1248 		if (hdev->adv_instance_cnt)
1249 			__hci_req_resume_adv_instances(&req);
1250 
1251 		/* Unpause discovery */
1252 		hdev->discovery_paused = false;
1253 		if (hdev->discovery_old_state != DISCOVERY_STOPPED &&
1254 		    hdev->discovery_old_state != DISCOVERY_STOPPING) {
1255 			set_bit(SUSPEND_UNPAUSE_DISCOVERY, hdev->suspend_tasks);
1256 			hci_discovery_set_state(hdev, DISCOVERY_STARTING);
1257 			queue_work(hdev->req_workqueue, &hdev->discov_update);
1258 		}
1259 
1260 		hci_req_run(&req, suspend_req_complete);
1261 	}
1262 
1263 	hdev->suspend_state = next;
1264 
1265 done:
1266 	clear_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
1267 	wake_up(&hdev->suspend_wait_q);
1268 }
1269 
1270 static bool adv_cur_instance_is_scannable(struct hci_dev *hdev)
1271 {
1272 	return hci_adv_instance_is_scannable(hdev, hdev->cur_adv_instance);
1273 }
1274 
1275 void __hci_req_disable_advertising(struct hci_request *req)
1276 {
1277 	if (ext_adv_capable(req->hdev)) {
1278 		__hci_req_disable_ext_adv_instance(req, 0x00);
1279 
1280 	} else {
1281 		u8 enable = 0x00;
1282 
1283 		hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1284 	}
1285 }
1286 
1287 static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
1288 {
1289 	/* If privacy is not enabled don't use RPA */
1290 	if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
1291 		return false;
1292 
1293 	/* If basic privacy mode is enabled use RPA */
1294 	if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1295 		return true;
1296 
1297 	/* If limited privacy mode is enabled don't use RPA if we're
1298 	 * both discoverable and bondable.
1299 	 */
1300 	if ((flags & MGMT_ADV_FLAG_DISCOV) &&
1301 	    hci_dev_test_flag(hdev, HCI_BONDABLE))
1302 		return false;
1303 
1304 	/* We're neither bondable nor discoverable in the limited
1305 	 * privacy mode, therefore use RPA.
1306 	 */
1307 	return true;
1308 }
1309 
1310 static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
1311 {
1312 	/* If there is no connection we are OK to advertise. */
1313 	if (hci_conn_num(hdev, LE_LINK) == 0)
1314 		return true;
1315 
1316 	/* Check le_states if there is any connection in peripheral role. */
1317 	if (hdev->conn_hash.le_num_peripheral > 0) {
1318 		/* Peripheral connection state and non connectable mode bit 20.
1319 		 */
1320 		if (!connectable && !(hdev->le_states[2] & 0x10))
1321 			return false;
1322 
1323 		/* Peripheral connection state and connectable mode bit 38
1324 		 * and scannable bit 21.
1325 		 */
1326 		if (connectable && (!(hdev->le_states[4] & 0x40) ||
1327 				    !(hdev->le_states[2] & 0x20)))
1328 			return false;
1329 	}
1330 
1331 	/* Check le_states if there is any connection in central role. */
1332 	if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_peripheral) {
1333 		/* Central connection state and non connectable mode bit 18. */
1334 		if (!connectable && !(hdev->le_states[2] & 0x02))
1335 			return false;
1336 
1337 		/* Central connection state and connectable mode bit 35 and
1338 		 * scannable 19.
1339 		 */
1340 		if (connectable && (!(hdev->le_states[4] & 0x08) ||
1341 				    !(hdev->le_states[2] & 0x08)))
1342 			return false;
1343 	}
1344 
1345 	return true;
1346 }
1347 
1348 void __hci_req_enable_advertising(struct hci_request *req)
1349 {
1350 	struct hci_dev *hdev = req->hdev;
1351 	struct adv_info *adv;
1352 	struct hci_cp_le_set_adv_param cp;
1353 	u8 own_addr_type, enable = 0x01;
1354 	bool connectable;
1355 	u16 adv_min_interval, adv_max_interval;
1356 	u32 flags;
1357 
1358 	flags = hci_adv_instance_flags(hdev, hdev->cur_adv_instance);
1359 	adv = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
1360 
1361 	/* If the "connectable" instance flag was not set, then choose between
1362 	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1363 	 */
1364 	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1365 		      mgmt_get_connectable(hdev);
1366 
1367 	if (!is_advertising_allowed(hdev, connectable))
1368 		return;
1369 
1370 	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1371 		__hci_req_disable_advertising(req);
1372 
1373 	/* Clear the HCI_LE_ADV bit temporarily so that the
1374 	 * hci_update_random_address knows that it's safe to go ahead
1375 	 * and write a new random address. The flag will be set back on
1376 	 * as soon as the SET_ADV_ENABLE HCI command completes.
1377 	 */
1378 	hci_dev_clear_flag(hdev, HCI_LE_ADV);
1379 
1380 	/* Set require_privacy to true only when non-connectable
1381 	 * advertising is used. In that case it is fine to use a
1382 	 * non-resolvable private address.
1383 	 */
1384 	if (hci_update_random_address(req, !connectable,
1385 				      adv_use_rpa(hdev, flags),
1386 				      &own_addr_type) < 0)
1387 		return;
1388 
1389 	memset(&cp, 0, sizeof(cp));
1390 
1391 	if (adv) {
1392 		adv_min_interval = adv->min_interval;
1393 		adv_max_interval = adv->max_interval;
1394 	} else {
1395 		adv_min_interval = hdev->le_adv_min_interval;
1396 		adv_max_interval = hdev->le_adv_max_interval;
1397 	}
1398 
1399 	if (connectable) {
1400 		cp.type = LE_ADV_IND;
1401 	} else {
1402 		if (adv_cur_instance_is_scannable(hdev))
1403 			cp.type = LE_ADV_SCAN_IND;
1404 		else
1405 			cp.type = LE_ADV_NONCONN_IND;
1406 
1407 		if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
1408 		    hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1409 			adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
1410 			adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
1411 		}
1412 	}
1413 
1414 	cp.min_interval = cpu_to_le16(adv_min_interval);
1415 	cp.max_interval = cpu_to_le16(adv_max_interval);
1416 	cp.own_address_type = own_addr_type;
1417 	cp.channel_map = hdev->le_adv_channel_map;
1418 
1419 	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1420 
1421 	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1422 }
1423 
1424 void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
1425 {
1426 	struct hci_dev *hdev = req->hdev;
1427 	u8 len;
1428 
1429 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1430 		return;
1431 
1432 	if (ext_adv_capable(hdev)) {
1433 		struct {
1434 			struct hci_cp_le_set_ext_scan_rsp_data cp;
1435 			u8 data[HCI_MAX_EXT_AD_LENGTH];
1436 		} pdu;
1437 
1438 		memset(&pdu, 0, sizeof(pdu));
1439 
1440 		len = eir_create_scan_rsp(hdev, instance, pdu.data);
1441 
1442 		if (hdev->scan_rsp_data_len == len &&
1443 		    !memcmp(pdu.data, hdev->scan_rsp_data, len))
1444 			return;
1445 
1446 		memcpy(hdev->scan_rsp_data, pdu.data, len);
1447 		hdev->scan_rsp_data_len = len;
1448 
1449 		pdu.cp.handle = instance;
1450 		pdu.cp.length = len;
1451 		pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1452 		pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1453 
1454 		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA,
1455 			    sizeof(pdu.cp) + len, &pdu.cp);
1456 	} else {
1457 		struct hci_cp_le_set_scan_rsp_data cp;
1458 
1459 		memset(&cp, 0, sizeof(cp));
1460 
1461 		len = eir_create_scan_rsp(hdev, instance, cp.data);
1462 
1463 		if (hdev->scan_rsp_data_len == len &&
1464 		    !memcmp(cp.data, hdev->scan_rsp_data, len))
1465 			return;
1466 
1467 		memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1468 		hdev->scan_rsp_data_len = len;
1469 
1470 		cp.length = len;
1471 
1472 		hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
1473 	}
1474 }
1475 
1476 void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
1477 {
1478 	struct hci_dev *hdev = req->hdev;
1479 	u8 len;
1480 
1481 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1482 		return;
1483 
1484 	if (ext_adv_capable(hdev)) {
1485 		struct {
1486 			struct hci_cp_le_set_ext_adv_data cp;
1487 			u8 data[HCI_MAX_EXT_AD_LENGTH];
1488 		} pdu;
1489 
1490 		memset(&pdu, 0, sizeof(pdu));
1491 
1492 		len = eir_create_adv_data(hdev, instance, pdu.data);
1493 
1494 		/* There's nothing to do if the data hasn't changed */
1495 		if (hdev->adv_data_len == len &&
1496 		    memcmp(pdu.data, hdev->adv_data, len) == 0)
1497 			return;
1498 
1499 		memcpy(hdev->adv_data, pdu.data, len);
1500 		hdev->adv_data_len = len;
1501 
1502 		pdu.cp.length = len;
1503 		pdu.cp.handle = instance;
1504 		pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1505 		pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1506 
1507 		hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA,
1508 			    sizeof(pdu.cp) + len, &pdu.cp);
1509 	} else {
1510 		struct hci_cp_le_set_adv_data cp;
1511 
1512 		memset(&cp, 0, sizeof(cp));
1513 
1514 		len = eir_create_adv_data(hdev, instance, cp.data);
1515 
1516 		/* There's nothing to do if the data hasn't changed */
1517 		if (hdev->adv_data_len == len &&
1518 		    memcmp(cp.data, hdev->adv_data, len) == 0)
1519 			return;
1520 
1521 		memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1522 		hdev->adv_data_len = len;
1523 
1524 		cp.length = len;
1525 
1526 		hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1527 	}
1528 }
1529 
1530 int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
1531 {
1532 	struct hci_request req;
1533 
1534 	hci_req_init(&req, hdev);
1535 	__hci_req_update_adv_data(&req, instance);
1536 
1537 	return hci_req_run(&req, NULL);
1538 }
1539 
1540 static void enable_addr_resolution_complete(struct hci_dev *hdev, u8 status,
1541 					    u16 opcode)
1542 {
1543 	BT_DBG("%s status %u", hdev->name, status);
1544 }
1545 
1546 void hci_req_disable_address_resolution(struct hci_dev *hdev)
1547 {
1548 	struct hci_request req;
1549 	__u8 enable = 0x00;
1550 
1551 	if (!use_ll_privacy(hdev) &&
1552 	    !hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
1553 		return;
1554 
1555 	hci_req_init(&req, hdev);
1556 
1557 	hci_req_add(&req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
1558 
1559 	hci_req_run(&req, enable_addr_resolution_complete);
1560 }
1561 
1562 static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1563 {
1564 	bt_dev_dbg(hdev, "status %u", status);
1565 }
1566 
1567 void hci_req_reenable_advertising(struct hci_dev *hdev)
1568 {
1569 	struct hci_request req;
1570 
1571 	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1572 	    list_empty(&hdev->adv_instances))
1573 		return;
1574 
1575 	hci_req_init(&req, hdev);
1576 
1577 	if (hdev->cur_adv_instance) {
1578 		__hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
1579 						true);
1580 	} else {
1581 		if (ext_adv_capable(hdev)) {
1582 			__hci_req_start_ext_adv(&req, 0x00);
1583 		} else {
1584 			__hci_req_update_adv_data(&req, 0x00);
1585 			__hci_req_update_scan_rsp_data(&req, 0x00);
1586 			__hci_req_enable_advertising(&req);
1587 		}
1588 	}
1589 
1590 	hci_req_run(&req, adv_enable_complete);
1591 }
1592 
1593 static void adv_timeout_expire(struct work_struct *work)
1594 {
1595 	struct hci_dev *hdev = container_of(work, struct hci_dev,
1596 					    adv_instance_expire.work);
1597 
1598 	struct hci_request req;
1599 	u8 instance;
1600 
1601 	bt_dev_dbg(hdev, "");
1602 
1603 	hci_dev_lock(hdev);
1604 
1605 	hdev->adv_instance_timeout = 0;
1606 
1607 	instance = hdev->cur_adv_instance;
1608 	if (instance == 0x00)
1609 		goto unlock;
1610 
1611 	hci_req_init(&req, hdev);
1612 
1613 	hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);
1614 
1615 	if (list_empty(&hdev->adv_instances))
1616 		__hci_req_disable_advertising(&req);
1617 
1618 	hci_req_run(&req, NULL);
1619 
1620 unlock:
1621 	hci_dev_unlock(hdev);
1622 }
1623 
1624 static int hci_req_add_le_interleaved_scan(struct hci_request *req,
1625 					   unsigned long opt)
1626 {
1627 	struct hci_dev *hdev = req->hdev;
1628 	int ret = 0;
1629 
1630 	hci_dev_lock(hdev);
1631 
1632 	if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
1633 		hci_req_add_le_scan_disable(req, false);
1634 	hci_req_add_le_passive_scan(req);
1635 
1636 	switch (hdev->interleave_scan_state) {
1637 	case INTERLEAVE_SCAN_ALLOWLIST:
1638 		bt_dev_dbg(hdev, "next state: allowlist");
1639 		hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
1640 		break;
1641 	case INTERLEAVE_SCAN_NO_FILTER:
1642 		bt_dev_dbg(hdev, "next state: no filter");
1643 		hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST;
1644 		break;
1645 	case INTERLEAVE_SCAN_NONE:
1646 		BT_ERR("unexpected error");
1647 		ret = -1;
1648 	}
1649 
1650 	hci_dev_unlock(hdev);
1651 
1652 	return ret;
1653 }
1654 
1655 static void interleave_scan_work(struct work_struct *work)
1656 {
1657 	struct hci_dev *hdev = container_of(work, struct hci_dev,
1658 					    interleave_scan.work);
1659 	u8 status;
1660 	unsigned long timeout;
1661 
1662 	if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) {
1663 		timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration);
1664 	} else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) {
1665 		timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration);
1666 	} else {
1667 		bt_dev_err(hdev, "unexpected error");
1668 		return;
1669 	}
1670 
1671 	hci_req_sync(hdev, hci_req_add_le_interleaved_scan, 0,
1672 		     HCI_CMD_TIMEOUT, &status);
1673 
1674 	/* Don't continue interleaving if it was canceled */
1675 	if (is_interleave_scanning(hdev))
1676 		queue_delayed_work(hdev->req_workqueue,
1677 				   &hdev->interleave_scan, timeout);
1678 }
1679 
1680 int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
1681 			   bool use_rpa, struct adv_info *adv_instance,
1682 			   u8 *own_addr_type, bdaddr_t *rand_addr)
1683 {
1684 	int err;
1685 
1686 	bacpy(rand_addr, BDADDR_ANY);
1687 
1688 	/* If privacy is enabled use a resolvable private address. If
1689 	 * current RPA has expired then generate a new one.
1690 	 */
1691 	if (use_rpa) {
1692 		/* If the controller supports LL Privacy, use own address
1693 		 * type 0x03 (ADDR_LE_DEV_RANDOM_RESOLVED)
1694 		 */
1695 		if (use_ll_privacy(hdev) &&
1696 		    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
1697 			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
1698 		else
1699 			*own_addr_type = ADDR_LE_DEV_RANDOM;
1700 
1701 		if (adv_instance) {
1702 			if (adv_rpa_valid(adv_instance))
1703 				return 0;
1704 		} else {
1705 			if (rpa_valid(hdev))
1706 				return 0;
1707 		}
1708 
1709 		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
1710 		if (err < 0) {
1711 			bt_dev_err(hdev, "failed to generate new RPA");
1712 			return err;
1713 		}
1714 
1715 		bacpy(rand_addr, &hdev->rpa);
1716 
1717 		return 0;
1718 	}
1719 
1720 	/* In case of required privacy without a resolvable private address,
1721 	 * use a non-resolvable private address. This is useful for
1722 	 * non-connectable advertising.
1723 	 */
1724 	if (require_privacy) {
1725 		bdaddr_t nrpa;
1726 
1727 		while (true) {
1728 			/* The non-resolvable private address is generated
1729 			 * from six random bytes with the two most significant
1730 			 * bits cleared.
1731 			 */
1732 			get_random_bytes(&nrpa, 6);
1733 			nrpa.b[5] &= 0x3f;
1734 
1735 			/* The non-resolvable private address shall not be
1736 			 * equal to the public address.
1737 			 */
1738 			if (bacmp(&hdev->bdaddr, &nrpa))
1739 				break;
1740 		}
1741 
1742 		*own_addr_type = ADDR_LE_DEV_RANDOM;
1743 		bacpy(rand_addr, &nrpa);
1744 
1745 		return 0;
1746 	}
1747 
1748 	/* No privacy so use a public address. */
1749 	*own_addr_type = ADDR_LE_DEV_PUBLIC;
1750 
1751 	return 0;
1752 }
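
/* Summary of the address choice above (added for illustration):
 *
 *	use_rpa         -> *own_addr_type = ADDR_LE_DEV_RANDOM or
 *	                   ADDR_LE_DEV_RANDOM_RESOLVED, rand_addr = hdev->rpa
 *	                   (regenerated if the current RPA has expired)
 *	require_privacy -> *own_addr_type = ADDR_LE_DEV_RANDOM,
 *	                   rand_addr = a freshly generated NRPA
 *	otherwise       -> *own_addr_type = ADDR_LE_DEV_PUBLIC,
 *	                   rand_addr = BDADDR_ANY
 */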
1753 
1754 void __hci_req_clear_ext_adv_sets(struct hci_request *req)
1755 {
1756 	hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
1757 }
1758 
1759 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
1760 {
1761 	struct hci_dev *hdev = req->hdev;
1762 
1763 	/* If we're advertising or initiating an LE connection we can't
1764 	 * go ahead and change the random address at this time. This is
1765 	 * because the eventual initiator address used for the
1766 	 * subsequently created connection will be undefined (some
1767 	 * controllers use the new address and others the one we had
1768 	 * when the operation started).
1769 	 *
1770 	 * In this kind of scenario skip the update and let the random
1771 	 * address be updated at the next cycle.
1772 	 */
1773 	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
1774 	    hci_lookup_le_connect(hdev)) {
1775 		bt_dev_dbg(hdev, "Deferring random address update");
1776 		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1777 		return;
1778 	}
1779 
1780 	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
1781 }
1782 
1783 int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
1784 {
1785 	struct hci_cp_le_set_ext_adv_params cp;
1786 	struct hci_dev *hdev = req->hdev;
1787 	bool connectable;
1788 	u32 flags;
1789 	bdaddr_t random_addr;
1790 	u8 own_addr_type;
1791 	int err;
1792 	struct adv_info *adv_instance;
1793 	bool secondary_adv;
1794 
1795 	if (instance > 0) {
1796 		adv_instance = hci_find_adv_instance(hdev, instance);
1797 		if (!adv_instance)
1798 			return -EINVAL;
1799 	} else {
1800 		adv_instance = NULL;
1801 	}
1802 
1803 	flags = hci_adv_instance_flags(hdev, instance);
1804 
1805 	/* If the "connectable" instance flag was not set, then choose between
1806 	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1807 	 */
1808 	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1809 		      mgmt_get_connectable(hdev);
1810 
1811 	if (!is_advertising_allowed(hdev, connectable))
1812 		return -EPERM;
1813 
1814 	/* Set require_privacy to true only when non-connectable
1815 	 * advertising is used. In that case it is fine to use a
1816 	 * non-resolvable private address.
1817 	 */
1818 	err = hci_get_random_address(hdev, !connectable,
1819 				     adv_use_rpa(hdev, flags), adv_instance,
1820 				     &own_addr_type, &random_addr);
1821 	if (err < 0)
1822 		return err;
1823 
1824 	memset(&cp, 0, sizeof(cp));
1825 
1826 	if (adv_instance) {
1827 		hci_cpu_to_le24(adv_instance->min_interval, cp.min_interval);
1828 		hci_cpu_to_le24(adv_instance->max_interval, cp.max_interval);
1829 		cp.tx_power = adv_instance->tx_power;
1830 	} else {
1831 		hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
1832 		hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
1833 		cp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
1834 	}
1835 
1836 	secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
1837 
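	/* Select the advertising event properties: connectable, scannable or
	 * non-connectable, using the extended variants whenever a secondary
	 * PHY was requested.
	 */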
1838 	if (connectable) {
1839 		if (secondary_adv)
1840 			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
1841 		else
1842 			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
1843 	} else if (hci_adv_instance_is_scannable(hdev, instance) ||
1844 		   (flags & MGMT_ADV_PARAM_SCAN_RSP)) {
1845 		if (secondary_adv)
1846 			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
1847 		else
1848 			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
1849 	} else {
1850 		if (secondary_adv)
1851 			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
1852 		else
1853 			cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
1854 	}
1855 
1856 	cp.own_addr_type = own_addr_type;
1857 	cp.channel_map = hdev->le_adv_channel_map;
1858 	cp.handle = instance;
1859 
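	/* Map the requested secondary PHY to the primary/secondary PHY
	 * fields of the command.
	 */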
1860 	if (flags & MGMT_ADV_FLAG_SEC_2M) {
1861 		cp.primary_phy = HCI_ADV_PHY_1M;
1862 		cp.secondary_phy = HCI_ADV_PHY_2M;
1863 	} else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
1864 		cp.primary_phy = HCI_ADV_PHY_CODED;
1865 		cp.secondary_phy = HCI_ADV_PHY_CODED;
1866 	} else {
1867 		/* In all other cases use 1M */
1868 		cp.primary_phy = HCI_ADV_PHY_1M;
1869 		cp.secondary_phy = HCI_ADV_PHY_1M;
1870 	}
1871 
1872 	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
1873 
1874 	if (own_addr_type == ADDR_LE_DEV_RANDOM &&
1875 	    bacmp(&random_addr, BDADDR_ANY)) {
1876 		struct hci_cp_le_set_adv_set_rand_addr cp;
1877 
1878 		/* Check if the random address needs to be updated */
1879 		if (adv_instance) {
1880 			if (!bacmp(&random_addr, &adv_instance->random_addr))
1881 				return 0;
1882 		} else {
1883 			if (!bacmp(&random_addr, &hdev->random_addr))
1884 				return 0;
1885 			/* Instance 0x00 doesn't have an adv_info; instead it
1886 			 * uses hdev->random_addr to track its address, so
1887 			 * whenever the address needs to be updated this also
1888 			 * sets the random address, since hdev->random_addr is
1889 			 * shared with the scan state machine.
1890 			 */
1891 			set_random_addr(req, &random_addr);
1892 		}
1893 
1894 		memset(&cp, 0, sizeof(cp));
1895 
1896 		cp.handle = instance;
1897 		bacpy(&cp.bdaddr, &random_addr);
1898 
1899 		hci_req_add(req,
1900 			    HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
1901 			    sizeof(cp), &cp);
1902 	}
1903 
1904 	return 0;
1905 }
1906 
1907 int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
1908 {
1909 	struct hci_dev *hdev = req->hdev;
1910 	struct hci_cp_le_set_ext_adv_enable *cp;
1911 	struct hci_cp_ext_adv_set *adv_set;
1912 	u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
1913 	struct adv_info *adv_instance;
1914 
1915 	if (instance > 0) {
1916 		adv_instance = hci_find_adv_instance(hdev, instance);
1917 		if (!adv_instance)
1918 			return -EINVAL;
1919 	} else {
1920 		adv_instance = NULL;
1921 	}
1922 
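	/* The buffer holds the enable parameters followed by exactly one
	 * advertising set entry for this instance.
	 */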
1923 	cp = (void *) data;
1924 	adv_set = (void *) cp->data;
1925 
1926 	memset(cp, 0, sizeof(*cp));
1927 
1928 	cp->enable = 0x01;
1929 	cp->num_of_sets = 0x01;
1930 
1931 	memset(adv_set, 0, sizeof(*adv_set));
1932 
1933 	adv_set->handle = instance;
1934 
1935 	/* Set the duration per instance since the controller is responsible
1936 	 * for scheduling it.
1937 	 */
1938 	if (adv_instance && adv_instance->duration) {
1939 		u16 duration = adv_instance->timeout * MSEC_PER_SEC;
1940 
1941 		/* Time = N * 10 ms */
1942 		adv_set->duration = cpu_to_le16(duration / 10);
1943 	}
1944 
1945 	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
1946 		    sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
1947 		    data);
1948 
1949 	return 0;
1950 }
1951 
1952 int __hci_req_disable_ext_adv_instance(struct hci_request *req, u8 instance)
1953 {
1954 	struct hci_dev *hdev = req->hdev;
1955 	struct hci_cp_le_set_ext_adv_enable *cp;
1956 	struct hci_cp_ext_adv_set *adv_set;
1957 	u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
1958 	u8 req_size;
1959 
1960 	/* If request specifies an instance that doesn't exist, fail */
1961 	if (instance > 0 && !hci_find_adv_instance(hdev, instance))
1962 		return -EINVAL;
1963 
1964 	memset(data, 0, sizeof(data));
1965 
1966 	cp = (void *)data;
1967 	adv_set = (void *)cp->data;
1968 
1969 	/* Instance 0x00 indicates all advertising instances will be disabled */
1970 	cp->num_of_sets = !!instance;
1971 	cp->enable = 0x00;
1972 
1973 	adv_set->handle = instance;
1974 
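	/* With instance 0x00 (num_of_sets == 0) the set entry is left out of
	 * the command, so req_size only covers the enable header.
	 */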
1975 	req_size = sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets;
1976 	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, req_size, data);
1977 
1978 	return 0;
1979 }
1980 
1981 int __hci_req_remove_ext_adv_instance(struct hci_request *req, u8 instance)
1982 {
1983 	struct hci_dev *hdev = req->hdev;
1984 
1985 	/* If request specifies an instance that doesn't exist, fail */
1986 	if (instance > 0 && !hci_find_adv_instance(hdev, instance))
1987 		return -EINVAL;
1988 
1989 	hci_req_add(req, HCI_OP_LE_REMOVE_ADV_SET, sizeof(instance), &instance);
1990 
1991 	return 0;
1992 }
1993 
1994 int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
1995 {
1996 	struct hci_dev *hdev = req->hdev;
1997 	struct adv_info *adv_instance = hci_find_adv_instance(hdev, instance);
1998 	int err;
1999 
2000 	/* If the instance isn't pending, the controller knows about it and
2001 	 * it's safe to disable it.
2002 	 */
2003 	if (adv_instance && !adv_instance->pending)
2004 		__hci_req_disable_ext_adv_instance(req, instance);
2005 
2006 	err = __hci_req_setup_ext_adv_instance(req, instance);
2007 	if (err < 0)
2008 		return err;
2009 
2010 	__hci_req_update_scan_rsp_data(req, instance);
2011 	__hci_req_enable_ext_advertising(req, instance);
2012 
2013 	return 0;
2014 }
2015 
2016 int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
2017 				    bool force)
2018 {
2019 	struct hci_dev *hdev = req->hdev;
2020 	struct adv_info *adv_instance = NULL;
2021 	u16 timeout;
2022 
2023 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2024 	    list_empty(&hdev->adv_instances))
2025 		return -EPERM;
2026 
2027 	if (hdev->adv_instance_timeout)
2028 		return -EBUSY;
2029 
2030 	adv_instance = hci_find_adv_instance(hdev, instance);
2031 	if (!adv_instance)
2032 		return -ENOENT;
2033 
2034 	/* A zero timeout means unlimited advertising. As long as there is
2035 	 * only one instance, duration should be ignored. We still set a timeout
2036 	 * in case further instances are being added later on.
2037 	 *
2038 	 * If the remaining lifetime of the instance is more than the duration
2039 	 * then the timeout corresponds to the duration, otherwise it will be
2040 	 * reduced to the remaining instance lifetime.
2041 	 */
2042 	if (adv_instance->timeout == 0 ||
2043 	    adv_instance->duration <= adv_instance->remaining_time)
2044 		timeout = adv_instance->duration;
2045 	else
2046 		timeout = adv_instance->remaining_time;
2047 
2048 	/* The remaining time is reduced unless the instance is being
2049 	 * advertised without a time limit.
2050 	 */
2051 	if (adv_instance->timeout)
2052 		adv_instance->remaining_time =
2053 				adv_instance->remaining_time - timeout;
2054 
2055 	/* Only use work for scheduling instances with legacy advertising */
2056 	if (!ext_adv_capable(hdev)) {
2057 		hdev->adv_instance_timeout = timeout;
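		/* The timeout is in seconds, hence the conversion to
		 * milliseconds for msecs_to_jiffies().
		 */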
2058 		queue_delayed_work(hdev->req_workqueue,
2059 			   &hdev->adv_instance_expire,
2060 			   msecs_to_jiffies(timeout * 1000));
2061 	}
2062 
2063 	/* If we're just re-scheduling the same instance again then do not
2064 	 * execute any HCI commands. This happens when a single instance is
2065 	 * being advertised.
2066 	 */
2067 	if (!force && hdev->cur_adv_instance == instance &&
2068 	    hci_dev_test_flag(hdev, HCI_LE_ADV))
2069 		return 0;
2070 
2071 	hdev->cur_adv_instance = instance;
2072 	if (ext_adv_capable(hdev)) {
2073 		__hci_req_start_ext_adv(req, instance);
2074 	} else {
2075 		__hci_req_update_adv_data(req, instance);
2076 		__hci_req_update_scan_rsp_data(req, instance);
2077 		__hci_req_enable_advertising(req);
2078 	}
2079 
2080 	return 0;
2081 }
2082 
2083 /* For a single instance:
2084  * - force == true: The instance will be removed even when its remaining
2085  *   lifetime is not zero.
2086  * - force == false: the instance will be deactivated but kept stored unless
2087  *   the remaining lifetime is zero.
2088  *
2089  * For instance == 0x00:
2090  * - force == true: All instances will be removed regardless of their timeout
2091  *   setting.
2092  * - force == false: Only instances that have a timeout will be removed.
2093  */
2094 void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
2095 				struct hci_request *req, u8 instance,
2096 				bool force)
2097 {
2098 	struct adv_info *adv_instance, *n, *next_instance = NULL;
2099 	int err;
2100 	u8 rem_inst;
2101 
2102 	/* Cancel any timeout concerning the removed instance(s). */
2103 	if (!instance || hdev->cur_adv_instance == instance)
2104 		cancel_adv_timeout(hdev);
2105 
2106 	/* Get the next instance to advertise BEFORE we remove
2107 	 * the current one. This can be the same instance again
2108 	 * if there is only one instance.
2109 	 */
2110 	if (instance && hdev->cur_adv_instance == instance)
2111 		next_instance = hci_get_next_instance(hdev, instance);
2112 
2113 	if (instance == 0x00) {
2114 		list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
2115 					 list) {
2116 			if (!(force || adv_instance->timeout))
2117 				continue;
2118 
2119 			rem_inst = adv_instance->instance;
2120 			err = hci_remove_adv_instance(hdev, rem_inst);
2121 			if (!err)
2122 				mgmt_advertising_removed(sk, hdev, rem_inst);
2123 		}
2124 	} else {
2125 		adv_instance = hci_find_adv_instance(hdev, instance);
2126 
2127 		if (force || (adv_instance && adv_instance->timeout &&
2128 			      !adv_instance->remaining_time)) {
2129 			/* Don't advertise a removed instance. */
2130 			if (next_instance &&
2131 			    next_instance->instance == instance)
2132 				next_instance = NULL;
2133 
2134 			err = hci_remove_adv_instance(hdev, instance);
2135 			if (!err)
2136 				mgmt_advertising_removed(sk, hdev, instance);
2137 		}
2138 	}
2139 
2140 	if (!req || !hdev_is_powered(hdev) ||
2141 	    hci_dev_test_flag(hdev, HCI_ADVERTISING))
2142 		return;
2143 
2144 	if (next_instance && !ext_adv_capable(hdev))
2145 		__hci_req_schedule_adv_instance(req, next_instance->instance,
2146 						false);
2147 }
2148 
2149 int hci_update_random_address(struct hci_request *req, bool require_privacy,
2150 			      bool use_rpa, u8 *own_addr_type)
2151 {
2152 	struct hci_dev *hdev = req->hdev;
2153 	int err;
2154 
2155 	/* If privacy is enabled use a resolvable private address. If the
2156 	 * current RPA has expired or an address other than the current
2157 	 * RPA is in use, then generate a new one.
2158 	 */
2159 	if (use_rpa) {
2160 		/* If the controller supports LL Privacy, use own address
2161 		 * type 0x03 (RPA generated and resolved by the controller).
2162 		 */
2163 		if (use_ll_privacy(hdev) &&
2164 		    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
2165 			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
2166 		else
2167 			*own_addr_type = ADDR_LE_DEV_RANDOM;
2168 
2169 		if (rpa_valid(hdev))
2170 			return 0;
2171 
2172 		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
2173 		if (err < 0) {
2174 			bt_dev_err(hdev, "failed to generate new RPA");
2175 			return err;
2176 		}
2177 
2178 		set_random_addr(req, &hdev->rpa);
2179 
2180 		return 0;
2181 	}
2182 
2183 	/* In case of required privacy without resolvable private address,
2184 	 * use a non-resolvable private address. This is useful for active
2185 	 * scanning and non-connectable advertising.
2186 	 */
2187 	if (require_privacy) {
2188 		bdaddr_t nrpa;
2189 
2190 		while (true) {
2191 			/* The non-resolvable private address is generated
2192 			 * from six random bytes with the two most significant
2193 			 * bits cleared.
2194 			 */
2195 			get_random_bytes(&nrpa, 6);
2196 			nrpa.b[5] &= 0x3f;
2197 
2198 			/* The non-resolvable private address shall not be
2199 			 * equal to the public address.
2200 			 */
2201 			if (bacmp(&hdev->bdaddr, &nrpa))
2202 				break;
2203 		}
2204 
2205 		*own_addr_type = ADDR_LE_DEV_RANDOM;
2206 		set_random_addr(req, &nrpa);
2207 		return 0;
2208 	}
2209 
2210 	/* If forcing the static address is in use or there is no public
2211 	 * address, use the static address as the random address (but skip
2212 	 * the HCI command if the current random address is already the
2213 	 * static one).
2214 	 *
2215 	 * In case BR/EDR has been disabled on a dual-mode controller
2216 	 * and a static address has been configured, then use that
2217 	 * address instead of the public BR/EDR address.
2218 	 */
2219 	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2220 	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2221 	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2222 	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
2223 		*own_addr_type = ADDR_LE_DEV_RANDOM;
2224 		if (bacmp(&hdev->static_addr, &hdev->random_addr))
2225 			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
2226 				    &hdev->static_addr);
2227 		return 0;
2228 	}
2229 
2230 	/* Neither privacy nor static address is being used so use a
2231 	 * public address.
2232 	 */
2233 	*own_addr_type = ADDR_LE_DEV_PUBLIC;
2234 
2235 	return 0;
2236 }
2237 
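/* Return true if any device on the accept list is currently without a
 * baseband connection, in which case page scan must remain enabled so that
 * those devices can reconnect.
 */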
2238 static bool disconnected_accept_list_entries(struct hci_dev *hdev)
2239 {
2240 	struct bdaddr_list *b;
2241 
2242 	list_for_each_entry(b, &hdev->accept_list, list) {
2243 		struct hci_conn *conn;
2244 
2245 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
2246 		if (!conn)
2247 			return true;
2248 
2249 		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2250 			return true;
2251 	}
2252 
2253 	return false;
2254 }
2255 
2256 void __hci_req_update_scan(struct hci_request *req)
2257 {
2258 	struct hci_dev *hdev = req->hdev;
2259 	u8 scan;
2260 
2261 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2262 		return;
2263 
2264 	if (!hdev_is_powered(hdev))
2265 		return;
2266 
2267 	if (mgmt_powering_down(hdev))
2268 		return;
2269 
2270 	if (hdev->scanning_paused)
2271 		return;
2272 
2273 	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
2274 	    disconnected_accept_list_entries(hdev))
2275 		scan = SCAN_PAGE;
2276 	else
2277 		scan = SCAN_DISABLED;
2278 
2279 	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2280 		scan |= SCAN_INQUIRY;
2281 
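	/* Skip the command if the page scan and inquiry scan states already
	 * match the desired settings.
	 */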
2282 	if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
2283 	    test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
2284 		return;
2285 
2286 	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
2287 }
2288 
2289 static int update_scan(struct hci_request *req, unsigned long opt)
2290 {
2291 	hci_dev_lock(req->hdev);
2292 	__hci_req_update_scan(req);
2293 	hci_dev_unlock(req->hdev);
2294 	return 0;
2295 }
2296 
2297 static void scan_update_work(struct work_struct *work)
2298 {
2299 	struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
2300 
2301 	hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
2302 }
2303 
2304 static int connectable_update(struct hci_request *req, unsigned long opt)
2305 {
2306 	struct hci_dev *hdev = req->hdev;
2307 
2308 	hci_dev_lock(hdev);
2309 
2310 	__hci_req_update_scan(req);
2311 
2312 	/* If BR/EDR is not enabled and we disable advertising as a
2313 	 * by-product of disabling connectable, we need to update the
2314 	 * advertising flags.
2315 	 */
2316 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2317 		__hci_req_update_adv_data(req, hdev->cur_adv_instance);
2318 
2319 	/* Update the advertising parameters if necessary */
2320 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2321 	    !list_empty(&hdev->adv_instances)) {
2322 		if (ext_adv_capable(hdev))
2323 			__hci_req_start_ext_adv(req, hdev->cur_adv_instance);
2324 		else
2325 			__hci_req_enable_advertising(req);
2326 	}
2327 
2328 	__hci_update_background_scan(req);
2329 
2330 	hci_dev_unlock(hdev);
2331 
2332 	return 0;
2333 }
2334 
2335 static void connectable_update_work(struct work_struct *work)
2336 {
2337 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2338 					    connectable_update);
2339 	u8 status;
2340 
2341 	hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
2342 	mgmt_set_connectable_complete(hdev, status);
2343 }
2344 
2345 static u8 get_service_classes(struct hci_dev *hdev)
2346 {
2347 	struct bt_uuid *uuid;
2348 	u8 val = 0;
2349 
2350 	list_for_each_entry(uuid, &hdev->uuids, list)
2351 		val |= uuid->svc_hint;
2352 
2353 	return val;
2354 }
2355 
2356 void __hci_req_update_class(struct hci_request *req)
2357 {
2358 	struct hci_dev *hdev = req->hdev;
2359 	u8 cod[3];
2360 
2361 	bt_dev_dbg(hdev, "");
2362 
2363 	if (!hdev_is_powered(hdev))
2364 		return;
2365 
2366 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2367 		return;
2368 
2369 	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
2370 		return;
2371 
2372 	cod[0] = hdev->minor_class;
2373 	cod[1] = hdev->major_class;
2374 	cod[2] = get_service_classes(hdev);
2375 
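	/* Limited Discoverable Mode is bit 13 of the Class of Device, i.e.
	 * 0x20 in the middle byte.
	 */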
2376 	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
2377 		cod[1] |= 0x20;
2378 
2379 	if (memcmp(cod, hdev->dev_class, 3) == 0)
2380 		return;
2381 
2382 	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
2383 }
2384 
2385 static void write_iac(struct hci_request *req)
2386 {
2387 	struct hci_dev *hdev = req->hdev;
2388 	struct hci_cp_write_current_iac_lap cp;
2389 
2390 	if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2391 		return;
2392 
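	/* The IAC LAPs are written little-endian: LIAC is 0x9E8B00 and GIAC
	 * is 0x9E8B33.
	 */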
2393 	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
2394 		/* Limited discoverable mode */
2395 		cp.num_iac = min_t(u8, hdev->num_iac, 2);
2396 		cp.iac_lap[0] = 0x00;	/* LIAC */
2397 		cp.iac_lap[1] = 0x8b;
2398 		cp.iac_lap[2] = 0x9e;
2399 		cp.iac_lap[3] = 0x33;	/* GIAC */
2400 		cp.iac_lap[4] = 0x8b;
2401 		cp.iac_lap[5] = 0x9e;
2402 	} else {
2403 		/* General discoverable mode */
2404 		cp.num_iac = 1;
2405 		cp.iac_lap[0] = 0x33;	/* GIAC */
2406 		cp.iac_lap[1] = 0x8b;
2407 		cp.iac_lap[2] = 0x9e;
2408 	}
2409 
2410 	hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
2411 		    (cp.num_iac * 3) + 1, &cp);
2412 }
2413 
2414 static int discoverable_update(struct hci_request *req, unsigned long opt)
2415 {
2416 	struct hci_dev *hdev = req->hdev;
2417 
2418 	hci_dev_lock(hdev);
2419 
2420 	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2421 		write_iac(req);
2422 		__hci_req_update_scan(req);
2423 		__hci_req_update_class(req);
2424 	}
2425 
2426 	/* Advertising instances don't use the global discoverable setting, so
2427 	 * only update AD if advertising was enabled using Set Advertising.
2428 	 */
2429 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2430 		__hci_req_update_adv_data(req, 0x00);
2431 
2432 		/* Discoverable mode affects the local advertising
2433 		 * address in limited privacy mode.
2434 		 */
2435 		if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
2436 			if (ext_adv_capable(hdev))
2437 				__hci_req_start_ext_adv(req, 0x00);
2438 			else
2439 				__hci_req_enable_advertising(req);
2440 		}
2441 	}
2442 
2443 	hci_dev_unlock(hdev);
2444 
2445 	return 0;
2446 }
2447 
2448 static void discoverable_update_work(struct work_struct *work)
2449 {
2450 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2451 					    discoverable_update);
2452 	u8 status;
2453 
2454 	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
2455 	mgmt_set_discoverable_complete(hdev, status);
2456 }
2457 
2458 void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
2459 		      u8 reason)
2460 {
2461 	switch (conn->state) {
2462 	case BT_CONNECTED:
2463 	case BT_CONFIG:
2464 		if (conn->type == AMP_LINK) {
2465 			struct hci_cp_disconn_phy_link cp;
2466 
2467 			cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
2468 			cp.reason = reason;
2469 			hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
2470 				    &cp);
2471 		} else {
2472 			struct hci_cp_disconnect dc;
2473 
2474 			dc.handle = cpu_to_le16(conn->handle);
2475 			dc.reason = reason;
2476 			hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2477 		}
2478 
2479 		conn->state = BT_DISCONN;
2480 
2481 		break;
2482 	case BT_CONNECT:
2483 		if (conn->type == LE_LINK) {
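			/* While still scanning for the peer no LE Create
			 * Connection is outstanding, so there is nothing to
			 * cancel.
			 */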
2484 			if (test_bit(HCI_CONN_SCANNING, &conn->flags))
2485 				break;
2486 			hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
2487 				    0, NULL);
2488 		} else if (conn->type == ACL_LINK) {
2489 			if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
2490 				break;
2491 			hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
2492 				    6, &conn->dst);
2493 		}
2494 		break;
2495 	case BT_CONNECT2:
2496 		if (conn->type == ACL_LINK) {
2497 			struct hci_cp_reject_conn_req rej;
2498 
2499 			bacpy(&rej.bdaddr, &conn->dst);
2500 			rej.reason = reason;
2501 
2502 			hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
2503 				    sizeof(rej), &rej);
2504 		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
2505 			struct hci_cp_reject_sync_conn_req rej;
2506 
2507 			bacpy(&rej.bdaddr, &conn->dst);
2508 
2509 			/* SCO rejection has its own limited set of
2510 			 * allowed error values (0x0D-0x0F) which isn't
2511 			 * compatible with most values passed to this
2512 			 * function. To be safe, hard-code one of the
2513 			 * values that's suitable for SCO.
2514 			 */
2515 			rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
2516 
2517 			hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
2518 				    sizeof(rej), &rej);
2519 		}
2520 		break;
2521 	default:
2522 		conn->state = BT_CLOSED;
2523 		break;
2524 	}
2525 }
2526 
2527 static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2528 {
2529 	if (status)
2530 		bt_dev_dbg(hdev, "Failed to abort connection: status 0x%2.2x", status);
2531 }
2532 
2533 int hci_abort_conn(struct hci_conn *conn, u8 reason)
2534 {
2535 	struct hci_request req;
2536 	int err;
2537 
2538 	hci_req_init(&req, conn->hdev);
2539 
2540 	__hci_abort_conn(&req, conn, reason);
2541 
2542 	err = hci_req_run(&req, abort_conn_complete);
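	/* -ENODATA means no command was queued for this connection state,
	 * which is not a failure.
	 */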
2543 	if (err && err != -ENODATA) {
2544 		bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
2545 		return err;
2546 	}
2547 
2548 	return 0;
2549 }
2550 
2551 static int update_bg_scan(struct hci_request *req, unsigned long opt)
2552 {
2553 	hci_dev_lock(req->hdev);
2554 	__hci_update_background_scan(req);
2555 	hci_dev_unlock(req->hdev);
2556 	return 0;
2557 }
2558 
2559 static void bg_scan_update(struct work_struct *work)
2560 {
2561 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2562 					    bg_scan_update);
2563 	struct hci_conn *conn;
2564 	u8 status;
2565 	int err;
2566 
2567 	err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
2568 	if (!err)
2569 		return;
2570 
2571 	hci_dev_lock(hdev);
2572 
2573 	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
2574 	if (conn)
2575 		hci_le_conn_failed(conn, status);
2576 
2577 	hci_dev_unlock(hdev);
2578 }
2579 
2580 static int le_scan_disable(struct hci_request *req, unsigned long opt)
2581 {
2582 	hci_req_add_le_scan_disable(req, false);
2583 	return 0;
2584 }
2585 
2586 static int bredr_inquiry(struct hci_request *req, unsigned long opt)
2587 {
2588 	u8 length = opt;
2589 	const u8 giac[3] = { 0x33, 0x8b, 0x9e };
2590 	const u8 liac[3] = { 0x00, 0x8b, 0x9e };
2591 	struct hci_cp_inquiry cp;
2592 
2593 	if (test_bit(HCI_INQUIRY, &req->hdev->flags))
2594 		return 0;
2595 
2596 	bt_dev_dbg(req->hdev, "");
2597 
2598 	hci_dev_lock(req->hdev);
2599 	hci_inquiry_cache_flush(req->hdev);
2600 	hci_dev_unlock(req->hdev);
2601 
2602 	memset(&cp, 0, sizeof(cp));
2603 
2604 	if (req->hdev->discovery.limited)
2605 		memcpy(&cp.lap, liac, sizeof(cp.lap));
2606 	else
2607 		memcpy(&cp.lap, giac, sizeof(cp.lap));
2608 
2609 	cp.length = length;
2610 
2611 	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2612 
2613 	return 0;
2614 }
2615 
2616 static void le_scan_disable_work(struct work_struct *work)
2617 {
2618 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2619 					    le_scan_disable.work);
2620 	u8 status;
2621 
2622 	bt_dev_dbg(hdev, "");
2623 
2624 	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2625 		return;
2626 
2627 	cancel_delayed_work(&hdev->le_scan_restart);
2628 
2629 	hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
2630 	if (status) {
2631 		bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
2632 			   status);
2633 		return;
2634 	}
2635 
2636 	hdev->discovery.scan_start = 0;
2637 
2638 	/* If we were running an LE-only scan, change the discovery state.
2639 	 * If we were running both LE and BR/EDR inquiry simultaneously and
2640 	 * BR/EDR inquiry has already finished, stop discovery; otherwise
2641 	 * BR/EDR inquiry will stop discovery when it finishes. If we are
2642 	 * going to resolve a remote device name, do not change the
2643 	 * discovery state.
2644 	 */
2645 
2646 	if (hdev->discovery.type == DISCOV_TYPE_LE)
2647 		goto discov_stopped;
2648 
2649 	if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
2650 		return;
2651 
2652 	if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
2653 		if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
2654 		    hdev->discovery.state != DISCOVERY_RESOLVING)
2655 			goto discov_stopped;
2656 
2657 		return;
2658 	}
2659 
2660 	hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
2661 		     HCI_CMD_TIMEOUT, &status);
2662 	if (status) {
2663 		bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
2664 		goto discov_stopped;
2665 	}
2666 
2667 	return;
2668 
2669 discov_stopped:
2670 	hci_dev_lock(hdev);
2671 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2672 	hci_dev_unlock(hdev);
2673 }
2674 
2675 static int le_scan_restart(struct hci_request *req, unsigned long opt)
2676 {
2677 	struct hci_dev *hdev = req->hdev;
2678 
2679 	/* If the controller is not scanning, we are done. */
2680 	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2681 		return 0;
2682 
2683 	if (hdev->scanning_paused) {
2684 		bt_dev_dbg(hdev, "Scanning is paused for suspend");
2685 		return 0;
2686 	}
2687 
2688 	hci_req_add_le_scan_disable(req, false);
2689 
2690 	if (use_ext_scan(hdev)) {
2691 		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
2692 
2693 		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
2694 		ext_enable_cp.enable = LE_SCAN_ENABLE;
2695 		ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2696 
2697 		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
2698 			    sizeof(ext_enable_cp), &ext_enable_cp);
2699 	} else {
2700 		struct hci_cp_le_set_scan_enable cp;
2701 
2702 		memset(&cp, 0, sizeof(cp));
2703 		cp.enable = LE_SCAN_ENABLE;
2704 		cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2705 		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2706 	}
2707 
2708 	return 0;
2709 }
2710 
2711 static void le_scan_restart_work(struct work_struct *work)
2712 {
2713 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2714 					    le_scan_restart.work);
2715 	unsigned long timeout, duration, scan_start, now;
2716 	u8 status;
2717 
2718 	bt_dev_dbg(hdev, "");
2719 
2720 	hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
2721 	if (status) {
2722 		bt_dev_err(hdev, "failed to restart LE scan: status %d",
2723 			   status);
2724 		return;
2725 	}
2726 
2727 	hci_dev_lock(hdev);
2728 
2729 	if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
2730 	    !hdev->discovery.scan_start)
2731 		goto unlock;
2732 
2733 	/* When the scan was started, hdev->le_scan_disable was queued to run
2734 	 * 'duration' after scan_start. During a scan restart that work has
2735 	 * been canceled, so queue it again with the proper remaining timeout
2736 	 * to make sure the scan does not run indefinitely.
2737 	 */
2738 	duration = hdev->discovery.scan_duration;
2739 	scan_start = hdev->discovery.scan_start;
2740 	now = jiffies;
2741 	if (now - scan_start <= duration) {
2742 		int elapsed;
2743 
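		/* Account for a possible jiffies wrap-around when computing
		 * the elapsed time.
		 */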
2744 		if (now >= scan_start)
2745 			elapsed = now - scan_start;
2746 		else
2747 			elapsed = ULONG_MAX - scan_start + now;
2748 
2749 		timeout = duration - elapsed;
2750 	} else {
2751 		timeout = 0;
2752 	}
2753 
2754 	queue_delayed_work(hdev->req_workqueue,
2755 			   &hdev->le_scan_disable, timeout);
2756 
2757 unlock:
2758 	hci_dev_unlock(hdev);
2759 }
2760 
2761 static int active_scan(struct hci_request *req, unsigned long opt)
2762 {
2763 	uint16_t interval = opt;
2764 	struct hci_dev *hdev = req->hdev;
2765 	u8 own_addr_type;
2766 	/* Accept list is not used for discovery */
2767 	u8 filter_policy = 0x00;
2768 	/* Default is to enable duplicates filter */
2769 	u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2770 	/* Discovery doesn't require controller address resolution */
2771 	bool addr_resolv = false;
2772 	int err;
2773 
2774 	bt_dev_dbg(hdev, "");
2775 
2776 	/* If controller is scanning, it means the background scanning is
2777 	 * running. Thus, we should temporarily stop it in order to set the
2778 	 * discovery scanning parameters.
2779 	 */
2780 	if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2781 		hci_req_add_le_scan_disable(req, false);
2782 		cancel_interleave_scan(hdev);
2783 	}
2784 
2785 	/* All active scans will be done with either a resolvable private
2786 	 * address (when the privacy feature has been enabled) or a
2787 	 * non-resolvable private address.
2788 	 */
2789 	err = hci_update_random_address(req, true, scan_use_rpa(hdev),
2790 					&own_addr_type);
2791 	if (err < 0)
2792 		own_addr_type = ADDR_LE_DEV_PUBLIC;
2793 
2794 	if (hci_is_adv_monitoring(hdev)) {
2795 		/* The duplicate filter should be disabled when an advertisement
2796 		 * monitor is activated; otherwise AdvMon can only receive one
2797 		 * advertisement per peer during active scanning, and might
2798 		 * report loss to those peers.
2799 		 *
2800 		 * Note that different controllers interpret "duplicate"
2801 		 * differently. Some consider packets with the same address as
2802 		 * duplicates, while others consider packets with the same
2803 		 * address and the same RSSI as duplicates. Although in the
2804 		 * latter case the duplicate filter would not need to be
2805 		 * disabled, active scanning typically runs only for a short
2806 		 * period of time, so the power impact is negligible.
2807 		 */
2808 		filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
2809 	}
2810 
2811 	hci_req_start_scan(req, LE_SCAN_ACTIVE, interval,
2812 			   hdev->le_scan_window_discovery, own_addr_type,
2813 			   filter_policy, filter_dup, addr_resolv);
2814 	return 0;
2815 }
2816 
2817 static int interleaved_discov(struct hci_request *req, unsigned long opt)
2818 {
2819 	int err;
2820 
2821 	bt_dev_dbg(req->hdev, "");
2822 
2823 	err = active_scan(req, opt);
2824 	if (err)
2825 		return err;
2826 
2827 	return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
2828 }
2829 
2830 static void start_discovery(struct hci_dev *hdev, u8 *status)
2831 {
2832 	unsigned long timeout;
2833 
2834 	bt_dev_dbg(hdev, "type %u", hdev->discovery.type);
2835 
2836 	switch (hdev->discovery.type) {
2837 	case DISCOV_TYPE_BREDR:
2838 		if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
2839 			hci_req_sync(hdev, bredr_inquiry,
2840 				     DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
2841 				     status);
2842 		return;
2843 	case DISCOV_TYPE_INTERLEAVED:
2844 		/* When running simultaneous discovery, the LE scanning time
2845 		 * should occupy the whole discovery time since BR/EDR inquiry
2846 		 * and LE scanning are scheduled by the controller.
2847 		 *
2848 		 * For interleaved discovery, in comparison, BR/EDR inquiry
2849 		 * and LE scanning are done sequentially with separate
2850 		 * timeouts.
2851 		 */
2852 		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
2853 			     &hdev->quirks)) {
2854 			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2855 			/* During simultaneous discovery, we double the LE scan
2856 			 * interval. We must leave some time for the controller
2857 			 * to do BR/EDR inquiry.
2858 			 */
2859 			hci_req_sync(hdev, interleaved_discov,
2860 				     hdev->le_scan_int_discovery * 2, HCI_CMD_TIMEOUT,
2861 				     status);
2862 			break;
2863 		}
2864 
2865 		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
2866 		hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
2867 			     HCI_CMD_TIMEOUT, status);
2868 		break;
2869 	case DISCOV_TYPE_LE:
2870 		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2871 		hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
2872 			     HCI_CMD_TIMEOUT, status);
2873 		break;
2874 	default:
2875 		*status = HCI_ERROR_UNSPECIFIED;
2876 		return;
2877 	}
2878 
2879 	if (*status)
2880 		return;
2881 
2882 	bt_dev_dbg(hdev, "timeout %u ms", jiffies_to_msecs(timeout));
2883 
2884 	/* When service discovery is used and the controller has a
2885 	 * strict duplicate filter, it is important to remember the
2886 	 * start and duration of the scan. This is required for
2887 	 * restarting scanning during the discovery phase.
2888 	 */
2889 	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
2890 		     hdev->discovery.result_filtering) {
2891 		hdev->discovery.scan_start = jiffies;
2892 		hdev->discovery.scan_duration = timeout;
2893 	}
2894 
2895 	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
2896 			   timeout);
2897 }
2898 
2899 bool hci_req_stop_discovery(struct hci_request *req)
2900 {
2901 	struct hci_dev *hdev = req->hdev;
2902 	struct discovery_state *d = &hdev->discovery;
2903 	struct hci_cp_remote_name_req_cancel cp;
2904 	struct inquiry_entry *e;
2905 	bool ret = false;
2906 
2907 	bt_dev_dbg(hdev, "state %u", hdev->discovery.state);
2908 
2909 	if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
2910 		if (test_bit(HCI_INQUIRY, &hdev->flags))
2911 			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2912 
2913 		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2914 			cancel_delayed_work(&hdev->le_scan_disable);
2915 			cancel_delayed_work(&hdev->le_scan_restart);
2916 			hci_req_add_le_scan_disable(req, false);
2917 		}
2918 
2919 		ret = true;
2920 	} else {
2921 		/* Passive scanning */
2922 		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2923 			hci_req_add_le_scan_disable(req, false);
2924 			ret = true;
2925 		}
2926 	}
2927 
2928 	/* No further actions needed for LE-only discovery */
2929 	if (d->type == DISCOV_TYPE_LE)
2930 		return ret;
2931 
2932 	if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
2933 		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2934 						     NAME_PENDING);
2935 		if (!e)
2936 			return ret;
2937 
2938 		bacpy(&cp.bdaddr, &e->data.bdaddr);
2939 		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
2940 			    &cp);
2941 		ret = true;
2942 	}
2943 
2944 	return ret;
2945 }
2946 
2947 static void config_data_path_complete(struct hci_dev *hdev, u8 status,
2948 				      u16 opcode)
2949 {
2950 	bt_dev_dbg(hdev, "status %u", status);
2951 }
2952 
2953 int hci_req_configure_datapath(struct hci_dev *hdev, struct bt_codec *codec)
2954 {
2955 	struct hci_request req;
2956 	int err;
2957 	__u8 vnd_len, *vnd_data = NULL;
2958 	struct hci_op_configure_data_path *cmd = NULL;
2959 
2960 	hci_req_init(&req, hdev);
2961 
2962 	err = hdev->get_codec_config_data(hdev, ESCO_LINK, codec, &vnd_len,
2963 					  &vnd_data);
2964 	if (err < 0)
2965 		goto error;
2966 
2967 	cmd = kzalloc(sizeof(*cmd) + vnd_len, GFP_KERNEL);
2968 	if (!cmd) {
2969 		err = -ENOMEM;
2970 		goto error;
2971 	}
2972 
2973 	err = hdev->get_data_path_id(hdev, &cmd->data_path_id);
2974 	if (err < 0)
2975 		goto error;
2976 
2977 	cmd->vnd_len = vnd_len;
2978 	memcpy(cmd->vnd_data, vnd_data, vnd_len);
2979 
2980 	cmd->direction = 0x00;
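	/* Configure both directions: 0x00 selects the input (host to
	 * controller) path and 0x01 the output (controller to host) path.
	 */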
2981 	hci_req_add(&req, HCI_CONFIGURE_DATA_PATH, sizeof(*cmd) + vnd_len, cmd);
2982 
2983 	cmd->direction = 0x01;
2984 	hci_req_add(&req, HCI_CONFIGURE_DATA_PATH, sizeof(*cmd) + vnd_len, cmd);
2985 
2986 	err = hci_req_run(&req, config_data_path_complete);
2987 error:
2988 
2989 	kfree(cmd);
2990 	kfree(vnd_data);
2991 	return err;
2992 }
2993 
2994 static int stop_discovery(struct hci_request *req, unsigned long opt)
2995 {
2996 	hci_dev_lock(req->hdev);
2997 	hci_req_stop_discovery(req);
2998 	hci_dev_unlock(req->hdev);
2999 
3000 	return 0;
3001 }
3002 
3003 static void discov_update(struct work_struct *work)
3004 {
3005 	struct hci_dev *hdev = container_of(work, struct hci_dev,
3006 					    discov_update);
3007 	u8 status = 0;
3008 
3009 	switch (hdev->discovery.state) {
3010 	case DISCOVERY_STARTING:
3011 		start_discovery(hdev, &status);
3012 		mgmt_start_discovery_complete(hdev, status);
3013 		if (status)
3014 			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3015 		else
3016 			hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3017 		break;
3018 	case DISCOVERY_STOPPING:
3019 		hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
3020 		mgmt_stop_discovery_complete(hdev, status);
3021 		if (!status)
3022 			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3023 		break;
3024 	case DISCOVERY_STOPPED:
3025 	default:
3026 		return;
3027 	}
3028 }
3029 
3030 static void discov_off(struct work_struct *work)
3031 {
3032 	struct hci_dev *hdev = container_of(work, struct hci_dev,
3033 					    discov_off.work);
3034 
3035 	bt_dev_dbg(hdev, "");
3036 
3037 	hci_dev_lock(hdev);
3038 
3039 	/* When the discoverable timeout triggers, just make sure that
3040 	 * the limited discoverable flag is cleared. Even in the case
3041 	 * of a timeout triggered from general discoverable mode, it is
3042 	 * safe to unconditionally clear the flag.
3043 	 */
3044 	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
3045 	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
3046 	hdev->discov_timeout = 0;
3047 
3048 	hci_dev_unlock(hdev);
3049 
3050 	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
3051 	mgmt_new_settings(hdev);
3052 }
3053 
3054 static int powered_update_hci(struct hci_request *req, unsigned long opt)
3055 {
3056 	struct hci_dev *hdev = req->hdev;
3057 	u8 link_sec;
3058 
3059 	hci_dev_lock(hdev);
3060 
3061 	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
3062 	    !lmp_host_ssp_capable(hdev)) {
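		/* The host wants SSP but the controller's host feature bit is
		 * not set yet, so enable Simple Pairing mode.
		 */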
3063 		u8 mode = 0x01;
3064 
3065 		hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
3066 
3067 		if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
3068 			u8 support = 0x01;
3069 
3070 			hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
3071 				    sizeof(support), &support);
3072 		}
3073 	}
3074 
3075 	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
3076 	    lmp_bredr_capable(hdev)) {
3077 		struct hci_cp_write_le_host_supported cp;
3078 
3079 		cp.le = 0x01;
3080 		cp.simul = 0x00;
3081 
3082 		/* Check first if we already have the right
3083 		 * host state (host features set)
3084 		 */
3085 		if (cp.le != lmp_host_le_capable(hdev) ||
3086 		    cp.simul != lmp_host_le_br_capable(hdev))
3087 			hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
3088 				    sizeof(cp), &cp);
3089 	}
3090 
3091 	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
3092 		/* Make sure the controller has a good default for
3093 		 * advertising data. This also applies to the case
3094 		 * where BR/EDR was toggled during the AUTO_OFF phase.
3095 		 */
3096 		if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
3097 		    list_empty(&hdev->adv_instances)) {
3098 			int err;
3099 
3100 			if (ext_adv_capable(hdev)) {
3101 				err = __hci_req_setup_ext_adv_instance(req,
3102 								       0x00);
3103 				if (!err)
3104 					__hci_req_update_scan_rsp_data(req,
3105 								       0x00);
3106 			} else {
3107 				err = 0;
3108 				__hci_req_update_adv_data(req, 0x00);
3109 				__hci_req_update_scan_rsp_data(req, 0x00);
3110 			}
3111 
3112 			if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
3113 				if (!ext_adv_capable(hdev))
3114 					__hci_req_enable_advertising(req);
3115 				else if (!err)
3116 					__hci_req_enable_ext_advertising(req,
3117 									 0x00);
3118 			}
3119 		} else if (!list_empty(&hdev->adv_instances)) {
3120 			struct adv_info *adv_instance;
3121 
3122 			adv_instance = list_first_entry(&hdev->adv_instances,
3123 							struct adv_info, list);
3124 			__hci_req_schedule_adv_instance(req,
3125 							adv_instance->instance,
3126 							true);
3127 		}
3128 	}
3129 
3130 	link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
3131 	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
3132 		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
3133 			    sizeof(link_sec), &link_sec);
3134 
3135 	if (lmp_bredr_capable(hdev)) {
3136 		if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
3137 			__hci_req_write_fast_connectable(req, true);
3138 		else
3139 			__hci_req_write_fast_connectable(req, false);
3140 		__hci_req_update_scan(req);
3141 		__hci_req_update_class(req);
3142 		__hci_req_update_name(req);
3143 		__hci_req_update_eir(req);
3144 	}
3145 
3146 	hci_dev_unlock(hdev);
3147 	return 0;
3148 }
3149 
3150 int __hci_req_hci_power_on(struct hci_dev *hdev)
3151 {
3152 	/* Register the available SMP channels (BR/EDR and LE) only when
3153 	 * successfully powering on the controller. This late
3154 	 * registration is required so that LE SMP can clearly decide if
3155 	 * the public address or static address is used.
3156 	 */
3157 	smp_register(hdev);
3158 
3159 	return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
3160 			      NULL);
3161 }
3162 
3163 void hci_request_setup(struct hci_dev *hdev)
3164 {
3165 	INIT_WORK(&hdev->discov_update, discov_update);
3166 	INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
3167 	INIT_WORK(&hdev->scan_update, scan_update_work);
3168 	INIT_WORK(&hdev->connectable_update, connectable_update_work);
3169 	INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
3170 	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
3171 	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3172 	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
3173 	INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
3174 	INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work);
3175 }
3176 
3177 void hci_request_cancel_all(struct hci_dev *hdev)
3178 {
3179 	hci_req_sync_cancel(hdev, ENODEV);
3180 
3181 	cancel_work_sync(&hdev->discov_update);
3182 	cancel_work_sync(&hdev->bg_scan_update);
3183 	cancel_work_sync(&hdev->scan_update);
3184 	cancel_work_sync(&hdev->connectable_update);
3185 	cancel_work_sync(&hdev->discoverable_update);
3186 	cancel_delayed_work_sync(&hdev->discov_off);
3187 	cancel_delayed_work_sync(&hdev->le_scan_disable);
3188 	cancel_delayed_work_sync(&hdev->le_scan_restart);
3189 
3190 	if (hdev->adv_instance_timeout) {
3191 		cancel_delayed_work_sync(&hdev->adv_instance_expire);
3192 		hdev->adv_instance_timeout = 0;
3193 	}
3194 
3195 	cancel_interleave_scan(hdev);
3196 }
3197