xref: /openbmc/linux/net/bluetooth/hci_request.c (revision c64d01b3ceba873aa8e8605598cec4a6bc6d1601)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3 
4    Copyright (C) 2014 Intel Corporation
5 
6    This program is free software; you can redistribute it and/or modify
7    it under the terms of the GNU General Public License version 2 as
8    published by the Free Software Foundation;
9 
10    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
11    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
12    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
13    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
14    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
15    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 
19    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
20    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
21    SOFTWARE IS DISCLAIMED.
22 */
23 
24 #include <linux/sched/signal.h>
25 
26 #include <net/bluetooth/bluetooth.h>
27 #include <net/bluetooth/hci_core.h>
28 #include <net/bluetooth/mgmt.h>
29 
30 #include "smp.h"
31 #include "hci_request.h"
32 #include "msft.h"
33 #include "eir.h"
34 
35 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
36 {
37 	skb_queue_head_init(&req->cmd_q);
38 	req->hdev = hdev;
39 	req->err = 0;
40 }
41 
42 void hci_req_purge(struct hci_request *req)
43 {
44 	skb_queue_purge(&req->cmd_q);
45 }
46 
47 bool hci_req_status_pend(struct hci_dev *hdev)
48 {
49 	return hdev->req_status == HCI_REQ_PEND;
50 }
51 
52 static int req_run(struct hci_request *req, hci_req_complete_t complete,
53 		   hci_req_complete_skb_t complete_skb)
54 {
55 	struct hci_dev *hdev = req->hdev;
56 	struct sk_buff *skb;
57 	unsigned long flags;
58 
59 	bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q));
60 
61 	/* If an error occurred during request building, remove all HCI
62 	 * commands queued on the HCI request queue.
63 	 */
64 	if (req->err) {
65 		skb_queue_purge(&req->cmd_q);
66 		return req->err;
67 	}
68 
69 	/* Do not allow empty requests */
70 	if (skb_queue_empty(&req->cmd_q))
71 		return -ENODATA;
72 
73 	skb = skb_peek_tail(&req->cmd_q);
74 	if (complete) {
75 		bt_cb(skb)->hci.req_complete = complete;
76 	} else if (complete_skb) {
77 		bt_cb(skb)->hci.req_complete_skb = complete_skb;
78 		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
79 	}
80 
81 	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
82 	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
83 	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
84 
85 	queue_work(hdev->workqueue, &hdev->cmd_work);
86 
87 	return 0;
88 }
89 
90 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
91 {
92 	return req_run(req, complete, NULL);
93 }
94 
95 int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
96 {
97 	return req_run(req, NULL, complete);
98 }
99 
100 void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
101 			   struct sk_buff *skb)
102 {
103 	bt_dev_dbg(hdev, "result 0x%2.2x", result);
104 
105 	if (hdev->req_status == HCI_REQ_PEND) {
106 		hdev->req_result = result;
107 		hdev->req_status = HCI_REQ_DONE;
108 		if (skb)
109 			hdev->req_skb = skb_get(skb);
110 		wake_up_interruptible(&hdev->req_wait_q);
111 	}
112 }
113 
114 void hci_req_sync_cancel(struct hci_dev *hdev, int err)
115 {
116 	bt_dev_dbg(hdev, "err 0x%2.2x", err);
117 
118 	if (hdev->req_status == HCI_REQ_PEND) {
119 		hdev->req_result = err;
120 		hdev->req_status = HCI_REQ_CANCELED;
121 		wake_up_interruptible(&hdev->req_wait_q);
122 	}
123 }
124 
125 /* Execute request and wait for completion. */
126 int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
127 						     unsigned long opt),
128 		   unsigned long opt, u32 timeout, u8 *hci_status)
129 {
130 	struct hci_request req;
131 	int err = 0;
132 
133 	bt_dev_dbg(hdev, "start");
134 
135 	hci_req_init(&req, hdev);
136 
137 	hdev->req_status = HCI_REQ_PEND;
138 
139 	err = func(&req, opt);
140 	if (err) {
141 		if (hci_status)
142 			*hci_status = HCI_ERROR_UNSPECIFIED;
143 		return err;
144 	}
145 
146 	err = hci_req_run_skb(&req, hci_req_sync_complete);
147 	if (err < 0) {
148 		hdev->req_status = 0;
149 
150 		/* ENODATA means the HCI request command queue is empty.
151 		 * This can happen when a request with conditionals doesn't
152 		 * trigger any commands to be sent. This is normal behavior
153 		 * and should not trigger an error return.
154 		 */
155 		if (err == -ENODATA) {
156 			if (hci_status)
157 				*hci_status = 0;
158 			return 0;
159 		}
160 
161 		if (hci_status)
162 			*hci_status = HCI_ERROR_UNSPECIFIED;
163 
164 		return err;
165 	}
166 
167 	err = wait_event_interruptible_timeout(hdev->req_wait_q,
168 			hdev->req_status != HCI_REQ_PEND, timeout);
169 
170 	if (err == -ERESTARTSYS)
171 		return -EINTR;
172 
173 	switch (hdev->req_status) {
174 	case HCI_REQ_DONE:
175 		err = -bt_to_errno(hdev->req_result);
176 		if (hci_status)
177 			*hci_status = hdev->req_result;
178 		break;
179 
180 	case HCI_REQ_CANCELED:
181 		err = -hdev->req_result;
182 		if (hci_status)
183 			*hci_status = HCI_ERROR_UNSPECIFIED;
184 		break;
185 
186 	default:
187 		err = -ETIMEDOUT;
188 		if (hci_status)
189 			*hci_status = HCI_ERROR_UNSPECIFIED;
190 		break;
191 	}
192 
193 	kfree_skb(hdev->req_skb);
194 	hdev->req_skb = NULL;
195 	hdev->req_status = hdev->req_result = 0;
196 
197 	bt_dev_dbg(hdev, "end: err %d", err);
198 
199 	return err;
200 }
201 
202 int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
203 						  unsigned long opt),
204 		 unsigned long opt, u32 timeout, u8 *hci_status)
205 {
206 	int ret;
207 
208 	/* Serialize all requests */
209 	hci_req_sync_lock(hdev);
210 	/* Check the state after obtaining the lock to protect the HCI_UP
211 	 * against any races from hci_dev_do_close when the controller
212 	 * gets removed.
213 	 */
214 	if (test_bit(HCI_UP, &hdev->flags))
215 		ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
216 	else
217 		ret = -ENETDOWN;
218 	hci_req_sync_unlock(hdev);
219 
220 	return ret;
221 }
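
/*
 * Illustrative sketch of the synchronous path: a builder callback is
 * handed to hci_req_sync(), which serializes it, runs it and waits for
 * completion (update_scan()/scan_update_work() further below are real
 * in-file users of this pattern; build_scan_req here is a made-up name):
 *
 *	static int build_scan_req(struct hci_request *req, unsigned long opt)
 *	{
 *		__u8 scan = SCAN_DISABLED;
 *
 *		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *		return 0;
 *	}
 *
 *	hci_req_sync(hdev, build_scan_req, 0, HCI_CMD_TIMEOUT, NULL);
 */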
222 
223 struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
224 				const void *param)
225 {
226 	int len = HCI_COMMAND_HDR_SIZE + plen;
227 	struct hci_command_hdr *hdr;
228 	struct sk_buff *skb;
229 
230 	skb = bt_skb_alloc(len, GFP_ATOMIC);
231 	if (!skb)
232 		return NULL;
233 
234 	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
235 	hdr->opcode = cpu_to_le16(opcode);
236 	hdr->plen   = plen;
237 
238 	if (plen)
239 		skb_put_data(skb, param, plen);
240 
241 	bt_dev_dbg(hdev, "skb len %d", skb->len);
242 
243 	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
244 	hci_skb_opcode(skb) = opcode;
245 
246 	return skb;
247 }
248 
249 /* Queue a command to an asynchronous HCI request */
250 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
251 		    const void *param, u8 event)
252 {
253 	struct hci_dev *hdev = req->hdev;
254 	struct sk_buff *skb;
255 
256 	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
257 
258 	/* If an error occurred during request building, there is no point in
259 	 * queueing the HCI command. We can simply return.
260 	 */
261 	if (req->err)
262 		return;
263 
264 	skb = hci_prepare_cmd(hdev, opcode, plen, param);
265 	if (!skb) {
266 		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
267 			   opcode);
268 		req->err = -ENOMEM;
269 		return;
270 	}
271 
272 	if (skb_queue_empty(&req->cmd_q))
273 		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
274 
275 	bt_cb(skb)->hci.req_event = event;
276 
277 	skb_queue_tail(&req->cmd_q, skb);
278 }
279 
280 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
281 		 const void *param)
282 {
283 	hci_req_add_ev(req, opcode, plen, param, 0);
284 }
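
/*
 * Illustrative sketch of how the asynchronous request API above is
 * typically driven (mirroring e.g. hci_req_update_adv_data() further
 * down in this file):
 *
 *	struct hci_request req;
 *	u8 enable = 0x01;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
 *	err = hci_req_run(&req, NULL);
 *
 * hci_req_run() splices the queued commands onto hdev->cmd_q and returns
 * immediately; an optional completion callback runs from the event path.
 */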
285 
286 void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
287 {
288 	struct hci_dev *hdev = req->hdev;
289 	struct hci_cp_write_page_scan_activity acp;
290 	u8 type;
291 
292 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
293 		return;
294 
295 	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
296 		return;
297 
298 	if (enable) {
299 		type = PAGE_SCAN_TYPE_INTERLACED;
300 
301 		/* 160 msec page scan interval */
302 		acp.interval = cpu_to_le16(0x0100);
303 	} else {
304 		type = hdev->def_page_scan_type;
305 		acp.interval = cpu_to_le16(hdev->def_page_scan_int);
306 	}
307 
308 	acp.window = cpu_to_le16(hdev->def_page_scan_window);
309 
310 	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
311 	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
312 		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
313 			    sizeof(acp), &acp);
314 
315 	if (hdev->page_scan_type != type)
316 		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
317 }
318 
319 static void start_interleave_scan(struct hci_dev *hdev)
320 {
321 	hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
322 	queue_delayed_work(hdev->req_workqueue,
323 			   &hdev->interleave_scan, 0);
324 }
325 
326 static bool is_interleave_scanning(struct hci_dev *hdev)
327 {
328 	return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
329 }
330 
331 static void cancel_interleave_scan(struct hci_dev *hdev)
332 {
333 	bt_dev_dbg(hdev, "cancelling interleave scan");
334 
335 	cancel_delayed_work_sync(&hdev->interleave_scan);
336 
337 	hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
338 }
339 
340 /* Return true if an interleave scan was started by this function,
341  * otherwise return false.
342  */
343 static bool __hci_update_interleaved_scan(struct hci_dev *hdev)
344 {
345 	/* Do interleaved scan only if all of the following are true:
346 	 * - There is at least one ADV monitor
347 	 * - At least one pending LE connection or one device to be scanned for
348 	 * - Monitor offloading is not supported
349 	 * If so, we should alternate between allowlist scan and one without
350 	 * any filters to save power.
351 	 */
352 	bool use_interleaving = hci_is_adv_monitoring(hdev) &&
353 				!(list_empty(&hdev->pend_le_conns) &&
354 				  list_empty(&hdev->pend_le_reports)) &&
355 				hci_get_adv_monitor_offload_ext(hdev) ==
356 				    HCI_ADV_MONITOR_EXT_NONE;
357 	bool is_interleaving = is_interleave_scanning(hdev);
358 
359 	if (use_interleaving && !is_interleaving) {
360 		start_interleave_scan(hdev);
361 		bt_dev_dbg(hdev, "starting interleave scan");
362 		return true;
363 	}
364 
365 	if (!use_interleaving && is_interleaving)
366 		cancel_interleave_scan(hdev);
367 
368 	return false;
369 }
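
/*
 * The interleave scan alternates between two phases driven by
 * interleave_scan_work() further below: an allowlist-filtered scan for
 * hdev->advmon_allowlist_duration ms followed by an unfiltered scan for
 * hdev->advmon_no_filter_duration ms, repeating for as long as the
 * conditions checked above keep holding.
 */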
370 
371 void __hci_req_update_name(struct hci_request *req)
372 {
373 	struct hci_dev *hdev = req->hdev;
374 	struct hci_cp_write_local_name cp;
375 
376 	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
377 
378 	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
379 }
380 
381 void __hci_req_update_eir(struct hci_request *req)
382 {
383 	struct hci_dev *hdev = req->hdev;
384 	struct hci_cp_write_eir cp;
385 
386 	if (!hdev_is_powered(hdev))
387 		return;
388 
389 	if (!lmp_ext_inq_capable(hdev))
390 		return;
391 
392 	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
393 		return;
394 
395 	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
396 		return;
397 
398 	memset(&cp, 0, sizeof(cp));
399 
400 	eir_create(hdev, cp.data);
401 
402 	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
403 		return;
404 
405 	memcpy(hdev->eir, cp.data, sizeof(cp.data));
406 
407 	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
408 }
409 
410 void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)
411 {
412 	struct hci_dev *hdev = req->hdev;
413 
414 	if (hdev->scanning_paused) {
415 		bt_dev_dbg(hdev, "Scanning is paused for suspend");
416 		return;
417 	}
418 
419 	if (use_ext_scan(hdev)) {
420 		struct hci_cp_le_set_ext_scan_enable cp;
421 
422 		memset(&cp, 0, sizeof(cp));
423 		cp.enable = LE_SCAN_DISABLE;
424 		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
425 			    &cp);
426 	} else {
427 		struct hci_cp_le_set_scan_enable cp;
428 
429 		memset(&cp, 0, sizeof(cp));
430 		cp.enable = LE_SCAN_DISABLE;
431 		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
432 	}
433 
434 	/* Disable address resolution */
435 	if (hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) {
436 		__u8 enable = 0x00;
437 
438 		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
439 	}
440 }
441 
442 static void del_from_accept_list(struct hci_request *req, bdaddr_t *bdaddr,
443 				 u8 bdaddr_type)
444 {
445 	struct hci_cp_le_del_from_accept_list cp;
446 
447 	cp.bdaddr_type = bdaddr_type;
448 	bacpy(&cp.bdaddr, bdaddr);
449 
450 	bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from accept list", &cp.bdaddr,
451 		   cp.bdaddr_type);
452 	hci_req_add(req, HCI_OP_LE_DEL_FROM_ACCEPT_LIST, sizeof(cp), &cp);
453 
454 	if (use_ll_privacy(req->hdev)) {
455 		struct smp_irk *irk;
456 
457 		irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type);
458 		if (irk) {
459 			struct hci_cp_le_del_from_resolv_list cp;
460 
461 			cp.bdaddr_type = bdaddr_type;
462 			bacpy(&cp.bdaddr, bdaddr);
463 
464 			hci_req_add(req, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
465 				    sizeof(cp), &cp);
466 		}
467 	}
468 }
469 
470 /* Adds connection to accept list if needed. On error, returns -1. */
471 static int add_to_accept_list(struct hci_request *req,
472 			      struct hci_conn_params *params, u8 *num_entries,
473 			      bool allow_rpa)
474 {
475 	struct hci_cp_le_add_to_accept_list cp;
476 	struct hci_dev *hdev = req->hdev;
477 
478 	/* Already in accept list */
479 	if (hci_bdaddr_list_lookup(&hdev->le_accept_list, &params->addr,
480 				   params->addr_type))
481 		return 0;
482 
 483 	/* Accept list is full: fall back to the filter policy that accepts all advertising */
484 	if (*num_entries >= hdev->le_accept_list_size)
485 		return -1;
486 
487 	/* Accept list can not be used with RPAs */
488 	if (!allow_rpa &&
489 	    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
490 	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) {
491 		return -1;
492 	}
493 
494 	/* During suspend, only wakeable devices can be in accept list */
495 	if (hdev->suspended && !hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
496 						   params->current_flags))
497 		return 0;
498 
499 	*num_entries += 1;
500 	cp.bdaddr_type = params->addr_type;
501 	bacpy(&cp.bdaddr, &params->addr);
502 
503 	bt_dev_dbg(hdev, "Add %pMR (0x%x) to accept list", &cp.bdaddr,
504 		   cp.bdaddr_type);
505 	hci_req_add(req, HCI_OP_LE_ADD_TO_ACCEPT_LIST, sizeof(cp), &cp);
506 
507 	if (use_ll_privacy(hdev)) {
508 		struct smp_irk *irk;
509 
510 		irk = hci_find_irk_by_addr(hdev, &params->addr,
511 					   params->addr_type);
512 		if (irk) {
513 			struct hci_cp_le_add_to_resolv_list cp;
514 
515 			cp.bdaddr_type = params->addr_type;
516 			bacpy(&cp.bdaddr, &params->addr);
517 			memcpy(cp.peer_irk, irk->val, 16);
518 
519 			if (hci_dev_test_flag(hdev, HCI_PRIVACY))
520 				memcpy(cp.local_irk, hdev->irk, 16);
521 			else
522 				memset(cp.local_irk, 0, 16);
523 
524 			hci_req_add(req, HCI_OP_LE_ADD_TO_RESOLV_LIST,
525 				    sizeof(cp), &cp);
526 		}
527 	}
528 
529 	return 0;
530 }
531 
532 static u8 update_accept_list(struct hci_request *req)
533 {
534 	struct hci_dev *hdev = req->hdev;
535 	struct hci_conn_params *params;
536 	struct bdaddr_list *b;
537 	u8 num_entries = 0;
538 	bool pend_conn, pend_report;
539 	/* We allow usage of accept list even with RPAs in suspend. In the worst
 540 	 * case, we won't be able to wake from devices that use the privacy 1.2
 541 	 * features. Additionally, once we support privacy 1.2 and IRK
542 	 * offloading, we can update this to also check for those conditions.
543 	 */
544 	bool allow_rpa = hdev->suspended;
545 
546 	if (use_ll_privacy(hdev))
547 		allow_rpa = true;
548 
549 	/* Go through the current accept list programmed into the
550 	 * controller one by one and check if that address is still
551 	 * in the list of pending connections or list of devices to
552 	 * report. If not present in either list, then queue the
553 	 * command to remove it from the controller.
554 	 */
555 	list_for_each_entry(b, &hdev->le_accept_list, list) {
556 		pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
557 						      &b->bdaddr,
558 						      b->bdaddr_type);
559 		pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
560 							&b->bdaddr,
561 							b->bdaddr_type);
562 
563 		/* If the device is not likely to connect or report,
564 		 * remove it from the accept list.
565 		 */
566 		if (!pend_conn && !pend_report) {
567 			del_from_accept_list(req, &b->bdaddr, b->bdaddr_type);
568 			continue;
569 		}
570 
571 		/* Accept list can not be used with RPAs */
572 		if (!allow_rpa &&
573 		    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
574 		    hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
575 			return 0x00;
576 		}
577 
578 		num_entries++;
579 	}
580 
581 	/* Since all no longer valid accept list entries have been
582 	 * removed, walk through the list of pending connections
583 	 * and ensure that any new device gets programmed into
584 	 * the controller.
585 	 *
586 	 * If the list of the devices is larger than the list of
587 	 * available accept list entries in the controller, then
 588 	 * just abort and return a filter policy value to not use the
589 	 * accept list.
590 	 */
591 	list_for_each_entry(params, &hdev->pend_le_conns, action) {
592 		if (add_to_accept_list(req, params, &num_entries, allow_rpa))
593 			return 0x00;
594 	}
595 
596 	/* After adding all new pending connections, walk through
597 	 * the list of pending reports and also add these to the
598 	 * accept list if there is still space. Abort if space runs out.
599 	 */
600 	list_for_each_entry(params, &hdev->pend_le_reports, action) {
601 		if (add_to_accept_list(req, params, &num_entries, allow_rpa))
602 			return 0x00;
603 	}
604 
605 	/* Use the allowlist unless the following conditions are all true:
 606 	 * - We are not currently suspended
 607 	 * - One or more ADV monitors are registered and offloading is not supported
608 	 * - Interleaved scanning is not currently using the allowlist
609 	 */
610 	if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
611 	    hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE &&
612 	    hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
613 		return 0x00;
614 
615 	/* Select filter policy to use accept list */
616 	return 0x01;
617 }
618 
619 static bool scan_use_rpa(struct hci_dev *hdev)
620 {
621 	return hci_dev_test_flag(hdev, HCI_PRIVACY);
622 }
623 
624 static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
625 			       u16 window, u8 own_addr_type, u8 filter_policy,
626 			       bool filter_dup, bool addr_resolv)
627 {
628 	struct hci_dev *hdev = req->hdev;
629 
630 	if (hdev->scanning_paused) {
631 		bt_dev_dbg(hdev, "Scanning is paused for suspend");
632 		return;
633 	}
634 
635 	if (use_ll_privacy(hdev) && addr_resolv) {
636 		u8 enable = 0x01;
637 
638 		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
639 	}
640 
 641 	/* Use extended scanning if the Set Extended Scan Parameters and
 642 	 * Set Extended Scan Enable commands are supported.
643 	 */
644 	if (use_ext_scan(hdev)) {
645 		struct hci_cp_le_set_ext_scan_params *ext_param_cp;
646 		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
647 		struct hci_cp_le_scan_phy_params *phy_params;
648 		u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
649 		u32 plen;
650 
651 		ext_param_cp = (void *)data;
652 		phy_params = (void *)ext_param_cp->data;
653 
654 		memset(ext_param_cp, 0, sizeof(*ext_param_cp));
655 		ext_param_cp->own_addr_type = own_addr_type;
656 		ext_param_cp->filter_policy = filter_policy;
657 
658 		plen = sizeof(*ext_param_cp);
659 
660 		if (scan_1m(hdev) || scan_2m(hdev)) {
661 			ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;
662 
663 			memset(phy_params, 0, sizeof(*phy_params));
664 			phy_params->type = type;
665 			phy_params->interval = cpu_to_le16(interval);
666 			phy_params->window = cpu_to_le16(window);
667 
668 			plen += sizeof(*phy_params);
669 			phy_params++;
670 		}
671 
672 		if (scan_coded(hdev)) {
673 			ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;
674 
675 			memset(phy_params, 0, sizeof(*phy_params));
676 			phy_params->type = type;
677 			phy_params->interval = cpu_to_le16(interval);
678 			phy_params->window = cpu_to_le16(window);
679 
680 			plen += sizeof(*phy_params);
681 			phy_params++;
682 		}
683 
684 		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
685 			    plen, ext_param_cp);
686 
687 		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
688 		ext_enable_cp.enable = LE_SCAN_ENABLE;
689 		ext_enable_cp.filter_dup = filter_dup;
690 
691 		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
692 			    sizeof(ext_enable_cp), &ext_enable_cp);
693 	} else {
694 		struct hci_cp_le_set_scan_param param_cp;
695 		struct hci_cp_le_set_scan_enable enable_cp;
696 
697 		memset(&param_cp, 0, sizeof(param_cp));
698 		param_cp.type = type;
699 		param_cp.interval = cpu_to_le16(interval);
700 		param_cp.window = cpu_to_le16(window);
701 		param_cp.own_address_type = own_addr_type;
702 		param_cp.filter_policy = filter_policy;
703 		hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
704 			    &param_cp);
705 
706 		memset(&enable_cp, 0, sizeof(enable_cp));
707 		enable_cp.enable = LE_SCAN_ENABLE;
708 		enable_cp.filter_dup = filter_dup;
709 		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
710 			    &enable_cp);
711 	}
712 }
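
/*
 * For the extended variant above, one hci_cp_le_scan_phy_params block is
 * appended per enabled PHY (1M and/or Coded), so the parameter length
 * passed with HCI_OP_LE_SET_EXT_SCAN_PARAMS is sizeof(*ext_param_cp) plus
 * sizeof(*phy_params) for each PHY that was actually configured.
 */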
713 
 714 /* Returns true if an LE connection is in the scanning state */
715 static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev)
716 {
717 	struct hci_conn_hash *h = &hdev->conn_hash;
718 	struct hci_conn  *c;
719 
720 	rcu_read_lock();
721 
722 	list_for_each_entry_rcu(c, &h->list, list) {
723 		if (c->type == LE_LINK && c->state == BT_CONNECT &&
724 		    test_bit(HCI_CONN_SCANNING, &c->flags)) {
725 			rcu_read_unlock();
726 			return true;
727 		}
728 	}
729 
730 	rcu_read_unlock();
731 
732 	return false;
733 }
734 
 735 /* Call hci_req_add_le_scan_disable() first to disable the
 736  * controller-based address resolution so that the resolving
 737  * list can be reconfigured.
738  */
739 void hci_req_add_le_passive_scan(struct hci_request *req)
740 {
741 	struct hci_dev *hdev = req->hdev;
742 	u8 own_addr_type;
743 	u8 filter_policy;
744 	u16 window, interval;
745 	/* Default is to enable duplicates filter */
746 	u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
747 	/* Background scanning should run with address resolution */
748 	bool addr_resolv = true;
749 
750 	if (hdev->scanning_paused) {
751 		bt_dev_dbg(hdev, "Scanning is paused for suspend");
752 		return;
753 	}
754 
 755 	/* Set require_privacy to false since no SCAN_REQ are sent
 756 	 * during passive scanning. Not using a non-resolvable address
757 	 * here is important so that peer devices using direct
758 	 * advertising with our address will be correctly reported
759 	 * by the controller.
760 	 */
761 	if (hci_update_random_address(req, false, scan_use_rpa(hdev),
762 				      &own_addr_type))
763 		return;
764 
765 	if (hdev->enable_advmon_interleave_scan &&
766 	    __hci_update_interleaved_scan(hdev))
767 		return;
768 
769 	bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);
770 	/* Adding or removing entries from the accept list must
771 	 * happen before enabling scanning. The controller does
772 	 * not allow accept list modification while scanning.
773 	 */
774 	filter_policy = update_accept_list(req);
775 
 776 	/* When the controller is using random resolvable addresses and
 777 	 * thus has LE privacy enabled, controllers that support the
 778 	 * Extended Scanner Filter Policies feature can also handle
 779 	 * directed advertising.
 780 	 *
 781 	 * So instead of using filter policies 0x00 (no accept list)
 782 	 * and 0x01 (accept list enabled) use the new filter policies
 783 	 * 0x02 (no accept list) and 0x03 (accept list enabled).
784 	 */
785 	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
786 	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
787 		filter_policy |= 0x02;
788 
789 	if (hdev->suspended) {
790 		window = hdev->le_scan_window_suspend;
791 		interval = hdev->le_scan_int_suspend;
792 	} else if (hci_is_le_conn_scanning(hdev)) {
793 		window = hdev->le_scan_window_connect;
794 		interval = hdev->le_scan_int_connect;
795 	} else if (hci_is_adv_monitoring(hdev)) {
796 		window = hdev->le_scan_window_adv_monitor;
797 		interval = hdev->le_scan_int_adv_monitor;
798 
799 		/* Disable duplicates filter when scanning for advertisement
800 		 * monitor for the following reasons.
801 		 *
802 		 * For HW pattern filtering (ex. MSFT), Realtek and Qualcomm
803 		 * controllers ignore RSSI_Sampling_Period when the duplicates
804 		 * filter is enabled.
805 		 *
806 		 * For SW pattern filtering, when we're not doing interleaved
807 		 * scanning, it is necessary to disable duplicates filter,
808 		 * otherwise hosts can only receive one advertisement and it's
809 		 * impossible to know if a peer is still in range.
810 		 */
811 		filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
812 	} else {
813 		window = hdev->le_scan_window;
814 		interval = hdev->le_scan_interval;
815 	}
816 
817 	bt_dev_dbg(hdev, "LE passive scan with accept list = %d",
818 		   filter_policy);
819 	hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
820 			   own_addr_type, filter_policy, filter_dup,
821 			   addr_resolv);
822 }
823 
824 static void cancel_adv_timeout(struct hci_dev *hdev)
825 {
826 	if (hdev->adv_instance_timeout) {
827 		hdev->adv_instance_timeout = 0;
828 		cancel_delayed_work(&hdev->adv_instance_expire);
829 	}
830 }
831 
832 /* This function requires the caller holds hdev->lock */
833 void __hci_req_pause_adv_instances(struct hci_request *req)
834 {
835 	bt_dev_dbg(req->hdev, "Pausing advertising instances");
836 
837 	/* Call to disable any advertisements active on the controller.
838 	 * This will succeed even if no advertisements are configured.
839 	 */
840 	__hci_req_disable_advertising(req);
841 
842 	/* If we are using software rotation, pause the loop */
843 	if (!ext_adv_capable(req->hdev))
844 		cancel_adv_timeout(req->hdev);
845 }
846 
847 /* This function requires the caller holds hdev->lock */
848 static void __hci_req_resume_adv_instances(struct hci_request *req)
849 {
850 	struct adv_info *adv;
851 
852 	bt_dev_dbg(req->hdev, "Resuming advertising instances");
853 
854 	if (ext_adv_capable(req->hdev)) {
855 		/* Call for each tracked instance to be re-enabled */
856 		list_for_each_entry(adv, &req->hdev->adv_instances, list) {
857 			__hci_req_enable_ext_advertising(req,
858 							 adv->instance);
859 		}
860 
861 	} else {
862 		/* Schedule for most recent instance to be restarted and begin
863 		 * the software rotation loop
864 		 */
865 		__hci_req_schedule_adv_instance(req,
866 						req->hdev->cur_adv_instance,
867 						true);
868 	}
869 }
870 
871 /* This function requires the caller holds hdev->lock */
872 int hci_req_resume_adv_instances(struct hci_dev *hdev)
873 {
874 	struct hci_request req;
875 
876 	hci_req_init(&req, hdev);
877 	__hci_req_resume_adv_instances(&req);
878 
879 	return hci_req_run(&req, NULL);
880 }
881 
882 static bool adv_cur_instance_is_scannable(struct hci_dev *hdev)
883 {
884 	return hci_adv_instance_is_scannable(hdev, hdev->cur_adv_instance);
885 }
886 
887 void __hci_req_disable_advertising(struct hci_request *req)
888 {
889 	if (ext_adv_capable(req->hdev)) {
890 		__hci_req_disable_ext_adv_instance(req, 0x00);
891 
892 	} else {
893 		u8 enable = 0x00;
894 
895 		hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
896 	}
897 }
898 
899 static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
900 {
901 	/* If privacy is not enabled don't use RPA */
902 	if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
903 		return false;
904 
905 	/* If basic privacy mode is enabled use RPA */
906 	if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
907 		return true;
908 
909 	/* If limited privacy mode is enabled don't use RPA if we're
910 	 * both discoverable and bondable.
911 	 */
912 	if ((flags & MGMT_ADV_FLAG_DISCOV) &&
913 	    hci_dev_test_flag(hdev, HCI_BONDABLE))
914 		return false;
915 
916 	/* We're neither bondable nor discoverable in the limited
917 	 * privacy mode, therefore use RPA.
918 	 */
919 	return true;
920 }
921 
922 static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
923 {
924 	/* If there is no connection we are OK to advertise. */
925 	if (hci_conn_num(hdev, LE_LINK) == 0)
926 		return true;
927 
928 	/* Check le_states if there is any connection in peripheral role. */
929 	if (hdev->conn_hash.le_num_peripheral > 0) {
930 		/* Peripheral connection state and non connectable mode bit 20.
931 		 */
932 		if (!connectable && !(hdev->le_states[2] & 0x10))
933 			return false;
934 
935 		/* Peripheral connection state and connectable mode bit 38
936 		 * and scannable bit 21.
937 		 */
938 		if (connectable && (!(hdev->le_states[4] & 0x40) ||
939 				    !(hdev->le_states[2] & 0x20)))
940 			return false;
941 	}
942 
943 	/* Check le_states if there is any connection in central role. */
944 	if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_peripheral) {
945 		/* Central connection state and non connectable mode bit 18. */
946 		if (!connectable && !(hdev->le_states[2] & 0x02))
947 			return false;
948 
949 		/* Central connection state and connectable mode bit 35 and
950 		 * scannable 19.
951 		 */
952 		if (connectable && (!(hdev->le_states[4] & 0x08) ||
953 				    !(hdev->le_states[2] & 0x08)))
954 			return false;
955 	}
956 
957 	return true;
958 }
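
/*
 * The bit numbers referenced in the comments above index the LE supported
 * states mask reported by the controller: bit N lives in le_states[N / 8]
 * under the mask (1 << (N % 8)), e.g. bit 20 -> le_states[2] & 0x10 and
 * bit 38 -> le_states[4] & 0x40.
 */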
959 
960 void __hci_req_enable_advertising(struct hci_request *req)
961 {
962 	struct hci_dev *hdev = req->hdev;
963 	struct adv_info *adv;
964 	struct hci_cp_le_set_adv_param cp;
965 	u8 own_addr_type, enable = 0x01;
966 	bool connectable;
967 	u16 adv_min_interval, adv_max_interval;
968 	u32 flags;
969 
970 	flags = hci_adv_instance_flags(hdev, hdev->cur_adv_instance);
971 	adv = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
972 
973 	/* If the "connectable" instance flag was not set, then choose between
974 	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
975 	 */
976 	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
977 		      mgmt_get_connectable(hdev);
978 
979 	if (!is_advertising_allowed(hdev, connectable))
980 		return;
981 
982 	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
983 		__hci_req_disable_advertising(req);
984 
985 	/* Clear the HCI_LE_ADV bit temporarily so that the
986 	 * hci_update_random_address knows that it's safe to go ahead
987 	 * and write a new random address. The flag will be set back on
988 	 * as soon as the SET_ADV_ENABLE HCI command completes.
989 	 */
990 	hci_dev_clear_flag(hdev, HCI_LE_ADV);
991 
992 	/* Set require_privacy to true only when non-connectable
993 	 * advertising is used. In that case it is fine to use a
994 	 * non-resolvable private address.
995 	 */
996 	if (hci_update_random_address(req, !connectable,
997 				      adv_use_rpa(hdev, flags),
998 				      &own_addr_type) < 0)
999 		return;
1000 
1001 	memset(&cp, 0, sizeof(cp));
1002 
1003 	if (adv) {
1004 		adv_min_interval = adv->min_interval;
1005 		adv_max_interval = adv->max_interval;
1006 	} else {
1007 		adv_min_interval = hdev->le_adv_min_interval;
1008 		adv_max_interval = hdev->le_adv_max_interval;
1009 	}
1010 
1011 	if (connectable) {
1012 		cp.type = LE_ADV_IND;
1013 	} else {
1014 		if (adv_cur_instance_is_scannable(hdev))
1015 			cp.type = LE_ADV_SCAN_IND;
1016 		else
1017 			cp.type = LE_ADV_NONCONN_IND;
1018 
1019 		if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
1020 		    hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1021 			adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
1022 			adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
1023 		}
1024 	}
1025 
1026 	cp.min_interval = cpu_to_le16(adv_min_interval);
1027 	cp.max_interval = cpu_to_le16(adv_max_interval);
1028 	cp.own_address_type = own_addr_type;
1029 	cp.channel_map = hdev->le_adv_channel_map;
1030 
1031 	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1032 
1033 	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1034 }
1035 
1036 void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
1037 {
1038 	struct hci_dev *hdev = req->hdev;
1039 	u8 len;
1040 
1041 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1042 		return;
1043 
1044 	if (ext_adv_capable(hdev)) {
1045 		struct {
1046 			struct hci_cp_le_set_ext_scan_rsp_data cp;
1047 			u8 data[HCI_MAX_EXT_AD_LENGTH];
1048 		} pdu;
1049 
1050 		memset(&pdu, 0, sizeof(pdu));
1051 
1052 		len = eir_create_scan_rsp(hdev, instance, pdu.data);
1053 
1054 		if (hdev->scan_rsp_data_len == len &&
1055 		    !memcmp(pdu.data, hdev->scan_rsp_data, len))
1056 			return;
1057 
1058 		memcpy(hdev->scan_rsp_data, pdu.data, len);
1059 		hdev->scan_rsp_data_len = len;
1060 
1061 		pdu.cp.handle = instance;
1062 		pdu.cp.length = len;
1063 		pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1064 		pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1065 
1066 		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA,
1067 			    sizeof(pdu.cp) + len, &pdu.cp);
1068 	} else {
1069 		struct hci_cp_le_set_scan_rsp_data cp;
1070 
1071 		memset(&cp, 0, sizeof(cp));
1072 
1073 		len = eir_create_scan_rsp(hdev, instance, cp.data);
1074 
1075 		if (hdev->scan_rsp_data_len == len &&
1076 		    !memcmp(cp.data, hdev->scan_rsp_data, len))
1077 			return;
1078 
1079 		memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1080 		hdev->scan_rsp_data_len = len;
1081 
1082 		cp.length = len;
1083 
1084 		hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
1085 	}
1086 }
1087 
1088 void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
1089 {
1090 	struct hci_dev *hdev = req->hdev;
1091 	u8 len;
1092 
1093 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1094 		return;
1095 
1096 	if (ext_adv_capable(hdev)) {
1097 		struct {
1098 			struct hci_cp_le_set_ext_adv_data cp;
1099 			u8 data[HCI_MAX_EXT_AD_LENGTH];
1100 		} pdu;
1101 
1102 		memset(&pdu, 0, sizeof(pdu));
1103 
1104 		len = eir_create_adv_data(hdev, instance, pdu.data);
1105 
1106 		/* There's nothing to do if the data hasn't changed */
1107 		if (hdev->adv_data_len == len &&
1108 		    memcmp(pdu.data, hdev->adv_data, len) == 0)
1109 			return;
1110 
1111 		memcpy(hdev->adv_data, pdu.data, len);
1112 		hdev->adv_data_len = len;
1113 
1114 		pdu.cp.length = len;
1115 		pdu.cp.handle = instance;
1116 		pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1117 		pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1118 
1119 		hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA,
1120 			    sizeof(pdu.cp) + len, &pdu.cp);
1121 	} else {
1122 		struct hci_cp_le_set_adv_data cp;
1123 
1124 		memset(&cp, 0, sizeof(cp));
1125 
1126 		len = eir_create_adv_data(hdev, instance, cp.data);
1127 
1128 		/* There's nothing to do if the data hasn't changed */
1129 		if (hdev->adv_data_len == len &&
1130 		    memcmp(cp.data, hdev->adv_data, len) == 0)
1131 			return;
1132 
1133 		memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1134 		hdev->adv_data_len = len;
1135 
1136 		cp.length = len;
1137 
1138 		hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1139 	}
1140 }
1141 
1142 int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
1143 {
1144 	struct hci_request req;
1145 
1146 	hci_req_init(&req, hdev);
1147 	__hci_req_update_adv_data(&req, instance);
1148 
1149 	return hci_req_run(&req, NULL);
1150 }
1151 
1152 static void enable_addr_resolution_complete(struct hci_dev *hdev, u8 status,
1153 					    u16 opcode)
1154 {
1155 	BT_DBG("%s status %u", hdev->name, status);
1156 }
1157 
1158 void hci_req_disable_address_resolution(struct hci_dev *hdev)
1159 {
1160 	struct hci_request req;
1161 	__u8 enable = 0x00;
1162 
1163 	if (!hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
1164 		return;
1165 
1166 	hci_req_init(&req, hdev);
1167 
1168 	hci_req_add(&req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
1169 
1170 	hci_req_run(&req, enable_addr_resolution_complete);
1171 }
1172 
1173 static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1174 {
1175 	bt_dev_dbg(hdev, "status %u", status);
1176 }
1177 
1178 void hci_req_reenable_advertising(struct hci_dev *hdev)
1179 {
1180 	struct hci_request req;
1181 
1182 	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1183 	    list_empty(&hdev->adv_instances))
1184 		return;
1185 
1186 	hci_req_init(&req, hdev);
1187 
1188 	if (hdev->cur_adv_instance) {
1189 		__hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
1190 						true);
1191 	} else {
1192 		if (ext_adv_capable(hdev)) {
1193 			__hci_req_start_ext_adv(&req, 0x00);
1194 		} else {
1195 			__hci_req_update_adv_data(&req, 0x00);
1196 			__hci_req_update_scan_rsp_data(&req, 0x00);
1197 			__hci_req_enable_advertising(&req);
1198 		}
1199 	}
1200 
1201 	hci_req_run(&req, adv_enable_complete);
1202 }
1203 
1204 static void adv_timeout_expire(struct work_struct *work)
1205 {
1206 	struct hci_dev *hdev = container_of(work, struct hci_dev,
1207 					    adv_instance_expire.work);
1208 
1209 	struct hci_request req;
1210 	u8 instance;
1211 
1212 	bt_dev_dbg(hdev, "");
1213 
1214 	hci_dev_lock(hdev);
1215 
1216 	hdev->adv_instance_timeout = 0;
1217 
1218 	instance = hdev->cur_adv_instance;
1219 	if (instance == 0x00)
1220 		goto unlock;
1221 
1222 	hci_req_init(&req, hdev);
1223 
1224 	hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);
1225 
1226 	if (list_empty(&hdev->adv_instances))
1227 		__hci_req_disable_advertising(&req);
1228 
1229 	hci_req_run(&req, NULL);
1230 
1231 unlock:
1232 	hci_dev_unlock(hdev);
1233 }
1234 
1235 static int hci_req_add_le_interleaved_scan(struct hci_request *req,
1236 					   unsigned long opt)
1237 {
1238 	struct hci_dev *hdev = req->hdev;
1239 	int ret = 0;
1240 
1241 	hci_dev_lock(hdev);
1242 
1243 	if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
1244 		hci_req_add_le_scan_disable(req, false);
1245 	hci_req_add_le_passive_scan(req);
1246 
1247 	switch (hdev->interleave_scan_state) {
1248 	case INTERLEAVE_SCAN_ALLOWLIST:
1249 		bt_dev_dbg(hdev, "next state: allowlist");
1250 		hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
1251 		break;
1252 	case INTERLEAVE_SCAN_NO_FILTER:
1253 		bt_dev_dbg(hdev, "next state: no filter");
1254 		hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST;
1255 		break;
1256 	case INTERLEAVE_SCAN_NONE:
1257 		BT_ERR("unexpected error");
1258 		ret = -1;
1259 	}
1260 
1261 	hci_dev_unlock(hdev);
1262 
1263 	return ret;
1264 }
1265 
1266 static void interleave_scan_work(struct work_struct *work)
1267 {
1268 	struct hci_dev *hdev = container_of(work, struct hci_dev,
1269 					    interleave_scan.work);
1270 	u8 status;
1271 	unsigned long timeout;
1272 
1273 	if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) {
1274 		timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration);
1275 	} else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) {
1276 		timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration);
1277 	} else {
1278 		bt_dev_err(hdev, "unexpected error");
1279 		return;
1280 	}
1281 
1282 	hci_req_sync(hdev, hci_req_add_le_interleaved_scan, 0,
1283 		     HCI_CMD_TIMEOUT, &status);
1284 
1285 	/* Don't continue interleaving if it was canceled */
1286 	if (is_interleave_scanning(hdev))
1287 		queue_delayed_work(hdev->req_workqueue,
1288 				   &hdev->interleave_scan, timeout);
1289 }
1290 
1291 int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
1292 			   bool use_rpa, struct adv_info *adv_instance,
1293 			   u8 *own_addr_type, bdaddr_t *rand_addr)
1294 {
1295 	int err;
1296 
1297 	bacpy(rand_addr, BDADDR_ANY);
1298 
1299 	/* If privacy is enabled use a resolvable private address. If
1300 	 * current RPA has expired then generate a new one.
1301 	 */
1302 	if (use_rpa) {
1303 		/* If the controller supports LL Privacy, use own address
1304 		 * type 0x03 (ADDR_LE_DEV_RANDOM_RESOLVED).
1305 		 */
1306 		if (use_ll_privacy(hdev))
1307 			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
1308 		else
1309 			*own_addr_type = ADDR_LE_DEV_RANDOM;
1310 
1311 		if (adv_instance) {
1312 			if (adv_rpa_valid(adv_instance))
1313 				return 0;
1314 		} else {
1315 			if (rpa_valid(hdev))
1316 				return 0;
1317 		}
1318 
1319 		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
1320 		if (err < 0) {
1321 			bt_dev_err(hdev, "failed to generate new RPA");
1322 			return err;
1323 		}
1324 
1325 		bacpy(rand_addr, &hdev->rpa);
1326 
1327 		return 0;
1328 	}
1329 
1330 	/* In case of required privacy without resolvable private address,
1331 	 * use a non-resolvable private address. This is useful for
1332 	 * non-connectable advertising.
1333 	 */
1334 	if (require_privacy) {
1335 		bdaddr_t nrpa;
1336 
1337 		while (true) {
1338 			/* The non-resolvable private address is generated
1339 			 * from six random bytes with the two most significant
1340 			 * bits cleared.
1341 			 */
1342 			get_random_bytes(&nrpa, 6);
1343 			nrpa.b[5] &= 0x3f;
1344 
1345 			/* The non-resolvable private address shall not be
1346 			 * equal to the public address.
1347 			 */
1348 			if (bacmp(&hdev->bdaddr, &nrpa))
1349 				break;
1350 		}
1351 
1352 		*own_addr_type = ADDR_LE_DEV_RANDOM;
1353 		bacpy(rand_addr, &nrpa);
1354 
1355 		return 0;
1356 	}
1357 
1358 	/* No privacy so use a public address. */
1359 	*own_addr_type = ADDR_LE_DEV_PUBLIC;
1360 
1361 	return 0;
1362 }
1363 
1364 void __hci_req_clear_ext_adv_sets(struct hci_request *req)
1365 {
1366 	hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
1367 }
1368 
1369 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
1370 {
1371 	struct hci_dev *hdev = req->hdev;
1372 
1373 	/* If we're advertising or initiating an LE connection we can't
1374 	 * go ahead and change the random address at this time. This is
1375 	 * because the eventual initiator address used for the
1376 	 * subsequently created connection will be undefined (some
1377 	 * controllers use the new address and others the one we had
1378 	 * when the operation started).
1379 	 *
1380 	 * In this kind of scenario skip the update and let the random
1381 	 * address be updated at the next cycle.
1382 	 */
1383 	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
1384 	    hci_lookup_le_connect(hdev)) {
1385 		bt_dev_dbg(hdev, "Deferring random address update");
1386 		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1387 		return;
1388 	}
1389 
1390 	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
1391 }
1392 
1393 int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
1394 {
1395 	struct hci_cp_le_set_ext_adv_params cp;
1396 	struct hci_dev *hdev = req->hdev;
1397 	bool connectable;
1398 	u32 flags;
1399 	bdaddr_t random_addr;
1400 	u8 own_addr_type;
1401 	int err;
1402 	struct adv_info *adv_instance;
1403 	bool secondary_adv;
1404 
1405 	if (instance > 0) {
1406 		adv_instance = hci_find_adv_instance(hdev, instance);
1407 		if (!adv_instance)
1408 			return -EINVAL;
1409 	} else {
1410 		adv_instance = NULL;
1411 	}
1412 
1413 	flags = hci_adv_instance_flags(hdev, instance);
1414 
1415 	/* If the "connectable" instance flag was not set, then choose between
1416 	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1417 	 */
1418 	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1419 		      mgmt_get_connectable(hdev);
1420 
1421 	if (!is_advertising_allowed(hdev, connectable))
1422 		return -EPERM;
1423 
1424 	/* Set require_privacy to true only when non-connectable
1425 	 * advertising is used. In that case it is fine to use a
1426 	 * non-resolvable private address.
1427 	 */
1428 	err = hci_get_random_address(hdev, !connectable,
1429 				     adv_use_rpa(hdev, flags), adv_instance,
1430 				     &own_addr_type, &random_addr);
1431 	if (err < 0)
1432 		return err;
1433 
1434 	memset(&cp, 0, sizeof(cp));
1435 
1436 	if (adv_instance) {
1437 		hci_cpu_to_le24(adv_instance->min_interval, cp.min_interval);
1438 		hci_cpu_to_le24(adv_instance->max_interval, cp.max_interval);
1439 		cp.tx_power = adv_instance->tx_power;
1440 	} else {
1441 		hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
1442 		hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
1443 		cp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
1444 	}
1445 
1446 	secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
1447 
1448 	if (connectable) {
1449 		if (secondary_adv)
1450 			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
1451 		else
1452 			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
1453 	} else if (hci_adv_instance_is_scannable(hdev, instance) ||
1454 		   (flags & MGMT_ADV_PARAM_SCAN_RSP)) {
1455 		if (secondary_adv)
1456 			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
1457 		else
1458 			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
1459 	} else {
1460 		if (secondary_adv)
1461 			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
1462 		else
1463 			cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
1464 	}
1465 
1466 	cp.own_addr_type = own_addr_type;
1467 	cp.channel_map = hdev->le_adv_channel_map;
1468 	cp.handle = instance;
1469 
1470 	if (flags & MGMT_ADV_FLAG_SEC_2M) {
1471 		cp.primary_phy = HCI_ADV_PHY_1M;
1472 		cp.secondary_phy = HCI_ADV_PHY_2M;
1473 	} else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
1474 		cp.primary_phy = HCI_ADV_PHY_CODED;
1475 		cp.secondary_phy = HCI_ADV_PHY_CODED;
1476 	} else {
1477 		/* In all other cases use 1M */
1478 		cp.primary_phy = HCI_ADV_PHY_1M;
1479 		cp.secondary_phy = HCI_ADV_PHY_1M;
1480 	}
1481 
1482 	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
1483 
1484 	if ((own_addr_type == ADDR_LE_DEV_RANDOM ||
1485 	     own_addr_type == ADDR_LE_DEV_RANDOM_RESOLVED) &&
1486 	    bacmp(&random_addr, BDADDR_ANY)) {
1487 		struct hci_cp_le_set_adv_set_rand_addr cp;
1488 
1489 		/* Check if the random address needs to be updated */
1490 		if (adv_instance) {
1491 			if (!bacmp(&random_addr, &adv_instance->random_addr))
1492 				return 0;
1493 		} else {
1494 			if (!bacmp(&random_addr, &hdev->random_addr))
1495 				return 0;
1496 			/* Instance 0x00 doesn't have an adv_info; instead it
1497 			 * uses hdev->random_addr to track its address, so
1498 			 * whenever it needs to be updated this also sets the
1499 			 * random address, since hdev->random_addr is shared
1500 			 * with the scan state machine.
1501 			 */
1502 			set_random_addr(req, &random_addr);
1503 		}
1504 
1505 		memset(&cp, 0, sizeof(cp));
1506 
1507 		cp.handle = instance;
1508 		bacpy(&cp.bdaddr, &random_addr);
1509 
1510 		hci_req_add(req,
1511 			    HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
1512 			    sizeof(cp), &cp);
1513 	}
1514 
1515 	return 0;
1516 }
1517 
1518 int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
1519 {
1520 	struct hci_dev *hdev = req->hdev;
1521 	struct hci_cp_le_set_ext_adv_enable *cp;
1522 	struct hci_cp_ext_adv_set *adv_set;
1523 	u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
1524 	struct adv_info *adv_instance;
1525 
1526 	if (instance > 0) {
1527 		adv_instance = hci_find_adv_instance(hdev, instance);
1528 		if (!adv_instance)
1529 			return -EINVAL;
1530 	} else {
1531 		adv_instance = NULL;
1532 	}
1533 
1534 	cp = (void *) data;
1535 	adv_set = (void *) cp->data;
1536 
1537 	memset(cp, 0, sizeof(*cp));
1538 
1539 	cp->enable = 0x01;
1540 	cp->num_of_sets = 0x01;
1541 
1542 	memset(adv_set, 0, sizeof(*adv_set));
1543 
1544 	adv_set->handle = instance;
1545 
1546 	/* Set duration per instance since controller is responsible for
1547 	 * scheduling it.
1548 	 */
1549 	if (adv_instance && adv_instance->duration) {
1550 		u16 duration = adv_instance->timeout * MSEC_PER_SEC;
1551 
1552 		/* Time = N * 10 ms */
1553 		adv_set->duration = cpu_to_le16(duration / 10);
1554 	}
1555 
1556 	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
1557 		    sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
1558 		    data);
1559 
1560 	return 0;
1561 }
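
/*
 * Worked example for the duration conversion above: a 60 second instance
 * timeout gives 60 * MSEC_PER_SEC = 60000 ms, so adv_set->duration ends up
 * as 60000 / 10 = 6000, i.e. 6000 units of 10 ms.
 */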
1562 
1563 int __hci_req_disable_ext_adv_instance(struct hci_request *req, u8 instance)
1564 {
1565 	struct hci_dev *hdev = req->hdev;
1566 	struct hci_cp_le_set_ext_adv_enable *cp;
1567 	struct hci_cp_ext_adv_set *adv_set;
1568 	u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
1569 	u8 req_size;
1570 
1571 	/* If request specifies an instance that doesn't exist, fail */
1572 	if (instance > 0 && !hci_find_adv_instance(hdev, instance))
1573 		return -EINVAL;
1574 
1575 	memset(data, 0, sizeof(data));
1576 
1577 	cp = (void *)data;
1578 	adv_set = (void *)cp->data;
1579 
1580 	/* Instance 0x00 indicates all advertising instances will be disabled */
1581 	cp->num_of_sets = !!instance;
1582 	cp->enable = 0x00;
1583 
1584 	adv_set->handle = instance;
1585 
1586 	req_size = sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets;
1587 	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, req_size, data);
1588 
1589 	return 0;
1590 }
1591 
1592 int __hci_req_remove_ext_adv_instance(struct hci_request *req, u8 instance)
1593 {
1594 	struct hci_dev *hdev = req->hdev;
1595 
1596 	/* If request specifies an instance that doesn't exist, fail */
1597 	if (instance > 0 && !hci_find_adv_instance(hdev, instance))
1598 		return -EINVAL;
1599 
1600 	hci_req_add(req, HCI_OP_LE_REMOVE_ADV_SET, sizeof(instance), &instance);
1601 
1602 	return 0;
1603 }
1604 
1605 int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
1606 {
1607 	struct hci_dev *hdev = req->hdev;
1608 	struct adv_info *adv_instance = hci_find_adv_instance(hdev, instance);
1609 	int err;
1610 
1611 	/* If instance isn't pending, the chip knows about it, and it's safe to
1612 	 * disable
1613 	 */
1614 	if (adv_instance && !adv_instance->pending)
1615 		__hci_req_disable_ext_adv_instance(req, instance);
1616 
1617 	err = __hci_req_setup_ext_adv_instance(req, instance);
1618 	if (err < 0)
1619 		return err;
1620 
1621 	__hci_req_update_scan_rsp_data(req, instance);
1622 	__hci_req_enable_ext_advertising(req, instance);
1623 
1624 	return 0;
1625 }
1626 
1627 int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
1628 				    bool force)
1629 {
1630 	struct hci_dev *hdev = req->hdev;
1631 	struct adv_info *adv_instance = NULL;
1632 	u16 timeout;
1633 
1634 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
1635 	    list_empty(&hdev->adv_instances))
1636 		return -EPERM;
1637 
1638 	if (hdev->adv_instance_timeout)
1639 		return -EBUSY;
1640 
1641 	adv_instance = hci_find_adv_instance(hdev, instance);
1642 	if (!adv_instance)
1643 		return -ENOENT;
1644 
1645 	/* A zero timeout means unlimited advertising. As long as there is
1646 	 * only one instance, duration should be ignored. We still set a timeout
1647 	 * in case further instances are being added later on.
1648 	 *
1649 	 * If the remaining lifetime of the instance is more than the duration
1650 	 * then the timeout corresponds to the duration, otherwise it will be
1651 	 * reduced to the remaining instance lifetime.
1652 	 */
1653 	if (adv_instance->timeout == 0 ||
1654 	    adv_instance->duration <= adv_instance->remaining_time)
1655 		timeout = adv_instance->duration;
1656 	else
1657 		timeout = adv_instance->remaining_time;
1658 
1659 	/* The remaining time is being reduced unless the instance is being
1660 	 * advertised without time limit.
1661 	 */
1662 	if (adv_instance->timeout)
1663 		adv_instance->remaining_time =
1664 				adv_instance->remaining_time - timeout;
1665 
1666 	/* Only use work for scheduling instances with legacy advertising */
1667 	if (!ext_adv_capable(hdev)) {
1668 		hdev->adv_instance_timeout = timeout;
1669 		queue_delayed_work(hdev->req_workqueue,
1670 			   &hdev->adv_instance_expire,
1671 			   msecs_to_jiffies(timeout * 1000));
1672 	}
1673 
1674 	/* If we're just re-scheduling the same instance again then do not
1675 	 * execute any HCI commands. This happens when a single instance is
1676 	 * being advertised.
1677 	 */
1678 	if (!force && hdev->cur_adv_instance == instance &&
1679 	    hci_dev_test_flag(hdev, HCI_LE_ADV))
1680 		return 0;
1681 
1682 	hdev->cur_adv_instance = instance;
1683 	if (ext_adv_capable(hdev)) {
1684 		__hci_req_start_ext_adv(req, instance);
1685 	} else {
1686 		__hci_req_update_adv_data(req, instance);
1687 		__hci_req_update_scan_rsp_data(req, instance);
1688 		__hci_req_enable_advertising(req);
1689 	}
1690 
1691 	return 0;
1692 }
1693 
1694 /* For a single instance:
1695  * - force == true: The instance will be removed even when its remaining
1696  *   lifetime is not zero.
1697  * - force == false: the instance will be deactivated but kept stored unless
1698  *   the remaining lifetime is zero.
1699  *
1700  * For instance == 0x00:
1701  * - force == true: All instances will be removed regardless of their timeout
1702  *   setting.
1703  * - force == false: Only instances that have a timeout will be removed.
1704  */
1705 void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
1706 				struct hci_request *req, u8 instance,
1707 				bool force)
1708 {
1709 	struct adv_info *adv_instance, *n, *next_instance = NULL;
1710 	int err;
1711 	u8 rem_inst;
1712 
1713 	/* Cancel any timeout concerning the removed instance(s). */
1714 	if (!instance || hdev->cur_adv_instance == instance)
1715 		cancel_adv_timeout(hdev);
1716 
1717 	/* Get the next instance to advertise BEFORE we remove
1718 	 * the current one. This can be the same instance again
1719 	 * if there is only one instance.
1720 	 */
1721 	if (instance && hdev->cur_adv_instance == instance)
1722 		next_instance = hci_get_next_instance(hdev, instance);
1723 
1724 	if (instance == 0x00) {
1725 		list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
1726 					 list) {
1727 			if (!(force || adv_instance->timeout))
1728 				continue;
1729 
1730 			rem_inst = adv_instance->instance;
1731 			err = hci_remove_adv_instance(hdev, rem_inst);
1732 			if (!err)
1733 				mgmt_advertising_removed(sk, hdev, rem_inst);
1734 		}
1735 	} else {
1736 		adv_instance = hci_find_adv_instance(hdev, instance);
1737 
1738 		if (force || (adv_instance && adv_instance->timeout &&
1739 			      !adv_instance->remaining_time)) {
1740 			/* Don't advertise a removed instance. */
1741 			if (next_instance &&
1742 			    next_instance->instance == instance)
1743 				next_instance = NULL;
1744 
1745 			err = hci_remove_adv_instance(hdev, instance);
1746 			if (!err)
1747 				mgmt_advertising_removed(sk, hdev, instance);
1748 		}
1749 	}
1750 
1751 	if (!req || !hdev_is_powered(hdev) ||
1752 	    hci_dev_test_flag(hdev, HCI_ADVERTISING))
1753 		return;
1754 
1755 	if (next_instance && !ext_adv_capable(hdev))
1756 		__hci_req_schedule_adv_instance(req, next_instance->instance,
1757 						false);
1758 }
1759 
1760 int hci_update_random_address(struct hci_request *req, bool require_privacy,
1761 			      bool use_rpa, u8 *own_addr_type)
1762 {
1763 	struct hci_dev *hdev = req->hdev;
1764 	int err;
1765 
1766 	/* If privacy is enabled use a resolvable private address. If
1767 	 * current RPA has expired or there is something other than
1768 	 * the current RPA in use, then generate a new one.
1769 	 */
1770 	if (use_rpa) {
1771 		/* If the controller supports LL Privacy, use own address
1772 		 * type 0x03 (ADDR_LE_DEV_RANDOM_RESOLVED).
1773 		 */
1774 		if (use_ll_privacy(hdev))
1775 			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
1776 		else
1777 			*own_addr_type = ADDR_LE_DEV_RANDOM;
1778 
1779 		if (rpa_valid(hdev))
1780 			return 0;
1781 
1782 		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
1783 		if (err < 0) {
1784 			bt_dev_err(hdev, "failed to generate new RPA");
1785 			return err;
1786 		}
1787 
1788 		set_random_addr(req, &hdev->rpa);
1789 
1790 		return 0;
1791 	}
1792 
1793 	/* In case of required privacy without resolvable private address,
1794 	 * use a non-resolvable private address. This is useful for active
1795 	 * scanning and non-connectable advertising.
1796 	 */
1797 	if (require_privacy) {
1798 		bdaddr_t nrpa;
1799 
1800 		while (true) {
1801 			/* The non-resolvable private address is generated
1802 			 * from six random bytes with the two most significant
1803 			 * bits cleared.
1804 			 */
1805 			get_random_bytes(&nrpa, 6);
1806 			nrpa.b[5] &= 0x3f;
1807 
1808 			/* The non-resolvable private address shall not be
1809 			 * equal to the public address.
1810 			 */
1811 			if (bacmp(&hdev->bdaddr, &nrpa))
1812 				break;
1813 		}
1814 
1815 		*own_addr_type = ADDR_LE_DEV_RANDOM;
1816 		set_random_addr(req, &nrpa);
1817 		return 0;
1818 	}
1819 
1820 	/* If forcing the static address is in use, or there is no public
1821 	 * address, use the static address as the random address (but skip
1822 	 * the HCI command if the current random address is already the
1823 	 * static one).
1824 	 *
1825 	 * In case BR/EDR has been disabled on a dual-mode controller
1826 	 * and a static address has been configured, then use that
1827 	 * address instead of the public BR/EDR address.
1828 	 */
1829 	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
1830 	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
1831 	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
1832 	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
1833 		*own_addr_type = ADDR_LE_DEV_RANDOM;
1834 		if (bacmp(&hdev->static_addr, &hdev->random_addr))
1835 			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
1836 				    &hdev->static_addr);
1837 		return 0;
1838 	}
1839 
1840 	/* Neither privacy nor static address is being used so use a
1841 	 * public address.
1842 	 */
1843 	*own_addr_type = ADDR_LE_DEV_PUBLIC;
1844 
1845 	return 0;
1846 }
1847 
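/* Illustrative use of hci_update_random_address(), mirroring how
 * active_scan() later in this file calls it: the caller passes the address
 * of its own_addr_type variable and falls back to the public address if
 * the update fails:
 *
 *	err = hci_update_random_address(req, true, scan_use_rpa(hdev),
 *					&own_addr_type);
 *	if (err < 0)
 *		own_addr_type = ADDR_LE_DEV_PUBLIC;
 */

/* Return true if at least one accept list entry does not have a fully
 * established ACL connection, in which case page scanning must stay
 * enabled so that the remote device can (re)connect.
 */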
1848 static bool disconnected_accept_list_entries(struct hci_dev *hdev)
1849 {
1850 	struct bdaddr_list *b;
1851 
1852 	list_for_each_entry(b, &hdev->accept_list, list) {
1853 		struct hci_conn *conn;
1854 
1855 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
1856 		if (!conn)
1857 			return true;
1858 
1859 		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
1860 			return true;
1861 	}
1862 
1863 	return false;
1864 }
1865 
1866 void __hci_req_update_scan(struct hci_request *req)
1867 {
1868 	struct hci_dev *hdev = req->hdev;
1869 	u8 scan;
1870 
1871 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1872 		return;
1873 
1874 	if (!hdev_is_powered(hdev))
1875 		return;
1876 
1877 	if (mgmt_powering_down(hdev))
1878 		return;
1879 
1880 	if (hdev->scanning_paused)
1881 		return;
1882 
1883 	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
1884 	    disconnected_accept_list_entries(hdev))
1885 		scan = SCAN_PAGE;
1886 	else
1887 		scan = SCAN_DISABLED;
1888 
1889 	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1890 		scan |= SCAN_INQUIRY;
1891 
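	/* Skip the command when the requested page scan and inquiry scan
	 * states already match the current controller state.
	 */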
1892 	if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
1893 	    test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
1894 		return;
1895 
1896 	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1897 }
1898 
1899 static int update_scan(struct hci_request *req, unsigned long opt)
1900 {
1901 	hci_dev_lock(req->hdev);
1902 	__hci_req_update_scan(req);
1903 	hci_dev_unlock(req->hdev);
1904 	return 0;
1905 }
1906 
1907 static void scan_update_work(struct work_struct *work)
1908 {
1909 	struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
1910 
1911 	hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
1912 }
1913 
1914 static u8 get_service_classes(struct hci_dev *hdev)
1915 {
1916 	struct bt_uuid *uuid;
1917 	u8 val = 0;
1918 
1919 	list_for_each_entry(uuid, &hdev->uuids, list)
1920 		val |= uuid->svc_hint;
1921 
1922 	return val;
1923 }
1924 
1925 void __hci_req_update_class(struct hci_request *req)
1926 {
1927 	struct hci_dev *hdev = req->hdev;
1928 	u8 cod[3];
1929 
1930 	bt_dev_dbg(hdev, "");
1931 
1932 	if (!hdev_is_powered(hdev))
1933 		return;
1934 
1935 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1936 		return;
1937 
1938 	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1939 		return;
1940 
1941 	cod[0] = hdev->minor_class;
1942 	cod[1] = hdev->major_class;
1943 	cod[2] = get_service_classes(hdev);
1944 
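	/* Bit 5 of the middle byte maps to bit 13 of the 24-bit Class of
	 * Device, the "Limited Discoverable Mode" service class bit.
	 */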
1945 	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1946 		cod[1] |= 0x20;
1947 
1948 	if (memcmp(cod, hdev->dev_class, 3) == 0)
1949 		return;
1950 
1951 	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
1952 }
1953 
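/* The IAC LAPs below are written least significant byte first:
 * { 0x00, 0x8b, 0x9e } is the Limited Inquiry Access Code 0x9E8B00 and
 * { 0x33, 0x8b, 0x9e } is the General Inquiry Access Code 0x9E8B33.
 */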
1954 static void write_iac(struct hci_request *req)
1955 {
1956 	struct hci_dev *hdev = req->hdev;
1957 	struct hci_cp_write_current_iac_lap cp;
1958 
1959 	if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1960 		return;
1961 
1962 	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1963 		/* Limited discoverable mode */
1964 		cp.num_iac = min_t(u8, hdev->num_iac, 2);
1965 		cp.iac_lap[0] = 0x00;	/* LIAC */
1966 		cp.iac_lap[1] = 0x8b;
1967 		cp.iac_lap[2] = 0x9e;
1968 		cp.iac_lap[3] = 0x33;	/* GIAC */
1969 		cp.iac_lap[4] = 0x8b;
1970 		cp.iac_lap[5] = 0x9e;
1971 	} else {
1972 		/* General discoverable mode */
1973 		cp.num_iac = 1;
1974 		cp.iac_lap[0] = 0x33;	/* GIAC */
1975 		cp.iac_lap[1] = 0x8b;
1976 		cp.iac_lap[2] = 0x9e;
1977 	}
1978 
1979 	hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1980 		    (cp.num_iac * 3) + 1, &cp);
1981 }
1982 
1983 static int discoverable_update(struct hci_request *req, unsigned long opt)
1984 {
1985 	struct hci_dev *hdev = req->hdev;
1986 
1987 	hci_dev_lock(hdev);
1988 
1989 	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1990 		write_iac(req);
1991 		__hci_req_update_scan(req);
1992 		__hci_req_update_class(req);
1993 	}
1994 
1995 	/* Advertising instances don't use the global discoverable setting, so
1996 	 * only update AD if advertising was enabled using Set Advertising.
1997 	 */
1998 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
1999 		__hci_req_update_adv_data(req, 0x00);
2000 
2001 		/* Discoverable mode affects the local advertising
2002 		 * address in limited privacy mode.
2003 		 */
2004 		if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
2005 			if (ext_adv_capable(hdev))
2006 				__hci_req_start_ext_adv(req, 0x00);
2007 			else
2008 				__hci_req_enable_advertising(req);
2009 		}
2010 	}
2011 
2012 	hci_dev_unlock(hdev);
2013 
2014 	return 0;
2015 }
2016 
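/* Queue the HCI command that tears down @conn for whatever state it is in:
 * disconnect an established link, cancel an outgoing connection attempt,
 * or reject an incoming connection request.
 */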
2017 void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
2018 		      u8 reason)
2019 {
2020 	switch (conn->state) {
2021 	case BT_CONNECTED:
2022 	case BT_CONFIG:
2023 		if (conn->type == AMP_LINK) {
2024 			struct hci_cp_disconn_phy_link cp;
2025 
2026 			cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
2027 			cp.reason = reason;
2028 			hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
2029 				    &cp);
2030 		} else {
2031 			struct hci_cp_disconnect dc;
2032 
2033 			dc.handle = cpu_to_le16(conn->handle);
2034 			dc.reason = reason;
2035 			hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2036 		}
2037 
2038 		conn->state = BT_DISCONN;
2039 
2040 		break;
2041 	case BT_CONNECT:
2042 		if (conn->type == LE_LINK) {
2043 			if (test_bit(HCI_CONN_SCANNING, &conn->flags))
2044 				break;
2045 			hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
2046 				    0, NULL);
2047 		} else if (conn->type == ACL_LINK) {
2048 			if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
2049 				break;
2050 			hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
2051 				    6, &conn->dst);
2052 		}
2053 		break;
2054 	case BT_CONNECT2:
2055 		if (conn->type == ACL_LINK) {
2056 			struct hci_cp_reject_conn_req rej;
2057 
2058 			bacpy(&rej.bdaddr, &conn->dst);
2059 			rej.reason = reason;
2060 
2061 			hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
2062 				    sizeof(rej), &rej);
2063 		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
2064 			struct hci_cp_reject_sync_conn_req rej;
2065 
2066 			bacpy(&rej.bdaddr, &conn->dst);
2067 
2068 			/* SCO rejection has its own limited set of
2069 			 * allowed error values (0x0D-0x0F) which isn't
2070 			 * compatible with most values passed to this
2071 			 * function. To be safe, hard-code one of the
2072 			 * values that's suitable for SCO.
2073 			 */
2074 			rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
2075 
2076 			hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
2077 				    sizeof(rej), &rej);
2078 		}
2079 		break;
2080 	default:
2081 		conn->state = BT_CLOSED;
2082 		break;
2083 	}
2084 }
2085 
2086 static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2087 {
2088 	if (status)
2089 		bt_dev_dbg(hdev, "Failed to abort connection: status 0x%2.2x", status);
2090 }
2091 
2092 int hci_abort_conn(struct hci_conn *conn, u8 reason)
2093 {
2094 	struct hci_request req;
2095 	int err;
2096 
2097 	hci_req_init(&req, conn->hdev);
2098 
2099 	__hci_abort_conn(&req, conn, reason);
2100 
2101 	err = hci_req_run(&req, abort_conn_complete);
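	/* An empty request makes hci_req_run() return -ENODATA, which here
	 * simply means no command was needed for the current connection
	 * state, so it is not treated as a failure.
	 */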
2102 	if (err && err != -ENODATA) {
2103 		bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
2104 		return err;
2105 	}
2106 
2107 	return 0;
2108 }
2109 
2110 static int le_scan_disable(struct hci_request *req, unsigned long opt)
2111 {
2112 	hci_req_add_le_scan_disable(req, false);
2113 	return 0;
2114 }
2115 
2116 static int bredr_inquiry(struct hci_request *req, unsigned long opt)
2117 {
2118 	u8 length = opt;
2119 	const u8 giac[3] = { 0x33, 0x8b, 0x9e };
2120 	const u8 liac[3] = { 0x00, 0x8b, 0x9e };
2121 	struct hci_cp_inquiry cp;
2122 
2123 	if (test_bit(HCI_INQUIRY, &req->hdev->flags))
2124 		return 0;
2125 
2126 	bt_dev_dbg(req->hdev, "");
2127 
2128 	hci_dev_lock(req->hdev);
2129 	hci_inquiry_cache_flush(req->hdev);
2130 	hci_dev_unlock(req->hdev);
2131 
2132 	memset(&cp, 0, sizeof(cp));
2133 
2134 	if (req->hdev->discovery.limited)
2135 		memcpy(&cp.lap, liac, sizeof(cp.lap));
2136 	else
2137 		memcpy(&cp.lap, giac, sizeof(cp.lap));
2138 
2139 	cp.length = length;
2140 
2141 	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2142 
2143 	return 0;
2144 }
2145 
2146 static void le_scan_disable_work(struct work_struct *work)
2147 {
2148 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2149 					    le_scan_disable.work);
2150 	u8 status;
2151 
2152 	bt_dev_dbg(hdev, "");
2153 
2154 	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2155 		return;
2156 
2157 	cancel_delayed_work(&hdev->le_scan_restart);
2158 
2159 	hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
2160 	if (status) {
2161 		bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
2162 			   status);
2163 		return;
2164 	}
2165 
2166 	hdev->discovery.scan_start = 0;
2167 
2168 	/* If we were running an LE-only scan, change the discovery state.
2169 	 * If we were running both LE scanning and BR/EDR inquiry
2170 	 * simultaneously and the BR/EDR inquiry has already finished, stop
2171 	 * discovery; otherwise the BR/EDR inquiry will stop discovery when
2172 	 * it finishes. If a remote device name is still to be resolved, do
2173 	 * not change the discovery state.
2174 	 */
2175 
2176 	if (hdev->discovery.type == DISCOV_TYPE_LE)
2177 		goto discov_stopped;
2178 
2179 	if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
2180 		return;
2181 
2182 	if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
2183 		if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
2184 		    hdev->discovery.state != DISCOVERY_RESOLVING)
2185 			goto discov_stopped;
2186 
2187 		return;
2188 	}
2189 
2190 	hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
2191 		     HCI_CMD_TIMEOUT, &status);
2192 	if (status) {
2193 		bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
2194 		goto discov_stopped;
2195 	}
2196 
2197 	return;
2198 
2199 discov_stopped:
2200 	hci_dev_lock(hdev);
2201 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2202 	hci_dev_unlock(hdev);
2203 }
2204 
2205 static int le_scan_restart(struct hci_request *req, unsigned long opt)
2206 {
2207 	struct hci_dev *hdev = req->hdev;
2208 
2209 	/* If the controller is not scanning, we are done. */
2210 	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2211 		return 0;
2212 
2213 	if (hdev->scanning_paused) {
2214 		bt_dev_dbg(hdev, "Scanning is paused for suspend");
2215 		return 0;
2216 	}
2217 
2218 	hci_req_add_le_scan_disable(req, false);
2219 
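	/* Re-enable scanning with the command that matches the controller:
	 * the extended scan enable command when extended scanning is
	 * supported, the legacy one otherwise.
	 */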
2220 	if (use_ext_scan(hdev)) {
2221 		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
2222 
2223 		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
2224 		ext_enable_cp.enable = LE_SCAN_ENABLE;
2225 		ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2226 
2227 		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
2228 			    sizeof(ext_enable_cp), &ext_enable_cp);
2229 	} else {
2230 		struct hci_cp_le_set_scan_enable cp;
2231 
2232 		memset(&cp, 0, sizeof(cp));
2233 		cp.enable = LE_SCAN_ENABLE;
2234 		cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2235 		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2236 	}
2237 
2238 	return 0;
2239 }
2240 
2241 static void le_scan_restart_work(struct work_struct *work)
2242 {
2243 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2244 					    le_scan_restart.work);
2245 	unsigned long timeout, duration, scan_start, now;
2246 	u8 status;
2247 
2248 	bt_dev_dbg(hdev, "");
2249 
2250 	hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
2251 	if (status) {
2252 		bt_dev_err(hdev, "failed to restart LE scan: status %d",
2253 			   status);
2254 		return;
2255 	}
2256 
2257 	hci_dev_lock(hdev);
2258 
2259 	if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
2260 	    !hdev->discovery.scan_start)
2261 		goto unlock;
2262 
2263 	/* When the scan was started, hdev->le_scan_disable was queued to
2264 	 * run scan_duration after scan_start. During the scan restart that
2265 	 * work was canceled, so queue it again with the proper remaining
2266 	 * timeout to make sure the scan does not run indefinitely.
2267 	 */
2268 	duration = hdev->discovery.scan_duration;
2269 	scan_start = hdev->discovery.scan_start;
2270 	now = jiffies;
2271 	if (now - scan_start <= duration) {
2272 		int elapsed;
2273 
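		/* jiffies is an unsigned counter that may have wrapped
		 * around between scan_start and now; the else branch below
		 * accounts for that wrap when computing the elapsed time.
		 */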
2274 		if (now >= scan_start)
2275 			elapsed = now - scan_start;
2276 		else
2277 			elapsed = ULONG_MAX - scan_start + now;
2278 
2279 		timeout = duration - elapsed;
2280 	} else {
2281 		timeout = 0;
2282 	}
2283 
2284 	queue_delayed_work(hdev->req_workqueue,
2285 			   &hdev->le_scan_disable, timeout);
2286 
2287 unlock:
2288 	hci_dev_unlock(hdev);
2289 }
2290 
2291 static int active_scan(struct hci_request *req, unsigned long opt)
2292 {
2293 	uint16_t interval = opt;
2294 	struct hci_dev *hdev = req->hdev;
2295 	u8 own_addr_type;
2296 	/* Accept list is not used for discovery */
2297 	u8 filter_policy = 0x00;
2298 	/* Default is to enable the duplicate filter */
2299 	u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2300 	/* Discovery doesn't require controller address resolution */
2301 	bool addr_resolv = false;
2302 	int err;
2303 
2304 	bt_dev_dbg(hdev, "");
2305 
2306 	/* If the controller is already scanning, the background scan is
2307 	 * running. Temporarily stop it in order to set the discovery
2308 	 * scanning parameters.
2309 	 */
2310 	if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2311 		hci_req_add_le_scan_disable(req, false);
2312 		cancel_interleave_scan(hdev);
2313 	}
2314 
2315 	/* All active scans will be done with either a resolvable private
2316 	 * address (when privacy feature has been enabled) or non-resolvable
2317 	 * private address.
2318 	 */
2319 	err = hci_update_random_address(req, true, scan_use_rpa(hdev),
2320 					&own_addr_type);
2321 	if (err < 0)
2322 		own_addr_type = ADDR_LE_DEV_PUBLIC;
2323 
2324 	if (hci_is_adv_monitoring(hdev)) {
2325 		/* The duplicate filter should be disabled when an advertisement
2326 		 * monitor is activated; otherwise the monitor can only receive
2327 		 * one advertisement per peer during active scanning, and might
2328 		 * wrongly report those peers as lost.
2329 		 *
2330 		 * Note that different controllers have different notions of a
2331 		 * duplicate: some consider packets with the same address as
2332 		 * duplicates, others only packets with the same address and
2333 		 * the same RSSI. Although the filter would not need to be
2334 		 * disabled in the latter case, active scanning typically runs
2335 		 * only for a short period of time, so the power impact should
2336 		 * be negligible.
2337 		 */
2338 		filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
2339 	}
2340 
2341 	hci_req_start_scan(req, LE_SCAN_ACTIVE, interval,
2342 			   hdev->le_scan_window_discovery, own_addr_type,
2343 			   filter_policy, filter_dup, addr_resolv);
2344 	return 0;
2345 }
2346 
2347 static int interleaved_discov(struct hci_request *req, unsigned long opt)
2348 {
2349 	int err;
2350 
2351 	bt_dev_dbg(req->hdev, "");
2352 
2353 	err = active_scan(req, opt);
2354 	if (err)
2355 		return err;
2356 
2357 	return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
2358 }
2359 
2360 static void start_discovery(struct hci_dev *hdev, u8 *status)
2361 {
2362 	unsigned long timeout;
2363 
2364 	bt_dev_dbg(hdev, "type %u", hdev->discovery.type);
2365 
2366 	switch (hdev->discovery.type) {
2367 	case DISCOV_TYPE_BREDR:
2368 		if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
2369 			hci_req_sync(hdev, bredr_inquiry,
2370 				     DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
2371 				     status);
2372 		return;
2373 	case DISCOV_TYPE_INTERLEAVED:
2374 		/* When running simultaneous discovery, the LE scanning time
2375 		 * should occupy the whole discovery time since BR/EDR inquiry
2376 		 * and LE scanning are scheduled by the controller.
2377 		 *
2378 		 * For interleaved discovery, in comparison, BR/EDR inquiry
2379 		 * and LE scanning are done sequentially with separate
2380 		 * timeouts.
2381 		 */
2382 		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
2383 			     &hdev->quirks)) {
2384 			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2385 			/* During simultaneous discovery, we double LE scan
2386 			 * interval. We must leave some time for the controller
2387 			 * to do BR/EDR inquiry.
2388 			 */
2389 			hci_req_sync(hdev, interleaved_discov,
2390 				     hdev->le_scan_int_discovery * 2, HCI_CMD_TIMEOUT,
2391 				     status);
2392 			break;
2393 		}
2394 
2395 		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
2396 		hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
2397 			     HCI_CMD_TIMEOUT, status);
2398 		break;
2399 	case DISCOV_TYPE_LE:
2400 		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2401 		hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
2402 			     HCI_CMD_TIMEOUT, status);
2403 		break;
2404 	default:
2405 		*status = HCI_ERROR_UNSPECIFIED;
2406 		return;
2407 	}
2408 
2409 	if (*status)
2410 		return;
2411 
2412 	bt_dev_dbg(hdev, "timeout %u ms", jiffies_to_msecs(timeout));
2413 
2414 	/* When service discovery is used and the controller has a
2415 	 * strict duplicate filter, it is important to remember the
2416 	 * start and duration of the scan. This is required for
2417 	 * restarting scanning during the discovery phase.
2418 	 */
2419 	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
2420 		     hdev->discovery.result_filtering) {
2421 		hdev->discovery.scan_start = jiffies;
2422 		hdev->discovery.scan_duration = timeout;
2423 	}
2424 
2425 	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
2426 			   timeout);
2427 }
2428 
2429 bool hci_req_stop_discovery(struct hci_request *req)
2430 {
2431 	struct hci_dev *hdev = req->hdev;
2432 	struct discovery_state *d = &hdev->discovery;
2433 	struct hci_cp_remote_name_req_cancel cp;
2434 	struct inquiry_entry *e;
2435 	bool ret = false;
2436 
2437 	bt_dev_dbg(hdev, "state %u", hdev->discovery.state);
2438 
2439 	if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
2440 		if (test_bit(HCI_INQUIRY, &hdev->flags))
2441 			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2442 
2443 		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2444 			cancel_delayed_work(&hdev->le_scan_disable);
2445 			cancel_delayed_work(&hdev->le_scan_restart);
2446 			hci_req_add_le_scan_disable(req, false);
2447 		}
2448 
2449 		ret = true;
2450 	} else {
2451 		/* Passive scanning */
2452 		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2453 			hci_req_add_le_scan_disable(req, false);
2454 			ret = true;
2455 		}
2456 	}
2457 
2458 	/* No further actions needed for LE-only discovery */
2459 	if (d->type == DISCOV_TYPE_LE)
2460 		return ret;
2461 
2462 	if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
2463 		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2464 						     NAME_PENDING);
2465 		if (!e)
2466 			return ret;
2467 
2468 		bacpy(&cp.bdaddr, &e->data.bdaddr);
2469 		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
2470 			    &cp);
2471 		ret = true;
2472 	}
2473 
2474 	return ret;
2475 }
2476 
2477 static void config_data_path_complete(struct hci_dev *hdev, u8 status,
2478 				      u16 opcode)
2479 {
2480 	bt_dev_dbg(hdev, "status %u", status);
2481 }
2482 
2483 int hci_req_configure_datapath(struct hci_dev *hdev, struct bt_codec *codec)
2484 {
2485 	struct hci_request req;
2486 	int err;
2487 	__u8 vnd_len, *vnd_data = NULL;
2488 	struct hci_op_configure_data_path *cmd = NULL;
2489 
2490 	hci_req_init(&req, hdev);
2491 
2492 	err = hdev->get_codec_config_data(hdev, ESCO_LINK, codec, &vnd_len,
2493 					  &vnd_data);
2494 	if (err < 0)
2495 		goto error;
2496 
2497 	cmd = kzalloc(sizeof(*cmd) + vnd_len, GFP_KERNEL);
2498 	if (!cmd) {
2499 		err = -ENOMEM;
2500 		goto error;
2501 	}
2502 
2503 	err = hdev->get_data_path_id(hdev, &cmd->data_path_id);
2504 	if (err < 0)
2505 		goto error;
2506 
2507 	cmd->vnd_len = vnd_len;
2508 	memcpy(cmd->vnd_data, vnd_data, vnd_len);
2509 
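	/* Issue the command once for each data path direction (0x00 and
	 * 0x01) so that both directions of the path use the same vendor
	 * codec configuration.
	 */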
2510 	cmd->direction = 0x00;
2511 	hci_req_add(&req, HCI_CONFIGURE_DATA_PATH, sizeof(*cmd) + vnd_len, cmd);
2512 
2513 	cmd->direction = 0x01;
2514 	hci_req_add(&req, HCI_CONFIGURE_DATA_PATH, sizeof(*cmd) + vnd_len, cmd);
2515 
2516 	err = hci_req_run(&req, config_data_path_complete);
2517 error:
2518 
2519 	kfree(cmd);
2520 	kfree(vnd_data);
2521 	return err;
2522 }
2523 
2524 static int stop_discovery(struct hci_request *req, unsigned long opt)
2525 {
2526 	hci_dev_lock(req->hdev);
2527 	hci_req_stop_discovery(req);
2528 	hci_dev_unlock(req->hdev);
2529 
2530 	return 0;
2531 }
2532 
2533 static void discov_update(struct work_struct *work)
2534 {
2535 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2536 					    discov_update);
2537 	u8 status = 0;
2538 
2539 	switch (hdev->discovery.state) {
2540 	case DISCOVERY_STARTING:
2541 		start_discovery(hdev, &status);
2542 		mgmt_start_discovery_complete(hdev, status);
2543 		if (status)
2544 			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2545 		else
2546 			hci_discovery_set_state(hdev, DISCOVERY_FINDING);
2547 		break;
2548 	case DISCOVERY_STOPPING:
2549 		hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
2550 		mgmt_stop_discovery_complete(hdev, status);
2551 		if (!status)
2552 			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2553 		break;
2554 	case DISCOVERY_STOPPED:
2555 	default:
2556 		return;
2557 	}
2558 }
2559 
2560 static void discov_off(struct work_struct *work)
2561 {
2562 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2563 					    discov_off.work);
2564 
2565 	bt_dev_dbg(hdev, "");
2566 
2567 	hci_dev_lock(hdev);
2568 
2569 	/* When the discoverable timeout triggers, just make sure the
2570 	 * limited discoverable flag is cleared. Even when the timeout was
2571 	 * triggered from general discoverable mode, it is safe to
2572 	 * unconditionally clear the flag.
2573 	 */
2574 	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2575 	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2576 	hdev->discov_timeout = 0;
2577 
2578 	hci_dev_unlock(hdev);
2579 
2580 	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
2581 	mgmt_new_settings(hdev);
2582 }
2583 
2584 static int powered_update_hci(struct hci_request *req, unsigned long opt)
2585 {
2586 	struct hci_dev *hdev = req->hdev;
2587 	u8 link_sec;
2588 
2589 	hci_dev_lock(hdev);
2590 
2591 	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
2592 	    !lmp_host_ssp_capable(hdev)) {
2593 		u8 mode = 0x01;
2594 
2595 		hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
2596 
2597 		if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
2598 			u8 support = 0x01;
2599 
2600 			hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
2601 				    sizeof(support), &support);
2602 		}
2603 	}
2604 
2605 	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
2606 	    lmp_bredr_capable(hdev)) {
2607 		struct hci_cp_write_le_host_supported cp;
2608 
2609 		cp.le = 0x01;
2610 		cp.simul = 0x00;
2611 
2612 		/* Check first if we already have the right
2613 		 * host state (host features set)
2614 		 */
2615 		if (cp.le != lmp_host_le_capable(hdev) ||
2616 		    cp.simul != lmp_host_le_br_capable(hdev))
2617 			hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
2618 				    sizeof(cp), &cp);
2619 	}
2620 
2621 	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2622 		/* Make sure the controller has a good default for
2623 		 * advertising data. This also applies to the case
2624 		 * where BR/EDR was toggled during the AUTO_OFF phase.
2625 		 */
2626 		if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2627 		    list_empty(&hdev->adv_instances)) {
2628 			int err;
2629 
2630 			if (ext_adv_capable(hdev)) {
2631 				err = __hci_req_setup_ext_adv_instance(req,
2632 								       0x00);
2633 				if (!err)
2634 					__hci_req_update_scan_rsp_data(req,
2635 								       0x00);
2636 			} else {
2637 				err = 0;
2638 				__hci_req_update_adv_data(req, 0x00);
2639 				__hci_req_update_scan_rsp_data(req, 0x00);
2640 			}
2641 
2642 			if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2643 				if (!ext_adv_capable(hdev))
2644 					__hci_req_enable_advertising(req);
2645 				else if (!err)
2646 					__hci_req_enable_ext_advertising(req,
2647 									 0x00);
2648 			}
2649 		} else if (!list_empty(&hdev->adv_instances)) {
2650 			struct adv_info *adv_instance;
2651 
2652 			adv_instance = list_first_entry(&hdev->adv_instances,
2653 							struct adv_info, list);
2654 			__hci_req_schedule_adv_instance(req,
2655 							adv_instance->instance,
2656 							true);
2657 		}
2658 	}
2659 
2660 	link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
2661 	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
2662 		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
2663 			    sizeof(link_sec), &link_sec);
2664 
2665 	if (lmp_bredr_capable(hdev)) {
2666 		if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
2667 			__hci_req_write_fast_connectable(req, true);
2668 		else
2669 			__hci_req_write_fast_connectable(req, false);
2670 		__hci_req_update_scan(req);
2671 		__hci_req_update_class(req);
2672 		__hci_req_update_name(req);
2673 		__hci_req_update_eir(req);
2674 	}
2675 
2676 	hci_dev_unlock(hdev);
2677 	return 0;
2678 }
2679 
2680 int __hci_req_hci_power_on(struct hci_dev *hdev)
2681 {
2682 	/* Register the available SMP channels (BR/EDR and LE) only when
2683 	 * successfully powering on the controller. This late
2684 	 * registration is required so that LE SMP can clearly decide if
2685 	 * the public address or static address is used.
2686 	 */
2687 	smp_register(hdev);
2688 
2689 	return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
2690 			      NULL);
2691 }
2692 
2693 void hci_request_setup(struct hci_dev *hdev)
2694 {
2695 	INIT_WORK(&hdev->discov_update, discov_update);
2696 	INIT_WORK(&hdev->scan_update, scan_update_work);
2697 	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
2698 	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2699 	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
2700 	INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
2701 	INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work);
2702 }
2703 
2704 void hci_request_cancel_all(struct hci_dev *hdev)
2705 {
2706 	hci_req_sync_cancel(hdev, ENODEV);
2707 
2708 	cancel_work_sync(&hdev->discov_update);
2709 	cancel_work_sync(&hdev->scan_update);
2710 	cancel_delayed_work_sync(&hdev->discov_off);
2711 	cancel_delayed_work_sync(&hdev->le_scan_disable);
2712 	cancel_delayed_work_sync(&hdev->le_scan_restart);
2713 
2714 	if (hdev->adv_instance_timeout) {
2715 		cancel_delayed_work_sync(&hdev->adv_instance_expire);
2716 		hdev->adv_instance_timeout = 0;
2717 	}
2718 
2719 	cancel_interleave_scan(hdev);
2720 }
2721