/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <linux/sched/signal.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"
#include "msft.h"
#include "eir.h"

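/* Helpers for building and running asynchronous HCI requests.
 *
 * A request is a queue of HCI command skbs that is submitted to the
 * controller as one unit. An illustrative usage sketch (not taken from a
 * real caller; error handling omitted and "cp"/"complete_cb" are just
 * placeholder names):
 *
 *	struct hci_cp_le_set_scan_enable cp = { .enable = LE_SCAN_ENABLE };
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
 *	hci_req_run(&req, complete_cb);
 *
 * complete_cb is a hci_req_complete_t callback that runs once the last
 * queued command has completed.
 */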
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

void hci_req_purge(struct hci_request *req)
{
	skb_queue_purge(&req->cmd_q);
}

bool hci_req_status_pend(struct hci_dev *hdev)
{
	return hdev->req_status == HCI_REQ_PEND;
}

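/* Submit the commands queued on @req to the controller: drop everything if
 * an error was recorded while building the request, reject empty requests,
 * tag the last command with the completion callback, splice the commands
 * onto hdev->cmd_q under its lock and schedule the command work.
 */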
static int req_run(struct hci_request *req, hci_req_complete_t complete,
		   hci_req_complete_skb_t complete_skb)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	if (complete) {
		bt_cb(skb)->hci.req_complete = complete;
	} else if (complete_skb) {
		bt_cb(skb)->hci.req_complete_skb = complete_skb;
		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
	}

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
	return req_run(req, NULL, complete);
}

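/* Completion handler used by __hci_req_sync(): record the result, keep a
 * reference to the response skb (if any) and wake up the waiter.
 */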
void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
			   struct sk_buff *skb)
{
	bt_dev_dbg(hdev, "result 0x%2.2x", result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb) {
			kfree_skb(hdev->req_skb);
			hdev->req_skb = skb_get(skb);
		}
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
						     unsigned long opt),
		   unsigned long opt, u32 timeout, u8 *hci_status)
{
	struct hci_request req;
	int err = 0;

	bt_dev_dbg(hdev, "start");

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	err = func(&req, opt);
	if (err) {
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		return err;
	}

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA) {
			if (hci_status)
				*hci_status = 0;
			return 0;
		}

		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;

		return err;
	}

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
			hdev->req_status != HCI_REQ_PEND, timeout);

	if (err == -ERESTARTSYS)
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		if (hci_status)
			*hci_status = hdev->req_result;
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;

	default:
		err = -ETIMEDOUT;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;
	}

	kfree_skb(hdev->req_skb);
	hdev->req_skb = NULL;
	hdev->req_status = hdev->req_result = 0;

	bt_dev_dbg(hdev, "end: err %d", err);

	return err;
}

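/* Build the request by calling @req, submit it and wait up to @timeout for
 * completion while holding the request sync lock. Fails with -ENETDOWN if
 * the controller is not up.
 */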
int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
						  unsigned long opt),
		 unsigned long opt, u32 timeout, u8 *hci_status)
{
	int ret;

	/* Serialize all requests */
	hci_req_sync_lock(hdev);
	/* Check the state after obtaining the lock to protect HCI_UP
	 * against any races with hci_dev_do_close() when the controller
	 * gets removed.
	 */
	if (test_bit(HCI_UP, &hdev->flags))
		ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
	else
		ret = -ENETDOWN;
	hci_req_sync_unlock(hdev);

	return ret;
}

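/* Allocate an skb carrying a single HCI command packet: the command header
 * followed by @plen bytes of parameters. Returns NULL if allocation fails.
 */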
struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
				const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		skb_put_data(skb, param, plen);

	bt_dev_dbg(hdev, "skb len %d", skb->len);

	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
	hci_skb_opcode(skb) = opcode;

	return skb;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	hci_skb_event(skb) = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	bt_dev_dbg(req->hdev, "HCI_REQ-0x%4.4x", opcode);
	hci_req_add_ev(req, opcode, plen, param, 0);
}

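/* Interleaved scanning alternates between an allowlist-filtered scan and an
 * unfiltered scan so that ADV monitors can still observe devices that are
 * not on the accept list. The state machine below is driven by
 * interleave_scan_work() on hdev->req_workqueue.
 */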
static void start_interleave_scan(struct hci_dev *hdev)
{
	hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
	queue_delayed_work(hdev->req_workqueue,
			   &hdev->interleave_scan, 0);
}

static bool is_interleave_scanning(struct hci_dev *hdev)
{
	return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
}

static void cancel_interleave_scan(struct hci_dev *hdev)
{
	bt_dev_dbg(hdev, "cancelling interleave scan");

	cancel_delayed_work_sync(&hdev->interleave_scan);

	hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
}

/* Return true if an interleave scan was started as a result of this call;
 * otherwise return false.
 */
static bool __hci_update_interleaved_scan(struct hci_dev *hdev)
{
	/* Do interleaved scan only if all of the following are true:
	 * - There is at least one ADV monitor
	 * - At least one pending LE connection or one device to be scanned for
	 * - Monitor offloading is not supported
	 * If so, we should alternate between allowlist scan and one without
	 * any filters to save power.
	 */
	bool use_interleaving = hci_is_adv_monitoring(hdev) &&
				!(list_empty(&hdev->pend_le_conns) &&
				  list_empty(&hdev->pend_le_reports)) &&
				hci_get_adv_monitor_offload_ext(hdev) ==
				    HCI_ADV_MONITOR_EXT_NONE;
	bool is_interleaving = is_interleave_scanning(hdev);

	if (use_interleaving && !is_interleaving) {
		start_interleave_scan(hdev);
		bt_dev_dbg(hdev, "starting interleave scan");
		return true;
	}

	if (!use_interleaving && is_interleaving)
		cancel_interleave_scan(hdev);

	return false;
}

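/* Queue the commands needed to stop LE scanning, using the extended or the
 * legacy variant depending on controller support. Address resolution is also
 * disabled unless @rpa_le_conn indicates an RPA-based LE connection still
 * needs it.
 */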
void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
			    &cp);
	} else {
		struct hci_cp_le_set_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	/* Disable address resolution */
	if (hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) {
		__u8 enable = 0x00;

		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
	}
}

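/* Queue commands to remove a device from the controller accept list and,
 * when LL privacy is in use and an IRK is known for it, from the resolving
 * list as well.
 */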
static void del_from_accept_list(struct hci_request *req, bdaddr_t *bdaddr,
				 u8 bdaddr_type)
{
	struct hci_cp_le_del_from_accept_list cp;

	cp.bdaddr_type = bdaddr_type;
	bacpy(&cp.bdaddr, bdaddr);

	bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from accept list", &cp.bdaddr,
		   cp.bdaddr_type);
	hci_req_add(req, HCI_OP_LE_DEL_FROM_ACCEPT_LIST, sizeof(cp), &cp);

	if (use_ll_privacy(req->hdev)) {
		struct smp_irk *irk;

		irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type);
		if (irk) {
			struct hci_cp_le_del_from_resolv_list cp;

			cp.bdaddr_type = bdaddr_type;
			bacpy(&cp.bdaddr, bdaddr);

			hci_req_add(req, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
				    sizeof(cp), &cp);
		}
	}
}

/* Adds connection to accept list if needed. On error, returns -1. */
static int add_to_accept_list(struct hci_request *req,
			      struct hci_conn_params *params, u8 *num_entries,
			      bool allow_rpa)
{
	struct hci_cp_le_add_to_accept_list cp;
	struct hci_dev *hdev = req->hdev;

	/* Already in accept list */
	if (hci_bdaddr_list_lookup(&hdev->le_accept_list, &params->addr,
				   params->addr_type))
		return 0;

	/* Accept list is full: fall back to the filter policy that accepts
	 * all advertising.
	 */
	if (*num_entries >= hdev->le_accept_list_size)
		return -1;

	/* Accept list cannot be used with RPAs */
	if (!allow_rpa &&
	    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) {
		return -1;
	}

	/* During suspend, only wakeable devices can be in accept list */
	if (hdev->suspended &&
	    !(params->flags & HCI_CONN_FLAG_REMOTE_WAKEUP))
		return 0;

	*num_entries += 1;
	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);

	bt_dev_dbg(hdev, "Add %pMR (0x%x) to accept list", &cp.bdaddr,
		   cp.bdaddr_type);
	hci_req_add(req, HCI_OP_LE_ADD_TO_ACCEPT_LIST, sizeof(cp), &cp);

	if (use_ll_privacy(hdev)) {
		struct smp_irk *irk;

		irk = hci_find_irk_by_addr(hdev, &params->addr,
					   params->addr_type);
		if (irk) {
			struct hci_cp_le_add_to_resolv_list cp;

			cp.bdaddr_type = params->addr_type;
			bacpy(&cp.bdaddr, &params->addr);
			memcpy(cp.peer_irk, irk->val, 16);

			if (hci_dev_test_flag(hdev, HCI_PRIVACY))
				memcpy(cp.local_irk, hdev->irk, 16);
			else
				memset(cp.local_irk, 0, 16);

			hci_req_add(req, HCI_OP_LE_ADD_TO_RESOLV_LIST,
				    sizeof(cp), &cp);
		}
	}

	return 0;
}

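/* Synchronize the controller accept list with the pending connection and
 * pending report lists. Returns the scan filter policy to use: 0x00 to
 * accept all advertising or 0x01 to use the accept list.
 */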
static u8 update_accept_list(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;
	struct bdaddr_list *b;
	u8 num_entries = 0;
	bool pend_conn, pend_report;
	/* We allow usage of the accept list even with RPAs in suspend. In the
	 * worst case, we won't be able to wake from devices that use the
	 * privacy 1.2 features. Additionally, once we support privacy 1.2 and
	 * IRK offloading, we can update this to also check for those
	 * conditions.
	 */
	bool allow_rpa = hdev->suspended;

	if (use_ll_privacy(hdev))
		allow_rpa = true;

	/* Go through the current accept list programmed into the
	 * controller one by one and check if that address is still
	 * in the list of pending connections or list of devices to
	 * report. If not present in either list, then queue the
	 * command to remove it from the controller.
	 */
	list_for_each_entry(b, &hdev->le_accept_list, list) {
		pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
						      &b->bdaddr,
						      b->bdaddr_type);
		pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
							&b->bdaddr,
							b->bdaddr_type);

		/* If the device is not likely to connect or report,
		 * remove it from the accept list.
		 */
		if (!pend_conn && !pend_report) {
			del_from_accept_list(req, &b->bdaddr, b->bdaddr_type);
			continue;
		}

		/* Accept list cannot be used with RPAs */
		if (!allow_rpa &&
		    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
		    hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
			return 0x00;
		}

		num_entries++;
	}

	/* Since all no longer valid accept list entries have been
	 * removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If the list of devices is larger than the number of
	 * available accept list entries in the controller, then
	 * just abort and return the filter policy value that does
	 * not use the accept list.
	 */
	list_for_each_entry(params, &hdev->pend_le_conns, action) {
		if (add_to_accept_list(req, params, &num_entries, allow_rpa))
			return 0x00;
	}

	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * accept list if there is still space. Abort if space runs out.
	 */
	list_for_each_entry(params, &hdev->pend_le_reports, action) {
		if (add_to_accept_list(req, params, &num_entries, allow_rpa))
			return 0x00;
	}

	/* Use the allowlist unless the following conditions are all true:
	 * - We are not currently suspending
	 * - One or more ADV monitors are registered and offloading is not
	 *   supported
	 * - Interleaved scanning is not currently using the allowlist
	 */
	if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
	    hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE &&
	    hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
		return 0x00;

	/* Select filter policy to use accept list */
	return 0x01;
}

static bool scan_use_rpa(struct hci_dev *hdev)
{
	return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

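/* Queue the scan parameter and scan enable commands, using the extended
 * variants when the controller supports them. When LL privacy is supported
 * and @addr_resolv is true, address resolution is enabled first.
 */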
static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
			       u16 window, u8 own_addr_type, u8 filter_policy,
			       bool filter_dup, bool addr_resolv)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	if (use_ll_privacy(hdev) && addr_resolv) {
		u8 enable = 0x01;

		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
	}

	/* Use extended scanning if the Set Extended Scan Parameters and Set
	 * Extended Scan Enable commands are supported.
	 */
	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_params *ext_param_cp;
		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
		struct hci_cp_le_scan_phy_params *phy_params;
		u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
		u32 plen;

		ext_param_cp = (void *)data;
		phy_params = (void *)ext_param_cp->data;

		memset(ext_param_cp, 0, sizeof(*ext_param_cp));
		ext_param_cp->own_addr_type = own_addr_type;
		ext_param_cp->filter_policy = filter_policy;

		plen = sizeof(*ext_param_cp);

		if (scan_1m(hdev) || scan_2m(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		if (scan_coded(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
			    plen, ext_param_cp);

		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
		ext_enable_cp.enable = LE_SCAN_ENABLE;
		ext_enable_cp.filter_dup = filter_dup;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
			    sizeof(ext_enable_cp), &ext_enable_cp);
	} else {
		struct hci_cp_le_set_scan_param param_cp;
		struct hci_cp_le_set_scan_enable enable_cp;

		memset(&param_cp, 0, sizeof(param_cp));
		param_cp.type = type;
		param_cp.interval = cpu_to_le16(interval);
		param_cp.window = cpu_to_le16(window);
		param_cp.own_address_type = own_addr_type;
		param_cp.filter_policy = filter_policy;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = filter_dup;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
	}
}

static void set_random_addr(struct hci_request *req, bdaddr_t *rpa);
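/* Pick the own address type to use for scanning or advertising and queue any
 * command needed to program a random address: an RPA when @use_rpa is set, a
 * non-resolvable address when privacy is required without an RPA, the static
 * address when one is configured, or the public address otherwise.
 */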
static int hci_update_random_address(struct hci_request *req,
				     bool require_privacy, bool use_rpa,
				     u8 *own_addr_type)
{
	struct hci_dev *hdev = req->hdev;
	int err;

	/* If privacy is enabled, use a resolvable private address. If the
	 * current RPA has expired or something other than the current RPA
	 * is in use, generate a new one.
	 */
	if (use_rpa) {
		/* If the controller supports LL Privacy, use own address
		 * type 0x03 (resolvable private address).
		 */
		if (use_ll_privacy(hdev))
			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
		else
			*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (rpa_valid(hdev))
			return 0;

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			bt_dev_err(hdev, "failed to generate new RPA");
			return err;
		}

		set_random_addr(req, &hdev->rpa);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use a non-resolvable private address. This is useful for active
	 * scanning and non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from six random bytes with the two most significant
			 * bits cleared.
			 */
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;

			/* The non-resolvable private address shall not be
			 * equal to the public address.
			 */
			if (bacmp(&hdev->bdaddr, &nrpa))
				break;
		}

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		set_random_addr(req, &nrpa);
		return 0;
	}

	/* If forcing static address is in use or there is no public
	 * address, use the static address as the random address (but skip
	 * the HCI command if the current random address is already the
	 * static one).
	 *
	 * In case BR/EDR has been disabled on a dual-mode controller
	 * and a static address has been configured, then use that
	 * address instead of the public BR/EDR address.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		*own_addr_type = ADDR_LE_DEV_RANDOM;
		if (bacmp(&hdev->static_addr, &hdev->random_addr))
			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);
		return 0;
	}

	/* Neither privacy nor static address is being used so use a
	 * public address.
	 */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}

/* Call hci_req_add_le_scan_disable() first to disable controller-based
 * address resolution; otherwise the resolving list cannot be
 * reconfigured.
 */
void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	u8 filter_policy;
	u16 window, interval;
	/* Default is to enable duplicates filter */
	u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	/* Background scanning should run with address resolution */
	bool addr_resolv = true;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	/* Set require_privacy to false since no SCAN_REQ are sent
	 * during passive scanning. Not using a non-resolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address(req, false, scan_use_rpa(hdev),
				      &own_addr_type))
		return;

	if (hdev->enable_advmon_interleave_scan &&
	    __hci_update_interleaved_scan(hdev))
		return;

	bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);
	/* Adding or removing entries from the accept list must
	 * happen before enabling scanning. The controller does
	 * not allow accept list modification while scanning.
	 */
	filter_policy = update_accept_list(req);

	/* When the controller is using resolvable random addresses and LE
	 * privacy is enabled, controllers that support the Extended Scanner
	 * Filter Policies feature can also handle directed advertising.
	 *
	 * So instead of using filter policies 0x00 (no accept list)
	 * and 0x01 (accept list enabled), use the extended filter policies
	 * 0x02 (no accept list) and 0x03 (accept list enabled).
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
		filter_policy |= 0x02;

	if (hdev->suspended) {
		window = hdev->le_scan_window_suspend;
		interval = hdev->le_scan_int_suspend;
	} else if (hci_is_le_conn_scanning(hdev)) {
		window = hdev->le_scan_window_connect;
		interval = hdev->le_scan_int_connect;
	} else if (hci_is_adv_monitoring(hdev)) {
		window = hdev->le_scan_window_adv_monitor;
		interval = hdev->le_scan_int_adv_monitor;

		/* Disable the duplicates filter when scanning for
		 * advertisement monitors for the following reasons.
		 *
		 * For HW pattern filtering (e.g. MSFT), Realtek and Qualcomm
		 * controllers ignore RSSI_Sampling_Period when the duplicates
		 * filter is enabled.
		 *
		 * For SW pattern filtering, when we're not doing interleaved
		 * scanning, it is necessary to disable the duplicates filter,
		 * otherwise hosts can only receive one advertisement and it's
		 * impossible to know if a peer is still in range.
		 */
		filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
	} else {
		window = hdev->le_scan_window;
		interval = hdev->le_scan_interval;
	}

	bt_dev_dbg(hdev, "LE passive scan with accept list = %d",
		   filter_policy);
	hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
			   own_addr_type, filter_policy, filter_dup,
			   addr_resolv);
}

static int hci_req_add_le_interleaved_scan(struct hci_request *req,
					   unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	int ret = 0;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
		hci_req_add_le_scan_disable(req, false);
	hci_req_add_le_passive_scan(req);

	switch (hdev->interleave_scan_state) {
	case INTERLEAVE_SCAN_ALLOWLIST:
		bt_dev_dbg(hdev, "next state: no filter");
		hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
		break;
	case INTERLEAVE_SCAN_NO_FILTER:
		bt_dev_dbg(hdev, "next state: allowlist");
		hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST;
		break;
	case INTERLEAVE_SCAN_NONE:
		BT_ERR("unexpected error");
		ret = -1;
	}

	hci_dev_unlock(hdev);

	return ret;
}

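/* Work item driving the interleave scan state machine: it restarts passive
 * scanning with the current filter state, advances to the next state and
 * re-arms itself after the state's configured duration, unless interleaving
 * has been cancelled in the meantime.
 */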
static void interleave_scan_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    interleave_scan.work);
	u8 status;
	unsigned long timeout;

	if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) {
		timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration);
	} else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) {
		timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration);
	} else {
		bt_dev_err(hdev, "unexpected error");
		return;
	}

	hci_req_sync(hdev, hci_req_add_le_interleaved_scan, 0,
		     HCI_CMD_TIMEOUT, &status);

	/* Don't continue interleaving if it was canceled */
	if (is_interleave_scanning(hdev))
		queue_delayed_work(hdev->req_workqueue,
				   &hdev->interleave_scan, timeout);
}

static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
	struct hci_dev *hdev = req->hdev;

	/* If we're advertising or initiating an LE connection we can't
	 * go ahead and change the random address at this time. This is
	 * because the eventual initiator address used for the
	 * subsequently created connection will be undefined (some
	 * controllers use the new address and others the one we had
	 * when the operation started).
	 *
	 * In this kind of scenario skip the update and let the random
	 * address be updated at the next cycle.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
	    hci_lookup_le_connect(hdev)) {
		bt_dev_dbg(hdev, "Deferring random address update");
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		return;
	}

	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}

void hci_request_setup(struct hci_dev *hdev)
{
	INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work);
}

void hci_request_cancel_all(struct hci_dev *hdev)
{
	hci_cmd_sync_cancel_sync(hdev, ENODEV);

	cancel_interleave_scan(hdev);
}