xref: /openbmc/linux/net/bluetooth/hci_request.c (revision de8c12110a130337c8e7e7b8250de0580e644dee)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3 
4    Copyright (C) 2014 Intel Corporation
5 
6    This program is free software; you can redistribute it and/or modify
7    it under the terms of the GNU General Public License version 2 as
8    published by the Free Software Foundation;
9 
10    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
11    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
12    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
13    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
14    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
15    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 
19    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
20    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
21    SOFTWARE IS DISCLAIMED.
22 */
23 
24 #include <linux/sched/signal.h>
25 
26 #include <net/bluetooth/bluetooth.h>
27 #include <net/bluetooth/hci_core.h>
28 #include <net/bluetooth/mgmt.h>
29 
30 #include "smp.h"
31 #include "hci_request.h"
32 #include "msft.h"
33 
34 #define HCI_REQ_DONE	  0
35 #define HCI_REQ_PEND	  1
36 #define HCI_REQ_CANCELED  2
37 
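/* Request construction helpers. A struct hci_request collects one or
 * more HCI commands that are later spliced onto hdev->cmd_q and sent to
 * the controller as one unit. Typical usage (a minimal sketch, with cp
 * standing in for the command parameters), mirroring
 * hci_req_update_adv_data() further down in this file:
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
 *	hci_req_run(&req, NULL);
 */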
38 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
39 {
40 	skb_queue_head_init(&req->cmd_q);
41 	req->hdev = hdev;
42 	req->err = 0;
43 }
44 
45 void hci_req_purge(struct hci_request *req)
46 {
47 	skb_queue_purge(&req->cmd_q);
48 }
49 
50 bool hci_req_status_pend(struct hci_dev *hdev)
51 {
52 	return hdev->req_status == HCI_REQ_PEND;
53 }
54 
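/* Splice the commands queued on this request onto hdev->cmd_q and kick
 * the command work. The completion callback (plain or skb-based) is
 * attached to the last command so it runs once the whole request has
 * been processed by the controller.
 */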
55 static int req_run(struct hci_request *req, hci_req_complete_t complete,
56 		   hci_req_complete_skb_t complete_skb)
57 {
58 	struct hci_dev *hdev = req->hdev;
59 	struct sk_buff *skb;
60 	unsigned long flags;
61 
62 	bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q));
63 
64 	/* If an error occurred during request building, remove all HCI
65 	 * commands queued on the HCI request queue.
66 	 */
67 	if (req->err) {
68 		skb_queue_purge(&req->cmd_q);
69 		return req->err;
70 	}
71 
72 	/* Do not allow empty requests */
73 	if (skb_queue_empty(&req->cmd_q))
74 		return -ENODATA;
75 
76 	skb = skb_peek_tail(&req->cmd_q);
77 	if (complete) {
78 		bt_cb(skb)->hci.req_complete = complete;
79 	} else if (complete_skb) {
80 		bt_cb(skb)->hci.req_complete_skb = complete_skb;
81 		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
82 	}
83 
84 	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
85 	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
86 	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
87 
88 	queue_work(hdev->workqueue, &hdev->cmd_work);
89 
90 	return 0;
91 }
92 
93 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
94 {
95 	return req_run(req, complete, NULL);
96 }
97 
98 int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
99 {
100 	return req_run(req, NULL, complete);
101 }
102 
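/* Completion callback used by the synchronous request helpers below:
 * record the result (and, if provided, the response skb) in hdev and
 * wake up the thread waiting on hdev->req_wait_q.
 */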
103 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
104 				  struct sk_buff *skb)
105 {
106 	bt_dev_dbg(hdev, "result 0x%2.2x", result);
107 
108 	if (hdev->req_status == HCI_REQ_PEND) {
109 		hdev->req_result = result;
110 		hdev->req_status = HCI_REQ_DONE;
111 		if (skb)
112 			hdev->req_skb = skb_get(skb);
113 		wake_up_interruptible(&hdev->req_wait_q);
114 	}
115 }
116 
117 void hci_req_sync_cancel(struct hci_dev *hdev, int err)
118 {
119 	bt_dev_dbg(hdev, "err 0x%2.2x", err);
120 
121 	if (hdev->req_status == HCI_REQ_PEND) {
122 		hdev->req_result = err;
123 		hdev->req_status = HCI_REQ_CANCELED;
124 		wake_up_interruptible(&hdev->req_wait_q);
125 	}
126 }
127 
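/* Send a single HCI command and sleep until the matching completion (or
 * the requested @event) arrives or the timeout expires. Returns the
 * response skb on success, or an ERR_PTR() on error or timeout.
 */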
128 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
129 				  const void *param, u8 event, u32 timeout)
130 {
131 	struct hci_request req;
132 	struct sk_buff *skb;
133 	int err = 0;
134 
135 	bt_dev_dbg(hdev, "");
136 
137 	hci_req_init(&req, hdev);
138 
139 	hci_req_add_ev(&req, opcode, plen, param, event);
140 
141 	hdev->req_status = HCI_REQ_PEND;
142 
143 	err = hci_req_run_skb(&req, hci_req_sync_complete);
144 	if (err < 0)
145 		return ERR_PTR(err);
146 
147 	err = wait_event_interruptible_timeout(hdev->req_wait_q,
148 			hdev->req_status != HCI_REQ_PEND, timeout);
149 
150 	if (err == -ERESTARTSYS)
151 		return ERR_PTR(-EINTR);
152 
153 	switch (hdev->req_status) {
154 	case HCI_REQ_DONE:
155 		err = -bt_to_errno(hdev->req_result);
156 		break;
157 
158 	case HCI_REQ_CANCELED:
159 		err = -hdev->req_result;
160 		break;
161 
162 	default:
163 		err = -ETIMEDOUT;
164 		break;
165 	}
166 
167 	hdev->req_status = hdev->req_result = 0;
168 	skb = hdev->req_skb;
169 	hdev->req_skb = NULL;
170 
171 	bt_dev_dbg(hdev, "end: err %d", err);
172 
173 	if (err < 0) {
174 		kfree_skb(skb);
175 		return ERR_PTR(err);
176 	}
177 
178 	if (!skb)
179 		return ERR_PTR(-ENODATA);
180 
181 	return skb;
182 }
183 EXPORT_SYMBOL(__hci_cmd_sync_ev);
184 
185 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
186 			       const void *param, u32 timeout)
187 {
188 	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
189 }
190 EXPORT_SYMBOL(__hci_cmd_sync);
191 
192 /* Execute request and wait for completion. */
193 int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
194 						     unsigned long opt),
195 		   unsigned long opt, u32 timeout, u8 *hci_status)
196 {
197 	struct hci_request req;
198 	int err = 0;
199 
200 	bt_dev_dbg(hdev, "start");
201 
202 	hci_req_init(&req, hdev);
203 
204 	hdev->req_status = HCI_REQ_PEND;
205 
206 	err = func(&req, opt);
207 	if (err) {
208 		if (hci_status)
209 			*hci_status = HCI_ERROR_UNSPECIFIED;
210 		return err;
211 	}
212 
213 	err = hci_req_run_skb(&req, hci_req_sync_complete);
214 	if (err < 0) {
215 		hdev->req_status = 0;
216 
217 		/* ENODATA means the HCI request command queue is empty.
218 		 * This can happen when a request with conditionals doesn't
219 		 * trigger any commands to be sent. This is normal behavior
220 		 * and should not trigger an error return.
221 		 */
222 		if (err == -ENODATA) {
223 			if (hci_status)
224 				*hci_status = 0;
225 			return 0;
226 		}
227 
228 		if (hci_status)
229 			*hci_status = HCI_ERROR_UNSPECIFIED;
230 
231 		return err;
232 	}
233 
234 	err = wait_event_interruptible_timeout(hdev->req_wait_q,
235 			hdev->req_status != HCI_REQ_PEND, timeout);
236 
237 	if (err == -ERESTARTSYS)
238 		return -EINTR;
239 
240 	switch (hdev->req_status) {
241 	case HCI_REQ_DONE:
242 		err = -bt_to_errno(hdev->req_result);
243 		if (hci_status)
244 			*hci_status = hdev->req_result;
245 		break;
246 
247 	case HCI_REQ_CANCELED:
248 		err = -hdev->req_result;
249 		if (hci_status)
250 			*hci_status = HCI_ERROR_UNSPECIFIED;
251 		break;
252 
253 	default:
254 		err = -ETIMEDOUT;
255 		if (hci_status)
256 			*hci_status = HCI_ERROR_UNSPECIFIED;
257 		break;
258 	}
259 
260 	kfree_skb(hdev->req_skb);
261 	hdev->req_skb = NULL;
262 	hdev->req_status = hdev->req_result = 0;
263 
264 	bt_dev_dbg(hdev, "end: err %d", err);
265 
266 	return err;
267 }
268 
269 int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
270 						  unsigned long opt),
271 		 unsigned long opt, u32 timeout, u8 *hci_status)
272 {
273 	int ret;
274 
275 	if (!test_bit(HCI_UP, &hdev->flags))
276 		return -ENETDOWN;
277 
278 	/* Serialize all requests */
279 	hci_req_sync_lock(hdev);
280 	ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
281 	hci_req_sync_unlock(hdev);
282 
283 	return ret;
284 }
285 
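/* Allocate and build an skb holding a single HCI command: the command
 * header (opcode and parameter length) followed by @plen bytes of
 * @param. Returns NULL if allocation fails.
 */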
286 struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
287 				const void *param)
288 {
289 	int len = HCI_COMMAND_HDR_SIZE + plen;
290 	struct hci_command_hdr *hdr;
291 	struct sk_buff *skb;
292 
293 	skb = bt_skb_alloc(len, GFP_ATOMIC);
294 	if (!skb)
295 		return NULL;
296 
297 	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
298 	hdr->opcode = cpu_to_le16(opcode);
299 	hdr->plen   = plen;
300 
301 	if (plen)
302 		skb_put_data(skb, param, plen);
303 
304 	bt_dev_dbg(hdev, "skb len %d", skb->len);
305 
306 	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
307 	hci_skb_opcode(skb) = opcode;
308 
309 	return skb;
310 }
311 
312 /* Queue a command to an asynchronous HCI request */
313 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
314 		    const void *param, u8 event)
315 {
316 	struct hci_dev *hdev = req->hdev;
317 	struct sk_buff *skb;
318 
319 	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
320 
321 	/* If an error occurred during request building, there is no point in
322 	 * queueing the HCI command. We can simply return.
323 	 */
324 	if (req->err)
325 		return;
326 
327 	skb = hci_prepare_cmd(hdev, opcode, plen, param);
328 	if (!skb) {
329 		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
330 			   opcode);
331 		req->err = -ENOMEM;
332 		return;
333 	}
334 
335 	if (skb_queue_empty(&req->cmd_q))
336 		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
337 
338 	bt_cb(skb)->hci.req_event = event;
339 
340 	skb_queue_tail(&req->cmd_q, skb);
341 }
342 
343 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
344 		 const void *param)
345 {
346 	hci_req_add_ev(req, opcode, plen, param, 0);
347 }
348 
349 void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
350 {
351 	struct hci_dev *hdev = req->hdev;
352 	struct hci_cp_write_page_scan_activity acp;
353 	u8 type;
354 
355 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
356 		return;
357 
358 	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
359 		return;
360 
361 	if (enable) {
362 		type = PAGE_SCAN_TYPE_INTERLACED;
363 
364 		/* 160 msec page scan interval */
365 		acp.interval = cpu_to_le16(0x0100);
366 	} else {
367 		type = hdev->def_page_scan_type;
368 		acp.interval = cpu_to_le16(hdev->def_page_scan_int);
369 	}
370 
371 	acp.window = cpu_to_le16(hdev->def_page_scan_window);
372 
373 	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
374 	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
375 		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
376 			    sizeof(acp), &acp);
377 
378 	if (hdev->page_scan_type != type)
379 		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
380 }
381 
382 static void start_interleave_scan(struct hci_dev *hdev)
383 {
384 	hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
385 	queue_delayed_work(hdev->req_workqueue,
386 			   &hdev->interleave_scan, 0);
387 }
388 
389 static bool is_interleave_scanning(struct hci_dev *hdev)
390 {
391 	return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
392 }
393 
394 static void cancel_interleave_scan(struct hci_dev *hdev)
395 {
396 	bt_dev_dbg(hdev, "cancelling interleave scan");
397 
398 	cancel_delayed_work_sync(&hdev->interleave_scan);
399 
400 	hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
401 }
402 
403 /* Return true if this call starts interleave scanning (i.e. it was not
404  * already running when the function was entered); otherwise return false.
405  */
406 static bool __hci_update_interleaved_scan(struct hci_dev *hdev)
407 {
408 	/* Do interleaved scan only if all of the following are true:
409 	 * - There is at least one ADV monitor
410 	 * - At least one pending LE connection or one device to be scanned for
411 	 * - Monitor offloading is not supported
412 	 * If so, we should alternate between allowlist scan and one without
413 	 * any filters to save power.
414 	 */
415 	bool use_interleaving = hci_is_adv_monitoring(hdev) &&
416 				!(list_empty(&hdev->pend_le_conns) &&
417 				  list_empty(&hdev->pend_le_reports)) &&
418 				hci_get_adv_monitor_offload_ext(hdev) ==
419 				    HCI_ADV_MONITOR_EXT_NONE;
420 	bool is_interleaving = is_interleave_scanning(hdev);
421 
422 	if (use_interleaving && !is_interleaving) {
423 		start_interleave_scan(hdev);
424 		bt_dev_dbg(hdev, "starting interleave scan");
425 		return true;
426 	}
427 
428 	if (!use_interleaving && is_interleaving)
429 		cancel_interleave_scan(hdev);
430 
431 	return false;
432 }
433 
434 /* This function controls the background scanning based on hdev->pend_le_conns
435  * list. If there are pending LE connections we start the background scanning,
436  * otherwise we stop it.
437  *
438  * This function requires the caller holds hdev->lock.
439  */
440 static void __hci_update_background_scan(struct hci_request *req)
441 {
442 	struct hci_dev *hdev = req->hdev;
443 
444 	if (!test_bit(HCI_UP, &hdev->flags) ||
445 	    test_bit(HCI_INIT, &hdev->flags) ||
446 	    hci_dev_test_flag(hdev, HCI_SETUP) ||
447 	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
448 	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
449 	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
450 		return;
451 
452 	/* No point in doing scanning if LE support hasn't been enabled */
453 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
454 		return;
455 
456 	/* If discovery is active don't interfere with it */
457 	if (hdev->discovery.state != DISCOVERY_STOPPED)
458 		return;
459 
460 	/* Reset RSSI and UUID filters when starting background scanning
461 	 * since these filters are meant for service discovery only.
462 	 *
463 	 * The Start Discovery and Start Service Discovery operations
464 	 * ensure to set proper values for RSSI threshold and UUID
465 	 * filter list. So it is safe to just reset them here.
466 	 */
467 	hci_discovery_filter_clear(hdev);
468 
469 	bt_dev_dbg(hdev, "ADV monitoring is %s",
470 		   hci_is_adv_monitoring(hdev) ? "on" : "off");
471 
472 	if (list_empty(&hdev->pend_le_conns) &&
473 	    list_empty(&hdev->pend_le_reports) &&
474 	    !hci_is_adv_monitoring(hdev)) {
475 		/* If there are no pending LE connections, no devices to
476 		 * be scanned for and no ADV monitors, we should stop the
477 		 * background scanning.
478 		 */
479 
480 		/* If controller is not scanning we are done. */
481 		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
482 			return;
483 
484 		hci_req_add_le_scan_disable(req, false);
485 
486 		bt_dev_dbg(hdev, "stopping background scanning");
487 	} else {
488 		/* If there is at least one pending LE connection, we should
489 		 * keep the background scan running.
490 		 */
491 
492 		/* If controller is connecting, we should not start scanning
493 		 * since some controllers are not able to scan and connect at
494 		 * the same time.
495 		 */
496 		if (hci_lookup_le_connect(hdev))
497 			return;
498 
499 		/* If controller is currently scanning, we stop it to ensure we
500 		 * don't miss any advertising (due to duplicates filter).
501 		 */
502 		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
503 			hci_req_add_le_scan_disable(req, false);
504 
505 		hci_req_add_le_passive_scan(req);
506 		bt_dev_dbg(hdev, "starting background scanning");
507 	}
508 }
509 
510 void __hci_req_update_name(struct hci_request *req)
511 {
512 	struct hci_dev *hdev = req->hdev;
513 	struct hci_cp_write_local_name cp;
514 
515 	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
516 
517 	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
518 }
519 
520 #define PNP_INFO_SVCLASS_ID		0x1200
521 
522 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
523 {
524 	u8 *ptr = data, *uuids_start = NULL;
525 	struct bt_uuid *uuid;
526 
527 	if (len < 4)
528 		return ptr;
529 
530 	list_for_each_entry(uuid, &hdev->uuids, list) {
531 		u16 uuid16;
532 
533 		if (uuid->size != 16)
534 			continue;
535 
536 		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
537 		if (uuid16 < 0x1100)
538 			continue;
539 
540 		if (uuid16 == PNP_INFO_SVCLASS_ID)
541 			continue;
542 
543 		if (!uuids_start) {
544 			uuids_start = ptr;
545 			uuids_start[0] = 1;
546 			uuids_start[1] = EIR_UUID16_ALL;
547 			ptr += 2;
548 		}
549 
550 		/* Stop if not enough space to put next UUID */
551 		if ((ptr - data) + sizeof(u16) > len) {
552 			uuids_start[1] = EIR_UUID16_SOME;
553 			break;
554 		}
555 
556 		*ptr++ = (uuid16 & 0x00ff);
557 		*ptr++ = (uuid16 & 0xff00) >> 8;
558 		uuids_start[0] += sizeof(uuid16);
559 	}
560 
561 	return ptr;
562 }
563 
564 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
565 {
566 	u8 *ptr = data, *uuids_start = NULL;
567 	struct bt_uuid *uuid;
568 
569 	if (len < 6)
570 		return ptr;
571 
572 	list_for_each_entry(uuid, &hdev->uuids, list) {
573 		if (uuid->size != 32)
574 			continue;
575 
576 		if (!uuids_start) {
577 			uuids_start = ptr;
578 			uuids_start[0] = 1;
579 			uuids_start[1] = EIR_UUID32_ALL;
580 			ptr += 2;
581 		}
582 
583 		/* Stop if not enough space to put next UUID */
584 		if ((ptr - data) + sizeof(u32) > len) {
585 			uuids_start[1] = EIR_UUID32_SOME;
586 			break;
587 		}
588 
589 		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
590 		ptr += sizeof(u32);
591 		uuids_start[0] += sizeof(u32);
592 	}
593 
594 	return ptr;
595 }
596 
597 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
598 {
599 	u8 *ptr = data, *uuids_start = NULL;
600 	struct bt_uuid *uuid;
601 
602 	if (len < 18)
603 		return ptr;
604 
605 	list_for_each_entry(uuid, &hdev->uuids, list) {
606 		if (uuid->size != 128)
607 			continue;
608 
609 		if (!uuids_start) {
610 			uuids_start = ptr;
611 			uuids_start[0] = 1;
612 			uuids_start[1] = EIR_UUID128_ALL;
613 			ptr += 2;
614 		}
615 
616 		/* Stop if not enough space to put next UUID */
617 		if ((ptr - data) + 16 > len) {
618 			uuids_start[1] = EIR_UUID128_SOME;
619 			break;
620 		}
621 
622 		memcpy(ptr, uuid->uuid, 16);
623 		ptr += 16;
624 		uuids_start[0] += 16;
625 	}
626 
627 	return ptr;
628 }
629 
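/* Build the extended inquiry response data: local name (complete or
 * shortened), inquiry TX power, Device ID record and the 16/32/128-bit
 * service class UUID lists, in that order.
 */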
630 static void create_eir(struct hci_dev *hdev, u8 *data)
631 {
632 	u8 *ptr = data;
633 	size_t name_len;
634 
635 	name_len = strlen(hdev->dev_name);
636 
637 	if (name_len > 0) {
638 		/* EIR Data type */
639 		if (name_len > 48) {
640 			name_len = 48;
641 			ptr[1] = EIR_NAME_SHORT;
642 		} else
643 			ptr[1] = EIR_NAME_COMPLETE;
644 
645 		/* EIR Data length */
646 		ptr[0] = name_len + 1;
647 
648 		memcpy(ptr + 2, hdev->dev_name, name_len);
649 
650 		ptr += (name_len + 2);
651 	}
652 
653 	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
654 		ptr[0] = 2;
655 		ptr[1] = EIR_TX_POWER;
656 		ptr[2] = (u8) hdev->inq_tx_power;
657 
658 		ptr += 3;
659 	}
660 
661 	if (hdev->devid_source > 0) {
662 		ptr[0] = 9;
663 		ptr[1] = EIR_DEVICE_ID;
664 
665 		put_unaligned_le16(hdev->devid_source, ptr + 2);
666 		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
667 		put_unaligned_le16(hdev->devid_product, ptr + 6);
668 		put_unaligned_le16(hdev->devid_version, ptr + 8);
669 
670 		ptr += 10;
671 	}
672 
673 	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
674 	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
675 	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
676 }
677 
678 void __hci_req_update_eir(struct hci_request *req)
679 {
680 	struct hci_dev *hdev = req->hdev;
681 	struct hci_cp_write_eir cp;
682 
683 	if (!hdev_is_powered(hdev))
684 		return;
685 
686 	if (!lmp_ext_inq_capable(hdev))
687 		return;
688 
689 	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
690 		return;
691 
692 	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
693 		return;
694 
695 	memset(&cp, 0, sizeof(cp));
696 
697 	create_eir(hdev, cp.data);
698 
699 	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
700 		return;
701 
702 	memcpy(hdev->eir, cp.data, sizeof(cp.data));
703 
704 	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
705 }
706 
707 void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)
708 {
709 	struct hci_dev *hdev = req->hdev;
710 
711 	if (hdev->scanning_paused) {
712 		bt_dev_dbg(hdev, "Scanning is paused for suspend");
713 		return;
714 	}
715 
716 	if (hdev->suspended)
717 		set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
718 
719 	if (use_ext_scan(hdev)) {
720 		struct hci_cp_le_set_ext_scan_enable cp;
721 
722 		memset(&cp, 0, sizeof(cp));
723 		cp.enable = LE_SCAN_DISABLE;
724 		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
725 			    &cp);
726 	} else {
727 		struct hci_cp_le_set_scan_enable cp;
728 
729 		memset(&cp, 0, sizeof(cp));
730 		cp.enable = LE_SCAN_DISABLE;
731 		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
732 	}
733 
734 	/* Disable address resolution */
735 	if (use_ll_privacy(hdev) &&
736 	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
737 	    hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) {
738 		__u8 enable = 0x00;
739 
740 		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
741 	}
742 }
743 
744 static void del_from_white_list(struct hci_request *req, bdaddr_t *bdaddr,
745 				u8 bdaddr_type)
746 {
747 	struct hci_cp_le_del_from_white_list cp;
748 
749 	cp.bdaddr_type = bdaddr_type;
750 	bacpy(&cp.bdaddr, bdaddr);
751 
752 	bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from whitelist", &cp.bdaddr,
753 		   cp.bdaddr_type);
754 	hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST, sizeof(cp), &cp);
755 
756 	if (use_ll_privacy(req->hdev) &&
757 	    hci_dev_test_flag(req->hdev, HCI_ENABLE_LL_PRIVACY)) {
758 		struct smp_irk *irk;
759 
760 		irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type);
761 		if (irk) {
762 			struct hci_cp_le_del_from_resolv_list cp;
763 
764 			cp.bdaddr_type = bdaddr_type;
765 			bacpy(&cp.bdaddr, bdaddr);
766 
767 			hci_req_add(req, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
768 				    sizeof(cp), &cp);
769 		}
770 	}
771 }
772 
773 /* Adds connection to white list if needed. On error, returns -1. */
774 static int add_to_white_list(struct hci_request *req,
775 			     struct hci_conn_params *params, u8 *num_entries,
776 			     bool allow_rpa)
777 {
778 	struct hci_cp_le_add_to_white_list cp;
779 	struct hci_dev *hdev = req->hdev;
780 
781 	/* Already in white list */
782 	if (hci_bdaddr_list_lookup(&hdev->le_white_list, &params->addr,
783 				   params->addr_type))
784 		return 0;
785 
786 	/* Select filter policy to accept all advertising */
787 	if (*num_entries >= hdev->le_white_list_size)
788 		return -1;
789 
790 	/* White list can not be used with RPAs */
791 	if (!allow_rpa &&
792 	    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
793 	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) {
794 		return -1;
795 	}
796 
797 	/* During suspend, only wakeable devices can be in whitelist */
798 	if (hdev->suspended && !hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
799 						   params->current_flags))
800 		return 0;
801 
802 	*num_entries += 1;
803 	cp.bdaddr_type = params->addr_type;
804 	bacpy(&cp.bdaddr, &params->addr);
805 
806 	bt_dev_dbg(hdev, "Add %pMR (0x%x) to whitelist", &cp.bdaddr,
807 		   cp.bdaddr_type);
808 	hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
809 
810 	if (use_ll_privacy(hdev) &&
811 	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) {
812 		struct smp_irk *irk;
813 
814 		irk = hci_find_irk_by_addr(hdev, &params->addr,
815 					   params->addr_type);
816 		if (irk) {
817 			struct hci_cp_le_add_to_resolv_list cp;
818 
819 			cp.bdaddr_type = params->addr_type;
820 			bacpy(&cp.bdaddr, &params->addr);
821 			memcpy(cp.peer_irk, irk->val, 16);
822 
823 			if (hci_dev_test_flag(hdev, HCI_PRIVACY))
824 				memcpy(cp.local_irk, hdev->irk, 16);
825 			else
826 				memset(cp.local_irk, 0, 16);
827 
828 			hci_req_add(req, HCI_OP_LE_ADD_TO_RESOLV_LIST,
829 				    sizeof(cp), &cp);
830 		}
831 	}
832 
833 	return 0;
834 }
835 
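/* Bring the controller white list in sync with the pending LE
 * connection and report lists. Returns the scan filter policy to use:
 * 0x01 if the white list can be used, 0x00 if unfiltered scanning is
 * required.
 */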
836 static u8 update_white_list(struct hci_request *req)
837 {
838 	struct hci_dev *hdev = req->hdev;
839 	struct hci_conn_params *params;
840 	struct bdaddr_list *b;
841 	u8 num_entries = 0;
842 	bool pend_conn, pend_report;
843 	/* We allow whitelisting even with RPAs in suspend. In the worst case,
844 	 * we won't be able to wake from devices that use the Privacy 1.2
845 	 * features. Additionally, once we support Privacy 1.2 and IRK
846 	 * offloading, we can update this to also check for those conditions.
847 	 */
848 	bool allow_rpa = hdev->suspended;
849 
850 	if (use_ll_privacy(hdev) &&
851 	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
852 		allow_rpa = true;
853 
854 	/* Go through the current white list programmed into the
855 	 * controller one by one and check if that address is still
856 	 * in the list of pending connections or list of devices to
857 	 * report. If not present in either list, then queue the
858 	 * command to remove it from the controller.
859 	 */
860 	list_for_each_entry(b, &hdev->le_white_list, list) {
861 		pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
862 						      &b->bdaddr,
863 						      b->bdaddr_type);
864 		pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
865 							&b->bdaddr,
866 							b->bdaddr_type);
867 
868 		/* If the device is not likely to connect or report,
869 		 * remove it from the whitelist.
870 		 */
871 		if (!pend_conn && !pend_report) {
872 			del_from_white_list(req, &b->bdaddr, b->bdaddr_type);
873 			continue;
874 		}
875 
876 		/* White list can not be used with RPAs */
877 		if (!allow_rpa &&
878 		    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
879 		    hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
880 			return 0x00;
881 		}
882 
883 		num_entries++;
884 	}
885 
886 	/* Since all no longer valid white list entries have been
887 	 * removed, walk through the list of pending connections
888 	 * and ensure that any new device gets programmed into
889 	 * the controller.
890 	 *
891 	 * If the list of the devices is larger than the list of
892 	 * available white list entries in the controller, then
893 	 * just abort and return filter policy value to not use the
894 	 * white list.
895 	 */
896 	list_for_each_entry(params, &hdev->pend_le_conns, action) {
897 		if (add_to_white_list(req, params, &num_entries, allow_rpa))
898 			return 0x00;
899 	}
900 
901 	/* After adding all new pending connections, walk through
902 	 * the list of pending reports and also add these to the
903 	 * white list if there is still space. Abort if space runs out.
904 	 */
905 	list_for_each_entry(params, &hdev->pend_le_reports, action) {
906 		if (add_to_white_list(req, params, &num_entries, allow_rpa))
907 			return 0x00;
908 	}
909 
910 	/* Use the allowlist unless the following conditions are all true:
911 	 * - We are not currently suspending
912 	 * - There are 1 or more ADV monitors registered and it's not offloaded
913 	 * - Interleaved scanning is not currently using the allowlist
914 	 */
915 	if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
916 	    hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE &&
917 	    hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
918 		return 0x00;
919 
920 	/* Select filter policy to use white list */
921 	return 0x01;
922 }
923 
924 static bool scan_use_rpa(struct hci_dev *hdev)
925 {
926 	return hci_dev_test_flag(hdev, HCI_PRIVACY);
927 }
928 
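/* Queue the commands that configure and enable LE scanning, using the
 * extended scan commands when the controller supports them and the
 * legacy Set Scan Parameters/Enable commands otherwise.
 */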
929 static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
930 			       u16 window, u8 own_addr_type, u8 filter_policy,
931 			       bool addr_resolv)
932 {
933 	struct hci_dev *hdev = req->hdev;
934 
935 	if (hdev->scanning_paused) {
936 		bt_dev_dbg(hdev, "Scanning is paused for suspend");
937 		return;
938 	}
939 
940 	if (use_ll_privacy(hdev) &&
941 	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
942 	    addr_resolv) {
943 		u8 enable = 0x01;
944 
945 		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
946 	}
947 
948 	/* Use extended scanning if both the extended scan parameters and
949 	 * extended scan enable commands are supported
950 	 */
951 	if (use_ext_scan(hdev)) {
952 		struct hci_cp_le_set_ext_scan_params *ext_param_cp;
953 		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
954 		struct hci_cp_le_scan_phy_params *phy_params;
955 		u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
956 		u32 plen;
957 
958 		ext_param_cp = (void *)data;
959 		phy_params = (void *)ext_param_cp->data;
960 
961 		memset(ext_param_cp, 0, sizeof(*ext_param_cp));
962 		ext_param_cp->own_addr_type = own_addr_type;
963 		ext_param_cp->filter_policy = filter_policy;
964 
965 		plen = sizeof(*ext_param_cp);
966 
967 		if (scan_1m(hdev) || scan_2m(hdev)) {
968 			ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;
969 
970 			memset(phy_params, 0, sizeof(*phy_params));
971 			phy_params->type = type;
972 			phy_params->interval = cpu_to_le16(interval);
973 			phy_params->window = cpu_to_le16(window);
974 
975 			plen += sizeof(*phy_params);
976 			phy_params++;
977 		}
978 
979 		if (scan_coded(hdev)) {
980 			ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;
981 
982 			memset(phy_params, 0, sizeof(*phy_params));
983 			phy_params->type = type;
984 			phy_params->interval = cpu_to_le16(interval);
985 			phy_params->window = cpu_to_le16(window);
986 
987 			plen += sizeof(*phy_params);
988 			phy_params++;
989 		}
990 
991 		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
992 			    plen, ext_param_cp);
993 
994 		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
995 		ext_enable_cp.enable = LE_SCAN_ENABLE;
996 		ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
997 
998 		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
999 			    sizeof(ext_enable_cp), &ext_enable_cp);
1000 	} else {
1001 		struct hci_cp_le_set_scan_param param_cp;
1002 		struct hci_cp_le_set_scan_enable enable_cp;
1003 
1004 		memset(&param_cp, 0, sizeof(param_cp));
1005 		param_cp.type = type;
1006 		param_cp.interval = cpu_to_le16(interval);
1007 		param_cp.window = cpu_to_le16(window);
1008 		param_cp.own_address_type = own_addr_type;
1009 		param_cp.filter_policy = filter_policy;
1010 		hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
1011 			    &param_cp);
1012 
1013 		memset(&enable_cp, 0, sizeof(enable_cp));
1014 		enable_cp.enable = LE_SCAN_ENABLE;
1015 		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
1016 		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
1017 			    &enable_cp);
1018 	}
1019 }
1020 
1021 /* Returns true if an LE connection is in the scanning state */
1022 static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev)
1023 {
1024 	struct hci_conn_hash *h = &hdev->conn_hash;
1025 	struct hci_conn  *c;
1026 
1027 	rcu_read_lock();
1028 
1029 	list_for_each_entry_rcu(c, &h->list, list) {
1030 		if (c->type == LE_LINK && c->state == BT_CONNECT &&
1031 		    test_bit(HCI_CONN_SCANNING, &c->flags)) {
1032 			rcu_read_unlock();
1033 			return true;
1034 		}
1035 	}
1036 
1037 	rcu_read_unlock();
1038 
1039 	return false;
1040 }
1041 
1042 /* Call hci_req_add_le_scan_disable() first to disable controller-based
1043  * address resolution so that the resolving list can be safely
1044  * reconfigured.
1045  */
1046 void hci_req_add_le_passive_scan(struct hci_request *req)
1047 {
1048 	struct hci_dev *hdev = req->hdev;
1049 	u8 own_addr_type;
1050 	u8 filter_policy;
1051 	u16 window, interval;
1052 	/* Background scanning should run with address resolution */
1053 	bool addr_resolv = true;
1054 
1055 	if (hdev->scanning_paused) {
1056 		bt_dev_dbg(hdev, "Scanning is paused for suspend");
1057 		return;
1058 	}
1059 
1060 	/* Set require_privacy to false since no SCAN_REQ are sent
1061 	 * during passive scanning. Not using a non-resolvable address
1062 	 * here is important so that peer devices using direct
1063 	 * advertising with our address will be correctly reported
1064 	 * by the controller.
1065 	 */
1066 	if (hci_update_random_address(req, false, scan_use_rpa(hdev),
1067 				      &own_addr_type))
1068 		return;
1069 
1070 	if (hdev->enable_advmon_interleave_scan &&
1071 	    __hci_update_interleaved_scan(hdev))
1072 		return;
1073 
1074 	bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);
1075 	/* Adding or removing entries from the white list must
1076 	 * happen before enabling scanning. The controller does
1077 	 * not allow white list modification while scanning.
1078 	 */
1079 	filter_policy = update_white_list(req);
1080 
1081 	/* When the controller is using random resolvable addresses and
1082 	 * LE privacy is therefore enabled, controllers that support the
1083 	 * Extended Scanner Filter Policies feature can also handle
1084 	 * directed advertising.
1085 	 *
1086 	 * So instead of using filter policies 0x00 (no whitelist)
1087 	 * and 0x01 (whitelist enabled) use the new filter policies
1088 	 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
1089 	 */
1090 	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
1091 	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
1092 		filter_policy |= 0x02;
1093 
1094 	if (hdev->suspended) {
1095 		window = hdev->le_scan_window_suspend;
1096 		interval = hdev->le_scan_int_suspend;
1097 
1098 		set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
1099 	} else if (hci_is_le_conn_scanning(hdev)) {
1100 		window = hdev->le_scan_window_connect;
1101 		interval = hdev->le_scan_int_connect;
1102 	} else if (hci_is_adv_monitoring(hdev)) {
1103 		window = hdev->le_scan_window_adv_monitor;
1104 		interval = hdev->le_scan_int_adv_monitor;
1105 	} else {
1106 		window = hdev->le_scan_window;
1107 		interval = hdev->le_scan_interval;
1108 	}
1109 
1110 	bt_dev_dbg(hdev, "LE passive scan with whitelist = %d", filter_policy);
1111 	hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
1112 			   own_addr_type, filter_policy, addr_resolv);
1113 }
1114 
1115 static bool adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
1116 {
1117 	struct adv_info *adv_instance;
1118 
1119 	/* Instance 0x00 always sets the local name */
1120 	if (instance == 0x00)
1121 		return true;
1122 
1123 	adv_instance = hci_find_adv_instance(hdev, instance);
1124 	if (!adv_instance)
1125 		return false;
1126 
1127 	if (adv_instance->flags & MGMT_ADV_FLAG_APPEARANCE ||
1128 	    adv_instance->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1129 		return true;
1130 
1131 	return adv_instance->scan_rsp_len ? true : false;
1132 }
1133 
1134 static void hci_req_clear_event_filter(struct hci_request *req)
1135 {
1136 	struct hci_cp_set_event_filter f;
1137 
1138 	if (!hci_dev_test_flag(req->hdev, HCI_BREDR_ENABLED))
1139 		return;
1140 
1141 	if (hci_dev_test_flag(req->hdev, HCI_EVENT_FILTER_CONFIGURED)) {
1142 		memset(&f, 0, sizeof(f));
1143 		f.flt_type = HCI_FLT_CLEAR_ALL;
1144 		hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &f);
1145 	}
1146 }
1147 
1148 static void hci_req_set_event_filter(struct hci_request *req)
1149 {
1150 	struct bdaddr_list_with_flags *b;
1151 	struct hci_cp_set_event_filter f;
1152 	struct hci_dev *hdev = req->hdev;
1153 	u8 scan = SCAN_DISABLED;
1154 	bool scanning = test_bit(HCI_PSCAN, &hdev->flags);
1155 
1156 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1157 		return;
1158 
1159 	/* Always clear event filter when starting */
1160 	hci_req_clear_event_filter(req);
1161 
1162 	list_for_each_entry(b, &hdev->whitelist, list) {
1163 		if (!hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
1164 					b->current_flags))
1165 			continue;
1166 
1167 		memset(&f, 0, sizeof(f));
1168 		bacpy(&f.addr_conn_flt.bdaddr, &b->bdaddr);
1169 		f.flt_type = HCI_FLT_CONN_SETUP;
1170 		f.cond_type = HCI_CONN_SETUP_ALLOW_BDADDR;
1171 		f.addr_conn_flt.auto_accept = HCI_CONN_SETUP_AUTO_ON;
1172 
1173 		bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);
1174 		hci_req_add(req, HCI_OP_SET_EVENT_FLT, sizeof(f), &f);
1175 		scan = SCAN_PAGE;
1176 	}
1177 
1178 	if (scan && !scanning) {
1179 		set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
1180 		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1181 	} else if (!scan && scanning) {
1182 		set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
1183 		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1184 	}
1185 }
1186 
1187 static void cancel_adv_timeout(struct hci_dev *hdev)
1188 {
1189 	if (hdev->adv_instance_timeout) {
1190 		hdev->adv_instance_timeout = 0;
1191 		cancel_delayed_work(&hdev->adv_instance_expire);
1192 	}
1193 }
1194 
1195 /* This function requires the caller holds hdev->lock */
1196 void __hci_req_pause_adv_instances(struct hci_request *req)
1197 {
1198 	bt_dev_dbg(req->hdev, "Pausing advertising instances");
1199 
1200 	/* Call to disable any advertisements active on the controller.
1201 	 * This will succeed even if no advertisements are configured.
1202 	 */
1203 	__hci_req_disable_advertising(req);
1204 
1205 	/* If we are using software rotation, pause the loop */
1206 	if (!ext_adv_capable(req->hdev))
1207 		cancel_adv_timeout(req->hdev);
1208 }
1209 
1210 /* This function requires the caller holds hdev->lock */
1211 static void __hci_req_resume_adv_instances(struct hci_request *req)
1212 {
1213 	struct adv_info *adv;
1214 
1215 	bt_dev_dbg(req->hdev, "Resuming advertising instances");
1216 
1217 	if (ext_adv_capable(req->hdev)) {
1218 		/* Call for each tracked instance to be re-enabled */
1219 		list_for_each_entry(adv, &req->hdev->adv_instances, list) {
1220 			__hci_req_enable_ext_advertising(req,
1221 							 adv->instance);
1222 		}
1223 
1224 	} else {
1225 		/* Schedule for most recent instance to be restarted and begin
1226 		 * the software rotation loop
1227 		 */
1228 		__hci_req_schedule_adv_instance(req,
1229 						req->hdev->cur_adv_instance,
1230 						true);
1231 	}
1232 }
1233 
1234 /* This function requires the caller holds hdev->lock */
1235 int hci_req_resume_adv_instances(struct hci_dev *hdev)
1236 {
1237 	struct hci_request req;
1238 
1239 	hci_req_init(&req, hdev);
1240 	__hci_req_resume_adv_instances(&req);
1241 
1242 	return hci_req_run(&req, NULL);
1243 }
1244 
1245 static void suspend_req_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1246 {
1247 	bt_dev_dbg(hdev, "Request complete opcode=0x%x, status=0x%x", opcode,
1248 		   status);
1249 	if (test_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) ||
1250 	    test_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) {
1251 		clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
1252 		clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
1253 		wake_up(&hdev->suspend_wait_q);
1254 	}
1255 
1256 	if (test_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks)) {
1257 		clear_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks);
1258 		wake_up(&hdev->suspend_wait_q);
1259 	}
1260 }
1261 
1262 static void hci_req_add_set_adv_filter_enable(struct hci_request *req,
1263 					      bool enable)
1264 {
1265 	struct hci_dev *hdev = req->hdev;
1266 
1267 	switch (hci_get_adv_monitor_offload_ext(hdev)) {
1268 	case HCI_ADV_MONITOR_EXT_MSFT:
1269 		msft_req_add_set_filter_enable(req, enable);
1270 		break;
1271 	default:
1272 		return;
1273 	}
1274 
1275 	/* No need to block when enabling since it's on resume path */
1276 	if (hdev->suspended && !enable)
1277 		set_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks);
1278 }
1279 
1280 /* Call with hci_dev_lock */
1281 void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
1282 {
1283 	int old_state;
1284 	struct hci_conn *conn;
1285 	struct hci_request req;
1286 	u8 page_scan;
1287 	int disconnect_counter;
1288 
1289 	if (next == hdev->suspend_state) {
1290 		bt_dev_dbg(hdev, "Same state before and after: %d", next);
1291 		goto done;
1292 	}
1293 
1294 	hdev->suspend_state = next;
1295 	hci_req_init(&req, hdev);
1296 
1297 	if (next == BT_SUSPEND_DISCONNECT) {
1298 		/* Mark device as suspended */
1299 		hdev->suspended = true;
1300 
1301 		/* Pause discovery if not already stopped */
1302 		old_state = hdev->discovery.state;
1303 		if (old_state != DISCOVERY_STOPPED) {
1304 			set_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks);
1305 			hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
1306 			queue_work(hdev->req_workqueue, &hdev->discov_update);
1307 		}
1308 
1309 		hdev->discovery_paused = true;
1310 		hdev->discovery_old_state = old_state;
1311 
1312 		/* Stop directed advertising */
1313 		old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
1314 		if (old_state) {
1315 			set_bit(SUSPEND_PAUSE_ADVERTISING, hdev->suspend_tasks);
1316 			cancel_delayed_work(&hdev->discov_off);
1317 			queue_delayed_work(hdev->req_workqueue,
1318 					   &hdev->discov_off, 0);
1319 		}
1320 
1321 		/* Pause other advertisements */
1322 		if (hdev->adv_instance_cnt)
1323 			__hci_req_pause_adv_instances(&req);
1324 
1325 		hdev->advertising_paused = true;
1326 		hdev->advertising_old_state = old_state;
1327 
1328 		/* Disable page scan if enabled */
1329 		if (test_bit(HCI_PSCAN, &hdev->flags)) {
1330 			page_scan = SCAN_DISABLED;
1331 			hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1,
1332 				    &page_scan);
1333 			set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
1334 		}
1335 
1336 		/* Disable LE passive scan if enabled */
1337 		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
1338 			cancel_interleave_scan(hdev);
1339 			hci_req_add_le_scan_disable(&req, false);
1340 		}
1341 
1342 		/* Disable advertisement filters */
1343 		hci_req_add_set_adv_filter_enable(&req, false);
1344 
1345 		/* Prevent disconnects from causing scanning to be re-enabled */
1346 		hdev->scanning_paused = true;
1347 
1348 		/* Run commands before disconnecting */
1349 		hci_req_run(&req, suspend_req_complete);
1350 
1351 		disconnect_counter = 0;
1352 		/* Soft disconnect everything (power off) */
1353 		list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1354 			hci_disconnect(conn, HCI_ERROR_REMOTE_POWER_OFF);
1355 			disconnect_counter++;
1356 		}
1357 
1358 		if (disconnect_counter > 0) {
1359 			bt_dev_dbg(hdev,
1360 				   "Had %d disconnects. Will wait on them",
1361 				   disconnect_counter);
1362 			set_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks);
1363 		}
1364 	} else if (next == BT_SUSPEND_CONFIGURE_WAKE) {
1365 		/* Unpause to take care of updating scanning params */
1366 		hdev->scanning_paused = false;
1367 		/* Enable event filter for paired devices */
1368 		hci_req_set_event_filter(&req);
1369 		/* Enable passive scan at lower duty cycle */
1370 		__hci_update_background_scan(&req);
1371 		/* Pause scan changes again. */
1372 		hdev->scanning_paused = true;
1373 		hci_req_run(&req, suspend_req_complete);
1374 	} else {
1375 		hdev->suspended = false;
1376 		hdev->scanning_paused = false;
1377 
1378 		/* Clear any event filters and restore scan state */
1379 		hci_req_clear_event_filter(&req);
1380 		__hci_req_update_scan(&req);
1381 
1382 		/* Reset passive/background scanning to normal */
1383 		__hci_update_background_scan(&req);
1384 		/* Enable all of the advertisement filters */
1385 		hci_req_add_set_adv_filter_enable(&req, true);
1386 
1387 		/* Unpause directed advertising */
1388 		hdev->advertising_paused = false;
1389 		if (hdev->advertising_old_state) {
1390 			set_bit(SUSPEND_UNPAUSE_ADVERTISING,
1391 				hdev->suspend_tasks);
1392 			hci_dev_set_flag(hdev, HCI_ADVERTISING);
1393 			queue_work(hdev->req_workqueue,
1394 				   &hdev->discoverable_update);
1395 			hdev->advertising_old_state = 0;
1396 		}
1397 
1398 		/* Resume other advertisements */
1399 		if (hdev->adv_instance_cnt)
1400 			__hci_req_resume_adv_instances(&req);
1401 
1402 		/* Unpause discovery */
1403 		hdev->discovery_paused = false;
1404 		if (hdev->discovery_old_state != DISCOVERY_STOPPED &&
1405 		    hdev->discovery_old_state != DISCOVERY_STOPPING) {
1406 			set_bit(SUSPEND_UNPAUSE_DISCOVERY, hdev->suspend_tasks);
1407 			hci_discovery_set_state(hdev, DISCOVERY_STARTING);
1408 			queue_work(hdev->req_workqueue, &hdev->discov_update);
1409 		}
1410 
1411 		hci_req_run(&req, suspend_req_complete);
1412 	}
1413 
1414 	hdev->suspend_state = next;
1415 
1416 done:
1417 	clear_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
1418 	wake_up(&hdev->suspend_wait_q);
1419 }
1420 
1421 static bool adv_cur_instance_is_scannable(struct hci_dev *hdev)
1422 {
1423 	return adv_instance_is_scannable(hdev, hdev->cur_adv_instance);
1424 }
1425 
1426 void __hci_req_disable_advertising(struct hci_request *req)
1427 {
1428 	if (ext_adv_capable(req->hdev)) {
1429 		__hci_req_disable_ext_adv_instance(req, 0x00);
1430 
1431 	} else {
1432 		u8 enable = 0x00;
1433 
1434 		hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1435 	}
1436 }
1437 
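/* Return the advertising flags for the given instance. Instance 0x00 is
 * synthesized from the current hdev settings; any other instance returns
 * the flags stored in its adv_info, or 0 if the instance does not exist.
 */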
1438 static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
1439 {
1440 	u32 flags;
1441 	struct adv_info *adv_instance;
1442 
1443 	if (instance == 0x00) {
1444 		/* Instance 0 always manages the "Tx Power" and "Flags"
1445 		 * fields
1446 		 */
1447 		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
1448 
1449 		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
1450 		 * corresponds to the "connectable" instance flag.
1451 		 */
1452 		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
1453 			flags |= MGMT_ADV_FLAG_CONNECTABLE;
1454 
1455 		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1456 			flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
1457 		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1458 			flags |= MGMT_ADV_FLAG_DISCOV;
1459 
1460 		return flags;
1461 	}
1462 
1463 	adv_instance = hci_find_adv_instance(hdev, instance);
1464 
1465 	/* Return 0 when we got an invalid instance identifier. */
1466 	if (!adv_instance)
1467 		return 0;
1468 
1469 	return adv_instance->flags;
1470 }
1471 
1472 static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
1473 {
1474 	/* If privacy is not enabled don't use RPA */
1475 	if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
1476 		return false;
1477 
1478 	/* If basic privacy mode is enabled use RPA */
1479 	if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1480 		return true;
1481 
1482 	/* If limited privacy mode is enabled don't use RPA if we're
1483 	 * both discoverable and bondable.
1484 	 */
1485 	if ((flags & MGMT_ADV_FLAG_DISCOV) &&
1486 	    hci_dev_test_flag(hdev, HCI_BONDABLE))
1487 		return false;
1488 
1489 	/* We're neither bondable nor discoverable in the limited
1490 	 * privacy mode, therefore use RPA.
1491 	 */
1492 	return true;
1493 }
1494 
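/* Consult the supported LE states to decide whether advertising of the
 * requested (non-)connectable type is allowed while LE connections in
 * slave and/or master role exist.
 */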
1495 static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
1496 {
1497 	/* If there is no connection we are OK to advertise. */
1498 	if (hci_conn_num(hdev, LE_LINK) == 0)
1499 		return true;
1500 
1501 	/* Check le_states if there is any connection in slave role. */
1502 	if (hdev->conn_hash.le_num_slave > 0) {
1503 		/* Slave connection state and non connectable mode bit 20. */
1504 		if (!connectable && !(hdev->le_states[2] & 0x10))
1505 			return false;
1506 
1507 		/* Slave connection state and connectable mode bit 38
1508 		 * and scannable bit 21.
1509 		 */
1510 		if (connectable && (!(hdev->le_states[4] & 0x40) ||
1511 				    !(hdev->le_states[2] & 0x20)))
1512 			return false;
1513 	}
1514 
1515 	/* Check le_states if there is any connection in master role. */
1516 	if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_slave) {
1517 		/* Master connection state and non connectable mode bit 18. */
1518 		if (!connectable && !(hdev->le_states[2] & 0x02))
1519 			return false;
1520 
1521 		/* Master connection state and connectable mode bit 35 and
1522 		 * scannable 19.
1523 		 */
1524 		if (connectable && (!(hdev->le_states[4] & 0x08) ||
1525 				    !(hdev->le_states[2] & 0x08)))
1526 			return false;
1527 	}
1528 
1529 	return true;
1530 }
1531 
1532 void __hci_req_enable_advertising(struct hci_request *req)
1533 {
1534 	struct hci_dev *hdev = req->hdev;
1535 	struct adv_info *adv_instance;
1536 	struct hci_cp_le_set_adv_param cp;
1537 	u8 own_addr_type, enable = 0x01;
1538 	bool connectable;
1539 	u16 adv_min_interval, adv_max_interval;
1540 	u32 flags;
1541 
1542 	flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);
1543 	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
1544 
1545 	/* If the "connectable" instance flag was not set, then choose between
1546 	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1547 	 */
1548 	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1549 		      mgmt_get_connectable(hdev);
1550 
1551 	if (!is_advertising_allowed(hdev, connectable))
1552 		return;
1553 
1554 	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1555 		__hci_req_disable_advertising(req);
1556 
1557 	/* Clear the HCI_LE_ADV bit temporarily so that the
1558 	 * hci_update_random_address knows that it's safe to go ahead
1559 	 * and write a new random address. The flag will be set back on
1560 	 * as soon as the SET_ADV_ENABLE HCI command completes.
1561 	 */
1562 	hci_dev_clear_flag(hdev, HCI_LE_ADV);
1563 
1564 	/* Set require_privacy to true only when non-connectable
1565 	 * advertising is used. In that case it is fine to use a
1566 	 * non-resolvable private address.
1567 	 */
1568 	if (hci_update_random_address(req, !connectable,
1569 				      adv_use_rpa(hdev, flags),
1570 				      &own_addr_type) < 0)
1571 		return;
1572 
1573 	memset(&cp, 0, sizeof(cp));
1574 
1575 	if (adv_instance) {
1576 		adv_min_interval = adv_instance->min_interval;
1577 		adv_max_interval = adv_instance->max_interval;
1578 	} else {
1579 		adv_min_interval = hdev->le_adv_min_interval;
1580 		adv_max_interval = hdev->le_adv_max_interval;
1581 	}
1582 
1583 	if (connectable) {
1584 		cp.type = LE_ADV_IND;
1585 	} else {
1586 		if (adv_cur_instance_is_scannable(hdev))
1587 			cp.type = LE_ADV_SCAN_IND;
1588 		else
1589 			cp.type = LE_ADV_NONCONN_IND;
1590 
1591 		if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
1592 		    hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1593 			adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
1594 			adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
1595 		}
1596 	}
1597 
1598 	cp.min_interval = cpu_to_le16(adv_min_interval);
1599 	cp.max_interval = cpu_to_le16(adv_max_interval);
1600 	cp.own_address_type = own_addr_type;
1601 	cp.channel_map = hdev->le_adv_channel_map;
1602 
1603 	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1604 
1605 	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1606 }
1607 
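/* Append the local name to an advertising/scan response data buffer:
 * prefer the complete name if it fits, then the configured short name,
 * and finally a truncated copy of the complete name. Returns the
 * updated data length.
 */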
1608 u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1609 {
1610 	size_t short_len;
1611 	size_t complete_len;
1612 
1613 	/* no space left for name (+ NULL + type + len) */
1614 	if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
1615 		return ad_len;
1616 
1617 	/* use complete name if present and fits */
1618 	complete_len = strlen(hdev->dev_name);
1619 	if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
1620 		return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
1621 				       hdev->dev_name, complete_len + 1);
1622 
1623 	/* use short name if present */
1624 	short_len = strlen(hdev->short_name);
1625 	if (short_len)
1626 		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
1627 				       hdev->short_name, short_len + 1);
1628 
1629 	/* use shortened full name if present, we already know that name
1630 	 * is longer than HCI_MAX_SHORT_NAME_LENGTH
1631 	 */
1632 	if (complete_len) {
1633 		u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];
1634 
1635 		memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
1636 		name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';
1637 
1638 		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
1639 				       sizeof(name));
1640 	}
1641 
1642 	return ad_len;
1643 }
1644 
1645 static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1646 {
1647 	return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
1648 }
1649 
1650 static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
1651 {
1652 	u8 scan_rsp_len = 0;
1653 
1654 	if (hdev->appearance)
1655 		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1656 
1657 	return append_local_name(hdev, ptr, scan_rsp_len);
1658 }
1659 
1660 static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
1661 					u8 *ptr)
1662 {
1663 	struct adv_info *adv_instance;
1664 	u32 instance_flags;
1665 	u8 scan_rsp_len = 0;
1666 
1667 	adv_instance = hci_find_adv_instance(hdev, instance);
1668 	if (!adv_instance)
1669 		return 0;
1670 
1671 	instance_flags = adv_instance->flags;
1672 
1673 	if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance)
1674 		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1675 
1676 	memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
1677 	       adv_instance->scan_rsp_len);
1678 
1679 	scan_rsp_len += adv_instance->scan_rsp_len;
1680 
1681 	if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
1682 		scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);
1683 
1684 	return scan_rsp_len;
1685 }
1686 
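/* Queue an update of the scan response data for @instance, using the
 * extended or legacy command depending on controller support. Nothing
 * is queued if the data is unchanged.
 */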
1687 void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
1688 {
1689 	struct hci_dev *hdev = req->hdev;
1690 	u8 len;
1691 
1692 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1693 		return;
1694 
1695 	if (ext_adv_capable(hdev)) {
1696 		struct hci_cp_le_set_ext_scan_rsp_data cp;
1697 
1698 		memset(&cp, 0, sizeof(cp));
1699 
1700 		if (instance)
1701 			len = create_instance_scan_rsp_data(hdev, instance,
1702 							    cp.data);
1703 		else
1704 			len = create_default_scan_rsp_data(hdev, cp.data);
1705 
1706 		if (hdev->scan_rsp_data_len == len &&
1707 		    !memcmp(cp.data, hdev->scan_rsp_data, len))
1708 			return;
1709 
1710 		memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1711 		hdev->scan_rsp_data_len = len;
1712 
1713 		cp.handle = instance;
1714 		cp.length = len;
1715 		cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1716 		cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1717 
1718 		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, sizeof(cp),
1719 			    &cp);
1720 	} else {
1721 		struct hci_cp_le_set_scan_rsp_data cp;
1722 
1723 		memset(&cp, 0, sizeof(cp));
1724 
1725 		if (instance)
1726 			len = create_instance_scan_rsp_data(hdev, instance,
1727 							    cp.data);
1728 		else
1729 			len = create_default_scan_rsp_data(hdev, cp.data);
1730 
1731 		if (hdev->scan_rsp_data_len == len &&
1732 		    !memcmp(cp.data, hdev->scan_rsp_data, len))
1733 			return;
1734 
1735 		memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1736 		hdev->scan_rsp_data_len = len;
1737 
1738 		cp.length = len;
1739 
1740 		hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
1741 	}
1742 }
1743 
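/* Build the advertising data for @instance into @ptr: the Flags field
 * (unless the instance already provides one), the instance data and,
 * when requested and valid, the TX power. Returns the number of bytes
 * written.
 */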
1744 static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
1745 {
1746 	struct adv_info *adv_instance = NULL;
1747 	u8 ad_len = 0, flags = 0;
1748 	u32 instance_flags;
1749 
1750 	/* Return 0 when the current instance identifier is invalid. */
1751 	if (instance) {
1752 		adv_instance = hci_find_adv_instance(hdev, instance);
1753 		if (!adv_instance)
1754 			return 0;
1755 	}
1756 
1757 	instance_flags = get_adv_instance_flags(hdev, instance);
1758 
1759 	/* If the instance already has the flags set, skip adding them
1760 	 * again.
1761 	 */
1762 	if (adv_instance && eir_get_data(adv_instance->adv_data,
1763 					 adv_instance->adv_data_len, EIR_FLAGS,
1764 					 NULL))
1765 		goto skip_flags;
1766 
1767 	/* The Add Advertising command allows userspace to set both the general
1768 	 * and limited discoverable flags.
1769 	 */
1770 	if (instance_flags & MGMT_ADV_FLAG_DISCOV)
1771 		flags |= LE_AD_GENERAL;
1772 
1773 	if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
1774 		flags |= LE_AD_LIMITED;
1775 
1776 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1777 		flags |= LE_AD_NO_BREDR;
1778 
1779 	if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
1780 		/* If a discovery flag wasn't provided, simply use the global
1781 		 * settings.
1782 		 */
1783 		if (!flags)
1784 			flags |= mgmt_get_adv_discov_flags(hdev);
1785 
1786 		/* If flags would still be empty, then there is no need to
1787 		 * include the "Flags" AD field.
1788 		 */
1789 		if (flags) {
1790 			ptr[0] = 0x02;
1791 			ptr[1] = EIR_FLAGS;
1792 			ptr[2] = flags;
1793 
1794 			ad_len += 3;
1795 			ptr += 3;
1796 		}
1797 	}
1798 
1799 skip_flags:
1800 	if (adv_instance) {
1801 		memcpy(ptr, adv_instance->adv_data,
1802 		       adv_instance->adv_data_len);
1803 		ad_len += adv_instance->adv_data_len;
1804 		ptr += adv_instance->adv_data_len;
1805 	}
1806 
1807 	if (instance_flags & MGMT_ADV_FLAG_TX_POWER) {
1808 		s8 adv_tx_power;
1809 
1810 		if (ext_adv_capable(hdev)) {
1811 			if (adv_instance)
1812 				adv_tx_power = adv_instance->tx_power;
1813 			else
1814 				adv_tx_power = hdev->adv_tx_power;
1815 		} else {
1816 			adv_tx_power = hdev->adv_tx_power;
1817 		}
1818 
1819 		/* Provide Tx Power only if we can provide a valid value for it */
1820 		if (adv_tx_power != HCI_TX_POWER_INVALID) {
1821 			ptr[0] = 0x02;
1822 			ptr[1] = EIR_TX_POWER;
1823 			ptr[2] = (u8)adv_tx_power;
1824 
1825 			ad_len += 3;
1826 			ptr += 3;
1827 		}
1828 	}
1829 
1830 	return ad_len;
1831 }
1832 
1833 void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
1834 {
1835 	struct hci_dev *hdev = req->hdev;
1836 	u8 len;
1837 
1838 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1839 		return;
1840 
1841 	if (ext_adv_capable(hdev)) {
1842 		struct hci_cp_le_set_ext_adv_data cp;
1843 
1844 		memset(&cp, 0, sizeof(cp));
1845 
1846 		len = create_instance_adv_data(hdev, instance, cp.data);
1847 
1848 		/* There's nothing to do if the data hasn't changed */
1849 		if (hdev->adv_data_len == len &&
1850 		    memcmp(cp.data, hdev->adv_data, len) == 0)
1851 			return;
1852 
1853 		memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1854 		hdev->adv_data_len = len;
1855 
1856 		cp.length = len;
1857 		cp.handle = instance;
1858 		cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1859 		cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1860 
1861 		hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA, sizeof(cp), &cp);
1862 	} else {
1863 		struct hci_cp_le_set_adv_data cp;
1864 
1865 		memset(&cp, 0, sizeof(cp));
1866 
1867 		len = create_instance_adv_data(hdev, instance, cp.data);
1868 
1869 		/* There's nothing to do if the data hasn't changed */
1870 		if (hdev->adv_data_len == len &&
1871 		    memcmp(cp.data, hdev->adv_data, len) == 0)
1872 			return;
1873 
1874 		memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1875 		hdev->adv_data_len = len;
1876 
1877 		cp.length = len;
1878 
1879 		hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1880 	}
1881 }
1882 
1883 int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
1884 {
1885 	struct hci_request req;
1886 
1887 	hci_req_init(&req, hdev);
1888 	__hci_req_update_adv_data(&req, instance);
1889 
1890 	return hci_req_run(&req, NULL);
1891 }
1892 
1893 static void enable_addr_resolution_complete(struct hci_dev *hdev, u8 status,
1894 					    u16 opcode)
1895 {
1896 	BT_DBG("%s status %u", hdev->name, status);
1897 }
1898 
1899 void hci_req_disable_address_resolution(struct hci_dev *hdev)
1900 {
1901 	struct hci_request req;
1902 	__u8 enable = 0x00;
1903 
1904 	if (!use_ll_privacy(hdev) &&
1905 	    !hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
1906 		return;
1907 
1908 	hci_req_init(&req, hdev);
1909 
1910 	hci_req_add(&req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
1911 
1912 	hci_req_run(&req, enable_addr_resolution_complete);
1913 }
1914 
1915 static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1916 {
1917 	bt_dev_dbg(hdev, "status %u", status);
1918 }
1919 
1920 void hci_req_reenable_advertising(struct hci_dev *hdev)
1921 {
1922 	struct hci_request req;
1923 
1924 	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1925 	    list_empty(&hdev->adv_instances))
1926 		return;
1927 
1928 	hci_req_init(&req, hdev);
1929 
1930 	if (hdev->cur_adv_instance) {
1931 		__hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
1932 						true);
1933 	} else {
1934 		if (ext_adv_capable(hdev)) {
1935 			__hci_req_start_ext_adv(&req, 0x00);
1936 		} else {
1937 			__hci_req_update_adv_data(&req, 0x00);
1938 			__hci_req_update_scan_rsp_data(&req, 0x00);
1939 			__hci_req_enable_advertising(&req);
1940 		}
1941 	}
1942 
1943 	hci_req_run(&req, adv_enable_complete);
1944 }
1945 
1946 static void adv_timeout_expire(struct work_struct *work)
1947 {
1948 	struct hci_dev *hdev = container_of(work, struct hci_dev,
1949 					    adv_instance_expire.work);
1950 
1951 	struct hci_request req;
1952 	u8 instance;
1953 
1954 	bt_dev_dbg(hdev, "");
1955 
1956 	hci_dev_lock(hdev);
1957 
1958 	hdev->adv_instance_timeout = 0;
1959 
1960 	instance = hdev->cur_adv_instance;
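	/* Instance 0x00 is the default instance and is not subject to a
	 * mgmt timeout, so there is nothing to expire for it.
	 */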
1961 	if (instance == 0x00)
1962 		goto unlock;
1963 
1964 	hci_req_init(&req, hdev);
1965 
1966 	hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);
1967 
1968 	if (list_empty(&hdev->adv_instances))
1969 		__hci_req_disable_advertising(&req);
1970 
1971 	hci_req_run(&req, NULL);
1972 
1973 unlock:
1974 	hci_dev_unlock(hdev);
1975 }
1976 
1977 static int hci_req_add_le_interleaved_scan(struct hci_request *req,
1978 					   unsigned long opt)
1979 {
1980 	struct hci_dev *hdev = req->hdev;
1981 	int ret = 0;
1982 
1983 	hci_dev_lock(hdev);
1984 
1985 	if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
1986 		hci_req_add_le_scan_disable(req, false);
1987 	hci_req_add_le_passive_scan(req);
1988 
1989 	switch (hdev->interleave_scan_state) {
1990 	case INTERLEAVE_SCAN_ALLOWLIST:
1991 		bt_dev_dbg(hdev, "next state: allowlist");
1992 		hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
1993 		break;
1994 	case INTERLEAVE_SCAN_NO_FILTER:
1995 		bt_dev_dbg(hdev, "next state: no filter");
1996 		hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST;
1997 		break;
1998 	case INTERLEAVE_SCAN_NONE:
1999 		BT_ERR("unexpected error");
2000 		ret = -1;
2001 	}
2002 
2003 	hci_dev_unlock(hdev);
2004 
2005 	return ret;
2006 }
2007 
2008 static void interleave_scan_work(struct work_struct *work)
2009 {
2010 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2011 					    interleave_scan.work);
2012 	u8 status;
2013 	unsigned long timeout;
2014 
2015 	if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) {
2016 		timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration);
2017 	} else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) {
2018 		timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration);
2019 	} else {
2020 		bt_dev_err(hdev, "unexpected error");
2021 		return;
2022 	}
2023 
2024 	hci_req_sync(hdev, hci_req_add_le_interleaved_scan, 0,
2025 		     HCI_CMD_TIMEOUT, &status);
2026 
2027 	/* Don't continue interleaving if it was canceled */
2028 	if (is_interleave_scanning(hdev))
2029 		queue_delayed_work(hdev->req_workqueue,
2030 				   &hdev->interleave_scan, timeout);
2031 }
2032 
2033 int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
2034 			   bool use_rpa, struct adv_info *adv_instance,
2035 			   u8 *own_addr_type, bdaddr_t *rand_addr)
2036 {
2037 	int err;
2038 
2039 	bacpy(rand_addr, BDADDR_ANY);
2040 
2041 	/* If privacy is enabled use a resolvable private address. If the
2042 	 * current RPA has expired, generate a new one.
2043 	 */
2044 	if (use_rpa) {
2045 		int to;
2046 
2047 		/* If the controller supports LL Privacy, use own address type
2048 		 * 0x03 (ADDR_LE_DEV_RANDOM_RESOLVED).
2049 		 */
2050 		if (use_ll_privacy(hdev) &&
2051 		    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
2052 			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
2053 		else
2054 			*own_addr_type = ADDR_LE_DEV_RANDOM;
2055 
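		/* If the current RPA is still valid and already programmed,
		 * either for the advertising instance or as the global random
		 * address, there is nothing left to do.
		 */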
2056 		if (adv_instance) {
2057 			if (!adv_instance->rpa_expired &&
2058 			    !bacmp(&adv_instance->random_addr, &hdev->rpa))
2059 				return 0;
2060 
2061 			adv_instance->rpa_expired = false;
2062 		} else {
2063 			if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
2064 			    !bacmp(&hdev->random_addr, &hdev->rpa))
2065 				return 0;
2066 		}
2067 
2068 		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
2069 		if (err < 0) {
2070 			bt_dev_err(hdev, "failed to generate new RPA");
2071 			return err;
2072 		}
2073 
2074 		bacpy(rand_addr, &hdev->rpa);
2075 
2076 		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
2077 		if (adv_instance)
2078 			queue_delayed_work(hdev->workqueue,
2079 					   &adv_instance->rpa_expired_cb, to);
2080 		else
2081 			queue_delayed_work(hdev->workqueue,
2082 					   &hdev->rpa_expired, to);
2083 
2084 		return 0;
2085 	}
2086 
2087 	/* In case of required privacy without resolvable private address,
2088 	 * use a non-resolvable private address. This is useful for
2089 	 * non-connectable advertising.
2090 	 */
2091 	if (require_privacy) {
2092 		bdaddr_t nrpa;
2093 
2094 		while (true) {
2095 			/* The non-resolvable private address is generated
2096 			 * from six random bytes with the two most significant
2097 			 * bits cleared.
2098 			 */
2099 			get_random_bytes(&nrpa, 6);
2100 			nrpa.b[5] &= 0x3f;
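			/* bdaddr_t is stored little-endian, so b[5] is the
			 * most significant byte; clearing its two top bits
			 * marks the address as non-resolvable.
			 */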
2101 
2102 			/* The non-resolvable private address shall not be
2103 			 * equal to the public address.
2104 			 */
2105 			if (bacmp(&hdev->bdaddr, &nrpa))
2106 				break;
2107 		}
2108 
2109 		*own_addr_type = ADDR_LE_DEV_RANDOM;
2110 		bacpy(rand_addr, &nrpa);
2111 
2112 		return 0;
2113 	}
2114 
2115 	/* No privacy so use a public address. */
2116 	*own_addr_type = ADDR_LE_DEV_PUBLIC;
2117 
2118 	return 0;
2119 }
2120 
2121 void __hci_req_clear_ext_adv_sets(struct hci_request *req)
2122 {
2123 	hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
2124 }
2125 
2126 int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
2127 {
2128 	struct hci_cp_le_set_ext_adv_params cp;
2129 	struct hci_dev *hdev = req->hdev;
2130 	bool connectable;
2131 	u32 flags;
2132 	bdaddr_t random_addr;
2133 	u8 own_addr_type;
2134 	int err;
2135 	struct adv_info *adv_instance;
2136 	bool secondary_adv;
2137 
2138 	if (instance > 0) {
2139 		adv_instance = hci_find_adv_instance(hdev, instance);
2140 		if (!adv_instance)
2141 			return -EINVAL;
2142 	} else {
2143 		adv_instance = NULL;
2144 	}
2145 
2146 	flags = get_adv_instance_flags(hdev, instance);
2147 
2148 	/* If the "connectable" instance flag was not set, then choose between
2149 	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
2150 	 */
2151 	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
2152 		      mgmt_get_connectable(hdev);
2153 
2154 	if (!is_advertising_allowed(hdev, connectable))
2155 		return -EPERM;
2156 
2157 	/* Set require_privacy to true only when non-connectable
2158 	 * advertising is used. In that case it is fine to use a
2159 	 * non-resolvable private address.
2160 	 */
2161 	err = hci_get_random_address(hdev, !connectable,
2162 				     adv_use_rpa(hdev, flags), adv_instance,
2163 				     &own_addr_type, &random_addr);
2164 	if (err < 0)
2165 		return err;
2166 
2167 	memset(&cp, 0, sizeof(cp));
2168 
2169 	if (adv_instance) {
2170 		hci_cpu_to_le24(adv_instance->min_interval, cp.min_interval);
2171 		hci_cpu_to_le24(adv_instance->max_interval, cp.max_interval);
2172 		cp.tx_power = adv_instance->tx_power;
2173 	} else {
2174 		hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
2175 		hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
2176 		cp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
2177 	}
2178 
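	/* Any secondary PHY flag means extended (non-legacy) advertising PDUs
	 * are used when selecting the event properties below.
	 */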
2179 	secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
2180 
2181 	if (connectable) {
2182 		if (secondary_adv)
2183 			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
2184 		else
2185 			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
2186 	} else if (adv_instance_is_scannable(hdev, instance) ||
2187 		   (flags & MGMT_ADV_PARAM_SCAN_RSP)) {
2188 		if (secondary_adv)
2189 			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
2190 		else
2191 			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
2192 	} else {
2193 		if (secondary_adv)
2194 			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
2195 		else
2196 			cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
2197 	}
2198 
2199 	cp.own_addr_type = own_addr_type;
2200 	cp.channel_map = hdev->le_adv_channel_map;
2201 	cp.handle = instance;
2202 
2203 	if (flags & MGMT_ADV_FLAG_SEC_2M) {
2204 		cp.primary_phy = HCI_ADV_PHY_1M;
2205 		cp.secondary_phy = HCI_ADV_PHY_2M;
2206 	} else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
2207 		cp.primary_phy = HCI_ADV_PHY_CODED;
2208 		cp.secondary_phy = HCI_ADV_PHY_CODED;
2209 	} else {
2210 		/* In all other cases use 1M */
2211 		cp.primary_phy = HCI_ADV_PHY_1M;
2212 		cp.secondary_phy = HCI_ADV_PHY_1M;
2213 	}
2214 
2215 	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
2216 
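	/* With extended advertising each advertising set has its own random
	 * address, set via LE Set Advertising Set Random Address instead of
	 * the global LE Set Random Address command.
	 */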
2217 	if (own_addr_type == ADDR_LE_DEV_RANDOM &&
2218 	    bacmp(&random_addr, BDADDR_ANY)) {
2219 		struct hci_cp_le_set_adv_set_rand_addr cp;
2220 
2221 		/* Check if the random address needs to be updated */
2222 		if (adv_instance) {
2223 			if (!bacmp(&random_addr, &adv_instance->random_addr))
2224 				return 0;
2225 		} else {
2226 			if (!bacmp(&random_addr, &hdev->random_addr))
2227 				return 0;
2228 		}
2229 
2230 		memset(&cp, 0, sizeof(cp));
2231 
2232 		cp.handle = instance;
2233 		bacpy(&cp.bdaddr, &random_addr);
2234 
2235 		hci_req_add(req,
2236 			    HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
2237 			    sizeof(cp), &cp);
2238 	}
2239 
2240 	return 0;
2241 }
2242 
2243 int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
2244 {
2245 	struct hci_dev *hdev = req->hdev;
2246 	struct hci_cp_le_set_ext_adv_enable *cp;
2247 	struct hci_cp_ext_adv_set *adv_set;
2248 	u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
2249 	struct adv_info *adv_instance;
2250 
2251 	if (instance > 0) {
2252 		adv_instance = hci_find_adv_instance(hdev, instance);
2253 		if (!adv_instance)
2254 			return -EINVAL;
2255 	} else {
2256 		adv_instance = NULL;
2257 	}
2258 
2259 	cp = (void *) data;
2260 	adv_set = (void *) cp->data;
2261 
2262 	memset(cp, 0, sizeof(*cp));
2263 
2264 	cp->enable = 0x01;
2265 	cp->num_of_sets = 0x01;
2266 
2267 	memset(adv_set, 0, sizeof(*adv_set));
2268 
2269 	adv_set->handle = instance;
2270 
2271 	/* Set duration per instance since controller is responsible for
2272 	 * scheduling it.
2273 	 */
2274 	if (adv_instance && adv_instance->duration) {
2275 		u16 duration = adv_instance->timeout * MSEC_PER_SEC;
2276 
2277 		/* Time = N * 10 ms */
2278 		adv_set->duration = cpu_to_le16(duration / 10);
2279 	}
2280 
2281 	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
2282 		    sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
2283 		    data);
2284 
2285 	return 0;
2286 }
2287 
2288 int __hci_req_disable_ext_adv_instance(struct hci_request *req, u8 instance)
2289 {
2290 	struct hci_dev *hdev = req->hdev;
2291 	struct hci_cp_le_set_ext_adv_enable *cp;
2292 	struct hci_cp_ext_adv_set *adv_set;
2293 	u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
2294 	u8 req_size;
2295 
2296 	/* If request specifies an instance that doesn't exist, fail */
2297 	if (instance > 0 && !hci_find_adv_instance(hdev, instance))
2298 		return -EINVAL;
2299 
2300 	memset(data, 0, sizeof(data));
2301 
2302 	cp = (void *)data;
2303 	adv_set = (void *)cp->data;
2304 
2305 	/* Instance 0x00 indicates all advertising instances will be disabled */
2306 	cp->num_of_sets = !!instance;
2307 	cp->enable = 0x00;
2308 
2309 	adv_set->handle = instance;
2310 
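	/* When disabling all instances, num_of_sets is 0 and no adv_set
	 * entry is included in the command payload.
	 */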
2311 	req_size = sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets;
2312 	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, req_size, data);
2313 
2314 	return 0;
2315 }
2316 
2317 int __hci_req_remove_ext_adv_instance(struct hci_request *req, u8 instance)
2318 {
2319 	struct hci_dev *hdev = req->hdev;
2320 
2321 	/* If request specifies an instance that doesn't exist, fail */
2322 	if (instance > 0 && !hci_find_adv_instance(hdev, instance))
2323 		return -EINVAL;
2324 
2325 	hci_req_add(req, HCI_OP_LE_REMOVE_ADV_SET, sizeof(instance), &instance);
2326 
2327 	return 0;
2328 }
2329 
2330 int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
2331 {
2332 	struct hci_dev *hdev = req->hdev;
2333 	struct adv_info *adv_instance = hci_find_adv_instance(hdev, instance);
2334 	int err;
2335 
2336 	/* If the instance isn't pending, the controller already knows about
2337 	 * it, so it is safe to disable it first.
2338 	 */
2339 	if (adv_instance && !adv_instance->pending)
2340 		__hci_req_disable_ext_adv_instance(req, instance);
2341 
2342 	err = __hci_req_setup_ext_adv_instance(req, instance);
2343 	if (err < 0)
2344 		return err;
2345 
2346 	__hci_req_update_scan_rsp_data(req, instance);
2347 	__hci_req_enable_ext_advertising(req, instance);
2348 
2349 	return 0;
2350 }
2351 
2352 int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
2353 				    bool force)
2354 {
2355 	struct hci_dev *hdev = req->hdev;
2356 	struct adv_info *adv_instance = NULL;
2357 	u16 timeout;
2358 
2359 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2360 	    list_empty(&hdev->adv_instances))
2361 		return -EPERM;
2362 
2363 	if (hdev->adv_instance_timeout)
2364 		return -EBUSY;
2365 
2366 	adv_instance = hci_find_adv_instance(hdev, instance);
2367 	if (!adv_instance)
2368 		return -ENOENT;
2369 
2370 	/* A zero timeout means unlimited advertising. As long as there is
2371 	 * only one instance, duration should be ignored. We still set a timeout
2372 	 * in case further instances are being added later on.
2373 	 *
2374 	 * If the remaining lifetime of the instance is more than the duration
2375 	 * then the timeout corresponds to the duration, otherwise it will be
2376 	 * reduced to the remaining instance lifetime.
2377 	 */
2378 	if (adv_instance->timeout == 0 ||
2379 	    adv_instance->duration <= adv_instance->remaining_time)
2380 		timeout = adv_instance->duration;
2381 	else
2382 		timeout = adv_instance->remaining_time;
2383 
2384 	/* The remaining time is being reduced unless the instance is being
2385 	 * advertised without time limit.
2386 	 */
2387 	if (adv_instance->timeout)
2388 		adv_instance->remaining_time =
2389 				adv_instance->remaining_time - timeout;
2390 
2391 	/* Only use work for scheduling instances with legacy advertising */
2392 	if (!ext_adv_capable(hdev)) {
2393 		hdev->adv_instance_timeout = timeout;
2394 		queue_delayed_work(hdev->req_workqueue,
2395 			   &hdev->adv_instance_expire,
2396 			   msecs_to_jiffies(timeout * 1000));
2397 	}
2398 
2399 	/* If we're just re-scheduling the same instance again then do not
2400 	 * execute any HCI commands. This happens when a single instance is
2401 	 * being advertised.
2402 	 */
2403 	if (!force && hdev->cur_adv_instance == instance &&
2404 	    hci_dev_test_flag(hdev, HCI_LE_ADV))
2405 		return 0;
2406 
2407 	hdev->cur_adv_instance = instance;
2408 	if (ext_adv_capable(hdev)) {
2409 		__hci_req_start_ext_adv(req, instance);
2410 	} else {
2411 		__hci_req_update_adv_data(req, instance);
2412 		__hci_req_update_scan_rsp_data(req, instance);
2413 		__hci_req_enable_advertising(req);
2414 	}
2415 
2416 	return 0;
2417 }
2418 
2419 /* For a single instance:
2420  * - force == true: The instance will be removed even when its remaining
2421  *   lifetime is not zero.
2422  * - force == false: the instance will be deactivated but kept stored unless
2423  *   the remaining lifetime is zero.
2424  *
2425  * For instance == 0x00:
2426  * - force == true: All instances will be removed regardless of their timeout
2427  *   setting.
2428  * - force == false: Only instances that have a timeout will be removed.
2429  */
2430 void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
2431 				struct hci_request *req, u8 instance,
2432 				bool force)
2433 {
2434 	struct adv_info *adv_instance, *n, *next_instance = NULL;
2435 	int err;
2436 	u8 rem_inst;
2437 
2438 	/* Cancel any timeout concerning the removed instance(s). */
2439 	if (!instance || hdev->cur_adv_instance == instance)
2440 		cancel_adv_timeout(hdev);
2441 
2442 	/* Get the next instance to advertise BEFORE we remove
2443 	 * the current one. This can be the same instance again
2444 	 * if there is only one instance.
2445 	 */
2446 	if (instance && hdev->cur_adv_instance == instance)
2447 		next_instance = hci_get_next_instance(hdev, instance);
2448 
2449 	if (instance == 0x00) {
2450 		list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
2451 					 list) {
2452 			if (!(force || adv_instance->timeout))
2453 				continue;
2454 
2455 			rem_inst = adv_instance->instance;
2456 			err = hci_remove_adv_instance(hdev, rem_inst);
2457 			if (!err)
2458 				mgmt_advertising_removed(sk, hdev, rem_inst);
2459 		}
2460 	} else {
2461 		adv_instance = hci_find_adv_instance(hdev, instance);
2462 
2463 		if (force || (adv_instance && adv_instance->timeout &&
2464 			      !adv_instance->remaining_time)) {
2465 			/* Don't advertise a removed instance. */
2466 			if (next_instance &&
2467 			    next_instance->instance == instance)
2468 				next_instance = NULL;
2469 
2470 			err = hci_remove_adv_instance(hdev, instance);
2471 			if (!err)
2472 				mgmt_advertising_removed(sk, hdev, instance);
2473 		}
2474 	}
2475 
2476 	if (!req || !hdev_is_powered(hdev) ||
2477 	    hci_dev_test_flag(hdev, HCI_ADVERTISING))
2478 		return;
2479 
2480 	if (next_instance && !ext_adv_capable(hdev))
2481 		__hci_req_schedule_adv_instance(req, next_instance->instance,
2482 						false);
2483 }
2484 
2485 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
2486 {
2487 	struct hci_dev *hdev = req->hdev;
2488 
2489 	/* If we're advertising or initiating an LE connection we can't
2490 	 * go ahead and change the random address at this time. This is
2491 	 * because the eventual initiator address used for the
2492 	 * subsequently created connection will be undefined (some
2493 	 * controllers use the new address and others the one we had
2494 	 * when the operation started).
2495 	 *
2496 	 * In this kind of scenario skip the update and let the random
2497 	 * address be updated at the next cycle.
2498 	 */
2499 	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
2500 	    hci_lookup_le_connect(hdev)) {
2501 		bt_dev_dbg(hdev, "Deferring random address update");
2502 		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
2503 		return;
2504 	}
2505 
2506 	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
2507 }
2508 
2509 int hci_update_random_address(struct hci_request *req, bool require_privacy,
2510 			      bool use_rpa, u8 *own_addr_type)
2511 {
2512 	struct hci_dev *hdev = req->hdev;
2513 	int err;
2514 
2515 	/* If privacy is enabled use a resolvable private address. If the
2516 	 * current RPA has expired or something other than the current RPA
2517 	 * is in use, then generate a new one.
2518 	 */
2519 	if (use_rpa) {
2520 		int to;
2521 
2522 		/* If the controller supports LL Privacy, use own address type
2523 		 * 0x03 (ADDR_LE_DEV_RANDOM_RESOLVED).
2524 		 */
2525 		if (use_ll_privacy(hdev) &&
2526 		    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
2527 			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
2528 		else
2529 			*own_addr_type = ADDR_LE_DEV_RANDOM;
2530 
2531 		if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
2532 		    !bacmp(&hdev->random_addr, &hdev->rpa))
2533 			return 0;
2534 
2535 		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
2536 		if (err < 0) {
2537 			bt_dev_err(hdev, "failed to generate new RPA");
2538 			return err;
2539 		}
2540 
2541 		set_random_addr(req, &hdev->rpa);
2542 
2543 		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
2544 		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
2545 
2546 		return 0;
2547 	}
2548 
2549 	/* In case of required privacy without resolvable private address,
2550 	 * use a non-resolvable private address. This is useful for active
2551 	 * scanning and non-connectable advertising.
2552 	 */
2553 	if (require_privacy) {
2554 		bdaddr_t nrpa;
2555 
2556 		while (true) {
2557 			/* The non-resolvable private address is generated
2558 			 * from six random bytes with the two most significant
2559 			 * bits cleared.
2560 			 */
2561 			get_random_bytes(&nrpa, 6);
2562 			nrpa.b[5] &= 0x3f;
2563 
2564 			/* The non-resolvable private address shall not be
2565 			 * equal to the public address.
2566 			 */
2567 			if (bacmp(&hdev->bdaddr, &nrpa))
2568 				break;
2569 		}
2570 
2571 		*own_addr_type = ADDR_LE_DEV_RANDOM;
2572 		set_random_addr(req, &nrpa);
2573 		return 0;
2574 	}
2575 
2576 	/* If forcing static address is in use or there is no public
2577 	 * address, use the static address as the random address (but skip
2578 	 * the HCI command if the current random address is already the
2579 	 * static one).
2580 	 *
2581 	 * In case BR/EDR has been disabled on a dual-mode controller
2582 	 * and a static address has been configured, then use that
2583 	 * address instead of the public BR/EDR address.
2584 	 */
2585 	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2586 	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2587 	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2588 	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
2589 		*own_addr_type = ADDR_LE_DEV_RANDOM;
2590 		if (bacmp(&hdev->static_addr, &hdev->random_addr))
2591 			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
2592 				    &hdev->static_addr);
2593 		return 0;
2594 	}
2595 
2596 	/* Neither privacy nor static address is being used so use a
2597 	 * public address.
2598 	 */
2599 	*own_addr_type = ADDR_LE_DEV_PUBLIC;
2600 
2601 	return 0;
2602 }
2603 
2604 static bool disconnected_whitelist_entries(struct hci_dev *hdev)
2605 {
2606 	struct bdaddr_list *b;
2607 
2608 	list_for_each_entry(b, &hdev->whitelist, list) {
2609 		struct hci_conn *conn;
2610 
2611 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
2612 		if (!conn)
2613 			return true;
2614 
2615 		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2616 			return true;
2617 	}
2618 
2619 	return false;
2620 }
2621 
2622 void __hci_req_update_scan(struct hci_request *req)
2623 {
2624 	struct hci_dev *hdev = req->hdev;
2625 	u8 scan;
2626 
2627 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2628 		return;
2629 
2630 	if (!hdev_is_powered(hdev))
2631 		return;
2632 
2633 	if (mgmt_powering_down(hdev))
2634 		return;
2635 
2636 	if (hdev->scanning_paused)
2637 		return;
2638 
2639 	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
2640 	    disconnected_whitelist_entries(hdev))
2641 		scan = SCAN_PAGE;
2642 	else
2643 		scan = SCAN_DISABLED;
2644 
2645 	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2646 		scan |= SCAN_INQUIRY;
2647 
2648 	if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
2649 	    test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
2650 		return;
2651 
2652 	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
2653 }
2654 
2655 static int update_scan(struct hci_request *req, unsigned long opt)
2656 {
2657 	hci_dev_lock(req->hdev);
2658 	__hci_req_update_scan(req);
2659 	hci_dev_unlock(req->hdev);
2660 	return 0;
2661 }
2662 
2663 static void scan_update_work(struct work_struct *work)
2664 {
2665 	struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
2666 
2667 	hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
2668 }
2669 
2670 static int connectable_update(struct hci_request *req, unsigned long opt)
2671 {
2672 	struct hci_dev *hdev = req->hdev;
2673 
2674 	hci_dev_lock(hdev);
2675 
2676 	__hci_req_update_scan(req);
2677 
2678 	/* If BR/EDR is not enabled and we disable advertising as a
2679 	 * by-product of disabling connectable, we need to update the
2680 	 * advertising flags.
2681 	 */
2682 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2683 		__hci_req_update_adv_data(req, hdev->cur_adv_instance);
2684 
2685 	/* Update the advertising parameters if necessary */
2686 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2687 	    !list_empty(&hdev->adv_instances)) {
2688 		if (ext_adv_capable(hdev))
2689 			__hci_req_start_ext_adv(req, hdev->cur_adv_instance);
2690 		else
2691 			__hci_req_enable_advertising(req);
2692 	}
2693 
2694 	__hci_update_background_scan(req);
2695 
2696 	hci_dev_unlock(hdev);
2697 
2698 	return 0;
2699 }
2700 
2701 static void connectable_update_work(struct work_struct *work)
2702 {
2703 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2704 					    connectable_update);
2705 	u8 status;
2706 
2707 	hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
2708 	mgmt_set_connectable_complete(hdev, status);
2709 }
2710 
2711 static u8 get_service_classes(struct hci_dev *hdev)
2712 {
2713 	struct bt_uuid *uuid;
2714 	u8 val = 0;
2715 
2716 	list_for_each_entry(uuid, &hdev->uuids, list)
2717 		val |= uuid->svc_hint;
2718 
2719 	return val;
2720 }
2721 
2722 void __hci_req_update_class(struct hci_request *req)
2723 {
2724 	struct hci_dev *hdev = req->hdev;
2725 	u8 cod[3];
2726 
2727 	bt_dev_dbg(hdev, "");
2728 
2729 	if (!hdev_is_powered(hdev))
2730 		return;
2731 
2732 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2733 		return;
2734 
2735 	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
2736 		return;
2737 
2738 	cod[0] = hdev->minor_class;
2739 	cod[1] = hdev->major_class;
2740 	cod[2] = get_service_classes(hdev);
2741 
2742 	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
2743 		cod[1] |= 0x20;
2744 
2745 	if (memcmp(cod, hdev->dev_class, 3) == 0)
2746 		return;
2747 
2748 	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
2749 }
2750 
2751 static void write_iac(struct hci_request *req)
2752 {
2753 	struct hci_dev *hdev = req->hdev;
2754 	struct hci_cp_write_current_iac_lap cp;
2755 
2756 	if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2757 		return;
2758 
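	/* The IAC LAPs are encoded little-endian: 0x9e8b00 is the Limited
	 * Inquiry Access Code and 0x9e8b33 the General Inquiry Access Code.
	 */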
2759 	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
2760 		/* Limited discoverable mode */
2761 		cp.num_iac = min_t(u8, hdev->num_iac, 2);
2762 		cp.iac_lap[0] = 0x00;	/* LIAC */
2763 		cp.iac_lap[1] = 0x8b;
2764 		cp.iac_lap[2] = 0x9e;
2765 		cp.iac_lap[3] = 0x33;	/* GIAC */
2766 		cp.iac_lap[4] = 0x8b;
2767 		cp.iac_lap[5] = 0x9e;
2768 	} else {
2769 		/* General discoverable mode */
2770 		cp.num_iac = 1;
2771 		cp.iac_lap[0] = 0x33;	/* GIAC */
2772 		cp.iac_lap[1] = 0x8b;
2773 		cp.iac_lap[2] = 0x9e;
2774 	}
2775 
2776 	hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
2777 		    (cp.num_iac * 3) + 1, &cp);
2778 }
2779 
2780 static int discoverable_update(struct hci_request *req, unsigned long opt)
2781 {
2782 	struct hci_dev *hdev = req->hdev;
2783 
2784 	hci_dev_lock(hdev);
2785 
2786 	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2787 		write_iac(req);
2788 		__hci_req_update_scan(req);
2789 		__hci_req_update_class(req);
2790 	}
2791 
2792 	/* Advertising instances don't use the global discoverable setting, so
2793 	 * only update AD if advertising was enabled using Set Advertising.
2794 	 */
2795 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2796 		__hci_req_update_adv_data(req, 0x00);
2797 
2798 		/* Discoverable mode affects the local advertising
2799 		 * address in limited privacy mode.
2800 		 */
2801 		if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
2802 			if (ext_adv_capable(hdev))
2803 				__hci_req_start_ext_adv(req, 0x00);
2804 			else
2805 				__hci_req_enable_advertising(req);
2806 		}
2807 	}
2808 
2809 	hci_dev_unlock(hdev);
2810 
2811 	return 0;
2812 }
2813 
2814 static void discoverable_update_work(struct work_struct *work)
2815 {
2816 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2817 					    discoverable_update);
2818 	u8 status;
2819 
2820 	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
2821 	mgmt_set_discoverable_complete(hdev, status);
2822 }
2823 
2824 void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
2825 		      u8 reason)
2826 {
2827 	switch (conn->state) {
2828 	case BT_CONNECTED:
2829 	case BT_CONFIG:
2830 		if (conn->type == AMP_LINK) {
2831 			struct hci_cp_disconn_phy_link cp;
2832 
2833 			cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
2834 			cp.reason = reason;
2835 			hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
2836 				    &cp);
2837 		} else {
2838 			struct hci_cp_disconnect dc;
2839 
2840 			dc.handle = cpu_to_le16(conn->handle);
2841 			dc.reason = reason;
2842 			hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2843 		}
2844 
2845 		conn->state = BT_DISCONN;
2846 
2847 		break;
2848 	case BT_CONNECT:
2849 		if (conn->type == LE_LINK) {
2850 			if (test_bit(HCI_CONN_SCANNING, &conn->flags))
2851 				break;
2852 			hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
2853 				    0, NULL);
2854 		} else if (conn->type == ACL_LINK) {
2855 			if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
2856 				break;
2857 			hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
2858 				    6, &conn->dst);
2859 		}
2860 		break;
2861 	case BT_CONNECT2:
2862 		if (conn->type == ACL_LINK) {
2863 			struct hci_cp_reject_conn_req rej;
2864 
2865 			bacpy(&rej.bdaddr, &conn->dst);
2866 			rej.reason = reason;
2867 
2868 			hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
2869 				    sizeof(rej), &rej);
2870 		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
2871 			struct hci_cp_reject_sync_conn_req rej;
2872 
2873 			bacpy(&rej.bdaddr, &conn->dst);
2874 
2875 			/* SCO rejection has its own limited set of
2876 			 * allowed error values (0x0D-0x0F) which isn't
2877 			 * compatible with most values passed to this
2878 			 * function. To be safe, hard-code one of the
2879 			 * values that is suitable for SCO.
2880 			 */
2881 			rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
2882 
2883 			hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
2884 				    sizeof(rej), &rej);
2885 		}
2886 		break;
2887 	default:
2888 		conn->state = BT_CLOSED;
2889 		break;
2890 	}
2891 }
2892 
2893 static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2894 {
2895 	if (status)
2896 		bt_dev_dbg(hdev, "Failed to abort connection: status 0x%2.2x", status);
2897 }
2898 
2899 int hci_abort_conn(struct hci_conn *conn, u8 reason)
2900 {
2901 	struct hci_request req;
2902 	int err;
2903 
2904 	hci_req_init(&req, conn->hdev);
2905 
2906 	__hci_abort_conn(&req, conn, reason);
2907 
2908 	err = hci_req_run(&req, abort_conn_complete);
2909 	if (err && err != -ENODATA) {
2910 		bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
2911 		return err;
2912 	}
2913 
2914 	return 0;
2915 }
2916 
2917 static int update_bg_scan(struct hci_request *req, unsigned long opt)
2918 {
2919 	hci_dev_lock(req->hdev);
2920 	__hci_update_background_scan(req);
2921 	hci_dev_unlock(req->hdev);
2922 	return 0;
2923 }
2924 
2925 static void bg_scan_update(struct work_struct *work)
2926 {
2927 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2928 					    bg_scan_update);
2929 	struct hci_conn *conn;
2930 	u8 status;
2931 	int err;
2932 
2933 	err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
2934 	if (!err)
2935 		return;
2936 
2937 	hci_dev_lock(hdev);
2938 
2939 	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
2940 	if (conn)
2941 		hci_le_conn_failed(conn, status);
2942 
2943 	hci_dev_unlock(hdev);
2944 }
2945 
2946 static int le_scan_disable(struct hci_request *req, unsigned long opt)
2947 {
2948 	hci_req_add_le_scan_disable(req, false);
2949 	return 0;
2950 }
2951 
2952 static int bredr_inquiry(struct hci_request *req, unsigned long opt)
2953 {
2954 	u8 length = opt;
2955 	const u8 giac[3] = { 0x33, 0x8b, 0x9e };
2956 	const u8 liac[3] = { 0x00, 0x8b, 0x9e };
2957 	struct hci_cp_inquiry cp;
2958 
2959 	if (test_bit(HCI_INQUIRY, &req->hdev->flags))
2960 		return 0;
2961 
2962 	bt_dev_dbg(req->hdev, "");
2963 
2964 	hci_dev_lock(req->hdev);
2965 	hci_inquiry_cache_flush(req->hdev);
2966 	hci_dev_unlock(req->hdev);
2967 
2968 	memset(&cp, 0, sizeof(cp));
2969 
2970 	if (req->hdev->discovery.limited)
2971 		memcpy(&cp.lap, liac, sizeof(cp.lap));
2972 	else
2973 		memcpy(&cp.lap, giac, sizeof(cp.lap));
2974 
2975 	cp.length = length;
2976 
2977 	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2978 
2979 	return 0;
2980 }
2981 
2982 static void le_scan_disable_work(struct work_struct *work)
2983 {
2984 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2985 					    le_scan_disable.work);
2986 	u8 status;
2987 
2988 	bt_dev_dbg(hdev, "");
2989 
2990 	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2991 		return;
2992 
2993 	cancel_delayed_work(&hdev->le_scan_restart);
2994 
2995 	hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
2996 	if (status) {
2997 		bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
2998 			   status);
2999 		return;
3000 	}
3001 
3002 	hdev->discovery.scan_start = 0;
3003 
3004 	/* If we were running LE only scan, change discovery state. If
3005 	 * we were running both LE and BR/EDR inquiry simultaneously,
3006 	 * and BR/EDR inquiry is already finished, stop discovery,
3007 	 * otherwise BR/EDR inquiry will stop discovery when finished.
3008 	 * If remote device name resolution is still pending, do not change
3009 	 * the discovery state.
3010 	 */
3011 
3012 	if (hdev->discovery.type == DISCOV_TYPE_LE)
3013 		goto discov_stopped;
3014 
3015 	if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
3016 		return;
3017 
3018 	if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
3019 		if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
3020 		    hdev->discovery.state != DISCOVERY_RESOLVING)
3021 			goto discov_stopped;
3022 
3023 		return;
3024 	}
3025 
3026 	hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
3027 		     HCI_CMD_TIMEOUT, &status);
3028 	if (status) {
3029 		bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
3030 		goto discov_stopped;
3031 	}
3032 
3033 	return;
3034 
3035 discov_stopped:
3036 	hci_dev_lock(hdev);
3037 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3038 	hci_dev_unlock(hdev);
3039 }
3040 
3041 static int le_scan_restart(struct hci_request *req, unsigned long opt)
3042 {
3043 	struct hci_dev *hdev = req->hdev;
3044 
3045 	/* If controller is not scanning we are done. */
3046 	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
3047 		return 0;
3048 
3049 	if (hdev->scanning_paused) {
3050 		bt_dev_dbg(hdev, "Scanning is paused for suspend");
3051 		return 0;
3052 	}
3053 
3054 	hci_req_add_le_scan_disable(req, false);
3055 
3056 	if (use_ext_scan(hdev)) {
3057 		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
3058 
3059 		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
3060 		ext_enable_cp.enable = LE_SCAN_ENABLE;
3061 		ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3062 
3063 		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
3064 			    sizeof(ext_enable_cp), &ext_enable_cp);
3065 	} else {
3066 		struct hci_cp_le_set_scan_enable cp;
3067 
3068 		memset(&cp, 0, sizeof(cp));
3069 		cp.enable = LE_SCAN_ENABLE;
3070 		cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3071 		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
3072 	}
3073 
3074 	return 0;
3075 }
3076 
3077 static void le_scan_restart_work(struct work_struct *work)
3078 {
3079 	struct hci_dev *hdev = container_of(work, struct hci_dev,
3080 					    le_scan_restart.work);
3081 	unsigned long timeout, duration, scan_start, now;
3082 	u8 status;
3083 
3084 	bt_dev_dbg(hdev, "");
3085 
3086 	hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
3087 	if (status) {
3088 		bt_dev_err(hdev, "failed to restart LE scan: status %d",
3089 			   status);
3090 		return;
3091 	}
3092 
3093 	hci_dev_lock(hdev);
3094 
3095 	if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
3096 	    !hdev->discovery.scan_start)
3097 		goto unlock;
3098 
3099 	/* When the scan was started, hdev->le_scan_disable was queued to run
3100 	 * 'duration' after scan_start. During scan restart this work has
3101 	 * been canceled, so queue it again with the remaining timeout to
3102 	 * make sure that the scan does not run indefinitely.
3103 	 */
3104 	duration = hdev->discovery.scan_duration;
3105 	scan_start = hdev->discovery.scan_start;
3106 	now = jiffies;
3107 	if (now - scan_start <= duration) {
3108 		int elapsed;
3109 
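		/* jiffies may have wrapped around since scan_start, so
		 * account for that when computing the elapsed time.
		 */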
3110 		if (now >= scan_start)
3111 			elapsed = now - scan_start;
3112 		else
3113 			elapsed = ULONG_MAX - scan_start + now;
3114 
3115 		timeout = duration - elapsed;
3116 	} else {
3117 		timeout = 0;
3118 	}
3119 
3120 	queue_delayed_work(hdev->req_workqueue,
3121 			   &hdev->le_scan_disable, timeout);
3122 
3123 unlock:
3124 	hci_dev_unlock(hdev);
3125 }
3126 
3127 static int active_scan(struct hci_request *req, unsigned long opt)
3128 {
3129 	uint16_t interval = opt;
3130 	struct hci_dev *hdev = req->hdev;
3131 	u8 own_addr_type;
3132 	/* White list is not used for discovery */
3133 	u8 filter_policy = 0x00;
3134 	/* Discovery doesn't require controller address resolution */
3135 	bool addr_resolv = false;
3136 	int err;
3137 
3138 	bt_dev_dbg(hdev, "");
3139 
3140 	/* If controller is scanning, it means the background scanning is
3141 	 * running. Thus, we should temporarily stop it in order to set the
3142 	 * discovery scanning parameters.
3143 	 */
3144 	if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
3145 		hci_req_add_le_scan_disable(req, false);
3146 		cancel_interleave_scan(hdev);
3147 	}
3148 
3149 	/* All active scans will be done with either a resolvable private
3150 	 * address (when privacy feature has been enabled) or non-resolvable
3151 	 * private address.
3152 	 */
3153 	err = hci_update_random_address(req, true, scan_use_rpa(hdev),
3154 					&own_addr_type);
3155 	if (err < 0)
3156 		own_addr_type = ADDR_LE_DEV_PUBLIC;
3157 
3158 	hci_req_start_scan(req, LE_SCAN_ACTIVE, interval,
3159 			   hdev->le_scan_window_discovery, own_addr_type,
3160 			   filter_policy, addr_resolv);
3161 	return 0;
3162 }
3163 
3164 static int interleaved_discov(struct hci_request *req, unsigned long opt)
3165 {
3166 	int err;
3167 
3168 	bt_dev_dbg(req->hdev, "");
3169 
3170 	err = active_scan(req, opt);
3171 	if (err)
3172 		return err;
3173 
3174 	return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
3175 }
3176 
3177 static void start_discovery(struct hci_dev *hdev, u8 *status)
3178 {
3179 	unsigned long timeout;
3180 
3181 	bt_dev_dbg(hdev, "type %u", hdev->discovery.type);
3182 
3183 	switch (hdev->discovery.type) {
3184 	case DISCOV_TYPE_BREDR:
3185 		if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
3186 			hci_req_sync(hdev, bredr_inquiry,
3187 				     DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
3188 				     status);
3189 		return;
3190 	case DISCOV_TYPE_INTERLEAVED:
3191 		/* When running simultaneous discovery, the LE scanning time
3192 		 * should occupy the whole discovery time since BR/EDR inquiry
3193 		 * and LE scanning are scheduled by the controller.
3194 		 *
3195 		 * For interleaving discovery in comparison, BR/EDR inquiry
3196 		 * and LE scanning are done sequentially with separate
3197 		 * timeouts.
3198 		 */
3199 		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
3200 			     &hdev->quirks)) {
3201 			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3202 			/* During simultaneous discovery, we double LE scan
3203 			 * interval. We must leave some time for the controller
3204 			 * to do BR/EDR inquiry.
3205 			 */
3206 			hci_req_sync(hdev, interleaved_discov,
3207 				     hdev->le_scan_int_discovery * 2, HCI_CMD_TIMEOUT,
3208 				     status);
3209 			break;
3210 		}
3211 
3212 		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
3213 		hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
3214 			     HCI_CMD_TIMEOUT, status);
3215 		break;
3216 	case DISCOV_TYPE_LE:
3217 		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3218 		hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
3219 			     HCI_CMD_TIMEOUT, status);
3220 		break;
3221 	default:
3222 		*status = HCI_ERROR_UNSPECIFIED;
3223 		return;
3224 	}
3225 
3226 	if (*status)
3227 		return;
3228 
3229 	bt_dev_dbg(hdev, "timeout %u ms", jiffies_to_msecs(timeout));
3230 
3231 	/* When service discovery is used and the controller has a
3232 	 * strict duplicate filter, it is important to remember the
3233 	 * start and duration of the scan. This is required for
3234 	 * restarting scanning during the discovery phase.
3235 	 */
3236 	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
3237 		     hdev->discovery.result_filtering) {
3238 		hdev->discovery.scan_start = jiffies;
3239 		hdev->discovery.scan_duration = timeout;
3240 	}
3241 
3242 	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
3243 			   timeout);
3244 }
3245 
3246 bool hci_req_stop_discovery(struct hci_request *req)
3247 {
3248 	struct hci_dev *hdev = req->hdev;
3249 	struct discovery_state *d = &hdev->discovery;
3250 	struct hci_cp_remote_name_req_cancel cp;
3251 	struct inquiry_entry *e;
3252 	bool ret = false;
3253 
3254 	bt_dev_dbg(hdev, "state %u", hdev->discovery.state);
3255 
3256 	if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
3257 		if (test_bit(HCI_INQUIRY, &hdev->flags))
3258 			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3259 
3260 		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
3261 			cancel_delayed_work(&hdev->le_scan_disable);
3262 			cancel_delayed_work(&hdev->le_scan_restart);
3263 			hci_req_add_le_scan_disable(req, false);
3264 		}
3265 
3266 		ret = true;
3267 	} else {
3268 		/* Passive scanning */
3269 		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
3270 			hci_req_add_le_scan_disable(req, false);
3271 			ret = true;
3272 		}
3273 	}
3274 
3275 	/* No further actions needed for LE-only discovery */
3276 	if (d->type == DISCOV_TYPE_LE)
3277 		return ret;
3278 
3279 	if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
3280 		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
3281 						     NAME_PENDING);
3282 		if (!e)
3283 			return ret;
3284 
3285 		bacpy(&cp.bdaddr, &e->data.bdaddr);
3286 		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
3287 			    &cp);
3288 		ret = true;
3289 	}
3290 
3291 	return ret;
3292 }
3293 
3294 static int stop_discovery(struct hci_request *req, unsigned long opt)
3295 {
3296 	hci_dev_lock(req->hdev);
3297 	hci_req_stop_discovery(req);
3298 	hci_dev_unlock(req->hdev);
3299 
3300 	return 0;
3301 }
3302 
3303 static void discov_update(struct work_struct *work)
3304 {
3305 	struct hci_dev *hdev = container_of(work, struct hci_dev,
3306 					    discov_update);
3307 	u8 status = 0;
3308 
3309 	switch (hdev->discovery.state) {
3310 	case DISCOVERY_STARTING:
3311 		start_discovery(hdev, &status);
3312 		mgmt_start_discovery_complete(hdev, status);
3313 		if (status)
3314 			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3315 		else
3316 			hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3317 		break;
3318 	case DISCOVERY_STOPPING:
3319 		hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
3320 		mgmt_stop_discovery_complete(hdev, status);
3321 		if (!status)
3322 			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3323 		break;
3324 	case DISCOVERY_STOPPED:
3325 	default:
3326 		return;
3327 	}
3328 }
3329 
3330 static void discov_off(struct work_struct *work)
3331 {
3332 	struct hci_dev *hdev = container_of(work, struct hci_dev,
3333 					    discov_off.work);
3334 
3335 	bt_dev_dbg(hdev, "");
3336 
3337 	hci_dev_lock(hdev);
3338 
3339 	/* When discoverable timeout triggers, then just make sure
3340 	 * the limited discoverable flag is cleared. Even in the case
3341 	 * of a timeout triggered from general discoverable, it is
3342 	 * safe to unconditionally clear the flag.
3343 	 */
3344 	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
3345 	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
3346 	hdev->discov_timeout = 0;
3347 
3348 	hci_dev_unlock(hdev);
3349 
3350 	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
3351 	mgmt_new_settings(hdev);
3352 }
3353 
3354 static int powered_update_hci(struct hci_request *req, unsigned long opt)
3355 {
3356 	struct hci_dev *hdev = req->hdev;
3357 	u8 link_sec;
3358 
3359 	hci_dev_lock(hdev);
3360 
3361 	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
3362 	    !lmp_host_ssp_capable(hdev)) {
3363 		u8 mode = 0x01;
3364 
3365 		hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
3366 
3367 		if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
3368 			u8 support = 0x01;
3369 
3370 			hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
3371 				    sizeof(support), &support);
3372 		}
3373 	}
3374 
3375 	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
3376 	    lmp_bredr_capable(hdev)) {
3377 		struct hci_cp_write_le_host_supported cp;
3378 
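		/* Enable LE on the host; simultaneous LE and BR/EDR to the
		 * same remote device (simul) is always left disabled.
		 */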
3379 		cp.le = 0x01;
3380 		cp.simul = 0x00;
3381 
3382 		/* Check first if we already have the right
3383 		 * host state (host features set)
3384 		 */
3385 		if (cp.le != lmp_host_le_capable(hdev) ||
3386 		    cp.simul != lmp_host_le_br_capable(hdev))
3387 			hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
3388 				    sizeof(cp), &cp);
3389 	}
3390 
3391 	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
3392 		/* Make sure the controller has a good default for
3393 		 * advertising data. This also applies to the case
3394 		 * where BR/EDR was toggled during the AUTO_OFF phase.
3395 		 */
3396 		if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
3397 		    list_empty(&hdev->adv_instances)) {
3398 			int err;
3399 
3400 			if (ext_adv_capable(hdev)) {
3401 				err = __hci_req_setup_ext_adv_instance(req,
3402 								       0x00);
3403 				if (!err)
3404 					__hci_req_update_scan_rsp_data(req,
3405 								       0x00);
3406 			} else {
3407 				err = 0;
3408 				__hci_req_update_adv_data(req, 0x00);
3409 				__hci_req_update_scan_rsp_data(req, 0x00);
3410 			}
3411 
3412 			if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
3413 				if (!ext_adv_capable(hdev))
3414 					__hci_req_enable_advertising(req);
3415 				else if (!err)
3416 					__hci_req_enable_ext_advertising(req,
3417 									 0x00);
3418 			}
3419 		} else if (!list_empty(&hdev->adv_instances)) {
3420 			struct adv_info *adv_instance;
3421 
3422 			adv_instance = list_first_entry(&hdev->adv_instances,
3423 							struct adv_info, list);
3424 			__hci_req_schedule_adv_instance(req,
3425 							adv_instance->instance,
3426 							true);
3427 		}
3428 	}
3429 
3430 	link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
3431 	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
3432 		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
3433 			    sizeof(link_sec), &link_sec);
3434 
3435 	if (lmp_bredr_capable(hdev)) {
3436 		if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
3437 			__hci_req_write_fast_connectable(req, true);
3438 		else
3439 			__hci_req_write_fast_connectable(req, false);
3440 		__hci_req_update_scan(req);
3441 		__hci_req_update_class(req);
3442 		__hci_req_update_name(req);
3443 		__hci_req_update_eir(req);
3444 	}
3445 
3446 	hci_dev_unlock(hdev);
3447 	return 0;
3448 }
3449 
3450 int __hci_req_hci_power_on(struct hci_dev *hdev)
3451 {
3452 	/* Register the available SMP channels (BR/EDR and LE) only when
3453 	 * successfully powering on the controller. This late
3454 	 * registration is required so that LE SMP can clearly decide if
3455 	 * the public address or static address is used.
3456 	 */
3457 	smp_register(hdev);
3458 
3459 	return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
3460 			      NULL);
3461 }
3462 
3463 void hci_request_setup(struct hci_dev *hdev)
3464 {
3465 	INIT_WORK(&hdev->discov_update, discov_update);
3466 	INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
3467 	INIT_WORK(&hdev->scan_update, scan_update_work);
3468 	INIT_WORK(&hdev->connectable_update, connectable_update_work);
3469 	INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
3470 	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
3471 	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3472 	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
3473 	INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
3474 	INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work);
3475 }
3476 
3477 void hci_request_cancel_all(struct hci_dev *hdev)
3478 {
3479 	hci_req_sync_cancel(hdev, ENODEV);
3480 
3481 	cancel_work_sync(&hdev->discov_update);
3482 	cancel_work_sync(&hdev->bg_scan_update);
3483 	cancel_work_sync(&hdev->scan_update);
3484 	cancel_work_sync(&hdev->connectable_update);
3485 	cancel_work_sync(&hdev->discoverable_update);
3486 	cancel_delayed_work_sync(&hdev->discov_off);
3487 	cancel_delayed_work_sync(&hdev->le_scan_disable);
3488 	cancel_delayed_work_sync(&hdev->le_scan_restart);
3489 
3490 	if (hdev->adv_instance_timeout) {
3491 		cancel_delayed_work_sync(&hdev->adv_instance_expire);
3492 		hdev->adv_instance_timeout = 0;
3493 	}
3494 
3495 	cancel_interleave_scan(hdev);
3496 }
3497