1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * BlueZ - Bluetooth protocol stack for Linux
4  *
5  * Copyright (C) 2021 Intel Corporation
6  */
7 
8 #include <net/bluetooth/bluetooth.h>
9 #include <net/bluetooth/hci_core.h>
10 #include <net/bluetooth/mgmt.h>
11 
12 #include "hci_request.h"
13 #include "smp.h"
14 #include "eir.h"
15 
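/* Request-completion callback shared by all synchronous commands: it records
 * the command result, stashes the response skb in hdev->req_skb and wakes up
 * the caller sleeping in __hci_cmd_sync_sk() below.
 */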
16 static void hci_cmd_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
17 				  struct sk_buff *skb)
18 {
19 	bt_dev_dbg(hdev, "result 0x%2.2x", result);
20 
21 	if (hdev->req_status != HCI_REQ_PEND)
22 		return;
23 
24 	hdev->req_result = result;
25 	hdev->req_status = HCI_REQ_DONE;
26 
27 	if (skb) {
28 		struct sock *sk = hci_skb_sk(skb);
29 
30 		/* Drop sk reference if set */
31 		if (sk)
32 			sock_put(sk);
33 
34 		hdev->req_skb = skb_get(skb);
35 	}
36 
37 	wake_up_interruptible(&hdev->req_wait_q);
38 }
39 
40 static struct sk_buff *hci_cmd_sync_alloc(struct hci_dev *hdev, u16 opcode,
41 					  u32 plen, const void *param,
42 					  struct sock *sk)
43 {
44 	int len = HCI_COMMAND_HDR_SIZE + plen;
45 	struct hci_command_hdr *hdr;
46 	struct sk_buff *skb;
47 
48 	skb = bt_skb_alloc(len, GFP_ATOMIC);
49 	if (!skb)
50 		return NULL;
51 
52 	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
53 	hdr->opcode = cpu_to_le16(opcode);
54 	hdr->plen   = plen;
55 
56 	if (plen)
57 		skb_put_data(skb, param, plen);
58 
59 	bt_dev_dbg(hdev, "skb len %d", skb->len);
60 
61 	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
62 	hci_skb_opcode(skb) = opcode;
63 
64 	/* Grab a reference if command needs to be associated with a sock (e.g.
65 	 * likely mgmt socket that initiated the command).
66 	 */
67 	if (sk) {
68 		hci_skb_sk(skb) = sk;
69 		sock_hold(sk);
70 	}
71 
72 	return skb;
73 }
74 
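/* Append a single command to the request's queue, marking the first queued
 * command with HCI_REQ_START and recording the event (if any) that is
 * expected to complete it.
 */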
75 static void hci_cmd_sync_add(struct hci_request *req, u16 opcode, u32 plen,
76 			     const void *param, u8 event, struct sock *sk)
77 {
78 	struct hci_dev *hdev = req->hdev;
79 	struct sk_buff *skb;
80 
81 	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
82 
83 	/* If an error occurred during request building, there is no point in
84 	 * queueing the HCI command. We can simply return.
85 	 */
86 	if (req->err)
87 		return;
88 
89 	skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, sk);
90 	if (!skb) {
91 		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
92 			   opcode);
93 		req->err = -ENOMEM;
94 		return;
95 	}
96 
97 	if (skb_queue_empty(&req->cmd_q))
98 		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
99 
100 	bt_cb(skb)->hci.req_event = event;
101 
102 	skb_queue_tail(&req->cmd_q, skb);
103 }
104 
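/* Splice the request's command queue onto hdev->cmd_q and kick cmd_work so
 * transmission starts; the last queued command carries the completion
 * callback that will wake the waiter.
 */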
105 static int hci_cmd_sync_run(struct hci_request *req)
106 {
107 	struct hci_dev *hdev = req->hdev;
108 	struct sk_buff *skb;
109 	unsigned long flags;
110 
111 	bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q));
112 
113 	/* If an error occurred during request building, remove all HCI
114 	 * commands queued on the HCI request queue.
115 	 */
116 	if (req->err) {
117 		skb_queue_purge(&req->cmd_q);
118 		return req->err;
119 	}
120 
121 	/* Do not allow empty requests */
122 	if (skb_queue_empty(&req->cmd_q))
123 		return -ENODATA;
124 
125 	skb = skb_peek_tail(&req->cmd_q);
126 	bt_cb(skb)->hci.req_complete_skb = hci_cmd_sync_complete;
127 	bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
128 
129 	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
130 	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
131 	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
132 
133 	queue_work(hdev->workqueue, &hdev->cmd_work);
134 
135 	return 0;
136 }
137 
138 /* This function requires the caller holds hdev->req_lock. */
139 struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
140 				  const void *param, u8 event, u32 timeout,
141 				  struct sock *sk)
142 {
143 	struct hci_request req;
144 	struct sk_buff *skb;
145 	int err = 0;
146 
147 	bt_dev_dbg(hdev, "");
148 
149 	hci_req_init(&req, hdev);
150 
151 	hci_cmd_sync_add(&req, opcode, plen, param, event, sk);
152 
153 	hdev->req_status = HCI_REQ_PEND;
154 
155 	err = hci_cmd_sync_run(&req);
156 	if (err < 0)
157 		return ERR_PTR(err);
158 
159 	err = wait_event_interruptible_timeout(hdev->req_wait_q,
160 					       hdev->req_status != HCI_REQ_PEND,
161 					       timeout);
162 
163 	if (err == -ERESTARTSYS)
164 		return ERR_PTR(-EINTR);
165 
166 	switch (hdev->req_status) {
167 	case HCI_REQ_DONE:
168 		err = -bt_to_errno(hdev->req_result);
169 		break;
170 
171 	case HCI_REQ_CANCELED:
172 		err = -hdev->req_result;
173 		break;
174 
175 	default:
176 		err = -ETIMEDOUT;
177 		break;
178 	}
179 
180 	hdev->req_status = 0;
181 	hdev->req_result = 0;
182 	skb = hdev->req_skb;
183 	hdev->req_skb = NULL;
184 
185 	bt_dev_dbg(hdev, "end: err %d", err);
186 
187 	if (err < 0) {
188 		kfree_skb(skb);
189 		return ERR_PTR(err);
190 	}
191 
192 	return skb;
193 }
194 EXPORT_SYMBOL(__hci_cmd_sync_sk);
195 
196 /* This function requires the caller holds hdev->req_lock. */
197 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
198 			       const void *param, u32 timeout)
199 {
200 	return __hci_cmd_sync_sk(hdev, opcode, plen, param, 0, timeout, NULL);
201 }
202 EXPORT_SYMBOL(__hci_cmd_sync);
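/* Example (hypothetical caller, not part of this file): issue a command
 * synchronously while holding the request lock and consume the response:
 *
 *	struct sk_buff *skb;
 *
 *	hci_req_sync_lock(hdev);
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
 *			     HCI_CMD_TIMEOUT);
 *	hci_req_sync_unlock(hdev);
 *
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	...parse skb->data...
 *	kfree_skb(skb);
 */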
203 
204 /* Send HCI command and wait for command complete event */
205 struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
206 			     const void *param, u32 timeout)
207 {
208 	struct sk_buff *skb;
209 
210 	if (!test_bit(HCI_UP, &hdev->flags))
211 		return ERR_PTR(-ENETDOWN);
212 
213 	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
214 
215 	hci_req_sync_lock(hdev);
216 	skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
217 	hci_req_sync_unlock(hdev);
218 
219 	return skb;
220 }
221 EXPORT_SYMBOL(hci_cmd_sync);
222 
223 /* This function requires the caller holds hdev->req_lock. */
224 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
225 				  const void *param, u8 event, u32 timeout)
226 {
227 	return __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout,
228 				 NULL);
229 }
230 EXPORT_SYMBOL(__hci_cmd_sync_ev);
231 
232 /* This function requires the caller holds hdev->req_lock. */
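/* Like __hci_cmd_sync_sk() but collapses the response into a status code:
 * returns a negative errno on failure, otherwise 0 or the positive HCI
 * status byte taken from the response parameters.
 */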
233 int __hci_cmd_sync_status_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
234 			     const void *param, u8 event, u32 timeout,
235 			     struct sock *sk)
236 {
237 	struct sk_buff *skb;
238 	u8 status;
239 
240 	skb = __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout, sk);
241 	if (IS_ERR(skb)) {
242 		bt_dev_err(hdev, "Opcode 0x%4.4x failed: %ld", opcode,
243 			   PTR_ERR(skb));
244 		return PTR_ERR(skb);
245 	}
246 
247 	/* If the command returns a status event, skb will be set to NULL as
248 	 * there are no parameters; in case of failure, IS_ERR(skb) would have
249 	 * been true and the actual error can be retrieved with PTR_ERR(skb).
250 	 */
251 	if (!skb)
252 		return 0;
253 
254 	status = skb->data[0];
255 
256 	kfree_skb(skb);
257 
258 	return status;
259 }
260 EXPORT_SYMBOL(__hci_cmd_sync_status_sk);
261 
262 int __hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen,
263 			  const void *param, u32 timeout)
264 {
265 	return __hci_cmd_sync_status_sk(hdev, opcode, plen, param, 0, timeout,
266 					NULL);
267 }
268 EXPORT_SYMBOL(__hci_cmd_sync_status);
269 
270 static void hci_cmd_sync_work(struct work_struct *work)
271 {
272 	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_sync_work);
273 	struct hci_cmd_sync_work_entry *entry;
274 	hci_cmd_sync_work_func_t func;
275 	hci_cmd_sync_work_destroy_t destroy;
276 	void *data;
277 
278 	bt_dev_dbg(hdev, "");
279 
280 	mutex_lock(&hdev->cmd_sync_work_lock);
281 	entry = list_first_entry_or_null(&hdev->cmd_sync_work_list,
282 					 struct hci_cmd_sync_work_entry, list);
283 	if (entry) {
284 		list_del(&entry->list);
285 		func = entry->func;
286 		data = entry->data;
287 		destroy = entry->destroy;
288 		kfree(entry);
289 	} else {
290 		func = NULL;
291 		data = NULL;
292 		destroy = NULL;
293 	}
294 	mutex_unlock(&hdev->cmd_sync_work_lock);
295 
296 	if (func) {
297 		int err;
298 
299 		hci_req_sync_lock(hdev);
300 
301 		err = func(hdev, data);
302 
303 		if (destroy)
304 			destroy(hdev, data, err);
305 
306 		hci_req_sync_unlock(hdev);
307 	}
308 }
309 
310 void hci_cmd_sync_init(struct hci_dev *hdev)
311 {
312 	INIT_WORK(&hdev->cmd_sync_work, hci_cmd_sync_work);
313 	INIT_LIST_HEAD(&hdev->cmd_sync_work_list);
314 	mutex_init(&hdev->cmd_sync_work_lock);
315 }
316 
317 void hci_cmd_sync_clear(struct hci_dev *hdev)
318 {
319 	struct hci_cmd_sync_work_entry *entry, *tmp;
320 
321 	cancel_work_sync(&hdev->cmd_sync_work);
322 
323 	list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list) {
324 		if (entry->destroy)
325 			entry->destroy(hdev, entry->data, -ECANCELED);
326 
327 		list_del(&entry->list);
328 		kfree(entry);
329 	}
330 }
331 
332 int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
333 		       void *data, hci_cmd_sync_work_destroy_t destroy)
334 {
335 	struct hci_cmd_sync_work_entry *entry;
336 
337 	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
338 	if (!entry)
339 		return -ENOMEM;
340 
341 	entry->func = func;
342 	entry->data = data;
343 	entry->destroy = destroy;
344 
345 	mutex_lock(&hdev->cmd_sync_work_lock);
346 	list_add_tail(&entry->list, &hdev->cmd_sync_work_list);
347 	mutex_unlock(&hdev->cmd_sync_work_lock);
348 
349 	queue_work(hdev->req_workqueue, &hdev->cmd_sync_work);
350 
351 	return 0;
352 }
353 EXPORT_SYMBOL(hci_cmd_sync_queue);
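/* Example (hypothetical caller, not part of this file): defer work to the
 * cmd_sync context, where it runs with hdev->req_lock held and may freely
 * call the *_sync() helpers in this file:
 *
 *	static int update_adv_sync(struct hci_dev *hdev, void *data)
 *	{
 *		return hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
 *	}
 *
 *	hci_cmd_sync_queue(hdev, update_adv_sync, NULL, NULL);
 */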
354 
355 int hci_update_eir_sync(struct hci_dev *hdev)
356 {
357 	struct hci_cp_write_eir cp;
358 
359 	bt_dev_dbg(hdev, "");
360 
361 	if (!hdev_is_powered(hdev))
362 		return 0;
363 
364 	if (!lmp_ext_inq_capable(hdev))
365 		return 0;
366 
367 	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
368 		return 0;
369 
370 	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
371 		return 0;
372 
373 	memset(&cp, 0, sizeof(cp));
374 
375 	eir_create(hdev, cp.data);
376 
377 	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
378 		return 0;
379 
380 	memcpy(hdev->eir, cp.data, sizeof(cp.data));
381 
382 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp,
383 				     HCI_CMD_TIMEOUT);
384 }
385 
386 static u8 get_service_classes(struct hci_dev *hdev)
387 {
388 	struct bt_uuid *uuid;
389 	u8 val = 0;
390 
391 	list_for_each_entry(uuid, &hdev->uuids, list)
392 		val |= uuid->svc_hint;
393 
394 	return val;
395 }
396 
397 int hci_update_class_sync(struct hci_dev *hdev)
398 {
399 	u8 cod[3];
400 
401 	bt_dev_dbg(hdev, "");
402 
403 	if (!hdev_is_powered(hdev))
404 		return 0;
405 
406 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
407 		return 0;
408 
409 	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
410 		return 0;
411 
412 	cod[0] = hdev->minor_class;
413 	cod[1] = hdev->major_class;
414 	cod[2] = get_service_classes(hdev);
415 
416 	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
417 		cod[1] |= 0x20;
418 
419 	if (memcmp(cod, hdev->dev_class, 3) == 0)
420 		return 0;
421 
422 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CLASS_OF_DEV,
423 				     sizeof(cod), cod, HCI_CMD_TIMEOUT);
424 }
425 
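/* le_states is a little-endian bitmask of the supported LE states; state
 * bit N is intended to be tested as le_states[N / 8] & (1 << (N % 8)),
 * which is where the magic masks below come from.
 */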
426 static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
427 {
428 	/* If there is no connection we are OK to advertise. */
429 	if (hci_conn_num(hdev, LE_LINK) == 0)
430 		return true;
431 
432 	/* Check le_states if there is any connection in peripheral role. */
433 	if (hdev->conn_hash.le_num_peripheral > 0) {
434 		/* Peripheral connection state and non connectable mode
435 		 * bit 20.
436 		 */
437 		if (!connectable && !(hdev->le_states[2] & 0x10))
438 			return false;
439 
440 		/* Peripheral connection state and connectable mode bit 38
441 		 * and scannable bit 21.
442 		 */
443 		if (connectable && (!(hdev->le_states[4] & 0x40) ||
444 				    !(hdev->le_states[2] & 0x20)))
445 			return false;
446 	}
447 
448 	/* Check le_states if there is any connection in central role. */
449 	if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_peripheral) {
450 		/* Central connection state and non connectable mode bit 18. */
451 		if (!connectable && !(hdev->le_states[2] & 0x02))
452 			return false;
453 
454 		/* Central connection state and connectable mode bit 35 and
455 		 * scannable bit 19.
456 		 */
457 		if (connectable && (!(hdev->le_states[4] & 0x08) ||
458 				    !(hdev->le_states[2] & 0x08)))
459 			return false;
460 	}
461 
462 	return true;
463 }
464 
465 static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
466 {
467 	/* If privacy is not enabled don't use RPA */
468 	if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
469 		return false;
470 
471 	/* If basic privacy mode is enabled use RPA */
472 	if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
473 		return true;
474 
475 	/* If limited privacy mode is enabled don't use RPA if we're
476 	 * both discoverable and bondable.
477 	 */
478 	if ((flags & MGMT_ADV_FLAG_DISCOV) &&
479 	    hci_dev_test_flag(hdev, HCI_BONDABLE))
480 		return false;
481 
482 	/* We're neither bondable nor discoverable in the limited
483 	 * privacy mode, therefore use RPA.
484 	 */
485 	return true;
486 }
487 
488 static int hci_set_random_addr_sync(struct hci_dev *hdev, bdaddr_t *rpa)
489 {
490 	/* If we're advertising or initiating an LE connection we can't
491 	 * go ahead and change the random address at this time. This is
492 	 * because the eventual initiator address used for the
493 	 * subsequently created connection will be undefined (some
494 	 * controllers use the new address and others the one we had
495 	 * when the operation started).
496 	 *
497 	 * In this kind of scenario skip the update and let the random
498 	 * address be updated at the next cycle.
499 	 */
500 	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
501 	    hci_lookup_le_connect(hdev)) {
502 		bt_dev_dbg(hdev, "Deferring random address update");
503 		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
504 		return 0;
505 	}
506 
507 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_RANDOM_ADDR,
508 				     6, rpa, HCI_CMD_TIMEOUT);
509 }
510 
511 int hci_update_random_address_sync(struct hci_dev *hdev, bool require_privacy,
512 				   bool rpa, u8 *own_addr_type)
513 {
514 	int err;
515 
516 	/* If privacy is enabled use a resolvable private address. If
517 	 * the current RPA has expired or there is something other than
518 	 * the current RPA in use, then generate a new one.
519 	 */
520 	if (rpa) {
521 		/* If the controller supports LL Privacy, use own address type
522 		 * 0x03 (resolvable private address, resolved by the controller).
523 		 */
524 		if (use_ll_privacy(hdev))
525 			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
526 		else
527 			*own_addr_type = ADDR_LE_DEV_RANDOM;
528 
529 		/* Check if RPA is valid */
530 		if (rpa_valid(hdev))
531 			return 0;
532 
533 		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
534 		if (err < 0) {
535 			bt_dev_err(hdev, "failed to generate new RPA");
536 			return err;
537 		}
538 
539 		err = hci_set_random_addr_sync(hdev, &hdev->rpa);
540 		if (err)
541 			return err;
542 
543 		return 0;
544 	}
545 
546 	/* In case of required privacy without resolvable private address,
547 	 * use a non-resolvable private address. This is useful for active
548 	 * scanning and non-connectable advertising.
549 	 */
550 	if (require_privacy) {
551 		bdaddr_t nrpa;
552 
553 		while (true) {
554 			/* The non-resolvable private address is generated
555 			 * from random six bytes with the two most significant
556 			 * bits cleared.
557 			 */
558 			get_random_bytes(&nrpa, 6);
559 			nrpa.b[5] &= 0x3f;
560 
561 			/* The non-resolvable private address shall not be
562 			 * equal to the public address.
563 			 */
564 			if (bacmp(&hdev->bdaddr, &nrpa))
565 				break;
566 		}
567 
568 		*own_addr_type = ADDR_LE_DEV_RANDOM;
569 
570 		return hci_set_random_addr_sync(hdev, &nrpa);
571 	}
572 
573 	/* If forcing static address is in use or there is no public
574 	 * address use the static address as random address (but skip
575 	 * the HCI command if the current random address is already the
576 	 * static one).
577 	 *
578 	 * In case BR/EDR has been disabled on a dual-mode controller
579 	 * and a static address has been configured, then use that
580 	 * address instead of the public BR/EDR address.
581 	 */
582 	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
583 	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
584 	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
585 	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
586 		*own_addr_type = ADDR_LE_DEV_RANDOM;
587 		if (bacmp(&hdev->static_addr, &hdev->random_addr))
588 			return hci_set_random_addr_sync(hdev,
589 							&hdev->static_addr);
590 		return 0;
591 	}
592 
593 	/* Neither privacy nor static address is being used so use a
594 	 * public address.
595 	 */
596 	*own_addr_type = ADDR_LE_DEV_PUBLIC;
597 
598 	return 0;
599 }
600 
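/* Disable one extended advertising instance, or all of them when instance is
 * 0x00 (in that case num_of_sets is 0 and the set entry is ignored).
 */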
601 static int hci_disable_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance)
602 {
603 	struct hci_cp_le_set_ext_adv_enable *cp;
604 	struct hci_cp_ext_adv_set *set;
605 	u8 data[sizeof(*cp) + sizeof(*set) * 1];
606 	u8 size;
607 
608 	/* If request specifies an instance that doesn't exist, fail */
609 	if (instance > 0) {
610 		struct adv_info *adv;
611 
612 		adv = hci_find_adv_instance(hdev, instance);
613 		if (!adv)
614 			return -EINVAL;
615 
616 		/* If not enabled there is nothing to do */
617 		if (!adv->enabled)
618 			return 0;
619 	}
620 
621 	memset(data, 0, sizeof(data));
622 
623 	cp = (void *)data;
624 	set = (void *)cp->data;
625 
626 	/* Instance 0x00 indicates all advertising instances will be disabled */
627 	cp->num_of_sets = !!instance;
628 	cp->enable = 0x00;
629 
630 	set->handle = instance;
631 
632 	size = sizeof(*cp) + sizeof(*set) * cp->num_of_sets;
633 
634 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE,
635 				     size, data, HCI_CMD_TIMEOUT);
636 }
637 
638 static int hci_set_adv_set_random_addr_sync(struct hci_dev *hdev, u8 instance,
639 					    bdaddr_t *random_addr)
640 {
641 	struct hci_cp_le_set_adv_set_rand_addr cp;
642 	int err;
643 
644 	if (!instance) {
645 		/* Instance 0x00 doesn't have an adv_info; instead it uses
646 		 * hdev->random_addr to track its address, so whenever that
647 		 * needs to be updated this also sets the random address, since
648 		 * hdev->random_addr is shared with the scan state machine.
649 		 */
650 		err = hci_set_random_addr_sync(hdev, random_addr);
651 		if (err)
652 			return err;
653 	}
654 
655 	memset(&cp, 0, sizeof(cp));
656 
657 	cp.handle = instance;
658 	bacpy(&cp.bdaddr, random_addr);
659 
660 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
661 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
662 }
663 
664 int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance)
665 {
666 	struct hci_cp_le_set_ext_adv_params cp;
667 	bool connectable;
668 	u32 flags;
669 	bdaddr_t random_addr;
670 	u8 own_addr_type;
671 	int err;
672 	struct adv_info *adv;
673 	bool secondary_adv;
674 
675 	if (instance > 0) {
676 		adv = hci_find_adv_instance(hdev, instance);
677 		if (!adv)
678 			return -EINVAL;
679 	} else {
680 		adv = NULL;
681 	}
682 
683 	/* Updating parameters of an active instance will return a
684 	 * Command Disallowed error, so we must first disable the
685 	 * instance if it is active.
686 	 */
687 	if (adv && !adv->pending) {
688 		err = hci_disable_ext_adv_instance_sync(hdev, instance);
689 		if (err)
690 			return err;
691 	}
692 
693 	flags = hci_adv_instance_flags(hdev, instance);
694 
695 	/* If the "connectable" instance flag was not set, then choose between
696 	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
697 	 */
698 	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
699 		      mgmt_get_connectable(hdev);
700 
701 	if (!is_advertising_allowed(hdev, connectable))
702 		return -EPERM;
703 
704 	/* Set require_privacy to true only when non-connectable
705 	 * advertising is used. In that case it is fine to use a
706 	 * non-resolvable private address.
707 	 */
708 	err = hci_get_random_address(hdev, !connectable,
709 				     adv_use_rpa(hdev, flags), adv,
710 				     &own_addr_type, &random_addr);
711 	if (err < 0)
712 		return err;
713 
714 	memset(&cp, 0, sizeof(cp));
715 
716 	if (adv) {
717 		hci_cpu_to_le24(adv->min_interval, cp.min_interval);
718 		hci_cpu_to_le24(adv->max_interval, cp.max_interval);
719 		cp.tx_power = adv->tx_power;
720 	} else {
721 		hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
722 		hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
723 		cp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
724 	}
725 
726 	secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
727 
728 	if (connectable) {
729 		if (secondary_adv)
730 			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
731 		else
732 			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
733 	} else if (hci_adv_instance_is_scannable(hdev, instance) ||
734 		   (flags & MGMT_ADV_PARAM_SCAN_RSP)) {
735 		if (secondary_adv)
736 			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
737 		else
738 			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
739 	} else {
740 		if (secondary_adv)
741 			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
742 		else
743 			cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
744 	}
745 
746 	/* If Own_Address_Type equals 0x02 or 0x03, the Peer_Address parameter
747 	 * contains the peer’s Identity Address and the Peer_Address_Type
748 	 * parameter contains the peer’s Identity Type (i.e., 0x00 or 0x01).
749 	 * These parameters are used to locate the corresponding local IRK in
750 	 * the resolving list; this IRK is used to generate their own address
751 	 * used in the advertisement.
752 	 */
753 	if (own_addr_type == ADDR_LE_DEV_RANDOM_RESOLVED)
754 		hci_copy_identity_address(hdev, &cp.peer_addr,
755 					  &cp.peer_addr_type);
756 
757 	cp.own_addr_type = own_addr_type;
758 	cp.channel_map = hdev->le_adv_channel_map;
759 	cp.handle = instance;
760 
761 	if (flags & MGMT_ADV_FLAG_SEC_2M) {
762 		cp.primary_phy = HCI_ADV_PHY_1M;
763 		cp.secondary_phy = HCI_ADV_PHY_2M;
764 	} else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
765 		cp.primary_phy = HCI_ADV_PHY_CODED;
766 		cp.secondary_phy = HCI_ADV_PHY_CODED;
767 	} else {
768 		/* In all other cases use 1M */
769 		cp.primary_phy = HCI_ADV_PHY_1M;
770 		cp.secondary_phy = HCI_ADV_PHY_1M;
771 	}
772 
773 	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS,
774 				    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
775 	if (err)
776 		return err;
777 
778 	if ((own_addr_type == ADDR_LE_DEV_RANDOM ||
779 	     own_addr_type == ADDR_LE_DEV_RANDOM_RESOLVED) &&
780 	    bacmp(&random_addr, BDADDR_ANY)) {
781 		/* Check if random address need to be updated */
782 		if (adv) {
783 			if (!bacmp(&random_addr, &adv->random_addr))
784 				return 0;
785 		} else {
786 			if (!bacmp(&random_addr, &hdev->random_addr))
787 				return 0;
788 		}
789 
790 		return hci_set_adv_set_random_addr_sync(hdev, instance,
791 							&random_addr);
792 	}
793 
794 	return 0;
795 }
796 
797 static int hci_set_ext_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance)
798 {
799 	struct {
800 		struct hci_cp_le_set_ext_scan_rsp_data cp;
801 		u8 data[HCI_MAX_EXT_AD_LENGTH];
802 	} pdu;
803 	u8 len;
804 
805 	memset(&pdu, 0, sizeof(pdu));
806 
807 	len = eir_create_scan_rsp(hdev, instance, pdu.data);
808 
809 	if (hdev->scan_rsp_data_len == len &&
810 	    !memcmp(pdu.data, hdev->scan_rsp_data, len))
811 		return 0;
812 
813 	memcpy(hdev->scan_rsp_data, pdu.data, len);
814 	hdev->scan_rsp_data_len = len;
815 
816 	pdu.cp.handle = instance;
817 	pdu.cp.length = len;
818 	pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
819 	pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
820 
821 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA,
822 				     sizeof(pdu.cp) + len, &pdu.cp,
823 				     HCI_CMD_TIMEOUT);
824 }
825 
826 static int __hci_set_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance)
827 {
828 	struct hci_cp_le_set_scan_rsp_data cp;
829 	u8 len;
830 
831 	memset(&cp, 0, sizeof(cp));
832 
833 	len = eir_create_scan_rsp(hdev, instance, cp.data);
834 
835 	if (hdev->scan_rsp_data_len == len &&
836 	    !memcmp(cp.data, hdev->scan_rsp_data, len))
837 		return 0;
838 
839 	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
840 	hdev->scan_rsp_data_len = len;
841 
842 	cp.length = len;
843 
844 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_RSP_DATA,
845 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
846 }
847 
848 int hci_update_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance)
849 {
850 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
851 		return 0;
852 
853 	if (ext_adv_capable(hdev))
854 		return hci_set_ext_scan_rsp_data_sync(hdev, instance);
855 
856 	return __hci_set_scan_rsp_data_sync(hdev, instance);
857 }
858 
859 int hci_enable_ext_advertising_sync(struct hci_dev *hdev, u8 instance)
860 {
861 	struct hci_cp_le_set_ext_adv_enable *cp;
862 	struct hci_cp_ext_adv_set *set;
863 	u8 data[sizeof(*cp) + sizeof(*set) * 1];
864 	struct adv_info *adv;
865 
866 	if (instance > 0) {
867 		adv = hci_find_adv_instance(hdev, instance);
868 		if (!adv)
869 			return -EINVAL;
870 		/* If already enabled there is nothing to do */
871 		if (adv->enabled)
872 			return 0;
873 	} else {
874 		adv = NULL;
875 	}
876 
877 	cp = (void *)data;
878 	set = (void *)cp->data;
879 
880 	memset(cp, 0, sizeof(*cp));
881 
882 	cp->enable = 0x01;
883 	cp->num_of_sets = 0x01;
884 
885 	memset(set, 0, sizeof(*set));
886 
887 	set->handle = instance;
888 
889 	/* Set duration per instance since controller is responsible for
890 	 * scheduling it.
891 	 */
892 	if (adv && adv->duration) {
893 		u16 duration = adv->timeout * MSEC_PER_SEC;
894 
895 		/* Time = N * 10 ms */
896 		set->duration = cpu_to_le16(duration / 10);
897 	}
898 
899 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE,
900 				     sizeof(*cp) +
901 				     sizeof(*set) * cp->num_of_sets,
902 				     data, HCI_CMD_TIMEOUT);
903 }
904 
905 int hci_start_ext_adv_sync(struct hci_dev *hdev, u8 instance)
906 {
907 	int err;
908 
909 	err = hci_setup_ext_adv_instance_sync(hdev, instance);
910 	if (err)
911 		return err;
912 
913 	err = hci_set_ext_scan_rsp_data_sync(hdev, instance);
914 	if (err)
915 		return err;
916 
917 	return hci_enable_ext_advertising_sync(hdev, instance);
918 }
919 
920 static int hci_start_adv_sync(struct hci_dev *hdev, u8 instance)
921 {
922 	int err;
923 
924 	if (ext_adv_capable(hdev))
925 		return hci_start_ext_adv_sync(hdev, instance);
926 
927 	err = hci_update_adv_data_sync(hdev, instance);
928 	if (err)
929 		return err;
930 
931 	err = hci_update_scan_rsp_data_sync(hdev, instance);
932 	if (err)
933 		return err;
934 
935 	return hci_enable_advertising_sync(hdev);
936 }
937 
938 int hci_enable_advertising_sync(struct hci_dev *hdev)
939 {
940 	struct adv_info *adv_instance;
941 	struct hci_cp_le_set_adv_param cp;
942 	u8 own_addr_type, enable = 0x01;
943 	bool connectable;
944 	u16 adv_min_interval, adv_max_interval;
945 	u32 flags;
946 	u8 status;
947 
948 	if (ext_adv_capable(hdev))
949 		return hci_enable_ext_advertising_sync(hdev,
950 						       hdev->cur_adv_instance);
951 
952 	flags = hci_adv_instance_flags(hdev, hdev->cur_adv_instance);
953 	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
954 
955 	/* If the "connectable" instance flag was not set, then choose between
956 	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
957 	 */
958 	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
959 		      mgmt_get_connectable(hdev);
960 
961 	if (!is_advertising_allowed(hdev, connectable))
962 		return -EINVAL;
963 
964 	status = hci_disable_advertising_sync(hdev);
965 	if (status)
966 		return status;
967 
968 	/* Clear the HCI_LE_ADV bit temporarily so that the
969 	 * hci_update_random_address knows that it's safe to go ahead
970 	 * and write a new random address. The flag will be set back on
971 	 * as soon as the SET_ADV_ENABLE HCI command completes.
972 	 */
973 	hci_dev_clear_flag(hdev, HCI_LE_ADV);
974 
975 	/* Set require_privacy to true only when non-connectable
976 	 * advertising is used. In that case it is fine to use a
977 	 * non-resolvable private address.
978 	 */
979 	status = hci_update_random_address_sync(hdev, !connectable,
980 						adv_use_rpa(hdev, flags),
981 						&own_addr_type);
982 	if (status)
983 		return status;
984 
985 	memset(&cp, 0, sizeof(cp));
986 
987 	if (adv_instance) {
988 		adv_min_interval = adv_instance->min_interval;
989 		adv_max_interval = adv_instance->max_interval;
990 	} else {
991 		adv_min_interval = hdev->le_adv_min_interval;
992 		adv_max_interval = hdev->le_adv_max_interval;
993 	}
994 
995 	if (connectable) {
996 		cp.type = LE_ADV_IND;
997 	} else {
998 		if (hci_adv_instance_is_scannable(hdev, hdev->cur_adv_instance))
999 			cp.type = LE_ADV_SCAN_IND;
1000 		else
1001 			cp.type = LE_ADV_NONCONN_IND;
1002 
1003 		if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
1004 		    hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1005 			adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
1006 			adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
1007 		}
1008 	}
1009 
1010 	cp.min_interval = cpu_to_le16(adv_min_interval);
1011 	cp.max_interval = cpu_to_le16(adv_max_interval);
1012 	cp.own_address_type = own_addr_type;
1013 	cp.channel_map = hdev->le_adv_channel_map;
1014 
1015 	status = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_PARAM,
1016 				       sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1017 	if (status)
1018 		return status;
1019 
1020 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE,
1021 				     sizeof(enable), &enable, HCI_CMD_TIMEOUT);
1022 }
1023 
1024 static int enable_advertising_sync(struct hci_dev *hdev, void *data)
1025 {
1026 	return hci_enable_advertising_sync(hdev);
1027 }
1028 
1029 int hci_enable_advertising(struct hci_dev *hdev)
1030 {
1031 	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1032 	    list_empty(&hdev->adv_instances))
1033 		return 0;
1034 
1035 	return hci_cmd_sync_queue(hdev, enable_advertising_sync, NULL, NULL);
1036 }
1037 
1038 int hci_remove_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance,
1039 				     struct sock *sk)
1040 {
1041 	int err;
1042 
1043 	if (!ext_adv_capable(hdev))
1044 		return 0;
1045 
1046 	err = hci_disable_ext_adv_instance_sync(hdev, instance);
1047 	if (err)
1048 		return err;
1049 
1050 	/* If request specifies an instance that doesn't exist, fail */
1051 	if (instance > 0 && !hci_find_adv_instance(hdev, instance))
1052 		return -EINVAL;
1053 
1054 	return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_REMOVE_ADV_SET,
1055 					sizeof(instance), &instance, 0,
1056 					HCI_CMD_TIMEOUT, sk);
1057 }
1058 
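/* Stop the software rotation timer used to expire the current legacy
 * advertising instance.
 */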
1059 static void cancel_adv_timeout(struct hci_dev *hdev)
1060 {
1061 	if (hdev->adv_instance_timeout) {
1062 		hdev->adv_instance_timeout = 0;
1063 		cancel_delayed_work(&hdev->adv_instance_expire);
1064 	}
1065 }
1066 
1067 static int hci_set_ext_adv_data_sync(struct hci_dev *hdev, u8 instance)
1068 {
1069 	struct {
1070 		struct hci_cp_le_set_ext_adv_data cp;
1071 		u8 data[HCI_MAX_EXT_AD_LENGTH];
1072 	} pdu;
1073 	u8 len;
1074 
1075 	memset(&pdu, 0, sizeof(pdu));
1076 
1077 	len = eir_create_adv_data(hdev, instance, pdu.data);
1078 
1079 	/* There's nothing to do if the data hasn't changed */
1080 	if (hdev->adv_data_len == len &&
1081 	    memcmp(pdu.data, hdev->adv_data, len) == 0)
1082 		return 0;
1083 
1084 	memcpy(hdev->adv_data, pdu.data, len);
1085 	hdev->adv_data_len = len;
1086 
1087 	pdu.cp.length = len;
1088 	pdu.cp.handle = instance;
1089 	pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1090 	pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1091 
1092 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_DATA,
1093 				     sizeof(pdu.cp) + len, &pdu.cp,
1094 				     HCI_CMD_TIMEOUT);
1095 }
1096 
1097 static int hci_set_adv_data_sync(struct hci_dev *hdev, u8 instance)
1098 {
1099 	struct hci_cp_le_set_adv_data cp;
1100 	u8 len;
1101 
1102 	memset(&cp, 0, sizeof(cp));
1103 
1104 	len = eir_create_adv_data(hdev, instance, cp.data);
1105 
1106 	/* There's nothing to do if the data hasn't changed */
1107 	if (hdev->adv_data_len == len &&
1108 	    memcmp(cp.data, hdev->adv_data, len) == 0)
1109 		return 0;
1110 
1111 	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1112 	hdev->adv_data_len = len;
1113 
1114 	cp.length = len;
1115 
1116 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_DATA,
1117 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1118 }
1119 
1120 int hci_update_adv_data_sync(struct hci_dev *hdev, u8 instance)
1121 {
1122 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1123 		return 0;
1124 
1125 	if (ext_adv_capable(hdev))
1126 		return hci_set_ext_adv_data_sync(hdev, instance);
1127 
1128 	return hci_set_adv_data_sync(hdev, instance);
1129 }
1130 
1131 int hci_schedule_adv_instance_sync(struct hci_dev *hdev, u8 instance,
1132 				   bool force)
1133 {
1134 	struct adv_info *adv = NULL;
1135 	u16 timeout;
1136 
1137 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) && !ext_adv_capable(hdev))
1138 		return -EPERM;
1139 
1140 	if (hdev->adv_instance_timeout)
1141 		return -EBUSY;
1142 
1143 	adv = hci_find_adv_instance(hdev, instance);
1144 	if (!adv)
1145 		return -ENOENT;
1146 
1147 	/* A zero timeout means unlimited advertising. As long as there is
1148 	 * only one instance, duration should be ignored. We still set a timeout
1149 	 * in case further instances are being added later on.
1150 	 *
1151 	 * If the remaining lifetime of the instance is more than the duration
1152 	 * then the timeout corresponds to the duration, otherwise it will be
1153 	 * reduced to the remaining instance lifetime.
1154 	 */
1155 	if (adv->timeout == 0 || adv->duration <= adv->remaining_time)
1156 		timeout = adv->duration;
1157 	else
1158 		timeout = adv->remaining_time;
1159 
1160 	/* The remaining time is being reduced unless the instance is being
1161 	 * advertised without time limit.
1162 	 */
1163 	if (adv->timeout)
1164 		adv->remaining_time = adv->remaining_time - timeout;
1165 
1166 	/* Only use work for scheduling instances with legacy advertising */
1167 	if (!ext_adv_capable(hdev)) {
1168 		hdev->adv_instance_timeout = timeout;
1169 		queue_delayed_work(hdev->req_workqueue,
1170 				   &hdev->adv_instance_expire,
1171 				   msecs_to_jiffies(timeout * 1000));
1172 	}
1173 
1174 	/* If we're just re-scheduling the same instance again then do not
1175 	 * execute any HCI commands. This happens when a single instance is
1176 	 * being advertised.
1177 	 */
1178 	if (!force && hdev->cur_adv_instance == instance &&
1179 	    hci_dev_test_flag(hdev, HCI_LE_ADV))
1180 		return 0;
1181 
1182 	hdev->cur_adv_instance = instance;
1183 
1184 	return hci_start_adv_sync(hdev, instance);
1185 }
1186 
1187 static int hci_clear_adv_sets_sync(struct hci_dev *hdev, struct sock *sk)
1188 {
1189 	int err;
1190 
1191 	if (!ext_adv_capable(hdev))
1192 		return 0;
1193 
1194 	/* Disable instance 0x00 to disable all instances */
1195 	err = hci_disable_ext_adv_instance_sync(hdev, 0x00);
1196 	if (err)
1197 		return err;
1198 
1199 	return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CLEAR_ADV_SETS,
1200 					0, NULL, 0, HCI_CMD_TIMEOUT, sk);
1201 }
1202 
1203 static int hci_clear_adv_sync(struct hci_dev *hdev, struct sock *sk, bool force)
1204 {
1205 	struct adv_info *adv, *n;
1206 
1207 	if (ext_adv_capable(hdev))
1208 		/* Remove all existing sets */
1209 		return hci_clear_adv_sets_sync(hdev, sk);
1210 
1211 	/* This is safe as long as no command is sent while the lock is
1212 	 * held.
1213 	 */
1214 	hci_dev_lock(hdev);
1215 
1216 	/* Cleanup non-ext instances */
1217 	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
1218 		u8 instance = adv->instance;
1219 		int err;
1220 
1221 		if (!(force || adv->timeout))
1222 			continue;
1223 
1224 		err = hci_remove_adv_instance(hdev, instance);
1225 		if (!err)
1226 			mgmt_advertising_removed(sk, hdev, instance);
1227 	}
1228 
1229 	hci_dev_unlock(hdev);
1230 
1231 	return 0;
1232 }
1233 
1234 static int hci_remove_adv_sync(struct hci_dev *hdev, u8 instance,
1235 			       struct sock *sk)
1236 {
1237 	int err;
1238 
1239 	/* If we use extended advertising, instance has to be removed first. */
1240 	if (ext_adv_capable(hdev))
1241 		return hci_remove_ext_adv_instance_sync(hdev, instance, sk);
1242 
1243 	/* This is safe as long as no command is sent while the lock is
1244 	 * held.
1245 	 */
1246 	hci_dev_lock(hdev);
1247 
1248 	err = hci_remove_adv_instance(hdev, instance);
1249 	if (!err)
1250 		mgmt_advertising_removed(sk, hdev, instance);
1251 
1252 	hci_dev_unlock(hdev);
1253 
1254 	return err;
1255 }
1256 
1257 /* For a single instance:
1258  * - force == true: The instance will be removed even when its remaining
1259  *   lifetime is not zero.
1260  * - force == false: the instance will be deactivated but kept stored unless
1261  *   the remaining lifetime is zero.
1262  *
1263  * For instance == 0x00:
1264  * - force == true: All instances will be removed regardless of their timeout
1265  *   setting.
1266  * - force == false: Only instances that have a timeout will be removed.
1267  */
1268 int hci_remove_advertising_sync(struct hci_dev *hdev, struct sock *sk,
1269 				u8 instance, bool force)
1270 {
1271 	struct adv_info *next = NULL;
1272 	int err;
1273 
1274 	/* Cancel any timeout concerning the removed instance(s). */
1275 	if (!instance || hdev->cur_adv_instance == instance)
1276 		cancel_adv_timeout(hdev);
1277 
1278 	/* Get the next instance to advertise BEFORE we remove
1279 	 * the current one. This can be the same instance again
1280 	 * if there is only one instance.
1281 	 */
1282 	if (hdev->cur_adv_instance == instance)
1283 		next = hci_get_next_instance(hdev, instance);
1284 
1285 	if (!instance) {
1286 		err = hci_clear_adv_sync(hdev, sk, force);
1287 		if (err)
1288 			return err;
1289 	} else {
1290 		struct adv_info *adv = hci_find_adv_instance(hdev, instance);
1291 
1292 		if (force || (adv && adv->timeout && !adv->remaining_time)) {
1293 			/* Don't advertise a removed instance. */
1294 			if (next && next->instance == instance)
1295 				next = NULL;
1296 
1297 			err = hci_remove_adv_sync(hdev, instance, sk);
1298 			if (err)
1299 				return err;
1300 		}
1301 	}
1302 
1303 	if (!hdev_is_powered(hdev) || hci_dev_test_flag(hdev, HCI_ADVERTISING))
1304 		return 0;
1305 
1306 	if (next && !ext_adv_capable(hdev))
1307 		hci_schedule_adv_instance_sync(hdev, next->instance, false);
1308 
1309 	return 0;
1310 }
1311 
1312 int hci_read_rssi_sync(struct hci_dev *hdev, __le16 handle)
1313 {
1314 	struct hci_cp_read_rssi cp;
1315 
1316 	cp.handle = handle;
1317 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_RSSI,
1318 					sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1319 }
1320 
1321 int hci_read_tx_power_sync(struct hci_dev *hdev, __le16 handle, u8 type)
1322 {
1323 	struct hci_cp_read_tx_power cp;
1324 
1325 	cp.handle = handle;
1326 	cp.type = type;
1327 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_TX_POWER,
1328 					sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1329 }
1330 
1331 int hci_disable_advertising_sync(struct hci_dev *hdev)
1332 {
1333 	u8 enable = 0x00;
1334 
1335 	/* If controller is not advertising we are done. */
1336 	if (!hci_dev_test_flag(hdev, HCI_LE_ADV))
1337 		return 0;
1338 
1339 	if (ext_adv_capable(hdev))
1340 		return hci_disable_ext_adv_instance_sync(hdev, 0x00);
1341 
1342 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE,
1343 				     sizeof(enable), &enable, HCI_CMD_TIMEOUT);
1344 }
1345 
1346 static int hci_le_set_ext_scan_enable_sync(struct hci_dev *hdev, u8 val,
1347 					   u8 filter_dup)
1348 {
1349 	struct hci_cp_le_set_ext_scan_enable cp;
1350 
1351 	memset(&cp, 0, sizeof(cp));
1352 	cp.enable = val;
1353 	cp.filter_dup = filter_dup;
1354 
1355 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
1356 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1357 }
1358 
1359 static int hci_le_set_scan_enable_sync(struct hci_dev *hdev, u8 val,
1360 				       u8 filter_dup)
1361 {
1362 	struct hci_cp_le_set_scan_enable cp;
1363 
1364 	if (use_ext_scan(hdev))
1365 		return hci_le_set_ext_scan_enable_sync(hdev, val, filter_dup);
1366 
1367 	memset(&cp, 0, sizeof(cp));
1368 	cp.enable = val;
1369 	cp.filter_dup = filter_dup;
1370 
1371 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_ENABLE,
1372 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1373 }
1374 
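/* Toggle controller-based address resolution. Controllers generally reject
 * this while advertising or scanning is active, so callers are expected to
 * pause those states first (see hci_update_accept_list_sync()).
 */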
1375 static int hci_le_set_addr_resolution_enable_sync(struct hci_dev *hdev, u8 val)
1376 {
1377 	if (!use_ll_privacy(hdev))
1378 		return 0;
1379 
1380 	/* If controller is not/already resolving we are done. */
1381 	if (val == hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
1382 		return 0;
1383 
1384 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE,
1385 				     sizeof(val), &val, HCI_CMD_TIMEOUT);
1386 }
1387 
1388 int hci_scan_disable_sync(struct hci_dev *hdev)
1389 {
1390 	int err;
1391 
1392 	/* If controller is not scanning we are done. */
1393 	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
1394 		return 0;
1395 
1396 	if (hdev->scanning_paused) {
1397 		bt_dev_dbg(hdev, "Scanning is paused for suspend");
1398 		return 0;
1399 	}
1400 
1401 	if (hdev->suspended)
1402 		set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
1403 
1404 	err = hci_le_set_scan_enable_sync(hdev, LE_SCAN_DISABLE, 0x00);
1405 	if (err) {
1406 		bt_dev_err(hdev, "Unable to disable scanning: %d", err);
1407 		return err;
1408 	}
1409 
1410 	return err;
1411 }
1412 
1413 static bool scan_use_rpa(struct hci_dev *hdev)
1414 {
1415 	return hci_dev_test_flag(hdev, HCI_PRIVACY);
1416 }
1417 
1418 static void hci_start_interleave_scan(struct hci_dev *hdev)
1419 {
1420 	hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
1421 	queue_delayed_work(hdev->req_workqueue,
1422 			   &hdev->interleave_scan, 0);
1423 }
1424 
1425 static bool is_interleave_scanning(struct hci_dev *hdev)
1426 {
1427 	return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
1428 }
1429 
1430 static void cancel_interleave_scan(struct hci_dev *hdev)
1431 {
1432 	bt_dev_dbg(hdev, "cancelling interleave scan");
1433 
1434 	cancel_delayed_work_sync(&hdev->interleave_scan);
1435 
1436 	hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
1437 }
1438 
1439 /* Return true if this call started interleaved scanning; otherwise return
1440  * false.
1441  */
1442 static bool hci_update_interleaved_scan_sync(struct hci_dev *hdev)
1443 {
1444 	/* Do interleaved scan only if all of the following are true:
1445 	 * - There is at least one ADV monitor
1446 	 * - At least one pending LE connection or one device to be scanned for
1447 	 * - Monitor offloading is not supported
1448 	 * If so, we should alternate between allowlist scan and one without
1449 	 * any filters to save power.
1450 	 */
1451 	bool use_interleaving = hci_is_adv_monitoring(hdev) &&
1452 				!(list_empty(&hdev->pend_le_conns) &&
1453 				  list_empty(&hdev->pend_le_reports)) &&
1454 				hci_get_adv_monitor_offload_ext(hdev) ==
1455 				    HCI_ADV_MONITOR_EXT_NONE;
1456 	bool is_interleaving = is_interleave_scanning(hdev);
1457 
1458 	if (use_interleaving && !is_interleaving) {
1459 		hci_start_interleave_scan(hdev);
1460 		bt_dev_dbg(hdev, "starting interleave scan");
1461 		return true;
1462 	}
1463 
1464 	if (!use_interleaving && is_interleaving)
1465 		cancel_interleave_scan(hdev);
1466 
1467 	return false;
1468 }
1469 
1470 /* Removes connection to resolve list if needed.*/
1471 static int hci_le_del_resolve_list_sync(struct hci_dev *hdev,
1472 					bdaddr_t *bdaddr, u8 bdaddr_type)
1473 {
1474 	struct hci_cp_le_del_from_resolv_list cp;
1475 	struct bdaddr_list_with_irk *entry;
1476 
1477 	if (!use_ll_privacy(hdev))
1478 		return 0;
1479 
1480 	/* Check if the IRK has been programmed */
1481 	entry = hci_bdaddr_list_lookup_with_irk(&hdev->le_resolv_list, bdaddr,
1482 						bdaddr_type);
1483 	if (!entry)
1484 		return 0;
1485 
1486 	cp.bdaddr_type = bdaddr_type;
1487 	bacpy(&cp.bdaddr, bdaddr);
1488 
1489 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
1490 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1491 }
1492 
1493 static int hci_le_del_accept_list_sync(struct hci_dev *hdev,
1494 				       bdaddr_t *bdaddr, u8 bdaddr_type)
1495 {
1496 	struct hci_cp_le_del_from_accept_list cp;
1497 	int err;
1498 
1499 	/* Check if device is on accept list before removing it */
1500 	if (!hci_bdaddr_list_lookup(&hdev->le_accept_list, bdaddr, bdaddr_type))
1501 		return 0;
1502 
1503 	cp.bdaddr_type = bdaddr_type;
1504 	bacpy(&cp.bdaddr, bdaddr);
1505 
1506 	/* Ignore errors when removing from the resolving list as it is likely
1507 	 * that the device was never added.
1508 	 */
1509 	hci_le_del_resolve_list_sync(hdev, &cp.bdaddr, cp.bdaddr_type);
1510 
1511 	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST,
1512 				    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1513 	if (err) {
1514 		bt_dev_err(hdev, "Unable to remove from allow list: %d", err);
1515 		return err;
1516 	}
1517 
1518 	bt_dev_dbg(hdev, "Remove %pMR (0x%x) from allow list", &cp.bdaddr,
1519 		   cp.bdaddr_type);
1520 
1521 	return 0;
1522 }
1523 
1524 /* Adds connection to resolve list if needed.
1525  * Setting params to NULL programs local hdev->irk
1526  */
1527 static int hci_le_add_resolve_list_sync(struct hci_dev *hdev,
1528 					struct hci_conn_params *params)
1529 {
1530 	struct hci_cp_le_add_to_resolv_list cp;
1531 	struct smp_irk *irk;
1532 	struct bdaddr_list_with_irk *entry;
1533 
1534 	if (!use_ll_privacy(hdev))
1535 		return 0;
1536 
1537 	/* Attempt to program local identity address, type and irk if params is
1538 	 * NULL.
1539 	 */
1540 	if (!params) {
1541 		if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
1542 			return 0;
1543 
1544 		hci_copy_identity_address(hdev, &cp.bdaddr, &cp.bdaddr_type);
1545 		memcpy(cp.peer_irk, hdev->irk, 16);
1546 		goto done;
1547 	}
1548 
1549 	irk = hci_find_irk_by_addr(hdev, &params->addr, params->addr_type);
1550 	if (!irk)
1551 		return 0;
1552 
1553 	/* Check if the IRK has _not_ been programmed yet. */
1554 	entry = hci_bdaddr_list_lookup_with_irk(&hdev->le_resolv_list,
1555 						&params->addr,
1556 						params->addr_type);
1557 	if (entry)
1558 		return 0;
1559 
1560 	cp.bdaddr_type = params->addr_type;
1561 	bacpy(&cp.bdaddr, &params->addr);
1562 	memcpy(cp.peer_irk, irk->val, 16);
1563 
1564 done:
1565 	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
1566 		memcpy(cp.local_irk, hdev->irk, 16);
1567 	else
1568 		memset(cp.local_irk, 0, 16);
1569 
1570 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST,
1571 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1572 }
1573 
1574 /* Adds connection to allow list if needed, if the device uses RPA (has IRK)
1575  * this attempts to program the device in the resolving list as well.
1576  */
1577 static int hci_le_add_accept_list_sync(struct hci_dev *hdev,
1578 				       struct hci_conn_params *params,
1579 				       u8 *num_entries)
1580 {
1581 	struct hci_cp_le_add_to_accept_list cp;
1582 	int err;
1583 
1584 	/* Already in accept list */
1585 	if (hci_bdaddr_list_lookup(&hdev->le_accept_list, &params->addr,
1586 				   params->addr_type))
1587 		return 0;
1588 
1589 	/* Select filter policy to accept all advertising */
1590 	if (*num_entries >= hdev->le_accept_list_size)
1591 		return -ENOSPC;
1592 
1593 	/* Accept list can not be used with RPAs */
1594 	if (!use_ll_privacy(hdev) &&
1595 	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) {
1596 		return -EINVAL;
1597 	}
1598 
1599 	/* During suspend, only wakeable devices can be in acceptlist */
1600 	if (hdev->suspended && !hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
1601 						   params->current_flags))
1602 		return 0;
1603 
1604 	/* Attempt to program the device in the resolving list first to avoid
1605 	 * having to roll back in case it fails; since the resolving list is
1606 	 * dynamic, it can probably be smaller than the accept list.
1607 	 */
1608 	err = hci_le_add_resolve_list_sync(hdev, params);
1609 	if (err) {
1610 		bt_dev_err(hdev, "Unable to add to resolve list: %d", err);
1611 		return err;
1612 	}
1613 
1614 	*num_entries += 1;
1615 	cp.bdaddr_type = params->addr_type;
1616 	bacpy(&cp.bdaddr, &params->addr);
1617 
1618 	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST,
1619 				    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1620 	if (err) {
1621 		bt_dev_err(hdev, "Unable to add to allow list: %d", err);
1622 		/* Rollback the device from the resolving list */
1623 		hci_le_del_resolve_list_sync(hdev, &cp.bdaddr, cp.bdaddr_type);
1624 		return err;
1625 	}
1626 
1627 	bt_dev_dbg(hdev, "Add %pMR (0x%x) to allow list", &cp.bdaddr,
1628 		   cp.bdaddr_type);
1629 
1630 	return 0;
1631 }
1632 
1633 /* This function disables all advertising instances (including 0x00) */
1634 static int hci_pause_advertising_sync(struct hci_dev *hdev)
1635 {
1636 	int err;
1637 
1638 	/* If there are no instances or advertising has already been paused
1639 	 * there is nothing to do.
1640 	 */
1641 	if (!hdev->adv_instance_cnt || hdev->advertising_paused)
1642 		return 0;
1643 
1644 	bt_dev_dbg(hdev, "Pausing advertising instances");
1645 
1646 	/* Call to disable any advertisements active on the controller.
1647 	 * This will succeed even if no advertisements are configured.
1648 	 */
1649 	err = hci_disable_advertising_sync(hdev);
1650 	if (err)
1651 		return err;
1652 
1653 	/* If we are using software rotation, pause the loop */
1654 	if (!ext_adv_capable(hdev))
1655 		cancel_adv_timeout(hdev);
1656 
1657 	hdev->advertising_paused = true;
1658 
1659 	return 0;
1660 }
1661 
1662 /* This function enables all user advertising instances (excluding 0x00) */
1663 static int hci_resume_advertising_sync(struct hci_dev *hdev)
1664 {
1665 	struct adv_info *adv, *tmp;
1666 	int err = 0;
1667 
1668 	/* If advertising has not been paused there is nothing to do. */
1669 	if (!hdev->advertising_paused)
1670 		return 0;
1671 
1672 	bt_dev_dbg(hdev, "Resuming advertising instances");
1673 
1674 	if (ext_adv_capable(hdev)) {
1675 		/* Call for each tracked instance to be re-enabled */
1676 		list_for_each_entry_safe(adv, tmp, &hdev->adv_instances, list) {
1677 			err = hci_enable_ext_advertising_sync(hdev,
1678 							      adv->instance);
1679 			if (!err)
1680 				continue;
1681 
1682 			/* If the instance cannot be resumed remove it */
1683 			hci_remove_ext_adv_instance_sync(hdev, adv->instance,
1684 							 NULL);
1685 		}
1686 	} else {
1687 		/* Schedule for most recent instance to be restarted and begin
1688 		 * the software rotation loop
1689 		 */
1690 		err = hci_schedule_adv_instance_sync(hdev,
1691 						     hdev->cur_adv_instance,
1692 						     true);
1693 	}
1694 
1695 	hdev->advertising_paused = false;
1696 
1697 	return err;
1698 }
1699 
1700 /* Device must not be scanning when updating the accept list.
1701  *
1702  * Update is done using the following sequence:
1703  *
1704  * use_ll_privacy((Disable Advertising) -> Disable Resolving List) ->
1705  * Remove Devices From Accept List ->
1706  * (has IRK && use_ll_privacy(Remove Devices From Resolving List))->
1707  * Add Devices to Accept List ->
1708  * (has IRK && use_ll_privacy(Remove Devices From Resolving List)) ->
1709  * use_ll_privacy(Enable Resolving List -> (Enable Advertising)) ->
1710  * Enable Scanning
1711  *
1712  * In case of failure advertising shall be restored to its original state and
1713  * return would disable accept list since either accept or resolving list could
1714  * not be programmed.
1715  *
1716  */
1717 static u8 hci_update_accept_list_sync(struct hci_dev *hdev)
1718 {
1719 	struct hci_conn_params *params;
1720 	struct bdaddr_list *b, *t;
1721 	u8 num_entries = 0;
1722 	bool pend_conn, pend_report;
1723 	int err;
1724 
1725 	/* Pause advertising if the resolving list can be used, as controllers
1726 	 * cannot accept resolving list modifications while advertising.
1727 	 */
1728 	if (use_ll_privacy(hdev)) {
1729 		err = hci_pause_advertising_sync(hdev);
1730 		if (err) {
1731 			bt_dev_err(hdev, "pause advertising failed: %d", err);
1732 			return 0x00;
1733 		}
1734 	}
1735 
1736 	/* Disable address resolution while reprogramming accept list since
1737 	 * devices that do have an IRK will be programmed in the resolving list
1738 	 * when LL Privacy is enabled.
1739 	 */
1740 	err = hci_le_set_addr_resolution_enable_sync(hdev, 0x00);
1741 	if (err) {
1742 		bt_dev_err(hdev, "Unable to disable LL privacy: %d", err);
1743 		goto done;
1744 	}
1745 
1746 	/* Go through the current accept list programmed into the
1747 	 * controller one by one and check if that address is still
1748 	 * in the list of pending connections or list of devices to
1749 	 * report. If not present in either list, then remove it from
1750 	 * the controller.
1751 	 */
1752 	list_for_each_entry_safe(b, t, &hdev->le_accept_list, list) {
1753 		pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
1754 						      &b->bdaddr,
1755 						      b->bdaddr_type);
1756 		pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
1757 							&b->bdaddr,
1758 							b->bdaddr_type);
1759 
1760 		/* If the device is not likely to connect or report,
1761 		 * remove it from the acceptlist.
1762 		 */
1763 		if (!pend_conn && !pend_report) {
1764 			hci_le_del_accept_list_sync(hdev, &b->bdaddr,
1765 						    b->bdaddr_type);
1766 			continue;
1767 		}
1768 
1769 		num_entries++;
1770 	}
1771 
1772 	/* Since all no longer valid accept list entries have been
1773 	 * removed, walk through the list of pending connections
1774 	 * and ensure that any new device gets programmed into
1775 	 * the controller.
1776 	 *
1777 	 * If the list of the devices is larger than the list of
1778 	 * available accept list entries in the controller, then
1779 	 * just abort and return the filter policy value to not use the
1780 	 * accept list.
1781 	 */
1782 	list_for_each_entry(params, &hdev->pend_le_conns, action) {
1783 		err = hci_le_add_accept_list_sync(hdev, params, &num_entries);
1784 		if (err)
1785 			goto done;
1786 	}
1787 
1788 	/* After adding all new pending connections, walk through
1789 	 * the list of pending reports and also add these to the
1790 	 * accept list if there is still space. Abort if space runs out.
1791 	 */
1792 	list_for_each_entry(params, &hdev->pend_le_reports, action) {
1793 		err = hci_le_add_accept_list_sync(hdev, params, &num_entries);
1794 		if (err)
1795 			goto done;
1796 	}
1797 
1798 	/* Use the allowlist unless the following conditions are all true:
1799 	 * - We are not currently suspending
1800 	 * - There are 1 or more ADV monitors registered and it's not offloaded
1801 	 * - Interleaved scanning is not currently using the allowlist
1802 	 */
1803 	if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
1804 	    hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE &&
1805 	    hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
1806 		err = -EINVAL;
1807 
1808 done:
1809 	/* Enable address resolution when LL Privacy is enabled. */
1810 	err = hci_le_set_addr_resolution_enable_sync(hdev, 0x01);
1811 	if (err)
1812 		bt_dev_err(hdev, "Unable to enable LL privacy: %d", err);
1813 
1814 	/* Resume advertising if it was paused */
1815 	if (use_ll_privacy(hdev))
1816 		hci_resume_advertising_sync(hdev);
1817 
1818 	/* Select filter policy to use accept list */
1819 	return err ? 0x00 : 0x01;
1820 }
1821 
1822 /* Returns true if an le connection is in the scanning state */
1823 static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev)
1824 {
1825 	struct hci_conn_hash *h = &hdev->conn_hash;
1826 	struct hci_conn  *c;
1827 
1828 	rcu_read_lock();
1829 
1830 	list_for_each_entry_rcu(c, &h->list, list) {
1831 		if (c->type == LE_LINK && c->state == BT_CONNECT &&
1832 		    test_bit(HCI_CONN_SCANNING, &c->flags)) {
1833 			rcu_read_unlock();
1834 			return true;
1835 		}
1836 	}
1837 
1838 	rcu_read_unlock();
1839 
1840 	return false;
1841 }
1842 
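/* Program extended scan parameters, emitting one hci_cp_le_scan_phy_params
 * entry per PHY the controller can scan on (1M and/or Coded).
 */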
1843 static int hci_le_set_ext_scan_param_sync(struct hci_dev *hdev, u8 type,
1844 					  u16 interval, u16 window,
1845 					  u8 own_addr_type, u8 filter_policy)
1846 {
1847 	struct hci_cp_le_set_ext_scan_params *cp;
1848 	struct hci_cp_le_scan_phy_params *phy;
1849 	u8 data[sizeof(*cp) + sizeof(*phy) * 2];
1850 	u8 num_phy = 0;
1851 
1852 	cp = (void *)data;
1853 	phy = (void *)cp->data;
1854 
1855 	memset(data, 0, sizeof(data));
1856 
1857 	cp->own_addr_type = own_addr_type;
1858 	cp->filter_policy = filter_policy;
1859 
1860 	if (scan_1m(hdev) || scan_2m(hdev)) {
1861 		cp->scanning_phys |= LE_SCAN_PHY_1M;
1862 
1863 		phy->type = type;
1864 		phy->interval = cpu_to_le16(interval);
1865 		phy->window = cpu_to_le16(window);
1866 
1867 		num_phy++;
1868 		phy++;
1869 	}
1870 
1871 	if (scan_coded(hdev)) {
1872 		cp->scanning_phys |= LE_SCAN_PHY_CODED;
1873 
1874 		phy->type = type;
1875 		phy->interval = cpu_to_le16(interval);
1876 		phy->window = cpu_to_le16(window);
1877 
1878 		num_phy++;
1879 		phy++;
1880 	}
1881 
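	/* The command payload is variable length: one PHY parameter block
	 * follows the fixed header for each bit set in scanning_phys, so
	 * only sizeof(*cp) + num_phy * sizeof(*phy) bytes are sent.
	 */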
1882 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
1883 				     sizeof(*cp) + sizeof(*phy) * num_phy,
1884 				     data, HCI_CMD_TIMEOUT);
1885 }
1886 
1887 static int hci_le_set_scan_param_sync(struct hci_dev *hdev, u8 type,
1888 				      u16 interval, u16 window,
1889 				      u8 own_addr_type, u8 filter_policy)
1890 {
1891 	struct hci_cp_le_set_scan_param cp;
1892 
1893 	if (use_ext_scan(hdev))
1894 		return hci_le_set_ext_scan_param_sync(hdev, type, interval,
1895 						      window, own_addr_type,
1896 						      filter_policy);
1897 
1898 	memset(&cp, 0, sizeof(cp));
1899 	cp.type = type;
1900 	cp.interval = cpu_to_le16(interval);
1901 	cp.window = cpu_to_le16(window);
1902 	cp.own_address_type = own_addr_type;
1903 	cp.filter_policy = filter_policy;
1904 
1905 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_PARAM,
1906 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1907 }
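
/* Illustrative sketch (hypothetical helper, not called anywhere in this
 * file): how the helper above could be used to program a 60 ms interval /
 * 30 ms window active scan with the public address and no filter policy.
 * Interval and window are in units of 0.625 ms, so 0x0060 = 60 ms and
 * 0x0030 = 30 ms.
 */
static int __maybe_unused example_set_active_scan_param(struct hci_dev *hdev)
{
	return hci_le_set_scan_param_sync(hdev, LE_SCAN_ACTIVE,
					  0x0060, 0x0030,
					  ADDR_LE_DEV_PUBLIC,
					  0x00 /* accept all advertisers */);
}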
1908 
1909 static int hci_start_scan_sync(struct hci_dev *hdev, u8 type, u16 interval,
1910 			       u16 window, u8 own_addr_type, u8 filter_policy,
1911 			       u8 filter_dup)
1912 {
1913 	int err;
1914 
1915 	if (hdev->scanning_paused) {
1916 		bt_dev_dbg(hdev, "Scanning is paused for suspend");
1917 		return 0;
1918 	}
1919 
1920 	err = hci_le_set_scan_param_sync(hdev, type, interval, window,
1921 					 own_addr_type, filter_policy);
1922 	if (err)
1923 		return err;
1924 
1925 	return hci_le_set_scan_enable_sync(hdev, LE_SCAN_ENABLE, filter_dup);
1926 }
1927 
1928 int hci_passive_scan_sync(struct hci_dev *hdev)
1929 {
1930 	u8 own_addr_type;
1931 	u8 filter_policy;
1932 	u16 window, interval;
1933 	int err;
1934 
1935 	if (hdev->scanning_paused) {
1936 		bt_dev_dbg(hdev, "Scanning is paused for suspend");
1937 		return 0;
1938 	}
1939 
1940 	err = hci_scan_disable_sync(hdev);
1941 	if (err) {
1942 		bt_dev_err(hdev, "disable scanning failed: %d", err);
1943 		return err;
1944 	}
1945 
1946 	/* Set require_privacy to false since no SCAN_REQ PDUs are sent
1947 	 * during passive scanning. Not using a non-resolvable address
1948 	 * here is important so that peer devices using direct
1949 	 * advertising with our address will be correctly reported
1950 	 * by the controller.
1951 	 */
1952 	if (hci_update_random_address_sync(hdev, false, scan_use_rpa(hdev),
1953 					   &own_addr_type))
1954 		return 0;
1955 
1956 	if (hdev->enable_advmon_interleave_scan &&
1957 	    hci_update_interleaved_scan_sync(hdev))
1958 		return 0;
1959 
1960 	bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);
1961 
1962 	/* Adding or removing entries from the accept list must
1963 	 * happen before enabling scanning. The controller does
1964 	 * not allow accept list modification while scanning.
1965 	 */
1966 	filter_policy = hci_update_accept_list_sync(hdev);
1967 
1968 	/* When the controller is using resolvable random addresses, i.e.
1969 	 * LE privacy is enabled, controllers that support the Extended
1970 	 * Scanner Filter Policies feature can additionally handle
1971 	 * directed advertising.
1972 	 *
1973 	 * So instead of using filter policies 0x00 (no accept list)
1974 	 * and 0x01 (accept list enabled) use the extended filter policies
1975 	 * 0x02 (no accept list) and 0x03 (accept list enabled).
1976 	 */
1977 	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
1978 	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
1979 		filter_policy |= 0x02;
1980 
1981 	if (hdev->suspended) {
1982 		window = hdev->le_scan_window_suspend;
1983 		interval = hdev->le_scan_int_suspend;
1984 
1985 		set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
1986 	} else if (hci_is_le_conn_scanning(hdev)) {
1987 		window = hdev->le_scan_window_connect;
1988 		interval = hdev->le_scan_int_connect;
1989 	} else if (hci_is_adv_monitoring(hdev)) {
1990 		window = hdev->le_scan_window_adv_monitor;
1991 		interval = hdev->le_scan_int_adv_monitor;
1992 	} else {
1993 		window = hdev->le_scan_window;
1994 		interval = hdev->le_scan_interval;
1995 	}
1996 
1997 	bt_dev_dbg(hdev, "LE passive scan with acceptlist = %d", filter_policy);
1998 
1999 	return hci_start_scan_sync(hdev, LE_SCAN_PASSIVE, interval, window,
2000 				   own_addr_type, filter_policy,
2001 				   LE_SCAN_FILTER_DUP_ENABLE);
2002 }
2003 
2004 /* This function controls the passive scanning based on hdev->pend_le_conns
2005  * list. If there are pending LE connections we start the background
2006  * scanning, otherwise we stop it, in both cases using the following sequence:
2007  *
2008  * If there are devices to scan:
2009  *
2010  * Disable Scanning -> Update Accept List ->
2011  * use_ll_privacy((Disable Advertising) -> Disable Resolving List ->
2012  * Update Resolving List -> Enable Resolving List -> (Enable Advertising)) ->
2013  * Enable Scanning
2014  *
2015  * Otherwise:
2016  *
2017  * Disable Scanning
2018  */
2019 int hci_update_passive_scan_sync(struct hci_dev *hdev)
2020 {
2021 	int err;
2022 
2023 	if (!test_bit(HCI_UP, &hdev->flags) ||
2024 	    test_bit(HCI_INIT, &hdev->flags) ||
2025 	    hci_dev_test_flag(hdev, HCI_SETUP) ||
2026 	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
2027 	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
2028 	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
2029 		return 0;
2030 
2031 	/* No point in doing scanning if LE support hasn't been enabled */
2032 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2033 		return 0;
2034 
2035 	/* If discovery is active don't interfere with it */
2036 	if (hdev->discovery.state != DISCOVERY_STOPPED)
2037 		return 0;
2038 
2039 	/* Reset RSSI and UUID filters when starting background scanning
2040 	 * since these filters are meant for service discovery only.
2041 	 *
2042 	 * The Start Discovery and Start Service Discovery operations
2043 	 * ensure to set proper values for RSSI threshold and UUID
2044 	 * filter list. So it is safe to just reset them here.
2045 	 */
2046 	hci_discovery_filter_clear(hdev);
2047 
2048 	bt_dev_dbg(hdev, "ADV monitoring is %s",
2049 		   hci_is_adv_monitoring(hdev) ? "on" : "off");
2050 
2051 	if (list_empty(&hdev->pend_le_conns) &&
2052 	    list_empty(&hdev->pend_le_reports) &&
2053 	    !hci_is_adv_monitoring(hdev)) {
2054 		/* If there are no pending LE connections, no devices to
2055 		 * be scanned for and no ADV monitors, we should stop the
2056 		 * background scanning.
2057 		 */
2058 
2059 		bt_dev_dbg(hdev, "stopping background scanning");
2060 
2061 		err = hci_scan_disable_sync(hdev);
2062 		if (err)
2063 			bt_dev_err(hdev, "stop background scanning failed: %d",
2064 				   err);
2065 	} else {
2066 		/* If there is at least one pending LE connection, we should
2067 		 * keep the background scan running.
2068 		 */
2069 
2070 		/* If controller is connecting, we should not start scanning
2071 		 * since some controllers are not able to scan and connect at
2072 		 * the same time.
2073 		 */
2074 		if (hci_lookup_le_connect(hdev))
2075 			return 0;
2076 
2077 		bt_dev_dbg(hdev, "start background scanning");
2078 
2079 		err = hci_passive_scan_sync(hdev);
2080 		if (err)
2081 			bt_dev_err(hdev, "start background scanning failed: %d",
2082 				   err);
2083 	}
2084 
2085 	return err;
2086 }
2087 
2088 static int update_passive_scan_sync(struct hci_dev *hdev, void *data)
2089 {
2090 	return hci_update_passive_scan_sync(hdev);
2091 }
2092 
2093 int hci_update_passive_scan(struct hci_dev *hdev)
2094 {
2095 	/* Only queue if it would have any effect */
2096 	if (!test_bit(HCI_UP, &hdev->flags) ||
2097 	    test_bit(HCI_INIT, &hdev->flags) ||
2098 	    hci_dev_test_flag(hdev, HCI_SETUP) ||
2099 	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
2100 	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
2101 	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
2102 		return 0;
2103 
2104 	return hci_cmd_sync_queue(hdev, update_passive_scan_sync, NULL, NULL);
2105 }
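
/* Illustrative sketch (hypothetical caller, not part of this file): after
 * mutating hdev->pend_le_conns or hdev->pend_le_reports elsewhere, a caller
 * would queue a background scan refresh like this.
 */
static void __maybe_unused example_refresh_background_scan(struct hci_dev *hdev)
{
	int err = hci_update_passive_scan(hdev);

	if (err)
		bt_dev_dbg(hdev, "failed to queue passive scan update: %d",
			   err);
}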
2106 
2107 int hci_write_sc_support_sync(struct hci_dev *hdev, u8 val)
2108 {
2109 	int err;
2110 
2111 	if (!bredr_sc_enabled(hdev) || lmp_host_sc_capable(hdev))
2112 		return 0;
2113 
2114 	err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SC_SUPPORT,
2115 				    sizeof(val), &val, HCI_CMD_TIMEOUT);
2116 
2117 	if (!err) {
2118 		if (val) {
2119 			hdev->features[1][0] |= LMP_HOST_SC;
2120 			hci_dev_set_flag(hdev, HCI_SC_ENABLED);
2121 		} else {
2122 			hdev->features[1][0] &= ~LMP_HOST_SC;
2123 			hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
2124 		}
2125 	}
2126 
2127 	return err;
2128 }
2129 
2130 static int hci_write_ssp_mode_sync(struct hci_dev *hdev, u8 mode)
2131 {
2132 	int err;
2133 
2134 	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED) ||
2135 	    lmp_host_ssp_capable(hdev))
2136 		return 0;
2137 
2138 	err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_MODE,
2139 				    sizeof(mode), &mode, HCI_CMD_TIMEOUT);
2140 	if (err)
2141 		return err;
2142 
2143 	return hci_write_sc_support_sync(hdev, 0x01);
2144 }
2145 
2146 static int hci_write_le_host_supported_sync(struct hci_dev *hdev, u8 le,
2147 					    u8 simul)
2148 {
2149 	struct hci_cp_write_le_host_supported cp;
2150 
2151 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
2152 	    !lmp_bredr_capable(hdev))
2153 		return 0;
2154 
2155 	/* Check first if we already have the right host state
2156 	 * (host features set)
2157 	 */
2158 	if (le == lmp_host_le_capable(hdev) &&
2159 	    simul == lmp_host_le_br_capable(hdev))
2160 		return 0;
2161 
2162 	memset(&cp, 0, sizeof(cp));
2163 
2164 	cp.le = le;
2165 	cp.simul = simul;
2166 
2167 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED,
2168 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2169 }
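
/* Illustrative sketch (hypothetical helper): enabling the LE host feature
 * while leaving simultaneous LE and BR/EDR support disabled, mirroring the
 * call made from the powered update sequence below.
 */
static int __maybe_unused example_enable_le_host(struct hci_dev *hdev)
{
	return hci_write_le_host_supported_sync(hdev, 0x01, 0x00);
}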
2170 
2171 static int hci_powered_update_adv_sync(struct hci_dev *hdev)
2172 {
2173 	struct adv_info *adv, *tmp;
2174 	int err;
2175 
2176 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2177 		return 0;
2178 
2179 	/* If RPA Resolution has not been enabled yet it means the
2180 	 * resolving list is empty and we should attempt to program the
2181 	 * local IRK in order to support using own_addr_type
2182 	 * ADDR_LE_DEV_RANDOM_RESOLVED (0x03).
2183 	 */
2184 	if (!hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) {
2185 		hci_le_add_resolve_list_sync(hdev, NULL);
2186 		hci_le_set_addr_resolution_enable_sync(hdev, 0x01);
2187 	}
2188 
2189 	/* Make sure the controller has a good default for
2190 	 * advertising data. This also applies to the case
2191 	 * where BR/EDR was toggled during the AUTO_OFF phase.
2192 	 */
2193 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2194 	    list_empty(&hdev->adv_instances)) {
2195 		if (ext_adv_capable(hdev)) {
2196 			err = hci_setup_ext_adv_instance_sync(hdev, 0x00);
2197 			if (!err)
2198 				hci_update_scan_rsp_data_sync(hdev, 0x00);
2199 		} else {
2200 			err = hci_update_adv_data_sync(hdev, 0x00);
2201 			if (!err)
2202 				hci_update_scan_rsp_data_sync(hdev, 0x00);
2203 		}
2204 
2205 		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
2206 			hci_enable_advertising_sync(hdev);
2207 	}
2208 
2209 	/* Call for each tracked instance to be scheduled */
2210 	list_for_each_entry_safe(adv, tmp, &hdev->adv_instances, list)
2211 		hci_schedule_adv_instance_sync(hdev, adv->instance, true);
2212 
2213 	return 0;
2214 }
2215 
2216 static int hci_write_auth_enable_sync(struct hci_dev *hdev)
2217 {
2218 	u8 link_sec;
2219 
2220 	link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
2221 	if (link_sec == test_bit(HCI_AUTH, &hdev->flags))
2222 		return 0;
2223 
2224 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_AUTH_ENABLE,
2225 				     sizeof(link_sec), &link_sec,
2226 				     HCI_CMD_TIMEOUT);
2227 }
2228 
2229 int hci_write_fast_connectable_sync(struct hci_dev *hdev, bool enable)
2230 {
2231 	struct hci_cp_write_page_scan_activity cp;
2232 	u8 type;
2233 	int err = 0;
2234 
2235 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2236 		return 0;
2237 
2238 	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
2239 		return 0;
2240 
2241 	memset(&cp, 0, sizeof(cp));
2242 
2243 	if (enable) {
2244 		type = PAGE_SCAN_TYPE_INTERLACED;
2245 
2246 		/* 160 msec page scan interval */
2247 		cp.interval = cpu_to_le16(0x0100);
2248 	} else {
2249 		type = hdev->def_page_scan_type;
2250 		cp.interval = cpu_to_le16(hdev->def_page_scan_int);
2251 	}
2252 
2253 	cp.window = cpu_to_le16(hdev->def_page_scan_window);
2254 
2255 	if (__cpu_to_le16(hdev->page_scan_interval) != cp.interval ||
2256 	    __cpu_to_le16(hdev->page_scan_window) != cp.window) {
2257 		err = __hci_cmd_sync_status(hdev,
2258 					    HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
2259 					    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2260 		if (err)
2261 			return err;
2262 	}
2263 
2264 	if (hdev->page_scan_type != type)
2265 		err = __hci_cmd_sync_status(hdev,
2266 					    HCI_OP_WRITE_PAGE_SCAN_TYPE,
2267 					    sizeof(type), &type,
2268 					    HCI_CMD_TIMEOUT);
2269 
2270 	return err;
2271 }
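
/* Illustrative sketch (hypothetical helper): page scan interval and window
 * are programmed in 0.625 ms slots, so the 0x0100 used above for fast
 * connectable mode corresponds to 256 * 0.625 ms = 160 ms.
 */
static inline u16 __maybe_unused example_ms_to_slots(unsigned int ms)
{
	/* 1 slot = 0.625 ms, hence slots = ms * 8 / 5 */
	return (u16)(ms * 8 / 5);
}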
2272 
2273 static bool disconnected_accept_list_entries(struct hci_dev *hdev)
2274 {
2275 	struct bdaddr_list *b;
2276 
2277 	list_for_each_entry(b, &hdev->accept_list, list) {
2278 		struct hci_conn *conn;
2279 
2280 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
2281 		if (!conn)
2282 			return true;
2283 
2284 		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2285 			return true;
2286 	}
2287 
2288 	return false;
2289 }
2290 
2291 static int hci_write_scan_enable_sync(struct hci_dev *hdev, u8 val)
2292 {
2293 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE,
2294 					    sizeof(val), &val,
2295 					    HCI_CMD_TIMEOUT);
2296 }
2297 
2298 int hci_update_scan_sync(struct hci_dev *hdev)
2299 {
2300 	u8 scan;
2301 
2302 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2303 		return 0;
2304 
2305 	if (!hdev_is_powered(hdev))
2306 		return 0;
2307 
2308 	if (mgmt_powering_down(hdev))
2309 		return 0;
2310 
2311 	if (hdev->scanning_paused)
2312 		return 0;
2313 
2314 	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
2315 	    disconnected_accept_list_entries(hdev))
2316 		scan = SCAN_PAGE;
2317 	else
2318 		scan = SCAN_DISABLED;
2319 
2320 	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2321 		scan |= SCAN_INQUIRY;
2322 
2323 	if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
2324 	    test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
2325 		return 0;
2326 
2327 	return hci_write_scan_enable_sync(hdev, scan);
2328 }
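
/* Illustrative sketch (hypothetical helper): how the scan value written
 * above is composed from the connectable and discoverable settings.
 */
static u8 __maybe_unused example_compose_scan_mode(bool connectable,
						   bool discoverable)
{
	u8 scan = connectable ? SCAN_PAGE : SCAN_DISABLED;

	if (discoverable)
		scan |= SCAN_INQUIRY;

	return scan;
}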
2329 
2330 static int hci_update_name_sync(struct hci_dev *hdev)
2331 {
2332 	struct hci_cp_write_local_name cp;
2333 
2334 	memset(&cp, 0, sizeof(cp));
2335 
2336 	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
2337 
2338 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LOCAL_NAME,
2339 					    sizeof(cp), &cp,
2340 					    HCI_CMD_TIMEOUT);
2341 }
2342 
2343 /* This function performs the powered update HCI command sequence after the
2344  * HCI init sequence, which ends up resetting all states; the sequence is:
2345  *
2346  * HCI_SSP_ENABLED(Enable SSP)
2347  * HCI_LE_ENABLED(Enable LE)
2348  * HCI_LE_ENABLED(use_ll_privacy(Add local IRK to Resolving List) ->
2349  * Update adv data)
2350  * Enable Authentication
2351  * lmp_bredr_capable(Set Fast Connectable -> Set Scan Type -> Set Class ->
2352  * Set Name -> Set EIR)
2353  */
2354 int hci_powered_update_sync(struct hci_dev *hdev)
2355 {
2356 	int err;
2357 
2358 	/* Register the available SMP channels (BR/EDR and LE) only when
2359 	 * successfully powering on the controller. This late
2360 	 * registration is required so that LE SMP can clearly decide if
2361 	 * the public address or static address is used.
2362 	 */
2363 	smp_register(hdev);
2364 
2365 	err = hci_write_ssp_mode_sync(hdev, 0x01);
2366 	if (err)
2367 		return err;
2368 
2369 	err = hci_write_le_host_supported_sync(hdev, 0x01, 0x00);
2370 	if (err)
2371 		return err;
2372 
2373 	err = hci_powered_update_adv_sync(hdev);
2374 	if (err)
2375 		return err;
2376 
2377 	err = hci_write_auth_enable_sync(hdev);
2378 	if (err)
2379 		return err;
2380 
2381 	if (lmp_bredr_capable(hdev)) {
2382 		if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
2383 			hci_write_fast_connectable_sync(hdev, true);
2384 		else
2385 			hci_write_fast_connectable_sync(hdev, false);
2386 		hci_update_scan_sync(hdev);
2387 		hci_update_class_sync(hdev);
2388 		hci_update_name_sync(hdev);
2389 		hci_update_eir_sync(hdev);
2390 	}
2391 
2392 	return 0;
2393 }
2394 
2395 /* This function performs the power on HCI command sequence as follows:
2396  *
2397  * If the controller is already up (HCI_UP) it performs the
2398  * hci_powered_update_sync sequence; otherwise it runs hci_dev_open_sync,
2399  * which follows up with hci_powered_update_sync once init completes.
2400  */
2401 static int hci_power_on_sync(struct hci_dev *hdev)
2402 {
2403 	int err;
2404 
2405 	if (test_bit(HCI_UP, &hdev->flags) &&
2406 	    hci_dev_test_flag(hdev, HCI_MGMT) &&
2407 	    hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
2408 		cancel_delayed_work(&hdev->power_off);
2409 		return hci_powered_update_sync(hdev);
2410 	}
2411 
2412 	err = hci_dev_open_sync(hdev);
2413 	if (err < 0)
2414 		return err;
2415 
2416 	/* During the HCI setup phase, a few error conditions are
2417 	 * ignored and they need to be checked now. If they are still
2418 	 * valid, it is important to turn the device back off.
2419 	 */
2420 	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2421 	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2422 	    (hdev->dev_type == HCI_PRIMARY &&
2423 	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2424 	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2425 		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2426 		hci_dev_close_sync(hdev);
2427 	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2428 		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2429 				   HCI_AUTO_OFF_TIMEOUT);
2430 	}
2431 
2432 	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2433 		/* For unconfigured devices, set the HCI_RAW flag
2434 		 * so that userspace can easily identify them.
2435 		 */
2436 		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2437 			set_bit(HCI_RAW, &hdev->flags);
2438 
2439 		/* For fully configured devices, this will send
2440 		 * the Index Added event. For unconfigured devices,
2441 		 * it will send the Unconfigured Index Added event.
2442 		 *
2443 		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2444 		 * and no event will be sent.
2445 		 */
2446 		mgmt_index_added(hdev);
2447 	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2448 		/* When the controller is now configured, then it
2449 		 * is important to clear the HCI_RAW flag.
2450 		 */
2451 		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2452 			clear_bit(HCI_RAW, &hdev->flags);
2453 
2454 		/* Powering on the controller with HCI_CONFIG set only
2455 		 * happens with the transition from unconfigured to
2456 		 * configured. This will send the Index Added event.
2457 		 */
2458 		mgmt_index_added(hdev);
2459 	}
2460 
2461 	return 0;
2462 }
2463 
2464 static int hci_remote_name_cancel_sync(struct hci_dev *hdev, bdaddr_t *addr)
2465 {
2466 	struct hci_cp_remote_name_req_cancel cp;
2467 
2468 	memset(&cp, 0, sizeof(cp));
2469 	bacpy(&cp.bdaddr, addr);
2470 
2471 	return __hci_cmd_sync_status(hdev, HCI_OP_REMOTE_NAME_REQ_CANCEL,
2472 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2473 }
2474 
2475 int hci_stop_discovery_sync(struct hci_dev *hdev)
2476 {
2477 	struct discovery_state *d = &hdev->discovery;
2478 	struct inquiry_entry *e;
2479 	int err;
2480 
2481 	bt_dev_dbg(hdev, "state %u", hdev->discovery.state);
2482 
2483 	if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
2484 		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
2485 			err = __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY_CANCEL,
2486 						    0, NULL, HCI_CMD_TIMEOUT);
2487 			if (err)
2488 				return err;
2489 		}
2490 
2491 		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2492 			cancel_delayed_work(&hdev->le_scan_disable);
2493 			cancel_delayed_work(&hdev->le_scan_restart);
2494 
2495 			err = hci_scan_disable_sync(hdev);
2496 			if (err)
2497 				return err;
2498 		}
2499 
2500 	} else {
2501 		err = hci_scan_disable_sync(hdev);
2502 		if (err)
2503 			return err;
2504 	}
2505 
2506 	/* Resume advertising if it was paused */
2507 	if (use_ll_privacy(hdev))
2508 		hci_resume_advertising_sync(hdev);
2509 
2510 	/* No further actions needed for LE-only discovery */
2511 	if (d->type == DISCOV_TYPE_LE)
2512 		return 0;
2513 
2514 	if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
2515 		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2516 						     NAME_PENDING);
2517 		if (!e)
2518 			return 0;
2519 
2520 		return hci_remote_name_cancel_sync(hdev, &e->data.bdaddr);
2521 	}
2522 
2523 	return 0;
2524 }
2525 
2526 static int hci_disconnect_phy_link_sync(struct hci_dev *hdev, u16 handle,
2527 					u8 reason)
2528 {
2529 	struct hci_cp_disconn_phy_link cp;
2530 
2531 	memset(&cp, 0, sizeof(cp));
2532 	cp.phy_handle = HCI_PHY_HANDLE(handle);
2533 	cp.reason = reason;
2534 
2535 	return __hci_cmd_sync_status(hdev, HCI_OP_DISCONN_PHY_LINK,
2536 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2537 }
2538 
2539 static int hci_disconnect_sync(struct hci_dev *hdev, struct hci_conn *conn,
2540 			       u8 reason)
2541 {
2542 	struct hci_cp_disconnect cp;
2543 
2544 	if (conn->type == AMP_LINK)
2545 		return hci_disconnect_phy_link_sync(hdev, conn->handle, reason);
2546 
2547 	memset(&cp, 0, sizeof(cp));
2548 	cp.handle = cpu_to_le16(conn->handle);
2549 	cp.reason = reason;
2550 
2551 	return __hci_cmd_sync_status(hdev, HCI_OP_DISCONNECT,
2552 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2553 }
2554 
2555 static int hci_le_connect_cancel_sync(struct hci_dev *hdev,
2556 				      struct hci_conn *conn)
2557 {
2558 	if (test_bit(HCI_CONN_SCANNING, &conn->flags))
2559 		return 0;
2560 
2561 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_CREATE_CONN_CANCEL,
2562 				     6, &conn->dst, HCI_CMD_TIMEOUT);
2563 }
2564 
2565 static int hci_connect_cancel_sync(struct hci_dev *hdev, struct hci_conn *conn)
2566 {
2567 	if (conn->type == LE_LINK)
2568 		return hci_le_connect_cancel_sync(hdev, conn);
2569 
2570 	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
2571 		return 0;
2572 
2573 	return __hci_cmd_sync_status(hdev, HCI_OP_CREATE_CONN_CANCEL,
2574 				     6, &conn->dst, HCI_CMD_TIMEOUT);
2575 }
2576 
2577 static int hci_reject_sco_sync(struct hci_dev *hdev, struct hci_conn *conn,
2578 			       u8 reason)
2579 {
2580 	struct hci_cp_reject_sync_conn_req cp;
2581 
2582 	memset(&cp, 0, sizeof(cp));
2583 	bacpy(&cp.bdaddr, &conn->dst);
2584 	cp.reason = reason;
2585 
2586 	/* SCO rejection has its own limited set of
2587 	 * allowed error values (0x0D-0x0F).
2588 	 */
2589 	if (reason < 0x0d || reason > 0x0f)
2590 		cp.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
2591 
2592 	return __hci_cmd_sync_status(hdev, HCI_OP_REJECT_SYNC_CONN_REQ,
2593 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2594 }
2595 
2596 static int hci_reject_conn_sync(struct hci_dev *hdev, struct hci_conn *conn,
2597 				u8 reason)
2598 {
2599 	struct hci_cp_reject_conn_req cp;
2600 
2601 	if (conn->type == SCO_LINK || conn->type == ESCO_LINK)
2602 		return hci_reject_sco_sync(hdev, conn, reason);
2603 
2604 	memset(&cp, 0, sizeof(cp));
2605 	bacpy(&cp.bdaddr, &conn->dst);
2606 	cp.reason = reason;
2607 
2608 	return __hci_cmd_sync_status(hdev, HCI_OP_REJECT_CONN_REQ,
2609 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2610 }
2611 
2612 static int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn,
2613 			       u8 reason)
2614 {
2615 	switch (conn->state) {
2616 	case BT_CONNECTED:
2617 	case BT_CONFIG:
2618 		return hci_disconnect_sync(hdev, conn, reason);
2619 	case BT_CONNECT:
2620 		return hci_connect_cancel_sync(hdev, conn);
2621 	case BT_CONNECT2:
2622 		return hci_reject_conn_sync(hdev, conn, reason);
2623 	default:
2624 		conn->state = BT_CLOSED;
2625 		break;
2626 	}
2627 
2628 	return 0;
2629 }
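
/* Illustrative sketch (hypothetical call site): tearing down a connection
 * with the common "remote user terminated connection" reason code.
 */
static int __maybe_unused example_abort_conn(struct hci_dev *hdev,
					     struct hci_conn *conn)
{
	return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
}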
2630 
2631 /* This function performs the power off HCI command sequence as follows:
2632  *
2633  * Clear Advertising
2634  * Stop Discovery
2635  * Disconnect all connections
2636  * hci_dev_close_sync
2637  */
2638 static int hci_power_off_sync(struct hci_dev *hdev)
2639 {
2640 	struct hci_conn *conn;
2641 	int err;
2642 
2643 	/* If controller is already down there is nothing to do */
2644 	if (!test_bit(HCI_UP, &hdev->flags))
2645 		return 0;
2646 
2647 	if (test_bit(HCI_ISCAN, &hdev->flags) ||
2648 	    test_bit(HCI_PSCAN, &hdev->flags)) {
2649 		err = hci_write_scan_enable_sync(hdev, 0x00);
2650 		if (err)
2651 			return err;
2652 	}
2653 
2654 	err = hci_clear_adv_sync(hdev, NULL, false);
2655 	if (err)
2656 		return err;
2657 
2658 	err = hci_stop_discovery_sync(hdev);
2659 	if (err)
2660 		return err;
2661 
2662 	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
2663 		/* 0x15 == Remote Device Terminated due to Power Off */
2664 		hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_POWER_OFF);
2665 	}
2666 
2667 	return hci_dev_close_sync(hdev);
2668 }
2669 
2670 int hci_set_powered_sync(struct hci_dev *hdev, u8 val)
2671 {
2672 	if (val)
2673 		return hci_power_on_sync(hdev);
2674 
2675 	return hci_power_off_sync(hdev);
2676 }
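
/* Illustrative sketch (hypothetical callback): how a hci_cmd_sync callback
 * could drive the power sequence above, e.g. when queued from mgmt.
 */
static int __maybe_unused example_set_powered_on_sync(struct hci_dev *hdev,
						      void *data)
{
	return hci_set_powered_sync(hdev, 0x01);
}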
2677 
2678 static int hci_inquiry_sync(struct hci_dev *hdev, u8 length)
2679 {
2680 	const u8 giac[3] = { 0x33, 0x8b, 0x9e };	/* General IAC 0x9e8b33 */
2681 	const u8 liac[3] = { 0x00, 0x8b, 0x9e };	/* Limited IAC 0x9e8b00 */
2682 	struct hci_cp_inquiry cp;
2683 
2684 	bt_dev_dbg(hdev, "");
2685 
2686 	if (hci_dev_test_flag(hdev, HCI_INQUIRY))
2687 		return 0;
2688 
2689 	hci_dev_lock(hdev);
2690 	hci_inquiry_cache_flush(hdev);
2691 	hci_dev_unlock(hdev);
2692 
2693 	memset(&cp, 0, sizeof(cp));
2694 
2695 	if (hdev->discovery.limited)
2696 		memcpy(&cp.lap, liac, sizeof(cp.lap));
2697 	else
2698 		memcpy(&cp.lap, giac, sizeof(cp.lap));
2699 
2700 	cp.length = length;
2701 
2702 	return __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY,
2703 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2704 }
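
/* Illustrative sketch (hypothetical helper): the inquiry length used above
 * is in units of 1.28 s, so a length of 0x08 yields roughly 10.24 s.
 */
static inline unsigned int __maybe_unused example_inquiry_len_to_msecs(u8 len)
{
	return len * 1280; /* 1.28 s == 1280 ms per unit */
}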
2705 
2706 static int hci_active_scan_sync(struct hci_dev *hdev, uint16_t interval)
2707 {
2708 	u8 own_addr_type;
2709 	/* Accept list is not used for discovery */
2710 	u8 filter_policy = 0x00;
2711 	/* Default is to enable duplicates filter */
2712 	u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2713 	int err;
2714 
2715 	bt_dev_dbg(hdev, "");
2716 
2717 	/* If controller is scanning, it means the passive scanning is
2718 	 * running. Thus, we should temporarily stop it in order to set the
2719 	 * discovery scanning parameters.
2720 	 */
2721 	err = hci_scan_disable_sync(hdev);
2722 	if (err) {
2723 		bt_dev_err(hdev, "Unable to disable scanning: %d", err);
2724 		return err;
2725 	}
2726 
2727 	cancel_interleave_scan(hdev);
2728 
2729 	/* Pause advertising since active scanning disables address resolution
2730 	 * which advertising depends on in order to generate its RPAs.
2731 	 */
2732 	if (use_ll_privacy(hdev)) {
2733 		err = hci_pause_advertising_sync(hdev);
2734 		if (err) {
2735 			bt_dev_err(hdev, "pause advertising failed: %d", err);
2736 			goto failed;
2737 		}
2738 	}
2739 
2740 	/* Disable address resolution while doing active scanning since the
2741 	 * accept list shall not be used and all reports shall reach the host
2742 	 * anyway.
2743 	 */
2744 	err = hci_le_set_addr_resolution_enable_sync(hdev, 0x00);
2745 	if (err) {
2746 		bt_dev_err(hdev, "Unable to disable Address Resolution: %d",
2747 			   err);
2748 		goto failed;
2749 	}
2750 
2751 	/* All active scans will be done with either a resolvable private
2752 	 * address (when privacy feature has been enabled) or a non-resolvable
2753 	 * private address.
2754 	 */
2755 	err = hci_update_random_address_sync(hdev, true, scan_use_rpa(hdev),
2756 					     &own_addr_type);
2757 	if (err < 0)
2758 		own_addr_type = ADDR_LE_DEV_PUBLIC;
2759 
2760 	if (hci_is_adv_monitoring(hdev)) {
2761 		/* Duplicate filter should be disabled when some advertisement
2762 		 * monitor is activated, otherwise AdvMon can only receive one
2763 		 * advertisement for one peer(*) during active scanning, and
2764 		 * might report loss to these peers.
2765 		 *
2766 		 * Note that different controllers have different meanings of
2767 		 * |duplicate|. Some of them consider packets with the same
2768 		 * address as duplicates, and others consider packets with the
2769 		 * same address and the same RSSI as duplicates. Although in the
2770 		 * latter case the duplicate filter would not need to be
2771 		 * disabled, active scanning typically runs only for a short
2772 		 * period of time, so the power impact should be negligible.
2773 		 */
2774 		filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
2775 	}
2776 
2777 	err = hci_start_scan_sync(hdev, LE_SCAN_ACTIVE, interval,
2778 				  hdev->le_scan_window_discovery,
2779 				  own_addr_type, filter_policy, filter_dup);
2780 	if (!err)
2781 		return err;
2782 
2783 failed:
2784 	/* Resume advertising if it was paused */
2785 	if (use_ll_privacy(hdev))
2786 		hci_resume_advertising_sync(hdev);
2787 
2788 	/* Resume passive scanning */
2789 	hci_update_passive_scan_sync(hdev);
2790 	return err;
2791 }
2792 
2793 static int hci_start_interleaved_discovery_sync(struct hci_dev *hdev)
2794 {
2795 	int err;
2796 
2797 	bt_dev_dbg(hdev, "");
2798 
2799 	err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery * 2);
2800 	if (err)
2801 		return err;
2802 
2803 	return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN);
2804 }
2805 
2806 int hci_start_discovery_sync(struct hci_dev *hdev)
2807 {
2808 	unsigned long timeout;
2809 	int err;
2810 
2811 	bt_dev_dbg(hdev, "type %u", hdev->discovery.type);
2812 
2813 	switch (hdev->discovery.type) {
2814 	case DISCOV_TYPE_BREDR:
2815 		return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN);
2816 	case DISCOV_TYPE_INTERLEAVED:
2817 		/* When running simultaneous discovery, the LE scanning time
2818 		 * should occupy the whole discovery time since BR/EDR inquiry
2819 		 * and LE scanning are scheduled by the controller.
2820 		 *
2821 		 * For interleaving discovery in comparison, BR/EDR inquiry
2822 		 * and LE scanning are done sequentially with separate
2823 		 * timeouts.
2824 		 */
2825 		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
2826 			     &hdev->quirks)) {
2827 			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2828 			/* During simultaneous discovery, we double LE scan
2829 			 * interval. We must leave some time for the controller
2830 			 * to do BR/EDR inquiry.
2831 			 */
2832 			err = hci_start_interleaved_discovery_sync(hdev);
2833 			break;
2834 		}
2835 
2836 		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
2837 		err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery);
2838 		break;
2839 	case DISCOV_TYPE_LE:
2840 		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2841 		err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery);
2842 		break;
2843 	default:
2844 		return -EINVAL;
2845 	}
2846 
2847 	if (err)
2848 		return err;
2849 
2850 	bt_dev_dbg(hdev, "timeout %u ms", jiffies_to_msecs(timeout));
2851 
2852 	/* When service discovery is used and the controller has a
2853 	 * strict duplicate filter, it is important to remember the
2854 	 * start and duration of the scan. This is required for
2855 	 * restarting scanning during the discovery phase.
2856 	 */
2857 	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
2858 	    hdev->discovery.result_filtering) {
2859 		hdev->discovery.scan_start = jiffies;
2860 		hdev->discovery.scan_duration = timeout;
2861 	}
2862 
2863 	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
2864 			   timeout);
2865 
2866 	return 0;
2867 }
2868