/* xref: /openbmc/linux/net/bluetooth/hci_request.c (revision 5c49bcce5c124406920843af65574104aaaa3309) */
/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <linux/sched/signal.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2
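
/* The three values above track hdev->req_status over the life of a
 * synchronous request: PEND while waiting for the controller, then
 * DONE or CANCELED (see hci_req_sync_complete() below).
 */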

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

void hci_req_purge(struct hci_request *req)
{
	skb_queue_purge(&req->cmd_q);
}

bool hci_req_status_pend(struct hci_dev *hdev)
{
	return hdev->req_status == HCI_REQ_PEND;
}

static int req_run(struct hci_request *req, hci_req_complete_t complete,
		   hci_req_complete_skb_t complete_skb)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	if (complete) {
		bt_cb(skb)->hci.req_complete = complete;
	} else if (complete_skb) {
		bt_cb(skb)->hci.req_complete_skb = complete_skb;
		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
	}

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
	return req_run(req, NULL, complete);
}
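
/* Typical usage of the request API, as seen throughout this file
 * (illustrative sketch only; my_complete_cb is a placeholder):
 *
 *	struct hci_request req;
 *	u8 enable = 0x01;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
 *	hci_req_run(&req, my_complete_cb);
 *
 * The queued commands are spliced onto hdev->cmd_q and sent by the
 * command work; the completion callback runs once the last command of
 * the request has completed.
 */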

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
			hdev->req_status != HCI_REQ_PEND, timeout);

	if (err == -ERESTARTSYS)
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;
	skb = hdev->req_skb;
	hdev->req_skb = NULL;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
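
/* Driver-style use of __hci_cmd_sync() (illustrative sketch, error
 * handling shortened):
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			     HCI_INIT_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	// skb->data holds the Command Complete return parameters
 *	kfree_skb(skb);
 */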

/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
						     unsigned long opt),
		   unsigned long opt, u32 timeout, u8 *hci_status)
{
	struct hci_request req;
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	err = func(&req, opt);
	if (err) {
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		return err;
	}

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA) {
			if (hci_status)
				*hci_status = 0;
			return 0;
		}

		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;

		return err;
	}

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
			hdev->req_status != HCI_REQ_PEND, timeout);

	if (err == -ERESTARTSYS)
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		if (hci_status)
			*hci_status = hdev->req_result;
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;

	default:
		err = -ETIMEDOUT;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;
	}

	kfree_skb(hdev->req_skb);
	hdev->req_skb = NULL;
	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
						  unsigned long opt),
		 unsigned long opt, u32 timeout, u8 *hci_status)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_sync_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
	hci_req_sync_unlock(hdev);

	return ret;
}
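
/* hci_req_sync() takes a request-builder callback; a minimal sketch
 * (update_name_sync is a hypothetical builder, not part of this file):
 *
 *	static int update_name_sync(struct hci_request *req, unsigned long opt)
 *	{
 *		__hci_req_update_name(req);
 *		return 0;
 *	}
 *
 *	hci_req_sync(hdev, update_name_sync, 0, HCI_CMD_TIMEOUT, NULL);
 */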

struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
				const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		skb_put_data(skb, param, plen);

	BT_DBG("skb len %d", skb->len);

	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
	hci_skb_opcode(skb) = opcode;

	return skb;
}
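
/* Wire format of the command built above (per the HCI specification):
 * a 2-byte little-endian opcode (OGF in the upper 6 bits, OCF in the
 * lower 10), one parameter-length byte, then plen parameter bytes.
 */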

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	bt_cb(skb)->hci.req_event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval (0x0100 * 0.625 msec) */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = hdev->def_page_scan_type;
		acp.interval = cpu_to_le16(hdev->def_page_scan_int);
	}

	acp.window = cpu_to_le16(hdev->def_page_scan_window);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}

/* This function controls the background scanning based on the
 * hdev->pend_le_conns list. If there are pending LE connections we start
 * the background scanning, otherwise we stop it.
 *
 * This function requires that the caller holds hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return;

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return;

	/* Reset RSSI and UUID filters when starting background scanning
	 * since these filters are meant for service discovery only.
	 *
	 * The Start Discovery and Start Service Discovery operations
	 * ensure to set proper values for RSSI threshold and UUID
	 * filter list. So it is safe to just reset them here.
	 */
	hci_discovery_filter_clear(hdev);

	BT_DBG("%s ADV monitoring is %s", hdev->name,
	       hci_is_adv_monitoring(hdev) ? "on" : "off");

	if (list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports) &&
	    !hci_is_adv_monitoring(hdev)) {
		/* If there are no pending LE connections, no devices to
		 * be scanned for and no ADV monitors, we should stop the
		 * background scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
			return;

		hci_req_add_le_scan_disable(req, false);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		if (hci_lookup_le_connect(hdev))
			return;

		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to duplicates filter).
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
			hci_req_add_le_scan_disable(req, false);

		hci_req_add_le_passive_scan(req);

		BT_DBG("%s starting background scanning", hdev->name);
	}
}

void __hci_req_update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

#define PNP_INFO_SVCLASS_ID		0x1200
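
/* The helpers below build EIR/advertising structures. Each structure is
 * encoded as one length byte (covering the type byte plus the data), one
 * type byte, then the data; e.g. a list holding the single 16-bit UUID
 * 0x180d is encoded as { 0x03, EIR_UUID16_ALL, 0x0d, 0x18 }.
 */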

static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}

static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}

static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}

static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}

void __hci_req_update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
			    &cp);
	} else {
		struct hci_cp_le_set_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	/* Disable address resolution */
	if (use_ll_privacy(hdev) &&
	    hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) {
		__u8 enable = 0x00;
		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
	}
}

static void del_from_white_list(struct hci_request *req, bdaddr_t *bdaddr,
				u8 bdaddr_type)
{
	struct hci_cp_le_del_from_white_list cp;

	cp.bdaddr_type = bdaddr_type;
	bacpy(&cp.bdaddr, bdaddr);

	bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from whitelist", &cp.bdaddr,
		   cp.bdaddr_type);
	hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST, sizeof(cp), &cp);

	if (use_ll_privacy(req->hdev)) {
		struct smp_irk *irk;

		irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type);
		if (irk) {
			struct hci_cp_le_del_from_resolv_list cp;

			cp.bdaddr_type = bdaddr_type;
			bacpy(&cp.bdaddr, bdaddr);

			hci_req_add(req, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
				    sizeof(cp), &cp);
		}
	}
}

/* Adds connection to white list if needed. On error, returns -1. */
static int add_to_white_list(struct hci_request *req,
			     struct hci_conn_params *params, u8 *num_entries,
			     bool allow_rpa)
{
	struct hci_cp_le_add_to_white_list cp;
	struct hci_dev *hdev = req->hdev;

	/* Already in white list */
	if (hci_bdaddr_list_lookup(&hdev->le_white_list, &params->addr,
				   params->addr_type))
		return 0;

	/* Select filter policy to accept all advertising */
	if (*num_entries >= hdev->le_white_list_size)
		return -1;

	/* White list cannot be used with RPAs */
	if (!allow_rpa && !use_ll_privacy(hdev) &&
	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) {
		return -1;
	}

	/* During suspend, only wakeable devices can be in the whitelist */
	if (hdev->suspended && !hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
						   params->current_flags))
		return 0;

	*num_entries += 1;
	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);

	bt_dev_dbg(hdev, "Add %pMR (0x%x) to whitelist", &cp.bdaddr,
		   cp.bdaddr_type);
	hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);

	if (use_ll_privacy(hdev)) {
		struct smp_irk *irk;

		irk = hci_find_irk_by_addr(hdev, &params->addr,
					   params->addr_type);
		if (irk) {
			struct hci_cp_le_add_to_resolv_list cp;

			cp.bdaddr_type = params->addr_type;
			bacpy(&cp.bdaddr, &params->addr);
			memcpy(cp.peer_irk, irk->val, 16);

			if (hci_dev_test_flag(hdev, HCI_PRIVACY))
				memcpy(cp.local_irk, hdev->irk, 16);
			else
				memset(cp.local_irk, 0, 16);

			hci_req_add(req, HCI_OP_LE_ADD_TO_RESOLV_LIST,
				    sizeof(cp), &cp);
		}
	}

	return 0;
}

static u8 update_white_list(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;
	struct bdaddr_list *b;
	u8 num_entries = 0;
	bool pend_conn, pend_report;
	/* We allow whitelisting even with RPAs in suspend. In the worst case,
	 * we won't be able to wake from devices that use the privacy 1.2
	 * features. Additionally, once we support privacy 1.2 and IRK
	 * offloading, we can update this to also check for those conditions.
	 */
	bool allow_rpa = hdev->suspended;

	/* Go through the current white list programmed into the
	 * controller one by one and check if that address is still
	 * in the list of pending connections or list of devices to
	 * report. If not present in either list, then queue the
	 * command to remove it from the controller.
	 */
	list_for_each_entry(b, &hdev->le_white_list, list) {
		pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
						      &b->bdaddr,
						      b->bdaddr_type);
		pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
							&b->bdaddr,
							b->bdaddr_type);

		/* If the device is not likely to connect or report,
		 * remove it from the whitelist.
		 */
		if (!pend_conn && !pend_report) {
			del_from_white_list(req, &b->bdaddr, b->bdaddr_type);
			continue;
		}

		/* White list cannot be used with RPAs */
		if (!allow_rpa && !use_ll_privacy(hdev) &&
		    hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
			return 0x00;
		}

		num_entries++;
	}

	/* Since all no longer valid white list entries have been
	 * removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If the list of the devices is larger than the list of
	 * available white list entries in the controller, then
	 * just abort and return a filter policy value to not use the
	 * white list.
	 */
	list_for_each_entry(params, &hdev->pend_le_conns, action) {
		if (add_to_white_list(req, params, &num_entries, allow_rpa))
			return 0x00;
	}

	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * white list if there is still space. Abort if space runs out.
	 */
	list_for_each_entry(params, &hdev->pend_le_reports, action) {
		if (add_to_white_list(req, params, &num_entries, allow_rpa))
			return 0x00;
	}

	/* Once controller offloading of advertisement monitors is in place,
	 * the if condition should also check for support of the MSFT
	 * extension. If suspend is ongoing, the whitelist should be the
	 * default to prevent waking by random advertisements.
	 */
	if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended)
		return 0x00;

	/* Select filter policy to use white list */
	return 0x01;
}
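
/* The return value above doubles as the LE scan filter_policy: 0x00
 * means accept all advertisements, 0x01 means accept only advertisers
 * on the white list. hci_req_add_le_passive_scan() may further OR in
 * 0x02 to select the extended policies that also pass directed
 * advertising.
 */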

static bool scan_use_rpa(struct hci_dev *hdev)
{
	return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
			       u16 window, u8 own_addr_type, u8 filter_policy,
			       bool addr_resolv)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	if (use_ll_privacy(hdev) && addr_resolv) {
		u8 enable = 0x01;
		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
	}

	/* Use extended scanning if both the Set Extended Scan Parameters
	 * and Set Extended Scan Enable commands are supported
	 */
	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_params *ext_param_cp;
		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
		struct hci_cp_le_scan_phy_params *phy_params;
		u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
		u32 plen;

		ext_param_cp = (void *)data;
		phy_params = (void *)ext_param_cp->data;

		memset(ext_param_cp, 0, sizeof(*ext_param_cp));
		ext_param_cp->own_addr_type = own_addr_type;
		ext_param_cp->filter_policy = filter_policy;

		plen = sizeof(*ext_param_cp);

		if (scan_1m(hdev) || scan_2m(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		if (scan_coded(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
			    plen, ext_param_cp);

		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
		ext_enable_cp.enable = LE_SCAN_ENABLE;
		ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
			    sizeof(ext_enable_cp), &ext_enable_cp);
	} else {
		struct hci_cp_le_set_scan_param param_cp;
		struct hci_cp_le_set_scan_enable enable_cp;

		memset(&param_cp, 0, sizeof(param_cp));
		param_cp.type = type;
		param_cp.interval = cpu_to_le16(interval);
		param_cp.window = cpu_to_le16(window);
		param_cp.own_address_type = own_addr_type;
		param_cp.filter_policy = filter_policy;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
	}
}

/* Call hci_req_add_le_scan_disable() first to disable controller-based
 * address resolution; the resolving list can only be reconfigured while
 * resolution is disabled.
 */
void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	u8 filter_policy;
	u16 window, interval;
	/* Background scanning should run with address resolution */
	bool addr_resolv = true;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	/* Set require_privacy to false since no SCAN_REQ are sent
	 * during passive scanning. Not using a non-resolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address(req, false, scan_use_rpa(hdev),
				      &own_addr_type))
		return;

	/* Adding or removing entries from the white list must
	 * happen before enabling scanning. The controller does
	 * not allow white list modification while scanning.
	 */
	filter_policy = update_white_list(req);

	/* When the controller is using resolvable random addresses, and
	 * LE privacy is therefore enabled, controllers that support the
	 * Extended Scanner Filter Policies can additionally handle
	 * directed advertising.
	 *
	 * So instead of using filter policies 0x00 (no whitelist)
	 * and 0x01 (whitelist enabled) use the new filter policies
	 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
		filter_policy |= 0x02;

	if (hdev->suspended) {
		window = hdev->le_scan_window_suspend;
		interval = hdev->le_scan_int_suspend;
	} else {
		window = hdev->le_scan_window;
		interval = hdev->le_scan_interval;
	}

	bt_dev_dbg(hdev, "LE passive scan with whitelist = %d", filter_policy);
	hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
			   own_addr_type, filter_policy, addr_resolv);
}

static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	/* Instance 0x00 always sets the local name */
	if (instance == 0x00)
		return 1;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	/* TODO: Take into account the "appearance" and "local-name" flags here.
	 * These are currently being ignored as they are not supported.
	 */
	return adv_instance->scan_rsp_len;
}

static void hci_req_clear_event_filter(struct hci_request *req)
{
	struct hci_cp_set_event_filter f;

	memset(&f, 0, sizeof(f));
	f.flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &f);

	/* Update page scan state (since we may have modified it when setting
	 * the event filter).
	 */
	__hci_req_update_scan(req);
}

static void hci_req_set_event_filter(struct hci_request *req)
{
	struct bdaddr_list_with_flags *b;
	struct hci_cp_set_event_filter f;
	struct hci_dev *hdev = req->hdev;
	u8 scan = SCAN_DISABLED;

	/* Always clear event filter when starting */
	hci_req_clear_event_filter(req);

	list_for_each_entry(b, &hdev->whitelist, list) {
		if (!hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
					b->current_flags))
			continue;

		memset(&f, 0, sizeof(f));
		bacpy(&f.addr_conn_flt.bdaddr, &b->bdaddr);
		f.flt_type = HCI_FLT_CONN_SETUP;
		f.cond_type = HCI_CONN_SETUP_ALLOW_BDADDR;
		f.addr_conn_flt.auto_accept = HCI_CONN_SETUP_AUTO_ON;

		bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);
		hci_req_add(req, HCI_OP_SET_EVENT_FLT, sizeof(f), &f);
		scan = SCAN_PAGE;
	}

	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_req_config_le_suspend_scan(struct hci_request *req)
{
	/* Before changing params disable scan if enabled */
	if (hci_dev_test_flag(req->hdev, HCI_LE_SCAN))
		hci_req_add_le_scan_disable(req, false);

	/* Configure params and enable scanning */
	hci_req_add_le_passive_scan(req);

	/* Block suspend notifier on response */
	set_bit(SUSPEND_SCAN_ENABLE, req->hdev->suspend_tasks);
}

static void suspend_req_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "Request complete opcode=0x%x, status=0x%x", opcode,
		   status);
	if (test_and_clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) ||
	    test_and_clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) {
		wake_up(&hdev->suspend_wait_q);
	}
}

/* Call with hci_dev_lock */
void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
{
	int old_state;
	struct hci_conn *conn;
	struct hci_request req;
	u8 page_scan;
	int disconnect_counter;

	if (next == hdev->suspend_state) {
		bt_dev_dbg(hdev, "Same state before and after: %d", next);
		goto done;
	}

	hdev->suspend_state = next;
	hci_req_init(&req, hdev);

	if (next == BT_SUSPEND_DISCONNECT) {
		/* Mark device as suspended */
		hdev->suspended = true;

		/* Pause discovery if not already stopped */
		old_state = hdev->discovery.state;
		if (old_state != DISCOVERY_STOPPED) {
			set_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
			queue_work(hdev->req_workqueue, &hdev->discov_update);
		}

		hdev->discovery_paused = true;
		hdev->discovery_old_state = old_state;

		/* Stop advertising */
		old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
		if (old_state) {
			set_bit(SUSPEND_PAUSE_ADVERTISING, hdev->suspend_tasks);
			cancel_delayed_work(&hdev->discov_off);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, 0);
		}

		hdev->advertising_paused = true;
		hdev->advertising_old_state = old_state;
		/* Disable page scan */
		page_scan = SCAN_DISABLED;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &page_scan);

		/* Disable LE passive scan if enabled */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
			hci_req_add_le_scan_disable(&req, false);

		/* Mark task needing completion */
		set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);

		/* Prevent disconnects from causing scanning to be re-enabled */
		hdev->scanning_paused = true;

		/* Run commands before disconnecting */
		hci_req_run(&req, suspend_req_complete);

		disconnect_counter = 0;
		/* Soft disconnect everything (power off) */
		list_for_each_entry(conn, &hdev->conn_hash.list, list) {
			hci_disconnect(conn, HCI_ERROR_REMOTE_POWER_OFF);
			disconnect_counter++;
		}

		if (disconnect_counter > 0) {
			bt_dev_dbg(hdev,
				   "Had %d disconnects. Will wait on them",
				   disconnect_counter);
			set_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks);
		}
	} else if (next == BT_SUSPEND_CONFIGURE_WAKE) {
		/* Unpause to take care of updating scanning params */
		hdev->scanning_paused = false;
		/* Enable event filter for paired devices */
		hci_req_set_event_filter(&req);
		/* Enable passive scan at lower duty cycle */
		hci_req_config_le_suspend_scan(&req);
		/* Pause scan changes again. */
		hdev->scanning_paused = true;
		hci_req_run(&req, suspend_req_complete);
	} else {
		hdev->suspended = false;
		hdev->scanning_paused = false;

		hci_req_clear_event_filter(&req);
		/* Reset passive/background scanning to normal */
		hci_req_config_le_suspend_scan(&req);

		/* Unpause advertising */
		hdev->advertising_paused = false;
		if (hdev->advertising_old_state) {
			set_bit(SUSPEND_UNPAUSE_ADVERTISING,
				hdev->suspend_tasks);
			hci_dev_set_flag(hdev, HCI_ADVERTISING);
			queue_work(hdev->req_workqueue,
				   &hdev->discoverable_update);
			hdev->advertising_old_state = 0;
		}

		/* Unpause discovery */
		hdev->discovery_paused = false;
		if (hdev->discovery_old_state != DISCOVERY_STOPPED &&
		    hdev->discovery_old_state != DISCOVERY_STOPPING) {
			set_bit(SUSPEND_UNPAUSE_DISCOVERY, hdev->suspend_tasks);
			hci_discovery_set_state(hdev, DISCOVERY_STARTING);
			queue_work(hdev->req_workqueue, &hdev->discov_update);
		}

		hci_req_run(&req, suspend_req_complete);
	}

	hdev->suspend_state = next;

done:
	clear_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
	wake_up(&hdev->suspend_wait_q);
}
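
/* Summary of the suspend flow driven above: BT_SUSPEND_DISCONNECT pauses
 * discovery and advertising, disables scanning and drops all connections;
 * BT_SUSPEND_CONFIGURE_WAKE programs event filters and a low duty cycle
 * passive scan so that only wakeable devices can reach the host; any
 * other state restores normal operation.
 */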

static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
{
	u8 instance = hdev->cur_adv_instance;
	struct adv_info *adv_instance;

	/* Instance 0x00 always sets the local name */
	if (instance == 0x00)
		return 1;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	/* TODO: Take into account the "appearance" and "local-name" flags here.
	 * These are currently being ignored as they are not supported.
	 */
	return adv_instance->scan_rsp_len;
}

void __hci_req_disable_advertising(struct hci_request *req)
{
	if (ext_adv_capable(req->hdev)) {
		__hci_req_disable_ext_adv_instance(req, 0x00);
	} else {
		u8 enable = 0x00;

		hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
	}
}

static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
	u32 flags;
	struct adv_info *adv_instance;

	if (instance == 0x00) {
		/* Instance 0 always manages the "Tx Power" and "Flags"
		 * fields
		 */
		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
		 * corresponds to the "connectable" instance flag.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
			flags |= MGMT_ADV_FLAG_CONNECTABLE;

		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_DISCOV;

		return flags;
	}

	adv_instance = hci_find_adv_instance(hdev, instance);

	/* Return 0 when we got an invalid instance identifier. */
	if (!adv_instance)
		return 0;

	return adv_instance->flags;
}

static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
{
	/* If privacy is not enabled don't use RPA */
	if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
		return false;

	/* If basic privacy mode is enabled use RPA */
	if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
		return true;

	/* If limited privacy mode is enabled don't use RPA if we're
	 * both discoverable and bondable.
	 */
	if ((flags & MGMT_ADV_FLAG_DISCOV) &&
	    hci_dev_test_flag(hdev, HCI_BONDABLE))
		return false;

	/* We're neither bondable nor discoverable in the limited
	 * privacy mode, therefore use RPA.
	 */
	return true;
}
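
/* Decision table for the above (RPA = resolvable private address):
 *
 *	privacy off                          -> identity address
 *	basic privacy                        -> RPA
 *	limited privacy, discoverable+bond   -> identity address
 *	limited privacy otherwise            -> RPA
 */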

static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
{
	/* If there is no connection we are OK to advertise. */
	if (hci_conn_num(hdev, LE_LINK) == 0)
		return true;

	/* Check le_states if there is any connection in slave role. */
	if (hdev->conn_hash.le_num_slave > 0) {
		/* Slave connection state and non connectable mode bit 20. */
		if (!connectable && !(hdev->le_states[2] & 0x10))
			return false;

		/* Slave connection state and connectable mode bit 38
		 * and scannable bit 21.
		 */
		if (connectable && (!(hdev->le_states[4] & 0x40) ||
				    !(hdev->le_states[2] & 0x20)))
			return false;
	}

	/* Check le_states if there is any connection in master role. */
	if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_slave) {
		/* Master connection state and non connectable mode bit 18. */
		if (!connectable && !(hdev->le_states[2] & 0x02))
			return false;

		/* Master connection state and connectable mode bit 35 and
		 * scannable 19.
		 */
		if (connectable && (!(hdev->le_states[4] & 0x08) ||
				    !(hdev->le_states[2] & 0x08)))
			return false;
	}

	return true;
}

void __hci_req_enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;
	u16 adv_min_interval, adv_max_interval;
	u32 flags;

	flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      mgmt_get_connectable(hdev);

	if (!is_advertising_allowed(hdev, connectable))
		return;

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(req);

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable,
				      adv_use_rpa(hdev, flags),
				      &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));

	if (connectable) {
		cp.type = LE_ADV_IND;

		adv_min_interval = hdev->le_adv_min_interval;
		adv_max_interval = hdev->le_adv_max_interval;
	} else {
		if (get_cur_adv_instance_scan_rsp_len(hdev))
			cp.type = LE_ADV_SCAN_IND;
		else
			cp.type = LE_ADV_NONCONN_IND;

		if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
		    hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
			adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
			adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
		} else {
			adv_min_interval = hdev->le_adv_min_interval;
			adv_max_interval = hdev->le_adv_max_interval;
		}
	}

	cp.min_interval = cpu_to_le16(adv_min_interval);
	cp.max_interval = cpu_to_le16(adv_max_interval);
	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
	size_t short_len;
	size_t complete_len;

	/* no space left for name (+ NULL + type + len) */
	if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
		return ad_len;

	/* use complete name if present and fits */
	complete_len = strlen(hdev->dev_name);
	if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
		return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
				       hdev->dev_name, complete_len + 1);

	/* use short name if present */
	short_len = strlen(hdev->short_name);
	if (short_len)
		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
				       hdev->short_name, short_len + 1);

	/* use shortened full name if present, we already know that name
	 * is longer than HCI_MAX_SHORT_NAME_LENGTH
	 */
	if (complete_len) {
		u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];

		memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
		name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';

		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
				       sizeof(name));
	}

	return ad_len;
}
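
/* Fallback order above: the complete name when it fits in the short-name
 * budget, then a configured short name, then the complete name truncated
 * to HCI_MAX_SHORT_NAME_LENGTH bytes and tagged EIR_NAME_SHORT.
 */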

static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
	return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
}

static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 scan_rsp_len = 0;

	if (hdev->appearance) {
		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
	}

	return append_local_name(hdev, ptr, scan_rsp_len);
}

static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
					u8 *ptr)
{
	struct adv_info *adv_instance;
	u32 instance_flags;
	u8 scan_rsp_len = 0;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	instance_flags = adv_instance->flags;

	if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) {
		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
	}

	memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
	       adv_instance->scan_rsp_len);

	scan_rsp_len += adv_instance->scan_rsp_len;

	if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
		scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);

	return scan_rsp_len;
}

void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	if (ext_adv_capable(hdev)) {
		struct hci_cp_le_set_ext_scan_rsp_data cp;

		memset(&cp, 0, sizeof(cp));

		if (instance)
			len = create_instance_scan_rsp_data(hdev, instance,
							    cp.data);
		else
			len = create_default_scan_rsp_data(hdev, cp.data);

		if (hdev->scan_rsp_data_len == len &&
		    !memcmp(cp.data, hdev->scan_rsp_data, len))
			return;

		memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
		hdev->scan_rsp_data_len = len;

		cp.handle = instance;
		cp.length = len;
		cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
		cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, sizeof(cp),
			    &cp);
	} else {
		struct hci_cp_le_set_scan_rsp_data cp;

		memset(&cp, 0, sizeof(cp));

		if (instance)
			len = create_instance_scan_rsp_data(hdev, instance,
							    cp.data);
		else
			len = create_default_scan_rsp_data(hdev, cp.data);

		if (hdev->scan_rsp_data_len == len &&
		    !memcmp(cp.data, hdev->scan_rsp_data, len))
			return;

		memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
		hdev->scan_rsp_data_len = len;

		cp.length = len;

		hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
	}
}

static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
{
	struct adv_info *adv_instance = NULL;
	u8 ad_len = 0, flags = 0;
	u32 instance_flags;

	/* Return 0 when the current instance identifier is invalid. */
	if (instance) {
		adv_instance = hci_find_adv_instance(hdev, instance);
		if (!adv_instance)
			return 0;
	}

	instance_flags = get_adv_instance_flags(hdev, instance);

	/* If the instance already has the flags set, skip adding them
	 * once again.
	 */
	if (adv_instance && eir_get_data(adv_instance->adv_data,
					 adv_instance->adv_data_len, EIR_FLAGS,
					 NULL))
		goto skip_flags;

	/* The Add Advertising command allows userspace to set both the general
	 * and limited discoverable flags.
	 */
	if (instance_flags & MGMT_ADV_FLAG_DISCOV)
		flags |= LE_AD_GENERAL;

	if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
		flags |= LE_AD_LIMITED;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		flags |= LE_AD_NO_BREDR;

	if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
		/* If a discovery flag wasn't provided, simply use the global
		 * settings.
		 */
		if (!flags)
			flags |= mgmt_get_adv_discov_flags(hdev);

		/* If flags would still be empty, then there is no need to
		 * include the "Flags" AD field.
		 */
		if (flags) {
			ptr[0] = 0x02;
			ptr[1] = EIR_FLAGS;
			ptr[2] = flags;

			ad_len += 3;
			ptr += 3;
		}
	}

skip_flags:
	if (adv_instance) {
		memcpy(ptr, adv_instance->adv_data,
		       adv_instance->adv_data_len);
		ad_len += adv_instance->adv_data_len;
		ptr += adv_instance->adv_data_len;
	}

	if (instance_flags & MGMT_ADV_FLAG_TX_POWER) {
		s8 adv_tx_power;

		if (ext_adv_capable(hdev)) {
			if (adv_instance)
				adv_tx_power = adv_instance->tx_power;
			else
				adv_tx_power = hdev->adv_tx_power;
		} else {
			adv_tx_power = hdev->adv_tx_power;
		}

		/* Only include the Tx Power field if we have a valid value */
		if (adv_tx_power != HCI_TX_POWER_INVALID) {
			ptr[0] = 0x02;
			ptr[1] = EIR_TX_POWER;
			ptr[2] = (u8)adv_tx_power;

			ad_len += 3;
			ptr += 3;
		}
	}

	return ad_len;
}

void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	if (ext_adv_capable(hdev)) {
		struct hci_cp_le_set_ext_adv_data cp;

		memset(&cp, 0, sizeof(cp));

		len = create_instance_adv_data(hdev, instance, cp.data);

		/* There's nothing to do if the data hasn't changed */
		if (hdev->adv_data_len == len &&
		    memcmp(cp.data, hdev->adv_data, len) == 0)
			return;

		memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
		hdev->adv_data_len = len;

		cp.length = len;
		cp.handle = instance;
		cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
		cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;

		hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA, sizeof(cp), &cp);
	} else {
		struct hci_cp_le_set_adv_data cp;

		memset(&cp, 0, sizeof(cp));

		len = create_instance_adv_data(hdev, instance, cp.data);

		/* There's nothing to do if the data hasn't changed */
		if (hdev->adv_data_len == len &&
		    memcmp(cp.data, hdev->adv_data, len) == 0)
			return;

		memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
		hdev->adv_data_len = len;

		cp.length = len;

		hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
	}
}

int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	__hci_req_update_adv_data(&req, instance);

	return hci_req_run(&req, NULL);
}

static void enable_addr_resolution_complete(struct hci_dev *hdev, u8 status,
					    u16 opcode)
{
	BT_DBG("%s status %u", hdev->name, status);
}

void hci_req_disable_address_resolution(struct hci_dev *hdev)
{
	struct hci_request req;
	__u8 enable = 0x00;

	if (!use_ll_privacy(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
		return;

	hci_req_init(&req, hdev);

	hci_req_add(&req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);

	hci_req_run(&req, enable_addr_resolution_complete);
}

static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("%s status %u", hdev->name, status);
}

void hci_req_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	    list_empty(&hdev->adv_instances))
		return;

	hci_req_init(&req, hdev);

	if (hdev->cur_adv_instance) {
		__hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
						true);
	} else {
		if (ext_adv_capable(hdev)) {
			__hci_req_start_ext_adv(&req, 0x00);
		} else {
			__hci_req_update_adv_data(&req, 0x00);
			__hci_req_update_scan_rsp_data(&req, 0x00);
			__hci_req_enable_advertising(&req);
		}
	}

	hci_req_run(&req, adv_enable_complete);
}

static void adv_timeout_expire(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    adv_instance_expire.work);

	struct hci_request req;
	u8 instance;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hdev->adv_instance_timeout = 0;

	instance = hdev->cur_adv_instance;
	if (instance == 0x00)
		goto unlock;

	hci_req_init(&req, hdev);

	hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);

	if (list_empty(&hdev->adv_instances))
		__hci_req_disable_advertising(&req);

	hci_req_run(&req, NULL);

unlock:
	hci_dev_unlock(hdev);
}

int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
			   bool use_rpa, struct adv_info *adv_instance,
			   u8 *own_addr_type, bdaddr_t *rand_addr)
{
	int err;

	bacpy(rand_addr, BDADDR_ANY);

	/* If privacy is enabled use a resolvable private address. If
	 * current RPA has expired then generate a new one.
	 */
	if (use_rpa) {
		int to;

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (adv_instance) {
			if (!adv_instance->rpa_expired &&
			    !bacmp(&adv_instance->random_addr, &hdev->rpa))
				return 0;

			adv_instance->rpa_expired = false;
		} else {
			if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
			    !bacmp(&hdev->random_addr, &hdev->rpa))
				return 0;
		}

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			bt_dev_err(hdev, "failed to generate new RPA");
			return err;
		}

		bacpy(rand_addr, &hdev->rpa);

		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
		if (adv_instance)
			queue_delayed_work(hdev->workqueue,
					   &adv_instance->rpa_expired_cb, to);
		else
			queue_delayed_work(hdev->workqueue,
					   &hdev->rpa_expired, to);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use a non-resolvable private address. This is useful for
	 * non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from six random bytes with the two most significant
			 * bits cleared.
			 */
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;

			/* The non-resolvable private address shall not be
			 * equal to the public address.
			 */
			if (bacmp(&hdev->bdaddr, &nrpa))
				break;
		}

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		bacpy(rand_addr, &nrpa);

		return 0;
	}

	/* No privacy so use a public address. */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}
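
/* LE random address subtypes are distinguished by the two most
 * significant bits of the address: 00 for non-resolvable private,
 * 01 for resolvable private (RPA) and 11 for static random. bdaddr_t
 * is stored little-endian, so those bits live in b[5], hence the
 * "&= 0x3f" above.
 */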

void __hci_req_clear_ext_adv_sets(struct hci_request *req)
{
	hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
}
1869 
1870 int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
1871 {
1872 	struct hci_cp_le_set_ext_adv_params cp;
1873 	struct hci_dev *hdev = req->hdev;
1874 	bool connectable;
1875 	u32 flags;
1876 	bdaddr_t random_addr;
1877 	u8 own_addr_type;
1878 	int err;
1879 	struct adv_info *adv_instance;
1880 	bool secondary_adv;
1881 
1882 	if (instance > 0) {
1883 		adv_instance = hci_find_adv_instance(hdev, instance);
1884 		if (!adv_instance)
1885 			return -EINVAL;
1886 	} else {
1887 		adv_instance = NULL;
1888 	}
1889 
1890 	flags = get_adv_instance_flags(hdev, instance);
1891 
1892 	/* If the "connectable" instance flag was not set, then choose between
1893 	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1894 	 */
1895 	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1896 		      mgmt_get_connectable(hdev);
1897 
1898 	if (!is_advertising_allowed(hdev, connectable))
1899 		return -EPERM;
1900 
1901 	/* Set require_privacy to true only when non-connectable
1902 	 * advertising is used. In that case it is fine to use a
1903 	 * non-resolvable private address.
1904 	 */
1905 	err = hci_get_random_address(hdev, !connectable,
1906 				     adv_use_rpa(hdev, flags), adv_instance,
1907 				     &own_addr_type, &random_addr);
1908 	if (err < 0)
1909 		return err;
1910 
1911 	memset(&cp, 0, sizeof(cp));
1912 
1913 	/* In the extended advertising parameters the interval is 3 octets */
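	/* Both values are in units of 0.625 ms, per the LE advertising
	 * interval definition in the Core specification.
	 */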
1914 	hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
1915 	hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
1916 
1917 	secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
1918 
1919 	if (connectable) {
1920 		if (secondary_adv)
1921 			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
1922 		else
1923 			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
1924 	} else if (get_adv_instance_scan_rsp_len(hdev, instance)) {
1925 		if (secondary_adv)
1926 			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
1927 		else
1928 			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
1929 	} else {
1930 		if (secondary_adv)
1931 			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
1932 		else
1933 			cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
1934 	}
1935 
1936 	cp.own_addr_type = own_addr_type;
1937 	cp.channel_map = hdev->le_adv_channel_map;
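	/* A TX power of 127 (0x7F) means the host has no preference and
	 * leaves the power level selection to the controller.
	 */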
1938 	cp.tx_power = 127;
1939 	cp.handle = instance;
1940 
1941 	if (flags & MGMT_ADV_FLAG_SEC_2M) {
1942 		cp.primary_phy = HCI_ADV_PHY_1M;
1943 		cp.secondary_phy = HCI_ADV_PHY_2M;
1944 	} else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
1945 		cp.primary_phy = HCI_ADV_PHY_CODED;
1946 		cp.secondary_phy = HCI_ADV_PHY_CODED;
1947 	} else {
1948 		/* In all other cases use 1M */
1949 		cp.primary_phy = HCI_ADV_PHY_1M;
1950 		cp.secondary_phy = HCI_ADV_PHY_1M;
1951 	}
1952 
1953 	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
1954 
1955 	if (own_addr_type == ADDR_LE_DEV_RANDOM &&
1956 	    bacmp(&random_addr, BDADDR_ANY)) {
1957 		struct hci_cp_le_set_adv_set_rand_addr cp;
1958 
1959 		/* Check if the random address needs to be updated */
1960 		if (adv_instance) {
1961 			if (!bacmp(&random_addr, &adv_instance->random_addr))
1962 				return 0;
1963 		} else {
1964 			if (!bacmp(&random_addr, &hdev->random_addr))
1965 				return 0;
1966 		}
1967 
1968 		memset(&cp, 0, sizeof(cp));
1969 
1970 		cp.handle = instance;
1971 		bacpy(&cp.bdaddr, &random_addr);
1972 
1973 		hci_req_add(req,
1974 			    HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
1975 			    sizeof(cp), &cp);
1976 	}
1977 
1978 	return 0;
1979 }
1980 
1981 int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
1982 {
1983 	struct hci_dev *hdev = req->hdev;
1984 	struct hci_cp_le_set_ext_adv_enable *cp;
1985 	struct hci_cp_ext_adv_set *adv_set;
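	/* Buffer for the enable header plus exactly one set entry */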
1986 	u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
1987 	struct adv_info *adv_instance;
1988 
1989 	if (instance > 0) {
1990 		adv_instance = hci_find_adv_instance(hdev, instance);
1991 		if (!adv_instance)
1992 			return -EINVAL;
1993 	} else {
1994 		adv_instance = NULL;
1995 	}
1996 
1997 	cp = (void *) data;
1998 	adv_set = (void *) cp->data;
1999 
2000 	memset(cp, 0, sizeof(*cp));
2001 
2002 	cp->enable = 0x01;
2003 	cp->num_of_sets = 0x01;
2004 
2005 	memset(adv_set, 0, sizeof(*adv_set));
2006 
2007 	adv_set->handle = instance;
2008 
2009 	/* Set the duration per instance since the controller is responsible
2010 	 * for scheduling it.
2011 	 */
2012 	if (adv_instance && adv_instance->timeout) {
2013 		u16 duration = adv_instance->timeout * MSEC_PER_SEC;
2014 
2015 		/* Time = N * 10 ms */
2016 		adv_set->duration = cpu_to_le16(duration / 10);
2017 	}
2018 
2019 	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
2020 		    sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
2021 		    data);
2022 
2023 	return 0;
2024 }
2025 
2026 int __hci_req_disable_ext_adv_instance(struct hci_request *req, u8 instance)
2027 {
2028 	struct hci_dev *hdev = req->hdev;
2029 	struct hci_cp_le_set_ext_adv_enable *cp;
2030 	struct hci_cp_ext_adv_set *adv_set;
2031 	u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
2032 	u8 req_size;
2033 
2034 	/* If request specifies an instance that doesn't exist, fail */
2035 	if (instance > 0 && !hci_find_adv_instance(hdev, instance))
2036 		return -EINVAL;
2037 
2038 	memset(data, 0, sizeof(data));
2039 
2040 	cp = (void *)data;
2041 	adv_set = (void *)cp->data;
2042 
2043 	/* Instance 0x00 indicates all advertising instances will be disabled */
2044 	cp->num_of_sets = !!instance;
2045 	cp->enable = 0x00;
2046 
2047 	adv_set->handle = instance;
2048 
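	/* When disabling all sets (num_of_sets == 0x00), only the header
	 * is sent; the set entry is included for a single instance only.
	 */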
2049 	req_size = sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets;
2050 	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, req_size, data);
2051 
2052 	return 0;
2053 }
2054 
2055 int __hci_req_remove_ext_adv_instance(struct hci_request *req, u8 instance)
2056 {
2057 	struct hci_dev *hdev = req->hdev;
2058 
2059 	/* If request specifies an instance that doesn't exist, fail */
2060 	if (instance > 0 && !hci_find_adv_instance(hdev, instance))
2061 		return -EINVAL;
2062 
2063 	hci_req_add(req, HCI_OP_LE_REMOVE_ADV_SET, sizeof(instance), &instance);
2064 
2065 	return 0;
2066 }
2067 
2068 int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
2069 {
2070 	struct hci_dev *hdev = req->hdev;
2071 	struct adv_info *adv_instance = hci_find_adv_instance(hdev, instance);
2072 	int err;
2073 
2074 	/* If the instance isn't pending, the controller already knows about
2075 	 * it, so it is safe to disable it first.
2076 	 */
2077 	if (adv_instance && !adv_instance->pending)
2078 		__hci_req_disable_ext_adv_instance(req, instance);
2079 
2080 	err = __hci_req_setup_ext_adv_instance(req, instance);
2081 	if (err < 0)
2082 		return err;
2083 
2084 	__hci_req_update_scan_rsp_data(req, instance);
2085 	__hci_req_enable_ext_advertising(req, instance);
2086 
2087 	return 0;
2088 }
2089 
2090 int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
2091 				    bool force)
2092 {
2093 	struct hci_dev *hdev = req->hdev;
2094 	struct adv_info *adv_instance = NULL;
2095 	u16 timeout;
2096 
2097 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2098 	    list_empty(&hdev->adv_instances))
2099 		return -EPERM;
2100 
2101 	if (hdev->adv_instance_timeout)
2102 		return -EBUSY;
2103 
2104 	adv_instance = hci_find_adv_instance(hdev, instance);
2105 	if (!adv_instance)
2106 		return -ENOENT;
2107 
2108 	/* A zero timeout means unlimited advertising. As long as there is
2109 	 * only one instance, duration should be ignored. We still set a timeout
2110 	 * in case further instances are being added later on.
2111 	 *
2112 	 * If the remaining lifetime of the instance is more than the duration
2113 	 * then the timeout corresponds to the duration, otherwise it will be
2114 	 * reduced to the remaining instance lifetime.
2115 	 */
2116 	if (adv_instance->timeout == 0 ||
2117 	    adv_instance->duration <= adv_instance->remaining_time)
2118 		timeout = adv_instance->duration;
2119 	else
2120 		timeout = adv_instance->remaining_time;
2121 
2122 	/* The remaining time is being reduced unless the instance is being
2123 	 * advertised without time limit.
2124 	 */
2125 	if (adv_instance->timeout)
2126 		adv_instance->remaining_time =
2127 				adv_instance->remaining_time - timeout;
2128 
2129 	/* Only use work for scheduling instances with legacy advertising */
2130 	if (!ext_adv_capable(hdev)) {
2131 		hdev->adv_instance_timeout = timeout;
2132 		queue_delayed_work(hdev->req_workqueue,
2133 				   &hdev->adv_instance_expire,
2134 				   msecs_to_jiffies(timeout * 1000));
2135 	}
2136 
2137 	/* If we're just re-scheduling the same instance again then do not
2138 	 * execute any HCI commands. This happens when a single instance is
2139 	 * being advertised.
2140 	 */
2141 	if (!force && hdev->cur_adv_instance == instance &&
2142 	    hci_dev_test_flag(hdev, HCI_LE_ADV))
2143 		return 0;
2144 
2145 	hdev->cur_adv_instance = instance;
2146 	if (ext_adv_capable(hdev)) {
2147 		__hci_req_start_ext_adv(req, instance);
2148 	} else {
2149 		__hci_req_update_adv_data(req, instance);
2150 		__hci_req_update_scan_rsp_data(req, instance);
2151 		__hci_req_enable_advertising(req);
2152 	}
2153 
2154 	return 0;
2155 }
2156 
2157 static void cancel_adv_timeout(struct hci_dev *hdev)
2158 {
2159 	if (hdev->adv_instance_timeout) {
2160 		hdev->adv_instance_timeout = 0;
2161 		cancel_delayed_work(&hdev->adv_instance_expire);
2162 	}
2163 }
2164 
2165 /* For a single instance:
2166  * - force == true: The instance will be removed even when its remaining
2167  *   lifetime is not zero.
2168  * - force == false: the instance will be deactivated but kept stored unless
2169  *   the remaining lifetime is zero.
2170  *
2171  * For instance == 0x00:
2172  * - force == true: All instances will be removed regardless of their timeout
2173  *   setting.
2174  * - force == false: Only instances that have a timeout will be removed.
2175  */
2176 void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
2177 				struct hci_request *req, u8 instance,
2178 				bool force)
2179 {
2180 	struct adv_info *adv_instance, *n, *next_instance = NULL;
2181 	int err;
2182 	u8 rem_inst;
2183 
2184 	/* Cancel any timeout concerning the removed instance(s). */
2185 	if (!instance || hdev->cur_adv_instance == instance)
2186 		cancel_adv_timeout(hdev);
2187 
2188 	/* Get the next instance to advertise BEFORE we remove
2189 	 * the current one. This can be the same instance again
2190 	 * if there is only one instance.
2191 	 */
2192 	if (instance && hdev->cur_adv_instance == instance)
2193 		next_instance = hci_get_next_instance(hdev, instance);
2194 
2195 	if (instance == 0x00) {
2196 		list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
2197 					 list) {
2198 			if (!(force || adv_instance->timeout))
2199 				continue;
2200 
2201 			rem_inst = adv_instance->instance;
2202 			err = hci_remove_adv_instance(hdev, rem_inst);
2203 			if (!err)
2204 				mgmt_advertising_removed(sk, hdev, rem_inst);
2205 		}
2206 	} else {
2207 		adv_instance = hci_find_adv_instance(hdev, instance);
2208 
2209 		if (force || (adv_instance && adv_instance->timeout &&
2210 			      !adv_instance->remaining_time)) {
2211 			/* Don't advertise a removed instance. */
2212 			if (next_instance &&
2213 			    next_instance->instance == instance)
2214 				next_instance = NULL;
2215 
2216 			err = hci_remove_adv_instance(hdev, instance);
2217 			if (!err)
2218 				mgmt_advertising_removed(sk, hdev, instance);
2219 		}
2220 	}
2221 
2222 	if (!req || !hdev_is_powered(hdev) ||
2223 	    hci_dev_test_flag(hdev, HCI_ADVERTISING))
2224 		return;
2225 
2226 	if (next_instance && !ext_adv_capable(hdev))
2227 		__hci_req_schedule_adv_instance(req, next_instance->instance,
2228 						false);
2229 }
2230 
2231 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
2232 {
2233 	struct hci_dev *hdev = req->hdev;
2234 
2235 	/* If we're advertising or initiating an LE connection we can't
2236 	 * go ahead and change the random address at this time. This is
2237 	 * because the eventual initiator address used for the
2238 	 * subsequently created connection will be undefined (some
2239 	 * controllers use the new address and others the one we had
2240 	 * when the operation started).
2241 	 *
2242 	 * In this kind of scenario skip the update and let the random
2243 	 * address be updated at the next cycle.
2244 	 */
2245 	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
2246 	    hci_lookup_le_connect(hdev)) {
2247 		BT_DBG("Deferring random address update");
2248 		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
2249 		return;
2250 	}
2251 
2252 	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
2253 }
2254 
2255 int hci_update_random_address(struct hci_request *req, bool require_privacy,
2256 			      bool use_rpa, u8 *own_addr_type)
2257 {
2258 	struct hci_dev *hdev = req->hdev;
2259 	int err;
2260 
2261 	/* If privacy is enabled, use a resolvable private address. If the
2262 	 * current RPA has expired, or something other than the current RPA
2263 	 * is in use, generate a new one.
2264 	 */
2265 	if (use_rpa) {
2266 		int to;
2267 
2268 		/* If the controller supports LL Privacy, use own address
2269 		 * type 0x03 (RPA resolved by the controller).
2270 		 */
2271 		if (use_ll_privacy(hdev))
2272 			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
2273 		else
2274 			*own_addr_type = ADDR_LE_DEV_RANDOM;
2275 
2276 		if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
2277 		    !bacmp(&hdev->random_addr, &hdev->rpa))
2278 			return 0;
2279 
2280 		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
2281 		if (err < 0) {
2282 			bt_dev_err(hdev, "failed to generate new RPA");
2283 			return err;
2284 		}
2285 
2286 		set_random_addr(req, &hdev->rpa);
2287 
2288 		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
2289 		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
2290 
2291 		return 0;
2292 	}
2293 
2294 	/* In case privacy is required but a resolvable private address is
2295 	 * not available, use a non-resolvable private address. This is
2296 	 * useful for active scanning and non-connectable advertising.
2297 	 */
2298 	if (require_privacy) {
2299 		bdaddr_t nrpa;
2300 
2301 		while (true) {
2302 			/* The non-resolvable private address is generated
2303 			 * from six random bytes with the two most significant
2304 			 * bits cleared.
2305 			 */
2306 			get_random_bytes(&nrpa, 6);
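			/* b[5] is the most significant byte (little endian) */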
2307 			nrpa.b[5] &= 0x3f;
2308 
2309 			/* The non-resolvable private address shall not be
2310 			 * equal to the public address.
2311 			 */
2312 			if (bacmp(&hdev->bdaddr, &nrpa))
2313 				break;
2314 		}
2315 
2316 		*own_addr_type = ADDR_LE_DEV_RANDOM;
2317 		set_random_addr(req, &nrpa);
2318 		return 0;
2319 	}
2320 
2321 	/* If forcing static address is in use or there is no public
2322 	 * address, use the static address as the random address (but
2323 	 * skip the HCI command if the current random address is already
2324 	 * the static one).
2325 	 *
2326 	 * In case BR/EDR has been disabled on a dual-mode controller
2327 	 * and a static address has been configured, then use that
2328 	 * address instead of the public BR/EDR address.
2329 	 */
2330 	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2331 	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2332 	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2333 	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
2334 		*own_addr_type = ADDR_LE_DEV_RANDOM;
2335 		if (bacmp(&hdev->static_addr, &hdev->random_addr))
2336 			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
2337 				    &hdev->static_addr);
2338 		return 0;
2339 	}
2340 
2341 	/* Neither privacy nor static address is being used so use a
2342 	 * public address.
2343 	 */
2344 	*own_addr_type = ADDR_LE_DEV_PUBLIC;
2345 
2346 	return 0;
2347 }
2348 
2349 static bool disconnected_whitelist_entries(struct hci_dev *hdev)
2350 {
2351 	struct bdaddr_list *b;
2352 
2353 	list_for_each_entry(b, &hdev->whitelist, list) {
2354 		struct hci_conn *conn;
2355 
2356 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
2357 		if (!conn)
2358 			return true;
2359 
2360 		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2361 			return true;
2362 	}
2363 
2364 	return false;
2365 }
2366 
2367 void __hci_req_update_scan(struct hci_request *req)
2368 {
2369 	struct hci_dev *hdev = req->hdev;
2370 	u8 scan;
2371 
2372 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2373 		return;
2374 
2375 	if (!hdev_is_powered(hdev))
2376 		return;
2377 
2378 	if (mgmt_powering_down(hdev))
2379 		return;
2380 
2381 	if (hdev->scanning_paused)
2382 		return;
2383 
2384 	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
2385 	    disconnected_whitelist_entries(hdev))
2386 		scan = SCAN_PAGE;
2387 	else
2388 		scan = SCAN_DISABLED;
2389 
2390 	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2391 		scan |= SCAN_INQUIRY;
2392 
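	/* Skip the command if the controller is already in the requested
	 * page scan and inquiry scan state.
	 */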
2393 	if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
2394 	    test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
2395 		return;
2396 
2397 	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
2398 }
2399 
2400 static int update_scan(struct hci_request *req, unsigned long opt)
2401 {
2402 	hci_dev_lock(req->hdev);
2403 	__hci_req_update_scan(req);
2404 	hci_dev_unlock(req->hdev);
2405 	return 0;
2406 }
2407 
2408 static void scan_update_work(struct work_struct *work)
2409 {
2410 	struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
2411 
2412 	hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
2413 }
2414 
2415 static int connectable_update(struct hci_request *req, unsigned long opt)
2416 {
2417 	struct hci_dev *hdev = req->hdev;
2418 
2419 	hci_dev_lock(hdev);
2420 
2421 	__hci_req_update_scan(req);
2422 
2423 	/* If BR/EDR is not enabled and we disable advertising as a
2424 	 * by-product of disabling connectable, we need to update the
2425 	 * advertising flags.
2426 	 */
2427 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2428 		__hci_req_update_adv_data(req, hdev->cur_adv_instance);
2429 
2430 	/* Update the advertising parameters if necessary */
2431 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2432 	    !list_empty(&hdev->adv_instances)) {
2433 		if (ext_adv_capable(hdev))
2434 			__hci_req_start_ext_adv(req, hdev->cur_adv_instance);
2435 		else
2436 			__hci_req_enable_advertising(req);
2437 	}
2438 
2439 	__hci_update_background_scan(req);
2440 
2441 	hci_dev_unlock(hdev);
2442 
2443 	return 0;
2444 }
2445 
2446 static void connectable_update_work(struct work_struct *work)
2447 {
2448 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2449 					    connectable_update);
2450 	u8 status;
2451 
2452 	hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
2453 	mgmt_set_connectable_complete(hdev, status);
2454 }
2455 
2456 static u8 get_service_classes(struct hci_dev *hdev)
2457 {
2458 	struct bt_uuid *uuid;
2459 	u8 val = 0;
2460 
2461 	list_for_each_entry(uuid, &hdev->uuids, list)
2462 		val |= uuid->svc_hint;
2463 
2464 	return val;
2465 }
2466 
2467 void __hci_req_update_class(struct hci_request *req)
2468 {
2469 	struct hci_dev *hdev = req->hdev;
2470 	u8 cod[3];
2471 
2472 	BT_DBG("%s", hdev->name);
2473 
2474 	if (!hdev_is_powered(hdev))
2475 		return;
2476 
2477 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2478 		return;
2479 
2480 	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
2481 		return;
2482 
2483 	cod[0] = hdev->minor_class;
2484 	cod[1] = hdev->major_class;
2485 	cod[2] = get_service_classes(hdev);
2486 
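	/* Set the Limited Discoverable Mode bit (bit 13) of the
	 * Class of Device.
	 */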
2487 	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
2488 		cod[1] |= 0x20;
2489 
2490 	if (memcmp(cod, hdev->dev_class, 3) == 0)
2491 		return;
2492 
2493 	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
2494 }
2495 
2496 static void write_iac(struct hci_request *req)
2497 {
2498 	struct hci_dev *hdev = req->hdev;
2499 	struct hci_cp_write_current_iac_lap cp;
2500 
2501 	if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2502 		return;
2503 
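	/* The IAC LAPs are written least significant byte first:
	 * LIAC = 0x9E8B00, GIAC = 0x9E8B33.
	 */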
2504 	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
2505 		/* Limited discoverable mode */
2506 		cp.num_iac = min_t(u8, hdev->num_iac, 2);
2507 		cp.iac_lap[0] = 0x00;	/* LIAC */
2508 		cp.iac_lap[1] = 0x8b;
2509 		cp.iac_lap[2] = 0x9e;
2510 		cp.iac_lap[3] = 0x33;	/* GIAC */
2511 		cp.iac_lap[4] = 0x8b;
2512 		cp.iac_lap[5] = 0x9e;
2513 	} else {
2514 		/* General discoverable mode */
2515 		cp.num_iac = 1;
2516 		cp.iac_lap[0] = 0x33;	/* GIAC */
2517 		cp.iac_lap[1] = 0x8b;
2518 		cp.iac_lap[2] = 0x9e;
2519 	}
2520 
2521 	hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
2522 		    (cp.num_iac * 3) + 1, &cp);
2523 }
2524 
2525 static int discoverable_update(struct hci_request *req, unsigned long opt)
2526 {
2527 	struct hci_dev *hdev = req->hdev;
2528 
2529 	hci_dev_lock(hdev);
2530 
2531 	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2532 		write_iac(req);
2533 		__hci_req_update_scan(req);
2534 		__hci_req_update_class(req);
2535 	}
2536 
2537 	/* Advertising instances don't use the global discoverable setting, so
2538 	 * only update AD if advertising was enabled using Set Advertising.
2539 	 */
2540 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2541 		__hci_req_update_adv_data(req, 0x00);
2542 
2543 		/* Discoverable mode affects the local advertising
2544 		 * address in limited privacy mode.
2545 		 */
2546 		if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
2547 			if (ext_adv_capable(hdev))
2548 				__hci_req_start_ext_adv(req, 0x00);
2549 			else
2550 				__hci_req_enable_advertising(req);
2551 		}
2552 	}
2553 
2554 	hci_dev_unlock(hdev);
2555 
2556 	return 0;
2557 }
2558 
2559 static void discoverable_update_work(struct work_struct *work)
2560 {
2561 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2562 					    discoverable_update);
2563 	u8 status;
2564 
2565 	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
2566 	mgmt_set_discoverable_complete(hdev, status);
2567 }
2568 
2569 void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
2570 		      u8 reason)
2571 {
2572 	switch (conn->state) {
2573 	case BT_CONNECTED:
2574 	case BT_CONFIG:
2575 		if (conn->type == AMP_LINK) {
2576 			struct hci_cp_disconn_phy_link cp;
2577 
2578 			cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
2579 			cp.reason = reason;
2580 			hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
2581 				    &cp);
2582 		} else {
2583 			struct hci_cp_disconnect dc;
2584 
2585 			dc.handle = cpu_to_le16(conn->handle);
2586 			dc.reason = reason;
2587 			hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2588 		}
2589 
2590 		conn->state = BT_DISCONN;
2591 
2592 		break;
2593 	case BT_CONNECT:
2594 		if (conn->type == LE_LINK) {
2595 			if (test_bit(HCI_CONN_SCANNING, &conn->flags))
2596 				break;
2597 			hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
2598 				    0, NULL);
2599 		} else if (conn->type == ACL_LINK) {
2600 			if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
2601 				break;
2602 			hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
2603 				    6, &conn->dst);
2604 		}
2605 		break;
2606 	case BT_CONNECT2:
2607 		if (conn->type == ACL_LINK) {
2608 			struct hci_cp_reject_conn_req rej;
2609 
2610 			bacpy(&rej.bdaddr, &conn->dst);
2611 			rej.reason = reason;
2612 
2613 			hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
2614 				    sizeof(rej), &rej);
2615 		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
2616 			struct hci_cp_reject_sync_conn_req rej;
2617 
2618 			bacpy(&rej.bdaddr, &conn->dst);
2619 
2620 			/* SCO rejection has its own limited set of
2621 			 * allowed error values (0x0D-0x0F) which isn't
2622 			 * compatible with most values passed to this
2623 			 * function. To be safe, hard-code one of the
2624 			 * values that's suitable for SCO.
2625 			 */
2626 			rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
2627 
2628 			hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
2629 				    sizeof(rej), &rej);
2630 		}
2631 		break;
2632 	default:
2633 		conn->state = BT_CLOSED;
2634 		break;
2635 	}
2636 }
2637 
2638 static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2639 {
2640 	if (status)
2641 		BT_DBG("Failed to abort connection: status 0x%2.2x", status);
2642 }
2643 
2644 int hci_abort_conn(struct hci_conn *conn, u8 reason)
2645 {
2646 	struct hci_request req;
2647 	int err;
2648 
2649 	hci_req_init(&req, conn->hdev);
2650 
2651 	__hci_abort_conn(&req, conn, reason);
2652 
2653 	err = hci_req_run(&req, abort_conn_complete);
2654 	if (err && err != -ENODATA) {
2655 		bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
2656 		return err;
2657 	}
2658 
2659 	return 0;
2660 }
2661 
2662 static int update_bg_scan(struct hci_request *req, unsigned long opt)
2663 {
2664 	hci_dev_lock(req->hdev);
2665 	__hci_update_background_scan(req);
2666 	hci_dev_unlock(req->hdev);
2667 	return 0;
2668 }
2669 
2670 static void bg_scan_update(struct work_struct *work)
2671 {
2672 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2673 					    bg_scan_update);
2674 	struct hci_conn *conn;
2675 	u8 status;
2676 	int err;
2677 
2678 	err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
2679 	if (!err)
2680 		return;
2681 
2682 	hci_dev_lock(hdev);
2683 
2684 	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
2685 	if (conn)
2686 		hci_le_conn_failed(conn, status);
2687 
2688 	hci_dev_unlock(hdev);
2689 }
2690 
2691 static int le_scan_disable(struct hci_request *req, unsigned long opt)
2692 {
2693 	hci_req_add_le_scan_disable(req, false);
2694 	return 0;
2695 }
2696 
2697 static int bredr_inquiry(struct hci_request *req, unsigned long opt)
2698 {
2699 	u8 length = opt;
2700 	const u8 giac[3] = { 0x33, 0x8b, 0x9e };
2701 	const u8 liac[3] = { 0x00, 0x8b, 0x9e };
2702 	struct hci_cp_inquiry cp;
2703 
2704 	BT_DBG("%s", req->hdev->name);
2705 
2706 	hci_dev_lock(req->hdev);
2707 	hci_inquiry_cache_flush(req->hdev);
2708 	hci_dev_unlock(req->hdev);
2709 
2710 	memset(&cp, 0, sizeof(cp));
2711 
2712 	if (req->hdev->discovery.limited)
2713 		memcpy(&cp.lap, liac, sizeof(cp.lap));
2714 	else
2715 		memcpy(&cp.lap, giac, sizeof(cp.lap));
2716 
2717 	cp.length = length;
2718 
2719 	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2720 
2721 	return 0;
2722 }
2723 
2724 static void le_scan_disable_work(struct work_struct *work)
2725 {
2726 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2727 					    le_scan_disable.work);
2728 	u8 status;
2729 
2730 	BT_DBG("%s", hdev->name);
2731 
2732 	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2733 		return;
2734 
2735 	cancel_delayed_work(&hdev->le_scan_restart);
2736 
2737 	hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
2738 	if (status) {
2739 		bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
2740 			   status);
2741 		return;
2742 	}
2743 
2744 	hdev->discovery.scan_start = 0;
2745 
2746 	/* If we were running an LE-only scan, change the discovery state.
2747 	 * If we were running both LE and BR/EDR inquiry simultaneously
2748 	 * and the BR/EDR inquiry has already finished, stop discovery;
2749 	 * otherwise the BR/EDR inquiry will stop discovery when it ends.
2750 	 * If we are still resolving a remote device name, do not change
2751 	 * the discovery state.
2752 	 */
2753 
2754 	if (hdev->discovery.type == DISCOV_TYPE_LE)
2755 		goto discov_stopped;
2756 
2757 	if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
2758 		return;
2759 
2760 	if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
2761 		if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
2762 		    hdev->discovery.state != DISCOVERY_RESOLVING)
2763 			goto discov_stopped;
2764 
2765 		return;
2766 	}
2767 
2768 	hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
2769 		     HCI_CMD_TIMEOUT, &status);
2770 	if (status) {
2771 		bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
2772 		goto discov_stopped;
2773 	}
2774 
2775 	return;
2776 
2777 discov_stopped:
2778 	hci_dev_lock(hdev);
2779 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2780 	hci_dev_unlock(hdev);
2781 }
2782 
2783 static int le_scan_restart(struct hci_request *req, unsigned long opt)
2784 {
2785 	struct hci_dev *hdev = req->hdev;
2786 
2787 	/* If controller is not scanning we are done. */
2788 	/* If the controller is not scanning, we are done. */
2789 		return 0;
2790 
2791 	if (hdev->scanning_paused) {
2792 		bt_dev_dbg(hdev, "Scanning is paused for suspend");
2793 		return 0;
2794 	}
2795 
2796 	hci_req_add_le_scan_disable(req, false);
2797 
2798 	if (use_ext_scan(hdev)) {
2799 		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
2800 
2801 		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
2802 		ext_enable_cp.enable = LE_SCAN_ENABLE;
2803 		ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2804 
2805 		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
2806 			    sizeof(ext_enable_cp), &ext_enable_cp);
2807 	} else {
2808 		struct hci_cp_le_set_scan_enable cp;
2809 
2810 		memset(&cp, 0, sizeof(cp));
2811 		cp.enable = LE_SCAN_ENABLE;
2812 		cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2813 		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2814 	}
2815 
2816 	return 0;
2817 }
2818 
2819 static void le_scan_restart_work(struct work_struct *work)
2820 {
2821 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2822 					    le_scan_restart.work);
2823 	unsigned long timeout, duration, scan_start, now;
2824 	u8 status;
2825 
2826 	BT_DBG("%s", hdev->name);
2827 
2828 	hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
2829 	if (status) {
2830 		bt_dev_err(hdev, "failed to restart LE scan: status %d",
2831 			   status);
2832 		return;
2833 	}
2834 
2835 	hci_dev_lock(hdev);
2836 
2837 	if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
2838 	    !hdev->discovery.scan_start)
2839 		goto unlock;
2840 
2841 	/* When the scan was started, hdev->le_scan_disable was queued to
2842 	 * run 'duration' after scan_start. That work was canceled during
2843 	 * the scan restart, so queue it again with the proper remaining
2844 	 * timeout to make sure the scan does not run indefinitely.
2845 	 */
2846 	duration = hdev->discovery.scan_duration;
2847 	scan_start = hdev->discovery.scan_start;
2848 	now = jiffies;
2849 	if (now - scan_start <= duration) {
2850 		int elapsed;
2851 
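		/* Compute the elapsed time, accounting for a possible
		 * jiffies wraparound between scan_start and now.
		 */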
2852 		if (now >= scan_start)
2853 			elapsed = now - scan_start;
2854 		else
2855 			elapsed = ULONG_MAX - scan_start + now;
2856 
2857 		timeout = duration - elapsed;
2858 	} else {
2859 		timeout = 0;
2860 	}
2861 
2862 	queue_delayed_work(hdev->req_workqueue,
2863 			   &hdev->le_scan_disable, timeout);
2864 
2865 unlock:
2866 	hci_dev_unlock(hdev);
2867 }
2868 
2869 static int active_scan(struct hci_request *req, unsigned long opt)
2870 {
2871 	u16 interval = opt;
2872 	struct hci_dev *hdev = req->hdev;
2873 	u8 own_addr_type;
2874 	/* White list is not used for discovery */
2875 	u8 filter_policy = 0x00;
2876 	/* Discovery doesn't require controller address resolution */
2877 	bool addr_resolv = false;
2878 	int err;
2879 
2880 	BT_DBG("%s", hdev->name);
2881 
2882 	/* If controller is scanning, it means the background scanning is
2883 	 * running. Thus, we should temporarily stop it in order to set the
2884 	 * discovery scanning parameters.
2885 	 */
2886 	if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
2887 		hci_req_add_le_scan_disable(req, false);
2888 
2889 	/* All active scans will be done with either a resolvable private
2890 	 * address (when the privacy feature has been enabled) or a
2891 	 * non-resolvable private address.
2892 	 */
2893 	err = hci_update_random_address(req, true, scan_use_rpa(hdev),
2894 					&own_addr_type);
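	/* If updating the random address fails, fall back to the public
	 * address rather than aborting discovery.
	 */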
2895 	if (err < 0)
2896 		own_addr_type = ADDR_LE_DEV_PUBLIC;
2897 
2898 	hci_req_start_scan(req, LE_SCAN_ACTIVE, interval,
2899 			   hdev->le_scan_window_discovery, own_addr_type,
2900 			   filter_policy, addr_resolv);
2901 	return 0;
2902 }
2903 
2904 static int interleaved_discov(struct hci_request *req, unsigned long opt)
2905 {
2906 	int err;
2907 
2908 	BT_DBG("%s", req->hdev->name);
2909 
2910 	err = active_scan(req, opt);
2911 	if (err)
2912 		return err;
2913 
2914 	return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
2915 }
2916 
2917 static void start_discovery(struct hci_dev *hdev, u8 *status)
2918 {
2919 	unsigned long timeout;
2920 
2921 	BT_DBG("%s type %u", hdev->name, hdev->discovery.type);
2922 
2923 	switch (hdev->discovery.type) {
2924 	case DISCOV_TYPE_BREDR:
2925 		if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
2926 			hci_req_sync(hdev, bredr_inquiry,
2927 				     DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
2928 				     status);
2929 		return;
2930 	case DISCOV_TYPE_INTERLEAVED:
2931 		/* When running simultaneous discovery, the LE scanning time
2932 		 * should occupy the whole discovery time since BR/EDR inquiry
2933 		 * and LE scanning are scheduled by the controller.
2934 		 *
2935 		 * For interleaving discovery in comparison, BR/EDR inquiry
2936 		 * and LE scanning are done sequentially with separate
2937 		 * timeouts.
2938 		 */
2939 		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
2940 			     &hdev->quirks)) {
2941 			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2942 			/* During simultaneous discovery, we double the LE
2943 			 * scan interval. We must leave some time for the
2944 			 * controller to do BR/EDR inquiry.
2945 			 */
2946 			hci_req_sync(hdev, interleaved_discov,
2947 				     hdev->le_scan_int_discovery * 2, HCI_CMD_TIMEOUT,
2948 				     status);
2949 			break;
2950 		}
2951 
2952 		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
2953 		hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
2954 			     HCI_CMD_TIMEOUT, status);
2955 		break;
2956 	case DISCOV_TYPE_LE:
2957 		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2958 		hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
2959 			     HCI_CMD_TIMEOUT, status);
2960 		break;
2961 	default:
2962 		*status = HCI_ERROR_UNSPECIFIED;
2963 		return;
2964 	}
2965 
2966 	if (*status)
2967 		return;
2968 
2969 	BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));
2970 
2971 	/* When service discovery is used and the controller has a
2972 	 * strict duplicate filter, it is important to remember the
2973 	 * start and duration of the scan. This is required for
2974 	 * restarting scanning during the discovery phase.
2975 	 */
2976 	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
2977 		     hdev->discovery.result_filtering) {
2978 		hdev->discovery.scan_start = jiffies;
2979 		hdev->discovery.scan_duration = timeout;
2980 	}
2981 
2982 	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
2983 			   timeout);
2984 }
2985 
2986 bool hci_req_stop_discovery(struct hci_request *req)
2987 {
2988 	struct hci_dev *hdev = req->hdev;
2989 	struct discovery_state *d = &hdev->discovery;
2990 	struct hci_cp_remote_name_req_cancel cp;
2991 	struct inquiry_entry *e;
2992 	bool ret = false;
2993 
2994 	BT_DBG("%s state %u", hdev->name, hdev->discovery.state);
2995 
2996 	if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
2997 		if (test_bit(HCI_INQUIRY, &hdev->flags))
2998 			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2999 
3000 		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
3001 			cancel_delayed_work(&hdev->le_scan_disable);
3002 			hci_req_add_le_scan_disable(req, false);
3003 		}
3004 
3005 		ret = true;
3006 	} else {
3007 		/* Passive scanning */
3008 		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
3009 			hci_req_add_le_scan_disable(req, false);
3010 			ret = true;
3011 		}
3012 	}
3013 
3014 	/* No further actions needed for LE-only discovery */
3015 	if (d->type == DISCOV_TYPE_LE)
3016 		return ret;
3017 
3018 	if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
3019 		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
3020 						     NAME_PENDING);
3021 		if (!e)
3022 			return ret;
3023 
3024 		bacpy(&cp.bdaddr, &e->data.bdaddr);
3025 		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
3026 			    &cp);
3027 		ret = true;
3028 	}
3029 
3030 	return ret;
3031 }
3032 
3033 static int stop_discovery(struct hci_request *req, unsigned long opt)
3034 {
3035 	hci_dev_lock(req->hdev);
3036 	hci_req_stop_discovery(req);
3037 	hci_dev_unlock(req->hdev);
3038 
3039 	return 0;
3040 }
3041 
3042 static void discov_update(struct work_struct *work)
3043 {
3044 	struct hci_dev *hdev = container_of(work, struct hci_dev,
3045 					    discov_update);
3046 	u8 status = 0;
3047 
3048 	switch (hdev->discovery.state) {
3049 	case DISCOVERY_STARTING:
3050 		start_discovery(hdev, &status);
3051 		mgmt_start_discovery_complete(hdev, status);
3052 		if (status)
3053 			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3054 		else
3055 			hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3056 		break;
3057 	case DISCOVERY_STOPPING:
3058 		hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
3059 		mgmt_stop_discovery_complete(hdev, status);
3060 		if (!status)
3061 			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3062 		break;
3063 	case DISCOVERY_STOPPED:
3064 	default:
3065 		return;
3066 	}
3067 }
3068 
3069 static void discov_off(struct work_struct *work)
3070 {
3071 	struct hci_dev *hdev = container_of(work, struct hci_dev,
3072 					    discov_off.work);
3073 
3074 	BT_DBG("%s", hdev->name);
3075 
3076 	hci_dev_lock(hdev);
3077 
3078 	/* When discoverable timeout triggers, then just make sure
3079 	 * the limited discoverable flag is cleared. Even in the case
3080 	 * of a timeout triggered from general discoverable, it is
3081 	 * safe to unconditionally clear the flag.
3082 	 */
3083 	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
3084 	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
3085 	hdev->discov_timeout = 0;
3086 
3087 	hci_dev_unlock(hdev);
3088 
3089 	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
3090 	mgmt_new_settings(hdev);
3091 }
3092 
3093 static int powered_update_hci(struct hci_request *req, unsigned long opt)
3094 {
3095 	struct hci_dev *hdev = req->hdev;
3096 	u8 link_sec;
3097 
3098 	hci_dev_lock(hdev);
3099 
3100 	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
3101 	    !lmp_host_ssp_capable(hdev)) {
3102 		u8 mode = 0x01;
3103 
3104 		hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
3105 
3106 		if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
3107 			u8 support = 0x01;
3108 
3109 			hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
3110 				    sizeof(support), &support);
3111 		}
3112 	}
3113 
3114 	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
3115 	    lmp_bredr_capable(hdev)) {
3116 		struct hci_cp_write_le_host_supported cp;
3117 
3118 		cp.le = 0x01;
3119 		cp.simul = 0x00;
3120 
3121 		/* Check first if we already have the right
3122 		 * host state (host features set)
3123 		 */
3124 		if (cp.le != lmp_host_le_capable(hdev) ||
3125 		    cp.simul != lmp_host_le_br_capable(hdev))
3126 			hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
3127 				    sizeof(cp), &cp);
3128 	}
3129 
3130 	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
3131 		/* Make sure the controller has a good default for
3132 		 * advertising data. This also applies to the case
3133 		 * where BR/EDR was toggled during the AUTO_OFF phase.
3134 		 */
3135 		if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
3136 		    list_empty(&hdev->adv_instances)) {
3137 			int err;
3138 
3139 			if (ext_adv_capable(hdev)) {
3140 				err = __hci_req_setup_ext_adv_instance(req,
3141 								       0x00);
3142 				if (!err)
3143 					__hci_req_update_scan_rsp_data(req,
3144 								       0x00);
3145 			} else {
3146 				err = 0;
3147 				__hci_req_update_adv_data(req, 0x00);
3148 				__hci_req_update_scan_rsp_data(req, 0x00);
3149 			}
3150 
3151 			if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
3152 				if (!ext_adv_capable(hdev))
3153 					__hci_req_enable_advertising(req);
3154 				else if (!err)
3155 					__hci_req_enable_ext_advertising(req,
3156 									 0x00);
3157 			}
3158 		} else if (!list_empty(&hdev->adv_instances)) {
3159 			struct adv_info *adv_instance;
3160 
3161 			adv_instance = list_first_entry(&hdev->adv_instances,
3162 							struct adv_info, list);
3163 			__hci_req_schedule_adv_instance(req,
3164 							adv_instance->instance,
3165 							true);
3166 		}
3167 	}
3168 
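	/* Bring the controller's Authentication Enable setting in line
	 * with the host's link security flag.
	 */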
3169 	link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
3170 	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
3171 		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
3172 			    sizeof(link_sec), &link_sec);
3173 
3174 	if (lmp_bredr_capable(hdev)) {
3175 		if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
3176 			__hci_req_write_fast_connectable(req, true);
3177 		else
3178 			__hci_req_write_fast_connectable(req, false);
3179 		__hci_req_update_scan(req);
3180 		__hci_req_update_class(req);
3181 		__hci_req_update_name(req);
3182 		__hci_req_update_eir(req);
3183 	}
3184 
3185 	hci_dev_unlock(hdev);
3186 	return 0;
3187 }
3188 
3189 int __hci_req_hci_power_on(struct hci_dev *hdev)
3190 {
3191 	/* Register the available SMP channels (BR/EDR and LE) only when
3192 	 * successfully powering on the controller. This late
3193 	 * registration is required so that LE SMP can clearly decide if
3194 	 * the public address or static address is used.
3195 	 */
3196 	smp_register(hdev);
3197 
3198 	return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
3199 			      NULL);
3200 }
3201 
3202 void hci_request_setup(struct hci_dev *hdev)
3203 {
3204 	INIT_WORK(&hdev->discov_update, discov_update);
3205 	INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
3206 	INIT_WORK(&hdev->scan_update, scan_update_work);
3207 	INIT_WORK(&hdev->connectable_update, connectable_update_work);
3208 	INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
3209 	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
3210 	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3211 	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
3212 	INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
3213 }
3214 
3215 void hci_request_cancel_all(struct hci_dev *hdev)
3216 {
3217 	hci_req_sync_cancel(hdev, ENODEV);
3218 
3219 	cancel_work_sync(&hdev->discov_update);
3220 	cancel_work_sync(&hdev->bg_scan_update);
3221 	cancel_work_sync(&hdev->scan_update);
3222 	cancel_work_sync(&hdev->connectable_update);
3223 	cancel_work_sync(&hdev->discoverable_update);
3224 	cancel_delayed_work_sync(&hdev->discov_off);
3225 	cancel_delayed_work_sync(&hdev->le_scan_disable);
3226 	cancel_delayed_work_sync(&hdev->le_scan_restart);
3227 
3228 	if (hdev->adv_instance_timeout) {
3229 		cancel_delayed_work_sync(&hdev->adv_instance_expire);
3230 		hdev->adv_instance_timeout = 0;
3231 	}
3232 }
3233