xref: /openbmc/linux/net/bluetooth/hci_request.c (revision 4bb1eb3c)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3 
4    Copyright (C) 2014 Intel Corporation
5 
6    This program is free software; you can redistribute it and/or modify
7    it under the terms of the GNU General Public License version 2 as
8    published by the Free Software Foundation;
9 
10    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
11    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
12    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
13    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
14    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
15    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 
19    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
20    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
21    SOFTWARE IS DISCLAIMED.
22 */
23 
24 #include <linux/sched/signal.h>
25 
26 #include <net/bluetooth/bluetooth.h>
27 #include <net/bluetooth/hci_core.h>
28 #include <net/bluetooth/mgmt.h>
29 
30 #include "smp.h"
31 #include "hci_request.h"
32 
33 #define HCI_REQ_DONE	  0
34 #define HCI_REQ_PEND	  1
35 #define HCI_REQ_CANCELED  2
36 
37 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
38 {
39 	skb_queue_head_init(&req->cmd_q);
40 	req->hdev = hdev;
41 	req->err = 0;
42 }
43 
44 void hci_req_purge(struct hci_request *req)
45 {
46 	skb_queue_purge(&req->cmd_q);
47 }
48 
49 bool hci_req_status_pend(struct hci_dev *hdev)
50 {
51 	return hdev->req_status == HCI_REQ_PEND;
52 }
53 
54 static int req_run(struct hci_request *req, hci_req_complete_t complete,
55 		   hci_req_complete_skb_t complete_skb)
56 {
57 	struct hci_dev *hdev = req->hdev;
58 	struct sk_buff *skb;
59 	unsigned long flags;
60 
61 	BT_DBG("length %u", skb_queue_len(&req->cmd_q));
62 
63 	/* If an error occurred during request building, remove all HCI
64 	 * commands queued on the HCI request queue.
65 	 */
66 	if (req->err) {
67 		skb_queue_purge(&req->cmd_q);
68 		return req->err;
69 	}
70 
71 	/* Do not allow empty requests */
72 	if (skb_queue_empty(&req->cmd_q))
73 		return -ENODATA;
74 
75 	skb = skb_peek_tail(&req->cmd_q);
76 	if (complete) {
77 		bt_cb(skb)->hci.req_complete = complete;
78 	} else if (complete_skb) {
79 		bt_cb(skb)->hci.req_complete_skb = complete_skb;
80 		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
81 	}
82 
83 	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
84 	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
85 	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
86 
87 	queue_work(hdev->workqueue, &hdev->cmd_work);
88 
89 	return 0;
90 }
91 
92 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
93 {
94 	return req_run(req, complete, NULL);
95 }
96 
97 int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
98 {
99 	return req_run(req, NULL, complete);
100 }
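/*
 * Illustrative sketch (not part of the original file): a typical
 * asynchronous use of the request API above. The opcode choice and the
 * names example_complete()/example_send() are placeholders.
 *
 *	static void example_complete(struct hci_dev *hdev, u8 status,
 *				     u16 opcode)
 *	{
 *		bt_dev_dbg(hdev, "opcode 0x%4.4x status 0x%2.2x",
 *			   opcode, status);
 *	}
 *
 *	static int example_send(struct hci_dev *hdev)
 *	{
 *		struct hci_request req;
 *
 *		hci_req_init(&req, hdev);
 *		hci_req_add(&req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
 *		return hci_req_run(&req, example_complete);
 *	}
 *
 * req_run() attaches the completion callback to the last queued command
 * only, so it fires once after the whole request has been processed.
 */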
101 
102 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
103 				  struct sk_buff *skb)
104 {
105 	BT_DBG("%s result 0x%2.2x", hdev->name, result);
106 
107 	if (hdev->req_status == HCI_REQ_PEND) {
108 		hdev->req_result = result;
109 		hdev->req_status = HCI_REQ_DONE;
110 		if (skb)
111 			hdev->req_skb = skb_get(skb);
112 		wake_up_interruptible(&hdev->req_wait_q);
113 	}
114 }
115 
116 void hci_req_sync_cancel(struct hci_dev *hdev, int err)
117 {
118 	BT_DBG("%s err 0x%2.2x", hdev->name, err);
119 
120 	if (hdev->req_status == HCI_REQ_PEND) {
121 		hdev->req_result = err;
122 		hdev->req_status = HCI_REQ_CANCELED;
123 		wake_up_interruptible(&hdev->req_wait_q);
124 	}
125 }
126 
127 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
128 				  const void *param, u8 event, u32 timeout)
129 {
130 	struct hci_request req;
131 	struct sk_buff *skb;
132 	int err = 0;
133 
134 	BT_DBG("%s", hdev->name);
135 
136 	hci_req_init(&req, hdev);
137 
138 	hci_req_add_ev(&req, opcode, plen, param, event);
139 
140 	hdev->req_status = HCI_REQ_PEND;
141 
142 	err = hci_req_run_skb(&req, hci_req_sync_complete);
143 	if (err < 0)
144 		return ERR_PTR(err);
145 
146 	err = wait_event_interruptible_timeout(hdev->req_wait_q,
147 			hdev->req_status != HCI_REQ_PEND, timeout);
148 
149 	if (err == -ERESTARTSYS)
150 		return ERR_PTR(-EINTR);
151 
152 	switch (hdev->req_status) {
153 	case HCI_REQ_DONE:
154 		err = -bt_to_errno(hdev->req_result);
155 		break;
156 
157 	case HCI_REQ_CANCELED:
158 		err = -hdev->req_result;
159 		break;
160 
161 	default:
162 		err = -ETIMEDOUT;
163 		break;
164 	}
165 
166 	hdev->req_status = hdev->req_result = 0;
167 	skb = hdev->req_skb;
168 	hdev->req_skb = NULL;
169 
170 	BT_DBG("%s end: err %d", hdev->name, err);
171 
172 	if (err < 0) {
173 		kfree_skb(skb);
174 		return ERR_PTR(err);
175 	}
176 
177 	if (!skb)
178 		return ERR_PTR(-ENODATA);
179 
180 	return skb;
181 }
182 EXPORT_SYMBOL(__hci_cmd_sync_ev);
183 
184 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
185 			       const void *param, u32 timeout)
186 {
187 	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
188 }
189 EXPORT_SYMBOL(__hci_cmd_sync);
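/*
 * Illustrative sketch (assumed usage, not from this file): a blocking
 * command round trip with __hci_cmd_sync(). HCI_CMD_TIMEOUT is assumed
 * as the usual timeout constant; the caller is expected to serialize
 * via hci_req_sync_lock() and owns the returned skb.
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			     HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *
 *	... parse skb->data (Command Complete parameters) ...
 *	kfree_skb(skb);
 */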
190 
191 /* Execute request and wait for completion. */
192 int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
193 						     unsigned long opt),
194 		   unsigned long opt, u32 timeout, u8 *hci_status)
195 {
196 	struct hci_request req;
197 	int err = 0;
198 
199 	BT_DBG("%s start", hdev->name);
200 
201 	hci_req_init(&req, hdev);
202 
203 	hdev->req_status = HCI_REQ_PEND;
204 
205 	err = func(&req, opt);
206 	if (err) {
207 		if (hci_status)
208 			*hci_status = HCI_ERROR_UNSPECIFIED;
209 		return err;
210 	}
211 
212 	err = hci_req_run_skb(&req, hci_req_sync_complete);
213 	if (err < 0) {
214 		hdev->req_status = 0;
215 
216 		/* ENODATA means the HCI request command queue is empty.
217 		 * This can happen when a request with conditionals doesn't
218 		 * trigger any commands to be sent. This is normal behavior
219 		 * and should not trigger an error return.
220 		 */
221 		if (err == -ENODATA) {
222 			if (hci_status)
223 				*hci_status = 0;
224 			return 0;
225 		}
226 
227 		if (hci_status)
228 			*hci_status = HCI_ERROR_UNSPECIFIED;
229 
230 		return err;
231 	}
232 
233 	err = wait_event_interruptible_timeout(hdev->req_wait_q,
234 			hdev->req_status != HCI_REQ_PEND, timeout);
235 
236 	if (err == -ERESTARTSYS)
237 		return -EINTR;
238 
239 	switch (hdev->req_status) {
240 	case HCI_REQ_DONE:
241 		err = -bt_to_errno(hdev->req_result);
242 		if (hci_status)
243 			*hci_status = hdev->req_result;
244 		break;
245 
246 	case HCI_REQ_CANCELED:
247 		err = -hdev->req_result;
248 		if (hci_status)
249 			*hci_status = HCI_ERROR_UNSPECIFIED;
250 		break;
251 
252 	default:
253 		err = -ETIMEDOUT;
254 		if (hci_status)
255 			*hci_status = HCI_ERROR_UNSPECIFIED;
256 		break;
257 	}
258 
259 	kfree_skb(hdev->req_skb);
260 	hdev->req_skb = NULL;
261 	hdev->req_status = hdev->req_result = 0;
262 
263 	BT_DBG("%s end: err %d", hdev->name, err);
264 
265 	return err;
266 }
267 
268 int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
269 						  unsigned long opt),
270 		 unsigned long opt, u32 timeout, u8 *hci_status)
271 {
272 	int ret;
273 
274 	if (!test_bit(HCI_UP, &hdev->flags))
275 		return -ENETDOWN;
276 
277 	/* Serialize all requests */
278 	hci_req_sync_lock(hdev);
279 	ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
280 	hci_req_sync_unlock(hdev);
281 
282 	return ret;
283 }
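/*
 * Illustrative sketch (placeholder names): the func(req, opt) builder
 * pattern expected by hci_req_sync(). The builder only queues commands;
 * hci_req_sync() serializes, runs them and blocks until completion,
 * cancellation or timeout.
 *
 *	static int example_build(struct hci_request *req, unsigned long opt)
 *	{
 *		u8 scan = opt;
 *
 *		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *		return 0;
 *	}
 *
 *	u8 status;
 *	int err = hci_req_sync(hdev, example_build, SCAN_PAGE,
 *			       HCI_CMD_TIMEOUT, &status);
 */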
284 
285 struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
286 				const void *param)
287 {
288 	int len = HCI_COMMAND_HDR_SIZE + plen;
289 	struct hci_command_hdr *hdr;
290 	struct sk_buff *skb;
291 
292 	skb = bt_skb_alloc(len, GFP_ATOMIC);
293 	if (!skb)
294 		return NULL;
295 
296 	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
297 	hdr->opcode = cpu_to_le16(opcode);
298 	hdr->plen   = plen;
299 
300 	if (plen)
301 		skb_put_data(skb, param, plen);
302 
303 	BT_DBG("skb len %d", skb->len);
304 
305 	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
306 	hci_skb_opcode(skb) = opcode;
307 
308 	return skb;
309 }
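/*
 * The skb built above is a standard HCI command packet: a 2-byte
 * little-endian opcode, a 1-byte parameter length, then the parameters.
 * For example, hci_prepare_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL)
 * (opcode 0x0c23) yields the 3-byte payload:
 *
 *	0x23 0x0c 0x00
 */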
310 
311 /* Queue a command to an asynchronous HCI request */
312 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
313 		    const void *param, u8 event)
314 {
315 	struct hci_dev *hdev = req->hdev;
316 	struct sk_buff *skb;
317 
318 	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
319 
320 	/* If an error occurred during request building, there is no point in
321 	 * queueing the HCI command. We can simply return.
322 	 */
323 	if (req->err)
324 		return;
325 
326 	skb = hci_prepare_cmd(hdev, opcode, plen, param);
327 	if (!skb) {
328 		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
329 			   opcode);
330 		req->err = -ENOMEM;
331 		return;
332 	}
333 
334 	if (skb_queue_empty(&req->cmd_q))
335 		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
336 
337 	bt_cb(skb)->hci.req_event = event;
338 
339 	skb_queue_tail(&req->cmd_q, skb);
340 }
341 
342 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
343 		 const void *param)
344 {
345 	hci_req_add_ev(req, opcode, plen, param, 0);
346 }
347 
348 void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
349 {
350 	struct hci_dev *hdev = req->hdev;
351 	struct hci_cp_write_page_scan_activity acp;
352 	u8 type;
353 
354 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
355 		return;
356 
357 	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
358 		return;
359 
360 	if (enable) {
361 		type = PAGE_SCAN_TYPE_INTERLACED;
362 
363 		/* 160 msec page scan interval */
364 		acp.interval = cpu_to_le16(0x0100);
365 	} else {
366 		type = hdev->def_page_scan_type;
367 		acp.interval = cpu_to_le16(hdev->def_page_scan_int);
368 	}
369 
370 	acp.window = cpu_to_le16(hdev->def_page_scan_window);
371 
372 	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
373 	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
374 		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
375 			    sizeof(acp), &acp);
376 
377 	if (hdev->page_scan_type != type)
378 		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
379 }
380 
381 /* This function controls the background scanning based on hdev->pend_le_conns
382  * list. If there are pending LE connections, we start background scanning,
383  * otherwise we stop it.
384  *
385  * This function requires that the caller holds hdev->lock.
386  */
387 static void __hci_update_background_scan(struct hci_request *req)
388 {
389 	struct hci_dev *hdev = req->hdev;
390 
391 	if (!test_bit(HCI_UP, &hdev->flags) ||
392 	    test_bit(HCI_INIT, &hdev->flags) ||
393 	    hci_dev_test_flag(hdev, HCI_SETUP) ||
394 	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
395 	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
396 	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
397 		return;
398 
399 	/* No point in doing scanning if LE support hasn't been enabled */
400 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
401 		return;
402 
403 	/* If discovery is active don't interfere with it */
404 	if (hdev->discovery.state != DISCOVERY_STOPPED)
405 		return;
406 
407 	/* Reset RSSI and UUID filters when starting background scanning
408 	 * since these filters are meant for service discovery only.
409 	 *
410 	 * The Start Discovery and Start Service Discovery operations
411 	 * always set proper values for the RSSI threshold and UUID
412 	 * filter list, so it is safe to simply reset them here.
413 	 */
414 	hci_discovery_filter_clear(hdev);
415 
416 	BT_DBG("%s ADV monitoring is %s", hdev->name,
417 	       hci_is_adv_monitoring(hdev) ? "on" : "off");
418 
419 	if (list_empty(&hdev->pend_le_conns) &&
420 	    list_empty(&hdev->pend_le_reports) &&
421 	    !hci_is_adv_monitoring(hdev)) {
422 		/* If there are no pending LE connections, no devices
423 		 * to be scanned for and no ADV monitors, we should stop
424 		 * background scanning.
425 		 */
426 
427 		/* If controller is not scanning we are done. */
428 		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
429 			return;
430 
431 		hci_req_add_le_scan_disable(req, false);
432 
433 		BT_DBG("%s stopping background scanning", hdev->name);
434 	} else {
435 		/* If there is at least one pending LE connection, we should
436 		 * keep the background scan running.
437 		 */
438 
439 		/* If controller is connecting, we should not start scanning
440 		 * since some controllers are not able to scan and connect at
441 		 * the same time.
442 		 */
443 		if (hci_lookup_le_connect(hdev))
444 			return;
445 
446 		/* If controller is currently scanning, we stop it to ensure we
447 		 * don't miss any advertising (due to duplicates filter).
448 		 */
449 		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
450 			hci_req_add_le_scan_disable(req, false);
451 
452 		hci_req_add_le_passive_scan(req);
453 
454 		BT_DBG("%s starting background scanning", hdev->name);
455 	}
456 }
457 
458 void __hci_req_update_name(struct hci_request *req)
459 {
460 	struct hci_dev *hdev = req->hdev;
461 	struct hci_cp_write_local_name cp;
462 
463 	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
464 
465 	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
466 }
467 
468 #define PNP_INFO_SVCLASS_ID		0x1200
469 
470 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
471 {
472 	u8 *ptr = data, *uuids_start = NULL;
473 	struct bt_uuid *uuid;
474 
475 	if (len < 4)
476 		return ptr;
477 
478 	list_for_each_entry(uuid, &hdev->uuids, list) {
479 		u16 uuid16;
480 
481 		if (uuid->size != 16)
482 			continue;
483 
484 		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
485 		if (uuid16 < 0x1100)
486 			continue;
487 
488 		if (uuid16 == PNP_INFO_SVCLASS_ID)
489 			continue;
490 
491 		if (!uuids_start) {
492 			uuids_start = ptr;
493 			uuids_start[0] = 1;
494 			uuids_start[1] = EIR_UUID16_ALL;
495 			ptr += 2;
496 		}
497 
498 		/* Stop if not enough space to put next UUID */
499 		if ((ptr - data) + sizeof(u16) > len) {
500 			uuids_start[1] = EIR_UUID16_SOME;
501 			break;
502 		}
503 
504 		*ptr++ = (uuid16 & 0x00ff);
505 		*ptr++ = (uuid16 & 0xff00) >> 8;
506 		uuids_start[0] += sizeof(uuid16);
507 	}
508 
509 	return ptr;
510 }
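/*
 * Worked example (hypothetical UUID set): with the 16-bit UUIDs 0x110b
 * and 0x110e registered, the block built above is
 *
 *	0x05 0x03 0x0b 0x11 0x0e 0x11
 *
 * i.e. length 5, type EIR_UUID16_ALL (0x03), then each UUID in
 * little-endian byte order. The type byte is rewritten to
 * EIR_UUID16_SOME (0x02) when the buffer runs out of space.
 */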
511 
512 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
513 {
514 	u8 *ptr = data, *uuids_start = NULL;
515 	struct bt_uuid *uuid;
516 
517 	if (len < 6)
518 		return ptr;
519 
520 	list_for_each_entry(uuid, &hdev->uuids, list) {
521 		if (uuid->size != 32)
522 			continue;
523 
524 		if (!uuids_start) {
525 			uuids_start = ptr;
526 			uuids_start[0] = 1;
527 			uuids_start[1] = EIR_UUID32_ALL;
528 			ptr += 2;
529 		}
530 
531 		/* Stop if not enough space to put next UUID */
532 		if ((ptr - data) + sizeof(u32) > len) {
533 			uuids_start[1] = EIR_UUID32_SOME;
534 			break;
535 		}
536 
537 		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
538 		ptr += sizeof(u32);
539 		uuids_start[0] += sizeof(u32);
540 	}
541 
542 	return ptr;
543 }
544 
545 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
546 {
547 	u8 *ptr = data, *uuids_start = NULL;
548 	struct bt_uuid *uuid;
549 
550 	if (len < 18)
551 		return ptr;
552 
553 	list_for_each_entry(uuid, &hdev->uuids, list) {
554 		if (uuid->size != 128)
555 			continue;
556 
557 		if (!uuids_start) {
558 			uuids_start = ptr;
559 			uuids_start[0] = 1;
560 			uuids_start[1] = EIR_UUID128_ALL;
561 			ptr += 2;
562 		}
563 
564 		/* Stop if not enough space to put next UUID */
565 		if ((ptr - data) + 16 > len) {
566 			uuids_start[1] = EIR_UUID128_SOME;
567 			break;
568 		}
569 
570 		memcpy(ptr, uuid->uuid, 16);
571 		ptr += 16;
572 		uuids_start[0] += 16;
573 	}
574 
575 	return ptr;
576 }
577 
578 static void create_eir(struct hci_dev *hdev, u8 *data)
579 {
580 	u8 *ptr = data;
581 	size_t name_len;
582 
583 	name_len = strlen(hdev->dev_name);
584 
585 	if (name_len > 0) {
586 		/* EIR Data type */
587 		if (name_len > 48) {
588 			name_len = 48;
589 			ptr[1] = EIR_NAME_SHORT;
590 		} else
591 			ptr[1] = EIR_NAME_COMPLETE;
592 
593 		/* EIR Data length */
594 		ptr[0] = name_len + 1;
595 
596 		memcpy(ptr + 2, hdev->dev_name, name_len);
597 
598 		ptr += (name_len + 2);
599 	}
600 
601 	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
602 		ptr[0] = 2;
603 		ptr[1] = EIR_TX_POWER;
604 		ptr[2] = (u8) hdev->inq_tx_power;
605 
606 		ptr += 3;
607 	}
608 
609 	if (hdev->devid_source > 0) {
610 		ptr[0] = 9;
611 		ptr[1] = EIR_DEVICE_ID;
612 
613 		put_unaligned_le16(hdev->devid_source, ptr + 2);
614 		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
615 		put_unaligned_le16(hdev->devid_product, ptr + 6);
616 		put_unaligned_le16(hdev->devid_version, ptr + 8);
617 
618 		ptr += 10;
619 	}
620 
621 	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
622 	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
623 	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
624 }
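/*
 * Example layout (hypothetical values): for a device named "Kernel"
 * with an inquiry TX power of 4 dBm and no device ID or UUIDs,
 * create_eir() emits two length/type/data structures:
 *
 *	0x07 0x09 'K' 'e' 'r' 'n' 'e' 'l'	EIR_NAME_COMPLETE (0x09)
 *	0x02 0x0a 0x04				EIR_TX_POWER (0x0a)
 *
 * The remainder of the zero-initialized EIR buffer terminates the
 * structure list.
 */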
625 
626 void __hci_req_update_eir(struct hci_request *req)
627 {
628 	struct hci_dev *hdev = req->hdev;
629 	struct hci_cp_write_eir cp;
630 
631 	if (!hdev_is_powered(hdev))
632 		return;
633 
634 	if (!lmp_ext_inq_capable(hdev))
635 		return;
636 
637 	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
638 		return;
639 
640 	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
641 		return;
642 
643 	memset(&cp, 0, sizeof(cp));
644 
645 	create_eir(hdev, cp.data);
646 
647 	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
648 		return;
649 
650 	memcpy(hdev->eir, cp.data, sizeof(cp.data));
651 
652 	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
653 }
654 
655 void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)
656 {
657 	struct hci_dev *hdev = req->hdev;
658 
659 	if (hdev->scanning_paused) {
660 		bt_dev_dbg(hdev, "Scanning is paused for suspend");
661 		return;
662 	}
663 
664 	if (use_ext_scan(hdev)) {
665 		struct hci_cp_le_set_ext_scan_enable cp;
666 
667 		memset(&cp, 0, sizeof(cp));
668 		cp.enable = LE_SCAN_DISABLE;
669 		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
670 			    &cp);
671 	} else {
672 		struct hci_cp_le_set_scan_enable cp;
673 
674 		memset(&cp, 0, sizeof(cp));
675 		cp.enable = LE_SCAN_DISABLE;
676 		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
677 	}
678 
679 	/* Disable address resolution */
680 	if (use_ll_privacy(hdev) &&
681 	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
682 	    hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) {
683 		__u8 enable = 0x00;
684 
685 		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
686 	}
687 }
688 
689 static void del_from_white_list(struct hci_request *req, bdaddr_t *bdaddr,
690 				u8 bdaddr_type)
691 {
692 	struct hci_cp_le_del_from_white_list cp;
693 
694 	cp.bdaddr_type = bdaddr_type;
695 	bacpy(&cp.bdaddr, bdaddr);
696 
697 	bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from whitelist", &cp.bdaddr,
698 		   cp.bdaddr_type);
699 	hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST, sizeof(cp), &cp);
700 
701 	if (use_ll_privacy(req->hdev)) {
702 		struct smp_irk *irk;
703 
704 		irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type);
705 		if (irk) {
706 			struct hci_cp_le_del_from_resolv_list cp;
707 
708 			cp.bdaddr_type = bdaddr_type;
709 			bacpy(&cp.bdaddr, bdaddr);
710 
711 			hci_req_add(req, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
712 				    sizeof(cp), &cp);
713 		}
714 	}
715 }
716 
717 /* Adds connection to white list if needed. On error, returns -1. */
718 static int add_to_white_list(struct hci_request *req,
719 			     struct hci_conn_params *params, u8 *num_entries,
720 			     bool allow_rpa)
721 {
722 	struct hci_cp_le_add_to_white_list cp;
723 	struct hci_dev *hdev = req->hdev;
724 
725 	/* Already in white list */
726 	if (hci_bdaddr_list_lookup(&hdev->le_white_list, &params->addr,
727 				   params->addr_type))
728 		return 0;
729 
730 	/* White list full: signal the caller to accept all advertising */
731 	if (*num_entries >= hdev->le_white_list_size)
732 		return -1;
733 
734 	/* White list cannot be used with RPAs */
735 	if (!allow_rpa && !use_ll_privacy(hdev) &&
736 	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) {
737 		return -1;
738 	}
739 
740 	/* During suspend, only wakeable devices can be in the whitelist */
741 	if (hdev->suspended && !hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
742 						   params->current_flags))
743 		return 0;
744 
745 	*num_entries += 1;
746 	cp.bdaddr_type = params->addr_type;
747 	bacpy(&cp.bdaddr, &params->addr);
748 
749 	bt_dev_dbg(hdev, "Add %pMR (0x%x) to whitelist", &cp.bdaddr,
750 		   cp.bdaddr_type);
751 	hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
752 
753 	if (use_ll_privacy(hdev)) {
754 		struct smp_irk *irk;
755 
756 		irk = hci_find_irk_by_addr(hdev, &params->addr,
757 					   params->addr_type);
758 		if (irk) {
759 			struct hci_cp_le_add_to_resolv_list cp;
760 
761 			cp.bdaddr_type = params->addr_type;
762 			bacpy(&cp.bdaddr, &params->addr);
763 			memcpy(cp.peer_irk, irk->val, 16);
764 
765 			if (hci_dev_test_flag(hdev, HCI_PRIVACY))
766 				memcpy(cp.local_irk, hdev->irk, 16);
767 			else
768 				memset(cp.local_irk, 0, 16);
769 
770 			hci_req_add(req, HCI_OP_LE_ADD_TO_RESOLV_LIST,
771 				    sizeof(cp), &cp);
772 		}
773 	}
774 
775 	return 0;
776 }
777 
778 static u8 update_white_list(struct hci_request *req)
779 {
780 	struct hci_dev *hdev = req->hdev;
781 	struct hci_conn_params *params;
782 	struct bdaddr_list *b;
783 	u8 num_entries = 0;
784 	bool pend_conn, pend_report;
785 	/* We allow whitelisting even with RPAs in suspend. In the worst case,
786 	 * we won't be able to wake from devices that use the privacy 1.2
787 	 * features. Additionally, once we support privacy 1.2 and IRK
788 	 * offloading, we can update this to also check for those conditions.
789 	 */
790 	bool allow_rpa = hdev->suspended;
791 
792 	/* Go through the current white list programmed into the
793 	 * controller one by one and check if that address is still
794 	 * in the list of pending connections or list of devices to
795 	 * report. If not present in either list, then queue the
796 	 * command to remove it from the controller.
797 	 */
798 	list_for_each_entry(b, &hdev->le_white_list, list) {
799 		pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
800 						      &b->bdaddr,
801 						      b->bdaddr_type);
802 		pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
803 							&b->bdaddr,
804 							b->bdaddr_type);
805 
806 		/* If the device is not likely to connect or report,
807 		 * remove it from the whitelist.
808 		 */
809 		if (!pend_conn && !pend_report) {
810 			del_from_white_list(req, &b->bdaddr, b->bdaddr_type);
811 			continue;
812 		}
813 
814 		/* White list cannot be used with RPAs */
815 		if (!allow_rpa && !use_ll_privacy(hdev) &&
816 		    hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
817 			return 0x00;
818 		}
819 
820 		num_entries++;
821 	}
822 
823 	/* Since all stale white list entries have been
824 	 * removed, walk through the list of pending connections
825 	 * and ensure that any new device gets programmed into
826 	 * the controller.
827 	 *
828 	 * If the list of the devices is larger than the list of
829 	 * available white list entries in the controller, then
830 	 * just abort and return a filter policy value that does not
831 	 * use the white list.
832 	 */
833 	list_for_each_entry(params, &hdev->pend_le_conns, action) {
834 		if (add_to_white_list(req, params, &num_entries, allow_rpa))
835 			return 0x00;
836 	}
837 
838 	/* After adding all new pending connections, walk through
839 	 * the list of pending reports and also add these to the
840 	 * white list if there is still space. Abort if space runs out.
841 	 */
842 	list_for_each_entry(params, &hdev->pend_le_reports, action) {
843 		if (add_to_white_list(req, params, &num_entries, allow_rpa))
844 			return 0x00;
845 	}
846 
847 	/* Once controller offloading of advertisement monitors is in place,
848 	 * the condition below should also check for support of the MSFT
849 	 * extension. If suspend is ongoing, the whitelist should be the
850 	 * default to prevent waking by random advertisements.
851 	 */
852 	if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended)
853 		return 0x00;
854 
855 	/* Select filter policy to use white list */
856 	return 0x01;
857 }
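/*
 * To summarize the return value: 0x00 selects a scan filter policy that
 * accepts all advertisements, while 0x01 restricts scanning to white
 * list entries. hci_req_add_le_passive_scan() below may additionally OR
 * in 0x02 to select the extended filter policies 0x02/0x03, which also
 * accept directed advertising.
 */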
858 
859 static bool scan_use_rpa(struct hci_dev *hdev)
860 {
861 	return hci_dev_test_flag(hdev, HCI_PRIVACY);
862 }
863 
864 static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
865 			       u16 window, u8 own_addr_type, u8 filter_policy,
866 			       bool addr_resolv)
867 {
868 	struct hci_dev *hdev = req->hdev;
869 
870 	if (hdev->scanning_paused) {
871 		bt_dev_dbg(hdev, "Scanning is paused for suspend");
872 		return;
873 	}
874 
875 	if (use_ll_privacy(hdev) &&
876 	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
877 	    addr_resolv) {
878 		u8 enable = 0x01;
879 
880 		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
881 	}
882 
883 	/* Use extended scanning if the controller supports both the set
884 	 * extended scan parameters and set extended scan enable commands
885 	 */
886 	if (use_ext_scan(hdev)) {
887 		struct hci_cp_le_set_ext_scan_params *ext_param_cp;
888 		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
889 		struct hci_cp_le_scan_phy_params *phy_params;
890 		u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
891 		u32 plen;
892 
893 		ext_param_cp = (void *)data;
894 		phy_params = (void *)ext_param_cp->data;
895 
896 		memset(ext_param_cp, 0, sizeof(*ext_param_cp));
897 		ext_param_cp->own_addr_type = own_addr_type;
898 		ext_param_cp->filter_policy = filter_policy;
899 
900 		plen = sizeof(*ext_param_cp);
901 
902 		if (scan_1m(hdev) || scan_2m(hdev)) {
903 			ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;
904 
905 			memset(phy_params, 0, sizeof(*phy_params));
906 			phy_params->type = type;
907 			phy_params->interval = cpu_to_le16(interval);
908 			phy_params->window = cpu_to_le16(window);
909 
910 			plen += sizeof(*phy_params);
911 			phy_params++;
912 		}
913 
914 		if (scan_coded(hdev)) {
915 			ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;
916 
917 			memset(phy_params, 0, sizeof(*phy_params));
918 			phy_params->type = type;
919 			phy_params->interval = cpu_to_le16(interval);
920 			phy_params->window = cpu_to_le16(window);
921 
922 			plen += sizeof(*phy_params);
923 			phy_params++;
924 		}
925 
926 		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
927 			    plen, ext_param_cp);
928 
929 		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
930 		ext_enable_cp.enable = LE_SCAN_ENABLE;
931 		ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
932 
933 		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
934 			    sizeof(ext_enable_cp), &ext_enable_cp);
935 	} else {
936 		struct hci_cp_le_set_scan_param param_cp;
937 		struct hci_cp_le_set_scan_enable enable_cp;
938 
939 		memset(&param_cp, 0, sizeof(param_cp));
940 		param_cp.type = type;
941 		param_cp.interval = cpu_to_le16(interval);
942 		param_cp.window = cpu_to_le16(window);
943 		param_cp.own_address_type = own_addr_type;
944 		param_cp.filter_policy = filter_policy;
945 		hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
946 			    &param_cp);
947 
948 		memset(&enable_cp, 0, sizeof(enable_cp));
949 		enable_cp.enable = LE_SCAN_ENABLE;
950 		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
951 		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
952 			    &enable_cp);
953 	}
954 }
955 
956 /* Returns true if an LE connection is in the scanning state */
957 static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev)
958 {
959 	struct hci_conn_hash *h = &hdev->conn_hash;
960 	struct hci_conn  *c;
961 
962 	rcu_read_lock();
963 
964 	list_for_each_entry_rcu(c, &h->list, list) {
965 		if (c->type == LE_LINK && c->state == BT_CONNECT &&
966 		    test_bit(HCI_CONN_SCANNING, &c->flags)) {
967 			rcu_read_unlock();
968 			return true;
969 		}
970 	}
971 
972 	rcu_read_unlock();
973 
974 	return false;
975 }
976 
977 /* Call hci_req_add_le_scan_disable() first to disable the
978  * controller-based address resolution so that the resolving
979  * list can be reconfigured.
980  */
981 void hci_req_add_le_passive_scan(struct hci_request *req)
982 {
983 	struct hci_dev *hdev = req->hdev;
984 	u8 own_addr_type;
985 	u8 filter_policy;
986 	u16 window, interval;
987 	/* Background scanning should run with address resolution */
988 	bool addr_resolv = true;
989 
990 	if (hdev->scanning_paused) {
991 		bt_dev_dbg(hdev, "Scanning is paused for suspend");
992 		return;
993 	}
994 
995 	/* Set require_privacy to false since no SCAN_REQ is sent
996 	 * during passive scanning. Not using a non-resolvable address
997 	 * here is important so that peer devices using direct
998 	 * advertising with our address will be correctly reported
999 	 * by the controller.
1000 	 */
1001 	if (hci_update_random_address(req, false, scan_use_rpa(hdev),
1002 				      &own_addr_type))
1003 		return;
1004 
1005 	/* Adding or removing entries from the white list must
1006 	 * happen before enabling scanning. The controller does
1007 	 * not allow white list modification while scanning.
1008 	 */
1009 	filter_policy = update_white_list(req);
1010 
1011 	/* When the controller is using random resolvable addresses with
1012 	 * LE privacy enabled, controllers that support the Extended
1013 	 * Scanner Filter Policies can also handle directed advertising
1014 	 * while scanning.
1015 	 *
1016 	 * So instead of using filter policies 0x00 (no whitelist)
1017 	 * and 0x01 (whitelist enabled), use the new filter policies
1018 	 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
1019 	 */
1020 	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
1021 	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
1022 		filter_policy |= 0x02;
1023 
1024 	if (hdev->suspended) {
1025 		window = hdev->le_scan_window_suspend;
1026 		interval = hdev->le_scan_int_suspend;
1027 	} else if (hci_is_le_conn_scanning(hdev)) {
1028 		window = hdev->le_scan_window_connect;
1029 		interval = hdev->le_scan_int_connect;
1030 	} else {
1031 		window = hdev->le_scan_window;
1032 		interval = hdev->le_scan_interval;
1033 	}
1034 
1035 	bt_dev_dbg(hdev, "LE passive scan with whitelist = %d", filter_policy);
1036 	hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
1037 			   own_addr_type, filter_policy, addr_resolv);
1038 }
1039 
1040 static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
1041 {
1042 	struct adv_info *adv_instance;
1043 
1044 	/* Instance 0x00 always sets the local name */
1045 	if (instance == 0x00)
1046 		return 1;
1047 
1048 	adv_instance = hci_find_adv_instance(hdev, instance);
1049 	if (!adv_instance)
1050 		return 0;
1051 
1052 	/* TODO: Take into account the "appearance" and "local-name" flags here.
1053 	 * These are currently being ignored as they are not supported.
1054 	 */
1055 	return adv_instance->scan_rsp_len;
1056 }
1057 
1058 static void hci_req_clear_event_filter(struct hci_request *req)
1059 {
1060 	struct hci_cp_set_event_filter f;
1061 
1062 	memset(&f, 0, sizeof(f));
1063 	f.flt_type = HCI_FLT_CLEAR_ALL;
1064 	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &f);
1065 
1066 	/* Update page scan state (since we may have modified it when setting
1067 	 * the event filter).
1068 	 */
1069 	__hci_req_update_scan(req);
1070 }
1071 
1072 static void hci_req_set_event_filter(struct hci_request *req)
1073 {
1074 	struct bdaddr_list_with_flags *b;
1075 	struct hci_cp_set_event_filter f;
1076 	struct hci_dev *hdev = req->hdev;
1077 	u8 scan = SCAN_DISABLED;
1078 
1079 	/* Always clear event filter when starting */
1080 	hci_req_clear_event_filter(req);
1081 
1082 	list_for_each_entry(b, &hdev->whitelist, list) {
1083 		if (!hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
1084 					b->current_flags))
1085 			continue;
1086 
1087 		memset(&f, 0, sizeof(f));
1088 		bacpy(&f.addr_conn_flt.bdaddr, &b->bdaddr);
1089 		f.flt_type = HCI_FLT_CONN_SETUP;
1090 		f.cond_type = HCI_CONN_SETUP_ALLOW_BDADDR;
1091 		f.addr_conn_flt.auto_accept = HCI_CONN_SETUP_AUTO_ON;
1092 
1093 		bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);
1094 		hci_req_add(req, HCI_OP_SET_EVENT_FLT, sizeof(f), &f);
1095 		scan = SCAN_PAGE;
1096 	}
1097 
1098 	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1099 }
1100 
1101 static void hci_req_config_le_suspend_scan(struct hci_request *req)
1102 {
1103 	/* Before changing params disable scan if enabled */
1104 	if (hci_dev_test_flag(req->hdev, HCI_LE_SCAN))
1105 		hci_req_add_le_scan_disable(req, false);
1106 
1107 	/* Configure params and enable scanning */
1108 	hci_req_add_le_passive_scan(req);
1109 
1110 	/* Block suspend notifier on response */
1111 	set_bit(SUSPEND_SCAN_ENABLE, req->hdev->suspend_tasks);
1112 }
1113 
1114 static void suspend_req_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1115 {
1116 	bt_dev_dbg(hdev, "Request complete opcode=0x%x, status=0x%x", opcode,
1117 		   status);
1118 	if (test_and_clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) ||
1119 	    test_and_clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) {
1120 		wake_up(&hdev->suspend_wait_q);
1121 	}
1122 }
1123 
1124 /* Call with hci_dev_lock */
1125 void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
1126 {
1127 	int old_state;
1128 	struct hci_conn *conn;
1129 	struct hci_request req;
1130 	u8 page_scan;
1131 	int disconnect_counter;
1132 
1133 	if (next == hdev->suspend_state) {
1134 		bt_dev_dbg(hdev, "Same state before and after: %d", next);
1135 		goto done;
1136 	}
1137 
1138 	hdev->suspend_state = next;
1139 	hci_req_init(&req, hdev);
1140 
1141 	if (next == BT_SUSPEND_DISCONNECT) {
1142 		/* Mark device as suspended */
1143 		hdev->suspended = true;
1144 
1145 		/* Pause discovery if not already stopped */
1146 		old_state = hdev->discovery.state;
1147 		if (old_state != DISCOVERY_STOPPED) {
1148 			set_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks);
1149 			hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
1150 			queue_work(hdev->req_workqueue, &hdev->discov_update);
1151 		}
1152 
1153 		hdev->discovery_paused = true;
1154 		hdev->discovery_old_state = old_state;
1155 
1156 		/* Stop advertising */
1157 		old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
1158 		if (old_state) {
1159 			set_bit(SUSPEND_PAUSE_ADVERTISING, hdev->suspend_tasks);
1160 			cancel_delayed_work(&hdev->discov_off);
1161 			queue_delayed_work(hdev->req_workqueue,
1162 					   &hdev->discov_off, 0);
1163 		}
1164 
1165 		hdev->advertising_paused = true;
1166 		hdev->advertising_old_state = old_state;
1167 		/* Disable page scan */
1168 		page_scan = SCAN_DISABLED;
1169 		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &page_scan);
1170 
1171 		/* Disable LE passive scan if enabled */
1172 		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
1173 			hci_req_add_le_scan_disable(&req, false);
1174 
1175 		/* Mark task needing completion */
1176 		set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
1177 
1178 		/* Prevent disconnects from causing scanning to be re-enabled */
1179 		hdev->scanning_paused = true;
1180 
1181 		/* Run commands before disconnecting */
1182 		hci_req_run(&req, suspend_req_complete);
1183 
1184 		disconnect_counter = 0;
1185 		/* Soft disconnect everything (power off) */
1186 		list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1187 			hci_disconnect(conn, HCI_ERROR_REMOTE_POWER_OFF);
1188 			disconnect_counter++;
1189 		}
1190 
1191 		if (disconnect_counter > 0) {
1192 			bt_dev_dbg(hdev,
1193 				   "Had %d disconnects. Will wait on them",
1194 				   disconnect_counter);
1195 			set_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks);
1196 		}
1197 	} else if (next == BT_SUSPEND_CONFIGURE_WAKE) {
1198 		/* Unpause to take care of updating scanning params */
1199 		hdev->scanning_paused = false;
1200 		/* Enable event filter for paired devices */
1201 		hci_req_set_event_filter(&req);
1202 		/* Enable passive scan at lower duty cycle */
1203 		hci_req_config_le_suspend_scan(&req);
1204 		/* Pause scan changes again. */
1205 		hdev->scanning_paused = true;
1206 		hci_req_run(&req, suspend_req_complete);
1207 	} else {
1208 		hdev->suspended = false;
1209 		hdev->scanning_paused = false;
1210 
1211 		hci_req_clear_event_filter(&req);
1212 		/* Reset passive/background scanning to normal */
1213 		hci_req_config_le_suspend_scan(&req);
1214 
1215 		/* Unpause advertising */
1216 		hdev->advertising_paused = false;
1217 		if (hdev->advertising_old_state) {
1218 			set_bit(SUSPEND_UNPAUSE_ADVERTISING,
1219 				hdev->suspend_tasks);
1220 			hci_dev_set_flag(hdev, HCI_ADVERTISING);
1221 			queue_work(hdev->req_workqueue,
1222 				   &hdev->discoverable_update);
1223 			hdev->advertising_old_state = 0;
1224 		}
1225 
1226 		/* Unpause discovery */
1227 		hdev->discovery_paused = false;
1228 		if (hdev->discovery_old_state != DISCOVERY_STOPPED &&
1229 		    hdev->discovery_old_state != DISCOVERY_STOPPING) {
1230 			set_bit(SUSPEND_UNPAUSE_DISCOVERY, hdev->suspend_tasks);
1231 			hci_discovery_set_state(hdev, DISCOVERY_STARTING);
1232 			queue_work(hdev->req_workqueue, &hdev->discov_update);
1233 		}
1234 
1235 		hci_req_run(&req, suspend_req_complete);
1236 	}
1237 
1238 	hdev->suspend_state = next;
1239 
1240 done:
1241 	clear_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
1242 	wake_up(&hdev->suspend_wait_q);
1243 }
1244 
1245 static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
1246 {
1247 	u8 instance = hdev->cur_adv_instance;
1248 	struct adv_info *adv_instance;
1249 
1250 	/* Instance 0x00 always sets the local name */
1251 	if (instance == 0x00)
1252 		return 1;
1253 
1254 	adv_instance = hci_find_adv_instance(hdev, instance);
1255 	if (!adv_instance)
1256 		return 0;
1257 
1258 	/* TODO: Take into account the "appearance" and "local-name" flags here.
1259 	 * These are currently being ignored as they are not supported.
1260 	 */
1261 	return adv_instance->scan_rsp_len;
1262 }
1263 
1264 void __hci_req_disable_advertising(struct hci_request *req)
1265 {
1266 	if (ext_adv_capable(req->hdev)) {
1267 		__hci_req_disable_ext_adv_instance(req, 0x00);
1268 
1269 	} else {
1270 		u8 enable = 0x00;
1271 
1272 		hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1273 	}
1274 }
1275 
1276 static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
1277 {
1278 	u32 flags;
1279 	struct adv_info *adv_instance;
1280 
1281 	if (instance == 0x00) {
1282 		/* Instance 0 always manages the "Tx Power" and "Flags"
1283 		 * fields
1284 		 */
1285 		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
1286 
1287 		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
1288 		 * corresponds to the "connectable" instance flag.
1289 		 */
1290 		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
1291 			flags |= MGMT_ADV_FLAG_CONNECTABLE;
1292 
1293 		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1294 			flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
1295 		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1296 			flags |= MGMT_ADV_FLAG_DISCOV;
1297 
1298 		return flags;
1299 	}
1300 
1301 	adv_instance = hci_find_adv_instance(hdev, instance);
1302 
1303 	/* Return 0 when we got an invalid instance identifier. */
1304 	if (!adv_instance)
1305 		return 0;
1306 
1307 	return adv_instance->flags;
1308 }
1309 
1310 static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
1311 {
1312 	/* If privacy is not enabled don't use RPA */
1313 	if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
1314 		return false;
1315 
1316 	/* If basic privacy mode is enabled use RPA */
1317 	if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1318 		return true;
1319 
1320 	/* If limited privacy mode is enabled, don't use RPA if we're
1321 	 * both discoverable and bondable.
1322 	 */
1323 	if ((flags & MGMT_ADV_FLAG_DISCOV) &&
1324 	    hci_dev_test_flag(hdev, HCI_BONDABLE))
1325 		return false;
1326 
1327 	/* We're neither bondable nor discoverable in the limited
1328 	 * privacy mode, therefore use RPA.
1329 	 */
1330 	return true;
1331 }
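/*
 * Decision table for the checks above (for reference):
 *
 *	HCI_PRIVACY clear                          -> no RPA
 *	HCI_PRIVACY set, HCI_LIMITED_PRIVACY clear -> RPA
 *	limited privacy, discoverable + bondable   -> no RPA
 *	limited privacy otherwise                  -> RPA
 */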
1332 
1333 static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
1334 {
1335 	/* If there is no connection we are OK to advertise. */
1336 	if (hci_conn_num(hdev, LE_LINK) == 0)
1337 		return true;
1338 
1339 	/* Check le_states if there is any connection in slave role. */
1340 	if (hdev->conn_hash.le_num_slave > 0) {
1341 		/* Slave connection state and non-connectable mode, bit 20. */
1342 		if (!connectable && !(hdev->le_states[2] & 0x10))
1343 			return false;
1344 
1345 		/* Slave connection state and connectable mode bit 38
1346 		 * and scannable bit 21.
1347 		 */
1348 		if (connectable && (!(hdev->le_states[4] & 0x40) ||
1349 				    !(hdev->le_states[2] & 0x20)))
1350 			return false;
1351 	}
1352 
1353 	/* Check le_states if there is any connection in master role. */
1354 	if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_slave) {
1355 		/* Master connection state and non-connectable mode, bit 18. */
1356 		if (!connectable && !(hdev->le_states[2] & 0x02))
1357 			return false;
1358 
1359 		/* Master connection state and connectable mode bit 35 and
1360 		 * scannable 19.
1361 		 */
1362 		if (connectable && (!(hdev->le_states[4] & 0x08) ||
1363 				    !(hdev->le_states[2] & 0x08)))
1364 			return false;
1365 	}
1366 
1367 	return true;
1368 }
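/*
 * The le_states tests above index the controller's 8-byte
 * supported-states bitmask: spec bit N is le_states[N / 8] masked with
 * (1 << (N % 8)). For instance, bit 20 maps to le_states[2] & 0x10 and
 * bit 38 to le_states[4] & 0x40.
 */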
1369 
1370 void __hci_req_enable_advertising(struct hci_request *req)
1371 {
1372 	struct hci_dev *hdev = req->hdev;
1373 	struct hci_cp_le_set_adv_param cp;
1374 	u8 own_addr_type, enable = 0x01;
1375 	bool connectable;
1376 	u16 adv_min_interval, adv_max_interval;
1377 	u32 flags;
1378 
1379 	flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);
1380 
1381 	/* If the "connectable" instance flag was not set, then choose between
1382 	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1383 	 */
1384 	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1385 		      mgmt_get_connectable(hdev);
1386 
1387 	if (!is_advertising_allowed(hdev, connectable))
1388 		return;
1389 
1390 	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1391 		__hci_req_disable_advertising(req);
1392 
1393 	/* Clear the HCI_LE_ADV bit temporarily so that the
1394 	 * hci_update_random_address knows that it's safe to go ahead
1395 	 * and write a new random address. The flag will be set back on
1396 	 * as soon as the SET_ADV_ENABLE HCI command completes.
1397 	 */
1398 	hci_dev_clear_flag(hdev, HCI_LE_ADV);
1399 
1400 	/* Set require_privacy to true only when non-connectable
1401 	 * advertising is used. In that case it is fine to use a
1402 	 * non-resolvable private address.
1403 	 */
1404 	if (hci_update_random_address(req, !connectable,
1405 				      adv_use_rpa(hdev, flags),
1406 				      &own_addr_type) < 0)
1407 		return;
1408 
1409 	memset(&cp, 0, sizeof(cp));
1410 
1411 	if (connectable) {
1412 		cp.type = LE_ADV_IND;
1413 
1414 		adv_min_interval = hdev->le_adv_min_interval;
1415 		adv_max_interval = hdev->le_adv_max_interval;
1416 	} else {
1417 		if (get_cur_adv_instance_scan_rsp_len(hdev))
1418 			cp.type = LE_ADV_SCAN_IND;
1419 		else
1420 			cp.type = LE_ADV_NONCONN_IND;
1421 
1422 		if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
1423 		    hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1424 			adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
1425 			adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
1426 		} else {
1427 			adv_min_interval = hdev->le_adv_min_interval;
1428 			adv_max_interval = hdev->le_adv_max_interval;
1429 		}
1430 	}
1431 
1432 	cp.min_interval = cpu_to_le16(adv_min_interval);
1433 	cp.max_interval = cpu_to_le16(adv_max_interval);
1434 	cp.own_address_type = own_addr_type;
1435 	cp.channel_map = hdev->le_adv_channel_map;
1436 
1437 	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1438 
1439 	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1440 }
1441 
1442 u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1443 {
1444 	size_t short_len;
1445 	size_t complete_len;
1446 
1447 	/* no space left for name (+ NULL + type + len) */
1448 	if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
1449 		return ad_len;
1450 
1451 	/* use the complete name if present and it fits */
1452 	complete_len = strlen(hdev->dev_name);
1453 	if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
1454 		return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
1455 				       hdev->dev_name, complete_len + 1);
1456 
1457 	/* use short name if present */
1458 	short_len = strlen(hdev->short_name);
1459 	if (short_len)
1460 		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
1461 				       hdev->short_name, short_len + 1);
1462 
1463 	/* use the shortened full name if present; we already know that
1464 	 * the name is longer than HCI_MAX_SHORT_NAME_LENGTH
1465 	 */
1466 	if (complete_len) {
1467 		u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];
1468 
1469 		memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
1470 		name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';
1471 
1472 		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
1473 				       sizeof(name));
1474 	}
1475 
1476 	return ad_len;
1477 }
1478 
1479 static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1480 {
1481 	return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
1482 }
1483 
1484 static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
1485 {
1486 	u8 scan_rsp_len = 0;
1487 
1488 	if (hdev->appearance) {
1489 		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1490 	}
1491 
1492 	return append_local_name(hdev, ptr, scan_rsp_len);
1493 }
1494 
1495 static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
1496 					u8 *ptr)
1497 {
1498 	struct adv_info *adv_instance;
1499 	u32 instance_flags;
1500 	u8 scan_rsp_len = 0;
1501 
1502 	adv_instance = hci_find_adv_instance(hdev, instance);
1503 	if (!adv_instance)
1504 		return 0;
1505 
1506 	instance_flags = adv_instance->flags;
1507 
1508 	if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) {
1509 		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1510 	}
1511 
1512 	memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
1513 	       adv_instance->scan_rsp_len);
1514 
1515 	scan_rsp_len += adv_instance->scan_rsp_len;
1516 
1517 	if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
1518 		scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);
1519 
1520 	return scan_rsp_len;
1521 }
1522 
1523 void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
1524 {
1525 	struct hci_dev *hdev = req->hdev;
1526 	u8 len;
1527 
1528 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1529 		return;
1530 
1531 	if (ext_adv_capable(hdev)) {
1532 		struct hci_cp_le_set_ext_scan_rsp_data cp;
1533 
1534 		memset(&cp, 0, sizeof(cp));
1535 
1536 		if (instance)
1537 			len = create_instance_scan_rsp_data(hdev, instance,
1538 							    cp.data);
1539 		else
1540 			len = create_default_scan_rsp_data(hdev, cp.data);
1541 
1542 		if (hdev->scan_rsp_data_len == len &&
1543 		    !memcmp(cp.data, hdev->scan_rsp_data, len))
1544 			return;
1545 
1546 		memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1547 		hdev->scan_rsp_data_len = len;
1548 
1549 		cp.handle = instance;
1550 		cp.length = len;
1551 		cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1552 		cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1553 
1554 		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, sizeof(cp),
1555 			    &cp);
1556 	} else {
1557 		struct hci_cp_le_set_scan_rsp_data cp;
1558 
1559 		memset(&cp, 0, sizeof(cp));
1560 
1561 		if (instance)
1562 			len = create_instance_scan_rsp_data(hdev, instance,
1563 							    cp.data);
1564 		else
1565 			len = create_default_scan_rsp_data(hdev, cp.data);
1566 
1567 		if (hdev->scan_rsp_data_len == len &&
1568 		    !memcmp(cp.data, hdev->scan_rsp_data, len))
1569 			return;
1570 
1571 		memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1572 		hdev->scan_rsp_data_len = len;
1573 
1574 		cp.length = len;
1575 
1576 		hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
1577 	}
1578 }
1579 
1580 static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
1581 {
1582 	struct adv_info *adv_instance = NULL;
1583 	u8 ad_len = 0, flags = 0;
1584 	u32 instance_flags;
1585 
1586 	/* Return 0 when the current instance identifier is invalid. */
1587 	if (instance) {
1588 		adv_instance = hci_find_adv_instance(hdev, instance);
1589 		if (!adv_instance)
1590 			return 0;
1591 	}
1592 
1593 	instance_flags = get_adv_instance_flags(hdev, instance);
1594 
1595 	/* If the instance already has the flags set, skip adding them
1596 	 * once again.
1597 	 */
1598 	if (adv_instance && eir_get_data(adv_instance->adv_data,
1599 					 adv_instance->adv_data_len, EIR_FLAGS,
1600 					 NULL))
1601 		goto skip_flags;
1602 
1603 	/* The Add Advertising command allows userspace to set both the general
1604 	 * and limited discoverable flags.
1605 	 */
1606 	if (instance_flags & MGMT_ADV_FLAG_DISCOV)
1607 		flags |= LE_AD_GENERAL;
1608 
1609 	if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
1610 		flags |= LE_AD_LIMITED;
1611 
1612 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1613 		flags |= LE_AD_NO_BREDR;
1614 
1615 	if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
1616 		/* If a discovery flag wasn't provided, simply use the global
1617 		 * settings.
1618 		 */
1619 		if (!flags)
1620 			flags |= mgmt_get_adv_discov_flags(hdev);
1621 
1622 		/* If flags would still be empty, then there is no need to
1623 		 * include the "Flags" AD field.
1624 		 */
1625 		if (flags) {
1626 			ptr[0] = 0x02;
1627 			ptr[1] = EIR_FLAGS;
1628 			ptr[2] = flags;
1629 
1630 			ad_len += 3;
1631 			ptr += 3;
1632 		}
1633 	}
1634 
1635 skip_flags:
1636 	if (adv_instance) {
1637 		memcpy(ptr, adv_instance->adv_data,
1638 		       adv_instance->adv_data_len);
1639 		ad_len += adv_instance->adv_data_len;
1640 		ptr += adv_instance->adv_data_len;
1641 	}
1642 
1643 	if (instance_flags & MGMT_ADV_FLAG_TX_POWER) {
1644 		s8 adv_tx_power;
1645 
1646 		if (ext_adv_capable(hdev)) {
1647 			if (adv_instance)
1648 				adv_tx_power = adv_instance->tx_power;
1649 			else
1650 				adv_tx_power = hdev->adv_tx_power;
1651 		} else {
1652 			adv_tx_power = hdev->adv_tx_power;
1653 		}
1654 
1655 		/* Provide Tx Power only when a valid value is available */
1656 		if (adv_tx_power != HCI_TX_POWER_INVALID) {
1657 			ptr[0] = 0x02;
1658 			ptr[1] = EIR_TX_POWER;
1659 			ptr[2] = (u8)adv_tx_power;
1660 
1661 			ad_len += 3;
1662 			ptr += 3;
1663 		}
1664 	}
1665 
1666 	return ad_len;
1667 }
1668 
1669 void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
1670 {
1671 	struct hci_dev *hdev = req->hdev;
1672 	u8 len;
1673 
1674 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1675 		return;
1676 
1677 	if (ext_adv_capable(hdev)) {
1678 		struct hci_cp_le_set_ext_adv_data cp;
1679 
1680 		memset(&cp, 0, sizeof(cp));
1681 
1682 		len = create_instance_adv_data(hdev, instance, cp.data);
1683 
1684 		/* There's nothing to do if the data hasn't changed */
1685 		if (hdev->adv_data_len == len &&
1686 		    memcmp(cp.data, hdev->adv_data, len) == 0)
1687 			return;
1688 
1689 		memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1690 		hdev->adv_data_len = len;
1691 
1692 		cp.length = len;
1693 		cp.handle = instance;
1694 		cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1695 		cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1696 
1697 		hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA, sizeof(cp), &cp);
1698 	} else {
1699 		struct hci_cp_le_set_adv_data cp;
1700 
1701 		memset(&cp, 0, sizeof(cp));
1702 
1703 		len = create_instance_adv_data(hdev, instance, cp.data);
1704 
1705 		/* There's nothing to do if the data hasn't changed */
1706 		if (hdev->adv_data_len == len &&
1707 		    memcmp(cp.data, hdev->adv_data, len) == 0)
1708 			return;
1709 
1710 		memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1711 		hdev->adv_data_len = len;
1712 
1713 		cp.length = len;
1714 
1715 		hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1716 	}
1717 }
1718 
1719 int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
1720 {
1721 	struct hci_request req;
1722 
1723 	hci_req_init(&req, hdev);
1724 	__hci_req_update_adv_data(&req, instance);
1725 
1726 	return hci_req_run(&req, NULL);
1727 }
1728 
1729 static void enable_addr_resolution_complete(struct hci_dev *hdev, u8 status,
1730 					    u16 opcode)
1731 {
1732 	BT_DBG("%s status %u", hdev->name, status);
1733 }
1734 
1735 void hci_req_disable_address_resolution(struct hci_dev *hdev)
1736 {
1737 	struct hci_request req;
1738 	__u8 enable = 0x00;
1739 
1740 	if (!use_ll_privacy(hdev) &&
1741 	    !hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
1742 		return;
1743 
1744 	hci_req_init(&req, hdev);
1745 
1746 	hci_req_add(&req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
1747 
1748 	hci_req_run(&req, enable_addr_resolution_complete);
1749 }
1750 
1751 static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1752 {
1753 	BT_DBG("%s status %u", hdev->name, status);
1754 }
1755 
1756 void hci_req_reenable_advertising(struct hci_dev *hdev)
1757 {
1758 	struct hci_request req;
1759 
1760 	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1761 	    list_empty(&hdev->adv_instances))
1762 		return;
1763 
1764 	hci_req_init(&req, hdev);
1765 
1766 	if (hdev->cur_adv_instance) {
1767 		__hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
1768 						true);
1769 	} else {
1770 		if (ext_adv_capable(hdev)) {
1771 			__hci_req_start_ext_adv(&req, 0x00);
1772 		} else {
1773 			__hci_req_update_adv_data(&req, 0x00);
1774 			__hci_req_update_scan_rsp_data(&req, 0x00);
1775 			__hci_req_enable_advertising(&req);
1776 		}
1777 	}
1778 
1779 	hci_req_run(&req, adv_enable_complete);
1780 }
1781 
1782 static void adv_timeout_expire(struct work_struct *work)
1783 {
1784 	struct hci_dev *hdev = container_of(work, struct hci_dev,
1785 					    adv_instance_expire.work);
1786 
1787 	struct hci_request req;
1788 	u8 instance;
1789 
1790 	BT_DBG("%s", hdev->name);
1791 
1792 	hci_dev_lock(hdev);
1793 
1794 	hdev->adv_instance_timeout = 0;
1795 
1796 	instance = hdev->cur_adv_instance;
1797 	if (instance == 0x00)
1798 		goto unlock;
1799 
1800 	hci_req_init(&req, hdev);
1801 
1802 	hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);
1803 
1804 	if (list_empty(&hdev->adv_instances))
1805 		__hci_req_disable_advertising(&req);
1806 
1807 	hci_req_run(&req, NULL);
1808 
1809 unlock:
1810 	hci_dev_unlock(hdev);
1811 }
1812 
1813 int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
1814 			   bool use_rpa, struct adv_info *adv_instance,
1815 			   u8 *own_addr_type, bdaddr_t *rand_addr)
1816 {
1817 	int err;
1818 
1819 	bacpy(rand_addr, BDADDR_ANY);
1820 
1821 	/* If privacy is enabled use a resolvable private address. If
1822 	 * current RPA has expired then generate a new one.
1823 	 */
1824 	if (use_rpa) {
1825 		int to;
1826 
1827 		*own_addr_type = ADDR_LE_DEV_RANDOM;
1828 
1829 		if (adv_instance) {
1830 			if (!adv_instance->rpa_expired &&
1831 			    !bacmp(&adv_instance->random_addr, &hdev->rpa))
1832 				return 0;
1833 
1834 			adv_instance->rpa_expired = false;
1835 		} else {
1836 			if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
1837 			    !bacmp(&hdev->random_addr, &hdev->rpa))
1838 				return 0;
1839 		}
1840 
1841 		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
1842 		if (err < 0) {
1843 			bt_dev_err(hdev, "failed to generate new RPA");
1844 			return err;
1845 		}
1846 
1847 		bacpy(rand_addr, &hdev->rpa);
1848 
1849 		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
1850 		if (adv_instance)
1851 			queue_delayed_work(hdev->workqueue,
1852 					   &adv_instance->rpa_expired_cb, to);
1853 		else
1854 			queue_delayed_work(hdev->workqueue,
1855 					   &hdev->rpa_expired, to);
1856 
1857 		return 0;
1858 	}
1859 
1860 	/* In case of required privacy without resolvable private address,
1861 	 * use a non-resolvable private address. This is useful for
1862 	 * non-connectable advertising.
1863 	 */
1864 	if (require_privacy) {
1865 		bdaddr_t nrpa;
1866 
1867 		while (true) {
1868 			/* The non-resolvable private address is generated
1869 			 * from random six bytes with the two most significant
1870 			 * bits cleared.
1871 			 */
1872 			get_random_bytes(&nrpa, 6);
1873 			nrpa.b[5] &= 0x3f;
1874 
1875 			/* The non-resolvable private address shall not be
1876 			 * equal to the public address.
1877 			 */
1878 			if (bacmp(&hdev->bdaddr, &nrpa))
1879 				break;
1880 		}
1881 
1882 		*own_addr_type = ADDR_LE_DEV_RANDOM;
1883 		bacpy(rand_addr, &nrpa);
1884 
1885 		return 0;
1886 	}
1887 
1888 	/* No privacy so use a public address. */
1889 	*own_addr_type = ADDR_LE_DEV_PUBLIC;
1890 
1891 	return 0;
1892 }
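
/* A minimal caller sketch for hci_get_random_address() (illustrative
 * only; "flags" is assumed to come from the caller's advertising
 * context, as in __hci_req_setup_ext_adv_instance() below):
 *
 *	u8 own_addr_type;
 *	bdaddr_t rand_addr;
 *	int err;
 *
 *	err = hci_get_random_address(hdev, true, adv_use_rpa(hdev, flags),
 *				     NULL, &own_addr_type, &rand_addr);
 *	if (err < 0)
 *		return err;
 *
 * On success own_addr_type is ADDR_LE_DEV_RANDOM with rand_addr holding
 * an RPA or NRPA, or ADDR_LE_DEV_PUBLIC with rand_addr left as
 * BDADDR_ANY.
 */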
1893 
1894 void __hci_req_clear_ext_adv_sets(struct hci_request *req)
1895 {
1896 	hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
1897 }
1898 
1899 int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
1900 {
1901 	struct hci_cp_le_set_ext_adv_params cp;
1902 	struct hci_dev *hdev = req->hdev;
1903 	bool connectable;
1904 	u32 flags;
1905 	bdaddr_t random_addr;
1906 	u8 own_addr_type;
1907 	int err;
1908 	struct adv_info *adv_instance;
1909 	bool secondary_adv;
1910 
1911 	if (instance > 0) {
1912 		adv_instance = hci_find_adv_instance(hdev, instance);
1913 		if (!adv_instance)
1914 			return -EINVAL;
1915 	} else {
1916 		adv_instance = NULL;
1917 	}
1918 
1919 	flags = get_adv_instance_flags(hdev, instance);
1920 
1921 	/* If the "connectable" instance flag was not set, then choose between
1922 	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1923 	 */
1924 	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1925 		      mgmt_get_connectable(hdev);
1926 
1927 	if (!is_advertising_allowed(hdev, connectable))
1928 		return -EPERM;
1929 
1930 	/* Set require_privacy to true only when non-connectable
1931 	 * advertising is used. In that case it is fine to use a
1932 	 * non-resolvable private address.
1933 	 */
1934 	err = hci_get_random_address(hdev, !connectable,
1935 				     adv_use_rpa(hdev, flags), adv_instance,
1936 				     &own_addr_type, &random_addr);
1937 	if (err < 0)
1938 		return err;
1939 
1940 	memset(&cp, 0, sizeof(cp));
1941 
1942 	/* In the ext adv set param command, the interval is 3 octets */
1943 	hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
1944 	hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
1945 
1946 	secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
1947 
1948 	if (connectable) {
1949 		if (secondary_adv)
1950 			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
1951 		else
1952 			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
1953 	} else if (get_adv_instance_scan_rsp_len(hdev, instance)) {
1954 		if (secondary_adv)
1955 			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
1956 		else
1957 			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
1958 	} else {
1959 		if (secondary_adv)
1960 			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
1961 		else
1962 			cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
1963 	}
1964 
1965 	cp.own_addr_type = own_addr_type;
1966 	cp.channel_map = hdev->le_adv_channel_map;
1967 	cp.tx_power = 127;
1968 	cp.handle = instance;
1969 
1970 	if (flags & MGMT_ADV_FLAG_SEC_2M) {
1971 		cp.primary_phy = HCI_ADV_PHY_1M;
1972 		cp.secondary_phy = HCI_ADV_PHY_2M;
1973 	} else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
1974 		cp.primary_phy = HCI_ADV_PHY_CODED;
1975 		cp.secondary_phy = HCI_ADV_PHY_CODED;
1976 	} else {
1977 		/* In all other cases use 1M */
1978 		cp.primary_phy = HCI_ADV_PHY_1M;
1979 		cp.secondary_phy = HCI_ADV_PHY_1M;
1980 	}
1981 
1982 	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
1983 
1984 	if (own_addr_type == ADDR_LE_DEV_RANDOM &&
1985 	    bacmp(&random_addr, BDADDR_ANY)) {
1986 		struct hci_cp_le_set_adv_set_rand_addr cp;
1987 
1988 		/* Check if the random address needs to be updated */
1989 		if (adv_instance) {
1990 			if (!bacmp(&random_addr, &adv_instance->random_addr))
1991 				return 0;
1992 		} else {
1993 			if (!bacmp(&random_addr, &hdev->random_addr))
1994 				return 0;
1995 		}
1996 
1997 		memset(&cp, 0, sizeof(cp));
1998 
1999 		cp.handle = instance;
2000 		bacpy(&cp.bdaddr, &random_addr);
2001 
2002 		hci_req_add(req,
2003 			    HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
2004 			    sizeof(cp), &cp);
2005 	}
2006 
2007 	return 0;
2008 }
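
/* Summary of the event properties chosen above, by instance type:
 *
 *	connectable + legacy		-> LE_LEGACY_ADV_IND
 *	connectable + secondary PHY	-> LE_EXT_ADV_CONN_IND
 *	scannable   + legacy		-> LE_LEGACY_ADV_SCAN_IND
 *	scannable   + secondary PHY	-> LE_EXT_ADV_SCAN_IND
 *	neither     + legacy		-> LE_LEGACY_NONCONN_IND
 *	neither     + secondary PHY	-> LE_EXT_ADV_NON_CONN_IND
 */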
2009 
2010 int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
2011 {
2012 	struct hci_dev *hdev = req->hdev;
2013 	struct hci_cp_le_set_ext_adv_enable *cp;
2014 	struct hci_cp_ext_adv_set *adv_set;
2015 	u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
2016 	struct adv_info *adv_instance;
2017 
2018 	if (instance > 0) {
2019 		adv_instance = hci_find_adv_instance(hdev, instance);
2020 		if (!adv_instance)
2021 			return -EINVAL;
2022 	} else {
2023 		adv_instance = NULL;
2024 	}
2025 
2026 	cp = (void *) data;
2027 	adv_set = (void *) cp->data;
2028 
2029 	memset(cp, 0, sizeof(*cp));
2030 
2031 	cp->enable = 0x01;
2032 	cp->num_of_sets = 0x01;
2033 
2034 	memset(adv_set, 0, sizeof(*adv_set));
2035 
2036 	adv_set->handle = instance;
2037 
2038 	/* Set duration per instance since controller is responsible for
2039 	 * scheduling it.
2040 	 */
2041 	if (adv_instance && adv_instance->duration) {
2042 		u16 duration = adv_instance->timeout * MSEC_PER_SEC;
2043 
2044 		/* Time = N * 10 ms */
2045 		adv_set->duration = cpu_to_le16(duration / 10);
2046 	}
2047 
2048 	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
2049 		    sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
2050 		    data);
2051 
2052 	return 0;
2053 }
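
/* Worked example of the duration conversion above, assuming a
 * hypothetical instance with timeout = 5 seconds and a nonzero
 * duration:
 *
 *	duration = 5 * MSEC_PER_SEC;			5000 ms
 *	adv_set->duration = cpu_to_le16(5000 / 10);	N = 500
 *
 * The controller counts the duration in 10 ms units, so N = 500 keeps
 * the set advertising for 500 * 10 ms = 5 seconds before it is stopped.
 */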
2054 
2055 int __hci_req_disable_ext_adv_instance(struct hci_request *req, u8 instance)
2056 {
2057 	struct hci_dev *hdev = req->hdev;
2058 	struct hci_cp_le_set_ext_adv_enable *cp;
2059 	struct hci_cp_ext_adv_set *adv_set;
2060 	u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
2061 	u8 req_size;
2062 
2063 	/* If request specifies an instance that doesn't exist, fail */
2064 	if (instance > 0 && !hci_find_adv_instance(hdev, instance))
2065 		return -EINVAL;
2066 
2067 	memset(data, 0, sizeof(data));
2068 
2069 	cp = (void *)data;
2070 	adv_set = (void *)cp->data;
2071 
2072 	/* Instance 0x00 indicates all advertising instances will be disabled */
2073 	cp->num_of_sets = !!instance;
2074 	cp->enable = 0x00;
2075 
2076 	adv_set->handle = instance;
2077 
2078 	req_size = sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets;
2079 	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, req_size, data);
2080 
2081 	return 0;
2082 }
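
/* The two parameter encodings produced above (sizes follow from the
 * structs used here):
 *
 *	instance 0x00: { enable = 0x00, num_of_sets = 0x00 }
 *		2 bytes, disables every advertising set
 *	instance 0x01: { enable = 0x00, num_of_sets = 0x01,
 *			 set[0] = { handle = 0x01, duration = 0,
 *				    max_events = 0 } }
 *		6 bytes, disables only set 0x01
 */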
2083 
2084 int __hci_req_remove_ext_adv_instance(struct hci_request *req, u8 instance)
2085 {
2086 	struct hci_dev *hdev = req->hdev;
2087 
2088 	/* If request specifies an instance that doesn't exist, fail */
2089 	if (instance > 0 && !hci_find_adv_instance(hdev, instance))
2090 		return -EINVAL;
2091 
2092 	hci_req_add(req, HCI_OP_LE_REMOVE_ADV_SET, sizeof(instance), &instance);
2093 
2094 	return 0;
2095 }
2096 
2097 int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
2098 {
2099 	struct hci_dev *hdev = req->hdev;
2100 	struct adv_info *adv_instance = hci_find_adv_instance(hdev, instance);
2101 	int err;
2102 
2103 	/* If instance isn't pending, the chip knows about it, and it's safe to
2104 	 * disable it.
2105 	 */
2106 	if (adv_instance && !adv_instance->pending)
2107 		__hci_req_disable_ext_adv_instance(req, instance);
2108 
2109 	err = __hci_req_setup_ext_adv_instance(req, instance);
2110 	if (err < 0)
2111 		return err;
2112 
2113 	__hci_req_update_scan_rsp_data(req, instance);
2114 	__hci_req_enable_ext_advertising(req, instance);
2115 
2116 	return 0;
2117 }
2118 
2119 int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
2120 				    bool force)
2121 {
2122 	struct hci_dev *hdev = req->hdev;
2123 	struct adv_info *adv_instance = NULL;
2124 	u16 timeout;
2125 
2126 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2127 	    list_empty(&hdev->adv_instances))
2128 		return -EPERM;
2129 
2130 	if (hdev->adv_instance_timeout)
2131 		return -EBUSY;
2132 
2133 	adv_instance = hci_find_adv_instance(hdev, instance);
2134 	if (!adv_instance)
2135 		return -ENOENT;
2136 
2137 	/* A zero timeout means unlimited advertising. As long as there is
2138 	 * only one instance, duration should be ignored. We still set a timeout
2139 	 * in case further instances are being added later on.
2140 	 *
2141 	 * If the remaining lifetime of the instance is more than the duration
2142 	 * then the timeout corresponds to the duration, otherwise it will be
2143 	 * reduced to the remaining instance lifetime.
2144 	 */
2145 	if (adv_instance->timeout == 0 ||
2146 	    adv_instance->duration <= adv_instance->remaining_time)
2147 		timeout = adv_instance->duration;
2148 	else
2149 		timeout = adv_instance->remaining_time;
2150 
2151 	/* The remaining time is being reduced unless the instance is being
2152 	 * advertised without time limit.
2153 	 */
2154 	if (adv_instance->timeout)
2155 		adv_instance->remaining_time =
2156 				adv_instance->remaining_time - timeout;
2157 
2158 	/* Only use work for scheduling instances with legacy advertising */
2159 	if (!ext_adv_capable(hdev)) {
2160 		hdev->adv_instance_timeout = timeout;
2161 		queue_delayed_work(hdev->req_workqueue,
2162 				   &hdev->adv_instance_expire,
2163 				   msecs_to_jiffies(timeout * 1000));
2164 	}
2165 
2166 	/* If we're just re-scheduling the same instance again then do not
2167 	 * execute any HCI commands. This happens when a single instance is
2168 	 * being advertised.
2169 	 */
2170 	if (!force && hdev->cur_adv_instance == instance &&
2171 	    hci_dev_test_flag(hdev, HCI_LE_ADV))
2172 		return 0;
2173 
2174 	hdev->cur_adv_instance = instance;
2175 	if (ext_adv_capable(hdev)) {
2176 		__hci_req_start_ext_adv(req, instance);
2177 	} else {
2178 		__hci_req_update_adv_data(req, instance);
2179 		__hci_req_update_scan_rsp_data(req, instance);
2180 		__hci_req_enable_advertising(req);
2181 	}
2182 
2183 	return 0;
2184 }
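
/* Worked example of the timeout selection above, for a hypothetical
 * instance with duration = 2 s and timeout = 5 s:
 *
 *	1st schedule: duration (2) <= remaining_time (5)
 *			-> timeout = 2, remaining_time becomes 3
 *	2nd schedule: duration (2) <= remaining_time (3)
 *			-> timeout = 2, remaining_time becomes 1
 *	3rd schedule: duration (2) >  remaining_time (1)
 *			-> timeout = 1, remaining_time becomes 0
 *
 * After the third pass the instance has no lifetime left, so the next
 * expiry lets hci_req_clear_adv_instance() remove it.
 */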
2185 
2186 static void cancel_adv_timeout(struct hci_dev *hdev)
2187 {
2188 	if (hdev->adv_instance_timeout) {
2189 		hdev->adv_instance_timeout = 0;
2190 		cancel_delayed_work(&hdev->adv_instance_expire);
2191 	}
2192 }
2193 
2194 /* For a single instance:
2195  * - force == true: The instance will be removed even when its remaining
2196  *   lifetime is not zero.
2197  * - force == false: The instance will be deactivated but kept stored unless
2198  *   the remaining lifetime is zero.
2199  *
2200  * For instance == 0x00:
2201  * - force == true: All instances will be removed regardless of their timeout
2202  *   setting.
2203  * - force == false: Only instances that have a timeout will be removed.
2204  */
2205 void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
2206 				struct hci_request *req, u8 instance,
2207 				bool force)
2208 {
2209 	struct adv_info *adv_instance, *n, *next_instance = NULL;
2210 	int err;
2211 	u8 rem_inst;
2212 
2213 	/* Cancel any timeout concerning the removed instance(s). */
2214 	if (!instance || hdev->cur_adv_instance == instance)
2215 		cancel_adv_timeout(hdev);
2216 
2217 	/* Get the next instance to advertise BEFORE we remove
2218 	 * the current one. This can be the same instance again
2219 	 * if there is only one instance.
2220 	 */
2221 	if (instance && hdev->cur_adv_instance == instance)
2222 		next_instance = hci_get_next_instance(hdev, instance);
2223 
2224 	if (instance == 0x00) {
2225 		list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
2226 					 list) {
2227 			if (!(force || adv_instance->timeout))
2228 				continue;
2229 
2230 			rem_inst = adv_instance->instance;
2231 			err = hci_remove_adv_instance(hdev, rem_inst);
2232 			if (!err)
2233 				mgmt_advertising_removed(sk, hdev, rem_inst);
2234 		}
2235 	} else {
2236 		adv_instance = hci_find_adv_instance(hdev, instance);
2237 
2238 		if (force || (adv_instance && adv_instance->timeout &&
2239 			      !adv_instance->remaining_time)) {
2240 			/* Don't advertise a removed instance. */
2241 			if (next_instance &&
2242 			    next_instance->instance == instance)
2243 				next_instance = NULL;
2244 
2245 			err = hci_remove_adv_instance(hdev, instance);
2246 			if (!err)
2247 				mgmt_advertising_removed(sk, hdev, instance);
2248 		}
2249 	}
2250 
2251 	if (!req || !hdev_is_powered(hdev) ||
2252 	    hci_dev_test_flag(hdev, HCI_ADVERTISING))
2253 		return;
2254 
2255 	if (next_instance && !ext_adv_capable(hdev))
2256 		__hci_req_schedule_adv_instance(req, next_instance->instance,
2257 						false);
2258 }
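
/* For example (hypothetical callers):
 *
 *	hci_req_clear_adv_instance(hdev, NULL, &req, 0x00, true);
 *		removes every instance unconditionally
 *	hci_req_clear_adv_instance(hdev, NULL, &req, 0x02, false);
 *		deactivates instance 0x02 but keeps it stored unless its
 *		remaining lifetime has already reached zero
 */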
2259 
2260 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
2261 {
2262 	struct hci_dev *hdev = req->hdev;
2263 
2264 	/* If we're advertising or initiating an LE connection we can't
2265 	 * go ahead and change the random address at this time. This is
2266 	 * because the eventual initiator address used for the
2267 	 * subsequently created connection will be undefined (some
2268 	 * controllers use the new address and others the one we had
2269 	 * when the operation started).
2270 	 *
2271 	 * In this kind of scenario skip the update and let the random
2272 	 * address be updated at the next cycle.
2273 	 */
2274 	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
2275 	    hci_lookup_le_connect(hdev)) {
2276 		BT_DBG("Deferring random address update");
2277 		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
2278 		return;
2279 	}
2280 
2281 	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
2282 }
2283 
2284 int hci_update_random_address(struct hci_request *req, bool require_privacy,
2285 			      bool use_rpa, u8 *own_addr_type)
2286 {
2287 	struct hci_dev *hdev = req->hdev;
2288 	int err;
2289 
2290 	/* If privacy is enabled use a resolvable private address. If
2291 	/* If privacy is enabled use a resolvable private address. If the
2292 	 * current RPA has expired, or something other than the current
2293 	 * RPA is in use, then generate a new one.
2294 	if (use_rpa) {
2295 		int to;
2296 
2297 		/* If the controller supports LL Privacy, use own address
2298 		 * type 0x03 (RPA resolved by the controller).
2299 		 */
2300 		if (use_ll_privacy(hdev))
2301 			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
2302 		else
2303 			*own_addr_type = ADDR_LE_DEV_RANDOM;
2304 
2305 		if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
2306 		    !bacmp(&hdev->random_addr, &hdev->rpa))
2307 			return 0;
2308 
2309 		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
2310 		if (err < 0) {
2311 			bt_dev_err(hdev, "failed to generate new RPA");
2312 			return err;
2313 		}
2314 
2315 		set_random_addr(req, &hdev->rpa);
2316 
2317 		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
2318 		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
2319 
2320 		return 0;
2321 	}
2322 
2323 	/* In case of required privacy without resolvable private address,
2324 	 * use a non-resolvable private address. This is useful for active
2325 	 * scanning and non-connectable advertising.
2326 	 */
2327 	if (require_privacy) {
2328 		bdaddr_t nrpa;
2329 
2330 		while (true) {
2331 			/* The non-resolvable private address is generated
2332 	 * from six random bytes with the two most significant
2333 			 * bits cleared.
2334 			 */
2335 			get_random_bytes(&nrpa, 6);
2336 			nrpa.b[5] &= 0x3f;
2337 
2338 			/* The non-resolvable private address shall not be
2339 			 * equal to the public address.
2340 			 */
2341 			if (bacmp(&hdev->bdaddr, &nrpa))
2342 				break;
2343 		}
2344 
2345 		*own_addr_type = ADDR_LE_DEV_RANDOM;
2346 		set_random_addr(req, &nrpa);
2347 		return 0;
2348 	}
2349 
2350 	/* If forcing static address is in use or there is no public
2351 	 * address use the static address as random address (but skip
2352 	 * the HCI command if the current random address is already the
2353 	 * static one).
2354 	 *
2355 	 * In case BR/EDR has been disabled on a dual-mode controller
2356 	 * and a static address has been configured, then use that
2357 	 * address instead of the public BR/EDR address.
2358 	 */
2359 	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2360 	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2361 	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2362 	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
2363 		*own_addr_type = ADDR_LE_DEV_RANDOM;
2364 		if (bacmp(&hdev->static_addr, &hdev->random_addr))
2365 			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
2366 				    &hdev->static_addr);
2367 		return 0;
2368 	}
2369 
2370 	/* Neither privacy nor static address is being used so use a
2371 	 * public address.
2372 	 */
2373 	*own_addr_type = ADDR_LE_DEV_PUBLIC;
2374 
2375 	return 0;
2376 }
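
/* Summary of the own-address decision above:
 *
 *	use_rpa			-> random, hdev->rpa (own address type
 *				   0x03 when LL Privacy is supported)
 *	require_privacy		-> random, freshly generated NRPA
 *	static addr needed	-> random, hdev->static_addr
 *	otherwise		-> public, hdev->bdaddr
 */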
2377 
2378 static bool disconnected_whitelist_entries(struct hci_dev *hdev)
2379 {
2380 	struct bdaddr_list *b;
2381 
2382 	list_for_each_entry(b, &hdev->whitelist, list) {
2383 		struct hci_conn *conn;
2384 
2385 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
2386 		if (!conn)
2387 			return true;
2388 
2389 		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2390 			return true;
2391 	}
2392 
2393 	return false;
2394 }
2395 
2396 void __hci_req_update_scan(struct hci_request *req)
2397 {
2398 	struct hci_dev *hdev = req->hdev;
2399 	u8 scan;
2400 
2401 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2402 		return;
2403 
2404 	if (!hdev_is_powered(hdev))
2405 		return;
2406 
2407 	if (mgmt_powering_down(hdev))
2408 		return;
2409 
2410 	if (hdev->scanning_paused)
2411 		return;
2412 
2413 	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
2414 	    disconnected_whitelist_entries(hdev))
2415 		scan = SCAN_PAGE;
2416 	else
2417 		scan = SCAN_DISABLED;
2418 
2419 	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2420 		scan |= SCAN_INQUIRY;
2421 
2422 	if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
2423 	    test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
2424 		return;
2425 
2426 	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
2427 }
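
/* The scan value written above is a bitmask following the Write Scan
 * Enable encoding: a connectable and discoverable controller ends up
 * with
 *
 *	scan = SCAN_PAGE | SCAN_INQUIRY;	0x02 | 0x01 = 0x03
 *
 * which enables page scan and inquiry scan in a single command.
 */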
2428 
2429 static int update_scan(struct hci_request *req, unsigned long opt)
2430 {
2431 	hci_dev_lock(req->hdev);
2432 	__hci_req_update_scan(req);
2433 	hci_dev_unlock(req->hdev);
2434 	return 0;
2435 }
2436 
2437 static void scan_update_work(struct work_struct *work)
2438 {
2439 	struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
2440 
2441 	hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
2442 }
2443 
2444 static int connectable_update(struct hci_request *req, unsigned long opt)
2445 {
2446 	struct hci_dev *hdev = req->hdev;
2447 
2448 	hci_dev_lock(hdev);
2449 
2450 	__hci_req_update_scan(req);
2451 
2452 	/* If BR/EDR is not enabled and we disable advertising as a
2453 	 * by-product of disabling connectable, we need to update the
2454 	 * advertising flags.
2455 	 */
2456 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2457 		__hci_req_update_adv_data(req, hdev->cur_adv_instance);
2458 
2459 	/* Update the advertising parameters if necessary */
2460 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2461 	    !list_empty(&hdev->adv_instances)) {
2462 		if (ext_adv_capable(hdev))
2463 			__hci_req_start_ext_adv(req, hdev->cur_adv_instance);
2464 		else
2465 			__hci_req_enable_advertising(req);
2466 	}
2467 
2468 	__hci_update_background_scan(req);
2469 
2470 	hci_dev_unlock(hdev);
2471 
2472 	return 0;
2473 }
2474 
2475 static void connectable_update_work(struct work_struct *work)
2476 {
2477 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2478 					    connectable_update);
2479 	u8 status;
2480 
2481 	hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
2482 	mgmt_set_connectable_complete(hdev, status);
2483 }
2484 
2485 static u8 get_service_classes(struct hci_dev *hdev)
2486 {
2487 	struct bt_uuid *uuid;
2488 	u8 val = 0;
2489 
2490 	list_for_each_entry(uuid, &hdev->uuids, list)
2491 		val |= uuid->svc_hint;
2492 
2493 	return val;
2494 }
2495 
2496 void __hci_req_update_class(struct hci_request *req)
2497 {
2498 	struct hci_dev *hdev = req->hdev;
2499 	u8 cod[3];
2500 
2501 	BT_DBG("%s", hdev->name);
2502 
2503 	if (!hdev_is_powered(hdev))
2504 		return;
2505 
2506 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2507 		return;
2508 
2509 	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
2510 		return;
2511 
2512 	cod[0] = hdev->minor_class;
2513 	cod[1] = hdev->major_class;
2514 	cod[2] = get_service_classes(hdev);
2515 
2516 	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
2517 		cod[1] |= 0x20;
2518 
2519 	if (memcmp(cod, hdev->dev_class, 3) == 0)
2520 		return;
2521 
2522 	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
2523 }
2524 
2525 static void write_iac(struct hci_request *req)
2526 {
2527 	struct hci_dev *hdev = req->hdev;
2528 	struct hci_cp_write_current_iac_lap cp;
2529 
2530 	if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2531 		return;
2532 
2533 	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
2534 		/* Limited discoverable mode */
2535 		cp.num_iac = min_t(u8, hdev->num_iac, 2);
2536 		cp.iac_lap[0] = 0x00;	/* LIAC */
2537 		cp.iac_lap[1] = 0x8b;
2538 		cp.iac_lap[2] = 0x9e;
2539 		cp.iac_lap[3] = 0x33;	/* GIAC */
2540 		cp.iac_lap[4] = 0x8b;
2541 		cp.iac_lap[5] = 0x9e;
2542 	} else {
2543 		/* General discoverable mode */
2544 		cp.num_iac = 1;
2545 		cp.iac_lap[0] = 0x33;	/* GIAC */
2546 		cp.iac_lap[1] = 0x8b;
2547 		cp.iac_lap[2] = 0x9e;
2548 	}
2549 
2550 	hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
2551 		    (cp.num_iac * 3) + 1, &cp);
2552 }
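
/* The LAPs above are the standard inquiry access codes in little-endian
 * byte order: 0x00 0x8b 0x9e is the LIAC (0x9e8b00) and 0x33 0x8b 0x9e
 * is the GIAC (0x9e8b33). Limited discoverable mode therefore answers
 * both codes, while general discoverable mode answers only the GIAC.
 */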
2553 
2554 static int discoverable_update(struct hci_request *req, unsigned long opt)
2555 {
2556 	struct hci_dev *hdev = req->hdev;
2557 
2558 	hci_dev_lock(hdev);
2559 
2560 	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2561 		write_iac(req);
2562 		__hci_req_update_scan(req);
2563 		__hci_req_update_class(req);
2564 	}
2565 
2566 	/* Advertising instances don't use the global discoverable setting, so
2567 	 * only update AD if advertising was enabled using Set Advertising.
2568 	 */
2569 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2570 		__hci_req_update_adv_data(req, 0x00);
2571 
2572 		/* Discoverable mode affects the local advertising
2573 		 * address in limited privacy mode.
2574 		 */
2575 		if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
2576 			if (ext_adv_capable(hdev))
2577 				__hci_req_start_ext_adv(req, 0x00);
2578 			else
2579 				__hci_req_enable_advertising(req);
2580 		}
2581 	}
2582 
2583 	hci_dev_unlock(hdev);
2584 
2585 	return 0;
2586 }
2587 
2588 static void discoverable_update_work(struct work_struct *work)
2589 {
2590 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2591 					    discoverable_update);
2592 	u8 status;
2593 
2594 	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
2595 	mgmt_set_discoverable_complete(hdev, status);
2596 }
2597 
2598 void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
2599 		      u8 reason)
2600 {
2601 	switch (conn->state) {
2602 	case BT_CONNECTED:
2603 	case BT_CONFIG:
2604 		if (conn->type == AMP_LINK) {
2605 			struct hci_cp_disconn_phy_link cp;
2606 
2607 			cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
2608 			cp.reason = reason;
2609 			hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
2610 				    &cp);
2611 		} else {
2612 			struct hci_cp_disconnect dc;
2613 
2614 			dc.handle = cpu_to_le16(conn->handle);
2615 			dc.reason = reason;
2616 			hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2617 		}
2618 
2619 		conn->state = BT_DISCONN;
2620 
2621 		break;
2622 	case BT_CONNECT:
2623 		if (conn->type == LE_LINK) {
2624 			if (test_bit(HCI_CONN_SCANNING, &conn->flags))
2625 				break;
2626 			hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
2627 				    0, NULL);
2628 		} else if (conn->type == ACL_LINK) {
2629 			if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
2630 				break;
2631 			hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
2632 				    6, &conn->dst);
2633 		}
2634 		break;
2635 	case BT_CONNECT2:
2636 		if (conn->type == ACL_LINK) {
2637 			struct hci_cp_reject_conn_req rej;
2638 
2639 			bacpy(&rej.bdaddr, &conn->dst);
2640 			rej.reason = reason;
2641 
2642 			hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
2643 				    sizeof(rej), &rej);
2644 		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
2645 			struct hci_cp_reject_sync_conn_req rej;
2646 
2647 			bacpy(&rej.bdaddr, &conn->dst);
2648 
2649 			/* SCO rejection has its own limited set of
2650 			 * allowed error values (0x0D-0x0F) which isn't
2651 			 * compatible with most values passed to this
2652 			 * function. To be safe, hard-code one of the
2653 			 * values that's suitable for SCO.
2654 			 */
2655 			rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
2656 
2657 			hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
2658 				    sizeof(rej), &rej);
2659 		}
2660 		break;
2661 	default:
2662 		conn->state = BT_CLOSED;
2663 		break;
2664 	}
2665 }
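
/* Summary of the state-to-command mapping above:
 *
 *	BT_CONNECTED/BT_CONFIG	-> Disconnect (Disconnect Physical Link
 *				   for AMP links)
 *	BT_CONNECT  (LE)	-> LE Create Connection Cancel
 *	BT_CONNECT  (ACL)	-> Create Connection Cancel (BT >= 1.2)
 *	BT_CONNECT2 (ACL)	-> Reject Connection Request
 *	BT_CONNECT2 (SCO/eSCO)	-> Reject Synchronous Connection Request
 *	anything else		-> no command, state forced to BT_CLOSED
 */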
2666 
2667 static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2668 {
2669 	if (status)
2670 		BT_DBG("Failed to abort connection: status 0x%2.2x", status);
2671 }
2672 
2673 int hci_abort_conn(struct hci_conn *conn, u8 reason)
2674 {
2675 	struct hci_request req;
2676 	int err;
2677 
2678 	hci_req_init(&req, conn->hdev);
2679 
2680 	__hci_abort_conn(&req, conn, reason);
2681 
2682 	err = hci_req_run(&req, abort_conn_complete);
2683 	if (err && err != -ENODATA) {
2684 		bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
2685 		return err;
2686 	}
2687 
2688 	return 0;
2689 }
2690 
2691 static int update_bg_scan(struct hci_request *req, unsigned long opt)
2692 {
2693 	hci_dev_lock(req->hdev);
2694 	__hci_update_background_scan(req);
2695 	hci_dev_unlock(req->hdev);
2696 	return 0;
2697 }
2698 
2699 static void bg_scan_update(struct work_struct *work)
2700 {
2701 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2702 					    bg_scan_update);
2703 	struct hci_conn *conn;
2704 	u8 status;
2705 	int err;
2706 
2707 	err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
2708 	if (!err)
2709 		return;
2710 
2711 	hci_dev_lock(hdev);
2712 
2713 	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
2714 	if (conn)
2715 		hci_le_conn_failed(conn, status);
2716 
2717 	hci_dev_unlock(hdev);
2718 }
2719 
2720 static int le_scan_disable(struct hci_request *req, unsigned long opt)
2721 {
2722 	hci_req_add_le_scan_disable(req, false);
2723 	return 0;
2724 }
2725 
2726 static int bredr_inquiry(struct hci_request *req, unsigned long opt)
2727 {
2728 	u8 length = opt;
2729 	const u8 giac[3] = { 0x33, 0x8b, 0x9e };
2730 	const u8 liac[3] = { 0x00, 0x8b, 0x9e };
2731 	struct hci_cp_inquiry cp;
2732 
2733 	BT_DBG("%s", req->hdev->name);
2734 
2735 	hci_dev_lock(req->hdev);
2736 	hci_inquiry_cache_flush(req->hdev);
2737 	hci_dev_unlock(req->hdev);
2738 
2739 	memset(&cp, 0, sizeof(cp));
2740 
2741 	if (req->hdev->discovery.limited)
2742 		memcpy(&cp.lap, liac, sizeof(cp.lap));
2743 	else
2744 		memcpy(&cp.lap, giac, sizeof(cp.lap));
2745 
2746 	cp.length = length;
2747 
2748 	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2749 
2750 	return 0;
2751 }
2752 
2753 static void le_scan_disable_work(struct work_struct *work)
2754 {
2755 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2756 					    le_scan_disable.work);
2757 	u8 status;
2758 
2759 	BT_DBG("%s", hdev->name);
2760 
2761 	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2762 		return;
2763 
2764 	cancel_delayed_work(&hdev->le_scan_restart);
2765 
2766 	hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
2767 	if (status) {
2768 		bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
2769 			   status);
2770 		return;
2771 	}
2772 
2773 	hdev->discovery.scan_start = 0;
2774 
2775 	/* If we were running an LE-only scan, change the discovery
2776 	 * state. If we were running both LE and BR/EDR inquiry
2777 	 * simultaneously, and BR/EDR inquiry is already finished, stop
2778 	 * discovery; otherwise BR/EDR inquiry will stop discovery when
2779 	 * finished. If we are resolving a remote device name, do not
2780 	 * change the discovery state.
2781 	 */
2782 
2783 	if (hdev->discovery.type == DISCOV_TYPE_LE)
2784 		goto discov_stopped;
2785 
2786 	if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
2787 		return;
2788 
2789 	if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
2790 		if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
2791 		    hdev->discovery.state != DISCOVERY_RESOLVING)
2792 			goto discov_stopped;
2793 
2794 		return;
2795 	}
2796 
2797 	hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
2798 		     HCI_CMD_TIMEOUT, &status);
2799 	if (status) {
2800 		bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
2801 		goto discov_stopped;
2802 	}
2803 
2804 	return;
2805 
2806 discov_stopped:
2807 	hci_dev_lock(hdev);
2808 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2809 	hci_dev_unlock(hdev);
2810 }
2811 
2812 static int le_scan_restart(struct hci_request *req, unsigned long opt)
2813 {
2814 	struct hci_dev *hdev = req->hdev;
2815 
2816 	/* If controller is not scanning we are done. */
2817 	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2818 		return 0;
2819 
2820 	if (hdev->scanning_paused) {
2821 		bt_dev_dbg(hdev, "Scanning is paused for suspend");
2822 		return 0;
2823 	}
2824 
2825 	hci_req_add_le_scan_disable(req, false);
2826 
2827 	if (use_ext_scan(hdev)) {
2828 		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
2829 
2830 		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
2831 		ext_enable_cp.enable = LE_SCAN_ENABLE;
2832 		ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2833 
2834 		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
2835 			    sizeof(ext_enable_cp), &ext_enable_cp);
2836 	} else {
2837 		struct hci_cp_le_set_scan_enable cp;
2838 
2839 		memset(&cp, 0, sizeof(cp));
2840 		cp.enable = LE_SCAN_ENABLE;
2841 		cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2842 		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2843 	}
2844 
2845 	return 0;
2846 }
2847 
2848 static void le_scan_restart_work(struct work_struct *work)
2849 {
2850 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2851 					    le_scan_restart.work);
2852 	unsigned long timeout, duration, scan_start, now;
2853 	u8 status;
2854 
2855 	BT_DBG("%s", hdev->name);
2856 
2857 	hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
2858 	if (status) {
2859 		bt_dev_err(hdev, "failed to restart LE scan: status %d",
2860 			   status);
2861 		return;
2862 	}
2863 
2864 	hci_dev_lock(hdev);
2865 
2866 	if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
2867 	    !hdev->discovery.scan_start)
2868 		goto unlock;
2869 
2870 	/* When the scan was started, hdev->le_scan_disable was queued to
2871 	 * run at scan_start + duration. During the scan restart this work
2872 	 * has been canceled, and we need to queue it again with the proper
2873 	 * timeout to make sure that the scan does not run indefinitely.
2874 	 */
2875 	duration = hdev->discovery.scan_duration;
2876 	scan_start = hdev->discovery.scan_start;
2877 	now = jiffies;
2878 	if (now - scan_start <= duration) {
2879 		int elapsed;
2880 
2881 		if (now >= scan_start)
2882 			elapsed = now - scan_start;
2883 		else
2884 			elapsed = ULONG_MAX - scan_start + now;
2885 
2886 		timeout = duration - elapsed;
2887 	} else {
2888 		timeout = 0;
2889 	}
2890 
2891 	queue_delayed_work(hdev->req_workqueue,
2892 			   &hdev->le_scan_disable, timeout);
2893 
2894 unlock:
2895 	hci_dev_unlock(hdev);
2896 }
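
/* Worked example of the elapsed-time math above with a jiffies
 * wraparound (illustrative numbers): if scan_start = ULONG_MAX - 100
 * and now = 50, then now < scan_start, so
 *
 *	elapsed = ULONG_MAX - scan_start + now = 100 + 50 = 150
 *
 * and the rescheduled timeout is duration - 150, or 0 when the scan
 * duration has already been exceeded.
 */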
2897 
2898 static int active_scan(struct hci_request *req, unsigned long opt)
2899 {
2900 	uint16_t interval = opt;
2901 	struct hci_dev *hdev = req->hdev;
2902 	u8 own_addr_type;
2903 	/* White list is not used for discovery */
2904 	u8 filter_policy = 0x00;
2905 	/* Discovery doesn't require controller address resolution */
2906 	bool addr_resolv = false;
2907 	int err;
2908 
2909 	BT_DBG("%s", hdev->name);
2910 
2911 	/* If controller is scanning, it means the background scanning is
2912 	 * running. Thus, we should temporarily stop it in order to set the
2913 	 * discovery scanning parameters.
2914 	 */
2915 	if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
2916 		hci_req_add_le_scan_disable(req, false);
2917 
2918 	/* All active scans will be done with either a resolvable private
2919 	 * address (when the privacy feature has been enabled) or a
2920 	 * non-resolvable private address.
2921 	 */
2922 	err = hci_update_random_address(req, true, scan_use_rpa(hdev),
2923 					&own_addr_type);
2924 	if (err < 0)
2925 		own_addr_type = ADDR_LE_DEV_PUBLIC;
2926 
2927 	hci_req_start_scan(req, LE_SCAN_ACTIVE, interval,
2928 			   hdev->le_scan_window_discovery, own_addr_type,
2929 			   filter_policy, addr_resolv);
2930 	return 0;
2931 }
2932 
2933 static int interleaved_discov(struct hci_request *req, unsigned long opt)
2934 {
2935 	int err;
2936 
2937 	BT_DBG("%s", req->hdev->name);
2938 
2939 	err = active_scan(req, opt);
2940 	if (err)
2941 		return err;
2942 
2943 	return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
2944 }
2945 
2946 static void start_discovery(struct hci_dev *hdev, u8 *status)
2947 {
2948 	unsigned long timeout;
2949 
2950 	BT_DBG("%s type %u", hdev->name, hdev->discovery.type);
2951 
2952 	switch (hdev->discovery.type) {
2953 	case DISCOV_TYPE_BREDR:
2954 		if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
2955 			hci_req_sync(hdev, bredr_inquiry,
2956 				     DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
2957 				     status);
2958 		return;
2959 	case DISCOV_TYPE_INTERLEAVED:
2960 		/* When running simultaneous discovery, the LE scanning time
2961 		 * should occupy the whole discovery time since BR/EDR inquiry
2962 		 * and LE scanning are scheduled by the controller.
2963 		 *
2964 		 * For interleaved discovery, in comparison, BR/EDR inquiry
2965 		 * and LE scanning are done sequentially with separate
2966 		 * timeouts.
2967 		 */
2968 		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
2969 			     &hdev->quirks)) {
2970 			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2971 			/* During simultaneous discovery, we double the LE scan
2972 			 * interval. We must leave some time for the controller
2973 			 * to do BR/EDR inquiry.
2974 			 */
2975 			hci_req_sync(hdev, interleaved_discov,
2976 				     hdev->le_scan_int_discovery * 2, HCI_CMD_TIMEOUT,
2977 				     status);
2978 			break;
2979 		}
2980 
2981 		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
2982 		hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
2983 			     HCI_CMD_TIMEOUT, status);
2984 		break;
2985 	case DISCOV_TYPE_LE:
2986 		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2987 		hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
2988 			     HCI_CMD_TIMEOUT, status);
2989 		break;
2990 	default:
2991 		*status = HCI_ERROR_UNSPECIFIED;
2992 		return;
2993 	}
2994 
2995 	if (*status)
2996 		return;
2997 
2998 	BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));
2999 
3000 	/* When service discovery is used and the controller has a
3001 	 * strict duplicate filter, it is important to remember the
3002 	 * start and duration of the scan. This is required for
3003 	 * restarting scanning during the discovery phase.
3004 	 */
3005 	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
3006 	    hdev->discovery.result_filtering) {
3007 		hdev->discovery.scan_start = jiffies;
3008 		hdev->discovery.scan_duration = timeout;
3009 	}
3010 
3011 	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
3012 			   timeout);
3013 }
3014 
3015 bool hci_req_stop_discovery(struct hci_request *req)
3016 {
3017 	struct hci_dev *hdev = req->hdev;
3018 	struct discovery_state *d = &hdev->discovery;
3019 	struct hci_cp_remote_name_req_cancel cp;
3020 	struct inquiry_entry *e;
3021 	bool ret = false;
3022 
3023 	BT_DBG("%s state %u", hdev->name, hdev->discovery.state);
3024 
3025 	if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
3026 		if (test_bit(HCI_INQUIRY, &hdev->flags))
3027 			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3028 
3029 		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
3030 			cancel_delayed_work(&hdev->le_scan_disable);
3031 			hci_req_add_le_scan_disable(req, false);
3032 		}
3033 
3034 		ret = true;
3035 	} else {
3036 		/* Passive scanning */
3037 		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
3038 			hci_req_add_le_scan_disable(req, false);
3039 			ret = true;
3040 		}
3041 	}
3042 
3043 	/* No further actions needed for LE-only discovery */
3044 	if (d->type == DISCOV_TYPE_LE)
3045 		return ret;
3046 
3047 	if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
3048 		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
3049 						     NAME_PENDING);
3050 		if (!e)
3051 			return ret;
3052 
3053 		bacpy(&cp.bdaddr, &e->data.bdaddr);
3054 		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
3055 			    &cp);
3056 		ret = true;
3057 	}
3058 
3059 	return ret;
3060 }
3061 
3062 static int stop_discovery(struct hci_request *req, unsigned long opt)
3063 {
3064 	hci_dev_lock(req->hdev);
3065 	hci_req_stop_discovery(req);
3066 	hci_dev_unlock(req->hdev);
3067 
3068 	return 0;
3069 }
3070 
3071 static void discov_update(struct work_struct *work)
3072 {
3073 	struct hci_dev *hdev = container_of(work, struct hci_dev,
3074 					    discov_update);
3075 	u8 status = 0;
3076 
3077 	switch (hdev->discovery.state) {
3078 	case DISCOVERY_STARTING:
3079 		start_discovery(hdev, &status);
3080 		mgmt_start_discovery_complete(hdev, status);
3081 		if (status)
3082 			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3083 		else
3084 			hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3085 		break;
3086 	case DISCOVERY_STOPPING:
3087 		hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
3088 		mgmt_stop_discovery_complete(hdev, status);
3089 		if (!status)
3090 			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3091 		break;
3092 	case DISCOVERY_STOPPED:
3093 	default:
3094 		return;
3095 	}
3096 }
3097 
3098 static void discov_off(struct work_struct *work)
3099 {
3100 	struct hci_dev *hdev = container_of(work, struct hci_dev,
3101 					    discov_off.work);
3102 
3103 	BT_DBG("%s", hdev->name);
3104 
3105 	hci_dev_lock(hdev);
3106 
3107 	/* When the discoverable timeout triggers, just make sure
3108 	 * the limited discoverable flag is cleared. Even in the case
3109 	 * of a timeout triggered from general discoverable, it is
3110 	 * safe to unconditionally clear the flag.
3111 	 */
3112 	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
3113 	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
3114 	hdev->discov_timeout = 0;
3115 
3116 	hci_dev_unlock(hdev);
3117 
3118 	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
3119 	mgmt_new_settings(hdev);
3120 }
3121 
3122 static int powered_update_hci(struct hci_request *req, unsigned long opt)
3123 {
3124 	struct hci_dev *hdev = req->hdev;
3125 	u8 link_sec;
3126 
3127 	hci_dev_lock(hdev);
3128 
3129 	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
3130 	    !lmp_host_ssp_capable(hdev)) {
3131 		u8 mode = 0x01;
3132 
3133 		hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
3134 
3135 		if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
3136 			u8 support = 0x01;
3137 
3138 			hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
3139 				    sizeof(support), &support);
3140 		}
3141 	}
3142 
3143 	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
3144 	    lmp_bredr_capable(hdev)) {
3145 		struct hci_cp_write_le_host_supported cp;
3146 
3147 		cp.le = 0x01;
3148 		cp.simul = 0x00;
3149 
3150 		/* Check first if we already have the right
3151 		 * host state (host features set)
3152 		 */
3153 		if (cp.le != lmp_host_le_capable(hdev) ||
3154 		    cp.simul != lmp_host_le_br_capable(hdev))
3155 			hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
3156 				    sizeof(cp), &cp);
3157 	}
3158 
3159 	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
3160 		/* Make sure the controller has a good default for
3161 		 * advertising data. This also applies to the case
3162 		 * where BR/EDR was toggled during the AUTO_OFF phase.
3163 		 */
3164 		if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
3165 		    list_empty(&hdev->adv_instances)) {
3166 			int err;
3167 
3168 			if (ext_adv_capable(hdev)) {
3169 				err = __hci_req_setup_ext_adv_instance(req,
3170 								       0x00);
3171 				if (!err)
3172 					__hci_req_update_scan_rsp_data(req,
3173 								       0x00);
3174 			} else {
3175 				err = 0;
3176 				__hci_req_update_adv_data(req, 0x00);
3177 				__hci_req_update_scan_rsp_data(req, 0x00);
3178 			}
3179 
3180 			if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
3181 				if (!ext_adv_capable(hdev))
3182 					__hci_req_enable_advertising(req);
3183 				else if (!err)
3184 					__hci_req_enable_ext_advertising(req,
3185 									 0x00);
3186 			}
3187 		} else if (!list_empty(&hdev->adv_instances)) {
3188 			struct adv_info *adv_instance;
3189 
3190 			adv_instance = list_first_entry(&hdev->adv_instances,
3191 							struct adv_info, list);
3192 			__hci_req_schedule_adv_instance(req,
3193 							adv_instance->instance,
3194 							true);
3195 		}
3196 	}
3197 
3198 	link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
3199 	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
3200 		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
3201 			    sizeof(link_sec), &link_sec);
3202 
3203 	if (lmp_bredr_capable(hdev)) {
3204 		if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
3205 			__hci_req_write_fast_connectable(req, true);
3206 		else
3207 			__hci_req_write_fast_connectable(req, false);
3208 		__hci_req_update_scan(req);
3209 		__hci_req_update_class(req);
3210 		__hci_req_update_name(req);
3211 		__hci_req_update_eir(req);
3212 	}
3213 
3214 	hci_dev_unlock(hdev);
3215 	return 0;
3216 }
3217 
3218 int __hci_req_hci_power_on(struct hci_dev *hdev)
3219 {
3220 	/* Register the available SMP channels (BR/EDR and LE) only when
3221 	 * successfully powering on the controller. This late
3222 	 * registration is required so that LE SMP can clearly decide if
3223 	 * the public address or static address is used.
3224 	 */
3225 	smp_register(hdev);
3226 
3227 	return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
3228 			      NULL);
3229 }
3230 
3231 void hci_request_setup(struct hci_dev *hdev)
3232 {
3233 	INIT_WORK(&hdev->discov_update, discov_update);
3234 	INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
3235 	INIT_WORK(&hdev->scan_update, scan_update_work);
3236 	INIT_WORK(&hdev->connectable_update, connectable_update_work);
3237 	INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
3238 	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
3239 	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3240 	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
3241 	INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
3242 }
3243 
3244 void hci_request_cancel_all(struct hci_dev *hdev)
3245 {
3246 	hci_req_sync_cancel(hdev, ENODEV);
3247 
3248 	cancel_work_sync(&hdev->discov_update);
3249 	cancel_work_sync(&hdev->bg_scan_update);
3250 	cancel_work_sync(&hdev->scan_update);
3251 	cancel_work_sync(&hdev->connectable_update);
3252 	cancel_work_sync(&hdev->discoverable_update);
3253 	cancel_delayed_work_sync(&hdev->discov_off);
3254 	cancel_delayed_work_sync(&hdev->le_scan_disable);
3255 	cancel_delayed_work_sync(&hdev->le_scan_restart);
3256 
3257 	if (hdev->adv_instance_timeout) {
3258 		cancel_delayed_work_sync(&hdev->adv_instance_expire);
3259 		hdev->adv_instance_timeout = 0;
3260 	}
3261 }
3262