xref: /openbmc/linux/net/bluetooth/hci_sync.c (revision 44ad3baf1cca483e418b6aadf2d3994f69e0f16a)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * BlueZ - Bluetooth protocol stack for Linux
4  *
5  * Copyright (C) 2021 Intel Corporation
6  * Copyright 2023 NXP
7  */
8 
9 #include <linux/property.h>
10 
11 #include <net/bluetooth/bluetooth.h>
12 #include <net/bluetooth/hci_core.h>
13 #include <net/bluetooth/mgmt.h>
14 
15 #include "hci_request.h"
16 #include "hci_codec.h"
17 #include "hci_debugfs.h"
18 #include "smp.h"
19 #include "eir.h"
20 #include "msft.h"
21 #include "aosp.h"
22 #include "leds.h"
23 
24 static void hci_cmd_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
25 				  struct sk_buff *skb)
26 {
27 	bt_dev_dbg(hdev, "result 0x%2.2x", result);
28 
29 	if (hdev->req_status != HCI_REQ_PEND)
30 		return;
31 
32 	hdev->req_result = result;
33 	hdev->req_status = HCI_REQ_DONE;
34 
35 	/* Free the request command so it is not used as the response */
36 	kfree_skb(hdev->req_skb);
37 	hdev->req_skb = NULL;
38 
39 	if (skb) {
40 		struct sock *sk = hci_skb_sk(skb);
41 
42 		/* Drop sk reference if set */
43 		if (sk)
44 			sock_put(sk);
45 
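		/* Hold an extra reference so the response survives until the
		 * waiter in __hci_cmd_sync_sk() consumes hdev->req_rsp.
		 */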
46 		hdev->req_rsp = skb_get(skb);
47 	}
48 
49 	wake_up_interruptible(&hdev->req_wait_q);
50 }
51 
52 static struct sk_buff *hci_cmd_sync_alloc(struct hci_dev *hdev, u16 opcode,
53 					  u32 plen, const void *param,
54 					  struct sock *sk)
55 {
56 	int len = HCI_COMMAND_HDR_SIZE + plen;
57 	struct hci_command_hdr *hdr;
58 	struct sk_buff *skb;
59 
60 	skb = bt_skb_alloc(len, GFP_ATOMIC);
61 	if (!skb)
62 		return NULL;
63 
64 	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
65 	hdr->opcode = cpu_to_le16(opcode);
66 	hdr->plen   = plen;
67 
68 	if (plen)
69 		skb_put_data(skb, param, plen);
70 
71 	bt_dev_dbg(hdev, "skb len %d", skb->len);
72 
73 	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
74 	hci_skb_opcode(skb) = opcode;
75 
76 	/* Grab a reference if command needs to be associated with a sock (e.g.
77 	 * likely mgmt socket that initiated the command).
78 	 */
79 	if (sk) {
80 		hci_skb_sk(skb) = sk;
81 		sock_hold(sk);
82 	}
83 
84 	return skb;
85 }
86 
87 static void hci_cmd_sync_add(struct hci_request *req, u16 opcode, u32 plen,
88 			     const void *param, u8 event, struct sock *sk)
89 {
90 	struct hci_dev *hdev = req->hdev;
91 	struct sk_buff *skb;
92 
93 	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
94 
95 	/* If an error occurred during request building, there is no point in
96 	 * queueing the HCI command. We can simply return.
97 	 */
98 	if (req->err)
99 		return;
100 
101 	skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, sk);
102 	if (!skb) {
103 		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
104 			   opcode);
105 		req->err = -ENOMEM;
106 		return;
107 	}
108 
109 	if (skb_queue_empty(&req->cmd_q))
110 		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
111 
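	/* If a specific event is expected, the request completes on that
	 * event instead of the default Command Status/Complete.
	 */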
112 	hci_skb_event(skb) = event;
113 
114 	skb_queue_tail(&req->cmd_q, skb);
115 }
116 
117 static int hci_req_sync_run(struct hci_request *req)
118 {
119 	struct hci_dev *hdev = req->hdev;
120 	struct sk_buff *skb;
121 	unsigned long flags;
122 
123 	bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q));
124 
125 	/* If an error occurred during request building, remove all HCI
126 	 * commands queued on the HCI request queue.
127 	 */
128 	if (req->err) {
129 		skb_queue_purge(&req->cmd_q);
130 		return req->err;
131 	}
132 
133 	/* Do not allow empty requests */
134 	if (skb_queue_empty(&req->cmd_q))
135 		return -ENODATA;
136 
137 	skb = skb_peek_tail(&req->cmd_q);
138 	bt_cb(skb)->hci.req_complete_skb = hci_cmd_sync_complete;
139 	bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
140 
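	/* Splice the request onto the device command queue and kick off
	 * command processing.
	 */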
141 	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
142 	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
143 	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
144 
145 	queue_work(hdev->workqueue, &hdev->cmd_work);
146 
147 	return 0;
148 }
149 
150 /* This function requires the caller holds hdev->req_lock. */
151 struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
152 				  const void *param, u8 event, u32 timeout,
153 				  struct sock *sk)
154 {
155 	struct hci_request req;
156 	struct sk_buff *skb;
157 	int err = 0;
158 
159 	bt_dev_dbg(hdev, "Opcode 0x%4.4x", opcode);
160 
161 	hci_req_init(&req, hdev);
162 
163 	hci_cmd_sync_add(&req, opcode, plen, param, event, sk);
164 
165 	hdev->req_status = HCI_REQ_PEND;
166 
167 	err = hci_req_sync_run(&req);
168 	if (err < 0)
169 		return ERR_PTR(err);
170 
171 	err = wait_event_interruptible_timeout(hdev->req_wait_q,
172 					       hdev->req_status != HCI_REQ_PEND,
173 					       timeout);
174 
175 	if (err == -ERESTARTSYS)
176 		return ERR_PTR(-EINTR);
177 
178 	switch (hdev->req_status) {
179 	case HCI_REQ_DONE:
180 		err = -bt_to_errno(hdev->req_result);
181 		break;
182 
183 	case HCI_REQ_CANCELED:
184 		err = -hdev->req_result;
185 		break;
186 
187 	default:
188 		err = -ETIMEDOUT;
189 		break;
190 	}
191 
192 	hdev->req_status = 0;
193 	hdev->req_result = 0;
194 	skb = hdev->req_rsp;
195 	hdev->req_rsp = NULL;
196 
197 	bt_dev_dbg(hdev, "end: err %d", err);
198 
199 	if (err < 0) {
200 		kfree_skb(skb);
201 		return ERR_PTR(err);
202 	}
203 
204 	/* If the command returns only a status event, skb will be NULL as
205 	 * there are no response parameters.
206 	 */
207 	if (!skb)
208 		return ERR_PTR(-ENODATA);
209 
210 	return skb;
211 }
212 EXPORT_SYMBOL(__hci_cmd_sync_sk);
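
/* Illustrative usage sketch (not part of this file): issue a command while
 * holding the req_lock, e.g. reading the local version information:
 *
 *	struct sk_buff *skb;
 *
 *	hci_req_sync_lock(hdev);
 *	skb = __hci_cmd_sync_sk(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *				0, HCI_CMD_TIMEOUT, NULL);
 *	hci_req_sync_unlock(hdev);
 *	if (!IS_ERR(skb))
 *		kfree_skb(skb);
 */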
213 
214 /* This function requires the caller holds hdev->req_lock. */
215 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
216 			       const void *param, u32 timeout)
217 {
218 	return __hci_cmd_sync_sk(hdev, opcode, plen, param, 0, timeout, NULL);
219 }
220 EXPORT_SYMBOL(__hci_cmd_sync);
221 
222 /* Send HCI command and wait for command complete event */
223 struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
224 			     const void *param, u32 timeout)
225 {
226 	struct sk_buff *skb;
227 
228 	if (!test_bit(HCI_UP, &hdev->flags))
229 		return ERR_PTR(-ENETDOWN);
230 
231 	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
232 
233 	hci_req_sync_lock(hdev);
234 	skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
235 	hci_req_sync_unlock(hdev);
236 
237 	return skb;
238 }
239 EXPORT_SYMBOL(hci_cmd_sync);
240 
241 /* This function requires the caller holds hdev->req_lock. */
242 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
243 				  const void *param, u8 event, u32 timeout)
244 {
245 	return __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout,
246 				 NULL);
247 }
248 EXPORT_SYMBOL(__hci_cmd_sync_ev);
249 
250 /* This function requires the caller holds hdev->req_lock. */
251 int __hci_cmd_sync_status_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
252 			     const void *param, u8 event, u32 timeout,
253 			     struct sock *sk)
254 {
255 	struct sk_buff *skb;
256 	u8 status;
257 
258 	skb = __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout, sk);
259 
260 	/* If the command returned only a status event, skb is ERR_PTR(-ENODATA) */
261 	if (skb == ERR_PTR(-ENODATA))
262 		return 0;
263 
264 	if (IS_ERR(skb)) {
265 		if (!event)
266 			bt_dev_err(hdev, "Opcode 0x%4.4x failed: %ld", opcode,
267 				   PTR_ERR(skb));
268 		return PTR_ERR(skb);
269 	}
270 
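	/* The first byte of the response parameters is the command status */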
271 	status = skb->data[0];
272 
273 	kfree_skb(skb);
274 
275 	return status;
276 }
277 EXPORT_SYMBOL(__hci_cmd_sync_status_sk);
278 
279 int __hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen,
280 			  const void *param, u32 timeout)
281 {
282 	return __hci_cmd_sync_status_sk(hdev, opcode, plen, param, 0, timeout,
283 					NULL);
284 }
285 EXPORT_SYMBOL(__hci_cmd_sync_status);
286 
287 int hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen,
288 			const void *param, u32 timeout)
289 {
290 	int err;
291 
292 	hci_req_sync_lock(hdev);
293 	err = __hci_cmd_sync_status(hdev, opcode, plen, param, timeout);
294 	hci_req_sync_unlock(hdev);
295 
296 	return err;
297 }
298 EXPORT_SYMBOL(hci_cmd_sync_status);
299 
300 static void hci_cmd_sync_work(struct work_struct *work)
301 {
302 	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_sync_work);
303 
304 	bt_dev_dbg(hdev, "");
305 
306 	/* Dequeue all entries and run them */
307 	while (1) {
308 		struct hci_cmd_sync_work_entry *entry;
309 
310 		mutex_lock(&hdev->cmd_sync_work_lock);
311 		entry = list_first_entry_or_null(&hdev->cmd_sync_work_list,
312 						 struct hci_cmd_sync_work_entry,
313 						 list);
314 		if (entry)
315 			list_del(&entry->list);
316 		mutex_unlock(&hdev->cmd_sync_work_lock);
317 
318 		if (!entry)
319 			break;
320 
321 		bt_dev_dbg(hdev, "entry %p", entry);
322 
323 		if (entry->func) {
324 			int err;
325 
326 			hci_req_sync_lock(hdev);
327 			err = entry->func(hdev, entry->data);
328 			if (entry->destroy)
329 				entry->destroy(hdev, entry->data, err);
330 			hci_req_sync_unlock(hdev);
331 		}
332 
333 		kfree(entry);
334 	}
335 }
336 
337 static void hci_cmd_sync_cancel_work(struct work_struct *work)
338 {
339 	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_sync_cancel_work);
340 
341 	cancel_delayed_work_sync(&hdev->cmd_timer);
342 	cancel_delayed_work_sync(&hdev->ncmd_timer);
343 	atomic_set(&hdev->cmd_cnt, 1);
344 
345 	wake_up_interruptible(&hdev->req_wait_q);
346 }
347 
348 static int hci_scan_disable_sync(struct hci_dev *hdev);
349 static int scan_disable_sync(struct hci_dev *hdev, void *data)
350 {
351 	return hci_scan_disable_sync(hdev);
352 }
353 
354 static int hci_inquiry_sync(struct hci_dev *hdev, u8 length);
355 static int interleaved_inquiry_sync(struct hci_dev *hdev, void *data)
356 {
357 	return hci_inquiry_sync(hdev, DISCOV_INTERLEAVED_INQUIRY_LEN);
358 }
359 
360 static void le_scan_disable(struct work_struct *work)
361 {
362 	struct hci_dev *hdev = container_of(work, struct hci_dev,
363 					    le_scan_disable.work);
364 	int status;
365 
366 	bt_dev_dbg(hdev, "");
367 	hci_dev_lock(hdev);
368 
369 	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
370 		goto _return;
371 
372 	cancel_delayed_work(&hdev->le_scan_restart);
373 
374 	status = hci_cmd_sync_queue(hdev, scan_disable_sync, NULL, NULL);
375 	if (status) {
376 		bt_dev_err(hdev, "failed to disable LE scan: %d", status);
377 		goto _return;
378 	}
379 
380 	hdev->discovery.scan_start = 0;
381 
382 	/* If we were running an LE-only scan, change the discovery state.
383 	 * If we were running both LE and BR/EDR inquiry simultaneously,
384 	 * and the BR/EDR inquiry is already finished, stop discovery;
385 	 * otherwise the BR/EDR inquiry will stop discovery when it
386 	 * finishes. If we are resolving a remote device name, do not
387 	 * change the discovery state.
388 	 */
389 
390 	if (hdev->discovery.type == DISCOV_TYPE_LE)
391 		goto discov_stopped;
392 
393 	if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
394 		goto _return;
395 
396 	if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
397 		if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
398 		    hdev->discovery.state != DISCOVERY_RESOLVING)
399 			goto discov_stopped;
400 
401 		goto _return;
402 	}
403 
404 	status = hci_cmd_sync_queue(hdev, interleaved_inquiry_sync, NULL, NULL);
405 	if (status) {
406 		bt_dev_err(hdev, "inquiry failed: status %d", status);
407 		goto discov_stopped;
408 	}
409 
410 	goto _return;
411 
412 discov_stopped:
413 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
414 
415 _return:
416 	hci_dev_unlock(hdev);
417 }
418 
419 static int hci_le_set_scan_enable_sync(struct hci_dev *hdev, u8 val,
420 				       u8 filter_dup);
421 static int hci_le_scan_restart_sync(struct hci_dev *hdev)
422 {
423 	/* If controller is not scanning we are done. */
424 	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
425 		return 0;
426 
427 	if (hdev->scanning_paused) {
428 		bt_dev_dbg(hdev, "Scanning is paused for suspend");
429 		return 0;
430 	}
431 
432 	hci_le_set_scan_enable_sync(hdev, LE_SCAN_DISABLE, 0x00);
433 	return hci_le_set_scan_enable_sync(hdev, LE_SCAN_ENABLE,
434 					   LE_SCAN_FILTER_DUP_ENABLE);
435 }
436 
437 static void le_scan_restart(struct work_struct *work)
438 {
439 	struct hci_dev *hdev = container_of(work, struct hci_dev,
440 					    le_scan_restart.work);
441 	unsigned long timeout, duration, scan_start, now;
442 	int status;
443 
444 	bt_dev_dbg(hdev, "");
445 
446 	status = hci_le_scan_restart_sync(hdev);
447 	if (status) {
448 		bt_dev_err(hdev, "failed to restart LE scan: status %d",
449 			   status);
450 		return;
451 	}
452 
453 	hci_dev_lock(hdev);
454 
455 	if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
456 	    !hdev->discovery.scan_start)
457 		goto unlock;
458 
459 	/* When the scan was started, hdev->le_scan_disable was queued to
460 	 * run a scan duration after scan_start. During the scan restart
461 	 * this job was canceled, and we need to queue it again with the
462 	 * proper timeout to make sure the scan does not run indefinitely.
463 	 */
464 	duration = hdev->discovery.scan_duration;
465 	scan_start = hdev->discovery.scan_start;
466 	now = jiffies;
467 	if (now - scan_start <= duration) {
468 		int elapsed;
469 
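		/* Handle jiffies wraparound when computing the elapsed time */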
470 		if (now >= scan_start)
471 			elapsed = now - scan_start;
472 		else
473 			elapsed = ULONG_MAX - scan_start + now;
474 
475 		timeout = duration - elapsed;
476 	} else {
477 		timeout = 0;
478 	}
479 
480 	queue_delayed_work(hdev->req_workqueue,
481 			   &hdev->le_scan_disable, timeout);
482 
483 unlock:
484 	hci_dev_unlock(hdev);
485 }
486 
487 static int reenable_adv_sync(struct hci_dev *hdev, void *data)
488 {
489 	bt_dev_dbg(hdev, "");
490 
491 	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
492 	    list_empty(&hdev->adv_instances))
493 		return 0;
494 
495 	if (hdev->cur_adv_instance) {
496 		return hci_schedule_adv_instance_sync(hdev,
497 						      hdev->cur_adv_instance,
498 						      true);
499 	} else {
500 		if (ext_adv_capable(hdev)) {
501 			hci_start_ext_adv_sync(hdev, 0x00);
502 		} else {
503 			hci_update_adv_data_sync(hdev, 0x00);
504 			hci_update_scan_rsp_data_sync(hdev, 0x00);
505 			hci_enable_advertising_sync(hdev);
506 		}
507 	}
508 
509 	return 0;
510 }
511 
512 static void reenable_adv(struct work_struct *work)
513 {
514 	struct hci_dev *hdev = container_of(work, struct hci_dev,
515 					    reenable_adv_work);
516 	int status;
517 
518 	bt_dev_dbg(hdev, "");
519 
520 	hci_dev_lock(hdev);
521 
522 	status = hci_cmd_sync_queue(hdev, reenable_adv_sync, NULL, NULL);
523 	if (status)
524 		bt_dev_err(hdev, "failed to reenable ADV: %d", status);
525 
526 	hci_dev_unlock(hdev);
527 }
528 
529 static void cancel_adv_timeout(struct hci_dev *hdev)
530 {
531 	if (hdev->adv_instance_timeout) {
532 		hdev->adv_instance_timeout = 0;
533 		cancel_delayed_work(&hdev->adv_instance_expire);
534 	}
535 }
536 
537 /* For a single instance:
538  * - force == true: The instance will be removed even when its remaining
539  *   lifetime is not zero.
540  * - force == false: the instance will be deactivated but kept stored unless
541  *   the remaining lifetime is zero.
542  *
543  * For instance == 0x00:
544  * - force == true: All instances will be removed regardless of their timeout
545  *   setting.
546  * - force == false: Only instances that have a timeout will be removed.
547  */
548 int hci_clear_adv_instance_sync(struct hci_dev *hdev, struct sock *sk,
549 				u8 instance, bool force)
550 {
551 	struct adv_info *adv_instance, *n, *next_instance = NULL;
552 	int err;
553 	u8 rem_inst;
554 
555 	/* Cancel any timeout concerning the removed instance(s). */
556 	if (!instance || hdev->cur_adv_instance == instance)
557 		cancel_adv_timeout(hdev);
558 
559 	/* Get the next instance to advertise BEFORE we remove
560 	 * the current one. This can be the same instance again
561 	 * if there is only one instance.
562 	 */
563 	if (instance && hdev->cur_adv_instance == instance)
564 		next_instance = hci_get_next_instance(hdev, instance);
565 
566 	if (instance == 0x00) {
567 		list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
568 					 list) {
569 			if (!(force || adv_instance->timeout))
570 				continue;
571 
572 			rem_inst = adv_instance->instance;
573 			err = hci_remove_adv_instance(hdev, rem_inst);
574 			if (!err)
575 				mgmt_advertising_removed(sk, hdev, rem_inst);
576 		}
577 	} else {
578 		adv_instance = hci_find_adv_instance(hdev, instance);
579 
580 		if (force || (adv_instance && adv_instance->timeout &&
581 			      !adv_instance->remaining_time)) {
582 			/* Don't advertise a removed instance. */
583 			if (next_instance &&
584 			    next_instance->instance == instance)
585 				next_instance = NULL;
586 
587 			err = hci_remove_adv_instance(hdev, instance);
588 			if (!err)
589 				mgmt_advertising_removed(sk, hdev, instance);
590 		}
591 	}
592 
593 	if (!hdev_is_powered(hdev) || hci_dev_test_flag(hdev, HCI_ADVERTISING))
594 		return 0;
595 
596 	if (next_instance && !ext_adv_capable(hdev))
597 		return hci_schedule_adv_instance_sync(hdev,
598 						      next_instance->instance,
599 						      false);
600 
601 	return 0;
602 }
603 
604 static int adv_timeout_expire_sync(struct hci_dev *hdev, void *data)
605 {
606 	u8 instance = *(u8 *)data;
607 
608 	kfree(data);
609 
610 	hci_clear_adv_instance_sync(hdev, NULL, instance, false);
611 
612 	if (list_empty(&hdev->adv_instances))
613 		return hci_disable_advertising_sync(hdev);
614 
615 	return 0;
616 }
617 
618 static void adv_timeout_expire(struct work_struct *work)
619 {
620 	u8 *inst_ptr;
621 	struct hci_dev *hdev = container_of(work, struct hci_dev,
622 					    adv_instance_expire.work);
623 
624 	bt_dev_dbg(hdev, "");
625 
626 	hci_dev_lock(hdev);
627 
628 	hdev->adv_instance_timeout = 0;
629 
630 	if (hdev->cur_adv_instance == 0x00)
631 		goto unlock;
632 
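	/* Pass the expiring instance to the cmd_sync context; the allocation
	 * is freed by adv_timeout_expire_sync().
	 */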
633 	inst_ptr = kmalloc(1, GFP_KERNEL);
634 	if (!inst_ptr)
635 		goto unlock;
636 
637 	*inst_ptr = hdev->cur_adv_instance;
638 	hci_cmd_sync_queue(hdev, adv_timeout_expire_sync, inst_ptr, NULL);
639 
640 unlock:
641 	hci_dev_unlock(hdev);
642 }
643 
644 void hci_cmd_sync_init(struct hci_dev *hdev)
645 {
646 	INIT_WORK(&hdev->cmd_sync_work, hci_cmd_sync_work);
647 	INIT_LIST_HEAD(&hdev->cmd_sync_work_list);
648 	mutex_init(&hdev->cmd_sync_work_lock);
649 	mutex_init(&hdev->unregister_lock);
650 
651 	INIT_WORK(&hdev->cmd_sync_cancel_work, hci_cmd_sync_cancel_work);
652 	INIT_WORK(&hdev->reenable_adv_work, reenable_adv);
653 	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable);
654 	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart);
655 	INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
656 }
657 
658 static void _hci_cmd_sync_cancel_entry(struct hci_dev *hdev,
659 				       struct hci_cmd_sync_work_entry *entry,
660 				       int err)
661 {
662 	if (entry->destroy)
663 		entry->destroy(hdev, entry->data, err);
664 
665 	list_del(&entry->list);
666 	kfree(entry);
667 }
668 
669 void hci_cmd_sync_clear(struct hci_dev *hdev)
670 {
671 	struct hci_cmd_sync_work_entry *entry, *tmp;
672 
673 	cancel_work_sync(&hdev->cmd_sync_work);
674 	cancel_work_sync(&hdev->reenable_adv_work);
675 
676 	mutex_lock(&hdev->cmd_sync_work_lock);
677 	list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list)
678 		_hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED);
679 	mutex_unlock(&hdev->cmd_sync_work_lock);
680 }
681 
682 void hci_cmd_sync_cancel(struct hci_dev *hdev, int err)
683 {
684 	bt_dev_dbg(hdev, "err 0x%2.2x", err);
685 
686 	if (hdev->req_status == HCI_REQ_PEND) {
687 		hdev->req_result = err;
688 		hdev->req_status = HCI_REQ_CANCELED;
689 
690 		queue_work(hdev->workqueue, &hdev->cmd_sync_cancel_work);
691 	}
692 }
693 EXPORT_SYMBOL(hci_cmd_sync_cancel);
694 
695 /* Cancel ongoing command request synchronously:
696  *
697  * - Set result and mark status to HCI_REQ_CANCELED
698  * - Wake up the command sync thread
699  */
700 void hci_cmd_sync_cancel_sync(struct hci_dev *hdev, int err)
701 {
702 	bt_dev_dbg(hdev, "err 0x%2.2x", err);
703 
704 	if (hdev->req_status == HCI_REQ_PEND) {
705 		/* req_result is __u32 so error must be positive to be properly
706 		 * propagated.
707 		 */
708 		hdev->req_result = err < 0 ? -err : err;
709 		hdev->req_status = HCI_REQ_CANCELED;
710 
711 		wake_up_interruptible(&hdev->req_wait_q);
712 	}
713 }
714 EXPORT_SYMBOL(hci_cmd_sync_cancel_sync);
715 
716 /* Submit HCI command to be run in cmd_sync_work context:
717  *
718  * - hdev must _not_ be unregistered
719  */
720 int hci_cmd_sync_submit(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
721 			void *data, hci_cmd_sync_work_destroy_t destroy)
722 {
723 	struct hci_cmd_sync_work_entry *entry;
724 	int err = 0;
725 
726 	mutex_lock(&hdev->unregister_lock);
727 	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
728 		err = -ENODEV;
729 		goto unlock;
730 	}
731 
732 	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
733 	if (!entry) {
734 		err = -ENOMEM;
735 		goto unlock;
736 	}
737 	entry->func = func;
738 	entry->data = data;
739 	entry->destroy = destroy;
740 
741 	mutex_lock(&hdev->cmd_sync_work_lock);
742 	list_add_tail(&entry->list, &hdev->cmd_sync_work_list);
743 	mutex_unlock(&hdev->cmd_sync_work_lock);
744 
745 	queue_work(hdev->req_workqueue, &hdev->cmd_sync_work);
746 
747 unlock:
748 	mutex_unlock(&hdev->unregister_lock);
749 	return err;
750 }
751 EXPORT_SYMBOL(hci_cmd_sync_submit);
752 
753 /* Queue HCI command:
754  *
755  * - hdev must be running
756  */
757 int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
758 		       void *data, hci_cmd_sync_work_destroy_t destroy)
759 {
760 	/* Only queue the command if hdev is running, which means it has been
761 	 * opened and is either in the init phase or already up.
762 	 */
763 	if (!test_bit(HCI_RUNNING, &hdev->flags))
764 		return -ENETDOWN;
765 
766 	return hci_cmd_sync_submit(hdev, func, data, destroy);
767 }
768 EXPORT_SYMBOL(hci_cmd_sync_queue);
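
/* Illustrative usage sketch (hypothetical callback name): defer work to the
 * cmd_sync context instead of running it inline:
 *
 *	static int my_update_sync(struct hci_dev *hdev, void *data)
 *	{
 *		return hci_update_eir_sync(hdev);
 *	}
 *
 *	err = hci_cmd_sync_queue(hdev, my_update_sync, NULL, NULL);
 */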
769 
770 static struct hci_cmd_sync_work_entry *
771 _hci_cmd_sync_lookup_entry(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
772 			   void *data, hci_cmd_sync_work_destroy_t destroy)
773 {
774 	struct hci_cmd_sync_work_entry *entry, *tmp;
775 
776 	list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list) {
777 		if (func && entry->func != func)
778 			continue;
779 
780 		if (data && entry->data != data)
781 			continue;
782 
783 		if (destroy && entry->destroy != destroy)
784 			continue;
785 
786 		return entry;
787 	}
788 
789 	return NULL;
790 }
791 
792 /* Queue HCI command entry once:
793  *
794  * - Look up whether an entry already exists and, only if it doesn't,
795  *   create a new entry and queue it.
796  */
797 int hci_cmd_sync_queue_once(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
798 			    void *data, hci_cmd_sync_work_destroy_t destroy)
799 {
800 	if (hci_cmd_sync_lookup_entry(hdev, func, data, destroy))
801 		return 0;
802 
803 	return hci_cmd_sync_queue(hdev, func, data, destroy);
804 }
805 EXPORT_SYMBOL(hci_cmd_sync_queue_once);
806 
807 /* Run HCI command:
808  *
809  * - hdev must be running
810  * - If on cmd_sync_work, run immediately; otherwise queue
811  */
812 int hci_cmd_sync_run(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
813 		     void *data, hci_cmd_sync_work_destroy_t destroy)
814 {
815 	/* Only queue the command if hdev is running, which means it has been
816 	 * opened and is either in the init phase or already up.
817 	 */
818 	if (!test_bit(HCI_RUNNING, &hdev->flags))
819 		return -ENETDOWN;
820 
821 	/* If on cmd_sync_work, run immediately; otherwise queue */
822 	if (current_work() == &hdev->cmd_sync_work)
823 		return func(hdev, data);
824 
825 	return hci_cmd_sync_submit(hdev, func, data, destroy);
826 }
827 EXPORT_SYMBOL(hci_cmd_sync_run);
828 
829 /* Run HCI command entry once:
830  *
831  * - Look up whether an entry already exists and, only if it doesn't,
832  *   create a new entry and run it.
833  * - If on cmd_sync_work, run immediately; otherwise queue
834  */
835 int hci_cmd_sync_run_once(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
836 			  void *data, hci_cmd_sync_work_destroy_t destroy)
837 {
838 	if (hci_cmd_sync_lookup_entry(hdev, func, data, destroy))
839 		return 0;
840 
841 	return hci_cmd_sync_run(hdev, func, data, destroy);
842 }
843 EXPORT_SYMBOL(hci_cmd_sync_run_once);
844 
845 /* Lookup HCI command entry:
846  *
847  * - Return the first entry that matches by function callback, data,
848  *   or destroy callback.
849  */
850 struct hci_cmd_sync_work_entry *
851 hci_cmd_sync_lookup_entry(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
852 			  void *data, hci_cmd_sync_work_destroy_t destroy)
853 {
854 	struct hci_cmd_sync_work_entry *entry;
855 
856 	mutex_lock(&hdev->cmd_sync_work_lock);
857 	entry = _hci_cmd_sync_lookup_entry(hdev, func, data, destroy);
858 	mutex_unlock(&hdev->cmd_sync_work_lock);
859 
860 	return entry;
861 }
862 EXPORT_SYMBOL(hci_cmd_sync_lookup_entry);
863 
864 /* Cancel HCI command entry */
865 void hci_cmd_sync_cancel_entry(struct hci_dev *hdev,
866 			       struct hci_cmd_sync_work_entry *entry)
867 {
868 	mutex_lock(&hdev->cmd_sync_work_lock);
869 	_hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED);
870 	mutex_unlock(&hdev->cmd_sync_work_lock);
871 }
872 EXPORT_SYMBOL(hci_cmd_sync_cancel_entry);
873 
874 /* Dequeue one HCI command entry:
875  *
876  * - Look up and cancel the first entry that matches.
877  */
878 bool hci_cmd_sync_dequeue_once(struct hci_dev *hdev,
879 			       hci_cmd_sync_work_func_t func,
880 			       void *data, hci_cmd_sync_work_destroy_t destroy)
881 {
882 	struct hci_cmd_sync_work_entry *entry;
883 
884 	entry = hci_cmd_sync_lookup_entry(hdev, func, data, destroy);
885 	if (!entry)
886 		return false;
887 
888 	hci_cmd_sync_cancel_entry(hdev, entry);
889 
890 	return true;
891 }
892 EXPORT_SYMBOL(hci_cmd_sync_dequeue_once);
893 
894 /* Dequeue HCI command entry:
895  *
896  * - Look up and cancel any entry that matches by function callback,
897  *   data, or destroy callback.
898  */
899 bool hci_cmd_sync_dequeue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
900 			  void *data, hci_cmd_sync_work_destroy_t destroy)
901 {
902 	struct hci_cmd_sync_work_entry *entry;
903 	bool ret = false;
904 
905 	mutex_lock(&hdev->cmd_sync_work_lock);
906 	while ((entry = _hci_cmd_sync_lookup_entry(hdev, func, data,
907 						   destroy))) {
908 		_hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED);
909 		ret = true;
910 	}
911 	mutex_unlock(&hdev->cmd_sync_work_lock);
912 
913 	return ret;
914 }
915 EXPORT_SYMBOL(hci_cmd_sync_dequeue);
916 
917 int hci_update_eir_sync(struct hci_dev *hdev)
918 {
919 	struct hci_cp_write_eir cp;
920 
921 	bt_dev_dbg(hdev, "");
922 
923 	if (!hdev_is_powered(hdev))
924 		return 0;
925 
926 	if (!lmp_ext_inq_capable(hdev))
927 		return 0;
928 
929 	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
930 		return 0;
931 
932 	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
933 		return 0;
934 
935 	memset(&cp, 0, sizeof(cp));
936 
937 	eir_create(hdev, cp.data);
938 
939 	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
940 		return 0;
941 
942 	memcpy(hdev->eir, cp.data, sizeof(cp.data));
943 
944 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp,
945 				     HCI_CMD_TIMEOUT);
946 }
947 
948 static u8 get_service_classes(struct hci_dev *hdev)
949 {
950 	struct bt_uuid *uuid;
951 	u8 val = 0;
952 
953 	list_for_each_entry(uuid, &hdev->uuids, list)
954 		val |= uuid->svc_hint;
955 
956 	return val;
957 }
958 
959 int hci_update_class_sync(struct hci_dev *hdev)
960 {
961 	u8 cod[3];
962 
963 	bt_dev_dbg(hdev, "");
964 
965 	if (!hdev_is_powered(hdev))
966 		return 0;
967 
968 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
969 		return 0;
970 
971 	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
972 		return 0;
973 
974 	cod[0] = hdev->minor_class;
975 	cod[1] = hdev->major_class;
976 	cod[2] = get_service_classes(hdev);
977 
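	/* Limited Discoverable Mode is CoD bit 13 (bit 5 of the second octet) */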
978 	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
979 		cod[1] |= 0x20;
980 
981 	if (memcmp(cod, hdev->dev_class, 3) == 0)
982 		return 0;
983 
984 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CLASS_OF_DEV,
985 				     sizeof(cod), cod, HCI_CMD_TIMEOUT);
986 }
987 
988 static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
989 {
990 	/* If there is no connection we are OK to advertise. */
991 	if (hci_conn_num(hdev, LE_LINK) == 0)
992 		return true;
993 
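	/* le_states is a bitmask of supported LE states: state bit N maps to
	 * le_states[N / 8] & BIT(N % 8), e.g. bit 20 is le_states[2] & 0x10.
	 */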
994 	/* Check le_states if there is any connection in peripheral role. */
995 	if (hdev->conn_hash.le_num_peripheral > 0) {
996 		/* Peripheral connection state and non connectable mode
997 		 * bit 20.
998 		 */
999 		if (!connectable && !(hdev->le_states[2] & 0x10))
1000 			return false;
1001 
1002 		/* Peripheral connection state and connectable mode bit 38
1003 		 * and scannable bit 21.
1004 		 */
1005 		if (connectable && (!(hdev->le_states[4] & 0x40) ||
1006 				    !(hdev->le_states[2] & 0x20)))
1007 			return false;
1008 	}
1009 
1010 	/* Check le_states if there is any connection in central role. */
1011 	if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_peripheral) {
1012 		/* Central connection state and non connectable mode bit 18. */
1013 		if (!connectable && !(hdev->le_states[2] & 0x02))
1014 			return false;
1015 
1016 		/* Central connection state and connectable mode bit 35 and
1017 		 * scannable 19.
1018 		 */
1019 		if (connectable && (!(hdev->le_states[4] & 0x08) ||
1020 				    !(hdev->le_states[2] & 0x08)))
1021 			return false;
1022 	}
1023 
1024 	return true;
1025 }
1026 
1027 static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
1028 {
1029 	/* If privacy is not enabled don't use RPA */
1030 	if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
1031 		return false;
1032 
1033 	/* If basic privacy mode is enabled use RPA */
1034 	if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1035 		return true;
1036 
1037 	/* If limited privacy mode is enabled don't use RPA if we're
1038 	 * both discoverable and bondable.
1039 	 */
1040 	if ((flags & MGMT_ADV_FLAG_DISCOV) &&
1041 	    hci_dev_test_flag(hdev, HCI_BONDABLE))
1042 		return false;
1043 
1044 	/* We're neither bondable nor discoverable in the limited
1045 	 * privacy mode, therefore use RPA.
1046 	 */
1047 	return true;
1048 }
1049 
1050 static int hci_set_random_addr_sync(struct hci_dev *hdev, bdaddr_t *rpa)
1051 {
1052 	/* If a random address has been set and we're advertising or initiating
1053 	 * an LE connection, we can't go ahead and change the random address at this
1054 	 * time. This is because the eventual initiator address used for the
1055 	 * subsequently created connection will be undefined (some
1056 	 * controllers use the new address and others the one we had
1057 	 * when the operation started).
1058 	 *
1059 	 * In this kind of scenario skip the update and let the random
1060 	 * address be updated at the next cycle.
1061 	 */
1062 	if (bacmp(&hdev->random_addr, BDADDR_ANY) &&
1063 	    (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
1064 	    hci_lookup_le_connect(hdev))) {
1065 		bt_dev_dbg(hdev, "Deferring random address update");
1066 		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1067 		return 0;
1068 	}
1069 
1070 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_RANDOM_ADDR,
1071 				     6, rpa, HCI_CMD_TIMEOUT);
1072 }
1073 
1074 int hci_update_random_address_sync(struct hci_dev *hdev, bool require_privacy,
1075 				   bool rpa, u8 *own_addr_type)
1076 {
1077 	int err;
1078 
1079 	/* If privacy is enabled use a resolvable private address. If
1080 	 * current RPA has expired or there is something else than
1081 	 * the current RPA in use, then generate a new one.
1082 	 */
1083 	if (rpa) {
1084 		/* If the controller supports LL Privacy, use own address
1085 		 * type 0x03 (ADDR_LE_DEV_RANDOM_RESOLVED)
1086 		 */
1087 		if (use_ll_privacy(hdev))
1088 			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
1089 		else
1090 			*own_addr_type = ADDR_LE_DEV_RANDOM;
1091 
1092 		/* Check if RPA is valid */
1093 		if (rpa_valid(hdev))
1094 			return 0;
1095 
1096 		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
1097 		if (err < 0) {
1098 			bt_dev_err(hdev, "failed to generate new RPA");
1099 			return err;
1100 		}
1101 
1102 		err = hci_set_random_addr_sync(hdev, &hdev->rpa);
1103 		if (err)
1104 			return err;
1105 
1106 		return 0;
1107 	}
1108 
1109 	/* In case of required privacy without resolvable private address,
1110 	 * use a non-resolvable private address. This is useful for active
1111 	 * scanning and non-connectable advertising.
1112 	 */
1113 	if (require_privacy) {
1114 		bdaddr_t nrpa;
1115 
1116 		while (true) {
1117 			/* The non-resolvable private address is generated
1118 			 * from random six bytes with the two most significant
1119 			 * bits cleared.
1120 			 */
1121 			get_random_bytes(&nrpa, 6);
1122 			nrpa.b[5] &= 0x3f;
1123 
1124 			/* The non-resolvable private address shall not be
1125 			 * equal to the public address.
1126 			 */
1127 			if (bacmp(&hdev->bdaddr, &nrpa))
1128 				break;
1129 		}
1130 
1131 		*own_addr_type = ADDR_LE_DEV_RANDOM;
1132 
1133 		return hci_set_random_addr_sync(hdev, &nrpa);
1134 	}
1135 
1136 	/* If forcing static address is in use or there is no public
1137 	 * address use the static address as random address (but skip
1138 	 * the HCI command if the current random address is already the
1139 	 * static one).
1140 	 *
1141 	 * In case BR/EDR has been disabled on a dual-mode controller
1142 	 * and a static address has been configured, then use that
1143 	 * address instead of the public BR/EDR address.
1144 	 */
1145 	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
1146 	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
1147 	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
1148 	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
1149 		*own_addr_type = ADDR_LE_DEV_RANDOM;
1150 		if (bacmp(&hdev->static_addr, &hdev->random_addr))
1151 			return hci_set_random_addr_sync(hdev,
1152 							&hdev->static_addr);
1153 		return 0;
1154 	}
1155 
1156 	/* Neither privacy nor static address is being used so use a
1157 	 * public address.
1158 	 */
1159 	*own_addr_type = ADDR_LE_DEV_PUBLIC;
1160 
1161 	return 0;
1162 }
1163 
1164 static int hci_disable_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance)
1165 {
1166 	struct hci_cp_le_set_ext_adv_enable *cp;
1167 	struct hci_cp_ext_adv_set *set;
1168 	u8 data[sizeof(*cp) + sizeof(*set) * 1];
1169 	u8 size;
1170 
1171 	/* If request specifies an instance that doesn't exist, fail */
1172 	if (instance > 0) {
1173 		struct adv_info *adv;
1174 
1175 		adv = hci_find_adv_instance(hdev, instance);
1176 		if (!adv)
1177 			return -EINVAL;
1178 
1179 		/* If not enabled there is nothing to do */
1180 		if (!adv->enabled)
1181 			return 0;
1182 	}
1183 
1184 	memset(data, 0, sizeof(data));
1185 
1186 	cp = (void *)data;
1187 	set = (void *)cp->data;
1188 
1189 	/* Instance 0x00 indicates all advertising instances will be disabled */
1190 	cp->num_of_sets = !!instance;
1191 	cp->enable = 0x00;
1192 
1193 	set->handle = instance;
1194 
1195 	size = sizeof(*cp) + sizeof(*set) * cp->num_of_sets;
1196 
1197 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE,
1198 				     size, data, HCI_CMD_TIMEOUT);
1199 }
1200 
1201 static int hci_set_adv_set_random_addr_sync(struct hci_dev *hdev, u8 instance,
1202 					    bdaddr_t *random_addr)
1203 {
1204 	struct hci_cp_le_set_adv_set_rand_addr cp;
1205 	int err;
1206 
1207 	if (!instance) {
1208 		/* Instance 0x00 doesn't have an adv_info; instead it uses
1209 		 * hdev->random_addr to track its address, so whenever that
1210 		 * needs updating this also sets the random address, since
1211 		 * hdev->random_addr is shared with the scan state machine.
1212 		 */
1213 		err = hci_set_random_addr_sync(hdev, random_addr);
1214 		if (err)
1215 			return err;
1216 	}
1217 
1218 	memset(&cp, 0, sizeof(cp));
1219 
1220 	cp.handle = instance;
1221 	bacpy(&cp.bdaddr, random_addr);
1222 
1223 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
1224 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1225 }
1226 
1227 int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance)
1228 {
1229 	struct hci_cp_le_set_ext_adv_params cp;
1230 	bool connectable;
1231 	u32 flags;
1232 	bdaddr_t random_addr;
1233 	u8 own_addr_type;
1234 	int err;
1235 	struct adv_info *adv;
1236 	bool secondary_adv;
1237 
1238 	if (instance > 0) {
1239 		adv = hci_find_adv_instance(hdev, instance);
1240 		if (!adv)
1241 			return -EINVAL;
1242 	} else {
1243 		adv = NULL;
1244 	}
1245 
1246 	/* Updating parameters of an active instance will return a
1247 	 * Command Disallowed error, so we must first disable the
1248 	 * instance if it is active.
1249 	 */
1250 	if (adv && !adv->pending) {
1251 		err = hci_disable_ext_adv_instance_sync(hdev, instance);
1252 		if (err)
1253 			return err;
1254 	}
1255 
1256 	flags = hci_adv_instance_flags(hdev, instance);
1257 
1258 	/* If the "connectable" instance flag was not set, then choose between
1259 	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1260 	 */
1261 	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1262 		      mgmt_get_connectable(hdev);
1263 
1264 	if (!is_advertising_allowed(hdev, connectable))
1265 		return -EPERM;
1266 
1267 	/* Set require_privacy to true only when non-connectable
1268 	 * advertising is used. In that case it is fine to use a
1269 	 * non-resolvable private address.
1270 	 */
1271 	err = hci_get_random_address(hdev, !connectable,
1272 				     adv_use_rpa(hdev, flags), adv,
1273 				     &own_addr_type, &random_addr);
1274 	if (err < 0)
1275 		return err;
1276 
1277 	memset(&cp, 0, sizeof(cp));
1278 
1279 	if (adv) {
1280 		hci_cpu_to_le24(adv->min_interval, cp.min_interval);
1281 		hci_cpu_to_le24(adv->max_interval, cp.max_interval);
1282 		cp.tx_power = adv->tx_power;
1283 	} else {
1284 		hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
1285 		hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
1286 		cp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
1287 	}
1288 
1289 	secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
1290 
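	/* Select event properties: legacy PDU types are used unless the
	 * instance requested a secondary (extended advertising) PHY.
	 */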
1291 	if (connectable) {
1292 		if (secondary_adv)
1293 			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
1294 		else
1295 			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
1296 	} else if (hci_adv_instance_is_scannable(hdev, instance) ||
1297 		   (flags & MGMT_ADV_PARAM_SCAN_RSP)) {
1298 		if (secondary_adv)
1299 			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
1300 		else
1301 			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
1302 	} else {
1303 		if (secondary_adv)
1304 			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
1305 		else
1306 			cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
1307 	}
1308 
1309 	/* If Own_Address_Type equals 0x02 or 0x03, the Peer_Address parameter
1310 	 * contains the peer’s Identity Address and the Peer_Address_Type
1311 	 * parameter contains the peer’s Identity Type (i.e., 0x00 or 0x01).
1312 	 * These parameters are used to locate the corresponding local IRK in
1313 	 * the resolving list; this IRK is used to generate their own address
1314 	 * used in the advertisement.
1315 	 */
1316 	if (own_addr_type == ADDR_LE_DEV_RANDOM_RESOLVED)
1317 		hci_copy_identity_address(hdev, &cp.peer_addr,
1318 					  &cp.peer_addr_type);
1319 
1320 	cp.own_addr_type = own_addr_type;
1321 	cp.channel_map = hdev->le_adv_channel_map;
1322 	cp.handle = instance;
1323 
1324 	if (flags & MGMT_ADV_FLAG_SEC_2M) {
1325 		cp.primary_phy = HCI_ADV_PHY_1M;
1326 		cp.secondary_phy = HCI_ADV_PHY_2M;
1327 	} else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
1328 		cp.primary_phy = HCI_ADV_PHY_CODED;
1329 		cp.secondary_phy = HCI_ADV_PHY_CODED;
1330 	} else {
1331 		/* In all other cases use 1M */
1332 		cp.primary_phy = HCI_ADV_PHY_1M;
1333 		cp.secondary_phy = HCI_ADV_PHY_1M;
1334 	}
1335 
1336 	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS,
1337 				    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1338 	if (err)
1339 		return err;
1340 
1341 	if ((own_addr_type == ADDR_LE_DEV_RANDOM ||
1342 	     own_addr_type == ADDR_LE_DEV_RANDOM_RESOLVED) &&
1343 	    bacmp(&random_addr, BDADDR_ANY)) {
1344 		/* Check if random address need to be updated */
1345 		if (adv) {
1346 			if (!bacmp(&random_addr, &adv->random_addr))
1347 				return 0;
1348 		} else {
1349 			if (!bacmp(&random_addr, &hdev->random_addr))
1350 				return 0;
1351 		}
1352 
1353 		return hci_set_adv_set_random_addr_sync(hdev, instance,
1354 							&random_addr);
1355 	}
1356 
1357 	return 0;
1358 }
1359 
1360 static int hci_set_ext_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance)
1361 {
1362 	struct {
1363 		struct hci_cp_le_set_ext_scan_rsp_data cp;
1364 		u8 data[HCI_MAX_EXT_AD_LENGTH];
1365 	} pdu;
1366 	u8 len;
1367 	struct adv_info *adv = NULL;
1368 	int err;
1369 
1370 	memset(&pdu, 0, sizeof(pdu));
1371 
1372 	if (instance) {
1373 		adv = hci_find_adv_instance(hdev, instance);
1374 		if (!adv || !adv->scan_rsp_changed)
1375 			return 0;
1376 	}
1377 
1378 	len = eir_create_scan_rsp(hdev, instance, pdu.data);
1379 
1380 	pdu.cp.handle = instance;
1381 	pdu.cp.length = len;
1382 	pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1383 	pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1384 
1385 	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA,
1386 				    sizeof(pdu.cp) + len, &pdu.cp,
1387 				    HCI_CMD_TIMEOUT);
1388 	if (err)
1389 		return err;
1390 
1391 	if (adv) {
1392 		adv->scan_rsp_changed = false;
1393 	} else {
1394 		memcpy(hdev->scan_rsp_data, pdu.data, len);
1395 		hdev->scan_rsp_data_len = len;
1396 	}
1397 
1398 	return 0;
1399 }
1400 
1401 static int __hci_set_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance)
1402 {
1403 	struct hci_cp_le_set_scan_rsp_data cp;
1404 	u8 len;
1405 
1406 	memset(&cp, 0, sizeof(cp));
1407 
1408 	len = eir_create_scan_rsp(hdev, instance, cp.data);
1409 
1410 	if (hdev->scan_rsp_data_len == len &&
1411 	    !memcmp(cp.data, hdev->scan_rsp_data, len))
1412 		return 0;
1413 
1414 	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1415 	hdev->scan_rsp_data_len = len;
1416 
1417 	cp.length = len;
1418 
1419 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_RSP_DATA,
1420 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1421 }
1422 
1423 int hci_update_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance)
1424 {
1425 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1426 		return 0;
1427 
1428 	if (ext_adv_capable(hdev))
1429 		return hci_set_ext_scan_rsp_data_sync(hdev, instance);
1430 
1431 	return __hci_set_scan_rsp_data_sync(hdev, instance);
1432 }
1433 
1434 int hci_enable_ext_advertising_sync(struct hci_dev *hdev, u8 instance)
1435 {
1436 	struct hci_cp_le_set_ext_adv_enable *cp;
1437 	struct hci_cp_ext_adv_set *set;
1438 	u8 data[sizeof(*cp) + sizeof(*set) * 1];
1439 	struct adv_info *adv;
1440 
1441 	if (instance > 0) {
1442 		adv = hci_find_adv_instance(hdev, instance);
1443 		if (!adv)
1444 			return -EINVAL;
1445 		/* If already enabled there is nothing to do */
1446 		if (adv->enabled)
1447 			return 0;
1448 	} else {
1449 		adv = NULL;
1450 	}
1451 
1452 	cp = (void *)data;
1453 	set = (void *)cp->data;
1454 
1455 	memset(cp, 0, sizeof(*cp));
1456 
1457 	cp->enable = 0x01;
1458 	cp->num_of_sets = 0x01;
1459 
1460 	memset(set, 0, sizeof(*set));
1461 
1462 	set->handle = instance;
1463 
1464 	/* Set duration per instance since controller is responsible for
1465 	 * scheduling it.
1466 	 */
1467 	if (adv && adv->timeout) {
1468 		u16 duration = adv->timeout * MSEC_PER_SEC;
1469 
1470 		/* Time = N * 10 ms */
1471 		set->duration = cpu_to_le16(duration / 10);
1472 	}
1473 
1474 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE,
1475 				     sizeof(*cp) +
1476 				     sizeof(*set) * cp->num_of_sets,
1477 				     data, HCI_CMD_TIMEOUT);
1478 }
1479 
1480 int hci_start_ext_adv_sync(struct hci_dev *hdev, u8 instance)
1481 {
1482 	int err;
1483 
1484 	err = hci_setup_ext_adv_instance_sync(hdev, instance);
1485 	if (err)
1486 		return err;
1487 
1488 	err = hci_set_ext_scan_rsp_data_sync(hdev, instance);
1489 	if (err)
1490 		return err;
1491 
1492 	return hci_enable_ext_advertising_sync(hdev, instance);
1493 }
1494 
1495 int hci_disable_per_advertising_sync(struct hci_dev *hdev, u8 instance)
1496 {
1497 	struct hci_cp_le_set_per_adv_enable cp;
1498 	struct adv_info *adv = NULL;
1499 
1500 	/* If periodic advertising already disabled there is nothing to do. */
1501 	adv = hci_find_adv_instance(hdev, instance);
1502 	if (!adv || !adv->periodic || !adv->enabled)
1503 		return 0;
1504 
1505 	memset(&cp, 0, sizeof(cp));
1506 
1507 	cp.enable = 0x00;
1508 	cp.handle = instance;
1509 
1510 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE,
1511 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1512 }
1513 
1514 static int hci_set_per_adv_params_sync(struct hci_dev *hdev, u8 instance,
1515 				       u16 min_interval, u16 max_interval)
1516 {
1517 	struct hci_cp_le_set_per_adv_params cp;
1518 
1519 	memset(&cp, 0, sizeof(cp));
1520 
1521 	if (!min_interval)
1522 		min_interval = DISCOV_LE_PER_ADV_INT_MIN;
1523 
1524 	if (!max_interval)
1525 		max_interval = DISCOV_LE_PER_ADV_INT_MAX;
1526 
1527 	cp.handle = instance;
1528 	cp.min_interval = cpu_to_le16(min_interval);
1529 	cp.max_interval = cpu_to_le16(max_interval);
1530 	cp.periodic_properties = 0x0000;
1531 
1532 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_PARAMS,
1533 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1534 }
1535 
1536 static int hci_set_per_adv_data_sync(struct hci_dev *hdev, u8 instance)
1537 {
1538 	struct {
1539 		struct hci_cp_le_set_per_adv_data cp;
1540 		u8 data[HCI_MAX_PER_AD_LENGTH];
1541 	} pdu;
1542 	u8 len;
1543 
1544 	memset(&pdu, 0, sizeof(pdu));
1545 
1546 	if (instance) {
1547 		struct adv_info *adv = hci_find_adv_instance(hdev, instance);
1548 
1549 		if (!adv || !adv->periodic)
1550 			return 0;
1551 	}
1552 
1553 	len = eir_create_per_adv_data(hdev, instance, pdu.data);
1554 
1555 	pdu.cp.length = len;
1556 	pdu.cp.handle = instance;
1557 	pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1558 
1559 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_DATA,
1560 				     sizeof(pdu.cp) + len, &pdu,
1561 				     HCI_CMD_TIMEOUT);
1562 }
1563 
1564 static int hci_enable_per_advertising_sync(struct hci_dev *hdev, u8 instance)
1565 {
1566 	struct hci_cp_le_set_per_adv_enable cp;
1567 	struct adv_info *adv = NULL;
1568 
1569 	/* If periodic advertising already enabled there is nothing to do. */
1570 	adv = hci_find_adv_instance(hdev, instance);
1571 	if (adv && adv->periodic && adv->enabled)
1572 		return 0;
1573 
1574 	memset(&cp, 0, sizeof(cp));
1575 
1576 	cp.enable = 0x01;
1577 	cp.handle = instance;
1578 
1579 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE,
1580 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1581 }
1582 
1583 /* Checks if periodic advertising data contains a Basic Announcement and if it
1584  * does, generates a Broadcast ID and adds a Broadcast Announcement.
1585  */
1586 static int hci_adv_bcast_annoucement(struct hci_dev *hdev, struct adv_info *adv)
1587 {
1588 	u8 bid[3];
1589 	u8 ad[HCI_MAX_EXT_AD_LENGTH];
1590 	u8 len;
1591 
1592 	/* Skip if adv is NULL: instance 0x00 is used for general purpose
1593 	 * advertising, so it cannot be used for the likes of a Broadcast
1594 	 * Announcement as it can be overwritten at any point.
1595 	 */
1596 	if (!adv)
1597 		return 0;
1598 
1599 	/* If the PA data doesn't contain a Basic Audio Announcement there
1600 	 * is nothing to do.
1601 	 */
1602 	if (!eir_get_service_data(adv->per_adv_data, adv->per_adv_data_len,
1603 				  0x1851, NULL))
1604 		return 0;
1605 
1606 	/* Check if advertising data already has a Broadcast Announcement since
1607 	 * the process may want to control the Broadcast ID directly and in that
1608 	 * case the kernel shall not interfere.
1609 	 */
1610 	if (eir_get_service_data(adv->adv_data, adv->adv_data_len, 0x1852,
1611 				 NULL))
1612 		return 0;
1613 
1614 	/* Generate a random Broadcast ID and prepend a Broadcast Announcement */
1615 	get_random_bytes(bid, sizeof(bid));
1616 	len = eir_append_service_data(ad, 0, 0x1852, bid, sizeof(bid));
1617 	memcpy(ad + len, adv->adv_data, adv->adv_data_len);
1618 	hci_set_adv_instance_data(hdev, adv->instance, len + adv->adv_data_len,
1619 				  ad, 0, NULL);
1620 
1621 	return hci_update_adv_data_sync(hdev, adv->instance);
1622 }
1623 
1624 int hci_start_per_adv_sync(struct hci_dev *hdev, u8 instance, u8 data_len,
1625 			   u8 *data, u32 flags, u16 min_interval,
1626 			   u16 max_interval, u16 sync_interval)
1627 {
1628 	struct adv_info *adv = NULL;
1629 	int err;
1630 	bool added = false;
1631 
1632 	hci_disable_per_advertising_sync(hdev, instance);
1633 
1634 	if (instance) {
1635 		adv = hci_find_adv_instance(hdev, instance);
1636 		if (adv) {
1637 			/* Turn it into periodic advertising */
1638 			adv->periodic = true;
1639 			adv->per_adv_data_len = data_len;
1640 			if (data)
1641 				memcpy(adv->per_adv_data, data, data_len);
1642 			adv->flags = flags;
1643 		} else if (!adv) {
1644 			/* Create an instance if that could not be found */
1645 			adv = hci_add_per_instance(hdev, instance, flags,
1646 						   data_len, data,
1647 						   sync_interval,
1648 						   sync_interval);
1649 			if (IS_ERR(adv))
1650 				return PTR_ERR(adv);
1651 			adv->pending = false;
1652 			added = true;
1653 		}
1654 	}
1655 
1656 	/* Start advertising */
1657 	err = hci_start_ext_adv_sync(hdev, instance);
1658 	if (err < 0)
1659 		goto fail;
1660 
1661 	err = hci_adv_bcast_annoucement(hdev, adv);
1662 	if (err < 0)
1663 		goto fail;
1664 
1665 	err = hci_set_per_adv_params_sync(hdev, instance, min_interval,
1666 					  max_interval);
1667 	if (err < 0)
1668 		goto fail;
1669 
1670 	err = hci_set_per_adv_data_sync(hdev, instance);
1671 	if (err < 0)
1672 		goto fail;
1673 
1674 	err = hci_enable_per_advertising_sync(hdev, instance);
1675 	if (err < 0)
1676 		goto fail;
1677 
1678 	return 0;
1679 
1680 fail:
1681 	if (added)
1682 		hci_remove_adv_instance(hdev, instance);
1683 
1684 	return err;
1685 }
1686 
1687 static int hci_start_adv_sync(struct hci_dev *hdev, u8 instance)
1688 {
1689 	int err;
1690 
1691 	if (ext_adv_capable(hdev))
1692 		return hci_start_ext_adv_sync(hdev, instance);
1693 
1694 	err = hci_update_adv_data_sync(hdev, instance);
1695 	if (err)
1696 		return err;
1697 
1698 	err = hci_update_scan_rsp_data_sync(hdev, instance);
1699 	if (err)
1700 		return err;
1701 
1702 	return hci_enable_advertising_sync(hdev);
1703 }
1704 
1705 int hci_enable_advertising_sync(struct hci_dev *hdev)
1706 {
1707 	struct adv_info *adv_instance;
1708 	struct hci_cp_le_set_adv_param cp;
1709 	u8 own_addr_type, enable = 0x01;
1710 	bool connectable;
1711 	u16 adv_min_interval, adv_max_interval;
1712 	u32 flags;
1713 	u8 status;
1714 
1715 	if (ext_adv_capable(hdev))
1716 		return hci_enable_ext_advertising_sync(hdev,
1717 						       hdev->cur_adv_instance);
1718 
1719 	flags = hci_adv_instance_flags(hdev, hdev->cur_adv_instance);
1720 	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
1721 
1722 	/* If the "connectable" instance flag was not set, then choose between
1723 	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1724 	 */
1725 	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1726 		      mgmt_get_connectable(hdev);
1727 
1728 	if (!is_advertising_allowed(hdev, connectable))
1729 		return -EINVAL;
1730 
1731 	status = hci_disable_advertising_sync(hdev);
1732 	if (status)
1733 		return status;
1734 
1735 	/* Clear the HCI_LE_ADV bit temporarily so that the
1736 	 * hci_update_random_address knows that it's safe to go ahead
1737 	 * and write a new random address. The flag will be set back on
1738 	 * as soon as the SET_ADV_ENABLE HCI command completes.
1739 	 */
1740 	hci_dev_clear_flag(hdev, HCI_LE_ADV);
1741 
1742 	/* Set require_privacy to true only when non-connectable
1743 	 * advertising is used. In that case it is fine to use a
1744 	 * non-resolvable private address.
1745 	 */
1746 	status = hci_update_random_address_sync(hdev, !connectable,
1747 						adv_use_rpa(hdev, flags),
1748 						&own_addr_type);
1749 	if (status)
1750 		return status;
1751 
1752 	memset(&cp, 0, sizeof(cp));
1753 
1754 	if (adv_instance) {
1755 		adv_min_interval = adv_instance->min_interval;
1756 		adv_max_interval = adv_instance->max_interval;
1757 	} else {
1758 		adv_min_interval = hdev->le_adv_min_interval;
1759 		adv_max_interval = hdev->le_adv_max_interval;
1760 	}
1761 
1762 	if (connectable) {
1763 		cp.type = LE_ADV_IND;
1764 	} else {
1765 		if (hci_adv_instance_is_scannable(hdev, hdev->cur_adv_instance))
1766 			cp.type = LE_ADV_SCAN_IND;
1767 		else
1768 			cp.type = LE_ADV_NONCONN_IND;
1769 
1770 		if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
1771 		    hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1772 			adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
1773 			adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
1774 		}
1775 	}
1776 
1777 	cp.min_interval = cpu_to_le16(adv_min_interval);
1778 	cp.max_interval = cpu_to_le16(adv_max_interval);
1779 	cp.own_address_type = own_addr_type;
1780 	cp.channel_map = hdev->le_adv_channel_map;
1781 
1782 	status = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_PARAM,
1783 				       sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1784 	if (status)
1785 		return status;
1786 
1787 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE,
1788 				     sizeof(enable), &enable, HCI_CMD_TIMEOUT);
1789 }
1790 
1791 static int enable_advertising_sync(struct hci_dev *hdev, void *data)
1792 {
1793 	return hci_enable_advertising_sync(hdev);
1794 }
1795 
1796 int hci_enable_advertising(struct hci_dev *hdev)
1797 {
1798 	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1799 	    list_empty(&hdev->adv_instances))
1800 		return 0;
1801 
1802 	return hci_cmd_sync_queue(hdev, enable_advertising_sync, NULL, NULL);
1803 }
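
/* Note: enable_advertising_sync()/hci_enable_advertising() show the
 * recurring pattern in this file: a *_sync() worker that may block on
 * HCI command completion plus a thin wrapper that queues it on the
 * command sync machinery.  A minimal sketch of a new pair (the frob
 * names are purely illustrative) would be:
 *
 *	static int frob_sync(struct hci_dev *hdev, void *data)
 *	{
 *		return hci_frob_sync(hdev);
 *	}
 *
 *	int hci_frob(struct hci_dev *hdev)
 *	{
 *		return hci_cmd_sync_queue(hdev, frob_sync, NULL, NULL);
 *	}
 */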
1804 
1805 int hci_remove_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance,
1806 				     struct sock *sk)
1807 {
1808 	int err;
1809 
1810 	if (!ext_adv_capable(hdev))
1811 		return 0;
1812 
1813 	err = hci_disable_ext_adv_instance_sync(hdev, instance);
1814 	if (err)
1815 		return err;
1816 
1817 	/* If request specifies an instance that doesn't exist, fail */
1818 	if (instance > 0 && !hci_find_adv_instance(hdev, instance))
1819 		return -EINVAL;
1820 
1821 	return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_REMOVE_ADV_SET,
1822 					sizeof(instance), &instance, 0,
1823 					HCI_CMD_TIMEOUT, sk);
1824 }
1825 
1826 static int remove_ext_adv_sync(struct hci_dev *hdev, void *data)
1827 {
1828 	struct adv_info *adv = data;
1829 	u8 instance = 0;
1830 
1831 	if (adv)
1832 		instance = adv->instance;
1833 
1834 	return hci_remove_ext_adv_instance_sync(hdev, instance, NULL);
1835 }
1836 
1837 int hci_remove_ext_adv_instance(struct hci_dev *hdev, u8 instance)
1838 {
1839 	struct adv_info *adv = NULL;
1840 
1841 	if (instance) {
1842 		adv = hci_find_adv_instance(hdev, instance);
1843 		if (!adv)
1844 			return -EINVAL;
1845 	}
1846 
1847 	return hci_cmd_sync_queue(hdev, remove_ext_adv_sync, adv, NULL);
1848 }
1849 
1850 int hci_le_terminate_big_sync(struct hci_dev *hdev, u8 handle, u8 reason)
1851 {
1852 	struct hci_cp_le_term_big cp;
1853 
1854 	memset(&cp, 0, sizeof(cp));
1855 	cp.handle = handle;
1856 	cp.reason = reason;
1857 
1858 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_TERM_BIG,
1859 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1860 }
1861 
1862 static int hci_set_ext_adv_data_sync(struct hci_dev *hdev, u8 instance)
1863 {
1864 	struct {
1865 		struct hci_cp_le_set_ext_adv_data cp;
1866 		u8 data[HCI_MAX_EXT_AD_LENGTH];
1867 	} pdu;
1868 	u8 len;
1869 	struct adv_info *adv = NULL;
1870 	int err;
1871 
1872 	memset(&pdu, 0, sizeof(pdu));
1873 
1874 	if (instance) {
1875 		adv = hci_find_adv_instance(hdev, instance);
1876 		if (!adv || !adv->adv_data_changed)
1877 			return 0;
1878 	}
1879 
1880 	len = eir_create_adv_data(hdev, instance, pdu.data);
1881 
1882 	pdu.cp.length = len;
1883 	pdu.cp.handle = instance;
1884 	pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1885 	pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1886 
1887 	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_DATA,
1888 				    sizeof(pdu.cp) + len, &pdu.cp,
1889 				    HCI_CMD_TIMEOUT);
1890 	if (err)
1891 		return err;
1892 
1893 	/* Update data if the command succeeds */
1894 	if (adv) {
1895 		adv->adv_data_changed = false;
1896 	} else {
1897 		memcpy(hdev->adv_data, pdu.data, len);
1898 		hdev->adv_data_len = len;
1899 	}
1900 
1901 	return 0;
1902 }
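
/* Note: the anonymous struct in hci_set_ext_adv_data_sync() places up
 * to HCI_MAX_EXT_AD_LENGTH bytes of advertising data directly behind
 * the fixed command header, so a single contiguous buffer of
 * sizeof(pdu.cp) + len bytes can be passed to __hci_cmd_sync_status()
 * without an extra allocation or copy.
 */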
1903 
1904 static int hci_set_adv_data_sync(struct hci_dev *hdev, u8 instance)
1905 {
1906 	struct hci_cp_le_set_adv_data cp;
1907 	u8 len;
1908 
1909 	memset(&cp, 0, sizeof(cp));
1910 
1911 	len = eir_create_adv_data(hdev, instance, cp.data);
1912 
1913 	/* There's nothing to do if the data hasn't changed */
1914 	if (hdev->adv_data_len == len &&
1915 	    memcmp(cp.data, hdev->adv_data, len) == 0)
1916 		return 0;
1917 
1918 	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1919 	hdev->adv_data_len = len;
1920 
1921 	cp.length = len;
1922 
1923 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_DATA,
1924 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1925 }
1926 
1927 int hci_update_adv_data_sync(struct hci_dev *hdev, u8 instance)
1928 {
1929 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1930 		return 0;
1931 
1932 	if (ext_adv_capable(hdev))
1933 		return hci_set_ext_adv_data_sync(hdev, instance);
1934 
1935 	return hci_set_adv_data_sync(hdev, instance);
1936 }
1937 
1938 int hci_schedule_adv_instance_sync(struct hci_dev *hdev, u8 instance,
1939 				   bool force)
1940 {
1941 	struct adv_info *adv = NULL;
1942 	u16 timeout;
1943 
1944 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) && !ext_adv_capable(hdev))
1945 		return -EPERM;
1946 
1947 	if (hdev->adv_instance_timeout)
1948 		return -EBUSY;
1949 
1950 	adv = hci_find_adv_instance(hdev, instance);
1951 	if (!adv)
1952 		return -ENOENT;
1953 
1954 	/* A zero timeout means unlimited advertising. As long as there is
1955 	 * only one instance, duration should be ignored. We still set a timeout
1956 	 * in case further instances are being added later on.
1957 	 *
1958 	 * If the remaining lifetime of the instance is more than the duration
1959 	 * then the timeout corresponds to the duration, otherwise it will be
1960 	 * reduced to the remaining instance lifetime.
1961 	 */
1962 	if (adv->timeout == 0 || adv->duration <= adv->remaining_time)
1963 		timeout = adv->duration;
1964 	else
1965 		timeout = adv->remaining_time;
1966 
1967 	/* The remaining time is being reduced unless the instance is being
1968 	 * advertised without time limit.
1969 	 */
1970 	if (adv->timeout)
1971 		adv->remaining_time = adv->remaining_time - timeout;
1972 
1973 	/* Only use work for scheduling instances with legacy advertising */
1974 	if (!ext_adv_capable(hdev)) {
1975 		hdev->adv_instance_timeout = timeout;
1976 		queue_delayed_work(hdev->req_workqueue,
1977 				   &hdev->adv_instance_expire,
1978 				   msecs_to_jiffies(timeout * 1000));
1979 	}
1980 
1981 	/* If we're just re-scheduling the same instance again then do not
1982 	 * execute any HCI commands. This happens when a single instance is
1983 	 * being advertised.
1984 	 */
1985 	if (!force && hdev->cur_adv_instance == instance &&
1986 	    hci_dev_test_flag(hdev, HCI_LE_ADV))
1987 		return 0;
1988 
1989 	hdev->cur_adv_instance = instance;
1990 
1991 	return hci_start_adv_sync(hdev, instance);
1992 }
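
/* Note: a worked example of the timeout selection above, assuming
 * illustrative values: with adv->timeout = 30, adv->duration = 10 and
 * adv->remaining_time = 25 (seconds), duration <= remaining_time, so
 * the instance is scheduled for 10s and remaining_time drops to 15.
 * Once remaining_time (say 5s) falls below the duration, the timeout
 * is clamped to those remaining 5 seconds instead.
 */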
1993 
1994 static int hci_clear_adv_sets_sync(struct hci_dev *hdev, struct sock *sk)
1995 {
1996 	int err;
1997 
1998 	if (!ext_adv_capable(hdev))
1999 		return 0;
2000 
2001 	/* Disable instance 0x00 to disable all instances */
2002 	err = hci_disable_ext_adv_instance_sync(hdev, 0x00);
2003 	if (err)
2004 		return err;
2005 
2006 	return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CLEAR_ADV_SETS,
2007 					0, NULL, 0, HCI_CMD_TIMEOUT, sk);
2008 }
2009 
2010 static int hci_clear_adv_sync(struct hci_dev *hdev, struct sock *sk, bool force)
2011 {
2012 	struct adv_info *adv, *n;
2013 
2014 	/* Remove all existing sets */
2015 	if (ext_adv_capable(hdev))
2016 		return hci_clear_adv_sets_sync(hdev, sk);
2020 
2021 	/* This is safe as long as no command is sent while the lock is
2022 	 * held.
2023 	 */
2024 	hci_dev_lock(hdev);
2025 
2026 	/* Cleanup non-ext instances */
2027 	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
2028 		u8 instance = adv->instance;
2029 		int err;
2030 
2031 		if (!(force || adv->timeout))
2032 			continue;
2033 
2034 		err = hci_remove_adv_instance(hdev, instance);
2035 		if (!err)
2036 			mgmt_advertising_removed(sk, hdev, instance);
2037 	}
2038 
2039 	hci_dev_unlock(hdev);
2040 
2041 	return 0;
2042 }
2043 
2044 static int hci_remove_adv_sync(struct hci_dev *hdev, u8 instance,
2045 			       struct sock *sk)
2046 {
2047 	int err;
2048 
2049 	/* If we use extended advertising, the instance has to be removed first. */
2050 	if (ext_adv_capable(hdev))
2051 		return hci_remove_ext_adv_instance_sync(hdev, instance, sk);
2054 
2055 	/* This is safe as long as no command is sent while the lock is
2056 	 * held.
2057 	 */
2058 	hci_dev_lock(hdev);
2059 
2060 	err = hci_remove_adv_instance(hdev, instance);
2061 	if (!err)
2062 		mgmt_advertising_removed(sk, hdev, instance);
2063 
2064 	hci_dev_unlock(hdev);
2065 
2066 	return err;
2067 }
2068 
2069 /* For a single instance:
2070  * - force == true: The instance will be removed even when its remaining
2071  *   lifetime is not zero.
2072  * - force == false: the instance will be deactivated but kept stored unless
2073  *   the remaining lifetime is zero.
2074  *
2075  * For instance == 0x00:
2076  * - force == true: All instances will be removed regardless of their timeout
2077  *   setting.
2078  * - force == false: Only instances that have a timeout will be removed.
2079  */
2080 int hci_remove_advertising_sync(struct hci_dev *hdev, struct sock *sk,
2081 				u8 instance, bool force)
2082 {
2083 	struct adv_info *next = NULL;
2084 	int err;
2085 
2086 	/* Cancel any timeout concerning the removed instance(s). */
2087 	if (!instance || hdev->cur_adv_instance == instance)
2088 		cancel_adv_timeout(hdev);
2089 
2090 	/* Get the next instance to advertise BEFORE we remove
2091 	 * the current one. This can be the same instance again
2092 	 * if there is only one instance.
2093 	 */
2094 	if (hdev->cur_adv_instance == instance)
2095 		next = hci_get_next_instance(hdev, instance);
2096 
2097 	if (!instance) {
2098 		err = hci_clear_adv_sync(hdev, sk, force);
2099 		if (err)
2100 			return err;
2101 	} else {
2102 		struct adv_info *adv = hci_find_adv_instance(hdev, instance);
2103 
2104 		if (force || (adv && adv->timeout && !adv->remaining_time)) {
2105 			/* Don't advertise a removed instance. */
2106 			if (next && next->instance == instance)
2107 				next = NULL;
2108 
2109 			err = hci_remove_adv_sync(hdev, instance, sk);
2110 			if (err)
2111 				return err;
2112 		}
2113 	}
2114 
2115 	if (!hdev_is_powered(hdev) || hci_dev_test_flag(hdev, HCI_ADVERTISING))
2116 		return 0;
2117 
2118 	if (next && !ext_adv_capable(hdev))
2119 		hci_schedule_adv_instance_sync(hdev, next->instance, false);
2120 
2121 	return 0;
2122 }
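
/* Note: illustrative calls for the force semantics documented above
 * (a NULL sk is assumed to be acceptable, as in remove_ext_adv_sync()):
 *
 *	hci_remove_advertising_sync(hdev, NULL, 0x00, true);
 *	(remove all instances unconditionally)
 *
 *	hci_remove_advertising_sync(hdev, NULL, 0x01, false);
 *	(deactivate instance 1, removing it only if its lifetime is over)
 */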
2123 
2124 int hci_read_rssi_sync(struct hci_dev *hdev, __le16 handle)
2125 {
2126 	struct hci_cp_read_rssi cp;
2127 
2128 	cp.handle = handle;
2129 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_RSSI,
2130 					sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2131 }
2132 
2133 int hci_read_clock_sync(struct hci_dev *hdev, struct hci_cp_read_clock *cp)
2134 {
2135 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_CLOCK,
2136 					sizeof(*cp), cp, HCI_CMD_TIMEOUT);
2137 }
2138 
2139 int hci_read_tx_power_sync(struct hci_dev *hdev, __le16 handle, u8 type)
2140 {
2141 	struct hci_cp_read_tx_power cp;
2142 
2143 	cp.handle = handle;
2144 	cp.type = type;
2145 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_TX_POWER,
2146 					sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2147 }
2148 
2149 int hci_disable_advertising_sync(struct hci_dev *hdev)
2150 {
2151 	u8 enable = 0x00;
2152 
2153 	/* If controller is not advertising we are done. */
2154 	if (!hci_dev_test_flag(hdev, HCI_LE_ADV))
2155 		return 0;
2156 
2157 	/* Disable instance 0x00 to disable all instances */
2158 	if (ext_adv_capable(hdev))
2159 		return hci_disable_ext_adv_instance_sync(hdev, 0x00);
2162 
2163 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE,
2164 				     sizeof(enable), &enable, HCI_CMD_TIMEOUT);
2165 }
2166 
2167 static int hci_le_set_ext_scan_enable_sync(struct hci_dev *hdev, u8 val,
2168 					   u8 filter_dup)
2169 {
2170 	struct hci_cp_le_set_ext_scan_enable cp;
2171 
2172 	memset(&cp, 0, sizeof(cp));
2173 	cp.enable = val;
2174 
2175 	if (hci_dev_test_flag(hdev, HCI_MESH))
2176 		cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
2177 	else
2178 		cp.filter_dup = filter_dup;
2179 
2180 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
2181 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2182 }
2183 
2184 static int hci_le_set_scan_enable_sync(struct hci_dev *hdev, u8 val,
2185 				       u8 filter_dup)
2186 {
2187 	struct hci_cp_le_set_scan_enable cp;
2188 
2189 	if (use_ext_scan(hdev))
2190 		return hci_le_set_ext_scan_enable_sync(hdev, val, filter_dup);
2191 
2192 	memset(&cp, 0, sizeof(cp));
2193 	cp.enable = val;
2194 
2195 	if (val && hci_dev_test_flag(hdev, HCI_MESH))
2196 		cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
2197 	else
2198 		cp.filter_dup = filter_dup;
2199 
2200 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_ENABLE,
2201 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2202 }
2203 
2204 static int hci_le_set_addr_resolution_enable_sync(struct hci_dev *hdev, u8 val)
2205 {
2206 	if (!use_ll_privacy(hdev))
2207 		return 0;
2208 
2209 	/* If the controller is already in the requested state we are done. */
2210 	if (val == hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
2211 		return 0;
2212 
2213 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE,
2214 				     sizeof(val), &val, HCI_CMD_TIMEOUT);
2215 }
2216 
2217 static int hci_scan_disable_sync(struct hci_dev *hdev)
2218 {
2219 	int err;
2220 
2221 	/* If controller is not scanning we are done. */
2222 	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2223 		return 0;
2224 
2225 	if (hdev->scanning_paused) {
2226 		bt_dev_dbg(hdev, "Scanning is paused for suspend");
2227 		return 0;
2228 	}
2229 
2230 	err = hci_le_set_scan_enable_sync(hdev, LE_SCAN_DISABLE, 0x00);
2231 	if (err) {
2232 		bt_dev_err(hdev, "Unable to disable scanning: %d", err);
2233 		return err;
2234 	}
2235 
2236 	return 0;
2237 }
2238 
2239 static bool scan_use_rpa(struct hci_dev *hdev)
2240 {
2241 	return hci_dev_test_flag(hdev, HCI_PRIVACY);
2242 }
2243 
2244 static void hci_start_interleave_scan(struct hci_dev *hdev)
2245 {
2246 	hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
2247 	queue_delayed_work(hdev->req_workqueue,
2248 			   &hdev->interleave_scan, 0);
2249 }
2250 
2251 static bool is_interleave_scanning(struct hci_dev *hdev)
2252 {
2253 	return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
2254 }
2255 
2256 static void cancel_interleave_scan(struct hci_dev *hdev)
2257 {
2258 	bt_dev_dbg(hdev, "cancelling interleave scan");
2259 
2260 	cancel_delayed_work_sync(&hdev->interleave_scan);
2261 
2262 	hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
2263 }
2264 
2265 /* Return true if this call started interleave scanning, otherwise
2266  * return false.
2267  */
2268 static bool hci_update_interleaved_scan_sync(struct hci_dev *hdev)
2269 {
2270 	/* Do interleaved scan only if all of the following are true:
2271 	 * - There is at least one ADV monitor
2272 	 * - At least one pending LE connection or one device to be scanned for
2273 	 * - Monitor offloading is not supported
2274 	 * If so, we should alternate between allowlist scan and one without
2275 	 * any filters to save power.
2276 	 */
2277 	bool use_interleaving = hci_is_adv_monitoring(hdev) &&
2278 				!(list_empty(&hdev->pend_le_conns) &&
2279 				  list_empty(&hdev->pend_le_reports)) &&
2280 				hci_get_adv_monitor_offload_ext(hdev) ==
2281 				    HCI_ADV_MONITOR_EXT_NONE;
2282 	bool is_interleaving = is_interleave_scanning(hdev);
2283 
2284 	if (use_interleaving && !is_interleaving) {
2285 		hci_start_interleave_scan(hdev);
2286 		bt_dev_dbg(hdev, "starting interleave scan");
2287 		return true;
2288 	}
2289 
2290 	if (!use_interleaving && is_interleaving)
2291 		cancel_interleave_scan(hdev);
2292 
2293 	return false;
2294 }
2295 
2296 /* Removes device from the resolving list if needed. */
2297 static int hci_le_del_resolve_list_sync(struct hci_dev *hdev,
2298 					bdaddr_t *bdaddr, u8 bdaddr_type)
2299 {
2300 	struct hci_cp_le_del_from_resolv_list cp;
2301 	struct bdaddr_list_with_irk *entry;
2302 
2303 	if (!use_ll_privacy(hdev))
2304 		return 0;
2305 
2306 	/* Check if the IRK has been programmed */
2307 	entry = hci_bdaddr_list_lookup_with_irk(&hdev->le_resolv_list, bdaddr,
2308 						bdaddr_type);
2309 	if (!entry)
2310 		return 0;
2311 
2312 	cp.bdaddr_type = bdaddr_type;
2313 	bacpy(&cp.bdaddr, bdaddr);
2314 
2315 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
2316 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2317 }
2318 
2319 static int hci_le_del_accept_list_sync(struct hci_dev *hdev,
2320 				       bdaddr_t *bdaddr, u8 bdaddr_type)
2321 {
2322 	struct hci_cp_le_del_from_accept_list cp;
2323 	int err;
2324 
2325 	/* Check if device is on accept list before removing it */
2326 	if (!hci_bdaddr_list_lookup(&hdev->le_accept_list, bdaddr, bdaddr_type))
2327 		return 0;
2328 
2329 	cp.bdaddr_type = bdaddr_type;
2330 	bacpy(&cp.bdaddr, bdaddr);
2331 
2332 	/* Ignore errors when removing from the resolving list as it is
2333 	 * likely that the device was never added.
2334 	 */
2335 	hci_le_del_resolve_list_sync(hdev, &cp.bdaddr, cp.bdaddr_type);
2336 
2337 	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST,
2338 				    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2339 	if (err) {
2340 		bt_dev_err(hdev, "Unable to remove from allow list: %d", err);
2341 		return err;
2342 	}
2343 
2344 	bt_dev_dbg(hdev, "Remove %pMR (0x%x) from allow list", &cp.bdaddr,
2345 		   cp.bdaddr_type);
2346 
2347 	return 0;
2348 }
2349 
2350 struct conn_params {
2351 	bdaddr_t addr;
2352 	u8 addr_type;
2353 	hci_conn_flags_t flags;
2354 	u8 privacy_mode;
2355 };
2356 
2357 /* Adds device to the resolving list if needed.
2358  * Setting params to NULL programs the local hdev->irk.
2359  */
2360 static int hci_le_add_resolve_list_sync(struct hci_dev *hdev,
2361 					struct conn_params *params)
2362 {
2363 	struct hci_cp_le_add_to_resolv_list cp;
2364 	struct smp_irk *irk;
2365 	struct bdaddr_list_with_irk *entry;
2366 	struct hci_conn_params *p;
2367 
2368 	if (!use_ll_privacy(hdev))
2369 		return 0;
2370 
2371 	/* Attempt to program local identity address, type and irk if params is
2372 	 * NULL.
2373 	 */
2374 	if (!params) {
2375 		if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
2376 			return 0;
2377 
2378 		hci_copy_identity_address(hdev, &cp.bdaddr, &cp.bdaddr_type);
2379 		memcpy(cp.peer_irk, hdev->irk, 16);
2380 		goto done;
2381 	}
2382 
2383 	irk = hci_find_irk_by_addr(hdev, &params->addr, params->addr_type);
2384 	if (!irk)
2385 		return 0;
2386 
2387 	/* Check if the IRK has _not_ been programmed yet. */
2388 	entry = hci_bdaddr_list_lookup_with_irk(&hdev->le_resolv_list,
2389 						&params->addr,
2390 						params->addr_type);
2391 	if (entry)
2392 		return 0;
2393 
2394 	cp.bdaddr_type = params->addr_type;
2395 	bacpy(&cp.bdaddr, &params->addr);
2396 	memcpy(cp.peer_irk, irk->val, 16);
2397 
2398 	/* Default privacy mode is always Network */
2399 	params->privacy_mode = HCI_NETWORK_PRIVACY;
2400 
2401 	rcu_read_lock();
2402 	p = hci_pend_le_action_lookup(&hdev->pend_le_conns,
2403 				      &params->addr, params->addr_type);
2404 	if (!p)
2405 		p = hci_pend_le_action_lookup(&hdev->pend_le_reports,
2406 					      &params->addr, params->addr_type);
2407 	if (p)
2408 		WRITE_ONCE(p->privacy_mode, HCI_NETWORK_PRIVACY);
2409 	rcu_read_unlock();
2410 
2411 done:
2412 	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
2413 		memcpy(cp.local_irk, hdev->irk, 16);
2414 	else
2415 		memset(cp.local_irk, 0, 16);
2416 
2417 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST,
2418 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2419 }
2420 
2421 /* Set Device Privacy Mode. */
2422 static int hci_le_set_privacy_mode_sync(struct hci_dev *hdev,
2423 					struct conn_params *params)
2424 {
2425 	struct hci_cp_le_set_privacy_mode cp;
2426 	struct smp_irk *irk;
2427 
2428 	/* If device privacy mode has already been set there is nothing to do */
2429 	if (params->privacy_mode == HCI_DEVICE_PRIVACY)
2430 		return 0;
2431 
2432 	/* Check if HCI_CONN_FLAG_DEVICE_PRIVACY has been set as it also
2433 	 * indicates that LL Privacy has been enabled and
2434 	 * HCI_OP_LE_SET_PRIVACY_MODE is supported.
2435 	 */
2436 	if (!(params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY))
2437 		return 0;
2438 
2439 	irk = hci_find_irk_by_addr(hdev, &params->addr, params->addr_type);
2440 	if (!irk)
2441 		return 0;
2442 
2443 	memset(&cp, 0, sizeof(cp));
2444 	cp.bdaddr_type = irk->addr_type;
2445 	bacpy(&cp.bdaddr, &irk->bdaddr);
2446 	cp.mode = HCI_DEVICE_PRIVACY;
2447 
2448 	/* Note: params->privacy_mode is not updated since it is a copy */
2449 
2450 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PRIVACY_MODE,
2451 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2452 }
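
/* Note: in network privacy, the default programmed by
 * hci_le_add_resolve_list_sync() above, the controller only accepts
 * resolvable private addresses from a peer whose IRK is on the
 * resolving list, while device privacy also accepts the peer's
 * identity address.  The command above opts a single peer into the
 * latter mode.
 */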
2453 
2454 /* Adds device to the allow list if needed. If the device uses an RPA
2455  * (has an IRK) this also attempts to program it into the resolving list
2456  * and properly set the privacy mode.
2457  */
2458 static int hci_le_add_accept_list_sync(struct hci_dev *hdev,
2459 				       struct conn_params *params,
2460 				       u8 *num_entries)
2461 {
2462 	struct hci_cp_le_add_to_accept_list cp;
2463 	int err;
2464 
2465 	/* During suspend, only wakeable devices can be in acceptlist */
2466 	if (hdev->suspended &&
2467 	    !(params->flags & HCI_CONN_FLAG_REMOTE_WAKEUP)) {
2468 		hci_le_del_accept_list_sync(hdev, &params->addr,
2469 					    params->addr_type);
2470 		return 0;
2471 	}
2472 
2473 	/* If the accept list is full, fall back to accepting all advertising */
2474 	if (*num_entries >= hdev->le_accept_list_size)
2475 		return -ENOSPC;
2476 
2477 	/* Accept list can not be used with RPAs */
2478 	if (!use_ll_privacy(hdev) &&
2479 	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type))
2480 		return -EINVAL;
2481 
2482 	/* Attempt to program the device into the resolving list first to
2483 	 * avoid having to roll back in case it fails, since the resolving
2484 	 * list is dynamic and can probably be smaller than the accept list.
2485 	 */
2486 	err = hci_le_add_resolve_list_sync(hdev, params);
2487 	if (err) {
2488 		bt_dev_err(hdev, "Unable to add to resolve list: %d", err);
2489 		return err;
2490 	}
2491 
2492 	/* Set Privacy Mode */
2493 	err = hci_le_set_privacy_mode_sync(hdev, params);
2494 	if (err) {
2495 		bt_dev_err(hdev, "Unable to set privacy mode: %d", err);
2496 		return err;
2497 	}
2498 
2499 	/* Check if already in accept list */
2500 	if (hci_bdaddr_list_lookup(&hdev->le_accept_list, &params->addr,
2501 				   params->addr_type))
2502 		return 0;
2503 
2504 	*num_entries += 1;
2505 	cp.bdaddr_type = params->addr_type;
2506 	bacpy(&cp.bdaddr, &params->addr);
2507 
2508 	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST,
2509 				    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2510 	if (err) {
2511 		bt_dev_err(hdev, "Unable to add to allow list: %d", err);
2512 		/* Rollback the device from the resolving list */
2513 		hci_le_del_resolve_list_sync(hdev, &cp.bdaddr, cp.bdaddr_type);
2514 		return err;
2515 	}
2516 
2517 	bt_dev_dbg(hdev, "Add %pMR (0x%x) to allow list", &cp.bdaddr,
2518 		   cp.bdaddr_type);
2519 
2520 	return 0;
2521 }
2522 
2523 /* This function disables/pauses all advertising instances */
2524 static int hci_pause_advertising_sync(struct hci_dev *hdev)
2525 {
2526 	int err;
2527 	int old_state;
2528 
2529 	/* If advertising has already been paused there is nothing to do. */
2530 	if (hdev->advertising_paused)
2531 		return 0;
2532 
2533 	bt_dev_dbg(hdev, "Pausing directed advertising");
2534 
2535 	/* Stop directed advertising */
2536 	old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
2537 	if (old_state) {
2538 		/* When discoverable timeout triggers, then just make sure
2539 		 * the limited discoverable flag is cleared. Even in the case
2540 		 * of a timeout triggered from general discoverable, it is
2541 		 * safe to unconditionally clear the flag.
2542 		 */
2543 		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2544 		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2545 		hdev->discov_timeout = 0;
2546 	}
2547 
2548 	bt_dev_dbg(hdev, "Pausing advertising instances");
2549 
2550 	/* Call to disable any advertisements active on the controller.
2551 	 * This will succeed even if no advertisements are configured.
2552 	 */
2553 	err = hci_disable_advertising_sync(hdev);
2554 	if (err)
2555 		return err;
2556 
2557 	/* If we are using software rotation, pause the loop */
2558 	if (!ext_adv_capable(hdev))
2559 		cancel_adv_timeout(hdev);
2560 
2561 	hdev->advertising_paused = true;
2562 	hdev->advertising_old_state = old_state;
2563 
2564 	return 0;
2565 }
2566 
2567 /* This function enables all user advertising instances */
2568 static int hci_resume_advertising_sync(struct hci_dev *hdev)
2569 {
2570 	struct adv_info *adv, *tmp;
2571 	int err = 0;
2572 
2573 	/* If advertising has not been paused there is nothing to do. */
2574 	if (!hdev->advertising_paused)
2575 		return 0;
2576 
2577 	/* Resume directed advertising */
2578 	hdev->advertising_paused = false;
2579 	if (hdev->advertising_old_state) {
2580 		hci_dev_set_flag(hdev, HCI_ADVERTISING);
2581 		hdev->advertising_old_state = 0;
2582 	}
2583 
2584 	bt_dev_dbg(hdev, "Resuming advertising instances");
2585 
2586 	if (ext_adv_capable(hdev)) {
2587 		/* Call for each tracked instance to be re-enabled */
2588 		list_for_each_entry_safe(adv, tmp, &hdev->adv_instances, list) {
2589 			err = hci_enable_ext_advertising_sync(hdev,
2590 							      adv->instance);
2591 			if (!err)
2592 				continue;
2593 
2594 			/* If the instance cannot be resumed remove it */
2595 			hci_remove_ext_adv_instance_sync(hdev, adv->instance,
2596 							 NULL);
2597 		}
2598 	} else {
2599 		/* Schedule for most recent instance to be restarted and begin
2600 		 * the software rotation loop
2601 		 */
2602 		err = hci_schedule_adv_instance_sync(hdev,
2603 						     hdev->cur_adv_instance,
2604 						     true);
2605 	}
2606 
2607 	hdev->advertising_paused = false;
2608 
2609 	return err;
2610 }
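
/* Note: hci_pause_advertising_sync() and hci_resume_advertising_sync()
 * are meant to bracket operations the controller rejects while
 * advertising is active, e.g. (as done in hci_update_accept_list_sync()
 * below):
 *
 *	hci_pause_advertising_sync(hdev);
 *	(modify the resolving/accept list here)
 *	hci_resume_advertising_sync(hdev);
 */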
2611 
2612 static int hci_pause_addr_resolution(struct hci_dev *hdev)
2613 {
2614 	int err;
2615 
2616 	if (!use_ll_privacy(hdev))
2617 		return 0;
2618 
2619 	if (!hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
2620 		return 0;
2621 
2622 	/* Cannot disable addr resolution if scanning is enabled or
2623 	 * when initiating an LE connection.
2624 	 */
2625 	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2626 	    hci_lookup_le_connect(hdev)) {
2627 		bt_dev_err(hdev, "Command not allowed when scan/LE connect");
2628 		return -EPERM;
2629 	}
2630 
2631 	/* Cannot disable addr resolution if advertising is enabled. */
2632 	err = hci_pause_advertising_sync(hdev);
2633 	if (err) {
2634 		bt_dev_err(hdev, "Pause advertising failed: %d", err);
2635 		return err;
2636 	}
2637 
2638 	err = hci_le_set_addr_resolution_enable_sync(hdev, 0x00);
2639 	if (err)
2640 		bt_dev_err(hdev, "Unable to disable Address Resolution: %d",
2641 			   err);
2642 
2643 	/* Return if address resolution was disabled and RPA is in use. */
2644 	if (!err && scan_use_rpa(hdev))
2645 		return 0;
2646 
2647 	hci_resume_advertising_sync(hdev);
2648 	return err;
2649 }
2650 
2651 struct sk_buff *hci_read_local_oob_data_sync(struct hci_dev *hdev,
2652 					     bool extended, struct sock *sk)
2653 {
2654 	u16 opcode = extended ? HCI_OP_READ_LOCAL_OOB_EXT_DATA :
2655 					HCI_OP_READ_LOCAL_OOB_DATA;
2656 
2657 	return __hci_cmd_sync_sk(hdev, opcode, 0, NULL, 0, HCI_CMD_TIMEOUT, sk);
2658 }
2659 
2660 static struct conn_params *conn_params_copy(struct list_head *list, size_t *n)
2661 {
2662 	struct hci_conn_params *params;
2663 	struct conn_params *p;
2664 	size_t i;
2665 
2666 	rcu_read_lock();
2667 
2668 	i = 0;
2669 	list_for_each_entry_rcu(params, list, action)
2670 		++i;
2671 	*n = i;
2672 
2673 	rcu_read_unlock();
2674 
2675 	p = kvcalloc(*n, sizeof(struct conn_params), GFP_KERNEL);
2676 	if (!p)
2677 		return NULL;
2678 
2679 	rcu_read_lock();
2680 
2681 	i = 0;
2682 	list_for_each_entry_rcu(params, list, action) {
2683 		/* Racing adds are handled in next scan update */
2684 		if (i >= *n)
2685 			break;
2686 
2687 		/* No hdev->lock, but: addr, addr_type are immutable.
2688 		 * privacy_mode is only written by us or in
2689 		 * hci_cc_le_set_privacy_mode that we wait for.
2690 		 * We should be idempotent so MGMT updating flags
2691 		 * while we are processing is OK.
2692 		 */
2693 		bacpy(&p[i].addr, &params->addr);
2694 		p[i].addr_type = params->addr_type;
2695 		p[i].flags = READ_ONCE(params->flags);
2696 		p[i].privacy_mode = READ_ONCE(params->privacy_mode);
2697 		++i;
2698 	}
2699 
2700 	rcu_read_unlock();
2701 
2702 	*n = i;
2703 	return p;
2704 }
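
/* Note: conn_params_copy() is a two-pass RCU snapshot: count the
 * entries under rcu_read_lock(), allocate with kvcalloc() outside the
 * read section (GFP_KERNEL may sleep), then copy under the lock again
 * while tolerating concurrent additions by capping at the first count.
 * The snapshot lets the caller iterate and issue blocking HCI commands
 * without holding RCU or hdev->lock.
 */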
2705 
2706 /* Device must not be scanning when updating the accept list.
2707  *
2708  * Update is done using the following sequence:
2709  *
2710  * use_ll_privacy((Disable Advertising) -> Disable Resolving List) ->
2711  * Remove Devices From Accept List ->
2712  * (has IRK && use_ll_privacy(Remove Devices From Resolving List))->
2713  * Add Devices to Accept List ->
2714  * (has IRK && use_ll_privacy(Remove Devices From Resolving List)) ->
2715  * use_ll_privacy(Enable Resolving List -> (Enable Advertising)) ->
2716  * Enable Scanning
2717  *
2718  * In case of failure advertising shall be restored to its original state
2719  * and the returned filter policy disables the accept list, since either
2720  * the accept list or the resolving list could not be programmed.
2722  */
2723 static u8 hci_update_accept_list_sync(struct hci_dev *hdev)
2724 {
2725 	struct conn_params *params;
2726 	struct bdaddr_list *b, *t;
2727 	u8 num_entries = 0;
2728 	bool pend_conn, pend_report;
2729 	u8 filter_policy;
2730 	size_t i, n;
2731 	int err;
2732 
2733 	/* Pause advertising if resolving list can be used as controllers
2734 	 * cannot accept resolving list modifications while advertising.
2735 	 */
2736 	if (use_ll_privacy(hdev)) {
2737 		err = hci_pause_advertising_sync(hdev);
2738 		if (err) {
2739 			bt_dev_err(hdev, "pause advertising failed: %d", err);
2740 			return 0x00;
2741 		}
2742 	}
2743 
2744 	/* Disable address resolution while reprogramming accept list since
2745 	 * devices that do have an IRK will be programmed in the resolving list
2746 	 * when LL Privacy is enabled.
2747 	 */
2748 	err = hci_le_set_addr_resolution_enable_sync(hdev, 0x00);
2749 	if (err) {
2750 		bt_dev_err(hdev, "Unable to disable LL privacy: %d", err);
2751 		goto done;
2752 	}
2753 
2754 	/* Go through the current accept list programmed into the
2755 	 * controller one by one and check if that address is connected or is
2756 	 * still in the list of pending connections or list of devices to
2757 	 * report. If not present in either list, then remove it from
2758 	 * the controller.
2759 	 */
2760 	list_for_each_entry_safe(b, t, &hdev->le_accept_list, list) {
2761 		if (hci_conn_hash_lookup_le(hdev, &b->bdaddr, b->bdaddr_type))
2762 			continue;
2763 
2764 		/* Pointers not dereferenced, no locks needed */
2765 		pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
2766 						      &b->bdaddr,
2767 						      b->bdaddr_type);
2768 		pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
2769 							&b->bdaddr,
2770 							b->bdaddr_type);
2771 
2772 		/* If the device is not likely to connect or report,
2773 		 * remove it from the acceptlist.
2774 		 */
2775 		if (!pend_conn && !pend_report) {
2776 			hci_le_del_accept_list_sync(hdev, &b->bdaddr,
2777 						    b->bdaddr_type);
2778 			continue;
2779 		}
2780 
2781 		num_entries++;
2782 	}
2783 
2784 	/* Since all no longer valid accept list entries have been
2785 	 * removed, walk through the list of pending connections
2786 	 * and ensure that any new device gets programmed into
2787 	 * the controller.
2788 	 *
2789 	 * If the list of devices is larger than the list of
2790 	 * available accept list entries in the controller, then
2791 	 * just abort and return a filter policy value to not use the
2792 	 * accept list.
2793 	 *
2794 	 * The list and params may be mutated while we wait for events,
2795 	 * so make a copy and iterate it.
2796 	 */
2797 
2798 	params = conn_params_copy(&hdev->pend_le_conns, &n);
2799 	if (!params) {
2800 		err = -ENOMEM;
2801 		goto done;
2802 	}
2803 
2804 	for (i = 0; i < n; ++i) {
2805 		err = hci_le_add_accept_list_sync(hdev, &params[i],
2806 						  &num_entries);
2807 		if (err) {
2808 			kvfree(params);
2809 			goto done;
2810 		}
2811 	}
2812 
2813 	kvfree(params);
2814 
2815 	/* After adding all new pending connections, walk through
2816 	 * the list of pending reports and also add these to the
2817 	 * accept list if there is still space. Abort if space runs out.
2818 	 */
2819 
2820 	params = conn_params_copy(&hdev->pend_le_reports, &n);
2821 	if (!params) {
2822 		err = -ENOMEM;
2823 		goto done;
2824 	}
2825 
2826 	for (i = 0; i < n; ++i) {
2827 		err = hci_le_add_accept_list_sync(hdev, &params[i],
2828 						  &num_entries);
2829 		if (err) {
2830 			kvfree(params);
2831 			goto done;
2832 		}
2833 	}
2834 
2835 	kvfree(params);
2836 
2837 	/* Use the allowlist unless the following conditions are all true:
2838 	 * - We are not currently suspending
2839 	 * - There are 1 or more ADV monitors registered and it's not offloaded
2840 	 * - Interleaved scanning is not currently using the allowlist
2841 	 */
2842 	if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
2843 	    hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE &&
2844 	    hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
2845 		err = -EINVAL;
2846 
2847 done:
2848 	filter_policy = err ? 0x00 : 0x01;
2849 
2850 	/* Enable address resolution when LL Privacy is enabled. */
2851 	err = hci_le_set_addr_resolution_enable_sync(hdev, 0x01);
2852 	if (err)
2853 		bt_dev_err(hdev, "Unable to enable LL privacy: %d", err);
2854 
2855 	/* Resume advertising if it was paused */
2856 	if (use_ll_privacy(hdev))
2857 		hci_resume_advertising_sync(hdev);
2858 
2859 	/* Select filter policy to use accept list */
2860 	return filter_policy;
2861 }
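
/* Note: the returned value feeds the LE scan filter policy: 0x00 means
 * accept all advertisers (accept list unusable), 0x01 restricts reports
 * to accept list entries.  hci_passive_scan_sync() below may OR in 0x02
 * to select the extended policies that additionally pass directed
 * advertising addressed to our RPA.
 */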
2862 
2863 static void hci_le_scan_phy_params(struct hci_cp_le_scan_phy_params *cp,
2864 				   u8 type, u16 interval, u16 window)
2865 {
2866 	cp->type = type;
2867 	cp->interval = cpu_to_le16(interval);
2868 	cp->window = cpu_to_le16(window);
2869 }
2870 
2871 static int hci_le_set_ext_scan_param_sync(struct hci_dev *hdev, u8 type,
2872 					  u16 interval, u16 window,
2873 					  u8 own_addr_type, u8 filter_policy)
2874 {
2875 	struct hci_cp_le_set_ext_scan_params *cp;
2876 	struct hci_cp_le_scan_phy_params *phy;
2877 	u8 data[sizeof(*cp) + sizeof(*phy) * 2];
2878 	u8 num_phy = 0x00;
2879 
2880 	cp = (void *)data;
2881 	phy = (void *)cp->data;
2882 
2883 	memset(data, 0, sizeof(data));
2884 
2885 	cp->own_addr_type = own_addr_type;
2886 	cp->filter_policy = filter_policy;
2887 
2888 	/* If PA Sync is in progress, select the PHY based on the
2889 	 * hci_conn.iso_qos.
2890 	 */
2891 	if (hci_dev_test_flag(hdev, HCI_PA_SYNC)) {
2892 		struct hci_cp_le_add_to_accept_list *sent;
2893 
2894 		sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
2895 		if (sent) {
2896 			struct hci_conn *conn;
2897 
2898 			conn = hci_conn_hash_lookup_ba(hdev, ISO_LINK,
2899 						       &sent->bdaddr);
2900 			if (conn) {
2901 				struct bt_iso_qos *qos = &conn->iso_qos;
2902 
2903 				if (qos->bcast.in.phy & BT_ISO_PHY_1M ||
2904 				    qos->bcast.in.phy & BT_ISO_PHY_2M) {
2905 					cp->scanning_phys |= LE_SCAN_PHY_1M;
2906 					hci_le_scan_phy_params(phy, type,
2907 							       interval,
2908 							       window);
2909 					num_phy++;
2910 					phy++;
2911 				}
2912 
2913 				if (qos->bcast.in.phy & BT_ISO_PHY_CODED) {
2914 					cp->scanning_phys |= LE_SCAN_PHY_CODED;
2915 					hci_le_scan_phy_params(phy, type,
2916 							       interval * 3,
2917 							       window * 3);
2918 					num_phy++;
2919 					phy++;
2920 				}
2921 
2922 				if (num_phy)
2923 					goto done;
2924 			}
2925 		}
2926 	}
2927 
2928 	if (scan_1m(hdev) || scan_2m(hdev)) {
2929 		cp->scanning_phys |= LE_SCAN_PHY_1M;
2930 		hci_le_scan_phy_params(phy, type, interval, window);
2931 		num_phy++;
2932 		phy++;
2933 	}
2934 
2935 	if (scan_coded(hdev)) {
2936 		cp->scanning_phys |= LE_SCAN_PHY_CODED;
2937 		hci_le_scan_phy_params(phy, type, interval * 3, window * 3);
2938 		num_phy++;
2939 		phy++;
2940 	}
2941 
2942 done:
2943 	if (!num_phy)
2944 		return -EINVAL;
2945 
2946 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
2947 				     sizeof(*cp) + sizeof(*phy) * num_phy,
2948 				     data, HCI_CMD_TIMEOUT);
2949 }
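
/* Note: scan interval and window are in 0.625 ms units, so e.g. an
 * interval of 0x0060 (60 ms) becomes 0x0120 (180 ms) on the coded PHY
 * above; the tripling presumably compensates for the lower symbol rate
 * of the long range PHY.
 */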
2950 
2951 static int hci_le_set_scan_param_sync(struct hci_dev *hdev, u8 type,
2952 				      u16 interval, u16 window,
2953 				      u8 own_addr_type, u8 filter_policy)
2954 {
2955 	struct hci_cp_le_set_scan_param cp;
2956 
2957 	if (use_ext_scan(hdev))
2958 		return hci_le_set_ext_scan_param_sync(hdev, type, interval,
2959 						      window, own_addr_type,
2960 						      filter_policy);
2961 
2962 	memset(&cp, 0, sizeof(cp));
2963 	cp.type = type;
2964 	cp.interval = cpu_to_le16(interval);
2965 	cp.window = cpu_to_le16(window);
2966 	cp.own_address_type = own_addr_type;
2967 	cp.filter_policy = filter_policy;
2968 
2969 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_PARAM,
2970 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2971 }
2972 
2973 static int hci_start_scan_sync(struct hci_dev *hdev, u8 type, u16 interval,
2974 			       u16 window, u8 own_addr_type, u8 filter_policy,
2975 			       u8 filter_dup)
2976 {
2977 	int err;
2978 
2979 	if (hdev->scanning_paused) {
2980 		bt_dev_dbg(hdev, "Scanning is paused for suspend");
2981 		return 0;
2982 	}
2983 
2984 	err = hci_le_set_scan_param_sync(hdev, type, interval, window,
2985 					 own_addr_type, filter_policy);
2986 	if (err)
2987 		return err;
2988 
2989 	return hci_le_set_scan_enable_sync(hdev, LE_SCAN_ENABLE, filter_dup);
2990 }
2991 
2992 static int hci_passive_scan_sync(struct hci_dev *hdev)
2993 {
2994 	u8 own_addr_type;
2995 	u8 filter_policy;
2996 	u16 window, interval;
2997 	u8 filter_dups = LE_SCAN_FILTER_DUP_ENABLE;
2998 	int err;
2999 
3000 	if (hdev->scanning_paused) {
3001 		bt_dev_dbg(hdev, "Scanning is paused for suspend");
3002 		return 0;
3003 	}
3004 
3005 	err = hci_scan_disable_sync(hdev);
3006 	if (err) {
3007 		bt_dev_err(hdev, "disable scanning failed: %d", err);
3008 		return err;
3009 	}
3010 
3011 	/* Set require_privacy to false since no SCAN_REQ are sent
3012 	 * during passive scanning. Not using a non-resolvable address
3013 	 * here is important so that peer devices using direct
3014 	 * advertising with our address will be correctly reported
3015 	 * by the controller.
3016 	 */
3017 	if (hci_update_random_address_sync(hdev, false, scan_use_rpa(hdev),
3018 					   &own_addr_type))
3019 		return 0;
3020 
3021 	if (hdev->enable_advmon_interleave_scan &&
3022 	    hci_update_interleaved_scan_sync(hdev))
3023 		return 0;
3024 
3025 	bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);
3026 
3027 	/* Adding or removing entries from the accept list must
3028 	 * happen before enabling scanning. The controller does
3029 	 * not allow accept list modification while scanning.
3030 	 */
3031 	filter_policy = hci_update_accept_list_sync(hdev);
3032 
3033 	/* If suspended and filter_policy set to 0x00 (no acceptlist) then
3034 	 * passive scanning cannot be started since that would require the host
3035 	 * to be woken up to process the reports.
3036 	 */
3037 	if (hdev->suspended && !filter_policy) {
3038 		/* If the accept list is empty there is no need to scan
3039 		 * while suspended.
3040 		 */
3041 		if (list_empty(&hdev->le_accept_list))
3042 			return 0;
3043 
3044 		/* If there are devices in the accept_list it means some
3045 		 * devices could not be programmed, which in the non-suspended
3046 		 * case means filter_policy needs to be set to 0x00 so the
3047 		 * host does the filtering. Since this is the suspended case,
3048 		 * we can ignore the devices needing host filtering so that
3049 		 * devices in the acceptlist are able to wake up the system.
3050 		 */
3051 		filter_policy = 0x01;
3052 	}
3053 
3054 	/* When the controller is using random resolvable addresses and
3055 	 * thus has LE privacy enabled, controllers with Extended Scanner
3056 	 * Filter Policies support can enable handling of directed
3057 	 * advertising.
3058 	 *
3059 	 * So instead of using filter policies 0x00 (no acceptlist)
3060 	 * and 0x01 (acceptlist enabled) use the new filter policies
3061 	 * 0x02 (no acceptlist) and 0x03 (acceptlist enabled).
3062 	 */
3063 	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
3064 	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
3065 		filter_policy |= 0x02;
3066 
3067 	if (hdev->suspended) {
3068 		window = hdev->le_scan_window_suspend;
3069 		interval = hdev->le_scan_int_suspend;
3070 	} else if (hci_is_le_conn_scanning(hdev)) {
3071 		window = hdev->le_scan_window_connect;
3072 		interval = hdev->le_scan_int_connect;
3073 	} else if (hci_is_adv_monitoring(hdev)) {
3074 		window = hdev->le_scan_window_adv_monitor;
3075 		interval = hdev->le_scan_int_adv_monitor;
3076 
3077 		/* Disable duplicates filter when scanning for advertisement
3078 		 * monitor for the following reasons.
3079 		 *
3080 		 * For HW pattern filtering (e.g. MSFT), Realtek and Qualcomm
3081 		 * controllers ignore RSSI_Sampling_Period when the duplicates
3082 		 * filter is enabled.
3083 		 *
3084 		 * For SW pattern filtering, when we're not doing interleaved
3085 		 * scanning, it is necessary to disable duplicates filter,
3086 		 * otherwise hosts can only receive one advertisement and it's
3087 		 * impossible to know if a peer is still in range.
3088 		 */
3089 		filter_dups = LE_SCAN_FILTER_DUP_DISABLE;
3090 	} else {
3091 		window = hdev->le_scan_window;
3092 		interval = hdev->le_scan_interval;
3093 	}
3094 
3095 	/* Disable all filtering for Mesh */
3096 	if (hci_dev_test_flag(hdev, HCI_MESH)) {
3097 		filter_policy = 0;
3098 		filter_dups = LE_SCAN_FILTER_DUP_DISABLE;
3099 	}
3100 
3101 	bt_dev_dbg(hdev, "LE passive scan with acceptlist = %d", filter_policy);
3102 
3103 	return hci_start_scan_sync(hdev, LE_SCAN_PASSIVE, interval, window,
3104 				   own_addr_type, filter_policy, filter_dups);
3105 }
3106 
3107 /* This function controls the passive scanning based on hdev->pend_le_conns
3108  * list. If there are pending LE connections we start the background scanning,
3109  * otherwise we stop it in the following sequence:
3110  *
3111  * If there are devices to scan:
3112  *
3113  * Disable Scanning -> Update Accept List ->
3114  * use_ll_privacy((Disable Advertising) -> Disable Resolving List ->
3115  * Update Resolving List -> Enable Resolving List -> (Enable Advertising)) ->
3116  * Enable Scanning
3117  *
3118  * Otherwise:
3119  *
3120  * Disable Scanning
3121  */
3122 int hci_update_passive_scan_sync(struct hci_dev *hdev)
3123 {
3124 	int err;
3125 
3126 	if (!test_bit(HCI_UP, &hdev->flags) ||
3127 	    test_bit(HCI_INIT, &hdev->flags) ||
3128 	    hci_dev_test_flag(hdev, HCI_SETUP) ||
3129 	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
3130 	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
3131 	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
3132 		return 0;
3133 
3134 	/* No point in doing scanning if LE support hasn't been enabled */
3135 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
3136 		return 0;
3137 
3138 	/* If discovery is active don't interfere with it */
3139 	if (hdev->discovery.state != DISCOVERY_STOPPED)
3140 		return 0;
3141 
3142 	/* Reset RSSI and UUID filters when starting background scanning
3143 	 * since these filters are meant for service discovery only.
3144 	 *
3145 	 * The Start Discovery and Start Service Discovery operations
3146 	 * ensure to set proper values for RSSI threshold and UUID
3147 	 * filter list. So it is safe to just reset them here.
3148 	 */
3149 	hci_discovery_filter_clear(hdev);
3150 
3151 	bt_dev_dbg(hdev, "ADV monitoring is %s",
3152 		   hci_is_adv_monitoring(hdev) ? "on" : "off");
3153 
3154 	if (!hci_dev_test_flag(hdev, HCI_MESH) &&
3155 	    list_empty(&hdev->pend_le_conns) &&
3156 	    list_empty(&hdev->pend_le_reports) &&
3157 	    !hci_is_adv_monitoring(hdev) &&
3158 	    !hci_dev_test_flag(hdev, HCI_PA_SYNC)) {
3159 		/* If there are no pending LE connections, no devices
3160 		 * to be scanned for and no ADV monitors, we should stop the
3161 		 * background scanning.
3162 		 */
3163 
3164 		bt_dev_dbg(hdev, "stopping background scanning");
3165 
3166 		err = hci_scan_disable_sync(hdev);
3167 		if (err)
3168 			bt_dev_err(hdev, "stop background scanning failed: %d",
3169 				   err);
3170 	} else {
3171 		/* If there is at least one pending LE connection, we should
3172 		 * keep the background scan running.
3173 		 */
3174 
3175 		/* If controller is connecting, we should not start scanning
3176 		 * since some controllers are not able to scan and connect at
3177 		 * the same time.
3178 		 */
3179 		if (hci_lookup_le_connect(hdev))
3180 			return 0;
3181 
3182 		bt_dev_dbg(hdev, "start background scanning");
3183 
3184 		err = hci_passive_scan_sync(hdev);
3185 		if (err)
3186 			bt_dev_err(hdev, "start background scanning failed: %d",
3187 				   err);
3188 	}
3189 
3190 	return err;
3191 }
3192 
3193 static int update_scan_sync(struct hci_dev *hdev, void *data)
3194 {
3195 	return hci_update_scan_sync(hdev);
3196 }
3197 
3198 int hci_update_scan(struct hci_dev *hdev)
3199 {
3200 	return hci_cmd_sync_queue(hdev, update_scan_sync, NULL, NULL);
3201 }
3202 
3203 static int update_passive_scan_sync(struct hci_dev *hdev, void *data)
3204 {
3205 	return hci_update_passive_scan_sync(hdev);
3206 }
3207 
3208 int hci_update_passive_scan(struct hci_dev *hdev)
3209 {
3210 	/* Only queue if it would have any effect */
3211 	if (!test_bit(HCI_UP, &hdev->flags) ||
3212 	    test_bit(HCI_INIT, &hdev->flags) ||
3213 	    hci_dev_test_flag(hdev, HCI_SETUP) ||
3214 	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
3215 	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
3216 	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
3217 		return 0;
3218 
3219 	return hci_cmd_sync_queue_once(hdev, update_passive_scan_sync, NULL,
3220 				       NULL);
3221 }
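
/* Note: unlike hci_update_scan() above, this uses
 * hci_cmd_sync_queue_once() so that repeated triggers collapse into a
 * single queued passive scan update instead of piling up duplicates.
 */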
3222 
3223 int hci_write_sc_support_sync(struct hci_dev *hdev, u8 val)
3224 {
3225 	int err;
3226 
3227 	if (!bredr_sc_enabled(hdev) || lmp_host_sc_capable(hdev))
3228 		return 0;
3229 
3230 	err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SC_SUPPORT,
3231 				    sizeof(val), &val, HCI_CMD_TIMEOUT);
3232 
3233 	if (!err) {
3234 		if (val) {
3235 			hdev->features[1][0] |= LMP_HOST_SC;
3236 			hci_dev_set_flag(hdev, HCI_SC_ENABLED);
3237 		} else {
3238 			hdev->features[1][0] &= ~LMP_HOST_SC;
3239 			hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
3240 		}
3241 	}
3242 
3243 	return err;
3244 }
3245 
3246 int hci_write_ssp_mode_sync(struct hci_dev *hdev, u8 mode)
3247 {
3248 	int err;
3249 
3250 	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED) ||
3251 	    lmp_host_ssp_capable(hdev))
3252 		return 0;
3253 
3254 	if (!mode && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS)) {
3255 		__hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
3256 				      sizeof(mode), &mode, HCI_CMD_TIMEOUT);
3257 	}
3258 
3259 	err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_MODE,
3260 				    sizeof(mode), &mode, HCI_CMD_TIMEOUT);
3261 	if (err)
3262 		return err;
3263 
3264 	return hci_write_sc_support_sync(hdev, 0x01);
3265 }
3266 
3267 int hci_write_le_host_supported_sync(struct hci_dev *hdev, u8 le, u8 simul)
3268 {
3269 	struct hci_cp_write_le_host_supported cp;
3270 
3271 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
3272 	    !lmp_bredr_capable(hdev))
3273 		return 0;
3274 
3275 	/* Check first if we already have the right host state
3276 	 * (host features set)
3277 	 */
3278 	if (le == lmp_host_le_capable(hdev) &&
3279 	    simul == lmp_host_le_br_capable(hdev))
3280 		return 0;
3281 
3282 	memset(&cp, 0, sizeof(cp));
3283 
3284 	cp.le = le;
3285 	cp.simul = simul;
3286 
3287 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED,
3288 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
3289 }
3290 
3291 static int hci_powered_update_adv_sync(struct hci_dev *hdev)
3292 {
3293 	struct adv_info *adv, *tmp;
3294 	int err;
3295 
3296 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
3297 		return 0;
3298 
3299 	/* If RPA Resolution has not been enabled yet it means the
3300 	 * resolving list is empty and we should attempt to program the
3301 	 * local IRK in order to support using own_addr_type
3302 	 * ADDR_LE_DEV_RANDOM_RESOLVED (0x03).
3303 	 */
3304 	if (!hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) {
3305 		hci_le_add_resolve_list_sync(hdev, NULL);
3306 		hci_le_set_addr_resolution_enable_sync(hdev, 0x01);
3307 	}
3308 
3309 	/* Make sure the controller has a good default for
3310 	 * advertising data. This also applies to the case
3311 	 * where BR/EDR was toggled during the AUTO_OFF phase.
3312 	 */
3313 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
3314 	    list_empty(&hdev->adv_instances)) {
3315 		if (ext_adv_capable(hdev)) {
3316 			err = hci_setup_ext_adv_instance_sync(hdev, 0x00);
3317 			if (!err)
3318 				hci_update_scan_rsp_data_sync(hdev, 0x00);
3319 		} else {
3320 			err = hci_update_adv_data_sync(hdev, 0x00);
3321 			if (!err)
3322 				hci_update_scan_rsp_data_sync(hdev, 0x00);
3323 		}
3324 
3325 		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
3326 			hci_enable_advertising_sync(hdev);
3327 	}
3328 
3329 	/* Call for each tracked instance to be scheduled */
3330 	list_for_each_entry_safe(adv, tmp, &hdev->adv_instances, list)
3331 		hci_schedule_adv_instance_sync(hdev, adv->instance, true);
3332 
3333 	return 0;
3334 }
3335 
3336 static int hci_write_auth_enable_sync(struct hci_dev *hdev)
3337 {
3338 	u8 link_sec;
3339 
3340 	link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
3341 	if (link_sec == test_bit(HCI_AUTH, &hdev->flags))
3342 		return 0;
3343 
3344 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_AUTH_ENABLE,
3345 				     sizeof(link_sec), &link_sec,
3346 				     HCI_CMD_TIMEOUT);
3347 }
3348 
3349 int hci_write_fast_connectable_sync(struct hci_dev *hdev, bool enable)
3350 {
3351 	struct hci_cp_write_page_scan_activity cp;
3352 	u8 type;
3353 	int err = 0;
3354 
3355 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
3356 		return 0;
3357 
3358 	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
3359 		return 0;
3360 
3361 	memset(&cp, 0, sizeof(cp));
3362 
3363 	if (enable) {
3364 		type = PAGE_SCAN_TYPE_INTERLACED;
3365 
3366 		/* 160 msec page scan interval (0x0100 * 0.625 ms) */
3367 		cp.interval = cpu_to_le16(0x0100);
3368 	} else {
3369 		type = hdev->def_page_scan_type;
3370 		cp.interval = cpu_to_le16(hdev->def_page_scan_int);
3371 	}
3372 
3373 	cp.window = cpu_to_le16(hdev->def_page_scan_window);
3374 
3375 	if (__cpu_to_le16(hdev->page_scan_interval) != cp.interval ||
3376 	    __cpu_to_le16(hdev->page_scan_window) != cp.window) {
3377 		err = __hci_cmd_sync_status(hdev,
3378 					    HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
3379 					    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
3380 		if (err)
3381 			return err;
3382 	}
3383 
3384 	if (hdev->page_scan_type != type)
3385 		err = __hci_cmd_sync_status(hdev,
3386 					    HCI_OP_WRITE_PAGE_SCAN_TYPE,
3387 					    sizeof(type), &type,
3388 					    HCI_CMD_TIMEOUT);
3389 
3390 	return err;
3391 }
3392 
3393 static bool disconnected_accept_list_entries(struct hci_dev *hdev)
3394 {
3395 	struct bdaddr_list *b;
3396 
3397 	list_for_each_entry(b, &hdev->accept_list, list) {
3398 		struct hci_conn *conn;
3399 
3400 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
3401 		if (!conn)
3402 			return true;
3403 
3404 		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3405 			return true;
3406 	}
3407 
3408 	return false;
3409 }
3410 
3411 static int hci_write_scan_enable_sync(struct hci_dev *hdev, u8 val)
3412 {
3413 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE,
3414 					    sizeof(val), &val,
3415 					    HCI_CMD_TIMEOUT);
3416 }
3417 
3418 int hci_update_scan_sync(struct hci_dev *hdev)
3419 {
3420 	u8 scan;
3421 
3422 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
3423 		return 0;
3424 
3425 	if (!hdev_is_powered(hdev))
3426 		return 0;
3427 
3428 	if (mgmt_powering_down(hdev))
3429 		return 0;
3430 
3431 	if (hdev->scanning_paused)
3432 		return 0;
3433 
3434 	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
3435 	    disconnected_accept_list_entries(hdev))
3436 		scan = SCAN_PAGE;
3437 	else
3438 		scan = SCAN_DISABLED;
3439 
3440 	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
3441 		scan |= SCAN_INQUIRY;
3442 
3443 	if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
3444 	    test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
3445 		return 0;
3446 
3447 	return hci_write_scan_enable_sync(hdev, scan);
3448 }
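
/* Illustrative sketch (not part of the original file): the value passed
 * to hci_write_scan_enable_sync() is the Write Scan Enable bitmask from
 * the Core spec (SCAN_INQUIRY is 0x01, SCAN_PAGE is 0x02). A hypothetical
 * decoder for debug output might read:
 */
static const char *scan_state_str(u8 scan)
{
	switch (scan & (SCAN_PAGE | SCAN_INQUIRY)) {
	case SCAN_PAGE | SCAN_INQUIRY:
		return "connectable and discoverable";
	case SCAN_PAGE:
		return "connectable";
	case SCAN_INQUIRY:
		return "discoverable";
	default:
		return "scans disabled";
	}
}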
3449 
3450 int hci_update_name_sync(struct hci_dev *hdev)
3451 {
3452 	struct hci_cp_write_local_name cp;
3453 
3454 	memset(&cp, 0, sizeof(cp));
3455 
3456 	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3457 
3458 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LOCAL_NAME,
3459 					    sizeof(cp), &cp,
3460 					    HCI_CMD_TIMEOUT);
3461 }
3462 
3463 /* This function performs the powered update HCI command sequence after the HCI
3464  * init sequence, which ends up resetting all states; the sequence is as follows:
3465  *
3466  * HCI_SSP_ENABLED(Enable SSP)
3467  * HCI_LE_ENABLED(Enable LE)
3468  * HCI_LE_ENABLED(use_ll_privacy(Add local IRK to Resolving List) ->
3469  * Update adv data)
3470  * Enable Authentication
3471  * lmp_bredr_capable(Set Fast Connectable -> Set Scan Type -> Set Class ->
3472  * Set Name -> Set EIR)
3473  * HCI_FORCE_STATIC_ADDR | BDADDR_ANY && !HCI_BREDR_ENABLED (Set Static Address)
3474  */
3475 int hci_powered_update_sync(struct hci_dev *hdev)
3476 {
3477 	int err;
3478 
3479 	/* Register the available SMP channels (BR/EDR and LE) only when
3480 	 * successfully powering on the controller. This late
3481 	 * registration is required so that LE SMP can clearly decide if
3482 	 * the public address or static address is used.
3483 	 */
3484 	smp_register(hdev);
3485 
3486 	err = hci_write_ssp_mode_sync(hdev, 0x01);
3487 	if (err)
3488 		return err;
3489 
3490 	err = hci_write_le_host_supported_sync(hdev, 0x01, 0x00);
3491 	if (err)
3492 		return err;
3493 
3494 	err = hci_powered_update_adv_sync(hdev);
3495 	if (err)
3496 		return err;
3497 
3498 	err = hci_write_auth_enable_sync(hdev);
3499 	if (err)
3500 		return err;
3501 
3502 	if (lmp_bredr_capable(hdev)) {
3503 		if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
3504 			hci_write_fast_connectable_sync(hdev, true);
3505 		else
3506 			hci_write_fast_connectable_sync(hdev, false);
3507 		hci_update_scan_sync(hdev);
3508 		hci_update_class_sync(hdev);
3509 		hci_update_name_sync(hdev);
3510 		hci_update_eir_sync(hdev);
3511 	}
3512 
3513 	/* If forcing static address is in use or there is no public
3514 	 * address, use the static address as random address (but skip
3515 	 * the HCI command if the current random address is already the
3516 	 * static one).
3517 	 *
3518 	 * In case BR/EDR has been disabled on a dual-mode controller
3519 	 * and a static address has been configured, then use that
3520 	 * address instead of the public BR/EDR address.
3521 	 */
3522 	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
3523 	    (!bacmp(&hdev->bdaddr, BDADDR_ANY) &&
3524 	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))) {
3525 		if (bacmp(&hdev->static_addr, BDADDR_ANY))
3526 			return hci_set_random_addr_sync(hdev,
3527 							&hdev->static_addr);
3528 	}
3529 
3530 	return 0;
3531 }
3532 
3533 /**
3534  * hci_dev_get_bd_addr_from_property - Get the Bluetooth Device Address
3535  *				       (BD_ADDR) for a HCI device from
3536  *				       a firmware node property.
3537  * @hdev:	The HCI device
3538  *
3539  * Search the firmware node for 'local-bd-address'.
3540  *
3541  * All-zero BD addresses are rejected, because those could be properties
3542  * that exist in the firmware tables, but were not updated by the firmware. For
3543  * example, the DTS could define 'local-bd-address' with an all-zero address.
3544  */
3545 static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev)
3546 {
3547 	struct fwnode_handle *fwnode = dev_fwnode(hdev->dev.parent);
3548 	bdaddr_t ba;
3549 	int ret;
3550 
3551 	ret = fwnode_property_read_u8_array(fwnode, "local-bd-address",
3552 					    (u8 *)&ba, sizeof(ba));
3553 	if (ret < 0 || !bacmp(&ba, BDADDR_ANY))
3554 		return;
3555 
3556 	if (test_bit(HCI_QUIRK_BDADDR_PROPERTY_BROKEN, &hdev->quirks))
3557 		baswap(&hdev->public_addr, &ba);
3558 	else
3559 		bacpy(&hdev->public_addr, &ba);
3560 }
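
/* Illustrative note (not part of the original file): the property read
 * above corresponds to a firmware node entry such as this hypothetical
 * device tree fragment, with the six address bytes stored least
 * significant byte first (so the fragment below yields the public
 * address 11:22:33:44:55:66); the baswap() covers firmware quirked as
 * HCI_QUIRK_BDADDR_PROPERTY_BROKEN, which stores the bytes reversed:
 *
 *	bluetooth {
 *		local-bd-address = [ 66 55 44 33 22 11 ];
 *	};
 */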
3561 
3562 struct hci_init_stage {
3563 	int (*func)(struct hci_dev *hdev);
3564 };
3565 
3566 /* Run a NULL-terminated table of init stage functions */
3567 static int hci_init_stage_sync(struct hci_dev *hdev,
3568 			       const struct hci_init_stage *stage)
3569 {
3570 	size_t i;
3571 
3572 	for (i = 0; stage[i].func; i++) {
3573 		int err;
3574 
3575 		err = stage[i].func(hdev);
3576 		if (err)
3577 			return err;
3578 	}
3579 
3580 	return 0;
3581 }
3582 
3583 /* Read Local Version */
3584 static int hci_read_local_version_sync(struct hci_dev *hdev)
3585 {
3586 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_VERSION,
3587 				     0, NULL, HCI_CMD_TIMEOUT);
3588 }
3589 
3590 /* Read BD Address */
3591 static int hci_read_bd_addr_sync(struct hci_dev *hdev)
3592 {
3593 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_BD_ADDR,
3594 				     0, NULL, HCI_CMD_TIMEOUT);
3595 }
3596 
3597 #define HCI_INIT(_func) \
3598 { \
3599 	.func = _func, \
3600 }
3601 
3602 static const struct hci_init_stage hci_init0[] = {
3603 	/* HCI_OP_READ_LOCAL_VERSION */
3604 	HCI_INIT(hci_read_local_version_sync),
3605 	/* HCI_OP_READ_BD_ADDR */
3606 	HCI_INIT(hci_read_bd_addr_sync),
3607 	{}
3608 };
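
/* Illustrative usage (not part of the original file): stage tables are
 * plain arrays terminated by an empty entry, so a hypothetical vendor
 * stage could be declared and run the same way:
 *
 *	static const struct hci_init_stage vendor_init[] = {
 *		HCI_INIT(vendor_read_fw_version_sync),
 *		HCI_INIT(vendor_load_config_sync),
 *		{}
 *	};
 *
 *	err = hci_init_stage_sync(hdev, vendor_init);
 *
 * hci_init_stage_sync() stops at the first callback that returns a
 * non-zero error, so ordering within the table is significant.
 */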
3609 
3610 int hci_reset_sync(struct hci_dev *hdev)
3611 {
3612 	int err;
3613 
3614 	set_bit(HCI_RESET, &hdev->flags);
3615 
3616 	err = __hci_cmd_sync_status(hdev, HCI_OP_RESET, 0, NULL,
3617 				    HCI_CMD_TIMEOUT);
3618 	if (err)
3619 		return err;
3620 
3621 	return 0;
3622 }
3623 
3624 static int hci_init0_sync(struct hci_dev *hdev)
3625 {
3626 	int err;
3627 
3628 	bt_dev_dbg(hdev, "");
3629 
3630 	/* Reset */
3631 	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
3632 		err = hci_reset_sync(hdev);
3633 		if (err)
3634 			return err;
3635 	}
3636 
3637 	return hci_init_stage_sync(hdev, hci_init0);
3638 }
3639 
3640 static int hci_unconf_init_sync(struct hci_dev *hdev)
3641 {
3642 	int err;
3643 
3644 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3645 		return 0;
3646 
3647 	err = hci_init0_sync(hdev);
3648 	if (err < 0)
3649 		return err;
3650 
3651 	if (hci_dev_test_flag(hdev, HCI_SETUP))
3652 		hci_debugfs_create_basic(hdev);
3653 
3654 	return 0;
3655 }
3656 
3657 /* Read Local Supported Features */
3658 static int hci_read_local_features_sync(struct hci_dev *hdev)
3659 {
3660 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_FEATURES,
3661 				     0, NULL, HCI_CMD_TIMEOUT);
3662 }
3663 
3664 /* BR Controller init stage 1 command sequence */
3665 static const struct hci_init_stage br_init1[] = {
3666 	/* HCI_OP_READ_LOCAL_FEATURES */
3667 	HCI_INIT(hci_read_local_features_sync),
3668 	/* HCI_OP_READ_LOCAL_VERSION */
3669 	HCI_INIT(hci_read_local_version_sync),
3670 	/* HCI_OP_READ_BD_ADDR */
3671 	HCI_INIT(hci_read_bd_addr_sync),
3672 	{}
3673 };
3674 
3675 /* Read Local Commands */
3676 static int hci_read_local_cmds_sync(struct hci_dev *hdev)
3677 {
3678 	/* All Bluetooth 1.2 and later controllers should support the
3679 	 * HCI command for reading the local supported commands.
3680 	 *
3681 	 * Unfortunately some controllers indicate Bluetooth 1.2 support,
3682 	 * but do not have support for this command. If that is the case,
3683 	 * the driver can quirk the behavior and skip reading the local
3684 	 * supported commands.
3685 	 */
3686 	if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
3687 	    !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
3688 		return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_COMMANDS,
3689 					     0, NULL, HCI_CMD_TIMEOUT);
3690 
3691 	return 0;
3692 }
3693 
3694 static int hci_init1_sync(struct hci_dev *hdev)
3695 {
3696 	int err;
3697 
3698 	bt_dev_dbg(hdev, "");
3699 
3700 	/* Reset */
3701 	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
3702 		err = hci_reset_sync(hdev);
3703 		if (err)
3704 			return err;
3705 	}
3706 
3707 	return hci_init_stage_sync(hdev, br_init1);
3708 }
3709 
3710 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
3711 static int hci_read_buffer_size_sync(struct hci_dev *hdev)
3712 {
3713 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_BUFFER_SIZE,
3714 				     0, NULL, HCI_CMD_TIMEOUT);
3715 }
3716 
3717 /* Read Class of Device */
3718 static int hci_read_dev_class_sync(struct hci_dev *hdev)
3719 {
3720 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_CLASS_OF_DEV,
3721 				     0, NULL, HCI_CMD_TIMEOUT);
3722 }
3723 
3724 /* Read Local Name */
3725 static int hci_read_local_name_sync(struct hci_dev *hdev)
3726 {
3727 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_NAME,
3728 				     0, NULL, HCI_CMD_TIMEOUT);
3729 }
3730 
3731 /* Read Voice Setting */
3732 static int hci_read_voice_setting_sync(struct hci_dev *hdev)
3733 {
3734 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_VOICE_SETTING,
3735 				     0, NULL, HCI_CMD_TIMEOUT);
3736 }
3737 
3738 /* Read Number of Supported IAC */
3739 static int hci_read_num_supported_iac_sync(struct hci_dev *hdev)
3740 {
3741 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_NUM_SUPPORTED_IAC,
3742 				     0, NULL, HCI_CMD_TIMEOUT);
3743 }
3744 
3745 /* Read Current IAC LAP */
3746 static int hci_read_current_iac_lap_sync(struct hci_dev *hdev)
3747 {
3748 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_CURRENT_IAC_LAP,
3749 				     0, NULL, HCI_CMD_TIMEOUT);
3750 }
3751 
3752 static int hci_set_event_filter_sync(struct hci_dev *hdev, u8 flt_type,
3753 				     u8 cond_type, bdaddr_t *bdaddr,
3754 				     u8 auto_accept)
3755 {
3756 	struct hci_cp_set_event_filter cp;
3757 
3758 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
3759 		return 0;
3760 
3761 	if (test_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks))
3762 		return 0;
3763 
3764 	memset(&cp, 0, sizeof(cp));
3765 	cp.flt_type = flt_type;
3766 
3767 	if (flt_type != HCI_FLT_CLEAR_ALL) {
3768 		cp.cond_type = cond_type;
3769 		bacpy(&cp.addr_conn_flt.bdaddr, bdaddr);
3770 		cp.addr_conn_flt.auto_accept = auto_accept;
3771 	}
3772 
3773 	return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_FLT,
3774 				     flt_type == HCI_FLT_CLEAR_ALL ?
3775 				     sizeof(cp.flt_type) : sizeof(cp), &cp,
3776 				     HCI_CMD_TIMEOUT);
3777 }
3778 
3779 static int hci_clear_event_filter_sync(struct hci_dev *hdev)
3780 {
3781 	if (!hci_dev_test_flag(hdev, HCI_EVENT_FILTER_CONFIGURED))
3782 		return 0;
3783 
3784 	/* In theory the state machine should not reach here unless
3785 	 * a hci_set_event_filter_sync() call succeeds, but we do
3786 	 * the check both for parity and as a future reminder.
3787 	 */
3788 	if (test_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks))
3789 		return 0;
3790 
3791 	return hci_set_event_filter_sync(hdev, HCI_FLT_CLEAR_ALL, 0x00,
3792 					 BDADDR_ANY, 0x00);
3793 }
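
/* Illustrative usage (not part of the original file): besides the
 * clear-all case above, the same helper can install a connection setup
 * filter. This hypothetical call would auto-accept connections from one
 * remote address:
 *
 *	err = hci_set_event_filter_sync(hdev, HCI_FLT_CONN_SETUP,
 *					HCI_CONN_SETUP_ALLOW_BDADDR,
 *					&bdaddr, HCI_CONN_SETUP_AUTO_ON);
 *
 * Note that only cp.flt_type is sent for HCI_FLT_CLEAR_ALL, since the
 * parameter length of the Set Event Filter command varies with the
 * filter type.
 */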
3794 
3795 /* Connection accept timeout ~20 secs */
3796 static int hci_write_ca_timeout_sync(struct hci_dev *hdev)
3797 {
3798 	__le16 param = cpu_to_le16(0x7d00);
3799 
3800 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CA_TIMEOUT,
3801 				     sizeof(param), &param, HCI_CMD_TIMEOUT);
3802 }
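
/* Illustrative arithmetic (not part of the original file): 0x7d00 is
 * 32000 baseband slots, and 32000 * 0.625 ms = 20000 ms, hence the
 * "~20 secs" in the comment above.
 */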
3803 
3804 /* BR Controller init stage 2 command sequence */
3805 static const struct hci_init_stage br_init2[] = {
3806 	/* HCI_OP_READ_BUFFER_SIZE */
3807 	HCI_INIT(hci_read_buffer_size_sync),
3808 	/* HCI_OP_READ_CLASS_OF_DEV */
3809 	HCI_INIT(hci_read_dev_class_sync),
3810 	/* HCI_OP_READ_LOCAL_NAME */
3811 	HCI_INIT(hci_read_local_name_sync),
3812 	/* HCI_OP_READ_VOICE_SETTING */
3813 	HCI_INIT(hci_read_voice_setting_sync),
3814 	/* HCI_OP_READ_NUM_SUPPORTED_IAC */
3815 	HCI_INIT(hci_read_num_supported_iac_sync),
3816 	/* HCI_OP_READ_CURRENT_IAC_LAP */
3817 	HCI_INIT(hci_read_current_iac_lap_sync),
3818 	/* HCI_OP_SET_EVENT_FLT */
3819 	HCI_INIT(hci_clear_event_filter_sync),
3820 	/* HCI_OP_WRITE_CA_TIMEOUT */
3821 	HCI_INIT(hci_write_ca_timeout_sync),
3822 	{}
3823 };
3824 
3825 static int hci_write_ssp_mode_1_sync(struct hci_dev *hdev)
3826 {
3827 	u8 mode = 0x01;
3828 
3829 	if (!lmp_ssp_capable(hdev) || !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
3830 		return 0;
3831 
3832 	/* When SSP is available, the host features page
3833 	 * should also be available. However, some controllers
3834 	 * report the max_page as 0 as long as SSP
3835 	 * has not been enabled. To achieve proper debugging
3836 	 * output, force max_page to at least 1.
3837 	 */
3838 	hdev->max_page = 0x01;
3839 
3840 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_MODE,
3841 				     sizeof(mode), &mode, HCI_CMD_TIMEOUT);
3842 }
3843 
3844 static int hci_write_eir_sync(struct hci_dev *hdev)
3845 {
3846 	struct hci_cp_write_eir cp;
3847 
3848 	if (!lmp_ssp_capable(hdev) || hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
3849 		return 0;
3850 
3851 	memset(hdev->eir, 0, sizeof(hdev->eir));
3852 	memset(&cp, 0, sizeof(cp));
3853 
3854 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp,
3855 				     HCI_CMD_TIMEOUT);
3856 }
3857 
3858 static int hci_write_inquiry_mode_sync(struct hci_dev *hdev)
3859 {
3860 	u8 mode;
3861 
3862 	if (!lmp_inq_rssi_capable(hdev) &&
3863 	    !test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
3864 		return 0;
3865 
3866 	/* If Extended Inquiry Result events are supported, then
3867 	 * they are clearly preferred over Inquiry Result with RSSI
3868 	 * events.
3869 	 */
3870 	mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;
3871 
3872 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_INQUIRY_MODE,
3873 				     sizeof(mode), &mode, HCI_CMD_TIMEOUT);
3874 }
3875 
3876 static int hci_read_inq_rsp_tx_power_sync(struct hci_dev *hdev)
3877 {
3878 	if (!lmp_inq_tx_pwr_capable(hdev))
3879 		return 0;
3880 
3881 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_INQ_RSP_TX_POWER,
3882 				     0, NULL, HCI_CMD_TIMEOUT);
3883 }
3884 
3885 static int hci_read_local_ext_features_sync(struct hci_dev *hdev, u8 page)
3886 {
3887 	struct hci_cp_read_local_ext_features cp;
3888 
3889 	if (!lmp_ext_feat_capable(hdev))
3890 		return 0;
3891 
3892 	memset(&cp, 0, sizeof(cp));
3893 	cp.page = page;
3894 
3895 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES,
3896 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
3897 }
3898 
3899 static int hci_read_local_ext_features_1_sync(struct hci_dev *hdev)
3900 {
3901 	return hci_read_local_ext_features_sync(hdev, 0x01);
3902 }
3903 
3904 /* HCI Controller init stage 2 command sequence */
3905 static const struct hci_init_stage hci_init2[] = {
3906 	/* HCI_OP_READ_LOCAL_COMMANDS */
3907 	HCI_INIT(hci_read_local_cmds_sync),
3908 	/* HCI_OP_WRITE_SSP_MODE */
3909 	HCI_INIT(hci_write_ssp_mode_1_sync),
3910 	/* HCI_OP_WRITE_EIR */
3911 	HCI_INIT(hci_write_eir_sync),
3912 	/* HCI_OP_WRITE_INQUIRY_MODE */
3913 	HCI_INIT(hci_write_inquiry_mode_sync),
3914 	/* HCI_OP_READ_INQ_RSP_TX_POWER */
3915 	HCI_INIT(hci_read_inq_rsp_tx_power_sync),
3916 	/* HCI_OP_READ_LOCAL_EXT_FEATURES */
3917 	HCI_INIT(hci_read_local_ext_features_1_sync),
3918 	/* HCI_OP_WRITE_AUTH_ENABLE */
3919 	HCI_INIT(hci_write_auth_enable_sync),
3920 	{}
3921 };
3922 
3923 /* Read LE Buffer Size */
3924 static int hci_le_read_buffer_size_sync(struct hci_dev *hdev)
3925 {
3926 	/* Use Read LE Buffer Size V2 if supported */
3927 	if (iso_capable(hdev) && hdev->commands[41] & 0x20)
3928 		return __hci_cmd_sync_status(hdev,
3929 					     HCI_OP_LE_READ_BUFFER_SIZE_V2,
3930 					     0, NULL, HCI_CMD_TIMEOUT);
3931 
3932 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_BUFFER_SIZE,
3933 				     0, NULL, HCI_CMD_TIMEOUT);
3934 }
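
/* Illustrative sketch (not part of the original file): checks such as
 * "hdev->commands[41] & 0x20" above index the Read Local Supported
 * Commands bitmask, in which the Core spec assigns each command an
 * (octet, bit) pair. A hypothetical generic helper:
 */
static inline bool hci_cmd_supported(struct hci_dev *hdev,
				     unsigned int octet, unsigned int bit)
{
	if (octet >= sizeof(hdev->commands) || bit > 7)
		return false;

	return hdev->commands[octet] & BIT(bit);
}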
3935 
3936 /* Read LE Local Supported Features */
3937 static int hci_le_read_local_features_sync(struct hci_dev *hdev)
3938 {
3939 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_LOCAL_FEATURES,
3940 				     0, NULL, HCI_CMD_TIMEOUT);
3941 }
3942 
3943 /* Read LE Supported States */
3944 static int hci_le_read_supported_states_sync(struct hci_dev *hdev)
3945 {
3946 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_SUPPORTED_STATES,
3947 				     0, NULL, HCI_CMD_TIMEOUT);
3948 }
3949 
3950 /* LE Controller init stage 2 command sequence */
3951 static const struct hci_init_stage le_init2[] = {
3952 	/* HCI_OP_LE_READ_LOCAL_FEATURES */
3953 	HCI_INIT(hci_le_read_local_features_sync),
3954 	/* HCI_OP_LE_READ_BUFFER_SIZE */
3955 	HCI_INIT(hci_le_read_buffer_size_sync),
3956 	/* HCI_OP_LE_READ_SUPPORTED_STATES */
3957 	HCI_INIT(hci_le_read_supported_states_sync),
3958 	{}
3959 };
3960 
3961 static int hci_init2_sync(struct hci_dev *hdev)
3962 {
3963 	int err;
3964 
3965 	bt_dev_dbg(hdev, "");
3966 
3967 	err = hci_init_stage_sync(hdev, hci_init2);
3968 	if (err)
3969 		return err;
3970 
3971 	if (lmp_bredr_capable(hdev)) {
3972 		err = hci_init_stage_sync(hdev, br_init2);
3973 		if (err)
3974 			return err;
3975 	} else {
3976 		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
3977 	}
3978 
3979 	if (lmp_le_capable(hdev)) {
3980 		err = hci_init_stage_sync(hdev, le_init2);
3981 		if (err)
3982 			return err;
3983 		/* LE-only controllers have LE implicitly enabled */
3984 		if (!lmp_bredr_capable(hdev))
3985 			hci_dev_set_flag(hdev, HCI_LE_ENABLED);
3986 	}
3987 
3988 	return 0;
3989 }
3990 
3991 static int hci_set_event_mask_sync(struct hci_dev *hdev)
3992 {
3993 	/* The second byte is 0xff instead of 0x9f (two reserved bits
3994 	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
3995 	 * command otherwise.
3996 	 */
3997 	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
3998 
3999 	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
4000 	 * any event mask for pre-1.2 devices.
4001 	 */
4002 	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
4003 		return 0;
4004 
4005 	if (lmp_bredr_capable(hdev)) {
4006 		events[4] |= 0x01; /* Flow Specification Complete */
4007 
4008 		/* Don't set Disconnect Complete and mode change when
4009 		 * suspended as that would wake up the host when disconnecting
4010 		 * due to suspend.
4011 		 */
4012 		if (hdev->suspended) {
4013 			events[0] &= 0xef;
4014 			events[2] &= 0xf7;
4015 		}
4016 	} else {
4017 		/* Use a different default for LE-only devices */
4018 		memset(events, 0, sizeof(events));
4019 		events[1] |= 0x20; /* Command Complete */
4020 		events[1] |= 0x40; /* Command Status */
4021 		events[1] |= 0x80; /* Hardware Error */
4022 
4023 		/* If the controller supports the Disconnect command, enable
4024 		 * the corresponding event. In addition enable packet flow
4025 		 * control related events.
4026 		 */
4027 		if (hdev->commands[0] & 0x20) {
4028 			/* Don't set Disconnect Complete when suspended as that
4029 			 * would wake up the host when disconnecting due to
4030 			 * suspend.
4031 			 */
4032 			if (!hdev->suspended)
4033 				events[0] |= 0x10; /* Disconnection Complete */
4034 			events[2] |= 0x04; /* Number of Completed Packets */
4035 			events[3] |= 0x02; /* Data Buffer Overflow */
4036 		}
4037 
4038 		/* If the controller supports the Read Remote Version
4039 		 * Information command, enable the corresponding event.
4040 		 */
4041 		if (hdev->commands[2] & 0x80)
4042 			events[1] |= 0x08; /* Read Remote Version Information
4043 					    * Complete
4044 					    */
4045 
4046 		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
4047 			events[0] |= 0x80; /* Encryption Change */
4048 			events[5] |= 0x80; /* Encryption Key Refresh Complete */
4049 		}
4050 	}
4051 
4052 	if (lmp_inq_rssi_capable(hdev) ||
4053 	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
4054 		events[4] |= 0x02; /* Inquiry Result with RSSI */
4055 
4056 	if (lmp_ext_feat_capable(hdev))
4057 		events[4] |= 0x04; /* Read Remote Extended Features Complete */
4058 
4059 	if (lmp_esco_capable(hdev)) {
4060 		events[5] |= 0x08; /* Synchronous Connection Complete */
4061 		events[5] |= 0x10; /* Synchronous Connection Changed */
4062 	}
4063 
4064 	if (lmp_sniffsubr_capable(hdev))
4065 		events[5] |= 0x20; /* Sniff Subrating */
4066 
4067 	if (lmp_pause_enc_capable(hdev))
4068 		events[5] |= 0x80; /* Encryption Key Refresh Complete */
4069 
4070 	if (lmp_ext_inq_capable(hdev))
4071 		events[5] |= 0x40; /* Extended Inquiry Result */
4072 
4073 	if (lmp_no_flush_capable(hdev))
4074 		events[7] |= 0x01; /* Enhanced Flush Complete */
4075 
4076 	if (lmp_lsto_capable(hdev))
4077 		events[6] |= 0x80; /* Link Supervision Timeout Changed */
4078 
4079 	if (lmp_ssp_capable(hdev)) {
4080 		events[6] |= 0x01;	/* IO Capability Request */
4081 		events[6] |= 0x02;	/* IO Capability Response */
4082 		events[6] |= 0x04;	/* User Confirmation Request */
4083 		events[6] |= 0x08;	/* User Passkey Request */
4084 		events[6] |= 0x10;	/* Remote OOB Data Request */
4085 		events[6] |= 0x20;	/* Simple Pairing Complete */
4086 		events[7] |= 0x04;	/* User Passkey Notification */
4087 		events[7] |= 0x08;	/* Keypress Notification */
4088 		events[7] |= 0x10;	/* Remote Host Supported
4089 					 * Features Notification
4090 					 */
4091 	}
4092 
4093 	if (lmp_le_capable(hdev))
4094 		events[7] |= 0x20;	/* LE Meta-Event */
4095 
4096 	return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_MASK,
4097 				     sizeof(events), events, HCI_CMD_TIMEOUT);
4098 }
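
/* Illustrative sketch (not part of the original file): the event mask is
 * a little-endian 64-bit bitfield, so the byte/bit pokes above could also
 * be expressed with a hypothetical helper taking the event's absolute bit
 * position from the Core spec, e.g. hci_events_set_bit(events, 33) for
 * Inquiry Result with RSSI (the same as events[4] |= 0x02):
 */
static inline void hci_events_set_bit(u8 events[8], unsigned int bit)
{
	if (bit < 64)
		events[bit / 8] |= BIT(bit % 8);
}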
4099 
4100 static int hci_read_stored_link_key_sync(struct hci_dev *hdev)
4101 {
4102 	struct hci_cp_read_stored_link_key cp;
4103 
4104 	if (!(hdev->commands[6] & 0x20) ||
4105 	    test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks))
4106 		return 0;
4107 
4108 	memset(&cp, 0, sizeof(cp));
4109 	bacpy(&cp.bdaddr, BDADDR_ANY);
4110 	cp.read_all = 0x01;
4111 
4112 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_STORED_LINK_KEY,
4113 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
4114 }
4115 
4116 static int hci_setup_link_policy_sync(struct hci_dev *hdev)
4117 {
4118 	struct hci_cp_write_def_link_policy cp;
4119 	u16 link_policy = 0;
4120 
4121 	if (!(hdev->commands[5] & 0x10))
4122 		return 0;
4123 
4124 	memset(&cp, 0, sizeof(cp));
4125 
4126 	if (lmp_rswitch_capable(hdev))
4127 		link_policy |= HCI_LP_RSWITCH;
4128 	if (lmp_hold_capable(hdev))
4129 		link_policy |= HCI_LP_HOLD;
4130 	if (lmp_sniff_capable(hdev))
4131 		link_policy |= HCI_LP_SNIFF;
4132 	if (lmp_park_capable(hdev))
4133 		link_policy |= HCI_LP_PARK;
4134 
4135 	cp.policy = cpu_to_le16(link_policy);
4136 
4137 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
4138 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
4139 }
4140 
4141 static int hci_read_page_scan_activity_sync(struct hci_dev *hdev)
4142 {
4143 	if (!(hdev->commands[8] & 0x01))
4144 		return 0;
4145 
4146 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_PAGE_SCAN_ACTIVITY,
4147 				     0, NULL, HCI_CMD_TIMEOUT);
4148 }
4149 
4150 static int hci_read_def_err_data_reporting_sync(struct hci_dev *hdev)
4151 {
4152 	if (!(hdev->commands[18] & 0x04) ||
4153 	    !(hdev->features[0][6] & LMP_ERR_DATA_REPORTING) ||
4154 	    test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks))
4155 		return 0;
4156 
4157 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_DEF_ERR_DATA_REPORTING,
4158 				     0, NULL, HCI_CMD_TIMEOUT);
4159 }
4160 
4161 static int hci_read_page_scan_type_sync(struct hci_dev *hdev)
4162 {
4163 	/* Some older Broadcom based Bluetooth 1.2 controllers do not
4164 	 * support the Read Page Scan Type command. Check support for
4165 	 * this command in the bit mask of supported commands.
4166 	 */
4167 	if (!(hdev->commands[13] & 0x01))
4168 		return 0;
4169 
4170 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_PAGE_SCAN_TYPE,
4171 				     0, NULL, HCI_CMD_TIMEOUT);
4172 }
4173 
4174 /* Read features beyond page 1 if available */
4175 static int hci_read_local_ext_features_all_sync(struct hci_dev *hdev)
4176 {
4177 	u8 page;
4178 	int err;
4179 
4180 	if (!lmp_ext_feat_capable(hdev))
4181 		return 0;
4182 
4183 	for (page = 2; page < HCI_MAX_PAGES && page <= hdev->max_page;
4184 	     page++) {
4185 		err = hci_read_local_ext_features_sync(hdev, page);
4186 		if (err)
4187 			return err;
4188 	}
4189 
4190 	return 0;
4191 }
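
/* Illustrative note (not part of the original file): feature page 0
 * (the classic LMP features) and page 1 (the host features) are already
 * read earlier in the init sequence, so this loop only fetches the
 * remaining pages, capped by both the controller-reported max_page and
 * HCI_MAX_PAGES.
 */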
4192 
4193 /* HCI Controller init stage 3 command sequence */
4194 static const struct hci_init_stage hci_init3[] = {
4195 	/* HCI_OP_SET_EVENT_MASK */
4196 	HCI_INIT(hci_set_event_mask_sync),
4197 	/* HCI_OP_READ_STORED_LINK_KEY */
4198 	HCI_INIT(hci_read_stored_link_key_sync),
4199 	/* HCI_OP_WRITE_DEF_LINK_POLICY */
4200 	HCI_INIT(hci_setup_link_policy_sync),
4201 	/* HCI_OP_READ_PAGE_SCAN_ACTIVITY */
4202 	HCI_INIT(hci_read_page_scan_activity_sync),
4203 	/* HCI_OP_READ_DEF_ERR_DATA_REPORTING */
4204 	HCI_INIT(hci_read_def_err_data_reporting_sync),
4205 	/* HCI_OP_READ_PAGE_SCAN_TYPE */
4206 	HCI_INIT(hci_read_page_scan_type_sync),
4207 	/* HCI_OP_READ_LOCAL_EXT_FEATURES */
4208 	HCI_INIT(hci_read_local_ext_features_all_sync),
4209 	{}
4210 };
4211 
4212 static int hci_le_set_event_mask_sync(struct hci_dev *hdev)
4213 {
4214 	u8 events[8];
4215 
4216 	if (!lmp_le_capable(hdev))
4217 		return 0;
4218 
4219 	memset(events, 0, sizeof(events));
4220 
4221 	if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
4222 		events[0] |= 0x10;	/* LE Long Term Key Request */
4223 
4224 	/* If the controller supports the Connection Parameters Request
4225 	 * Link Layer Procedure, enable the corresponding event.
4226 	 */
4227 	if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
4228 		/* LE Remote Connection Parameter Request */
4229 		events[0] |= 0x20;
4230 
4231 	/* If the controller supports the Data Length Extension
4232 	 * feature, enable the corresponding event.
4233 	 */
4234 	if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
4235 		events[0] |= 0x40;	/* LE Data Length Change */
4236 
4237 	/* If the controller supports the LL Privacy feature or LE Extended Adv,
4238 	 * enable the corresponding event.
4239 	 */
4240 	if (use_enhanced_conn_complete(hdev))
4241 		events[1] |= 0x02;	/* LE Enhanced Connection Complete */
4242 
4243 	/* If the controller supports Extended Scanner Filter
4244 	 * Policies, enable the corresponding event.
4245 	 */
4246 	if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
4247 		events[1] |= 0x04;	/* LE Direct Advertising Report */
4248 
4249 	/* If the controller supports Channel Selection Algorithm #2
4250 	 * feature, enable the corresponding event.
4251 	 */
4252 	if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
4253 		events[2] |= 0x08;	/* LE Channel Selection Algorithm */
4254 
4255 	/* If the controller supports the LE Set Scan Enable command,
4256 	 * enable the corresponding advertising report event.
4257 	 */
4258 	if (hdev->commands[26] & 0x08)
4259 		events[0] |= 0x02;	/* LE Advertising Report */
4260 
4261 	/* If the controller supports the LE Create Connection
4262 	 * command, enable the corresponding event.
4263 	 */
4264 	if (hdev->commands[26] & 0x10)
4265 		events[0] |= 0x01;	/* LE Connection Complete */
4266 
4267 	/* If the controller supports the LE Connection Update
4268 	 * command, enable the corresponding event.
4269 	 */
4270 	if (hdev->commands[27] & 0x04)
4271 		events[0] |= 0x04;	/* LE Connection Update Complete */
4272 
4273 	/* If the controller supports the LE Read Remote Used Features
4274 	 * command, enable the corresponding event.
4275 	 */
4276 	if (hdev->commands[27] & 0x20)
4277 		/* LE Read Remote Used Features Complete */
4278 		events[0] |= 0x08;
4279 
4280 	/* If the controller supports the LE Read Local P-256
4281 	 * Public Key command, enable the corresponding event.
4282 	 */
4283 	if (hdev->commands[34] & 0x02)
4284 		/* LE Read Local P-256 Public Key Complete */
4285 		events[0] |= 0x80;
4286 
4287 	/* If the controller supports the LE Generate DHKey
4288 	 * command, enable the corresponding event.
4289 	 */
4290 	if (hdev->commands[34] & 0x04)
4291 		events[1] |= 0x01;	/* LE Generate DHKey Complete */
4292 
4293 	/* If the controller supports the LE Set Default PHY or
4294 	 * LE Set PHY commands, enable the corresponding event.
4295 	 */
4296 	if (hdev->commands[35] & (0x20 | 0x40))
4297 		events[1] |= 0x08;        /* LE PHY Update Complete */
4298 
4299 	/* If the controller supports LE Set Extended Scan Parameters
4300 	 * and LE Set Extended Scan Enable commands, enable the
4301 	 * corresponding event.
4302 	 */
4303 	if (use_ext_scan(hdev))
4304 		events[1] |= 0x10;	/* LE Extended Advertising Report */
4305 
4306 	/* If the controller supports the LE Extended Advertising
4307 	 * command, enable the corresponding event.
4308 	 */
4309 	if (ext_adv_capable(hdev))
4310 		events[2] |= 0x02;	/* LE Advertising Set Terminated */
4311 
4312 	if (cis_capable(hdev)) {
4313 		events[3] |= 0x01;	/* LE CIS Established */
4314 		if (cis_peripheral_capable(hdev))
4315 			events[3] |= 0x02; /* LE CIS Request */
4316 	}
4317 
4318 	if (bis_capable(hdev)) {
4319 		events[1] |= 0x20;	/* LE PA Report */
4320 		events[1] |= 0x40;	/* LE PA Sync Established */
4321 		events[3] |= 0x04;	/* LE Create BIG Complete */
4322 		events[3] |= 0x08;	/* LE Terminate BIG Complete */
4323 		events[3] |= 0x10;	/* LE BIG Sync Established */
4324 		events[3] |= 0x20;	/* LE BIG Sync Loss */
4325 		events[4] |= 0x02;	/* LE BIG Info Advertising Report */
4326 	}
4327 
4328 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EVENT_MASK,
4329 				     sizeof(events), events, HCI_CMD_TIMEOUT);
4330 }
4331 
4332 /* Read LE Advertising Channel TX Power */
4333 static int hci_le_read_adv_tx_power_sync(struct hci_dev *hdev)
4334 {
4335 	if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) {
4336 		/* The HCI spec forbids mixing legacy and extended
4337 		 * advertising commands, and READ_ADV_TX_POWER is one
4338 		 * of the legacy ones. So do not call it if extended adv
4339 		 * is supported, otherwise the controller will return
4340 		 * COMMAND_DISALLOWED for extended commands.
4341 		 */
4342 		return __hci_cmd_sync_status(hdev,
4343 					       HCI_OP_LE_READ_ADV_TX_POWER,
4344 					       0, NULL, HCI_CMD_TIMEOUT);
4345 	}
4346 
4347 	return 0;
4348 }
4349 
4350 /* Read LE Min/Max Tx Power */
4351 static int hci_le_read_tx_power_sync(struct hci_dev *hdev)
4352 {
4353 	if (!(hdev->commands[38] & 0x80) ||
4354 	    test_bit(HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER, &hdev->quirks))
4355 		return 0;
4356 
4357 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_TRANSMIT_POWER,
4358 				     0, NULL, HCI_CMD_TIMEOUT);
4359 }
4360 
4361 /* Read LE Accept List Size */
4362 static int hci_le_read_accept_list_size_sync(struct hci_dev *hdev)
4363 {
4364 	if (!(hdev->commands[26] & 0x40))
4365 		return 0;
4366 
4367 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_ACCEPT_LIST_SIZE,
4368 				     0, NULL, HCI_CMD_TIMEOUT);
4369 }
4370 
4371 /* Clear LE Accept List */
4372 static int hci_le_clear_accept_list_sync(struct hci_dev *hdev)
4373 {
4374 	if (!(hdev->commands[26] & 0x80))
4375 		return 0;
4376 
4377 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_CLEAR_ACCEPT_LIST, 0, NULL,
4378 				     HCI_CMD_TIMEOUT);
4379 }
4380 
4381 /* Read LE Resolving List Size */
4382 static int hci_le_read_resolv_list_size_sync(struct hci_dev *hdev)
4383 {
4384 	if (!(hdev->commands[34] & 0x40))
4385 		return 0;
4386 
4387 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_RESOLV_LIST_SIZE,
4388 				     0, NULL, HCI_CMD_TIMEOUT);
4389 }
4390 
4391 /* Clear LE Resolving List */
4392 static int hci_le_clear_resolv_list_sync(struct hci_dev *hdev)
4393 {
4394 	if (!(hdev->commands[34] & 0x20))
4395 		return 0;
4396 
4397 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL,
4398 				     HCI_CMD_TIMEOUT);
4399 }
4400 
4401 /* Set RPA timeout */
4402 static int hci_le_set_rpa_timeout_sync(struct hci_dev *hdev)
4403 {
4404 	__le16 timeout = cpu_to_le16(hdev->rpa_timeout);
4405 
4406 	if (!(hdev->commands[35] & 0x04) ||
4407 	    test_bit(HCI_QUIRK_BROKEN_SET_RPA_TIMEOUT, &hdev->quirks))
4408 		return 0;
4409 
4410 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_RPA_TIMEOUT,
4411 				     sizeof(timeout), &timeout,
4412 				     HCI_CMD_TIMEOUT);
4413 }
4414 
4415 /* Read LE Maximum Data Length */
4416 static int hci_le_read_max_data_len_sync(struct hci_dev *hdev)
4417 {
4418 	if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT))
4419 		return 0;
4420 
4421 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL,
4422 				     HCI_CMD_TIMEOUT);
4423 }
4424 
4425 /* Read LE Suggested Default Data Length */
4426 static int hci_le_read_def_data_len_sync(struct hci_dev *hdev)
4427 {
4428 	if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT))
4429 		return 0;
4430 
4431 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL,
4432 				     HCI_CMD_TIMEOUT);
4433 }
4434 
4435 /* Read LE Number of Supported Advertising Sets */
4436 static int hci_le_read_num_support_adv_sets_sync(struct hci_dev *hdev)
4437 {
4438 	if (!ext_adv_capable(hdev))
4439 		return 0;
4440 
4441 	return __hci_cmd_sync_status(hdev,
4442 				     HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
4443 				     0, NULL, HCI_CMD_TIMEOUT);
4444 }
4445 
4446 /* Write LE Host Supported */
4447 static int hci_set_le_support_sync(struct hci_dev *hdev)
4448 {
4449 	struct hci_cp_write_le_host_supported cp;
4450 
4451 	/* LE-only devices do not support explicit enablement */
4452 	if (!lmp_bredr_capable(hdev))
4453 		return 0;
4454 
4455 	memset(&cp, 0, sizeof(cp));
4456 
4457 	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
4458 		cp.le = 0x01;
4459 		cp.simul = 0x00;
4460 	}
4461 
4462 	if (cp.le == lmp_host_le_capable(hdev))
4463 		return 0;
4464 
4465 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED,
4466 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
4467 }
4468 
4469 /* LE Set Host Feature */
4470 static int hci_le_set_host_feature_sync(struct hci_dev *hdev)
4471 {
4472 	struct hci_cp_le_set_host_feature cp;
4473 
4474 	if (!iso_capable(hdev))
4475 		return 0;
4476 
4477 	memset(&cp, 0, sizeof(cp));
4478 
4479 	/* Isochronous Channels (Host Support) */
4480 	cp.bit_number = 32;
4481 	cp.bit_value = 1;
4482 
4483 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_HOST_FEATURE,
4484 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
4485 }
4486 
4487 /* LE Controller init stage 3 command sequence */
4488 static const struct hci_init_stage le_init3[] = {
4489 	/* HCI_OP_LE_SET_EVENT_MASK */
4490 	HCI_INIT(hci_le_set_event_mask_sync),
4491 	/* HCI_OP_LE_READ_ADV_TX_POWER */
4492 	HCI_INIT(hci_le_read_adv_tx_power_sync),
4493 	/* HCI_OP_LE_READ_TRANSMIT_POWER */
4494 	HCI_INIT(hci_le_read_tx_power_sync),
4495 	/* HCI_OP_LE_READ_ACCEPT_LIST_SIZE */
4496 	HCI_INIT(hci_le_read_accept_list_size_sync),
4497 	/* HCI_OP_LE_CLEAR_ACCEPT_LIST */
4498 	HCI_INIT(hci_le_clear_accept_list_sync),
4499 	/* HCI_OP_LE_READ_RESOLV_LIST_SIZE */
4500 	HCI_INIT(hci_le_read_resolv_list_size_sync),
4501 	/* HCI_OP_LE_CLEAR_RESOLV_LIST */
4502 	HCI_INIT(hci_le_clear_resolv_list_sync),
4503 	/* HCI_OP_LE_SET_RPA_TIMEOUT */
4504 	HCI_INIT(hci_le_set_rpa_timeout_sync),
4505 	/* HCI_OP_LE_READ_MAX_DATA_LEN */
4506 	HCI_INIT(hci_le_read_max_data_len_sync),
4507 	/* HCI_OP_LE_READ_DEF_DATA_LEN */
4508 	HCI_INIT(hci_le_read_def_data_len_sync),
4509 	/* HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS */
4510 	HCI_INIT(hci_le_read_num_support_adv_sets_sync),
4511 	/* HCI_OP_WRITE_LE_HOST_SUPPORTED */
4512 	HCI_INIT(hci_set_le_support_sync),
4513 	/* HCI_OP_LE_SET_HOST_FEATURE */
4514 	HCI_INIT(hci_le_set_host_feature_sync),
4515 	{}
4516 };
4517 
4518 static int hci_init3_sync(struct hci_dev *hdev)
4519 {
4520 	int err;
4521 
4522 	bt_dev_dbg(hdev, "");
4523 
4524 	err = hci_init_stage_sync(hdev, hci_init3);
4525 	if (err)
4526 		return err;
4527 
4528 	if (lmp_le_capable(hdev))
4529 		return hci_init_stage_sync(hdev, le_init3);
4530 
4531 	return 0;
4532 }
4533 
4534 static int hci_delete_stored_link_key_sync(struct hci_dev *hdev)
4535 {
4536 	struct hci_cp_delete_stored_link_key cp;
4537 
4538 	/* Some Broadcom based Bluetooth controllers do not support the
4539 	 * Delete Stored Link Key command. They are clearly indicating its
4540 	 * absence in the bit mask of supported commands.
4541 	 *
4542 	 * Check the supported commands and send it only if the command is
4543 	 * marked as supported. If not supported, assume that the controller
4544 	 * does not have actual support for stored link keys which makes this
4545 	 * command redundant anyway.
4546 	 *
4547 	 * Some controllers indicate that they support deleting
4548 	 * stored link keys, but they don't. The quirk lets a driver
4549 	 * just disable this command.
4550 	 */
4551 	if (!(hdev->commands[6] & 0x80) ||
4552 	    test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks))
4553 		return 0;
4554 
4555 	memset(&cp, 0, sizeof(cp));
4556 	bacpy(&cp.bdaddr, BDADDR_ANY);
4557 	cp.delete_all = 0x01;
4558 
4559 	return __hci_cmd_sync_status(hdev, HCI_OP_DELETE_STORED_LINK_KEY,
4560 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
4561 }
4562 
4563 static int hci_set_event_mask_page_2_sync(struct hci_dev *hdev)
4564 {
4565 	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
4566 	bool changed = false;
4567 
4568 	/* Set event mask page 2 if the HCI command for it is supported */
4569 	if (!(hdev->commands[22] & 0x04))
4570 		return 0;
4571 
4572 	/* If the Connectionless Peripheral Broadcast central role is supported,
4573 	 * enable all necessary events for it.
4574 	 */
4575 	if (lmp_cpb_central_capable(hdev)) {
4576 		events[1] |= 0x40;	/* Triggered Clock Capture */
4577 		events[1] |= 0x80;	/* Synchronization Train Complete */
4578 		events[2] |= 0x08;	/* Truncated Page Complete */
4579 		events[2] |= 0x20;	/* CPB Channel Map Change */
4580 		changed = true;
4581 	}
4582 
4583 	/* If the Connectionless Peripheral Broadcast peripheral role is supported,
4584 	 * enable all necessary events for it.
4585 	 */
4586 	if (lmp_cpb_peripheral_capable(hdev)) {
4587 		events[2] |= 0x01;	/* Synchronization Train Received */
4588 		events[2] |= 0x02;	/* CPB Receive */
4589 		events[2] |= 0x04;	/* CPB Timeout */
4590 		events[2] |= 0x10;	/* Peripheral Page Response Timeout */
4591 		changed = true;
4592 	}
4593 
4594 	/* Enable Authenticated Payload Timeout Expired event if supported */
4595 	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
4596 		events[2] |= 0x80;
4597 		changed = true;
4598 	}
4599 
4600 	/* Some Broadcom based controllers indicate support for Set Event
4601 	 * Mask Page 2 command, but then actually do not support it. Since
4602 	 * the default value is all bits set to zero, the command is only
4603 	 * required if the event mask has to be changed. In case no change
4604 	 * to the event mask is needed, skip this command.
4605 	 */
4606 	if (!changed)
4607 		return 0;
4608 
4609 	return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_MASK_PAGE_2,
4610 				     sizeof(events), events, HCI_CMD_TIMEOUT);
4611 }
4612 
4613 /* Read local codec list if the HCI command is supported */
4614 static int hci_read_local_codecs_sync(struct hci_dev *hdev)
4615 {
4616 	if (hdev->commands[45] & 0x04)
4617 		hci_read_supported_codecs_v2(hdev);
4618 	else if (hdev->commands[29] & 0x20)
4619 		hci_read_supported_codecs(hdev);
4620 
4621 	return 0;
4622 }
4623 
4624 /* Read local pairing options if the HCI command is supported */
4625 static int hci_read_local_pairing_opts_sync(struct hci_dev *hdev)
4626 {
4627 	if (!(hdev->commands[41] & 0x08))
4628 		return 0;
4629 
4630 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_PAIRING_OPTS,
4631 				     0, NULL, HCI_CMD_TIMEOUT);
4632 }
4633 
4634 /* Get MWS transport configuration if the HCI command is supported */
4635 static int hci_get_mws_transport_config_sync(struct hci_dev *hdev)
4636 {
4637 	if (!mws_transport_config_capable(hdev))
4638 		return 0;
4639 
4640 	return __hci_cmd_sync_status(hdev, HCI_OP_GET_MWS_TRANSPORT_CONFIG,
4641 				     0, NULL, HCI_CMD_TIMEOUT);
4642 }
4643 
4644 /* Check for Synchronization Train support */
4645 static int hci_read_sync_train_params_sync(struct hci_dev *hdev)
4646 {
4647 	if (!lmp_sync_train_capable(hdev))
4648 		return 0;
4649 
4650 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_SYNC_TRAIN_PARAMS,
4651 				     0, NULL, HCI_CMD_TIMEOUT);
4652 }
4653 
4654 /* Enable Secure Connections if supported and configured */
4655 static int hci_write_sc_support_1_sync(struct hci_dev *hdev)
4656 {
4657 	u8 support = 0x01;
4658 
4659 	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED) ||
4660 	    !bredr_sc_enabled(hdev))
4661 		return 0;
4662 
4663 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SC_SUPPORT,
4664 				     sizeof(support), &support,
4665 				     HCI_CMD_TIMEOUT);
4666 }
4667 
4668 /* Set erroneous data reporting, if supported, to match the wideband
4669  * speech setting value.
4670  */
4671 static int hci_set_err_data_report_sync(struct hci_dev *hdev)
4672 {
4673 	struct hci_cp_write_def_err_data_reporting cp;
4674 	bool enabled = hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED);
4675 
4676 	if (!(hdev->commands[18] & 0x08) ||
4677 	    !(hdev->features[0][6] & LMP_ERR_DATA_REPORTING) ||
4678 	    test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks))
4679 		return 0;
4680 
4681 	if (enabled == hdev->err_data_reporting)
4682 		return 0;
4683 
4684 	memset(&cp, 0, sizeof(cp));
4685 	cp.err_data_reporting = enabled ? ERR_DATA_REPORTING_ENABLED :
4686 				ERR_DATA_REPORTING_DISABLED;
4687 
4688 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
4689 				    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
4690 }
4691 
4692 static const struct hci_init_stage hci_init4[] = {
4693 	 /* HCI_OP_DELETE_STORED_LINK_KEY */
4694 	HCI_INIT(hci_delete_stored_link_key_sync),
4695 	/* HCI_OP_SET_EVENT_MASK_PAGE_2 */
4696 	HCI_INIT(hci_set_event_mask_page_2_sync),
4697 	/* HCI_OP_READ_LOCAL_CODECS */
4698 	HCI_INIT(hci_read_local_codecs_sync),
4699 	 /* HCI_OP_READ_LOCAL_PAIRING_OPTS */
4700 	HCI_INIT(hci_read_local_pairing_opts_sync),
4701 	 /* HCI_OP_GET_MWS_TRANSPORT_CONFIG */
4702 	HCI_INIT(hci_get_mws_transport_config_sync),
4703 	 /* HCI_OP_READ_SYNC_TRAIN_PARAMS */
4704 	HCI_INIT(hci_read_sync_train_params_sync),
4705 	/* HCI_OP_WRITE_SC_SUPPORT */
4706 	HCI_INIT(hci_write_sc_support_1_sync),
4707 	/* HCI_OP_WRITE_DEF_ERR_DATA_REPORTING */
4708 	HCI_INIT(hci_set_err_data_report_sync),
4709 	{}
4710 };
4711 
4712 /* Set Suggested Default Data Length to maximum if supported */
4713 static int hci_le_set_write_def_data_len_sync(struct hci_dev *hdev)
4714 {
4715 	struct hci_cp_le_write_def_data_len cp;
4716 
4717 	if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT))
4718 		return 0;
4719 
4720 	memset(&cp, 0, sizeof(cp));
4721 	cp.tx_len = cpu_to_le16(hdev->le_max_tx_len);
4722 	cp.tx_time = cpu_to_le16(hdev->le_max_tx_time);
4723 
4724 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN,
4725 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
4726 }
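
/* Illustrative note (not part of the original file): hdev->le_max_tx_len
 * and hdev->le_max_tx_time are populated from the controller's LE Read
 * Maximum Data Length response, so the suggested defaults written here
 * never exceed what the link layer actually supports (the spec bounds
 * them to 0x001B-0x00FB octets and 0x0148-0x4290 microseconds).
 */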
4727 
4728 /* Set Default PHY parameters if the command is supported; enable all
4729  * supported PHYs according to the LE Features bits.
4730  */
4731 static int hci_le_set_default_phy_sync(struct hci_dev *hdev)
4732 {
4733 	struct hci_cp_le_set_default_phy cp;
4734 
4735 	if (!(hdev->commands[35] & 0x20)) {
4736 		/* If the command is not supported it means only 1M PHY is
4737 		 * supported.
4738 		 */
4739 		hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
4740 		hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
4741 		return 0;
4742 	}
4743 
4744 	memset(&cp, 0, sizeof(cp));
4745 	cp.all_phys = 0x00;
4746 	cp.tx_phys = HCI_LE_SET_PHY_1M;
4747 	cp.rx_phys = HCI_LE_SET_PHY_1M;
4748 
4749 	/* Enables 2M PHY if supported */
4750 	if (le_2m_capable(hdev)) {
4751 		cp.tx_phys |= HCI_LE_SET_PHY_2M;
4752 		cp.rx_phys |= HCI_LE_SET_PHY_2M;
4753 	}
4754 
4755 	/* Enables Coded PHY if supported */
4756 	if (le_coded_capable(hdev)) {
4757 		cp.tx_phys |= HCI_LE_SET_PHY_CODED;
4758 		cp.rx_phys |= HCI_LE_SET_PHY_CODED;
4759 	}
4760 
4761 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
4762 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
4763 }
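
/* Illustrative note (not part of the original file): the PHY preference
 * fields are bitmasks (HCI_LE_SET_PHY_1M is 0x01, _2M is 0x02, _CODED is
 * 0x04), so a controller supporting all three PHYs ends up with tx_phys
 * and rx_phys set to 0x07, while all_phys = 0x00 means "honour both the
 * TX and RX preferences given below".
 */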
4764 
4765 static const struct hci_init_stage le_init4[] = {
4766 	/* HCI_OP_LE_WRITE_DEF_DATA_LEN */
4767 	HCI_INIT(hci_le_set_write_def_data_len_sync),
4768 	/* HCI_OP_LE_SET_DEFAULT_PHY */
4769 	HCI_INIT(hci_le_set_default_phy_sync),
4770 	{}
4771 };
4772 
4773 static int hci_init4_sync(struct hci_dev *hdev)
4774 {
4775 	int err;
4776 
4777 	bt_dev_dbg(hdev, "");
4778 
4779 	err = hci_init_stage_sync(hdev, hci_init4);
4780 	if (err)
4781 		return err;
4782 
4783 	if (lmp_le_capable(hdev))
4784 		return hci_init_stage_sync(hdev, le_init4);
4785 
4786 	return 0;
4787 }
4788 
4789 static int hci_init_sync(struct hci_dev *hdev)
4790 {
4791 	int err;
4792 
4793 	err = hci_init1_sync(hdev);
4794 	if (err < 0)
4795 		return err;
4796 
4797 	if (hci_dev_test_flag(hdev, HCI_SETUP))
4798 		hci_debugfs_create_basic(hdev);
4799 
4800 	err = hci_init2_sync(hdev);
4801 	if (err < 0)
4802 		return err;
4803 
4804 	err = hci_init3_sync(hdev);
4805 	if (err < 0)
4806 		return err;
4807 
4808 	err = hci_init4_sync(hdev);
4809 	if (err < 0)
4810 		return err;
4811 
4812 	/* This function is only called when the controller is actually in
4813 	 * configured state. When the controller is marked as unconfigured,
4814 	 * this initialization procedure is not run.
4815 	 *
4816 	 * It means that it is possible that a controller runs through its
4817 	 * setup phase and then discovers missing settings. If that is the
4818 	 * case, then this function will not be called. It will then only
4819 	 * be called during the config phase.
4820 	 *
4821 	 * So only when in setup phase or config phase, create the debugfs
4822 	 * entries and register the SMP channels.
4823 	 */
4824 	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
4825 	    !hci_dev_test_flag(hdev, HCI_CONFIG))
4826 		return 0;
4827 
4828 	if (hci_dev_test_and_set_flag(hdev, HCI_DEBUGFS_CREATED))
4829 		return 0;
4830 
4831 	hci_debugfs_create_common(hdev);
4832 
4833 	if (lmp_bredr_capable(hdev))
4834 		hci_debugfs_create_bredr(hdev);
4835 
4836 	if (lmp_le_capable(hdev))
4837 		hci_debugfs_create_le(hdev);
4838 
4839 	return 0;
4840 }
4841 
4842 #define HCI_QUIRK_BROKEN(_quirk, _desc) { HCI_QUIRK_BROKEN_##_quirk, _desc }
4843 
4844 static const struct {
4845 	unsigned long quirk;
4846 	const char *desc;
4847 } hci_broken_table[] = {
4848 	HCI_QUIRK_BROKEN(LOCAL_COMMANDS,
4849 			 "HCI Read Local Supported Commands not supported"),
4850 	HCI_QUIRK_BROKEN(STORED_LINK_KEY,
4851 			 "HCI Delete Stored Link Key command is advertised, "
4852 			 "but not supported."),
4853 	HCI_QUIRK_BROKEN(ERR_DATA_REPORTING,
4854 			 "HCI Read Default Erroneous Data Reporting command is "
4855 			 "advertised, but not supported."),
4856 	HCI_QUIRK_BROKEN(READ_TRANSMIT_POWER,
4857 			 "HCI Read Transmit Power Level command is advertised, "
4858 			 "but not supported."),
4859 	HCI_QUIRK_BROKEN(FILTER_CLEAR_ALL,
4860 			 "HCI Set Event Filter command not supported."),
4861 	HCI_QUIRK_BROKEN(ENHANCED_SETUP_SYNC_CONN,
4862 			 "HCI Enhanced Setup Synchronous Connection command is "
4863 			 "advertised, but not supported."),
4864 	HCI_QUIRK_BROKEN(SET_RPA_TIMEOUT,
4865 			 "HCI LE Set Random Private Address Timeout command is "
4866 			 "advertised, but not supported."),
4867 	HCI_QUIRK_BROKEN(EXT_CREATE_CONN,
4868 			 "HCI LE Extended Create Connection command is "
4869 			 "advertised, but not supported."),
4870 	HCI_QUIRK_BROKEN(WRITE_AUTH_PAYLOAD_TIMEOUT,
4871 			 "HCI WRITE AUTH PAYLOAD TIMEOUT command leads "
4872 			 "to unexpected SMP errors when pairing "
4873 			 "and will not be used."),
4874 	HCI_QUIRK_BROKEN(LE_CODED,
4875 			 "HCI LE Coded PHY feature bit is set, "
4876 			 "but its usage is not supported.")
4877 };
4878 
4879 /* This function handles hdev setup stage:
4880  *
4881  * Calls hdev->setup
4882  * Setup address if HCI_QUIRK_USE_BDADDR_PROPERTY is set.
4883  */
4884 static int hci_dev_setup_sync(struct hci_dev *hdev)
4885 {
4886 	int ret = 0;
4887 	bool invalid_bdaddr;
4888 	size_t i;
4889 
4890 	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
4891 	    !test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks))
4892 		return 0;
4893 
4894 	bt_dev_dbg(hdev, "");
4895 
4896 	hci_sock_dev_event(hdev, HCI_DEV_SETUP);
4897 
4898 	if (hdev->setup)
4899 		ret = hdev->setup(hdev);
4900 
4901 	for (i = 0; i < ARRAY_SIZE(hci_broken_table); i++) {
4902 		if (test_bit(hci_broken_table[i].quirk, &hdev->quirks))
4903 			bt_dev_warn(hdev, "%s", hci_broken_table[i].desc);
4904 	}
4905 
4906 	/* The transport driver can set the quirk to mark the
4907 	 * BD_ADDR invalid before creating the HCI device or in
4908 	 * its setup callback.
4909 	 */
4910 	invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
4911 			 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
4912 	if (!ret) {
4913 		if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks) &&
4914 		    !bacmp(&hdev->public_addr, BDADDR_ANY))
4915 			hci_dev_get_bd_addr_from_property(hdev);
4916 
4917 		if (invalid_bdaddr && bacmp(&hdev->public_addr, BDADDR_ANY) &&
4918 		    hdev->set_bdaddr) {
4919 			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
4920 			if (!ret)
4921 				invalid_bdaddr = false;
4922 		}
4923 	}
4924 
4925 	/* The transport driver can set these quirks before
4926 	 * creating the HCI device or in its setup callback.
4927 	 *
4928 	 * For the invalid BD_ADDR quirk it is possible that
4929 	 * it becomes a valid address if the bootloader does
4930 	 * provide it (see above).
4931 	 *
4932 	 * In case any of them is set, the controller has to
4933 	 * start up as unconfigured.
4934 	 */
4935 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
4936 	    invalid_bdaddr)
4937 		hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
4938 
4939 	/* For an unconfigured controller it is required to
4940 	 * read at least the version information provided by
4941 	 * the Read Local Version Information command.
4942 	 *
4943 	 * If the set_bdaddr driver callback is provided, then
4944 	 * also the original Bluetooth public device address
4945 	 * will be read using the Read BD Address command.
4946 	 */
4947 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
4948 		return hci_unconf_init_sync(hdev);
4949 
4950 	return ret;
4951 }
4952 
4953 /* This function handles the hdev init stage:
4954  *
4955  * Calls hci_dev_setup_sync to perform setup stage
4956  * Calls hci_init_sync to perform HCI command init sequence
4957  */
4958 static int hci_dev_init_sync(struct hci_dev *hdev)
4959 {
4960 	int ret;
4961 
4962 	bt_dev_dbg(hdev, "");
4963 
4964 	atomic_set(&hdev->cmd_cnt, 1);
4965 	set_bit(HCI_INIT, &hdev->flags);
4966 
4967 	ret = hci_dev_setup_sync(hdev);
4968 
4969 	if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
4970 		/* If public address change is configured, ensure that
4971 		 * the address gets programmed. If the driver does not
4972 		 * support changing the public address, fail the power
4973 		 * on procedure.
4974 		 */
4975 		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
4976 		    hdev->set_bdaddr)
4977 			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
4978 		else
4979 			ret = -EADDRNOTAVAIL;
4980 	}
4981 
4982 	if (!ret) {
4983 		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
4984 		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4985 			ret = hci_init_sync(hdev);
4986 			if (!ret && hdev->post_init)
4987 				ret = hdev->post_init(hdev);
4988 		}
4989 	}
4990 
4991 	/* If the HCI Reset command is clearing all diagnostic settings,
4992 	 * then they need to be reprogrammed after the init procedure
4993 	 * completed.
4994 	 */
4995 	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
4996 	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
4997 	    hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
4998 		ret = hdev->set_diag(hdev, true);
4999 
5000 	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
5001 		msft_do_open(hdev);
5002 		aosp_do_open(hdev);
5003 	}
5004 
5005 	clear_bit(HCI_INIT, &hdev->flags);
5006 
5007 	return ret;
5008 }
5009 
5010 int hci_dev_open_sync(struct hci_dev *hdev)
5011 {
5012 	int ret;
5013 
5014 	bt_dev_dbg(hdev, "");
5015 
5016 	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
5017 		ret = -ENODEV;
5018 		goto done;
5019 	}
5020 
5021 	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
5022 	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
5023 		/* Check for rfkill but allow the HCI setup stage to
5024 		 * proceed (which in itself doesn't cause any RF activity).
5025 		 */
5026 		if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
5027 			ret = -ERFKILL;
5028 			goto done;
5029 		}
5030 
5031 		/* Check for valid public address or a configured static
5032 		 * random address, but let the HCI setup proceed to
5033 		 * be able to determine if there is a public address
5034 		 * or not.
5035 		 *
5036 		 * In case of user channel usage, it is not important
5037 		 * if a public address or static random address is
5038 		 * available.
5039 		 */
5040 		if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
5041 		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
5042 		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
5043 			ret = -EADDRNOTAVAIL;
5044 			goto done;
5045 		}
5046 	}
5047 
5048 	if (test_bit(HCI_UP, &hdev->flags)) {
5049 		ret = -EALREADY;
5050 		goto done;
5051 	}
5052 
5053 	if (hdev->open(hdev)) {
5054 		ret = -EIO;
5055 		goto done;
5056 	}
5057 
5058 	hci_devcd_reset(hdev);
5059 
5060 	set_bit(HCI_RUNNING, &hdev->flags);
5061 	hci_sock_dev_event(hdev, HCI_DEV_OPEN);
5062 
5063 	ret = hci_dev_init_sync(hdev);
5064 	if (!ret) {
5065 		hci_dev_hold(hdev);
5066 		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
5067 		hci_adv_instances_set_rpa_expired(hdev, true);
5068 		set_bit(HCI_UP, &hdev->flags);
5069 		hci_sock_dev_event(hdev, HCI_DEV_UP);
5070 		hci_leds_update_powered(hdev, true);
5071 		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
5072 		    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
5073 		    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
5074 		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
5075 		    hci_dev_test_flag(hdev, HCI_MGMT)) {
5076 			ret = hci_powered_update_sync(hdev);
5077 			mgmt_power_on(hdev, ret);
5078 		}
5079 	} else {
5080 		/* Init failed, cleanup */
5081 		flush_work(&hdev->tx_work);
5082 
5083 		/* Since hci_rx_work() can wake up new cmd_work, it should
5084 		 * be flushed first to avoid an unexpected call of
5085 		 * hci_cmd_work()
5086 		 */
5087 		flush_work(&hdev->rx_work);
5088 		flush_work(&hdev->cmd_work);
5089 
5090 		skb_queue_purge(&hdev->cmd_q);
5091 		skb_queue_purge(&hdev->rx_q);
5092 
5093 		if (hdev->flush)
5094 			hdev->flush(hdev);
5095 
5096 		if (hdev->sent_cmd) {
5097 			cancel_delayed_work_sync(&hdev->cmd_timer);
5098 			kfree_skb(hdev->sent_cmd);
5099 			hdev->sent_cmd = NULL;
5100 		}
5101 
5102 		if (hdev->req_skb) {
5103 			kfree_skb(hdev->req_skb);
5104 			hdev->req_skb = NULL;
5105 		}
5106 
5107 		clear_bit(HCI_RUNNING, &hdev->flags);
5108 		hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
5109 
5110 		hdev->close(hdev);
5111 		hdev->flags &= BIT(HCI_RAW);
5112 	}
5113 
5114 done:
5115 	return ret;
5116 }
5117 
5118 /* This function requires the caller holds hdev->lock */
5119 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
5120 {
5121 	struct hci_conn_params *p;
5122 
5123 	list_for_each_entry(p, &hdev->le_conn_params, list) {
5124 		hci_pend_le_list_del_init(p);
5125 		if (p->conn) {
5126 			hci_conn_drop(p->conn);
5127 			hci_conn_put(p->conn);
5128 			p->conn = NULL;
5129 		}
5130 	}
5131 
5132 	BT_DBG("All LE pending actions cleared");
5133 }
5134 
5135 static int hci_dev_shutdown(struct hci_dev *hdev)
5136 {
5137 	int err = 0;
5138 	/* Similar to how we first do setup and then set the exclusive access
5139 	 * bit for userspace, we must first unset userchannel and then clean up.
5140 	 * Otherwise, the kernel can't properly use the hci channel to clean up
5141 	 * the controller (some shutdown routines require sending additional
5142 	 * commands to the controller for example).
5143 	 */
5144 	bool was_userchannel =
5145 		hci_dev_test_and_clear_flag(hdev, HCI_USER_CHANNEL);
5146 
5147 	if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
5148 	    test_bit(HCI_UP, &hdev->flags)) {
5149 		/* Execute vendor specific shutdown routine */
5150 		if (hdev->shutdown)
5151 			err = hdev->shutdown(hdev);
5152 	}
5153 
5154 	if (was_userchannel)
5155 		hci_dev_set_flag(hdev, HCI_USER_CHANNEL);
5156 
5157 	return err;
5158 }
5159 
5160 int hci_dev_close_sync(struct hci_dev *hdev)
5161 {
5162 	bool auto_off;
5163 	int err = 0;
5164 
5165 	bt_dev_dbg(hdev, "");
5166 
5167 	cancel_delayed_work(&hdev->power_off);
5168 	cancel_delayed_work(&hdev->ncmd_timer);
5169 	cancel_delayed_work(&hdev->le_scan_disable);
5170 	cancel_delayed_work(&hdev->le_scan_restart);
5171 
5172 	hci_request_cancel_all(hdev);
5173 
5174 	if (hdev->adv_instance_timeout) {
5175 		cancel_delayed_work_sync(&hdev->adv_instance_expire);
5176 		hdev->adv_instance_timeout = 0;
5177 	}
5178 
5179 	err = hci_dev_shutdown(hdev);
5180 
5181 	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
5182 		cancel_delayed_work_sync(&hdev->cmd_timer);
5183 		return err;
5184 	}
5185 
5186 	hci_leds_update_powered(hdev, false);
5187 
5188 	/* Flush RX and TX works */
5189 	flush_work(&hdev->tx_work);
5190 	flush_work(&hdev->rx_work);
5191 
5192 	if (hdev->discov_timeout > 0) {
5193 		hdev->discov_timeout = 0;
5194 		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
5195 		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
5196 	}
5197 
5198 	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
5199 		cancel_delayed_work(&hdev->service_cache);
5200 
5201 	if (hci_dev_test_flag(hdev, HCI_MGMT)) {
5202 		struct adv_info *adv_instance;
5203 
5204 		cancel_delayed_work_sync(&hdev->rpa_expired);
5205 
5206 		list_for_each_entry(adv_instance, &hdev->adv_instances, list)
5207 			cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
5208 	}
5209 
5210 	/* Avoid potential lockdep warnings from the *_flush() calls by
5211 	 * ensuring the workqueue is empty up front.
5212 	 */
5213 	drain_workqueue(hdev->workqueue);
5214 
5215 	hci_dev_lock(hdev);
5216 
5217 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
5218 
5219 	auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
5220 
5221 	if (!auto_off && !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
5222 	    hci_dev_test_flag(hdev, HCI_MGMT))
5223 		__mgmt_power_off(hdev);
5224 
5225 	hci_inquiry_cache_flush(hdev);
5226 	hci_pend_le_actions_clear(hdev);
5227 	hci_conn_hash_flush(hdev);
5228 	/* Prevent data races on hdev->smp_data or hdev->smp_bredr_data */
5229 	smp_unregister(hdev);
5230 	hci_dev_unlock(hdev);
5231 
5232 	hci_sock_dev_event(hdev, HCI_DEV_DOWN);
5233 
5234 	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
5235 		aosp_do_close(hdev);
5236 		msft_do_close(hdev);
5237 	}
5238 
5239 	if (hdev->flush)
5240 		hdev->flush(hdev);
5241 
5242 	/* Reset device */
5243 	skb_queue_purge(&hdev->cmd_q);
5244 	atomic_set(&hdev->cmd_cnt, 1);
5245 	if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
5246 	    !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
5247 		set_bit(HCI_INIT, &hdev->flags);
5248 		hci_reset_sync(hdev);
5249 		clear_bit(HCI_INIT, &hdev->flags);
5250 	}
5251 
5252 	/* Flush cmd work */
5253 	flush_work(&hdev->cmd_work);
5254 
5255 	/* Drop queues */
5256 	skb_queue_purge(&hdev->rx_q);
5257 	skb_queue_purge(&hdev->cmd_q);
5258 	skb_queue_purge(&hdev->raw_q);
5259 
5260 	/* Drop last sent command */
5261 	if (hdev->sent_cmd) {
5262 		cancel_delayed_work_sync(&hdev->cmd_timer);
5263 		kfree_skb(hdev->sent_cmd);
5264 		hdev->sent_cmd = NULL;
5265 	}
5266 
5267 	/* Drop last request */
5268 	if (hdev->req_skb) {
5269 		kfree_skb(hdev->req_skb);
5270 		hdev->req_skb = NULL;
5271 	}
5272 
5273 	clear_bit(HCI_RUNNING, &hdev->flags);
5274 	hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
5275 
5276 	/* After this point our queues are empty and no tasks are scheduled. */
5277 	hdev->close(hdev);
5278 
5279 	/* Clear flags */
5280 	hdev->flags &= BIT(HCI_RAW);
5281 	hci_dev_clear_volatile_flags(hdev);
5282 
5283 	memset(hdev->eir, 0, sizeof(hdev->eir));
5284 	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
5285 	bacpy(&hdev->random_addr, BDADDR_ANY);
5286 	hci_codec_list_clear(&hdev->local_codecs);
5287 
5288 	hci_dev_put(hdev);
5289 	return err;
5290 }
5291 
5292 /* This function performs the power on HCI command sequence as follows:
5293  *
5294  * If the controller is already up (HCI_UP) it performs the
5295  * hci_powered_update_sync sequence; otherwise it runs hci_dev_open_sync,
5296  * which follows up with hci_powered_update_sync once init has completed.
5297  */
5298 static int hci_power_on_sync(struct hci_dev *hdev)
5299 {
5300 	int err;
5301 
5302 	if (test_bit(HCI_UP, &hdev->flags) &&
5303 	    hci_dev_test_flag(hdev, HCI_MGMT) &&
5304 	    hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
5305 		cancel_delayed_work(&hdev->power_off);
5306 		return hci_powered_update_sync(hdev);
5307 	}
5308 
5309 	err = hci_dev_open_sync(hdev);
5310 	if (err < 0)
5311 		return err;
5312 
5313 	/* During the HCI setup phase, a few error conditions are
5314 	 * ignored and they need to be checked now. If they are still
5315 	 * valid, it is important to turn the device back off.
5316 	 */
5317 	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
5318 	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
5319 	    (!bacmp(&hdev->bdaddr, BDADDR_ANY) &&
5320 	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
5321 		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
5322 		hci_dev_close_sync(hdev);
5323 	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
5324 		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
5325 				   HCI_AUTO_OFF_TIMEOUT);
5326 	}
5327 
5328 	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
5329 		/* For unconfigured devices, set the HCI_RAW flag
5330 		 * so that userspace can easily identify them.
5331 		 */
5332 		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
5333 			set_bit(HCI_RAW, &hdev->flags);
5334 
5335 		/* For fully configured devices, this will send
5336 		 * the Index Added event. For unconfigured devices,
5337 		 * it will send the Unconfigured Index Added event.
5338 		 *
5339 		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
5340 		 * and no event will be sent.
5341 		 */
5342 		mgmt_index_added(hdev);
5343 	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
5344 		/* Once the controller is configured, it is
5345 		 * important to clear the HCI_RAW flag.
5346 		 */
5347 		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
5348 			clear_bit(HCI_RAW, &hdev->flags);
5349 
5350 		/* Powering on the controller with HCI_CONFIG set only
5351 		 * happens with the transition from unconfigured to
5352 		 * configured. This will send the Index Added event.
5353 		 */
5354 		mgmt_index_added(hdev);
5355 	}
5356 
5357 	return 0;
5358 }
5359 
5360 static int hci_remote_name_cancel_sync(struct hci_dev *hdev, bdaddr_t *addr)
5361 {
5362 	struct hci_cp_remote_name_req_cancel cp;
5363 
5364 	memset(&cp, 0, sizeof(cp));
5365 	bacpy(&cp.bdaddr, addr);
5366 
5367 	return __hci_cmd_sync_status(hdev, HCI_OP_REMOTE_NAME_REQ_CANCEL,
5368 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
5369 }
5370 
5371 int hci_stop_discovery_sync(struct hci_dev *hdev)
5372 {
5373 	struct discovery_state *d = &hdev->discovery;
5374 	struct inquiry_entry *e;
5375 	int err;
5376 
5377 	bt_dev_dbg(hdev, "state %u", hdev->discovery.state);
5378 
5379 	if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
5380 		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
5381 			err = __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY_CANCEL,
5382 						    0, NULL, HCI_CMD_TIMEOUT);
5383 			if (err)
5384 				return err;
5385 		}
5386 
5387 		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
5388 			cancel_delayed_work(&hdev->le_scan_disable);
5389 			cancel_delayed_work(&hdev->le_scan_restart);
5390 
5391 			err = hci_scan_disable_sync(hdev);
5392 			if (err)
5393 				return err;
5394 		}
5395 
5396 	} else {
5397 		err = hci_scan_disable_sync(hdev);
5398 		if (err)
5399 			return err;
5400 	}
5401 
5402 	/* Resume advertising if it was paused */
5403 	if (use_ll_privacy(hdev))
5404 		hci_resume_advertising_sync(hdev);
5405 
5406 	/* No further actions needed for LE-only discovery */
5407 	if (d->type == DISCOV_TYPE_LE)
5408 		return 0;
5409 
5410 	if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
5411 		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
5412 						     NAME_PENDING);
5413 		if (!e)
5414 			return 0;
5415 
5416 		/* Ignore cancel errors since they should not interfere with
5417 		 * stopping the discovery.
5418 		 */
5419 		hci_remote_name_cancel_sync(hdev, &e->data.bdaddr);
5420 	}
5421 
5422 	return 0;
5423 }
5424 
5425 static int hci_disconnect_sync(struct hci_dev *hdev, struct hci_conn *conn,
5426 			       u8 reason)
5427 {
5428 	struct hci_cp_disconnect cp;
5429 
5430 	if (test_bit(HCI_CONN_BIG_CREATED, &conn->flags)) {
5431 		/* This is a BIS connection, hci_conn_del will
5432 		 * do the necessary cleanup.
5433 		 */
5434 		hci_dev_lock(hdev);
5435 		hci_conn_failed(conn, reason);
5436 		hci_dev_unlock(hdev);
5437 
5438 		return 0;
5439 	}
5440 
5441 	memset(&cp, 0, sizeof(cp));
5442 	cp.handle = cpu_to_le16(conn->handle);
5443 	cp.reason = reason;
5444 
5445 	/* Wait for HCI_EV_DISCONN_COMPLETE, not HCI_EV_CMD_STATUS, when the
5446 	 * reason is anything but HCI_ERROR_REMOTE_POWER_OFF. This reason is
5447 	 * used when suspending or powering off, where we don't want to wait
5448 	 * for the peer's response.
5449 	 */
5450 	if (reason != HCI_ERROR_REMOTE_POWER_OFF)
5451 		return __hci_cmd_sync_status_sk(hdev, HCI_OP_DISCONNECT,
5452 						sizeof(cp), &cp,
5453 						HCI_EV_DISCONN_COMPLETE,
5454 						HCI_CMD_TIMEOUT, NULL);
5455 
5456 	return __hci_cmd_sync_status(hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp,
5457 				     HCI_CMD_TIMEOUT);
5458 }
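
/* Illustrative sketch (not part of the original file): how the reason code
 * above selects the wait behavior. A hypothetical teardown path that needs
 * the link to be fully gone would wait for the disconnect event:
 *
 *	err = hci_disconnect_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
 *
 * while a power-off path only waits for the command status and moves on:
 *
 *	err = hci_disconnect_sync(hdev, conn, HCI_ERROR_REMOTE_POWER_OFF);
 */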
5459 
5460 static int hci_le_connect_cancel_sync(struct hci_dev *hdev,
5461 				      struct hci_conn *conn, u8 reason)
5462 {
5463 	/* Return the reason if still scanning, since the connection will
5464 	 * probably be cleaned up directly.
5465 	 */
5466 	if (test_bit(HCI_CONN_SCANNING, &conn->flags))
5467 		return reason;
5468 
5469 	if (conn->role == HCI_ROLE_SLAVE ||
5470 	    test_and_set_bit(HCI_CONN_CANCEL, &conn->flags))
5471 		return 0;
5472 
5473 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_CREATE_CONN_CANCEL,
5474 				     0, NULL, HCI_CMD_TIMEOUT);
5475 }
5476 
5477 static int hci_connect_cancel_sync(struct hci_dev *hdev, struct hci_conn *conn,
5478 				   u8 reason)
5479 {
5480 	if (conn->type == LE_LINK)
5481 		return hci_le_connect_cancel_sync(hdev, conn, reason);
5482 
5483 	if (conn->type == ISO_LINK) {
5484 		/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
5485 		 * page 1857:
5486 		 *
5487 		 * If this command is issued for a CIS on the Central and the
5488 		 * CIS is successfully terminated before being established,
5489 		 * then an HCI_LE_CIS_Established event shall also be sent for
5490 		 * this CIS with the Status Operation Cancelled by Host (0x44).
5491 		 */
5492 		if (test_bit(HCI_CONN_CREATE_CIS, &conn->flags))
5493 			return hci_disconnect_sync(hdev, conn, reason);
5494 
5495 		/* A CIS for which no Create CIS was sent has nothing to cancel */
5496 		if (bacmp(&conn->dst, BDADDR_ANY))
5497 			return HCI_ERROR_LOCAL_HOST_TERM;
5498 
5499 		/* There is no way to cancel a BIS without terminating the BIG
5500 		 * which is done later on connection cleanup.
5501 		 */
5502 		return 0;
5503 	}
5504 
5505 	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
5506 		return 0;
5507 
5508 	/* Wait for HCI_EV_CONN_COMPLETE, not HCI_EV_CMD_STATUS, when the
5509 	 * reason is anything but HCI_ERROR_REMOTE_POWER_OFF. This reason is
5510 	 * used when suspending or powering off, where we don't want to wait
5511 	 * for the peer's response.
5512 	 */
5513 	if (reason != HCI_ERROR_REMOTE_POWER_OFF)
5514 		return __hci_cmd_sync_status_sk(hdev, HCI_OP_CREATE_CONN_CANCEL,
5515 						6, &conn->dst,
5516 						HCI_EV_CONN_COMPLETE,
5517 						HCI_CMD_TIMEOUT, NULL);
5518 
5519 	return __hci_cmd_sync_status(hdev, HCI_OP_CREATE_CONN_CANCEL,
5520 				     6, &conn->dst, HCI_CMD_TIMEOUT);
5521 }
5522 
5523 static int hci_reject_sco_sync(struct hci_dev *hdev, struct hci_conn *conn,
5524 			       u8 reason)
5525 {
5526 	struct hci_cp_reject_sync_conn_req cp;
5527 
5528 	memset(&cp, 0, sizeof(cp));
5529 	bacpy(&cp.bdaddr, &conn->dst);
5530 	cp.reason = reason;
5531 
5532 	/* SCO rejection has its own limited set of
5533 	 * allowed error values (0x0D-0x0F).
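	 * These map to Connection Rejected due to Limited Resources (0x0D),
	 * Security Reasons (0x0E) and Unacceptable BD_ADDR (0x0F).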
5534 	 */
5535 	if (reason < 0x0d || reason > 0x0f)
5536 		cp.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
5537 
5538 	return __hci_cmd_sync_status(hdev, HCI_OP_REJECT_SYNC_CONN_REQ,
5539 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
5540 }
5541 
5542 static int hci_le_reject_cis_sync(struct hci_dev *hdev, struct hci_conn *conn,
5543 				  u8 reason)
5544 {
5545 	struct hci_cp_le_reject_cis cp;
5546 
5547 	memset(&cp, 0, sizeof(cp));
5548 	cp.handle = cpu_to_le16(conn->handle);
5549 	cp.reason = reason;
5550 
5551 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_REJECT_CIS,
5552 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
5553 }
5554 
5555 static int hci_reject_conn_sync(struct hci_dev *hdev, struct hci_conn *conn,
5556 				u8 reason)
5557 {
5558 	struct hci_cp_reject_conn_req cp;
5559 
5560 	if (conn->type == ISO_LINK)
5561 		return hci_le_reject_cis_sync(hdev, conn, reason);
5562 
5563 	if (conn->type == SCO_LINK || conn->type == ESCO_LINK)
5564 		return hci_reject_sco_sync(hdev, conn, reason);
5565 
5566 	memset(&cp, 0, sizeof(cp));
5567 	bacpy(&cp.bdaddr, &conn->dst);
5568 	cp.reason = reason;
5569 
5570 	return __hci_cmd_sync_status(hdev, HCI_OP_REJECT_CONN_REQ,
5571 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
5572 }
5573 
5574 int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason)
5575 {
5576 	int err = 0;
5577 	u16 handle = conn->handle;
5578 	bool disconnect = false;
5579 	struct hci_conn *c;
5580 
5581 	switch (conn->state) {
5582 	case BT_CONNECTED:
5583 	case BT_CONFIG:
5584 		err = hci_disconnect_sync(hdev, conn, reason);
5585 		break;
5586 	case BT_CONNECT:
5587 		err = hci_connect_cancel_sync(hdev, conn, reason);
5588 		break;
5589 	case BT_CONNECT2:
5590 		err = hci_reject_conn_sync(hdev, conn, reason);
5591 		break;
5592 	case BT_OPEN:
5593 	case BT_BOUND:
5594 		break;
5595 	default:
5596 		disconnect = true;
5597 		break;
5598 	}
5599 
5600 	hci_dev_lock(hdev);
5601 
5602 	/* Check if the connection has been cleaned up concurrently */
5603 	c = hci_conn_hash_lookup_handle(hdev, handle);
5604 	if (!c || c != conn) {
5605 		err = 0;
5606 		goto unlock;
5607 	}
5608 
5609 	/* Clean up the hci_conn object if it cannot be cancelled, as that
5610 	 * likely means the controller and host stack are out of sync, or,
5611 	 * in the case of LE, it was still scanning and so can be cleaned
5612 	 * up safely.
5613 	 */
5614 	if (disconnect) {
5615 		conn->state = BT_CLOSED;
5616 		hci_disconn_cfm(conn, reason);
5617 		hci_conn_del(conn);
5618 	} else {
5619 		hci_conn_failed(conn, reason);
5620 	}
5621 
5622 unlock:
5623 	hci_dev_unlock(hdev);
5624 	return err;
5625 }
5626 
5627 static int hci_disconnect_all_sync(struct hci_dev *hdev, u8 reason)
5628 {
5629 	struct list_head *head = &hdev->conn_hash.list;
5630 	struct hci_conn *conn;
5631 
5632 	rcu_read_lock();
5633 	while ((conn = list_first_or_null_rcu(head, struct hci_conn, list))) {
5634 		/* Make sure the connection is not freed while unlocking */
5635 		conn = hci_conn_get(conn);
5636 		rcu_read_unlock();
5637 		/* Disregard possible errors since hci_conn_del shall have been
5638 		 * called even if an error occurred, since an error causes
5639 		 * hci_conn_failed to be called, which calls hci_conn_del
5640 		 * internally.
5641 		 */
5642 		hci_abort_conn_sync(hdev, conn, reason);
5643 		hci_conn_put(conn);
5644 		rcu_read_lock();
5645 	}
5646 	rcu_read_unlock();
5647 
5648 	return 0;
5649 }
5650 
5651 /* This function performs the power off HCI command sequence as follows:
5652  *
5653  * Clear Advertising
5654  * Stop Discovery
5655  * Disconnect all connections
5656  * hci_dev_close_sync
5657  */
5658 static int hci_power_off_sync(struct hci_dev *hdev)
5659 {
5660 	int err;
5661 
5662 	/* If controller is already down there is nothing to do */
5663 	if (!test_bit(HCI_UP, &hdev->flags))
5664 		return 0;
5665 
5666 	if (test_bit(HCI_ISCAN, &hdev->flags) ||
5667 	    test_bit(HCI_PSCAN, &hdev->flags)) {
5668 		err = hci_write_scan_enable_sync(hdev, 0x00);
5669 		if (err)
5670 			return err;
5671 	}
5672 
5673 	err = hci_clear_adv_sync(hdev, NULL, false);
5674 	if (err)
5675 		return err;
5676 
5677 	err = hci_stop_discovery_sync(hdev);
5678 	if (err)
5679 		return err;
5680 
5681 	/* Terminated due to Power Off */
5682 	err = hci_disconnect_all_sync(hdev, HCI_ERROR_REMOTE_POWER_OFF);
5683 	if (err)
5684 		return err;
5685 
5686 	return hci_dev_close_sync(hdev);
5687 }
5688 
5689 int hci_set_powered_sync(struct hci_dev *hdev, u8 val)
5690 {
5691 	if (val)
5692 		return hci_power_on_sync(hdev);
5693 
5694 	return hci_power_off_sync(hdev);
5695 }
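
/* Illustrative sketch (not part of the original file, and unused by the
 * stack): a hypothetical helper power-cycling the controller via
 * hci_set_powered_sync(). hci_power_off_sync() is a no-op when the
 * controller is already down, so the sequence is safe to call blindly.
 */
static inline int hci_example_power_cycle_sync(struct hci_dev *hdev)
{
	int err;

	err = hci_set_powered_sync(hdev, 0x00);
	if (err)
		return err;

	return hci_set_powered_sync(hdev, 0x01);
}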
5696 
5697 static int hci_write_iac_sync(struct hci_dev *hdev)
5698 {
5699 	struct hci_cp_write_current_iac_lap cp;
5700 
5701 	if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
5702 		return 0;
5703 
5704 	memset(&cp, 0, sizeof(cp));
5705 
5706 	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
5707 		/* Limited discoverable mode */
5708 		cp.num_iac = min_t(u8, hdev->num_iac, 2);
5709 		cp.iac_lap[0] = 0x00;	/* LIAC */
5710 		cp.iac_lap[1] = 0x8b;
5711 		cp.iac_lap[2] = 0x9e;
5712 		cp.iac_lap[3] = 0x33;	/* GIAC */
5713 		cp.iac_lap[4] = 0x8b;
5714 		cp.iac_lap[5] = 0x9e;
5715 	} else {
5716 		/* General discoverable mode */
5717 		cp.num_iac = 1;
5718 		cp.iac_lap[0] = 0x33;	/* GIAC */
5719 		cp.iac_lap[1] = 0x8b;
5720 		cp.iac_lap[2] = 0x9e;
5721 	}
5722 
5723 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CURRENT_IAC_LAP,
5724 				     (cp.num_iac * 3) + 1, &cp,
5725 				     HCI_CMD_TIMEOUT);
5726 }
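
/* Note on the LAP byte order above (illustrative, not in the original
 * file): the General Inquiry Access Code LAP 0x9E8B33 and the Limited
 * Inquiry Access Code LAP 0x9E8B00 are written least significant byte
 * first, which is why GIAC appears as 0x33, 0x8b, 0x9e and LIAC as
 * 0x00, 0x8b, 0x9e in the iac_lap[] assignments.
 */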
5727 
5728 int hci_update_discoverable_sync(struct hci_dev *hdev)
5729 {
5730 	int err = 0;
5731 
5732 	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
5733 		err = hci_write_iac_sync(hdev);
5734 		if (err)
5735 			return err;
5736 
5737 		err = hci_update_scan_sync(hdev);
5738 		if (err)
5739 			return err;
5740 
5741 		err = hci_update_class_sync(hdev);
5742 		if (err)
5743 			return err;
5744 	}
5745 
5746 	/* Advertising instances don't use the global discoverable setting, so
5747 	 * only update AD if advertising was enabled using Set Advertising.
5748 	 */
5749 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
5750 		err = hci_update_adv_data_sync(hdev, 0x00);
5751 		if (err)
5752 			return err;
5753 
5754 		/* Discoverable mode affects the local advertising
5755 		 * address in limited privacy mode.
5756 		 */
5757 		if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
5758 			if (ext_adv_capable(hdev))
5759 				err = hci_start_ext_adv_sync(hdev, 0x00);
5760 			else
5761 				err = hci_enable_advertising_sync(hdev);
5762 		}
5763 	}
5764 
5765 	return err;
5766 }
5767 
5768 static int update_discoverable_sync(struct hci_dev *hdev, void *data)
5769 {
5770 	return hci_update_discoverable_sync(hdev);
5771 }
5772 
5773 int hci_update_discoverable(struct hci_dev *hdev)
5774 {
5775 	/* Only queue if it would have any effect */
5776 	if (hdev_is_powered(hdev) &&
5777 	    hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
5778 	    hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
5779 	    hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
5780 		return hci_cmd_sync_queue(hdev, update_discoverable_sync, NULL,
5781 					  NULL);
5782 
5783 	return 0;
5784 }
5785 
5786 int hci_update_connectable_sync(struct hci_dev *hdev)
5787 {
5788 	int err;
5789 
5790 	err = hci_update_scan_sync(hdev);
5791 	if (err)
5792 		return err;
5793 
5794 	/* If BR/EDR is not enabled and we disable advertising as a
5795 	 * by-product of disabling connectable, we need to update the
5796 	 * advertising flags.
5797 	 */
5798 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
5799 		err = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
5800 
5801 	/* Update the advertising parameters if necessary */
5802 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
5803 	    !list_empty(&hdev->adv_instances)) {
5804 		if (ext_adv_capable(hdev))
5805 			err = hci_start_ext_adv_sync(hdev,
5806 						     hdev->cur_adv_instance);
5807 		else
5808 			err = hci_enable_advertising_sync(hdev);
5809 
5810 		if (err)
5811 			return err;
5812 	}
5813 
5814 	return hci_update_passive_scan_sync(hdev);
5815 }
5816 
5817 static int hci_inquiry_sync(struct hci_dev *hdev, u8 length)
5818 {
5819 	const u8 giac[3] = { 0x33, 0x8b, 0x9e };
5820 	const u8 liac[3] = { 0x00, 0x8b, 0x9e };
5821 	struct hci_cp_inquiry cp;
5822 
5823 	bt_dev_dbg(hdev, "");
5824 
5825 	if (test_bit(HCI_INQUIRY, &hdev->flags))
5826 		return 0;
5827 
5828 	hci_dev_lock(hdev);
5829 	hci_inquiry_cache_flush(hdev);
5830 	hci_dev_unlock(hdev);
5831 
5832 	memset(&cp, 0, sizeof(cp));
5833 
5834 	if (hdev->discovery.limited)
5835 		memcpy(&cp.lap, liac, sizeof(cp.lap));
5836 	else
5837 		memcpy(&cp.lap, giac, sizeof(cp.lap));
5838 
5839 	cp.length = length;
5840 
5841 	return __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY,
5842 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
5843 }
5844 
5845 static int hci_active_scan_sync(struct hci_dev *hdev, uint16_t interval)
5846 {
5847 	u8 own_addr_type;
5848 	/* Accept list is not used for discovery */
5849 	u8 filter_policy = 0x00;
5850 	/* Default is to enable duplicates filter */
5851 	u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
5852 	int err;
5853 
5854 	bt_dev_dbg(hdev, "");
5855 
5856 	/* If controller is scanning, it means the passive scanning is
5857 	 * running. Thus, we should temporarily stop it in order to set the
5858 	 * discovery scanning parameters.
5859 	 */
5860 	err = hci_scan_disable_sync(hdev);
5861 	if (err) {
5862 		bt_dev_err(hdev, "Unable to disable scanning: %d", err);
5863 		return err;
5864 	}
5865 
5866 	cancel_interleave_scan(hdev);
5867 
5868 	/* Pause address resolution for active scan and stop advertising if
5869 	 * privacy is enabled.
5870 	 */
5871 	err = hci_pause_addr_resolution(hdev);
5872 	if (err)
5873 		goto failed;
5874 
5875 	/* All active scans will be done with either a resolvable private
5876 	 * address (when privacy feature has been enabled) or non-resolvable
5877 	 * private address.
5878 	 */
5879 	err = hci_update_random_address_sync(hdev, true, scan_use_rpa(hdev),
5880 					     &own_addr_type);
5881 	if (err < 0)
5882 		own_addr_type = ADDR_LE_DEV_PUBLIC;
5883 
5884 	if (hci_is_adv_monitoring(hdev)) {
5885 		/* The duplicate filter should be disabled when an advertisement
5886 		 * monitor is activated, otherwise AdvMon can only receive one
5887 		 * advertisement for one peer(*) during active scanning, and
5888 		 * might report loss to these peers.
5889 		 *
5890 		 * Note that different controllers have different meanings of
5891 		 * |duplicate|. Some of them consider packets with the same
5892 		 * address as duplicates, and others consider packets with the
5893 		 * same address and the same RSSI as duplicates. Although in
5894 		 * the latter case we would not need to disable the duplicate
5895 		 * filter, active scanning typically runs only for a short
5896 		 * period of time, so the power impact should be negligible.
5897 		 */
5898 		filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
5899 	}
5900 
5901 	err = hci_start_scan_sync(hdev, LE_SCAN_ACTIVE, interval,
5902 				  hdev->le_scan_window_discovery,
5903 				  own_addr_type, filter_policy, filter_dup);
5904 	if (!err)
5905 		return err;
5906 
5907 failed:
5908 	/* Resume advertising if it was paused */
5909 	if (use_ll_privacy(hdev))
5910 		hci_resume_advertising_sync(hdev);
5911 
5912 	/* Resume passive scanning */
5913 	hci_update_passive_scan_sync(hdev);
5914 	return err;
5915 }
5916 
5917 static int hci_start_interleaved_discovery_sync(struct hci_dev *hdev)
5918 {
5919 	int err;
5920 
5921 	bt_dev_dbg(hdev, "");
5922 
5923 	err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery * 2);
5924 	if (err)
5925 		return err;
5926 
5927 	return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN);
5928 }
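
/* Illustrative note (not in the original file): doubling the LE scan
 * interval above halves the LE duty cycle, which leaves the controller
 * idle windows in which it can interleave the BR/EDR inquiry during
 * simultaneous discovery.
 */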
5929 
5930 int hci_start_discovery_sync(struct hci_dev *hdev)
5931 {
5932 	unsigned long timeout;
5933 	int err;
5934 
5935 	bt_dev_dbg(hdev, "type %u", hdev->discovery.type);
5936 
5937 	switch (hdev->discovery.type) {
5938 	case DISCOV_TYPE_BREDR:
5939 		return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN);
5940 	case DISCOV_TYPE_INTERLEAVED:
5941 		/* When running simultaneous discovery, the LE scanning time
5942 		 * should occupy the whole discovery time since BR/EDR inquiry
5943 		 * and LE scanning are scheduled by the controller.
5944 		 *
5945 		 * For interleaving discovery in comparison, BR/EDR inquiry
5946 		 * and LE scanning are done sequentially with separate
5947 		 * timeouts.
5948 		 */
5949 		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
5950 			     &hdev->quirks)) {
5951 			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
5952 			/* During simultaneous discovery, we double LE scan
5953 			 * interval. We must leave some time for the controller
5954 			 * to do BR/EDR inquiry.
5955 			 */
5956 			err = hci_start_interleaved_discovery_sync(hdev);
5957 			break;
5958 		}
5959 
5960 		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
5961 		err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery);
5962 		break;
5963 	case DISCOV_TYPE_LE:
5964 		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
5965 		err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery);
5966 		break;
5967 	default:
5968 		return -EINVAL;
5969 	}
5970 
5971 	if (err)
5972 		return err;
5973 
5974 	bt_dev_dbg(hdev, "timeout %u ms", jiffies_to_msecs(timeout));
5975 
5976 	/* When service discovery is used and the controller has a
5977 	 * strict duplicate filter, it is important to remember the
5978 	 * start and duration of the scan. This is required for
5979 	 * restarting scanning during the discovery phase.
5980 	 */
5981 	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
5982 	    hdev->discovery.result_filtering) {
5983 		hdev->discovery.scan_start = jiffies;
5984 		hdev->discovery.scan_duration = timeout;
5985 	}
5986 
5987 	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
5988 			   timeout);
5989 	return 0;
5990 }
5991 
5992 static void hci_suspend_monitor_sync(struct hci_dev *hdev)
5993 {
5994 	switch (hci_get_adv_monitor_offload_ext(hdev)) {
5995 	case HCI_ADV_MONITOR_EXT_MSFT:
5996 		msft_suspend_sync(hdev);
5997 		break;
5998 	default:
5999 		return;
6000 	}
6001 }
6002 
6003 /* This function disables discovery and marks it as paused */
6004 static int hci_pause_discovery_sync(struct hci_dev *hdev)
6005 {
6006 	int old_state = hdev->discovery.state;
6007 	int err;
6008 
6009 	/* Nothing to do if discovery is already stopped/stopping/paused */
6010 	if (old_state == DISCOVERY_STOPPED || old_state == DISCOVERY_STOPPING ||
6011 	    hdev->discovery_paused)
6012 		return 0;
6013 
6014 	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
6015 	err = hci_stop_discovery_sync(hdev);
6016 	if (err)
6017 		return err;
6018 
6019 	hdev->discovery_paused = true;
6020 	hdev->discovery_old_state = old_state;
6021 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
6022 
6023 	return 0;
6024 }
6025 
6026 static int hci_update_event_filter_sync(struct hci_dev *hdev)
6027 {
6028 	struct bdaddr_list_with_flags *b;
6029 	u8 scan = SCAN_DISABLED;
6030 	bool scanning = test_bit(HCI_PSCAN, &hdev->flags);
6031 	int err;
6032 
6033 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
6034 		return 0;
6035 
6036 	/* Some fake CSR controllers lock up after setting this type of
6037 	 * filter, so avoid sending the request altogether.
6038 	 */
6039 	if (test_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks))
6040 		return 0;
6041 
6042 	/* Always clear event filter when starting */
6043 	hci_clear_event_filter_sync(hdev);
6044 
6045 	list_for_each_entry(b, &hdev->accept_list, list) {
6046 		if (!(b->flags & HCI_CONN_FLAG_REMOTE_WAKEUP))
6047 			continue;
6048 
6049 		bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);
6050 
6051 		err = hci_set_event_filter_sync(hdev, HCI_FLT_CONN_SETUP,
6052 						 HCI_CONN_SETUP_ALLOW_BDADDR,
6053 						 &b->bdaddr,
6054 						 HCI_CONN_SETUP_AUTO_ON);
6055 		if (err)
6056 			bt_dev_dbg(hdev, "Failed to set event filter for %pMR",
6057 				   &b->bdaddr);
6058 		else
6059 			scan = SCAN_PAGE;
6060 	}
6061 
6062 	if (!!scan != scanning)
6063 		hci_write_scan_enable_sync(hdev, scan);
6066 
6067 	return 0;
6068 }
6069 
6070 /* This function disables scanning (BR and LE) and marks it as paused */
6071 static int hci_pause_scan_sync(struct hci_dev *hdev)
6072 {
6073 	if (hdev->scanning_paused)
6074 		return 0;
6075 
6076 	/* Disable page scan if enabled */
6077 	if (test_bit(HCI_PSCAN, &hdev->flags))
6078 		hci_write_scan_enable_sync(hdev, SCAN_DISABLED);
6079 
6080 	hci_scan_disable_sync(hdev);
6081 
6082 	hdev->scanning_paused = true;
6083 
6084 	return 0;
6085 }
6086 
6087 /* This function performs the HCI suspend procedures in the following order:
6088  *
6089  * Pause discovery (active scanning/inquiry)
6090  * Pause Directed Advertising/Advertising
6091  * Pause Scanning (passive scanning in case discovery was not active)
6092  * Disconnect all connections
6093  * Set suspend_status to BT_SUSPEND_DISCONNECT if hdev cannot wakeup
6094  * otherwise:
6095  * Update event mask (only set events that are allowed to wake up the host)
6096  * Update event filter (with devices marked with HCI_CONN_FLAG_REMOTE_WAKEUP)
6097  * Update passive scanning (lower duty cycle)
6098  * Set suspend_status to BT_SUSPEND_CONFIGURE_WAKE
6099  */
6100 int hci_suspend_sync(struct hci_dev *hdev)
6101 {
6102 	int err;
6103 
6104 	/* If already marked as suspended there is nothing to do */
6105 	if (hdev->suspended)
6106 		return 0;
6107 
6108 	/* Mark device as suspended */
6109 	hdev->suspended = true;
6110 
6111 	/* Pause discovery if not already stopped */
6112 	hci_pause_discovery_sync(hdev);
6113 
6114 	/* Pause other advertisements */
6115 	hci_pause_advertising_sync(hdev);
6116 
6117 	/* Suspend monitor filters */
6118 	hci_suspend_monitor_sync(hdev);
6119 
6120 	/* Prevent disconnects from causing scanning to be re-enabled */
6121 	hci_pause_scan_sync(hdev);
6122 
6123 	if (hci_conn_count(hdev)) {
6124 		/* Soft disconnect everything (power off) */
6125 		err = hci_disconnect_all_sync(hdev, HCI_ERROR_REMOTE_POWER_OFF);
6126 		if (err) {
6127 			/* Set state to BT_RUNNING so resume doesn't notify */
6128 			hdev->suspend_state = BT_RUNNING;
6129 			hci_resume_sync(hdev);
6130 			return err;
6131 		}
6132 
6133 		/* Update event mask so only the allowed event can wakeup the
6134 		 * host.
6135 		 */
6136 		hci_set_event_mask_sync(hdev);
6137 	}
6138 
6139 	/* Only configure accept list if disconnect succeeded and wake
6140 	 * isn't being prevented.
6141 	 */
6142 	if (!hdev->wakeup || !hdev->wakeup(hdev)) {
6143 		hdev->suspend_state = BT_SUSPEND_DISCONNECT;
6144 		return 0;
6145 	}
6146 
6147 	/* Unpause to take care of updating scanning params */
6148 	hdev->scanning_paused = false;
6149 
6150 	/* Enable event filter for paired devices */
6151 	hci_update_event_filter_sync(hdev);
6152 
6153 	/* Update LE passive scan if enabled */
6154 	hci_update_passive_scan_sync(hdev);
6155 
6156 	/* Pause scan changes again. */
6157 	hdev->scanning_paused = true;
6158 
6159 	hdev->suspend_state = BT_SUSPEND_CONFIGURE_WAKE;
6160 
6161 	return 0;
6162 }
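
/* Illustrative sketch (not part of the original file, and unused by the
 * stack): a hypothetical PM transition helper pairing the suspend and
 * resume procedures. On suspend failure hci_suspend_sync() already rolls
 * back via hci_resume_sync(), so the caller only propagates the error.
 */
static inline int hci_example_pm_transition(struct hci_dev *hdev, bool suspend)
{
	return suspend ? hci_suspend_sync(hdev) : hci_resume_sync(hdev);
}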
6163 
6164 /* This function resumes discovery */
6165 static int hci_resume_discovery_sync(struct hci_dev *hdev)
6166 {
6167 	int err;
6168 
6169 	/* If discovery is not paused there is nothing to do */
6170 	if (!hdev->discovery_paused)
6171 		return 0;
6172 
6173 	hdev->discovery_paused = false;
6174 
6175 	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
6176 
6177 	err = hci_start_discovery_sync(hdev);
6178 
6179 	hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED :
6180 				DISCOVERY_FINDING);
6181 
6182 	return err;
6183 }
6184 
6185 static void hci_resume_monitor_sync(struct hci_dev *hdev)
6186 {
6187 	switch (hci_get_adv_monitor_offload_ext(hdev)) {
6188 	case HCI_ADV_MONITOR_EXT_MSFT:
6189 		msft_resume_sync(hdev);
6190 		break;
6191 	default:
6192 		return;
6193 	}
6194 }
6195 
6196 /* This function resumes scanning and resets the paused flag */
6197 static int hci_resume_scan_sync(struct hci_dev *hdev)
6198 {
6199 	if (!hdev->scanning_paused)
6200 		return 0;
6201 
6202 	hdev->scanning_paused = false;
6203 
6204 	hci_update_scan_sync(hdev);
6205 
6206 	/* Reset passive scanning to normal */
6207 	hci_update_passive_scan_sync(hdev);
6208 
6209 	return 0;
6210 }
6211 
6212 /* This function performs the HCI resume procedures in the following order:
6213  *
6214  * Restore event mask
6215  * Clear event filter
6216  * Update passive scanning (normal duty cycle)
6217  * Resume Directed Advertising/Advertising
6218  * Resume discovery (active scanning/inquiry)
6219  */
6220 int hci_resume_sync(struct hci_dev *hdev)
6221 {
6222 	/* If not marked as suspended there is nothing to do */
6223 	if (!hdev->suspended)
6224 		return 0;
6225 
6226 	hdev->suspended = false;
6227 
6228 	/* Restore event mask */
6229 	hci_set_event_mask_sync(hdev);
6230 
6231 	/* Clear any event filters and restore scan state */
6232 	hci_clear_event_filter_sync(hdev);
6233 
6234 	/* Resume scanning */
6235 	hci_resume_scan_sync(hdev);
6236 
6237 	/* Resume monitor filters */
6238 	hci_resume_monitor_sync(hdev);
6239 
6240 	/* Resume other advertisements */
6241 	hci_resume_advertising_sync(hdev);
6242 
6243 	/* Resume discovery */
6244 	hci_resume_discovery_sync(hdev);
6245 
6246 	return 0;
6247 }
6248 
6249 static bool conn_use_rpa(struct hci_conn *conn)
6250 {
6251 	struct hci_dev *hdev = conn->hdev;
6252 
6253 	return hci_dev_test_flag(hdev, HCI_PRIVACY);
6254 }
6255 
6256 static int hci_le_ext_directed_advertising_sync(struct hci_dev *hdev,
6257 						struct hci_conn *conn)
6258 {
6259 	struct hci_cp_le_set_ext_adv_params cp;
6260 	int err;
6261 	bdaddr_t random_addr;
6262 	u8 own_addr_type;
6263 
6264 	err = hci_update_random_address_sync(hdev, false, conn_use_rpa(conn),
6265 					     &own_addr_type);
6266 	if (err)
6267 		return err;
6268 
6269 	/* Set require_privacy to false so that the remote device has a
6270 	 * chance of identifying us.
6271 	 */
6272 	err = hci_get_random_address(hdev, false, conn_use_rpa(conn), NULL,
6273 				     &own_addr_type, &random_addr);
6274 	if (err)
6275 		return err;
6276 
6277 	memset(&cp, 0, sizeof(cp));
6278 
6279 	cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_DIRECT_IND);
6280 	cp.channel_map = hdev->le_adv_channel_map;
6281 	cp.tx_power = HCI_TX_POWER_INVALID;
6282 	cp.primary_phy = HCI_ADV_PHY_1M;
6283 	cp.secondary_phy = HCI_ADV_PHY_1M;
6284 	cp.handle = 0x00; /* Use instance 0 for directed adv */
6285 	cp.own_addr_type = own_addr_type;
6286 	cp.peer_addr_type = conn->dst_type;
6287 	bacpy(&cp.peer_addr, &conn->dst);
6288 
6289 	/* As per Core Spec 5.2 Vol 2, Part E, Sec 7.8.53, the
6290 	 * advertising_event_property LE_LEGACY_ADV_DIRECT_IND does not
6291 	 * support advertising data; when the advertising set already
6292 	 * contains some, the controller shall return the error code
6293 	 * Invalid HCI Command Parameters (0x12).
6294 	 * So the adv set for handle 0x00 must be removed first, since we
6295 	 * use instance 0 for directed advertising.
6296 	 */
6297 	err = hci_remove_ext_adv_instance_sync(hdev, cp.handle, NULL);
6298 	if (err)
6299 		return err;
6300 
6301 	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS,
6302 				    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
6303 	if (err)
6304 		return err;
6305 
6306 	/* Check if the random address needs to be updated */
6307 	if (own_addr_type == ADDR_LE_DEV_RANDOM &&
6308 	    bacmp(&random_addr, BDADDR_ANY) &&
6309 	    bacmp(&random_addr, &hdev->random_addr)) {
6310 		err = hci_set_adv_set_random_addr_sync(hdev, 0x00,
6311 						       &random_addr);
6312 		if (err)
6313 			return err;
6314 	}
6315 
6316 	return hci_enable_ext_advertising_sync(hdev, 0x00);
6317 }
6318 
6319 static int hci_le_directed_advertising_sync(struct hci_dev *hdev,
6320 					    struct hci_conn *conn)
6321 {
6322 	struct hci_cp_le_set_adv_param cp;
6323 	u8 status;
6324 	u8 own_addr_type;
6325 	u8 enable;
6326 
6327 	if (ext_adv_capable(hdev))
6328 		return hci_le_ext_directed_advertising_sync(hdev, conn);
6329 
6330 	/* Clear the HCI_LE_ADV bit temporarily so that the
6331 	 * hci_update_random_address knows that it's safe to go ahead
6332 	 * and write a new random address. The flag will be set back on
6333 	 * as soon as the SET_ADV_ENABLE HCI command completes.
6334 	 */
6335 	hci_dev_clear_flag(hdev, HCI_LE_ADV);
6336 
6337 	/* Set require_privacy to false so that the remote device has a
6338 	 * chance of identifying us.
6339 	 */
6340 	status = hci_update_random_address_sync(hdev, false, conn_use_rpa(conn),
6341 						&own_addr_type);
6342 	if (status)
6343 		return status;
6344 
6345 	memset(&cp, 0, sizeof(cp));
6346 
6347 	/* Some controllers might reject the command if the intervals are
6348 	 * not within range for undirected advertising.
6349 	 * BCM20702A0 is known to be affected by this.
6350 	 */
6351 	cp.min_interval = cpu_to_le16(0x0020);
6352 	cp.max_interval = cpu_to_le16(0x0020);
6353 
6354 	cp.type = LE_ADV_DIRECT_IND;
6355 	cp.own_address_type = own_addr_type;
6356 	cp.direct_addr_type = conn->dst_type;
6357 	bacpy(&cp.direct_addr, &conn->dst);
6358 	cp.channel_map = hdev->le_adv_channel_map;
6359 
6360 	status = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_PARAM,
6361 				       sizeof(cp), &cp, HCI_CMD_TIMEOUT);
6362 	if (status)
6363 		return status;
6364 
6365 	enable = 0x01;
6366 
6367 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE,
6368 				     sizeof(enable), &enable, HCI_CMD_TIMEOUT);
6369 }
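
/* Worked example (illustrative, not in the original file): legacy
 * advertising intervals are expressed in units of 0.625 ms, so the fixed
 * min/max value of 0x0020 above corresponds to 32 * 0.625 ms = 20 ms.
 */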
6370 
6371 static void set_ext_conn_params(struct hci_conn *conn,
6372 				struct hci_cp_le_ext_conn_param *p)
6373 {
6374 	struct hci_dev *hdev = conn->hdev;
6375 
6376 	memset(p, 0, sizeof(*p));
6377 
6378 	p->scan_interval = cpu_to_le16(hdev->le_scan_int_connect);
6379 	p->scan_window = cpu_to_le16(hdev->le_scan_window_connect);
6380 	p->conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
6381 	p->conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
6382 	p->conn_latency = cpu_to_le16(conn->le_conn_latency);
6383 	p->supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
6384 	p->min_ce_len = cpu_to_le16(0x0000);
6385 	p->max_ce_len = cpu_to_le16(0x0000);
6386 }
6387 
6388 static int hci_le_ext_create_conn_sync(struct hci_dev *hdev,
6389 				       struct hci_conn *conn, u8 own_addr_type)
6390 {
6391 	struct hci_cp_le_ext_create_conn *cp;
6392 	struct hci_cp_le_ext_conn_param *p;
6393 	u8 data[sizeof(*cp) + sizeof(*p) * 3];
6394 	u32 plen;
6395 
6396 	cp = (void *)data;
6397 	p = (void *)cp->data;
6398 
6399 	memset(cp, 0, sizeof(*cp));
6400 
6401 	bacpy(&cp->peer_addr, &conn->dst);
6402 	cp->peer_addr_type = conn->dst_type;
6403 	cp->own_addr_type = own_addr_type;
6404 
6405 	plen = sizeof(*cp);
6406 
6407 	if (scan_1m(hdev)) {
6408 		cp->phys |= LE_SCAN_PHY_1M;
6409 		set_ext_conn_params(conn, p);
6410 
6411 		p++;
6412 		plen += sizeof(*p);
6413 	}
6414 
6415 	if (scan_2m(hdev)) {
6416 		cp->phys |= LE_SCAN_PHY_2M;
6417 		set_ext_conn_params(conn, p);
6418 
6419 		p++;
6420 		plen += sizeof(*p);
6421 	}
6422 
6423 	if (scan_coded(hdev)) {
6424 		cp->phys |= LE_SCAN_PHY_CODED;
6425 		set_ext_conn_params(conn, p);
6426 
6427 		plen += sizeof(*p);
6428 	}
6429 
6430 	return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_EXT_CREATE_CONN,
6431 					plen, data,
6432 					HCI_EV_LE_ENHANCED_CONN_COMPLETE,
6433 					conn->conn_timeout, NULL);
6434 }
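
/* Worked example (illustrative, not in the original file): the command
 * payload above is variable length, one hci_cp_le_ext_conn_param block
 * per PHY bit set in cp->phys. With 1M and Coded scanning enabled but 2M
 * disabled, two parameter blocks are appended and
 * plen = sizeof(*cp) + 2 * sizeof(*p).
 */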
6435 
6436 static int hci_le_create_conn_sync(struct hci_dev *hdev, void *data)
6437 {
6438 	struct hci_cp_le_create_conn cp;
6439 	struct hci_conn_params *params;
6440 	u8 own_addr_type;
6441 	int err;
6442 	struct hci_conn *conn = data;
6443 
6444 	if (!hci_conn_valid(hdev, conn))
6445 		return -ECANCELED;
6446 
6447 	bt_dev_dbg(hdev, "conn %p", conn);
6448 
6449 	clear_bit(HCI_CONN_SCANNING, &conn->flags);
6450 	conn->state = BT_CONNECT;
6451 
6452 	/* If requested to connect as peripheral use directed advertising */
6453 	if (conn->role == HCI_ROLE_SLAVE) {
6454 		/* If we're active scanning and simultaneous roles is not
6455 		 * enabled simply reject the attempt.
6456 		 */
6457 		if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6458 		    hdev->le_scan_type == LE_SCAN_ACTIVE &&
6459 		    !hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES)) {
6460 			hci_conn_del(conn);
6461 			return -EBUSY;
6462 		}
6463 
6464 		/* Pause advertising while doing directed advertising. */
6465 		hci_pause_advertising_sync(hdev);
6466 
6467 		err = hci_le_directed_advertising_sync(hdev, conn);
6468 		goto done;
6469 	}
6470 
6471 	/* Disable advertising if simultaneous roles is not in use. */
6472 	if (!hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
6473 		hci_pause_advertising_sync(hdev);
6474 
6475 	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
6476 	if (params) {
6477 		conn->le_conn_min_interval = params->conn_min_interval;
6478 		conn->le_conn_max_interval = params->conn_max_interval;
6479 		conn->le_conn_latency = params->conn_latency;
6480 		conn->le_supv_timeout = params->supervision_timeout;
6481 	} else {
6482 		conn->le_conn_min_interval = hdev->le_conn_min_interval;
6483 		conn->le_conn_max_interval = hdev->le_conn_max_interval;
6484 		conn->le_conn_latency = hdev->le_conn_latency;
6485 		conn->le_supv_timeout = hdev->le_supv_timeout;
6486 	}
6487 
6488 	/* If controller is scanning, we stop it since some controllers are
6489 	 * not able to scan and connect at the same time. Also set the
6490 	 * HCI_LE_SCAN_INTERRUPTED flag so that the command complete
6491 	 * handler for scan disabling knows to set the correct discovery
6492 	 * state.
6493 	 */
6494 	if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
6495 		hci_scan_disable_sync(hdev);
6496 		hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED);
6497 	}
6498 
6499 	/* Update random address, but set require_privacy to false so
6500 	 * that we never connect with a non-resolvable address.
6501 	 */
6502 	err = hci_update_random_address_sync(hdev, false, conn_use_rpa(conn),
6503 					     &own_addr_type);
6504 	if (err)
6505 		goto done;
6506 	/* Send command LE Extended Create Connection if supported */
6507 	if (use_ext_conn(hdev)) {
6508 		err = hci_le_ext_create_conn_sync(hdev, conn, own_addr_type);
6509 		goto done;
6510 	}
6511 
6512 	memset(&cp, 0, sizeof(cp));
6513 
6514 	cp.scan_interval = cpu_to_le16(hdev->le_scan_int_connect);
6515 	cp.scan_window = cpu_to_le16(hdev->le_scan_window_connect);
6516 
6517 	bacpy(&cp.peer_addr, &conn->dst);
6518 	cp.peer_addr_type = conn->dst_type;
6519 	cp.own_address_type = own_addr_type;
6520 	cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
6521 	cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
6522 	cp.conn_latency = cpu_to_le16(conn->le_conn_latency);
6523 	cp.supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
6524 	cp.min_ce_len = cpu_to_le16(0x0000);
6525 	cp.max_ce_len = cpu_to_le16(0x0000);
6526 
6527 	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 2261:
6528 	 *
6529 	 * If this event is unmasked and the HCI_LE_Connection_Complete event
6530 	 * is unmasked, only the HCI_LE_Enhanced_Connection_Complete event is
6531 	 * sent when a new connection has been created.
6532 	 */
6533 	err = __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CREATE_CONN,
6534 				       sizeof(cp), &cp,
6535 				       use_enhanced_conn_complete(hdev) ?
6536 				       HCI_EV_LE_ENHANCED_CONN_COMPLETE :
6537 				       HCI_EV_LE_CONN_COMPLETE,
6538 				       conn->conn_timeout, NULL);
6539 
6540 done:
6541 	if (err == -ETIMEDOUT)
6542 		hci_le_connect_cancel_sync(hdev, conn, 0x00);
6543 
6544 	/* Re-enable advertising after the connection attempt is finished. */
6545 	hci_resume_advertising_sync(hdev);
6546 	return err;
6547 }
6548 
6549 int hci_le_create_cis_sync(struct hci_dev *hdev)
6550 {
6551 	struct {
6552 		struct hci_cp_le_create_cis cp;
6553 		struct hci_cis cis[0x1f];
6554 	} cmd;
6555 	struct hci_conn *conn;
6556 	u8 cig = BT_ISO_QOS_CIG_UNSET;
6557 
6558 	/* The spec allows only one pending LE Create CIS command at a time. If
6559 	 * the command is pending now, don't do anything. We check for pending
6560 	 * connections after each CIS Established event.
6561 	 *
6562 	 * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
6563 	 * page 2566:
6564 	 *
6565 	 * If the Host issues this command before all the
6566 	 * HCI_LE_CIS_Established events from the previous use of the
6567 	 * command have been generated, the Controller shall return the
6568 	 * error code Command Disallowed (0x0C).
6569 	 *
6570 	 * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
6571 	 * page 2567:
6572 	 *
6573 	 * When the Controller receives the HCI_LE_Create_CIS command, the
6574 	 * Controller sends the HCI_Command_Status event to the Host. An
6575 	 * HCI_LE_CIS_Established event will be generated for each CIS when it
6576 	 * is established or if it is disconnected or considered lost before
6577 	 * being established; until all the events are generated, the command
6578 	 * remains pending.
6579 	 */
6580 
6581 	memset(&cmd, 0, sizeof(cmd));
6582 
6583 	hci_dev_lock(hdev);
6584 
6585 	rcu_read_lock();
6586 
6587 	/* Wait until previous Create CIS has completed */
6588 	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
6589 		if (test_bit(HCI_CONN_CREATE_CIS, &conn->flags))
6590 			goto done;
6591 	}
6592 
6593 	/* Find CIG with all CIS ready */
6594 	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
6595 		struct hci_conn *link;
6596 
6597 		if (hci_conn_check_create_cis(conn))
6598 			continue;
6599 
6600 		cig = conn->iso_qos.ucast.cig;
6601 
6602 		list_for_each_entry_rcu(link, &hdev->conn_hash.list, list) {
6603 			if (hci_conn_check_create_cis(link) > 0 &&
6604 			    link->iso_qos.ucast.cig == cig &&
6605 			    link->state != BT_CONNECTED) {
6606 				cig = BT_ISO_QOS_CIG_UNSET;
6607 				break;
6608 			}
6609 		}
6610 
6611 		if (cig != BT_ISO_QOS_CIG_UNSET)
6612 			break;
6613 	}
6614 
6615 	if (cig == BT_ISO_QOS_CIG_UNSET)
6616 		goto done;
6617 
6618 	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
6619 		struct hci_cis *cis = &cmd.cis[cmd.cp.num_cis];
6620 
6621 		if (hci_conn_check_create_cis(conn) ||
6622 		    conn->iso_qos.ucast.cig != cig)
6623 			continue;
6624 
6625 		set_bit(HCI_CONN_CREATE_CIS, &conn->flags);
6626 		cis->acl_handle = cpu_to_le16(conn->parent->handle);
6627 		cis->cis_handle = cpu_to_le16(conn->handle);
6628 		cmd.cp.num_cis++;
6629 
6630 		if (cmd.cp.num_cis >= ARRAY_SIZE(cmd.cis))
6631 			break;
6632 	}
6633 
6634 done:
6635 	rcu_read_unlock();
6636 
6637 	hci_dev_unlock(hdev);
6638 
6639 	if (!cmd.cp.num_cis)
6640 		return 0;
6641 
6642 	/* Wait for HCI_LE_CIS_Established */
6643 	return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CREATE_CIS,
6644 					sizeof(cmd.cp) + sizeof(cmd.cis[0]) *
6645 					cmd.cp.num_cis, &cmd,
6646 					HCI_EVT_LE_CIS_ESTABLISHED,
6647 					conn->conn_timeout, NULL);
6648 }
6649 
6650 int hci_le_remove_cig_sync(struct hci_dev *hdev, u8 handle)
6651 {
6652 	struct hci_cp_le_remove_cig cp;
6653 
6654 	memset(&cp, 0, sizeof(cp));
6655 	cp.cig_id = handle;
6656 
6657 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_REMOVE_CIG, sizeof(cp),
6658 				     &cp, HCI_CMD_TIMEOUT);
6659 }
6660 
6661 int hci_le_big_terminate_sync(struct hci_dev *hdev, u8 handle)
6662 {
6663 	struct hci_cp_le_big_term_sync cp;
6664 
6665 	memset(&cp, 0, sizeof(cp));
6666 	cp.handle = handle;
6667 
6668 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_BIG_TERM_SYNC,
6669 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
6670 }
6671 
6672 int hci_le_pa_terminate_sync(struct hci_dev *hdev, u16 handle)
6673 {
6674 	struct hci_cp_le_pa_term_sync cp;
6675 
6676 	memset(&cp, 0, sizeof(cp));
6677 	cp.handle = cpu_to_le16(handle);
6678 
6679 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_TERM_SYNC,
6680 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
6681 }
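
/* Note (illustrative, not in the original file): unlike the one-byte BIG
 * and CIG identifiers used by the two helpers above, the periodic
 * advertising sync handle is 16 bits wide and therefore needs the
 * cpu_to_le16() conversion.
 */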
6682 
6683 int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
6684 			   bool use_rpa, struct adv_info *adv_instance,
6685 			   u8 *own_addr_type, bdaddr_t *rand_addr)
6686 {
6687 	int err;
6688 
6689 	bacpy(rand_addr, BDADDR_ANY);
6690 
6691 	/* If privacy is enabled use a resolvable private address. If
6692 	 * current RPA has expired then generate a new one.
6693 	 */
6694 	if (use_rpa) {
6695 		/* If the controller supports LL Privacy, the own address
6696 		 * type to use is 0x03 (ADDR_LE_DEV_RANDOM_RESOLVED)
6697 		 */
6698 		if (use_ll_privacy(hdev))
6699 			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
6700 		else
6701 			*own_addr_type = ADDR_LE_DEV_RANDOM;
6702 
6703 		if (adv_instance) {
6704 			if (adv_rpa_valid(adv_instance))
6705 				return 0;
6706 		} else {
6707 			if (rpa_valid(hdev))
6708 				return 0;
6709 		}
6710 
6711 		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
6712 		if (err < 0) {
6713 			bt_dev_err(hdev, "failed to generate new RPA");
6714 			return err;
6715 		}
6716 
6717 		bacpy(rand_addr, &hdev->rpa);
6718 
6719 		return 0;
6720 	}
6721 
6722 	/* In case of required privacy without resolvable private address,
6723 	 * use a non-resolvable private address. This is useful for
6724 	 * non-connectable advertising.
6725 	 */
6726 	if (require_privacy) {
6727 		bdaddr_t nrpa;
6728 
6729 		while (true) {
6730 			/* The non-resolvable private address is generated
6731 			 * from six random bytes with the two most significant
6732 			 * bits cleared.
6733 			 */
6734 			get_random_bytes(&nrpa, 6);
6735 			nrpa.b[5] &= 0x3f;
6736 
6737 			/* The non-resolvable private address shall not be
6738 			 * equal to the public address.
6739 			 */
6740 			if (bacmp(&hdev->bdaddr, &nrpa))
6741 				break;
6742 		}
6743 
6744 		*own_addr_type = ADDR_LE_DEV_RANDOM;
6745 		bacpy(rand_addr, &nrpa);
6746 
6747 		return 0;
6748 	}
6749 
6750 	/* No privacy so use a public address. */
6751 	*own_addr_type = ADDR_LE_DEV_PUBLIC;
6752 
6753 	return 0;
6754 }
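
/* Worked example (illustrative, not in the original file): a
 * non-resolvable private address must have the two most significant bits
 * of its most significant byte set to 0b00. Masking with 0x3f achieves
 * exactly that, e.g. nrpa.b[5] = 0xff becomes 0xff & 0x3f = 0x3f.
 */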
6755 
6756 static int _update_adv_data_sync(struct hci_dev *hdev, void *data)
6757 {
6758 	u8 instance = PTR_UINT(data);
6759 
6760 	return hci_update_adv_data_sync(hdev, instance);
6761 }
6762 
6763 int hci_update_adv_data(struct hci_dev *hdev, u8 instance)
6764 {
6765 	return hci_cmd_sync_queue(hdev, _update_adv_data_sync,
6766 				  UINT_PTR(instance), NULL);
6767 }
6768 
6769 static int hci_acl_create_conn_sync(struct hci_dev *hdev, void *data)
6770 {
6771 	struct hci_conn *conn = data;
6772 	struct inquiry_entry *ie;
6773 	struct hci_cp_create_conn cp;
6774 	int err;
6775 
6776 	if (!hci_conn_valid(hdev, conn))
6777 		return -ECANCELED;
6778 
6779 	/* Many controllers disallow HCI Create Connection while it is doing
6780 	 * HCI Inquiry. So we cancel the Inquiry first before issuing HCI Create
6781 	 * Connection. This may cause the MGMT discovering state to become false
6782 	 * without user space's request but it is okay since the MGMT Discovery
6783 	 * APIs do not promise that discovery should be done forever. Instead,
6784 	 * the user space monitors the status of MGMT discovering and it may
6785 	 * request for discovery again when this flag becomes false.
6786 	 */
6787 	if (test_bit(HCI_INQUIRY, &hdev->flags)) {
6788 		err = __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY_CANCEL, 0,
6789 					    NULL, HCI_CMD_TIMEOUT);
6790 		if (err)
6791 			bt_dev_warn(hdev, "Failed to cancel inquiry %d", err);
6792 	}
6793 
6794 	conn->state = BT_CONNECT;
6795 	conn->out = true;
6796 	conn->role = HCI_ROLE_MASTER;
6797 
6798 	conn->attempt++;
6799 
6800 	conn->link_policy = hdev->link_policy;
6801 
6802 	memset(&cp, 0, sizeof(cp));
6803 	bacpy(&cp.bdaddr, &conn->dst);
6804 	cp.pscan_rep_mode = 0x02;
6805 
6806 	ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
6807 	if (ie) {
6808 		if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
6809 			cp.pscan_rep_mode = ie->data.pscan_rep_mode;
6810 			cp.pscan_mode     = ie->data.pscan_mode;
6811 			cp.clock_offset   = ie->data.clock_offset |
6812 					    cpu_to_le16(0x8000);
6813 		}
6814 
6815 		memcpy(conn->dev_class, ie->data.dev_class, 3);
6816 	}
6817 
6818 	cp.pkt_type = cpu_to_le16(conn->pkt_type);
6819 	if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
6820 		cp.role_switch = 0x01;
6821 	else
6822 		cp.role_switch = 0x00;
6823 
6824 	return __hci_cmd_sync_status_sk(hdev, HCI_OP_CREATE_CONN,
6825 					sizeof(cp), &cp,
6826 					HCI_EV_CONN_COMPLETE,
6827 					conn->conn_timeout, NULL);
6828 }
6829 
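/* Illustrative sketch (hypothetical helper, not in the original file):
 * the clock_offset handling above in isolation. Bit 15 (0x8000) of the
 * Create Connection clock_offset field flags the offset as valid, so it
 * is OR-ed in whenever a recent inquiry cache entry supplies an offset;
 * without such an entry the controller pages with pscan_rep_mode 0x02
 * and no clock offset hint.
 */
static inline __le16 example_valid_clock_offset(__le16 offset)
{
	return offset | cpu_to_le16(0x8000);
}
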
int hci_connect_acl_sync(struct hci_dev *hdev, struct hci_conn *conn)
{
	return hci_cmd_sync_queue_once(hdev, hci_acl_create_conn_sync, conn,
				       NULL);
}

static void create_le_conn_complete(struct hci_dev *hdev, void *data, int err)
{
	struct hci_conn *conn = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err == -ECANCELED)
		return;

	hci_dev_lock(hdev);

	if (!hci_conn_valid(hdev, conn))
		goto done;

	if (!err) {
		hci_connect_le_scan_cleanup(conn, 0x00);
		goto done;
	}

	/* Check if the connection is still pending */
	if (conn != hci_lookup_le_connect(hdev))
		goto done;

	/* Flush to make sure the create connection cancel command is sent
	 * if needed.
	 */
	flush_delayed_work(&conn->le_conn_timeout);
	hci_conn_failed(conn, bt_status(err));

done:
	hci_dev_unlock(hdev);
}

int hci_connect_le_sync(struct hci_dev *hdev, struct hci_conn *conn)
{
	return hci_cmd_sync_queue_once(hdev, hci_le_create_conn_sync, conn,
				       create_le_conn_complete);
}

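/* Note (added commentary, not part of the original file): queue-once
 * entries are looked up by function, data and destroy callback, so the
 * cancel path in hci_cancel_connect_sync() below passes the same
 * hci_le_create_conn_sync / create_le_conn_complete pair that was used
 * here when queuing.
 */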
int hci_cancel_connect_sync(struct hci_dev *hdev, struct hci_conn *conn)
{
	if (conn->state != BT_OPEN)
		return -EINVAL;

	switch (conn->type) {
	case ACL_LINK:
		return !hci_cmd_sync_dequeue_once(hdev,
						  hci_acl_create_conn_sync,
						  conn, NULL);
	case LE_LINK:
		return !hci_cmd_sync_dequeue_once(hdev, hci_le_create_conn_sync,
						  conn, create_le_conn_complete);
	}

	return -ENOENT;
}

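/* Illustrative sketch (hypothetical caller, not in the original file):
 * hci_cmd_sync_dequeue_once() returns true when a queued entry was found
 * and removed, so the negation above yields 0 on a successful cancel and
 * 1 when the request had already left the queue and can no longer be
 * cancelled this way.
 */
static inline bool example_try_cancel(struct hci_dev *hdev,
				      struct hci_conn *conn)
{
	return hci_cancel_connect_sync(hdev, conn) == 0;
}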