xref: /openbmc/linux/net/bluetooth/hci_sync.c (revision d32fd6bb9f2bc8178cdd65ebec1ad670a8bfa241)
1  // SPDX-License-Identifier: GPL-2.0
2  /*
3   * BlueZ - Bluetooth protocol stack for Linux
4   *
5   * Copyright (C) 2021 Intel Corporation
6   * Copyright 2023 NXP
7   */
8  
9  #include <linux/property.h>
10  
11  #include <net/bluetooth/bluetooth.h>
12  #include <net/bluetooth/hci_core.h>
13  #include <net/bluetooth/mgmt.h>
14  
15  #include "hci_request.h"
16  #include "hci_codec.h"
17  #include "hci_debugfs.h"
18  #include "smp.h"
19  #include "eir.h"
20  #include "msft.h"
21  #include "aosp.h"
22  #include "leds.h"
23  
24  static void hci_cmd_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
25  				  struct sk_buff *skb)
26  {
27  	bt_dev_dbg(hdev, "result 0x%2.2x", result);
28  
29  	if (hdev->req_status != HCI_REQ_PEND)
30  		return;
31  
32  	hdev->req_result = result;
33  	hdev->req_status = HCI_REQ_DONE;
34  
35  	/* Free the request command so it is not used as the response */
36  	kfree_skb(hdev->req_skb);
37  	hdev->req_skb = NULL;
38  
39  	if (skb) {
40  		struct sock *sk = hci_skb_sk(skb);
41  
42  		/* Drop sk reference if set */
43  		if (sk)
44  			sock_put(sk);
45  
46  		hdev->req_rsp = skb_get(skb);
47  	}
48  
49  	wake_up_interruptible(&hdev->req_wait_q);
50  }
51  
52  static struct sk_buff *hci_cmd_sync_alloc(struct hci_dev *hdev, u16 opcode,
53  					  u32 plen, const void *param,
54  					  struct sock *sk)
55  {
56  	int len = HCI_COMMAND_HDR_SIZE + plen;
57  	struct hci_command_hdr *hdr;
58  	struct sk_buff *skb;
59  
60  	skb = bt_skb_alloc(len, GFP_ATOMIC);
61  	if (!skb)
62  		return NULL;
63  
64  	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
65  	hdr->opcode = cpu_to_le16(opcode);
66  	hdr->plen   = plen;
67  
68  	if (plen)
69  		skb_put_data(skb, param, plen);
70  
71  	bt_dev_dbg(hdev, "skb len %d", skb->len);
72  
73  	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
74  	hci_skb_opcode(skb) = opcode;
75  
76  	/* Grab a reference if the command needs to be associated with a sock
77  	 * (e.g. the mgmt socket that initiated the command).
78  	 */
79  	if (sk) {
80  		hci_skb_sk(skb) = sk;
81  		sock_hold(sk);
82  	}
83  
84  	return skb;
85  }
86  
87  static void hci_cmd_sync_add(struct hci_request *req, u16 opcode, u32 plen,
88  			     const void *param, u8 event, struct sock *sk)
89  {
90  	struct hci_dev *hdev = req->hdev;
91  	struct sk_buff *skb;
92  
93  	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
94  
95  	/* If an error occurred during request building, there is no point in
96  	 * queueing the HCI command. We can simply return.
97  	 */
98  	if (req->err)
99  		return;
100  
101  	skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, sk);
102  	if (!skb) {
103  		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
104  			   opcode);
105  		req->err = -ENOMEM;
106  		return;
107  	}
108  
109  	if (skb_queue_empty(&req->cmd_q))
110  		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
111  
112  	hci_skb_event(skb) = event;
113  
114  	skb_queue_tail(&req->cmd_q, skb);
115  }
116  
117  static int hci_req_sync_run(struct hci_request *req)
118  {
119  	struct hci_dev *hdev = req->hdev;
120  	struct sk_buff *skb;
121  	unsigned long flags;
122  
123  	bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q));
124  
125  	/* If an error occurred during request building, remove all HCI
126  	 * commands queued on the HCI request queue.
127  	 */
128  	if (req->err) {
129  		skb_queue_purge(&req->cmd_q);
130  		return req->err;
131  	}
132  
133  	/* Do not allow empty requests */
134  	if (skb_queue_empty(&req->cmd_q))
135  		return -ENODATA;
136  
137  	skb = skb_peek_tail(&req->cmd_q);
138  	bt_cb(skb)->hci.req_complete_skb = hci_cmd_sync_complete;
139  	bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
140  
141  	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
142  	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
143  	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
144  
145  	queue_work(hdev->workqueue, &hdev->cmd_work);
146  
147  	return 0;
148  }
149  
150  /* This function requires the caller holds hdev->req_lock. */
151  struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
152  				  const void *param, u8 event, u32 timeout,
153  				  struct sock *sk)
154  {
155  	struct hci_request req;
156  	struct sk_buff *skb;
157  	int err = 0;
158  
159  	bt_dev_dbg(hdev, "Opcode 0x%4.4x", opcode);
160  
161  	hci_req_init(&req, hdev);
162  
163  	hci_cmd_sync_add(&req, opcode, plen, param, event, sk);
164  
165  	hdev->req_status = HCI_REQ_PEND;
166  
167  	err = hci_req_sync_run(&req);
168  	if (err < 0)
169  		return ERR_PTR(err);
170  
171  	err = wait_event_interruptible_timeout(hdev->req_wait_q,
172  					       hdev->req_status != HCI_REQ_PEND,
173  					       timeout);
174  
175  	if (err == -ERESTARTSYS)
176  		return ERR_PTR(-EINTR);
177  
178  	switch (hdev->req_status) {
179  	case HCI_REQ_DONE:
180  		err = -bt_to_errno(hdev->req_result);
181  		break;
182  
183  	case HCI_REQ_CANCELED:
184  		err = -hdev->req_result;
185  		break;
186  
187  	default:
188  		err = -ETIMEDOUT;
189  		break;
190  	}
191  
192  	hdev->req_status = 0;
193  	hdev->req_result = 0;
194  	skb = hdev->req_rsp;
195  	hdev->req_rsp = NULL;
196  
197  	bt_dev_dbg(hdev, "end: err %d", err);
198  
199  	if (err < 0) {
200  		kfree_skb(skb);
201  		return ERR_PTR(err);
202  	}
203  
204  	/* If the command returned a Command Status event, skb will be NULL
205  	 * since there are no return parameters.
206  	 */
207  	if (!skb)
208  		return ERR_PTR(-ENODATA);
209  
210  	return skb;
211  }
212  EXPORT_SYMBOL(__hci_cmd_sync_sk);
213  
214  /* This function requires the caller holds hdev->req_lock. */
215  struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
216  			       const void *param, u32 timeout)
217  {
218  	return __hci_cmd_sync_sk(hdev, opcode, plen, param, 0, timeout, NULL);
219  }
220  EXPORT_SYMBOL(__hci_cmd_sync);
221  
222  /* Send HCI command and wait for command complete event */
223  struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
224  			     const void *param, u32 timeout)
225  {
226  	struct sk_buff *skb;
227  
228  	if (!test_bit(HCI_UP, &hdev->flags))
229  		return ERR_PTR(-ENETDOWN);
230  
231  	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
232  
233  	hci_req_sync_lock(hdev);
234  	skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
235  	hci_req_sync_unlock(hdev);
236  
237  	return skb;
238  }
239  EXPORT_SYMBOL(hci_cmd_sync);
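/* Usage sketch (illustrative only, not part of this revision): a
 * hypothetical caller issuing a blocking HCI_OP_READ_LOCAL_VERSION and
 * consuming the response; the function name below is made up.
 */
static int read_local_version_example(struct hci_dev *hdev)
{
	struct hci_rp_read_local_version *rp;
	struct sk_buff *skb;

	/* Blocks until the Command Complete event arrives or times out */
	skb = hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
			   HCI_CMD_TIMEOUT);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	rp = (void *)skb->data;
	bt_dev_dbg(hdev, "hci_ver 0x%2.2x", rp->hci_ver);

	/* The caller owns the returned skb and must free it */
	kfree_skb(skb);

	return 0;
}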
240  
241  /* This function requires the caller holds hdev->req_lock. */
242  struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
243  				  const void *param, u8 event, u32 timeout)
244  {
245  	return __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout,
246  				 NULL);
247  }
248  EXPORT_SYMBOL(__hci_cmd_sync_ev);
249  
250  /* This function requires the caller holds hdev->req_lock. */
251  int __hci_cmd_sync_status_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
252  			     const void *param, u8 event, u32 timeout,
253  			     struct sock *sk)
254  {
255  	struct sk_buff *skb;
256  	u8 status;
257  
258  	skb = __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout, sk);
259  
260  	/* If the command returned a status event, skb is ERR_PTR(-ENODATA) */
261  	if (skb == ERR_PTR(-ENODATA))
262  		return 0;
263  
264  	if (IS_ERR(skb)) {
265  		if (!event)
266  			bt_dev_err(hdev, "Opcode 0x%4.4x failed: %ld", opcode,
267  				   PTR_ERR(skb));
268  		return PTR_ERR(skb);
269  	}
270  
271  	status = skb->data[0];
272  
273  	kfree_skb(skb);
274  
275  	return status;
276  }
277  EXPORT_SYMBOL(__hci_cmd_sync_status_sk);
278  
279  int __hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen,
280  			  const void *param, u32 timeout)
281  {
282  	return __hci_cmd_sync_status_sk(hdev, opcode, plen, param, 0, timeout,
283  					NULL);
284  }
285  EXPORT_SYMBOL(__hci_cmd_sync_status);
286  
287  int hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen,
288  			const void *param, u32 timeout)
289  {
290  	int err;
291  
292  	hci_req_sync_lock(hdev);
293  	err = __hci_cmd_sync_status(hdev, opcode, plen, param, timeout);
294  	hci_req_sync_unlock(hdev);
295  
296  	return err;
297  }
298  EXPORT_SYMBOL(hci_cmd_sync_status);
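/* Usage sketch (illustrative only): the *_status variants collapse the
 * response into a single return value - 0 on success, a positive HCI
 * status when the controller rejects the command, and a negative errno
 * on transport or timeout failure. The helper below is hypothetical.
 */
static int write_cod_example(struct hci_dev *hdev)
{
	u8 cod[3] = { 0x00, 0x01, 0x00 }; /* e.g. Computer major class */
	int err;

	err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_CLASS_OF_DEV,
				  sizeof(cod), cod, HCI_CMD_TIMEOUT);
	if (err)
		bt_dev_err(hdev, "Write CoD failed: %d", err);

	return err;
}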
299  
300  static void hci_cmd_sync_work(struct work_struct *work)
301  {
302  	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_sync_work);
303  
304  	bt_dev_dbg(hdev, "");
305  
306  	/* Dequeue all entries and run them */
307  	while (1) {
308  		struct hci_cmd_sync_work_entry *entry;
309  
310  		mutex_lock(&hdev->cmd_sync_work_lock);
311  		entry = list_first_entry_or_null(&hdev->cmd_sync_work_list,
312  						 struct hci_cmd_sync_work_entry,
313  						 list);
314  		if (entry)
315  			list_del(&entry->list);
316  		mutex_unlock(&hdev->cmd_sync_work_lock);
317  
318  		if (!entry)
319  			break;
320  
321  		bt_dev_dbg(hdev, "entry %p", entry);
322  
323  		if (entry->func) {
324  			int err;
325  
326  			hci_req_sync_lock(hdev);
327  			err = entry->func(hdev, entry->data);
328  			if (entry->destroy)
329  				entry->destroy(hdev, entry->data, err);
330  			hci_req_sync_unlock(hdev);
331  		}
332  
333  		kfree(entry);
334  	}
335  }
336  
337  static void hci_cmd_sync_cancel_work(struct work_struct *work)
338  {
339  	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_sync_cancel_work);
340  
341  	cancel_delayed_work_sync(&hdev->cmd_timer);
342  	cancel_delayed_work_sync(&hdev->ncmd_timer);
343  	atomic_set(&hdev->cmd_cnt, 1);
344  
345  	wake_up_interruptible(&hdev->req_wait_q);
346  }
347  
348  static int hci_scan_disable_sync(struct hci_dev *hdev);
349  static int scan_disable_sync(struct hci_dev *hdev, void *data)
350  {
351  	return hci_scan_disable_sync(hdev);
352  }
353  
354  static int hci_inquiry_sync(struct hci_dev *hdev, u8 length);
355  static int interleaved_inquiry_sync(struct hci_dev *hdev, void *data)
356  {
357  	return hci_inquiry_sync(hdev, DISCOV_INTERLEAVED_INQUIRY_LEN);
358  }
359  
360  static void le_scan_disable(struct work_struct *work)
361  {
362  	struct hci_dev *hdev = container_of(work, struct hci_dev,
363  					    le_scan_disable.work);
364  	int status;
365  
366  	bt_dev_dbg(hdev, "");
367  	hci_dev_lock(hdev);
368  
369  	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
370  		goto _return;
371  
372  	cancel_delayed_work(&hdev->le_scan_restart);
373  
374  	status = hci_cmd_sync_queue(hdev, scan_disable_sync, NULL, NULL);
375  	if (status) {
376  		bt_dev_err(hdev, "failed to disable LE scan: %d", status);
377  		goto _return;
378  	}
379  
380  	hdev->discovery.scan_start = 0;
381  
382  	/* If we were running an LE-only scan, change the discovery state.
383  	 * If we were running both LE and BR/EDR inquiry simultaneously,
384  	 * and BR/EDR inquiry is already finished, stop discovery;
385  	 * otherwise BR/EDR inquiry will stop discovery when finished.
386  	 * If we are resolving a remote device name, do not change
387  	 * the discovery state.
388  	 */
389  
390  	if (hdev->discovery.type == DISCOV_TYPE_LE)
391  		goto discov_stopped;
392  
393  	if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
394  		goto _return;
395  
396  	if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
397  		if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
398  		    hdev->discovery.state != DISCOVERY_RESOLVING)
399  			goto discov_stopped;
400  
401  		goto _return;
402  	}
403  
404  	status = hci_cmd_sync_queue(hdev, interleaved_inquiry_sync, NULL, NULL);
405  	if (status) {
406  		bt_dev_err(hdev, "inquiry failed: status %d", status);
407  		goto discov_stopped;
408  	}
409  
410  	goto _return;
411  
412  discov_stopped:
413  	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
414  
415  _return:
416  	hci_dev_unlock(hdev);
417  }
418  
419  static int hci_le_set_scan_enable_sync(struct hci_dev *hdev, u8 val,
420  				       u8 filter_dup);
421  static int hci_le_scan_restart_sync(struct hci_dev *hdev)
422  {
423  	/* If controller is not scanning we are done. */
424  	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
425  		return 0;
426  
427  	if (hdev->scanning_paused) {
428  		bt_dev_dbg(hdev, "Scanning is paused for suspend");
429  		return 0;
430  	}
431  
432  	hci_le_set_scan_enable_sync(hdev, LE_SCAN_DISABLE, 0x00);
433  	return hci_le_set_scan_enable_sync(hdev, LE_SCAN_ENABLE,
434  					   LE_SCAN_FILTER_DUP_ENABLE);
435  }
436  
437  static void le_scan_restart(struct work_struct *work)
438  {
439  	struct hci_dev *hdev = container_of(work, struct hci_dev,
440  					    le_scan_restart.work);
441  	unsigned long timeout, duration, scan_start, now;
442  	int status;
443  
444  	bt_dev_dbg(hdev, "");
445  
446  	status = hci_le_scan_restart_sync(hdev);
447  	if (status) {
448  		bt_dev_err(hdev, "failed to restart LE scan: status %d",
449  			   status);
450  		return;
451  	}
452  
453  	hci_dev_lock(hdev);
454  
455  	if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
456  	    !hdev->discovery.scan_start)
457  		goto unlock;
458  
459  	/* When the scan was started, hdev->le_scan_disable was queued to run
460  	 * 'duration' after scan_start. During the scan restart this work
461  	 * was cancelled, so we need to queue it again with the proper
462  	 * timeout to make sure that the scan does not run indefinitely.
463  	 */
464  	duration = hdev->discovery.scan_duration;
465  	scan_start = hdev->discovery.scan_start;
466  	now = jiffies;
467  	if (now - scan_start <= duration) {
468  		int elapsed;
469  
470  		if (now >= scan_start)
471  			elapsed = now - scan_start;
472  		else
473  			elapsed = ULONG_MAX - scan_start + now;
474  
475  		timeout = duration - elapsed;
476  	} else {
477  		timeout = 0;
478  	}
479  
480  	queue_delayed_work(hdev->req_workqueue,
481  			   &hdev->le_scan_disable, timeout);
482  
483  unlock:
484  	hci_dev_unlock(hdev);
485  }
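/* Worked example of the requeue arithmetic above (illustrative numbers):
 * with duration == 20000 jiffies, scan_start == 10000 and now == 18000,
 * elapsed == 8000, so le_scan_disable is requeued to fire after
 * timeout == duration - elapsed == 12000 jiffies. The else branch of the
 * elapsed computation only compensates for jiffies wrapping past
 * ULONG_MAX between scan_start and now.
 */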
486  
487  static int reenable_adv_sync(struct hci_dev *hdev, void *data)
488  {
489  	bt_dev_dbg(hdev, "");
490  
491  	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
492  	    list_empty(&hdev->adv_instances))
493  		return 0;
494  
495  	if (hdev->cur_adv_instance) {
496  		return hci_schedule_adv_instance_sync(hdev,
497  						      hdev->cur_adv_instance,
498  						      true);
499  	} else {
500  		if (ext_adv_capable(hdev)) {
501  			hci_start_ext_adv_sync(hdev, 0x00);
502  		} else {
503  			hci_update_adv_data_sync(hdev, 0x00);
504  			hci_update_scan_rsp_data_sync(hdev, 0x00);
505  			hci_enable_advertising_sync(hdev);
506  		}
507  	}
508  
509  	return 0;
510  }
511  
512  static void reenable_adv(struct work_struct *work)
513  {
514  	struct hci_dev *hdev = container_of(work, struct hci_dev,
515  					    reenable_adv_work);
516  	int status;
517  
518  	bt_dev_dbg(hdev, "");
519  
520  	hci_dev_lock(hdev);
521  
522  	status = hci_cmd_sync_queue(hdev, reenable_adv_sync, NULL, NULL);
523  	if (status)
524  		bt_dev_err(hdev, "failed to reenable ADV: %d", status);
525  
526  	hci_dev_unlock(hdev);
527  }
528  
529  static void cancel_adv_timeout(struct hci_dev *hdev)
530  {
531  	if (hdev->adv_instance_timeout) {
532  		hdev->adv_instance_timeout = 0;
533  		cancel_delayed_work(&hdev->adv_instance_expire);
534  	}
535  }
536  
537  /* For a single instance:
538   * - force == true: The instance will be removed even when its remaining
539   *   lifetime is not zero.
540   * - force == false: the instance will be deactivated but kept stored unless
541   *   the remaining lifetime is zero.
542   *
543   * For instance == 0x00:
544   * - force == true: All instances will be removed regardless of their timeout
545   *   setting.
546   * - force == false: Only instances that have a timeout will be removed.
547   */
548  int hci_clear_adv_instance_sync(struct hci_dev *hdev, struct sock *sk,
549  				u8 instance, bool force)
550  {
551  	struct adv_info *adv_instance, *n, *next_instance = NULL;
552  	int err;
553  	u8 rem_inst;
554  
555  	/* Cancel any timeout concerning the removed instance(s). */
556  	if (!instance || hdev->cur_adv_instance == instance)
557  		cancel_adv_timeout(hdev);
558  
559  	/* Get the next instance to advertise BEFORE we remove
560  	 * the current one. This can be the same instance again
561  	 * if there is only one instance.
562  	 */
563  	if (instance && hdev->cur_adv_instance == instance)
564  		next_instance = hci_get_next_instance(hdev, instance);
565  
566  	if (instance == 0x00) {
567  		list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
568  					 list) {
569  			if (!(force || adv_instance->timeout))
570  				continue;
571  
572  			rem_inst = adv_instance->instance;
573  			err = hci_remove_adv_instance(hdev, rem_inst);
574  			if (!err)
575  				mgmt_advertising_removed(sk, hdev, rem_inst);
576  		}
577  	} else {
578  		adv_instance = hci_find_adv_instance(hdev, instance);
579  
580  		if (force || (adv_instance && adv_instance->timeout &&
581  			      !adv_instance->remaining_time)) {
582  			/* Don't advertise a removed instance. */
583  			if (next_instance &&
584  			    next_instance->instance == instance)
585  				next_instance = NULL;
586  
587  			err = hci_remove_adv_instance(hdev, instance);
588  			if (!err)
589  				mgmt_advertising_removed(sk, hdev, instance);
590  		}
591  	}
592  
593  	if (!hdev_is_powered(hdev) || hci_dev_test_flag(hdev, HCI_ADVERTISING))
594  		return 0;
595  
596  	if (next_instance && !ext_adv_capable(hdev))
597  		return hci_schedule_adv_instance_sync(hdev,
598  						      next_instance->instance,
599  						      false);
600  
601  	return 0;
602  }
603  
604  static int adv_timeout_expire_sync(struct hci_dev *hdev, void *data)
605  {
606  	u8 instance = *(u8 *)data;
607  
608  	kfree(data);
609  
610  	hci_clear_adv_instance_sync(hdev, NULL, instance, false);
611  
612  	if (list_empty(&hdev->adv_instances))
613  		return hci_disable_advertising_sync(hdev);
614  
615  	return 0;
616  }
617  
618  static void adv_timeout_expire(struct work_struct *work)
619  {
620  	u8 *inst_ptr;
621  	struct hci_dev *hdev = container_of(work, struct hci_dev,
622  					    adv_instance_expire.work);
623  
624  	bt_dev_dbg(hdev, "");
625  
626  	hci_dev_lock(hdev);
627  
628  	hdev->adv_instance_timeout = 0;
629  
630  	if (hdev->cur_adv_instance == 0x00)
631  		goto unlock;
632  
633  	inst_ptr = kmalloc(1, GFP_KERNEL);
634  	if (!inst_ptr)
635  		goto unlock;
636  
637  	*inst_ptr = hdev->cur_adv_instance;
638  	hci_cmd_sync_queue(hdev, adv_timeout_expire_sync, inst_ptr, NULL);
639  
640  unlock:
641  	hci_dev_unlock(hdev);
642  }
643  
644  void hci_cmd_sync_init(struct hci_dev *hdev)
645  {
646  	INIT_WORK(&hdev->cmd_sync_work, hci_cmd_sync_work);
647  	INIT_LIST_HEAD(&hdev->cmd_sync_work_list);
648  	mutex_init(&hdev->cmd_sync_work_lock);
649  	mutex_init(&hdev->unregister_lock);
650  
651  	INIT_WORK(&hdev->cmd_sync_cancel_work, hci_cmd_sync_cancel_work);
652  	INIT_WORK(&hdev->reenable_adv_work, reenable_adv);
653  	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable);
654  	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart);
655  	INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
656  }
657  
658  static void _hci_cmd_sync_cancel_entry(struct hci_dev *hdev,
659  				       struct hci_cmd_sync_work_entry *entry,
660  				       int err)
661  {
662  	if (entry->destroy)
663  		entry->destroy(hdev, entry->data, err);
664  
665  	list_del(&entry->list);
666  	kfree(entry);
667  }
668  
669  void hci_cmd_sync_clear(struct hci_dev *hdev)
670  {
671  	struct hci_cmd_sync_work_entry *entry, *tmp;
672  
673  	cancel_work_sync(&hdev->cmd_sync_work);
674  	cancel_work_sync(&hdev->reenable_adv_work);
675  
676  	mutex_lock(&hdev->cmd_sync_work_lock);
677  	list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list)
678  		_hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED);
679  	mutex_unlock(&hdev->cmd_sync_work_lock);
680  }
681  
682  void hci_cmd_sync_cancel(struct hci_dev *hdev, int err)
683  {
684  	bt_dev_dbg(hdev, "err 0x%2.2x", err);
685  
686  	if (hdev->req_status == HCI_REQ_PEND) {
687  		hdev->req_result = err;
688  		hdev->req_status = HCI_REQ_CANCELED;
689  
690  		queue_work(hdev->workqueue, &hdev->cmd_sync_cancel_work);
691  	}
692  }
693  EXPORT_SYMBOL(hci_cmd_sync_cancel);
694  
695  /* Cancel an ongoing command request synchronously:
696   *
697   * - Set the result and mark the status as HCI_REQ_CANCELED
698   * - Wake up the command sync thread
699   */
700  void hci_cmd_sync_cancel_sync(struct hci_dev *hdev, int err)
701  {
702  	bt_dev_dbg(hdev, "err 0x%2.2x", err);
703  
704  	if (hdev->req_status == HCI_REQ_PEND) {
705  		/* req_result is __u32 so error must be positive to be properly
706  		 * propagated.
707  		 */
708  		hdev->req_result = err < 0 ? -err : err;
709  		hdev->req_status = HCI_REQ_CANCELED;
710  
711  		wake_up_interruptible(&hdev->req_wait_q);
712  	}
713  }
714  EXPORT_SYMBOL(hci_cmd_sync_cancel_sync);
715  
716  /* Submit HCI command to be run in cmd_sync_work:
717   *
718   * - hdev must _not_ be unregistered
719   */
720  int hci_cmd_sync_submit(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
721  			void *data, hci_cmd_sync_work_destroy_t destroy)
722  {
723  	struct hci_cmd_sync_work_entry *entry;
724  	int err = 0;
725  
726  	mutex_lock(&hdev->unregister_lock);
727  	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
728  		err = -ENODEV;
729  		goto unlock;
730  	}
731  
732  	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
733  	if (!entry) {
734  		err = -ENOMEM;
735  		goto unlock;
736  	}
737  	entry->func = func;
738  	entry->data = data;
739  	entry->destroy = destroy;
740  
741  	mutex_lock(&hdev->cmd_sync_work_lock);
742  	list_add_tail(&entry->list, &hdev->cmd_sync_work_list);
743  	mutex_unlock(&hdev->cmd_sync_work_lock);
744  
745  	queue_work(hdev->req_workqueue, &hdev->cmd_sync_work);
746  
747  unlock:
748  	mutex_unlock(&hdev->unregister_lock);
749  	return err;
750  }
751  EXPORT_SYMBOL(hci_cmd_sync_submit);
752  
753  /* Queue HCI command:
754   *
755   * - hdev must be running
756   */
757  int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
758  		       void *data, hci_cmd_sync_work_destroy_t destroy)
759  {
760  	/* Only queue the command if hdev is running, which means it has been
761  	 * opened and is either in the init phase or already up.
762  	 */
763  	if (!test_bit(HCI_RUNNING, &hdev->flags))
764  		return -ENETDOWN;
765  
766  	return hci_cmd_sync_submit(hdev, func, data, destroy);
767  }
768  EXPORT_SYMBOL(hci_cmd_sync_queue);
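/* Usage sketch (illustrative only): deferring work to the cmd_sync
 * machinery with a heap-allocated context. The struct and function names
 * below are made up; the destroy callback also runs when the entry is
 * cancelled, so it is the single place the context is freed.
 */
struct local_name_work {
	u8 name[HCI_MAX_NAME_LENGTH];
};

static int set_local_name_sync(struct hci_dev *hdev, void *data)
{
	struct local_name_work *w = data;

	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LOCAL_NAME,
				     sizeof(w->name), w->name,
				     HCI_CMD_TIMEOUT);
}

static void set_local_name_destroy(struct hci_dev *hdev, void *data, int err)
{
	kfree(data);
}

/* ...queued as hci_cmd_sync_queue(hdev, set_local_name_sync, data,
 * set_local_name_destroy) with data pointing at a kmalloc'ed
 * struct local_name_work.
 */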
769  
770  static struct hci_cmd_sync_work_entry *
771  _hci_cmd_sync_lookup_entry(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
772  			   void *data, hci_cmd_sync_work_destroy_t destroy)
773  {
774  	struct hci_cmd_sync_work_entry *entry, *tmp;
775  
776  	list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list) {
777  		if (func && entry->func != func)
778  			continue;
779  
780  		if (data && entry->data != data)
781  			continue;
782  
783  		if (destroy && entry->destroy != destroy)
784  			continue;
785  
786  		return entry;
787  	}
788  
789  	return NULL;
790  }
791  
792  /* Queue HCI command entry once:
793   *
794   * - Look up whether an entry already exists, and only if it doesn't,
795   *   create a new entry and queue it.
796   */
797  int hci_cmd_sync_queue_once(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
798  			    void *data, hci_cmd_sync_work_destroy_t destroy)
799  {
800  	if (hci_cmd_sync_lookup_entry(hdev, func, data, destroy))
801  		return 0;
802  
803  	return hci_cmd_sync_queue(hdev, func, data, destroy);
804  }
805  EXPORT_SYMBOL(hci_cmd_sync_queue_once);
806  
807  /* Run HCI command:
808   *
809   * - hdev must be running
810   * - If on cmd_sync_work then run immediately, otherwise queue
811   */
812  int hci_cmd_sync_run(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
813  		     void *data, hci_cmd_sync_work_destroy_t destroy)
814  {
815  	/* Only queue the command if hdev is running, which means it has been
816  	 * opened and is either in the init phase or already up.
817  	 */
818  	if (!test_bit(HCI_RUNNING, &hdev->flags))
819  		return -ENETDOWN;
820  
821  	/* If on cmd_sync_work then run immediately otherwise queue */
822  	if (current_work() == &hdev->cmd_sync_work)
823  		return func(hdev, data);
824  
825  	return hci_cmd_sync_submit(hdev, func, data, destroy);
826  }
827  EXPORT_SYMBOL(hci_cmd_sync_run);
828  
829  /* Run HCI command entry once:
830   *
831   * - Look up whether an entry already exists, and only if it doesn't,
832   *   create a new entry and run it.
833   * - If on cmd_sync_work then run immediately, otherwise queue
834   */
835  int hci_cmd_sync_run_once(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
836  			  void *data, hci_cmd_sync_work_destroy_t destroy)
837  {
838  	if (hci_cmd_sync_lookup_entry(hdev, func, data, destroy))
839  		return 0;
840  
841  	return hci_cmd_sync_run(hdev, func, data, destroy);
842  }
843  EXPORT_SYMBOL(hci_cmd_sync_run_once);
844  
845  /* Lookup HCI command entry:
846   *
847   * - Return the first entry that matches by function callback, data, or
848   *   destroy callback.
849   */
850  struct hci_cmd_sync_work_entry *
851  hci_cmd_sync_lookup_entry(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
852  			  void *data, hci_cmd_sync_work_destroy_t destroy)
853  {
854  	struct hci_cmd_sync_work_entry *entry;
855  
856  	mutex_lock(&hdev->cmd_sync_work_lock);
857  	entry = _hci_cmd_sync_lookup_entry(hdev, func, data, destroy);
858  	mutex_unlock(&hdev->cmd_sync_work_lock);
859  
860  	return entry;
861  }
862  EXPORT_SYMBOL(hci_cmd_sync_lookup_entry);
863  
864  /* Cancel HCI command entry */
865  void hci_cmd_sync_cancel_entry(struct hci_dev *hdev,
866  			       struct hci_cmd_sync_work_entry *entry)
867  {
868  	mutex_lock(&hdev->cmd_sync_work_lock);
869  	_hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED);
870  	mutex_unlock(&hdev->cmd_sync_work_lock);
871  }
872  EXPORT_SYMBOL(hci_cmd_sync_cancel_entry);
873  
874  /* Dequeue one HCI command entry:
875   *
876   * - Look up and cancel the first entry that matches.
877   */
878  bool hci_cmd_sync_dequeue_once(struct hci_dev *hdev,
879  			       hci_cmd_sync_work_func_t func,
880  			       void *data, hci_cmd_sync_work_destroy_t destroy)
881  {
882  	struct hci_cmd_sync_work_entry *entry;
883  
884  	entry = hci_cmd_sync_lookup_entry(hdev, func, data, destroy);
885  	if (!entry)
886  		return false;
887  
888  	hci_cmd_sync_cancel_entry(hdev, entry);
889  
890  	return true;
891  }
892  EXPORT_SYMBOL(hci_cmd_sync_dequeue_once);
893  
894  /* Dequeue HCI command entry:
895   *
896   * - Look up and cancel any entry that matches by function callback, data,
897   *   or destroy callback.
898   */
899  bool hci_cmd_sync_dequeue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
900  			  void *data, hci_cmd_sync_work_destroy_t destroy)
901  {
902  	struct hci_cmd_sync_work_entry *entry;
903  	bool ret = false;
904  
905  	mutex_lock(&hdev->cmd_sync_work_lock);
906  	while ((entry = _hci_cmd_sync_lookup_entry(hdev, func, data,
907  						   destroy))) {
908  		_hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED);
909  		ret = true;
910  	}
911  	mutex_unlock(&hdev->cmd_sync_work_lock);
912  
913  	return ret;
914  }
915  EXPORT_SYMBOL(hci_cmd_sync_dequeue);
916  
917  int hci_update_eir_sync(struct hci_dev *hdev)
918  {
919  	struct hci_cp_write_eir cp;
920  
921  	bt_dev_dbg(hdev, "");
922  
923  	if (!hdev_is_powered(hdev))
924  		return 0;
925  
926  	if (!lmp_ext_inq_capable(hdev))
927  		return 0;
928  
929  	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
930  		return 0;
931  
932  	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
933  		return 0;
934  
935  	memset(&cp, 0, sizeof(cp));
936  
937  	eir_create(hdev, cp.data);
938  
939  	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
940  		return 0;
941  
942  	memcpy(hdev->eir, cp.data, sizeof(cp.data));
943  
944  	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp,
945  				     HCI_CMD_TIMEOUT);
946  }
947  
948  static u8 get_service_classes(struct hci_dev *hdev)
949  {
950  	struct bt_uuid *uuid;
951  	u8 val = 0;
952  
953  	list_for_each_entry(uuid, &hdev->uuids, list)
954  		val |= uuid->svc_hint;
955  
956  	return val;
957  }
958  
959  int hci_update_class_sync(struct hci_dev *hdev)
960  {
961  	u8 cod[3];
962  
963  	bt_dev_dbg(hdev, "");
964  
965  	if (!hdev_is_powered(hdev))
966  		return 0;
967  
968  	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
969  		return 0;
970  
971  	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
972  		return 0;
973  
974  	cod[0] = hdev->minor_class;
975  	cod[1] = hdev->major_class;
976  	cod[2] = get_service_classes(hdev);
977  
978  	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
979  		cod[1] |= 0x20;
980  
981  	if (memcmp(cod, hdev->dev_class, 3) == 0)
982  		return 0;
983  
984  	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CLASS_OF_DEV,
985  				     sizeof(cod), cod, HCI_CMD_TIMEOUT);
986  }
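/* CoD layout, for reference: the three bytes sent above are the minor
 * device class, the major device class (where 0x20 is CoD bit 13, the
 * Limited Discoverable Mode service bit), and the remaining service
 * class bits collected from the registered UUID hints.
 */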
987  
988  static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
989  {
990  	/* If there is no connection we are OK to advertise. */
991  	if (hci_conn_num(hdev, LE_LINK) == 0)
992  		return true;
993  
994  	/* Check le_states if there is any connection in peripheral role. */
995  	if (hdev->conn_hash.le_num_peripheral > 0) {
996  		/* Peripheral connection state and non connectable mode
997  		 * bit 20.
998  		 */
999  		if (!connectable && !(hdev->le_states[2] & 0x10))
1000  			return false;
1001  
1002  		/* Peripheral connection state and connectable mode bit 38
1003  		 * and scannable bit 21.
1004  		 */
1005  		if (connectable && (!(hdev->le_states[4] & 0x40) ||
1006  				    !(hdev->le_states[2] & 0x20)))
1007  			return false;
1008  	}
1009  
1010  	/* Check le_states if there is any connection in central role. */
1011  	if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_peripheral) {
1012  		/* Central connection state and non connectable mode bit 18. */
1013  		if (!connectable && !(hdev->le_states[2] & 0x02))
1014  			return false;
1015  
1016  		/* Central connection state and connectable mode bit 35 and
1017  		 * scannable 19.
1018  		 */
1019  		if (connectable && (!(hdev->le_states[4] & 0x08) ||
1020  				    !(hdev->le_states[2] & 0x08)))
1021  			return false;
1022  	}
1023  
1024  	return true;
1025  }
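/* How the le_states bit numbers above map onto the byte array: supported
 * state bit N lives in le_states[N / 8] under mask BIT(N % 8). For
 * example, bit 20 (peripheral + non-connectable advertising) is
 * le_states[2] & 0x10 since 20 / 8 == 2 and BIT(20 % 8) == 0x10, and
 * bit 35 is le_states[4] & 0x08 since 35 / 8 == 4 and BIT(35 % 8) == 0x08.
 */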
1026  
1027  static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
1028  {
1029  	/* If privacy is not enabled don't use RPA */
1030  	if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
1031  		return false;
1032  
1033  	/* If basic privacy mode is enabled use RPA */
1034  	if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1035  		return true;
1036  
1037  	/* If limited privacy mode is enabled don't use RPA if we're
1038  	 * both discoverable and bondable.
1039  	 */
1040  	if ((flags & MGMT_ADV_FLAG_DISCOV) &&
1041  	    hci_dev_test_flag(hdev, HCI_BONDABLE))
1042  		return false;
1043  
1044  	/* We're neither bondable nor discoverable in the limited
1045  	 * privacy mode, therefore use RPA.
1046  	 */
1047  	return true;
1048  }
1049  
1050  static int hci_set_random_addr_sync(struct hci_dev *hdev, bdaddr_t *rpa)
1051  {
1052  	/* If a random_addr has been set and we're advertising or initiating an
1053  	 * LE connection, we can't go ahead and change the random address at this
1054  	 * time. This is because the eventual initiator address used for the
1055  	 * subsequently created connection will be undefined (some
1056  	 * controllers use the new address and others the one we had
1057  	 * when the operation started).
1058  	 *
1059  	 * In this kind of scenario skip the update and let the random
1060  	 * address be updated at the next cycle.
1061  	 */
1062  	if (bacmp(&hdev->random_addr, BDADDR_ANY) &&
1063  	    (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
1064  	    hci_lookup_le_connect(hdev))) {
1065  		bt_dev_dbg(hdev, "Deferring random address update");
1066  		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1067  		return 0;
1068  	}
1069  
1070  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_RANDOM_ADDR,
1071  				     6, rpa, HCI_CMD_TIMEOUT);
1072  }
1073  
1074  int hci_update_random_address_sync(struct hci_dev *hdev, bool require_privacy,
1075  				   bool rpa, u8 *own_addr_type)
1076  {
1077  	int err;
1078  
1079  	/* If privacy is enabled use a resolvable private address. If
1080  	 * current RPA has expired or there is something else than
1081  	 * the current RPA in use, then generate a new one.
1082  	 */
1083  	if (rpa) {
1084  		/* If the controller supports LL Privacy, use own address type
1085  		 * 0x03 (ADDR_LE_DEV_RANDOM_RESOLVED).
1086  		 */
1087  		if (use_ll_privacy(hdev))
1088  			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
1089  		else
1090  			*own_addr_type = ADDR_LE_DEV_RANDOM;
1091  
1092  		/* Check if RPA is valid */
1093  		if (rpa_valid(hdev))
1094  			return 0;
1095  
1096  		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
1097  		if (err < 0) {
1098  			bt_dev_err(hdev, "failed to generate new RPA");
1099  			return err;
1100  		}
1101  
1102  		err = hci_set_random_addr_sync(hdev, &hdev->rpa);
1103  		if (err)
1104  			return err;
1105  
1106  		return 0;
1107  	}
1108  
1109  	/* In case of required privacy without resolvable private address,
1110  	 * use a non-resolvable private address. This is useful for active
1111  	 * scanning and non-connectable advertising.
1112  	 */
1113  	if (require_privacy) {
1114  		bdaddr_t nrpa;
1115  
1116  		while (true) {
1117  			/* The non-resolvable private address is generated
1118  			 * from random six bytes with the two most significant
1119  			 * bits cleared.
1120  			 */
1121  			get_random_bytes(&nrpa, 6);
1122  			nrpa.b[5] &= 0x3f;
1123  
1124  			/* The non-resolvable private address shall not be
1125  			 * equal to the public address.
1126  			 */
1127  			if (bacmp(&hdev->bdaddr, &nrpa))
1128  				break;
1129  		}
1130  
1131  		*own_addr_type = ADDR_LE_DEV_RANDOM;
1132  
1133  		return hci_set_random_addr_sync(hdev, &nrpa);
1134  	}
1135  
1136  	/* If forcing static address is in use or there is no public
1137  	 * address use the static address as random address (but skip
1138  	 * the HCI command if the current random address is already the
1139  	 * static one).
1140  	 *
1141  	 * In case BR/EDR has been disabled on a dual-mode controller
1142  	 * and a static address has been configured, then use that
1143  	 * address instead of the public BR/EDR address.
1144  	 */
1145  	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
1146  	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
1147  	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
1148  	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
1149  		*own_addr_type = ADDR_LE_DEV_RANDOM;
1150  		if (bacmp(&hdev->static_addr, &hdev->random_addr))
1151  			return hci_set_random_addr_sync(hdev,
1152  							&hdev->static_addr);
1153  		return 0;
1154  	}
1155  
1156  	/* Neither privacy nor static address is being used so use a
1157  	 * public address.
1158  	 */
1159  	*own_addr_type = ADDR_LE_DEV_PUBLIC;
1160  
1161  	return 0;
1162  }
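/* Summary of the selection above, derived from the code for reference:
 *
 *   rpa == true          -> own_addr_type 0x03 (LL Privacy) or 0x01,
 *                           regenerating the RPA only when it has expired
 *   require_privacy      -> fresh NRPA with the two top bits cleared
 *   forced or implicit
 *   static address       -> own_addr_type 0x01 using hdev->static_addr
 *   otherwise            -> own_addr_type 0x00 (public address)
 */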
1163  
1164  static int hci_disable_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance)
1165  {
1166  	struct hci_cp_le_set_ext_adv_enable *cp;
1167  	struct hci_cp_ext_adv_set *set;
1168  	u8 data[sizeof(*cp) + sizeof(*set) * 1];
1169  	u8 size;
1170  
1171  	/* If request specifies an instance that doesn't exist, fail */
1172  	if (instance > 0) {
1173  		struct adv_info *adv;
1174  
1175  		adv = hci_find_adv_instance(hdev, instance);
1176  		if (!adv)
1177  			return -EINVAL;
1178  
1179  		/* If not enabled there is nothing to do */
1180  		if (!adv->enabled)
1181  			return 0;
1182  	}
1183  
1184  	memset(data, 0, sizeof(data));
1185  
1186  	cp = (void *)data;
1187  	set = (void *)cp->data;
1188  
1189  	/* Instance 0x00 indicates all advertising instances will be disabled */
1190  	cp->num_of_sets = !!instance;
1191  	cp->enable = 0x00;
1192  
1193  	set->handle = instance;
1194  
1195  	size = sizeof(*cp) + sizeof(*set) * cp->num_of_sets;
1196  
1197  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE,
1198  				     size, data, HCI_CMD_TIMEOUT);
1199  }
1200  
1201  static int hci_set_adv_set_random_addr_sync(struct hci_dev *hdev, u8 instance,
1202  					    bdaddr_t *random_addr)
1203  {
1204  	struct hci_cp_le_set_adv_set_rand_addr cp;
1205  	int err;
1206  
1207  	if (!instance) {
1208  		/* Instance 0x00 doesn't have an adv_info, instead it uses
1209  		 * hdev->random_addr to track its address so whenever it needs
1210  		 * to be updated this also sets the random address, since
1211  		 * hdev->random_addr is shared with the scan state machine.
1212  		 */
1213  		err = hci_set_random_addr_sync(hdev, random_addr);
1214  		if (err)
1215  			return err;
1216  	}
1217  
1218  	memset(&cp, 0, sizeof(cp));
1219  
1220  	cp.handle = instance;
1221  	bacpy(&cp.bdaddr, random_addr);
1222  
1223  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
1224  				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1225  }
1226  
1227  int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance)
1228  {
1229  	struct hci_cp_le_set_ext_adv_params cp;
1230  	bool connectable;
1231  	u32 flags;
1232  	bdaddr_t random_addr;
1233  	u8 own_addr_type;
1234  	int err;
1235  	struct adv_info *adv;
1236  	bool secondary_adv;
1237  
1238  	if (instance > 0) {
1239  		adv = hci_find_adv_instance(hdev, instance);
1240  		if (!adv)
1241  			return -EINVAL;
1242  	} else {
1243  		adv = NULL;
1244  	}
1245  
1246  	/* Updating parameters of an active instance will return a
1247  	 * Command Disallowed error, so we must first disable the
1248  	 * instance if it is active.
1249  	 */
1250  	if (adv && !adv->pending) {
1251  		err = hci_disable_ext_adv_instance_sync(hdev, instance);
1252  		if (err)
1253  			return err;
1254  	}
1255  
1256  	flags = hci_adv_instance_flags(hdev, instance);
1257  
1258  	/* If the "connectable" instance flag was not set, then choose between
1259  	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1260  	 */
1261  	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1262  		      mgmt_get_connectable(hdev);
1263  
1264  	if (!is_advertising_allowed(hdev, connectable))
1265  		return -EPERM;
1266  
1267  	/* Set require_privacy to true only when non-connectable
1268  	 * advertising is used. In that case it is fine to use a
1269  	 * non-resolvable private address.
1270  	 */
1271  	err = hci_get_random_address(hdev, !connectable,
1272  				     adv_use_rpa(hdev, flags), adv,
1273  				     &own_addr_type, &random_addr);
1274  	if (err < 0)
1275  		return err;
1276  
1277  	memset(&cp, 0, sizeof(cp));
1278  
1279  	if (adv) {
1280  		hci_cpu_to_le24(adv->min_interval, cp.min_interval);
1281  		hci_cpu_to_le24(adv->max_interval, cp.max_interval);
1282  		cp.tx_power = adv->tx_power;
1283  	} else {
1284  		hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
1285  		hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
1286  		cp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
1287  	}
1288  
1289  	secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
1290  
1291  	if (connectable) {
1292  		if (secondary_adv)
1293  			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
1294  		else
1295  			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
1296  	} else if (hci_adv_instance_is_scannable(hdev, instance) ||
1297  		   (flags & MGMT_ADV_PARAM_SCAN_RSP)) {
1298  		if (secondary_adv)
1299  			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
1300  		else
1301  			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
1302  	} else {
1303  		if (secondary_adv)
1304  			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
1305  		else
1306  			cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
1307  	}
1308  
1309  	/* If Own_Address_Type equals 0x02 or 0x03, the Peer_Address parameter
1310  	 * contains the peer’s Identity Address and the Peer_Address_Type
1311  	 * parameter contains the peer’s Identity Type (i.e., 0x00 or 0x01).
1312  	 * These parameters are used to locate the corresponding local IRK in
1313  	 * the resolving list; this IRK is used to generate their own address
1314  	 * used in the advertisement.
1315  	 */
1316  	if (own_addr_type == ADDR_LE_DEV_RANDOM_RESOLVED)
1317  		hci_copy_identity_address(hdev, &cp.peer_addr,
1318  					  &cp.peer_addr_type);
1319  
1320  	cp.own_addr_type = own_addr_type;
1321  	cp.channel_map = hdev->le_adv_channel_map;
1322  	cp.handle = instance;
1323  
1324  	if (flags & MGMT_ADV_FLAG_SEC_2M) {
1325  		cp.primary_phy = HCI_ADV_PHY_1M;
1326  		cp.secondary_phy = HCI_ADV_PHY_2M;
1327  	} else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
1328  		cp.primary_phy = HCI_ADV_PHY_CODED;
1329  		cp.secondary_phy = HCI_ADV_PHY_CODED;
1330  	} else {
1331  		/* In all other cases use 1M */
1332  		cp.primary_phy = HCI_ADV_PHY_1M;
1333  		cp.secondary_phy = HCI_ADV_PHY_1M;
1334  	}
1335  
1336  	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS,
1337  				    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1338  	if (err)
1339  		return err;
1340  
1341  	if ((own_addr_type == ADDR_LE_DEV_RANDOM ||
1342  	     own_addr_type == ADDR_LE_DEV_RANDOM_RESOLVED) &&
1343  	    bacmp(&random_addr, BDADDR_ANY)) {
1344  		/* Check if the random address needs to be updated */
1345  		if (adv) {
1346  			if (!bacmp(&random_addr, &adv->random_addr))
1347  				return 0;
1348  		} else {
1349  			if (!bacmp(&random_addr, &hdev->random_addr))
1350  				return 0;
1351  		}
1352  
1353  		return hci_set_adv_set_random_addr_sync(hdev, instance,
1354  							&random_addr);
1355  	}
1356  
1357  	return 0;
1358  }
1359  
1360  static int hci_set_ext_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance)
1361  {
1362  	struct {
1363  		struct hci_cp_le_set_ext_scan_rsp_data cp;
1364  		u8 data[HCI_MAX_EXT_AD_LENGTH];
1365  	} pdu;
1366  	u8 len;
1367  	struct adv_info *adv = NULL;
1368  	int err;
1369  
1370  	memset(&pdu, 0, sizeof(pdu));
1371  
1372  	if (instance) {
1373  		adv = hci_find_adv_instance(hdev, instance);
1374  		if (!adv || !adv->scan_rsp_changed)
1375  			return 0;
1376  	}
1377  
1378  	len = eir_create_scan_rsp(hdev, instance, pdu.data);
1379  
1380  	pdu.cp.handle = instance;
1381  	pdu.cp.length = len;
1382  	pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1383  	pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1384  
1385  	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA,
1386  				    sizeof(pdu.cp) + len, &pdu.cp,
1387  				    HCI_CMD_TIMEOUT);
1388  	if (err)
1389  		return err;
1390  
1391  	if (adv) {
1392  		adv->scan_rsp_changed = false;
1393  	} else {
1394  		memcpy(hdev->scan_rsp_data, pdu.data, len);
1395  		hdev->scan_rsp_data_len = len;
1396  	}
1397  
1398  	return 0;
1399  }
1400  
1401  static int __hci_set_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance)
1402  {
1403  	struct hci_cp_le_set_scan_rsp_data cp;
1404  	u8 len;
1405  
1406  	memset(&cp, 0, sizeof(cp));
1407  
1408  	len = eir_create_scan_rsp(hdev, instance, cp.data);
1409  
1410  	if (hdev->scan_rsp_data_len == len &&
1411  	    !memcmp(cp.data, hdev->scan_rsp_data, len))
1412  		return 0;
1413  
1414  	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1415  	hdev->scan_rsp_data_len = len;
1416  
1417  	cp.length = len;
1418  
1419  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_RSP_DATA,
1420  				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1421  }
1422  
1423  int hci_update_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance)
1424  {
1425  	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1426  		return 0;
1427  
1428  	if (ext_adv_capable(hdev))
1429  		return hci_set_ext_scan_rsp_data_sync(hdev, instance);
1430  
1431  	return __hci_set_scan_rsp_data_sync(hdev, instance);
1432  }
1433  
1434  int hci_enable_ext_advertising_sync(struct hci_dev *hdev, u8 instance)
1435  {
1436  	struct hci_cp_le_set_ext_adv_enable *cp;
1437  	struct hci_cp_ext_adv_set *set;
1438  	u8 data[sizeof(*cp) + sizeof(*set) * 1];
1439  	struct adv_info *adv;
1440  
1441  	if (instance > 0) {
1442  		adv = hci_find_adv_instance(hdev, instance);
1443  		if (!adv)
1444  			return -EINVAL;
1445  		/* If already enabled there is nothing to do */
1446  		if (adv->enabled)
1447  			return 0;
1448  	} else {
1449  		adv = NULL;
1450  	}
1451  
1452  	cp = (void *)data;
1453  	set = (void *)cp->data;
1454  
1455  	memset(cp, 0, sizeof(*cp));
1456  
1457  	cp->enable = 0x01;
1458  	cp->num_of_sets = 0x01;
1459  
1460  	memset(set, 0, sizeof(*set));
1461  
1462  	set->handle = instance;
1463  
1464  	/* Set duration per instance since controller is responsible for
1465  	 * scheduling it.
1466  	 */
1467  	if (adv && adv->timeout) {
1468  		u16 duration = adv->timeout * MSEC_PER_SEC;
1469  
1470  		/* Time = N * 10 ms */
1471  		set->duration = cpu_to_le16(duration / 10);
1472  	}
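	/* Worked example: a 5 second adv->timeout becomes duration == 5000 ms,
	 * so set->duration == 500 units of 10 ms.
	 */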
1473  
1474  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE,
1475  				     sizeof(*cp) +
1476  				     sizeof(*set) * cp->num_of_sets,
1477  				     data, HCI_CMD_TIMEOUT);
1478  }
1479  
1480  int hci_start_ext_adv_sync(struct hci_dev *hdev, u8 instance)
1481  {
1482  	int err;
1483  
1484  	err = hci_setup_ext_adv_instance_sync(hdev, instance);
1485  	if (err)
1486  		return err;
1487  
1488  	err = hci_set_ext_scan_rsp_data_sync(hdev, instance);
1489  	if (err)
1490  		return err;
1491  
1492  	return hci_enable_ext_advertising_sync(hdev, instance);
1493  }
1494  
1495  int hci_disable_per_advertising_sync(struct hci_dev *hdev, u8 instance)
1496  {
1497  	struct hci_cp_le_set_per_adv_enable cp;
1498  	struct adv_info *adv = NULL;
1499  
1500  	/* If periodic advertising is already disabled there is nothing to do. */
1501  	adv = hci_find_adv_instance(hdev, instance);
1502  	if (!adv || !adv->periodic || !adv->enabled)
1503  		return 0;
1504  
1505  	memset(&cp, 0, sizeof(cp));
1506  
1507  	cp.enable = 0x00;
1508  	cp.handle = instance;
1509  
1510  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE,
1511  				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1512  }
1513  
1514  static int hci_set_per_adv_params_sync(struct hci_dev *hdev, u8 instance,
1515  				       u16 min_interval, u16 max_interval)
1516  {
1517  	struct hci_cp_le_set_per_adv_params cp;
1518  
1519  	memset(&cp, 0, sizeof(cp));
1520  
1521  	if (!min_interval)
1522  		min_interval = DISCOV_LE_PER_ADV_INT_MIN;
1523  
1524  	if (!max_interval)
1525  		max_interval = DISCOV_LE_PER_ADV_INT_MAX;
1526  
1527  	cp.handle = instance;
1528  	cp.min_interval = cpu_to_le16(min_interval);
1529  	cp.max_interval = cpu_to_le16(max_interval);
1530  	cp.periodic_properties = 0x0000;
1531  
1532  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_PARAMS,
1533  				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1534  }
1535  
1536  static int hci_set_per_adv_data_sync(struct hci_dev *hdev, u8 instance)
1537  {
1538  	struct {
1539  		struct hci_cp_le_set_per_adv_data cp;
1540  		u8 data[HCI_MAX_PER_AD_LENGTH];
1541  	} pdu;
1542  	u8 len;
1543  
1544  	memset(&pdu, 0, sizeof(pdu));
1545  
1546  	if (instance) {
1547  		struct adv_info *adv = hci_find_adv_instance(hdev, instance);
1548  
1549  		if (!adv || !adv->periodic)
1550  			return 0;
1551  	}
1552  
1553  	len = eir_create_per_adv_data(hdev, instance, pdu.data);
1554  
1555  	pdu.cp.length = len;
1556  	pdu.cp.handle = instance;
1557  	pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1558  
1559  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_DATA,
1560  				     sizeof(pdu.cp) + len, &pdu,
1561  				     HCI_CMD_TIMEOUT);
1562  }
1563  
1564  static int hci_enable_per_advertising_sync(struct hci_dev *hdev, u8 instance)
1565  {
1566  	struct hci_cp_le_set_per_adv_enable cp;
1567  	struct adv_info *adv = NULL;
1568  
1569  	/* If periodic advertising is already enabled there is nothing to do. */
1570  	adv = hci_find_adv_instance(hdev, instance);
1571  	if (adv && adv->periodic && adv->enabled)
1572  		return 0;
1573  
1574  	memset(&cp, 0, sizeof(cp));
1575  
1576  	cp.enable = 0x01;
1577  	cp.handle = instance;
1578  
1579  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE,
1580  				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1581  }
1582  
1583  /* Checks if the periodic advertising data contains a Basic Announcement and,
1584   * if it does, generates a Broadcast ID and adds a Broadcast Announcement.
1585   */
1586  static int hci_adv_bcast_annoucement(struct hci_dev *hdev, struct adv_info *adv)
1587  {
1588  	u8 bid[3];
1589  	u8 ad[4 + 3];
1590  
1591  	/* Skip if adv is NULL, as instance 0x00 is used for general purpose
1592  	 * advertising and so cannot be used for the likes of a Broadcast
1593  	 * Announcement, since it can be overwritten at any point.
1594  	 */
1595  	if (!adv)
1596  		return 0;
1597  
1598  	/* If the PA data doesn't contain a Basic Audio Announcement there is
1599  	 * nothing to do.
1600  	 */
1601  	if (!eir_get_service_data(adv->per_adv_data, adv->per_adv_data_len,
1602  				  0x1851, NULL))
1603  		return 0;
1604  
1605  	/* Check if advertising data already has a Broadcast Announcement since
1606  	 * the process may want to control the Broadcast ID directly and in that
1607  	 * case the kernel shall not interfere.
1608  	 */
1609  	if (eir_get_service_data(adv->adv_data, adv->adv_data_len, 0x1852,
1610  				 NULL))
1611  		return 0;
1612  
1613  	/* Generate Broadcast ID */
1614  	get_random_bytes(bid, sizeof(bid));
1615  	eir_append_service_data(ad, 0, 0x1852, bid, sizeof(bid));
1616  	hci_set_adv_instance_data(hdev, adv->instance, sizeof(ad), ad, 0, NULL);
1617  
1618  	return hci_update_adv_data_sync(hdev, adv->instance);
1619  }
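/* For reference, the 16-bit UUIDs hardcoded above are the Basic Audio
 * Announcement Service (0x1851) and the Broadcast Audio Announcement
 * Service (0x1852) from the Bluetooth Assigned Numbers.
 */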
1620  
1621  int hci_start_per_adv_sync(struct hci_dev *hdev, u8 instance, u8 data_len,
1622  			   u8 *data, u32 flags, u16 min_interval,
1623  			   u16 max_interval, u16 sync_interval)
1624  {
1625  	struct adv_info *adv = NULL;
1626  	int err;
1627  	bool added = false;
1628  
1629  	hci_disable_per_advertising_sync(hdev, instance);
1630  
1631  	if (instance) {
1632  		adv = hci_find_adv_instance(hdev, instance);
1633  		/* Create an instance if one could not be found */
1634  		if (!adv) {
1635  			adv = hci_add_per_instance(hdev, instance, flags,
1636  						   data_len, data,
1637  						   sync_interval,
1638  						   sync_interval);
1639  			if (IS_ERR(adv))
1640  				return PTR_ERR(adv);
1641  			adv->pending = false;
1642  			added = true;
1643  		}
1644  	}
1645  
1646  	/* Start advertising */
1647  	err = hci_start_ext_adv_sync(hdev, instance);
1648  	if (err < 0)
1649  		goto fail;
1650  
1651  	err = hci_adv_bcast_annoucement(hdev, adv);
1652  	if (err < 0)
1653  		goto fail;
1654  
1655  	err = hci_set_per_adv_params_sync(hdev, instance, min_interval,
1656  					  max_interval);
1657  	if (err < 0)
1658  		goto fail;
1659  
1660  	err = hci_set_per_adv_data_sync(hdev, instance);
1661  	if (err < 0)
1662  		goto fail;
1663  
1664  	err = hci_enable_per_advertising_sync(hdev, instance);
1665  	if (err < 0)
1666  		goto fail;
1667  
1668  	return 0;
1669  
1670  fail:
1671  	if (added)
1672  		hci_remove_adv_instance(hdev, instance);
1673  
1674  	return err;
1675  }
1676  
1677  static int hci_start_adv_sync(struct hci_dev *hdev, u8 instance)
1678  {
1679  	int err;
1680  
1681  	if (ext_adv_capable(hdev))
1682  		return hci_start_ext_adv_sync(hdev, instance);
1683  
1684  	err = hci_update_adv_data_sync(hdev, instance);
1685  	if (err)
1686  		return err;
1687  
1688  	err = hci_update_scan_rsp_data_sync(hdev, instance);
1689  	if (err)
1690  		return err;
1691  
1692  	return hci_enable_advertising_sync(hdev);
1693  }
1694  
1695  int hci_enable_advertising_sync(struct hci_dev *hdev)
1696  {
1697  	struct adv_info *adv_instance;
1698  	struct hci_cp_le_set_adv_param cp;
1699  	u8 own_addr_type, enable = 0x01;
1700  	bool connectable;
1701  	u16 adv_min_interval, adv_max_interval;
1702  	u32 flags;
1703  	u8 status;
1704  
1705  	if (ext_adv_capable(hdev))
1706  		return hci_enable_ext_advertising_sync(hdev,
1707  						       hdev->cur_adv_instance);
1708  
1709  	flags = hci_adv_instance_flags(hdev, hdev->cur_adv_instance);
1710  	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
1711  
1712  	/* If the "connectable" instance flag was not set, then choose between
1713  	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1714  	 */
1715  	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1716  		      mgmt_get_connectable(hdev);
1717  
1718  	if (!is_advertising_allowed(hdev, connectable))
1719  		return -EINVAL;
1720  
1721  	status = hci_disable_advertising_sync(hdev);
1722  	if (status)
1723  		return status;
1724  
1725  	/* Clear the HCI_LE_ADV bit temporarily so that the
1726  	 * hci_update_random_address knows that it's safe to go ahead
1727  	 * and write a new random address. The flag will be set back on
1728  	 * as soon as the SET_ADV_ENABLE HCI command completes.
1729  	 */
1730  	hci_dev_clear_flag(hdev, HCI_LE_ADV);
1731  
1732  	/* Set require_privacy to true only when non-connectable
1733  	 * advertising is used. In that case it is fine to use a
1734  	 * non-resolvable private address.
1735  	 */
1736  	status = hci_update_random_address_sync(hdev, !connectable,
1737  						adv_use_rpa(hdev, flags),
1738  						&own_addr_type);
1739  	if (status)
1740  		return status;
1741  
1742  	memset(&cp, 0, sizeof(cp));
1743  
1744  	if (adv_instance) {
1745  		adv_min_interval = adv_instance->min_interval;
1746  		adv_max_interval = adv_instance->max_interval;
1747  	} else {
1748  		adv_min_interval = hdev->le_adv_min_interval;
1749  		adv_max_interval = hdev->le_adv_max_interval;
1750  	}
1751  
1752  	if (connectable) {
1753  		cp.type = LE_ADV_IND;
1754  	} else {
1755  		if (hci_adv_instance_is_scannable(hdev, hdev->cur_adv_instance))
1756  			cp.type = LE_ADV_SCAN_IND;
1757  		else
1758  			cp.type = LE_ADV_NONCONN_IND;
1759  
1760  		if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
1761  		    hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1762  			adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
1763  			adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
1764  		}
1765  	}
1766  
1767  	cp.min_interval = cpu_to_le16(adv_min_interval);
1768  	cp.max_interval = cpu_to_le16(adv_max_interval);
1769  	cp.own_address_type = own_addr_type;
1770  	cp.channel_map = hdev->le_adv_channel_map;
1771  
1772  	status = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_PARAM,
1773  				       sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1774  	if (status)
1775  		return status;
1776  
1777  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE,
1778  				     sizeof(enable), &enable, HCI_CMD_TIMEOUT);
1779  }
1780  
1781  static int enable_advertising_sync(struct hci_dev *hdev, void *data)
1782  {
1783  	return hci_enable_advertising_sync(hdev);
1784  }
1785  
1786  int hci_enable_advertising(struct hci_dev *hdev)
1787  {
1788  	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1789  	    list_empty(&hdev->adv_instances))
1790  		return 0;
1791  
1792  	return hci_cmd_sync_queue(hdev, enable_advertising_sync, NULL, NULL);
1793  }
1794  
1795  int hci_remove_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance,
1796  				     struct sock *sk)
1797  {
1798  	int err;
1799  
1800  	if (!ext_adv_capable(hdev))
1801  		return 0;
1802  
1803  	err = hci_disable_ext_adv_instance_sync(hdev, instance);
1804  	if (err)
1805  		return err;
1806  
1807  	/* If request specifies an instance that doesn't exist, fail */
1808  	if (instance > 0 && !hci_find_adv_instance(hdev, instance))
1809  		return -EINVAL;
1810  
1811  	return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_REMOVE_ADV_SET,
1812  					sizeof(instance), &instance, 0,
1813  					HCI_CMD_TIMEOUT, sk);
1814  }
1815  
1816  static int remove_ext_adv_sync(struct hci_dev *hdev, void *data)
1817  {
1818  	struct adv_info *adv = data;
1819  	u8 instance = 0;
1820  
1821  	if (adv)
1822  		instance = adv->instance;
1823  
1824  	return hci_remove_ext_adv_instance_sync(hdev, instance, NULL);
1825  }
1826  
1827  int hci_remove_ext_adv_instance(struct hci_dev *hdev, u8 instance)
1828  {
1829  	struct adv_info *adv = NULL;
1830  
1831  	if (instance) {
1832  		adv = hci_find_adv_instance(hdev, instance);
1833  		if (!adv)
1834  			return -EINVAL;
1835  	}
1836  
1837  	return hci_cmd_sync_queue(hdev, remove_ext_adv_sync, adv, NULL);
1838  }
1839  
1840  int hci_le_terminate_big_sync(struct hci_dev *hdev, u8 handle, u8 reason)
1841  {
1842  	struct hci_cp_le_term_big cp;
1843  
1844  	memset(&cp, 0, sizeof(cp));
1845  	cp.handle = handle;
1846  	cp.reason = reason;
1847  
1848  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_TERM_BIG,
1849  				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1850  }
1851  
1852  static int hci_set_ext_adv_data_sync(struct hci_dev *hdev, u8 instance)
1853  {
1854  	struct {
1855  		struct hci_cp_le_set_ext_adv_data cp;
1856  		u8 data[HCI_MAX_EXT_AD_LENGTH];
1857  	} pdu;
1858  	u8 len;
1859  	struct adv_info *adv = NULL;
1860  	int err;
1861  
1862  	memset(&pdu, 0, sizeof(pdu));
1863  
1864  	if (instance) {
1865  		adv = hci_find_adv_instance(hdev, instance);
1866  		if (!adv || !adv->adv_data_changed)
1867  			return 0;
1868  	}
1869  
1870  	len = eir_create_adv_data(hdev, instance, pdu.data);
1871  
1872  	pdu.cp.length = len;
1873  	pdu.cp.handle = instance;
1874  	pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1875  	pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1876  
1877  	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_DATA,
1878  				    sizeof(pdu.cp) + len, &pdu.cp,
1879  				    HCI_CMD_TIMEOUT);
1880  	if (err)
1881  		return err;
1882  
1883  	/* Update data if the command succeeded */
1884  	if (adv) {
1885  		adv->adv_data_changed = false;
1886  	} else {
1887  		memcpy(hdev->adv_data, pdu.data, len);
1888  		hdev->adv_data_len = len;
1889  	}
1890  
1891  	return 0;
1892  }
1893  
1894  static int hci_set_adv_data_sync(struct hci_dev *hdev, u8 instance)
1895  {
1896  	struct hci_cp_le_set_adv_data cp;
1897  	u8 len;
1898  
1899  	memset(&cp, 0, sizeof(cp));
1900  
1901  	len = eir_create_adv_data(hdev, instance, cp.data);
1902  
1903  	/* There's nothing to do if the data hasn't changed */
1904  	if (hdev->adv_data_len == len &&
1905  	    memcmp(cp.data, hdev->adv_data, len) == 0)
1906  		return 0;
1907  
1908  	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1909  	hdev->adv_data_len = len;
1910  
1911  	cp.length = len;
1912  
1913  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_DATA,
1914  				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1915  }
1916  
1917  int hci_update_adv_data_sync(struct hci_dev *hdev, u8 instance)
1918  {
1919  	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1920  		return 0;
1921  
1922  	if (ext_adv_capable(hdev))
1923  		return hci_set_ext_adv_data_sync(hdev, instance);
1924  
1925  	return hci_set_adv_data_sync(hdev, instance);
1926  }
1927  
1928  int hci_schedule_adv_instance_sync(struct hci_dev *hdev, u8 instance,
1929  				   bool force)
1930  {
1931  	struct adv_info *adv = NULL;
1932  	u16 timeout;
1933  
1934  	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) && !ext_adv_capable(hdev))
1935  		return -EPERM;
1936  
1937  	if (hdev->adv_instance_timeout)
1938  		return -EBUSY;
1939  
1940  	adv = hci_find_adv_instance(hdev, instance);
1941  	if (!adv)
1942  		return -ENOENT;
1943  
1944  	/* A zero timeout means unlimited advertising. As long as there is
1945  	 * only one instance, duration should be ignored. We still set a timeout
1946  	 * in case further instances are being added later on.
1947  	 *
1948  	 * If the remaining lifetime of the instance is more than the duration
1949  	 * then the timeout corresponds to the duration, otherwise it will be
1950  	 * reduced to the remaining instance lifetime.
1951  	 */
1952  	if (adv->timeout == 0 || adv->duration <= adv->remaining_time)
1953  		timeout = adv->duration;
1954  	else
1955  		timeout = adv->remaining_time;
1956  
1957  	/* The remaining time is being reduced unless the instance is being
1958  	 * advertised without time limit.
1959  	 */
1960  	if (adv->timeout)
1961  		adv->remaining_time = adv->remaining_time - timeout;
1962  
1963  	/* Only use work for scheduling instances with legacy advertising */
1964  	if (!ext_adv_capable(hdev)) {
1965  		hdev->adv_instance_timeout = timeout;
1966  		queue_delayed_work(hdev->req_workqueue,
1967  				   &hdev->adv_instance_expire,
1968  				   msecs_to_jiffies(timeout * 1000));
1969  	}
1970  
1971  	/* If we're just re-scheduling the same instance again then do not
1972  	 * execute any HCI commands. This happens when a single instance is
1973  	 * being advertised.
1974  	 */
1975  	if (!force && hdev->cur_adv_instance == instance &&
1976  	    hci_dev_test_flag(hdev, HCI_LE_ADV))
1977  		return 0;
1978  
1979  	hdev->cur_adv_instance = instance;
1980  
1981  	return hci_start_adv_sync(hdev, instance);
1982  }
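
/* Worked example, illustrative only: an instance with timeout = 10 s,
 * duration = 2 s and remaining_time = 5 s is scheduled for
 * min(duration, remaining_time) = 2 s and its remaining_time drops to
 * 3 s; once remaining_time reaches zero the instance becomes eligible
 * for removal by hci_remove_advertising_sync() with force == false.
 */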
1983  
1984  static int hci_clear_adv_sets_sync(struct hci_dev *hdev, struct sock *sk)
1985  {
1986  	int err;
1987  
1988  	if (!ext_adv_capable(hdev))
1989  		return 0;
1990  
1991  	/* Disable instance 0x00 to disable all instances */
1992  	err = hci_disable_ext_adv_instance_sync(hdev, 0x00);
1993  	if (err)
1994  		return err;
1995  
1996  	return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CLEAR_ADV_SETS,
1997  					0, NULL, 0, HCI_CMD_TIMEOUT, sk);
1998  }
1999  
2000  static int hci_clear_adv_sync(struct hci_dev *hdev, struct sock *sk, bool force)
2001  {
2002  	struct adv_info *adv, *n;
2003  	int err = 0;
2004  
2005  	if (ext_adv_capable(hdev))
2006  		/* Remove all existing sets */
2007  		err = hci_clear_adv_sets_sync(hdev, sk);
2008  	if (ext_adv_capable(hdev))
2009  		return err;
2010  
2011  	/* This is safe as long as there is no command sent while the lock is
2012  	 * held.
2013  	 */
2014  	hci_dev_lock(hdev);
2015  
2016  	/* Cleanup non-ext instances */
2017  	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
2018  		u8 instance = adv->instance;
2019  		int err;
2020  
2021  		if (!(force || adv->timeout))
2022  			continue;
2023  
2024  		err = hci_remove_adv_instance(hdev, instance);
2025  		if (!err)
2026  			mgmt_advertising_removed(sk, hdev, instance);
2027  	}
2028  
2029  	hci_dev_unlock(hdev);
2030  
2031  	return 0;
2032  }
2033  
2034  static int hci_remove_adv_sync(struct hci_dev *hdev, u8 instance,
2035  			       struct sock *sk)
2036  {
2037  	int err = 0;
2038  
2039  	/* If we use extended advertising, instance has to be removed first. */
2040  	if (ext_adv_capable(hdev))
2041  		err = hci_remove_ext_adv_instance_sync(hdev, instance, sk);
2042  	if (ext_adv_capable(hdev))
2043  		return err;
2044  
2045  	/* This is safe as long as there is no command sent while the lock is
2046  	 * held.
2047  	 */
2048  	hci_dev_lock(hdev);
2049  
2050  	err = hci_remove_adv_instance(hdev, instance);
2051  	if (!err)
2052  		mgmt_advertising_removed(sk, hdev, instance);
2053  
2054  	hci_dev_unlock(hdev);
2055  
2056  	return err;
2057  }
2058  
2059  /* For a single instance:
2060   * - force == true: The instance will be removed even when its remaining
2061   *   lifetime is not zero.
2062   * - force == false: the instance will be deactivated but kept stored unless
2063   *   the remaining lifetime is zero.
2064   *
2065   * For instance == 0x00:
2066   * - force == true: All instances will be removed regardless of their timeout
2067   *   setting.
2068   * - force == false: Only instances that have a timeout will be removed.
2069   */
2070  int hci_remove_advertising_sync(struct hci_dev *hdev, struct sock *sk,
2071  				u8 instance, bool force)
2072  {
2073  	struct adv_info *next = NULL;
2074  	int err;
2075  
2076  	/* Cancel any timeout concerning the removed instance(s). */
2077  	if (!instance || hdev->cur_adv_instance == instance)
2078  		cancel_adv_timeout(hdev);
2079  
2080  	/* Get the next instance to advertise BEFORE we remove
2081  	 * the current one. This can be the same instance again
2082  	 * if there is only one instance.
2083  	 */
2084  	if (hdev->cur_adv_instance == instance)
2085  		next = hci_get_next_instance(hdev, instance);
2086  
2087  	if (!instance) {
2088  		err = hci_clear_adv_sync(hdev, sk, force);
2089  		if (err)
2090  			return err;
2091  	} else {
2092  		struct adv_info *adv = hci_find_adv_instance(hdev, instance);
2093  
2094  		if (force || (adv && adv->timeout && !adv->remaining_time)) {
2095  			/* Don't advertise a removed instance. */
2096  			if (next && next->instance == instance)
2097  				next = NULL;
2098  
2099  			err = hci_remove_adv_sync(hdev, instance, sk);
2100  			if (err)
2101  				return err;
2102  		}
2103  	}
2104  
2105  	if (!hdev_is_powered(hdev) || hci_dev_test_flag(hdev, HCI_ADVERTISING))
2106  		return 0;
2107  
2108  	if (next && !ext_adv_capable(hdev))
2109  		hci_schedule_adv_instance_sync(hdev, next->instance, false);
2110  
2111  	return 0;
2112  }
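
/* Worked example, illustrative only: with instances 0x01 (timeout set)
 * and 0x02 (no timeout), hci_remove_advertising_sync(hdev, sk, 0x00,
 * false) removes only 0x01, while force == true clears both; for a
 * single instance with force == false the instance survives unless its
 * remaining_time has already reached zero.
 */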
2113  
2114  int hci_read_rssi_sync(struct hci_dev *hdev, __le16 handle)
2115  {
2116  	struct hci_cp_read_rssi cp;
2117  
2118  	cp.handle = handle;
2119  	return __hci_cmd_sync_status(hdev, HCI_OP_READ_RSSI,
2120  					sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2121  }
2122  
2123  int hci_read_clock_sync(struct hci_dev *hdev, struct hci_cp_read_clock *cp)
2124  {
2125  	return __hci_cmd_sync_status(hdev, HCI_OP_READ_CLOCK,
2126  					sizeof(*cp), cp, HCI_CMD_TIMEOUT);
2127  }
2128  
2129  int hci_read_tx_power_sync(struct hci_dev *hdev, __le16 handle, u8 type)
2130  {
2131  	struct hci_cp_read_tx_power cp;
2132  
2133  	cp.handle = handle;
2134  	cp.type = type;
2135  	return __hci_cmd_sync_status(hdev, HCI_OP_READ_TX_POWER,
2136  					sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2137  }
2138  
2139  int hci_disable_advertising_sync(struct hci_dev *hdev)
2140  {
2141  	u8 enable = 0x00;
2142  	int err = 0;
2143  
2144  	/* If controller is not advertising we are done. */
2145  	if (!hci_dev_test_flag(hdev, HCI_LE_ADV))
2146  		return 0;
2147  
2148  	if (ext_adv_capable(hdev))
2149  		err = hci_disable_ext_adv_instance_sync(hdev, 0x00);
2150  	if (ext_adv_capable(hdev))
2151  		return err;
2152  
2153  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE,
2154  				     sizeof(enable), &enable, HCI_CMD_TIMEOUT);
2155  }
2156  
2157  static int hci_le_set_ext_scan_enable_sync(struct hci_dev *hdev, u8 val,
2158  					   u8 filter_dup)
2159  {
2160  	struct hci_cp_le_set_ext_scan_enable cp;
2161  
2162  	memset(&cp, 0, sizeof(cp));
2163  	cp.enable = val;
2164  
2165  	if (hci_dev_test_flag(hdev, HCI_MESH))
2166  		cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
2167  	else
2168  		cp.filter_dup = filter_dup;
2169  
2170  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
2171  				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2172  }
2173  
2174  static int hci_le_set_scan_enable_sync(struct hci_dev *hdev, u8 val,
2175  				       u8 filter_dup)
2176  {
2177  	struct hci_cp_le_set_scan_enable cp;
2178  
2179  	if (use_ext_scan(hdev))
2180  		return hci_le_set_ext_scan_enable_sync(hdev, val, filter_dup);
2181  
2182  	memset(&cp, 0, sizeof(cp));
2183  	cp.enable = val;
2184  
2185  	if (val && hci_dev_test_flag(hdev, HCI_MESH))
2186  		cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
2187  	else
2188  		cp.filter_dup = filter_dup;
2189  
2190  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_ENABLE,
2191  				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2192  }
2193  
2194  static int hci_le_set_addr_resolution_enable_sync(struct hci_dev *hdev, u8 val)
2195  {
2196  	if (!use_ll_privacy(hdev))
2197  		return 0;
2198  
2199  	/* If the controller is already in the requested state we are done. */
2200  	if (val == hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
2201  		return 0;
2202  
2203  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE,
2204  				     sizeof(val), &val, HCI_CMD_TIMEOUT);
2205  }
2206  
2207  static int hci_scan_disable_sync(struct hci_dev *hdev)
2208  {
2209  	int err;
2210  
2211  	/* If controller is not scanning we are done. */
2212  	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2213  		return 0;
2214  
2215  	if (hdev->scanning_paused) {
2216  		bt_dev_dbg(hdev, "Scanning is paused for suspend");
2217  		return 0;
2218  	}
2219  
2220  	err = hci_le_set_scan_enable_sync(hdev, LE_SCAN_DISABLE, 0x00);
2221  	if (err) {
2222  		bt_dev_err(hdev, "Unable to disable scanning: %d", err);
2223  		return err;
2224  	}
2225  
2226  	return err;
2227  }
2228  
2229  static bool scan_use_rpa(struct hci_dev *hdev)
2230  {
2231  	return hci_dev_test_flag(hdev, HCI_PRIVACY);
2232  }
2233  
2234  static void hci_start_interleave_scan(struct hci_dev *hdev)
2235  {
2236  	hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
2237  	queue_delayed_work(hdev->req_workqueue,
2238  			   &hdev->interleave_scan, 0);
2239  }
2240  
2241  static bool is_interleave_scanning(struct hci_dev *hdev)
2242  {
2243  	return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
2244  }
2245  
2246  static void cancel_interleave_scan(struct hci_dev *hdev)
2247  {
2248  	bt_dev_dbg(hdev, "cancelling interleave scan");
2249  
2250  	cancel_delayed_work_sync(&hdev->interleave_scan);
2251  
2252  	hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
2253  }
2254  
2255  /* Return true if this function started the interleave scan, otherwise
2256   * return false.
2257   */
2258  static bool hci_update_interleaved_scan_sync(struct hci_dev *hdev)
2259  {
2260  	/* Do interleaved scan only if all of the following are true:
2261  	 * - There is at least one ADV monitor
2262  	 * - At least one pending LE connection or one device to be scanned for
2263  	 * - Monitor offloading is not supported
2264  	 * If so, we should alternate between allowlist scan and one without
2265  	 * any filters to save power.
2266  	 */
2267  	bool use_interleaving = hci_is_adv_monitoring(hdev) &&
2268  				!(list_empty(&hdev->pend_le_conns) &&
2269  				  list_empty(&hdev->pend_le_reports)) &&
2270  				hci_get_adv_monitor_offload_ext(hdev) ==
2271  				    HCI_ADV_MONITOR_EXT_NONE;
2272  	bool is_interleaving = is_interleave_scanning(hdev);
2273  
2274  	if (use_interleaving && !is_interleaving) {
2275  		hci_start_interleave_scan(hdev);
2276  		bt_dev_dbg(hdev, "starting interleave scan");
2277  		return true;
2278  	}
2279  
2280  	if (!use_interleaving && is_interleaving)
2281  		cancel_interleave_scan(hdev);
2282  
2283  	return false;
2284  }
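
/* Illustrative note: interleaving alternates over time between an
 * allowlist-filtered scan and an unfiltered one (driven by the
 * hdev->interleave_scan delayed work), so ADV monitors still observe
 * unfiltered reports without the power cost of permanently scanning
 * without filters.
 */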
2285  
2286  /* Removes connection from the resolving list if needed. */
2287  static int hci_le_del_resolve_list_sync(struct hci_dev *hdev,
2288  					bdaddr_t *bdaddr, u8 bdaddr_type)
2289  {
2290  	struct hci_cp_le_del_from_resolv_list cp;
2291  	struct bdaddr_list_with_irk *entry;
2292  
2293  	if (!use_ll_privacy(hdev))
2294  		return 0;
2295  
2296  	/* Check if the IRK has been programmed */
2297  	entry = hci_bdaddr_list_lookup_with_irk(&hdev->le_resolv_list, bdaddr,
2298  						bdaddr_type);
2299  	if (!entry)
2300  		return 0;
2301  
2302  	cp.bdaddr_type = bdaddr_type;
2303  	bacpy(&cp.bdaddr, bdaddr);
2304  
2305  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
2306  				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2307  }
2308  
2309  static int hci_le_del_accept_list_sync(struct hci_dev *hdev,
2310  				       bdaddr_t *bdaddr, u8 bdaddr_type)
2311  {
2312  	struct hci_cp_le_del_from_accept_list cp;
2313  	int err;
2314  
2315  	/* Check if device is on accept list before removing it */
2316  	if (!hci_bdaddr_list_lookup(&hdev->le_accept_list, bdaddr, bdaddr_type))
2317  		return 0;
2318  
2319  	cp.bdaddr_type = bdaddr_type;
2320  	bacpy(&cp.bdaddr, bdaddr);
2321  
2322  	/* Ignore errors when removing from resolving list as that is likely
2323  	 * that the device was never added.
2324  	 */
2325  	hci_le_del_resolve_list_sync(hdev, &cp.bdaddr, cp.bdaddr_type);
2326  
2327  	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST,
2328  				    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2329  	if (err) {
2330  		bt_dev_err(hdev, "Unable to remove from allow list: %d", err);
2331  		return err;
2332  	}
2333  
2334  	bt_dev_dbg(hdev, "Remove %pMR (0x%x) from allow list", &cp.bdaddr,
2335  		   cp.bdaddr_type);
2336  
2337  	return 0;
2338  }
2339  
2340  struct conn_params {
2341  	bdaddr_t addr;
2342  	u8 addr_type;
2343  	hci_conn_flags_t flags;
2344  	u8 privacy_mode;
2345  };
2346  
2347  /* Adds connection to the resolving list if needed.
2348   * Setting params to NULL programs local hdev->irk
2349   */
2350  static int hci_le_add_resolve_list_sync(struct hci_dev *hdev,
2351  					struct conn_params *params)
2352  {
2353  	struct hci_cp_le_add_to_resolv_list cp;
2354  	struct smp_irk *irk;
2355  	struct bdaddr_list_with_irk *entry;
2356  	struct hci_conn_params *p;
2357  
2358  	if (!use_ll_privacy(hdev))
2359  		return 0;
2360  
2361  	/* Attempt to program local identity address, type and IRK if params is
2362  	 * NULL.
2363  	 */
2364  	if (!params) {
2365  		if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
2366  			return 0;
2367  
2368  		hci_copy_identity_address(hdev, &cp.bdaddr, &cp.bdaddr_type);
2369  		memcpy(cp.peer_irk, hdev->irk, 16);
2370  		goto done;
2371  	}
2372  
2373  	irk = hci_find_irk_by_addr(hdev, &params->addr, params->addr_type);
2374  	if (!irk)
2375  		return 0;
2376  
2377  	/* Check if the IRK has _not_ been programmed yet. */
2378  	entry = hci_bdaddr_list_lookup_with_irk(&hdev->le_resolv_list,
2379  						&params->addr,
2380  						params->addr_type);
2381  	if (entry)
2382  		return 0;
2383  
2384  	cp.bdaddr_type = params->addr_type;
2385  	bacpy(&cp.bdaddr, &params->addr);
2386  	memcpy(cp.peer_irk, irk->val, 16);
2387  
2388  	/* Default privacy mode is always Network */
2389  	params->privacy_mode = HCI_NETWORK_PRIVACY;
2390  
2391  	rcu_read_lock();
2392  	p = hci_pend_le_action_lookup(&hdev->pend_le_conns,
2393  				      &params->addr, params->addr_type);
2394  	if (!p)
2395  		p = hci_pend_le_action_lookup(&hdev->pend_le_reports,
2396  					      &params->addr, params->addr_type);
2397  	if (p)
2398  		WRITE_ONCE(p->privacy_mode, HCI_NETWORK_PRIVACY);
2399  	rcu_read_unlock();
2400  
2401  done:
2402  	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
2403  		memcpy(cp.local_irk, hdev->irk, 16);
2404  	else
2405  		memset(cp.local_irk, 0, 16);
2406  
2407  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST,
2408  				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2409  }
2410  
2411  /* Set Device Privacy Mode. */
2412  static int hci_le_set_privacy_mode_sync(struct hci_dev *hdev,
2413  					struct conn_params *params)
2414  {
2415  	struct hci_cp_le_set_privacy_mode cp;
2416  	struct smp_irk *irk;
2417  
2418  	/* If device privacy mode has already been set there is nothing to do */
2419  	if (params->privacy_mode == HCI_DEVICE_PRIVACY)
2420  		return 0;
2421  
2422  	/* Check if HCI_CONN_FLAG_DEVICE_PRIVACY has been set as it also
2423  	 * indicates that LL Privacy has been enabled and
2424  	 * HCI_OP_LE_SET_PRIVACY_MODE is supported.
2425  	 */
2426  	if (!(params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY))
2427  		return 0;
2428  
2429  	irk = hci_find_irk_by_addr(hdev, &params->addr, params->addr_type);
2430  	if (!irk)
2431  		return 0;
2432  
2433  	memset(&cp, 0, sizeof(cp));
2434  	cp.bdaddr_type = irk->addr_type;
2435  	bacpy(&cp.bdaddr, &irk->bdaddr);
2436  	cp.mode = HCI_DEVICE_PRIVACY;
2437  
2438  	/* Note: params->privacy_mode is not updated since it is a copy */
2439  
2440  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PRIVACY_MODE,
2441  				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2442  }
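
/* Background note, summarized from the Core specification: in the
 * default Network Privacy mode the controller only accepts an RPA from
 * a peer whose IRK has been programmed, whereas Device Privacy mode
 * also accepts the peer's identity address; that is why setting
 * HCI_CONN_FLAG_DEVICE_PRIVACY switches the mode above.
 */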
2443  
2444  /* Adds connection to allow list if needed, if the device uses RPA (has IRK)
2445   * this attempts to program the device in the resolving list as well and
2446   * properly set the privacy mode.
2447   */
2448  static int hci_le_add_accept_list_sync(struct hci_dev *hdev,
2449  				       struct conn_params *params,
2450  				       u8 *num_entries)
2451  {
2452  	struct hci_cp_le_add_to_accept_list cp;
2453  	int err;
2454  
2455  	/* During suspend, only wakeable devices can be in acceptlist */
2456  	if (hdev->suspended &&
2457  	    !(params->flags & HCI_CONN_FLAG_REMOTE_WAKEUP)) {
2458  		hci_le_del_accept_list_sync(hdev, &params->addr,
2459  					    params->addr_type);
2460  		return 0;
2461  	}
2462  
2463  	/* If the accept list is full, fall back to accepting all advertising */
2464  	if (*num_entries >= hdev->le_accept_list_size)
2465  		return -ENOSPC;
2466  
2467  	/* Accept list cannot be used with RPAs */
2468  	if (!use_ll_privacy(hdev) &&
2469  	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type))
2470  		return -EINVAL;
2471  
2472  	/* Attempt to program the device in the resolving list first to avoid
2473  	 * having to roll back in case it fails, since the resolving list is
2474  	 * dynamic and can probably be smaller than the accept list.
2475  	 */
2476  	err = hci_le_add_resolve_list_sync(hdev, params);
2477  	if (err) {
2478  		bt_dev_err(hdev, "Unable to add to resolve list: %d", err);
2479  		return err;
2480  	}
2481  
2482  	/* Set Privacy Mode */
2483  	err = hci_le_set_privacy_mode_sync(hdev, params);
2484  	if (err) {
2485  		bt_dev_err(hdev, "Unable to set privacy mode: %d", err);
2486  		return err;
2487  	}
2488  
2489  	/* Check if already in accept list */
2490  	if (hci_bdaddr_list_lookup(&hdev->le_accept_list, &params->addr,
2491  				   params->addr_type))
2492  		return 0;
2493  
2494  	*num_entries += 1;
2495  	cp.bdaddr_type = params->addr_type;
2496  	bacpy(&cp.bdaddr, &params->addr);
2497  
2498  	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST,
2499  				    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2500  	if (err) {
2501  		bt_dev_err(hdev, "Unable to add to allow list: %d", err);
2502  		/* Rollback the device from the resolving list */
2503  		hci_le_del_resolve_list_sync(hdev, &cp.bdaddr, cp.bdaddr_type);
2504  		return err;
2505  	}
2506  
2507  	bt_dev_dbg(hdev, "Add %pMR (0x%x) to allow list", &cp.bdaddr,
2508  		   cp.bdaddr_type);
2509  
2510  	return 0;
2511  }
2512  
2513  /* This function disables/pauses all advertising instances */
2514  static int hci_pause_advertising_sync(struct hci_dev *hdev)
2515  {
2516  	int err;
2517  	int old_state;
2518  
2519  	/* If advertising has already been paused there is nothing to do. */
2520  	if (hdev->advertising_paused)
2521  		return 0;
2522  
2523  	bt_dev_dbg(hdev, "Pausing directed advertising");
2524  
2525  	/* Stop directed advertising */
2526  	old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
2527  	if (old_state) {
2528  		/* When discoverable timeout triggers, then just make sure
2529  		 * the limited discoverable flag is cleared. Even in the case
2530  		 * of a timeout triggered from general discoverable, it is
2531  		 * safe to unconditionally clear the flag.
2532  		 */
2533  		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2534  		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2535  		hdev->discov_timeout = 0;
2536  	}
2537  
2538  	bt_dev_dbg(hdev, "Pausing advertising instances");
2539  
2540  	/* Call to disable any advertisements active on the controller.
2541  	 * This will succeed even if no advertisements are configured.
2542  	 */
2543  	err = hci_disable_advertising_sync(hdev);
2544  	if (err)
2545  		return err;
2546  
2547  	/* If we are using software rotation, pause the loop */
2548  	if (!ext_adv_capable(hdev))
2549  		cancel_adv_timeout(hdev);
2550  
2551  	hdev->advertising_paused = true;
2552  	hdev->advertising_old_state = old_state;
2553  
2554  	return 0;
2555  }
2556  
2557  /* This function enables all user advertising instances */
2558  static int hci_resume_advertising_sync(struct hci_dev *hdev)
2559  {
2560  	struct adv_info *adv, *tmp;
2561  	int err = 0;
2562  
2563  	/* If advertising has not been paused there is nothing to do. */
2564  	if (!hdev->advertising_paused)
2565  		return 0;
2566  
2567  	/* Resume directed advertising */
2568  	hdev->advertising_paused = false;
2569  	if (hdev->advertising_old_state) {
2570  		hci_dev_set_flag(hdev, HCI_ADVERTISING);
2571  		hdev->advertising_old_state = 0;
2572  	}
2573  
2574  	bt_dev_dbg(hdev, "Resuming advertising instances");
2575  
2576  	if (ext_adv_capable(hdev)) {
2577  		/* Call for each tracked instance to be re-enabled */
2578  		list_for_each_entry_safe(adv, tmp, &hdev->adv_instances, list) {
2579  			err = hci_enable_ext_advertising_sync(hdev,
2580  							      adv->instance);
2581  			if (!err)
2582  				continue;
2583  
2584  			/* If the instance cannot be resumed remove it */
2585  			hci_remove_ext_adv_instance_sync(hdev, adv->instance,
2586  							 NULL);
2587  		}
2588  	} else {
2589  		/* Schedule for most recent instance to be restarted and begin
2590  		 * the software rotation loop
2591  		 */
2592  		err = hci_schedule_adv_instance_sync(hdev,
2593  						     hdev->cur_adv_instance,
2594  						     true);
2595  	}
2596  
2597  	hdev->advertising_paused = false;
2598  
2599  	return err;
2600  }
2601  
2602  static int hci_pause_addr_resolution(struct hci_dev *hdev)
2603  {
2604  	int err;
2605  
2606  	if (!use_ll_privacy(hdev))
2607  		return 0;
2608  
2609  	if (!hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
2610  		return 0;
2611  
2612  	/* Cannot disable addr resolution if scanning is enabled or
2613  	 * when initiating an LE connection.
2614  	 */
2615  	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2616  	    hci_lookup_le_connect(hdev)) {
2617  		bt_dev_err(hdev, "Command not allowed when scan/LE connect");
2618  		return -EPERM;
2619  	}
2620  
2621  	/* Cannot disable addr resolution if advertising is enabled. */
2622  	err = hci_pause_advertising_sync(hdev);
2623  	if (err) {
2624  		bt_dev_err(hdev, "Pause advertising failed: %d", err);
2625  		return err;
2626  	}
2627  
2628  	err = hci_le_set_addr_resolution_enable_sync(hdev, 0x00);
2629  	if (err)
2630  		bt_dev_err(hdev, "Unable to disable Address Resolution: %d",
2631  			   err);
2632  
2633  	/* Keep advertising paused if resolution was disabled and RPA is used. */
2634  	if (!err && scan_use_rpa(hdev))
2635  		return 0;
2636  
2637  	hci_resume_advertising_sync(hdev);
2638  	return err;
2639  }
2640  
2641  struct sk_buff *hci_read_local_oob_data_sync(struct hci_dev *hdev,
2642  					     bool extended, struct sock *sk)
2643  {
2644  	u16 opcode = extended ? HCI_OP_READ_LOCAL_OOB_EXT_DATA :
2645  					HCI_OP_READ_LOCAL_OOB_DATA;
2646  
2647  	return __hci_cmd_sync_sk(hdev, opcode, 0, NULL, 0, HCI_CMD_TIMEOUT, sk);
2648  }
2649  
2650  static struct conn_params *conn_params_copy(struct list_head *list, size_t *n)
2651  {
2652  	struct hci_conn_params *params;
2653  	struct conn_params *p;
2654  	size_t i;
2655  
2656  	rcu_read_lock();
2657  
2658  	i = 0;
2659  	list_for_each_entry_rcu(params, list, action)
2660  		++i;
2661  	*n = i;
2662  
2663  	rcu_read_unlock();
2664  
2665  	p = kvcalloc(*n, sizeof(struct conn_params), GFP_KERNEL);
2666  	if (!p)
2667  		return NULL;
2668  
2669  	rcu_read_lock();
2670  
2671  	i = 0;
2672  	list_for_each_entry_rcu(params, list, action) {
2673  		/* Racing adds are handled in next scan update */
2674  		if (i >= *n)
2675  			break;
2676  
2677  		/* No hdev->lock, but: addr, addr_type are immutable.
2678  		 * privacy_mode is only written by us or in
2679  		 * hci_cc_le_set_privacy_mode that we wait for.
2680  		 * We should be idempotent so MGMT updating flags
2681  		 * while we are processing is OK.
2682  		 */
2683  		bacpy(&p[i].addr, &params->addr);
2684  		p[i].addr_type = params->addr_type;
2685  		p[i].flags = READ_ONCE(params->flags);
2686  		p[i].privacy_mode = READ_ONCE(params->privacy_mode);
2687  		++i;
2688  	}
2689  
2690  	rcu_read_unlock();
2691  
2692  	*n = i;
2693  	return p;
2694  }
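
/* Illustrative usage sketch, not part of the original file: the snapshot
 * returned by conn_params_copy() can be walked while HCI commands sleep,
 * and must be released with kvfree():
 *
 *	size_t i, n;
 *	struct conn_params *p = conn_params_copy(&hdev->pend_le_conns, &n);
 *
 *	if (!p)
 *		return -ENOMEM;
 *	for (i = 0; i < n; ++i)
 *		... program p[i] into the controller ...
 *	kvfree(p);
 */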
2695  
2696  /* Device must not be scanning when updating the accept list.
2697   *
2698   * Update is done using the following sequence:
2699   *
2700   * use_ll_privacy((Disable Advertising) -> Disable Resolving List) ->
2701   * Remove Devices From Accept List ->
2702   * (has IRK && use_ll_privacy(Remove Devices From Resolving List))->
2703   * Add Devices to Accept List ->
2704   * (has IRK && use_ll_privacy(Remove Devices From Resolving List)) ->
2705   * use_ll_privacy(Enable Resolving List -> (Enable Advertising)) ->
2706   * Enable Scanning
2707   *
2708   * In case of failure advertising shall be restored to its original state and
2709   * the returned filter policy shall disable the accept list since either the
2710   * accept or the resolving list could not be programmed.
2711   *
2712   */
2713  static u8 hci_update_accept_list_sync(struct hci_dev *hdev)
2714  {
2715  	struct conn_params *params;
2716  	struct bdaddr_list *b, *t;
2717  	u8 num_entries = 0;
2718  	bool pend_conn, pend_report;
2719  	u8 filter_policy;
2720  	size_t i, n;
2721  	int err;
2722  
2723  	/* Pause advertising if resolving list can be used as controllers
2724  	 * cannot accept resolving list modifications while advertising.
2725  	 */
2726  	if (use_ll_privacy(hdev)) {
2727  		err = hci_pause_advertising_sync(hdev);
2728  		if (err) {
2729  			bt_dev_err(hdev, "pause advertising failed: %d", err);
2730  			return 0x00;
2731  		}
2732  	}
2733  
2734  	/* Disable address resolution while reprogramming accept list since
2735  	 * devices that do have an IRK will be programmed in the resolving list
2736  	 * when LL Privacy is enabled.
2737  	 */
2738  	err = hci_le_set_addr_resolution_enable_sync(hdev, 0x00);
2739  	if (err) {
2740  		bt_dev_err(hdev, "Unable to disable LL privacy: %d", err);
2741  		goto done;
2742  	}
2743  
2744  	/* Go through the current accept list programmed into the
2745  	 * controller one by one and check if that address is connected or is
2746  	 * still in the list of pending connections or list of devices to
2747  	 * report. If not present in either list, then remove it from
2748  	 * the controller.
2749  	 */
2750  	list_for_each_entry_safe(b, t, &hdev->le_accept_list, list) {
2751  		if (hci_conn_hash_lookup_le(hdev, &b->bdaddr, b->bdaddr_type))
2752  			continue;
2753  
2754  		/* Pointers not dereferenced, no locks needed */
2755  		pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
2756  						      &b->bdaddr,
2757  						      b->bdaddr_type);
2758  		pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
2759  							&b->bdaddr,
2760  							b->bdaddr_type);
2761  
2762  		/* If the device is not likely to connect or report,
2763  		 * remove it from the acceptlist.
2764  		 */
2765  		if (!pend_conn && !pend_report) {
2766  			hci_le_del_accept_list_sync(hdev, &b->bdaddr,
2767  						    b->bdaddr_type);
2768  			continue;
2769  		}
2770  
2771  		num_entries++;
2772  	}
2773  
2774  	/* Since all no longer valid accept list entries have been
2775  	 * removed, walk through the list of pending connections
2776  	 * and ensure that any new device gets programmed into
2777  	 * the controller.
2778  	 *
2779  	 * If the list of devices is larger than the list of
2780  	 * available accept list entries in the controller, then
2781  	 * just abort and return a filter policy value that does not
2782  	 * use the accept list.
2783  	 *
2784  	 * The list and params may be mutated while we wait for events,
2785  	 * so make a copy and iterate it.
2786  	 */
2787  
2788  	params = conn_params_copy(&hdev->pend_le_conns, &n);
2789  	if (!params) {
2790  		err = -ENOMEM;
2791  		goto done;
2792  	}
2793  
2794  	for (i = 0; i < n; ++i) {
2795  		err = hci_le_add_accept_list_sync(hdev, &params[i],
2796  						  &num_entries);
2797  		if (err) {
2798  			kvfree(params);
2799  			goto done;
2800  		}
2801  	}
2802  
2803  	kvfree(params);
2804  
2805  	/* After adding all new pending connections, walk through
2806  	 * the list of pending reports and also add these to the
2807  	 * accept list if there is still space. Abort if space runs out.
2808  	 */
2809  
2810  	params = conn_params_copy(&hdev->pend_le_reports, &n);
2811  	if (!params) {
2812  		err = -ENOMEM;
2813  		goto done;
2814  	}
2815  
2816  	for (i = 0; i < n; ++i) {
2817  		err = hci_le_add_accept_list_sync(hdev, &params[i],
2818  						  &num_entries);
2819  		if (err) {
2820  			kvfree(params);
2821  			goto done;
2822  		}
2823  	}
2824  
2825  	kvfree(params);
2826  
2827  	/* Use the allowlist unless the following conditions are all true:
2828  	 * - We are not currently suspending
2829  	 * - There are 1 or more ADV monitors registered and it's not offloaded
2830  	 * - Interleaved scanning is not currently using the allowlist
2831  	 */
2832  	if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
2833  	    hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE &&
2834  	    hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
2835  		err = -EINVAL;
2836  
2837  done:
2838  	filter_policy = err ? 0x00 : 0x01;
2839  
2840  	/* Enable address resolution when LL Privacy is enabled. */
2841  	err = hci_le_set_addr_resolution_enable_sync(hdev, 0x01);
2842  	if (err)
2843  		bt_dev_err(hdev, "Unable to enable LL privacy: %d", err);
2844  
2845  	/* Resume advertising if it was paused */
2846  	if (use_ll_privacy(hdev))
2847  		hci_resume_advertising_sync(hdev);
2848  
2849  	/* Select filter policy to use accept list */
2850  	return filter_policy;
2851  }
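
/* Illustrative note: the returned filter policy is 0x00 (process all
 * advertising) when programming failed or the accept list cannot be
 * used, and 0x01 (accept list only) on success; hci_passive_scan_sync()
 * below may further OR in 0x02 when extended scanner filter policies
 * are supported.
 */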
2852  
2853  static void hci_le_scan_phy_params(struct hci_cp_le_scan_phy_params *cp,
2854  				   u8 type, u16 interval, u16 window)
2855  {
2856  	cp->type = type;
2857  	cp->interval = cpu_to_le16(interval);
2858  	cp->window = cpu_to_le16(window);
2859  }
2860  
2861  static int hci_le_set_ext_scan_param_sync(struct hci_dev *hdev, u8 type,
2862  					  u16 interval, u16 window,
2863  					  u8 own_addr_type, u8 filter_policy)
2864  {
2865  	struct hci_cp_le_set_ext_scan_params *cp;
2866  	struct hci_cp_le_scan_phy_params *phy;
2867  	u8 data[sizeof(*cp) + sizeof(*phy) * 2];
2868  	u8 num_phy = 0x00;
2869  
2870  	cp = (void *)data;
2871  	phy = (void *)cp->data;
2872  
2873  	memset(data, 0, sizeof(data));
2874  
2875  	cp->own_addr_type = own_addr_type;
2876  	cp->filter_policy = filter_policy;
2877  
2878  	/* If PA Sync is in progress, select the PHY based on the
2879  	 * hci_conn.iso_qos.
2880  	 */
2881  	if (hci_dev_test_flag(hdev, HCI_PA_SYNC)) {
2882  		struct hci_cp_le_add_to_accept_list *sent;
2883  
2884  		sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
2885  		if (sent) {
2886  			struct hci_conn *conn;
2887  
2888  			conn = hci_conn_hash_lookup_ba(hdev, ISO_LINK,
2889  						       &sent->bdaddr);
2890  			if (conn) {
2891  				struct bt_iso_qos *qos = &conn->iso_qos;
2892  
2893  				if (qos->bcast.in.phy & BT_ISO_PHY_1M ||
2894  				    qos->bcast.in.phy & BT_ISO_PHY_2M) {
2895  					cp->scanning_phys |= LE_SCAN_PHY_1M;
2896  					hci_le_scan_phy_params(phy, type,
2897  							       interval,
2898  							       window);
2899  					num_phy++;
2900  					phy++;
2901  				}
2902  
2903  				if (qos->bcast.in.phy & BT_ISO_PHY_CODED) {
2904  					cp->scanning_phys |= LE_SCAN_PHY_CODED;
2905  					hci_le_scan_phy_params(phy, type,
2906  							       interval * 3,
2907  							       window * 3);
2908  					num_phy++;
2909  					phy++;
2910  				}
2911  
2912  				if (num_phy)
2913  					goto done;
2914  			}
2915  		}
2916  	}
2917  
2918  	if (scan_1m(hdev) || scan_2m(hdev)) {
2919  		cp->scanning_phys |= LE_SCAN_PHY_1M;
2920  		hci_le_scan_phy_params(phy, type, interval, window);
2921  		num_phy++;
2922  		phy++;
2923  	}
2924  
2925  	if (scan_coded(hdev)) {
2926  		cp->scanning_phys |= LE_SCAN_PHY_CODED;
2927  		hci_le_scan_phy_params(phy, type, interval * 3, window * 3);
2928  		num_phy++;
2929  		phy++;
2930  	}
2931  
2932  done:
2933  	if (!num_phy)
2934  		return -EINVAL;
2935  
2936  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
2937  				     sizeof(*cp) + sizeof(*phy) * num_phy,
2938  				     data, HCI_CMD_TIMEOUT);
2939  }
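
/* Worked example, illustrative only: scan interval and window are in
 * units of 0.625 ms, so interval = 0x0060 (96) scans every
 * 96 * 0.625 = 60 ms on the 1M PHY, while the Coded PHY above uses
 * interval * 3 / window * 3 to compensate for the longer airtime of
 * coded packets.
 */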
2940  
2941  static int hci_le_set_scan_param_sync(struct hci_dev *hdev, u8 type,
2942  				      u16 interval, u16 window,
2943  				      u8 own_addr_type, u8 filter_policy)
2944  {
2945  	struct hci_cp_le_set_scan_param cp;
2946  
2947  	if (use_ext_scan(hdev))
2948  		return hci_le_set_ext_scan_param_sync(hdev, type, interval,
2949  						      window, own_addr_type,
2950  						      filter_policy);
2951  
2952  	memset(&cp, 0, sizeof(cp));
2953  	cp.type = type;
2954  	cp.interval = cpu_to_le16(interval);
2955  	cp.window = cpu_to_le16(window);
2956  	cp.own_address_type = own_addr_type;
2957  	cp.filter_policy = filter_policy;
2958  
2959  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_PARAM,
2960  				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2961  }
2962  
2963  static int hci_start_scan_sync(struct hci_dev *hdev, u8 type, u16 interval,
2964  			       u16 window, u8 own_addr_type, u8 filter_policy,
2965  			       u8 filter_dup)
2966  {
2967  	int err;
2968  
2969  	if (hdev->scanning_paused) {
2970  		bt_dev_dbg(hdev, "Scanning is paused for suspend");
2971  		return 0;
2972  	}
2973  
2974  	err = hci_le_set_scan_param_sync(hdev, type, interval, window,
2975  					 own_addr_type, filter_policy);
2976  	if (err)
2977  		return err;
2978  
2979  	return hci_le_set_scan_enable_sync(hdev, LE_SCAN_ENABLE, filter_dup);
2980  }
2981  
2982  static int hci_passive_scan_sync(struct hci_dev *hdev)
2983  {
2984  	u8 own_addr_type;
2985  	u8 filter_policy;
2986  	u16 window, interval;
2987  	u8 filter_dups = LE_SCAN_FILTER_DUP_ENABLE;
2988  	int err;
2989  
2990  	if (hdev->scanning_paused) {
2991  		bt_dev_dbg(hdev, "Scanning is paused for suspend");
2992  		return 0;
2993  	}
2994  
2995  	err = hci_scan_disable_sync(hdev);
2996  	if (err) {
2997  		bt_dev_err(hdev, "disable scanning failed: %d", err);
2998  		return err;
2999  	}
3000  
3001  	/* Set require_privacy to false since no SCAN_REQ are sent
3002  	 * during passive scanning. Not using a non-resolvable address
3003  	 * here is important so that peer devices using direct
3004  	 * advertising with our address will be correctly reported
3005  	 * by the controller.
3006  	 */
3007  	if (hci_update_random_address_sync(hdev, false, scan_use_rpa(hdev),
3008  					   &own_addr_type))
3009  		return 0;
3010  
3011  	if (hdev->enable_advmon_interleave_scan &&
3012  	    hci_update_interleaved_scan_sync(hdev))
3013  		return 0;
3014  
3015  	bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);
3016  
3017  	/* Adding or removing entries from the accept list must
3018  	 * happen before enabling scanning. The controller does
3019  	 * not allow accept list modification while scanning.
3020  	 */
3021  	filter_policy = hci_update_accept_list_sync(hdev);
3022  
3023  	/* If suspended and filter_policy is set to 0x00 (no acceptlist) then
3024  	 * passive scanning cannot be started since that would require the host
3025  	 * to be woken up to process the reports.
3026  	 */
3027  	if (hdev->suspended && !filter_policy) {
3028  		/* If the accept list is empty there is no need to scan
3029  		 * while suspended.
3030  		 */
3031  		if (list_empty(&hdev->le_accept_list))
3032  			return 0;
3033  
3034  		/* If there are devices in the accept_list that means some
3035  		 * devices could not be programmed, which in the non-suspended
3036  		 * case means filter_policy needs to be set to 0x00 so the host
3037  		 * filters instead. But since we are treating the suspended case
3038  		 * we can ignore devices needing host filtering and allow the
3039  		 * devices in the acceptlist to wake up the system.
3040  		 */
3041  		filter_policy = 0x01;
3042  	}
3043  
3044  	/* When the controller is using random resolvable addresses and
3045  	 * thus has LE privacy enabled, controllers that support Extended
3046  	 * Scanner Filter Policies can additionally handle directed
3047  	 * advertising.
3048  	 *
3049  	 * So instead of using filter policies 0x00 (no acceptlist)
3050  	 * and 0x01 (acceptlist enabled) use the new filter policies
3051  	 * 0x02 (no acceptlist) and 0x03 (acceptlist enabled).
3052  	 */
3053  	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
3054  	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
3055  		filter_policy |= 0x02;
3056  
3057  	if (hdev->suspended) {
3058  		window = hdev->le_scan_window_suspend;
3059  		interval = hdev->le_scan_int_suspend;
3060  	} else if (hci_is_le_conn_scanning(hdev)) {
3061  		window = hdev->le_scan_window_connect;
3062  		interval = hdev->le_scan_int_connect;
3063  	} else if (hci_is_adv_monitoring(hdev)) {
3064  		window = hdev->le_scan_window_adv_monitor;
3065  		interval = hdev->le_scan_int_adv_monitor;
3066  
3067  		/* Disable duplicates filter when scanning for advertisement
3068  		 * monitor for the following reasons.
3069  		 *
3070  		 * For HW pattern filtering (ex. MSFT), Realtek and Qualcomm
3071  		 * controllers ignore RSSI_Sampling_Period when the duplicates
3072  		 * filter is enabled.
3073  		 *
3074  		 * For SW pattern filtering, when we're not doing interleaved
3075  		 * scanning, it is necessary to disable duplicates filter,
3076  		 * otherwise hosts can only receive one advertisement and it's
3077  		 * impossible to know if a peer is still in range.
3078  		 */
3079  		filter_dups = LE_SCAN_FILTER_DUP_DISABLE;
3080  	} else {
3081  		window = hdev->le_scan_window;
3082  		interval = hdev->le_scan_interval;
3083  	}
3084  
3085  	/* Disable all filtering for Mesh */
3086  	if (hci_dev_test_flag(hdev, HCI_MESH)) {
3087  		filter_policy = 0;
3088  		filter_dups = LE_SCAN_FILTER_DUP_DISABLE;
3089  	}
3090  
3091  	bt_dev_dbg(hdev, "LE passive scan with acceptlist = %d", filter_policy);
3092  
3093  	return hci_start_scan_sync(hdev, LE_SCAN_PASSIVE, interval, window,
3094  				   own_addr_type, filter_policy, filter_dups);
3095  }
3096  
3097  /* This function controls the passive scanning based on hdev->pend_le_conns
3098   * list. If there are pending LE connection we start the background scanning,
3099   * otherwise we stop it in the following sequence:
3100   *
3101   * If there are devices to scan:
3102   *
3103   * Disable Scanning -> Update Accept List ->
3104   * use_ll_privacy((Disable Advertising) -> Disable Resolving List ->
3105   * Update Resolving List -> Enable Resolving List -> (Enable Advertising)) ->
3106   * Enable Scanning
3107   *
3108   * Otherwise:
3109   *
3110   * Disable Scanning
3111   */
3112  int hci_update_passive_scan_sync(struct hci_dev *hdev)
3113  {
3114  	int err;
3115  
3116  	if (!test_bit(HCI_UP, &hdev->flags) ||
3117  	    test_bit(HCI_INIT, &hdev->flags) ||
3118  	    hci_dev_test_flag(hdev, HCI_SETUP) ||
3119  	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
3120  	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
3121  	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
3122  		return 0;
3123  
3124  	/* No point in doing scanning if LE support hasn't been enabled */
3125  	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
3126  		return 0;
3127  
3128  	/* If discovery is active don't interfere with it */
3129  	if (hdev->discovery.state != DISCOVERY_STOPPED)
3130  		return 0;
3131  
3132  	/* Reset RSSI and UUID filters when starting background scanning
3133  	 * since these filters are meant for service discovery only.
3134  	 *
3135  	 * The Start Discovery and Start Service Discovery operations
3136  	 * ensure to set proper values for RSSI threshold and UUID
3137  	 * filter list. So it is safe to just reset them here.
3138  	 */
3139  	hci_discovery_filter_clear(hdev);
3140  
3141  	bt_dev_dbg(hdev, "ADV monitoring is %s",
3142  		   hci_is_adv_monitoring(hdev) ? "on" : "off");
3143  
3144  	if (!hci_dev_test_flag(hdev, HCI_MESH) &&
3145  	    list_empty(&hdev->pend_le_conns) &&
3146  	    list_empty(&hdev->pend_le_reports) &&
3147  	    !hci_is_adv_monitoring(hdev) &&
3148  	    !hci_dev_test_flag(hdev, HCI_PA_SYNC)) {
3149  		/* If there are no pending LE connections or devices
3150  		 * to be scanned for or no ADV monitors, we should stop the
3151  		 * background scanning.
3152  		 */
3153  
3154  		bt_dev_dbg(hdev, "stopping background scanning");
3155  
3156  		err = hci_scan_disable_sync(hdev);
3157  		if (err)
3158  			bt_dev_err(hdev, "stop background scanning failed: %d",
3159  				   err);
3160  	} else {
3161  		/* If there is at least one pending LE connection, we should
3162  		 * keep the background scan running.
3163  		 */
3164  
3165  		/* If controller is connecting, we should not start scanning
3166  		 * since some controllers are not able to scan and connect at
3167  		 * the same time.
3168  		 */
3169  		if (hci_lookup_le_connect(hdev))
3170  			return 0;
3171  
3172  		bt_dev_dbg(hdev, "start background scanning");
3173  
3174  		err = hci_passive_scan_sync(hdev);
3175  		if (err)
3176  			bt_dev_err(hdev, "start background scanning failed: %d",
3177  				   err);
3178  	}
3179  
3180  	return err;
3181  }
3182  
3183  static int update_scan_sync(struct hci_dev *hdev, void *data)
3184  {
3185  	return hci_update_scan_sync(hdev);
3186  }
3187  
3188  int hci_update_scan(struct hci_dev *hdev)
3189  {
3190  	return hci_cmd_sync_queue(hdev, update_scan_sync, NULL, NULL);
3191  }
3192  
3193  static int update_passive_scan_sync(struct hci_dev *hdev, void *data)
3194  {
3195  	return hci_update_passive_scan_sync(hdev);
3196  }
3197  
3198  int hci_update_passive_scan(struct hci_dev *hdev)
3199  {
3200  	/* Only queue if it would have any effect */
3201  	if (!test_bit(HCI_UP, &hdev->flags) ||
3202  	    test_bit(HCI_INIT, &hdev->flags) ||
3203  	    hci_dev_test_flag(hdev, HCI_SETUP) ||
3204  	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
3205  	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
3206  	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
3207  		return 0;
3208  
3209  	return hci_cmd_sync_queue_once(hdev, update_passive_scan_sync, NULL,
3210  				       NULL);
3211  }
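
/* Illustrative note: hci_cmd_sync_queue_once() is assumed to deduplicate
 * the work item, so repeated calls while an update is already queued do
 * not stack redundant passive-scan refreshes, unlike the plain
 * hci_cmd_sync_queue() used by hci_update_scan() above.
 */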
3212  
3213  int hci_write_sc_support_sync(struct hci_dev *hdev, u8 val)
3214  {
3215  	int err;
3216  
3217  	if (!bredr_sc_enabled(hdev) || lmp_host_sc_capable(hdev))
3218  		return 0;
3219  
3220  	err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SC_SUPPORT,
3221  				    sizeof(val), &val, HCI_CMD_TIMEOUT);
3222  
3223  	if (!err) {
3224  		if (val) {
3225  			hdev->features[1][0] |= LMP_HOST_SC;
3226  			hci_dev_set_flag(hdev, HCI_SC_ENABLED);
3227  		} else {
3228  			hdev->features[1][0] &= ~LMP_HOST_SC;
3229  			hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
3230  		}
3231  	}
3232  
3233  	return err;
3234  }
3235  
3236  int hci_write_ssp_mode_sync(struct hci_dev *hdev, u8 mode)
3237  {
3238  	int err;
3239  
3240  	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED) ||
3241  	    lmp_host_ssp_capable(hdev))
3242  		return 0;
3243  
3244  	if (!mode && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS)) {
3245  		__hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
3246  				      sizeof(mode), &mode, HCI_CMD_TIMEOUT);
3247  	}
3248  
3249  	err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_MODE,
3250  				    sizeof(mode), &mode, HCI_CMD_TIMEOUT);
3251  	if (err)
3252  		return err;
3253  
3254  	return hci_write_sc_support_sync(hdev, 0x01);
3255  }
3256  
3257  int hci_write_le_host_supported_sync(struct hci_dev *hdev, u8 le, u8 simul)
3258  {
3259  	struct hci_cp_write_le_host_supported cp;
3260  
3261  	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
3262  	    !lmp_bredr_capable(hdev))
3263  		return 0;
3264  
3265  	/* Check first if we already have the right host state
3266  	 * (host features set)
3267  	 */
3268  	if (le == lmp_host_le_capable(hdev) &&
3269  	    simul == lmp_host_le_br_capable(hdev))
3270  		return 0;
3271  
3272  	memset(&cp, 0, sizeof(cp));
3273  
3274  	cp.le = le;
3275  	cp.simul = simul;
3276  
3277  	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED,
3278  				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
3279  }
3280  
3281  static int hci_powered_update_adv_sync(struct hci_dev *hdev)
3282  {
3283  	struct adv_info *adv, *tmp;
3284  	int err;
3285  
3286  	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
3287  		return 0;
3288  
3289  	/* If RPA Resolution has not been enabled yet it means the
3290  	 * resolving list is empty and we should attempt to program the
3291  	 * local IRK in order to support using own_addr_type
3292  	 * ADDR_LE_DEV_RANDOM_RESOLVED (0x03).
3293  	 */
3294  	if (!hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) {
3295  		hci_le_add_resolve_list_sync(hdev, NULL);
3296  		hci_le_set_addr_resolution_enable_sync(hdev, 0x01);
3297  	}
3298  
3299  	/* Make sure the controller has a good default for
3300  	 * advertising data. This also applies to the case
3301  	 * where BR/EDR was toggled during the AUTO_OFF phase.
3302  	 */
3303  	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
3304  	    list_empty(&hdev->adv_instances)) {
3305  		if (ext_adv_capable(hdev)) {
3306  			err = hci_setup_ext_adv_instance_sync(hdev, 0x00);
3307  			if (!err)
3308  				hci_update_scan_rsp_data_sync(hdev, 0x00);
3309  		} else {
3310  			err = hci_update_adv_data_sync(hdev, 0x00);
3311  			if (!err)
3312  				hci_update_scan_rsp_data_sync(hdev, 0x00);
3313  		}
3314  
3315  		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
3316  			hci_enable_advertising_sync(hdev);
3317  	}
3318  
3319  	/* Schedule each tracked advertising instance */
3320  	list_for_each_entry_safe(adv, tmp, &hdev->adv_instances, list)
3321  		hci_schedule_adv_instance_sync(hdev, adv->instance, true);
3322  
3323  	return 0;
3324  }
3325  
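/* Descriptive note (added for clarity): sync the controller's Authentication
 * Enable setting with the HCI_LINK_SECURITY flag, skipping the HCI command
 * when the controller state already matches.
 */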
3326  static int hci_write_auth_enable_sync(struct hci_dev *hdev)
3327  {
3328  	u8 link_sec;
3329  
3330  	link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
3331  	if (link_sec == test_bit(HCI_AUTH, &hdev->flags))
3332  		return 0;
3333  
3334  	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_AUTH_ENABLE,
3335  				     sizeof(link_sec), &link_sec,
3336  				     HCI_CMD_TIMEOUT);
3337  }
3338  
3339  int hci_write_fast_connectable_sync(struct hci_dev *hdev, bool enable)
3340  {
3341  	struct hci_cp_write_page_scan_activity cp;
3342  	u8 type;
3343  	int err = 0;
3344  
3345  	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
3346  		return 0;
3347  
3348  	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
3349  		return 0;
3350  
3351  	memset(&cp, 0, sizeof(cp));
3352  
3353  	if (enable) {
3354  		type = PAGE_SCAN_TYPE_INTERLACED;
3355  
3356  		/* 160 msec page scan interval */
3357  		cp.interval = cpu_to_le16(0x0100);
3358  	} else {
3359  		type = hdev->def_page_scan_type;
3360  		cp.interval = cpu_to_le16(hdev->def_page_scan_int);
3361  	}
3362  
3363  	cp.window = cpu_to_le16(hdev->def_page_scan_window);
3364  
3365  	if (__cpu_to_le16(hdev->page_scan_interval) != cp.interval ||
3366  	    __cpu_to_le16(hdev->page_scan_window) != cp.window) {
3367  		err = __hci_cmd_sync_status(hdev,
3368  					    HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
3369  					    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
3370  		if (err)
3371  			return err;
3372  	}
3373  
3374  	if (hdev->page_scan_type != type)
3375  		err = __hci_cmd_sync_status(hdev,
3376  					    HCI_OP_WRITE_PAGE_SCAN_TYPE,
3377  					    sizeof(type), &type,
3378  					    HCI_CMD_TIMEOUT);
3379  
3380  	return err;
3381  }
3382  
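/* Returns true when at least one accept list entry has no active ACL
 * connection (neither BT_CONNECTED nor BT_CONFIG), in which case page
 * scanning must stay enabled so that device can reconnect.
 */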
3383  static bool disconnected_accept_list_entries(struct hci_dev *hdev)
3384  {
3385  	struct bdaddr_list *b;
3386  
3387  	list_for_each_entry(b, &hdev->accept_list, list) {
3388  		struct hci_conn *conn;
3389  
3390  		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
3391  		if (!conn)
3392  			return true;
3393  
3394  		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3395  			return true;
3396  	}
3397  
3398  	return false;
3399  }
3400  
3401  static int hci_write_scan_enable_sync(struct hci_dev *hdev, u8 val)
3402  {
3403  	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE,
3404  					    sizeof(val), &val,
3405  					    HCI_CMD_TIMEOUT);
3406  }
3407  
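/* Recompute the BR/EDR scan mode (page scan and/or inquiry scan) from the
 * connectable/discoverable state and program it, unless the device is
 * powered off, powering down, or scanning is paused.
 */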
3408  int hci_update_scan_sync(struct hci_dev *hdev)
3409  {
3410  	u8 scan;
3411  
3412  	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
3413  		return 0;
3414  
3415  	if (!hdev_is_powered(hdev))
3416  		return 0;
3417  
3418  	if (mgmt_powering_down(hdev))
3419  		return 0;
3420  
3421  	if (hdev->scanning_paused)
3422  		return 0;
3423  
3424  	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
3425  	    disconnected_accept_list_entries(hdev))
3426  		scan = SCAN_PAGE;
3427  	else
3428  		scan = SCAN_DISABLED;
3429  
3430  	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
3431  		scan |= SCAN_INQUIRY;
3432  
3433  	if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
3434  	    test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
3435  		return 0;
3436  
3437  	return hci_write_scan_enable_sync(hdev, scan);
3438  }
3439  
3440  int hci_update_name_sync(struct hci_dev *hdev)
3441  {
3442  	struct hci_cp_write_local_name cp;
3443  
3444  	memset(&cp, 0, sizeof(cp));
3445  
3446  	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3447  
3448  	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LOCAL_NAME,
3449  					    sizeof(cp), &cp,
3450  					    HCI_CMD_TIMEOUT);
3451  }
3452  
3453  /* This function performs the powered update HCI command sequence after the
3454   * HCI init sequence, which ends up resetting all states. The sequence is as
3455   * follows:
3455   *
3456   * HCI_SSP_ENABLED(Enable SSP)
3457   * HCI_LE_ENABLED(Enable LE)
3458   * HCI_LE_ENABLED(use_ll_privacy(Add local IRK to Resolving List) ->
3459   * Update adv data)
3460   * Enable Authentication
3461   * lmp_bredr_capable(Set Fast Connectable -> Set Scan Type -> Set Class ->
3462   * Set Name -> Set EIR)
3463   * HCI_FORCE_STATIC_ADDR | BDADDR_ANY && !HCI_BREDR_ENABLED (Set Static Address)
3464   */
3465  int hci_powered_update_sync(struct hci_dev *hdev)
3466  {
3467  	int err;
3468  
3469  	/* Register the available SMP channels (BR/EDR and LE) only when
3470  	 * successfully powering on the controller. This late
3471  	 * registration is required so that LE SMP can clearly decide if
3472  	 * the public address or static address is used.
3473  	 */
3474  	smp_register(hdev);
3475  
3476  	err = hci_write_ssp_mode_sync(hdev, 0x01);
3477  	if (err)
3478  		return err;
3479  
3480  	err = hci_write_le_host_supported_sync(hdev, 0x01, 0x00);
3481  	if (err)
3482  		return err;
3483  
3484  	err = hci_powered_update_adv_sync(hdev);
3485  	if (err)
3486  		return err;
3487  
3488  	err = hci_write_auth_enable_sync(hdev);
3489  	if (err)
3490  		return err;
3491  
3492  	if (lmp_bredr_capable(hdev)) {
3493  		if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
3494  			hci_write_fast_connectable_sync(hdev, true);
3495  		else
3496  			hci_write_fast_connectable_sync(hdev, false);
3497  		hci_update_scan_sync(hdev);
3498  		hci_update_class_sync(hdev);
3499  		hci_update_name_sync(hdev);
3500  		hci_update_eir_sync(hdev);
3501  	}
3502  
3503  	/* If forcing static address is in use or there is no public
3504  	 * address, use the static address as random address (but skip
3505  	 * the HCI command if the current random address is already the
3506  	 * static one).
3507  	 *
3508  	 * In case BR/EDR has been disabled on a dual-mode controller
3509  	 * and a static address has been configured, then use that
3510  	 * address instead of the public BR/EDR address.
3511  	 */
3512  	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
3513  	    (!bacmp(&hdev->bdaddr, BDADDR_ANY) &&
3514  	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))) {
3515  		if (bacmp(&hdev->static_addr, BDADDR_ANY))
3516  			return hci_set_random_addr_sync(hdev,
3517  							&hdev->static_addr);
3518  	}
3519  
3520  	return 0;
3521  }
3522  
3523  /**
3524   * hci_dev_get_bd_addr_from_property - Get the Bluetooth Device Address
3525   *				       (BD_ADDR) for a HCI device from
3526   *				       a firmware node property.
3527   * @hdev:	The HCI device
3528   *
3529   * Search the firmware node for 'local-bd-address'.
3530   *
3531   * All-zero BD addresses are rejected, because those could be properties
3532   * that exist in the firmware tables, but were not updated by the firmware. For
3533   * example, the DTS could define 'local-bd-address' with an all-zero BD address.
3534   */
3535  static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev)
3536  {
3537  	struct fwnode_handle *fwnode = dev_fwnode(hdev->dev.parent);
3538  	bdaddr_t ba;
3539  	int ret;
3540  
3541  	ret = fwnode_property_read_u8_array(fwnode, "local-bd-address",
3542  					    (u8 *)&ba, sizeof(ba));
3543  	if (ret < 0 || !bacmp(&ba, BDADDR_ANY))
3544  		return;
3545  
3546  	if (test_bit(HCI_QUIRK_BDADDR_PROPERTY_BROKEN, &hdev->quirks))
3547  		baswap(&hdev->public_addr, &ba);
3548  	else
3549  		bacpy(&hdev->public_addr, &ba);
3550  }
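
/* Illustrative device tree snippet (an assumption for illustration, not
 * taken from this file) showing the firmware property consumed above:
 *
 *	bluetooth {
 *		local-bd-address = [00 11 22 33 44 55];
 *	};
 */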
3551  
3552  struct hci_init_stage {
3553  	int (*func)(struct hci_dev *hdev);
3554  };
3555  
3556  /* Run a NULL-terminated table of init stage functions */
3557  static int hci_init_stage_sync(struct hci_dev *hdev,
3558  			       const struct hci_init_stage *stage)
3559  {
3560  	size_t i;
3561  
3562  	for (i = 0; stage[i].func; i++) {
3563  		int err;
3564  
3565  		err = stage[i].func(hdev);
3566  		if (err)
3567  			return err;
3568  	}
3569  
3570  	return 0;
3571  }
3572  
3573  /* Read Local Version */
3574  static int hci_read_local_version_sync(struct hci_dev *hdev)
3575  {
3576  	return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_VERSION,
3577  				     0, NULL, HCI_CMD_TIMEOUT);
3578  }
3579  
3580  /* Read BD Address */
3581  static int hci_read_bd_addr_sync(struct hci_dev *hdev)
3582  {
3583  	return __hci_cmd_sync_status(hdev, HCI_OP_READ_BD_ADDR,
3584  				     0, NULL, HCI_CMD_TIMEOUT);
3585  }
3586  
3587  #define HCI_INIT(_func) \
3588  { \
3589  	.func = _func, \
3590  }
3591  
3592  static const struct hci_init_stage hci_init0[] = {
3593  	/* HCI_OP_READ_LOCAL_VERSION */
3594  	HCI_INIT(hci_read_local_version_sync),
3595  	/* HCI_OP_READ_BD_ADDR */
3596  	HCI_INIT(hci_read_bd_addr_sync),
3597  	{}
3598  };
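
/* Illustrative sketch (not part of the driver): any further table following
 * the same NULL-terminated convention can be declared with HCI_INIT() and
 * run through hci_init_stage_sync(); "example_stage" is hypothetical:
 *
 *	static const struct hci_init_stage example_stage[] = {
 *		HCI_INIT(hci_read_local_version_sync),
 *		{}
 *	};
 *
 *	err = hci_init_stage_sync(hdev, example_stage);
 */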
3599  
3600  int hci_reset_sync(struct hci_dev *hdev)
3601  {
3602  	int err;
3603  
3604  	set_bit(HCI_RESET, &hdev->flags);
3605  
3606  	err = __hci_cmd_sync_status(hdev, HCI_OP_RESET, 0, NULL,
3607  				    HCI_CMD_TIMEOUT);
3608  	if (err)
3609  		return err;
3610  
3611  	return 0;
3612  }
3613  
3614  static int hci_init0_sync(struct hci_dev *hdev)
3615  {
3616  	int err;
3617  
3618  	bt_dev_dbg(hdev, "");
3619  
3620  	/* Reset */
3621  	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
3622  		err = hci_reset_sync(hdev);
3623  		if (err)
3624  			return err;
3625  	}
3626  
3627  	return hci_init_stage_sync(hdev, hci_init0);
3628  }
3629  
3630  static int hci_unconf_init_sync(struct hci_dev *hdev)
3631  {
3632  	int err;
3633  
3634  	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3635  		return 0;
3636  
3637  	err = hci_init0_sync(hdev);
3638  	if (err < 0)
3639  		return err;
3640  
3641  	if (hci_dev_test_flag(hdev, HCI_SETUP))
3642  		hci_debugfs_create_basic(hdev);
3643  
3644  	return 0;
3645  }
3646  
3647  /* Read Local Supported Features. */
3648  static int hci_read_local_features_sync(struct hci_dev *hdev)
3649  {
3650  	return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_FEATURES,
3651  				     0, NULL, HCI_CMD_TIMEOUT);
3652  }
3653  
3654  /* BR Controller init stage 1 command sequence */
3655  static const struct hci_init_stage br_init1[] = {
3656  	/* HCI_OP_READ_LOCAL_FEATURES */
3657  	HCI_INIT(hci_read_local_features_sync),
3658  	/* HCI_OP_READ_LOCAL_VERSION */
3659  	HCI_INIT(hci_read_local_version_sync),
3660  	/* HCI_OP_READ_BD_ADDR */
3661  	HCI_INIT(hci_read_bd_addr_sync),
3662  	{}
3663  };
3664  
3665  /* Read Local Commands */
3666  static int hci_read_local_cmds_sync(struct hci_dev *hdev)
3667  {
3668  	/* All Bluetooth 1.2 and later controllers should support the
3669  	 * HCI command for reading the local supported commands.
3670  	 *
3671  	 * Unfortunately some controllers indicate Bluetooth 1.2 support,
3672  	 * but do not have support for this command. If that is the case,
3673  	 * the driver can quirk the behavior and skip reading the local
3674  	 * supported commands.
3675  	 */
3676  	if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
3677  	    !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
3678  		return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_COMMANDS,
3679  					     0, NULL, HCI_CMD_TIMEOUT);
3680  
3681  	return 0;
3682  }
3683  
3684  static int hci_init1_sync(struct hci_dev *hdev)
3685  {
3686  	int err;
3687  
3688  	bt_dev_dbg(hdev, "");
3689  
3690  	/* Reset */
3691  	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
3692  		err = hci_reset_sync(hdev);
3693  		if (err)
3694  			return err;
3695  	}
3696  
3697  	return hci_init_stage_sync(hdev, br_init1);
3698  }
3699  
3700  /* Read Buffer Size (ACL mtu, max pkt, etc.) */
3701  static int hci_read_buffer_size_sync(struct hci_dev *hdev)
3702  {
3703  	return __hci_cmd_sync_status(hdev, HCI_OP_READ_BUFFER_SIZE,
3704  				     0, NULL, HCI_CMD_TIMEOUT);
3705  }
3706  
3707  /* Read Class of Device */
3708  static int hci_read_dev_class_sync(struct hci_dev *hdev)
3709  {
3710  	return __hci_cmd_sync_status(hdev, HCI_OP_READ_CLASS_OF_DEV,
3711  				     0, NULL, HCI_CMD_TIMEOUT);
3712  }
3713  
3714  /* Read Local Name */
3715  static int hci_read_local_name_sync(struct hci_dev *hdev)
3716  {
3717  	return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_NAME,
3718  				     0, NULL, HCI_CMD_TIMEOUT);
3719  }
3720  
3721  /* Read Voice Setting */
3722  static int hci_read_voice_setting_sync(struct hci_dev *hdev)
3723  {
3724  	return __hci_cmd_sync_status(hdev, HCI_OP_READ_VOICE_SETTING,
3725  				     0, NULL, HCI_CMD_TIMEOUT);
3726  }
3727  
3728  /* Read Number of Supported IAC */
3729  static int hci_read_num_supported_iac_sync(struct hci_dev *hdev)
3730  {
3731  	return __hci_cmd_sync_status(hdev, HCI_OP_READ_NUM_SUPPORTED_IAC,
3732  				     0, NULL, HCI_CMD_TIMEOUT);
3733  }
3734  
3735  /* Read Current IAC LAP */
3736  static int hci_read_current_iac_lap_sync(struct hci_dev *hdev)
3737  {
3738  	return __hci_cmd_sync_status(hdev, HCI_OP_READ_CURRENT_IAC_LAP,
3739  				     0, NULL, HCI_CMD_TIMEOUT);
3740  }
3741  
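/* Program an HCI event filter. For HCI_FLT_CLEAR_ALL only the filter type
 * byte is sent; for any other filter type the full condition (condition
 * type, address, auto-accept) is included in the command parameters.
 */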
3742  static int hci_set_event_filter_sync(struct hci_dev *hdev, u8 flt_type,
3743  				     u8 cond_type, bdaddr_t *bdaddr,
3744  				     u8 auto_accept)
3745  {
3746  	struct hci_cp_set_event_filter cp;
3747  
3748  	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
3749  		return 0;
3750  
3751  	if (test_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks))
3752  		return 0;
3753  
3754  	memset(&cp, 0, sizeof(cp));
3755  	cp.flt_type = flt_type;
3756  
3757  	if (flt_type != HCI_FLT_CLEAR_ALL) {
3758  		cp.cond_type = cond_type;
3759  		bacpy(&cp.addr_conn_flt.bdaddr, bdaddr);
3760  		cp.addr_conn_flt.auto_accept = auto_accept;
3761  	}
3762  
3763  	return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_FLT,
3764  				     flt_type == HCI_FLT_CLEAR_ALL ?
3765  				     sizeof(cp.flt_type) : sizeof(cp), &cp,
3766  				     HCI_CMD_TIMEOUT);
3767  }
3768  
3769  static int hci_clear_event_filter_sync(struct hci_dev *hdev)
3770  {
3771  	if (!hci_dev_test_flag(hdev, HCI_EVENT_FILTER_CONFIGURED))
3772  		return 0;
3773  
3774  	/* In theory the state machine should not reach here unless
3775  	 * a hci_set_event_filter_sync() call succeeds, but we do
3776  	 * the check both for parity and as a future reminder.
3777  	 */
3778  	if (test_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks))
3779  		return 0;
3780  
3781  	return hci_set_event_filter_sync(hdev, HCI_FLT_CLEAR_ALL, 0x00,
3782  					 BDADDR_ANY, 0x00);
3783  }
3784  
3785  /* Connection accept timeout ~20 secs */
3786  static int hci_write_ca_timeout_sync(struct hci_dev *hdev)
3787  {
3788  	__le16 param = cpu_to_le16(0x7d00);
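	/* 0x7d00 = 32000 slots * 0.625 ms/slot = 20000 ms (~20 seconds) */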
3789  
3790  	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CA_TIMEOUT,
3791  				     sizeof(param), &param, HCI_CMD_TIMEOUT);
3792  }
3793  
3794  /* BR Controller init stage 2 command sequence */
3795  static const struct hci_init_stage br_init2[] = {
3796  	/* HCI_OP_READ_BUFFER_SIZE */
3797  	HCI_INIT(hci_read_buffer_size_sync),
3798  	/* HCI_OP_READ_CLASS_OF_DEV */
3799  	HCI_INIT(hci_read_dev_class_sync),
3800  	/* HCI_OP_READ_LOCAL_NAME */
3801  	HCI_INIT(hci_read_local_name_sync),
3802  	/* HCI_OP_READ_VOICE_SETTING */
3803  	HCI_INIT(hci_read_voice_setting_sync),
3804  	/* HCI_OP_READ_NUM_SUPPORTED_IAC */
3805  	HCI_INIT(hci_read_num_supported_iac_sync),
3806  	/* HCI_OP_READ_CURRENT_IAC_LAP */
3807  	HCI_INIT(hci_read_current_iac_lap_sync),
3808  	/* HCI_OP_SET_EVENT_FLT */
3809  	HCI_INIT(hci_clear_event_filter_sync),
3810  	/* HCI_OP_WRITE_CA_TIMEOUT */
3811  	HCI_INIT(hci_write_ca_timeout_sync),
3812  	{}
3813  };
3814  
3815  static int hci_write_ssp_mode_1_sync(struct hci_dev *hdev)
3816  {
3817  	u8 mode = 0x01;
3818  
3819  	if (!lmp_ssp_capable(hdev) || !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
3820  		return 0;
3821  
3822  	/* When SSP is available, the host features page
3823  	 * should also be available. However some
3824  	 * controllers list the max_page as 0 as long as SSP
3825  	 * has not been enabled. To achieve proper debugging
3826  	 * output, force the minimum max_page to 1 at least.
3827  	 */
3828  	hdev->max_page = 0x01;
3829  
3830  	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_MODE,
3831  				     sizeof(mode), &mode, HCI_CMD_TIMEOUT);
3832  }
3833  
3834  static int hci_write_eir_sync(struct hci_dev *hdev)
3835  {
3836  	struct hci_cp_write_eir cp;
3837  
3838  	if (!lmp_ssp_capable(hdev) || hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
3839  		return 0;
3840  
3841  	memset(hdev->eir, 0, sizeof(hdev->eir));
3842  	memset(&cp, 0, sizeof(cp));
3843  
3844  	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp,
3845  				     HCI_CMD_TIMEOUT);
3846  }
3847  
3848  static int hci_write_inquiry_mode_sync(struct hci_dev *hdev)
3849  {
3850  	u8 mode;
3851  
3852  	if (!lmp_inq_rssi_capable(hdev) &&
3853  	    !test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
3854  		return 0;
3855  
3856  	/* If Extended Inquiry Result events are supported, then
3857  	 * they are clearly preferred over Inquiry Result with RSSI
3858  	 * events.
3859  	 */
3860  	mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;
3861  
3862  	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_INQUIRY_MODE,
3863  				     sizeof(mode), &mode, HCI_CMD_TIMEOUT);
3864  }
3865  
3866  static int hci_read_inq_rsp_tx_power_sync(struct hci_dev *hdev)
3867  {
3868  	if (!lmp_inq_tx_pwr_capable(hdev))
3869  		return 0;
3870  
3871  	return __hci_cmd_sync_status(hdev, HCI_OP_READ_INQ_RSP_TX_POWER,
3872  				     0, NULL, HCI_CMD_TIMEOUT);
3873  }
3874  
3875  static int hci_read_local_ext_features_sync(struct hci_dev *hdev, u8 page)
3876  {
3877  	struct hci_cp_read_local_ext_features cp;
3878  
3879  	if (!lmp_ext_feat_capable(hdev))
3880  		return 0;
3881  
3882  	memset(&cp, 0, sizeof(cp));
3883  	cp.page = page;
3884  
3885  	return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES,
3886  				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
3887  }
3888  
3889  static int hci_read_local_ext_features_1_sync(struct hci_dev *hdev)
3890  {
3891  	return hci_read_local_ext_features_sync(hdev, 0x01);
3892  }
3893  
3894  /* HCI Controller init stage 2 command sequence */
3895  static const struct hci_init_stage hci_init2[] = {
3896  	/* HCI_OP_READ_LOCAL_COMMANDS */
3897  	HCI_INIT(hci_read_local_cmds_sync),
3898  	/* HCI_OP_WRITE_SSP_MODE */
3899  	HCI_INIT(hci_write_ssp_mode_1_sync),
3900  	/* HCI_OP_WRITE_EIR */
3901  	HCI_INIT(hci_write_eir_sync),
3902  	/* HCI_OP_WRITE_INQUIRY_MODE */
3903  	HCI_INIT(hci_write_inquiry_mode_sync),
3904  	/* HCI_OP_READ_INQ_RSP_TX_POWER */
3905  	HCI_INIT(hci_read_inq_rsp_tx_power_sync),
3906  	/* HCI_OP_READ_LOCAL_EXT_FEATURES */
3907  	HCI_INIT(hci_read_local_ext_features_1_sync),
3908  	/* HCI_OP_WRITE_AUTH_ENABLE */
3909  	HCI_INIT(hci_write_auth_enable_sync),
3910  	{}
3911  };
3912  
3913  /* Read LE Buffer Size */
3914  static int hci_le_read_buffer_size_sync(struct hci_dev *hdev)
3915  {
3916  	/* Use Read LE Buffer Size V2 if supported */
3917  	if (iso_capable(hdev) && hdev->commands[41] & 0x20)
3918  		return __hci_cmd_sync_status(hdev,
3919  					     HCI_OP_LE_READ_BUFFER_SIZE_V2,
3920  					     0, NULL, HCI_CMD_TIMEOUT);
3921  
3922  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_BUFFER_SIZE,
3923  				     0, NULL, HCI_CMD_TIMEOUT);
3924  }
3925  
3926  /* Read LE Local Supported Features */
3927  static int hci_le_read_local_features_sync(struct hci_dev *hdev)
3928  {
3929  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_LOCAL_FEATURES,
3930  				     0, NULL, HCI_CMD_TIMEOUT);
3931  }
3932  
3933  /* Read LE Supported States */
3934  static int hci_le_read_supported_states_sync(struct hci_dev *hdev)
3935  {
3936  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_SUPPORTED_STATES,
3937  				     0, NULL, HCI_CMD_TIMEOUT);
3938  }
3939  
3940  /* LE Controller init stage 2 command sequence */
3941  static const struct hci_init_stage le_init2[] = {
3942  	/* HCI_OP_LE_READ_LOCAL_FEATURES */
3943  	HCI_INIT(hci_le_read_local_features_sync),
3944  	/* HCI_OP_LE_READ_BUFFER_SIZE */
3945  	HCI_INIT(hci_le_read_buffer_size_sync),
3946  	/* HCI_OP_LE_READ_SUPPORTED_STATES */
3947  	HCI_INIT(hci_le_read_supported_states_sync),
3948  	{}
3949  };
3950  
3951  static int hci_init2_sync(struct hci_dev *hdev)
3952  {
3953  	int err;
3954  
3955  	bt_dev_dbg(hdev, "");
3956  
3957  	err = hci_init_stage_sync(hdev, hci_init2);
3958  	if (err)
3959  		return err;
3960  
3961  	if (lmp_bredr_capable(hdev)) {
3962  		err = hci_init_stage_sync(hdev, br_init2);
3963  		if (err)
3964  			return err;
3965  	} else {
3966  		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
3967  	}
3968  
3969  	if (lmp_le_capable(hdev)) {
3970  		err = hci_init_stage_sync(hdev, le_init2);
3971  		if (err)
3972  			return err;
3973  		/* LE-only controllers have LE implicitly enabled */
3974  		if (!lmp_bredr_capable(hdev))
3975  			hci_dev_set_flag(hdev, HCI_LE_ENABLED);
3976  	}
3977  
3978  	return 0;
3979  }
3980  
3981  static int hci_set_event_mask_sync(struct hci_dev *hdev)
3982  {
3983  	/* The second byte is 0xff instead of 0x9f (two reserved bits
3984  	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
3985  	 * command otherwise.
3986  	 */
3987  	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
3988  
3989  	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
3990  	 * any event mask for pre-1.2 devices.
3991  	 */
3992  	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
3993  		return 0;
3994  
3995  	if (lmp_bredr_capable(hdev)) {
3996  		events[4] |= 0x01; /* Flow Specification Complete */
3997  
3998  		/* Don't set Disconnect Complete and mode change when
3999  		 * suspended as that would wake up the host when disconnecting
4000  		 * due to suspend.
4001  		 */
4002  		if (hdev->suspended) {
4003  			events[0] &= 0xef;
4004  			events[2] &= 0xf7;
4005  		}
4006  	} else {
4007  		/* Use a different default for LE-only devices */
4008  		memset(events, 0, sizeof(events));
4009  		events[1] |= 0x20; /* Command Complete */
4010  		events[1] |= 0x40; /* Command Status */
4011  		events[1] |= 0x80; /* Hardware Error */
4012  
4013  		/* If the controller supports the Disconnect command, enable
4014  		 * the corresponding event. In addition enable packet flow
4015  		 * control related events.
4016  		 */
4017  		if (hdev->commands[0] & 0x20) {
4018  			/* Don't set Disconnect Complete when suspended as that
4019  			 * would wake up the host when disconnecting due to
4020  			 * suspend.
4021  			 */
4022  			if (!hdev->suspended)
4023  				events[0] |= 0x10; /* Disconnection Complete */
4024  			events[2] |= 0x04; /* Number of Completed Packets */
4025  			events[3] |= 0x02; /* Data Buffer Overflow */
4026  		}
4027  
4028  		/* If the controller supports the Read Remote Version
4029  		 * Information command, enable the corresponding event.
4030  		 */
4031  		if (hdev->commands[2] & 0x80)
4032  			events[1] |= 0x08; /* Read Remote Version Information
4033  					    * Complete
4034  					    */
4035  
4036  		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
4037  			events[0] |= 0x80; /* Encryption Change */
4038  			events[5] |= 0x80; /* Encryption Key Refresh Complete */
4039  		}
4040  	}
4041  
4042  	if (lmp_inq_rssi_capable(hdev) ||
4043  	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
4044  		events[4] |= 0x02; /* Inquiry Result with RSSI */
4045  
4046  	if (lmp_ext_feat_capable(hdev))
4047  		events[4] |= 0x04; /* Read Remote Extended Features Complete */
4048  
4049  	if (lmp_esco_capable(hdev)) {
4050  		events[5] |= 0x08; /* Synchronous Connection Complete */
4051  		events[5] |= 0x10; /* Synchronous Connection Changed */
4052  	}
4053  
4054  	if (lmp_sniffsubr_capable(hdev))
4055  		events[5] |= 0x20; /* Sniff Subrating */
4056  
4057  	if (lmp_pause_enc_capable(hdev))
4058  		events[5] |= 0x80; /* Encryption Key Refresh Complete */
4059  
4060  	if (lmp_ext_inq_capable(hdev))
4061  		events[5] |= 0x40; /* Extended Inquiry Result */
4062  
4063  	if (lmp_no_flush_capable(hdev))
4064  		events[7] |= 0x01; /* Enhanced Flush Complete */
4065  
4066  	if (lmp_lsto_capable(hdev))
4067  		events[6] |= 0x80; /* Link Supervision Timeout Changed */
4068  
4069  	if (lmp_ssp_capable(hdev)) {
4070  		events[6] |= 0x01;	/* IO Capability Request */
4071  		events[6] |= 0x02;	/* IO Capability Response */
4072  		events[6] |= 0x04;	/* User Confirmation Request */
4073  		events[6] |= 0x08;	/* User Passkey Request */
4074  		events[6] |= 0x10;	/* Remote OOB Data Request */
4075  		events[6] |= 0x20;	/* Simple Pairing Complete */
4076  		events[7] |= 0x04;	/* User Passkey Notification */
4077  		events[7] |= 0x08;	/* Keypress Notification */
4078  		events[7] |= 0x10;	/* Remote Host Supported
4079  					 * Features Notification
4080  					 */
4081  	}
4082  
4083  	if (lmp_le_capable(hdev))
4084  		events[7] |= 0x20;	/* LE Meta-Event */
4085  
4086  	return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_MASK,
4087  				     sizeof(events), events, HCI_CMD_TIMEOUT);
4088  }
4089  
4090  static int hci_read_stored_link_key_sync(struct hci_dev *hdev)
4091  {
4092  	struct hci_cp_read_stored_link_key cp;
4093  
4094  	if (!(hdev->commands[6] & 0x20) ||
4095  	    test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks))
4096  		return 0;
4097  
4098  	memset(&cp, 0, sizeof(cp));
4099  	bacpy(&cp.bdaddr, BDADDR_ANY);
4100  	cp.read_all = 0x01;
4101  
4102  	return __hci_cmd_sync_status(hdev, HCI_OP_READ_STORED_LINK_KEY,
4103  				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
4104  }
4105  
4106  static int hci_setup_link_policy_sync(struct hci_dev *hdev)
4107  {
4108  	struct hci_cp_write_def_link_policy cp;
4109  	u16 link_policy = 0;
4110  
4111  	if (!(hdev->commands[5] & 0x10))
4112  		return 0;
4113  
4114  	memset(&cp, 0, sizeof(cp));
4115  
4116  	if (lmp_rswitch_capable(hdev))
4117  		link_policy |= HCI_LP_RSWITCH;
4118  	if (lmp_hold_capable(hdev))
4119  		link_policy |= HCI_LP_HOLD;
4120  	if (lmp_sniff_capable(hdev))
4121  		link_policy |= HCI_LP_SNIFF;
4122  	if (lmp_park_capable(hdev))
4123  		link_policy |= HCI_LP_PARK;
4124  
4125  	cp.policy = cpu_to_le16(link_policy);
4126  
4127  	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
4128  				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
4129  }
4130  
4131  static int hci_read_page_scan_activity_sync(struct hci_dev *hdev)
4132  {
4133  	if (!(hdev->commands[8] & 0x01))
4134  		return 0;
4135  
4136  	return __hci_cmd_sync_status(hdev, HCI_OP_READ_PAGE_SCAN_ACTIVITY,
4137  				     0, NULL, HCI_CMD_TIMEOUT);
4138  }
4139  
4140  static int hci_read_def_err_data_reporting_sync(struct hci_dev *hdev)
4141  {
4142  	if (!(hdev->commands[18] & 0x04) ||
4143  	    !(hdev->features[0][6] & LMP_ERR_DATA_REPORTING) ||
4144  	    test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks))
4145  		return 0;
4146  
4147  	return __hci_cmd_sync_status(hdev, HCI_OP_READ_DEF_ERR_DATA_REPORTING,
4148  				     0, NULL, HCI_CMD_TIMEOUT);
4149  }
4150  
4151  static int hci_read_page_scan_type_sync(struct hci_dev *hdev)
4152  {
4153  	/* Some older Broadcom based Bluetooth 1.2 controllers do not
4154  	 * support the Read Page Scan Type command. Check support for
4155  	 * this command in the bit mask of supported commands.
4156  	 */
4157  	if (!(hdev->commands[13] & 0x01))
4158  		return 0;
4159  
4160  	return __hci_cmd_sync_status(hdev, HCI_OP_READ_PAGE_SCAN_TYPE,
4161  				     0, NULL, HCI_CMD_TIMEOUT);
4162  }
4163  
4164  /* Read features beyond page 1 if available */
4165  static int hci_read_local_ext_features_all_sync(struct hci_dev *hdev)
4166  {
4167  	u8 page;
4168  	int err;
4169  
4170  	if (!lmp_ext_feat_capable(hdev))
4171  		return 0;
4172  
4173  	for (page = 2; page < HCI_MAX_PAGES && page <= hdev->max_page;
4174  	     page++) {
4175  		err = hci_read_local_ext_features_sync(hdev, page);
4176  		if (err)
4177  			return err;
4178  	}
4179  
4180  	return 0;
4181  }
4182  
4183  /* HCI Controller init stage 3 command sequence */
4184  static const struct hci_init_stage hci_init3[] = {
4185  	/* HCI_OP_SET_EVENT_MASK */
4186  	HCI_INIT(hci_set_event_mask_sync),
4187  	/* HCI_OP_READ_STORED_LINK_KEY */
4188  	HCI_INIT(hci_read_stored_link_key_sync),
4189  	/* HCI_OP_WRITE_DEF_LINK_POLICY */
4190  	HCI_INIT(hci_setup_link_policy_sync),
4191  	/* HCI_OP_READ_PAGE_SCAN_ACTIVITY */
4192  	HCI_INIT(hci_read_page_scan_activity_sync),
4193  	/* HCI_OP_READ_DEF_ERR_DATA_REPORTING */
4194  	HCI_INIT(hci_read_def_err_data_reporting_sync),
4195  	/* HCI_OP_READ_PAGE_SCAN_TYPE */
4196  	HCI_INIT(hci_read_page_scan_type_sync),
4197  	/* HCI_OP_READ_LOCAL_EXT_FEATURES */
4198  	HCI_INIT(hci_read_local_ext_features_all_sync),
4199  	{}
4200  };
4201  
4202  static int hci_le_set_event_mask_sync(struct hci_dev *hdev)
4203  {
4204  	u8 events[8];
4205  
4206  	if (!lmp_le_capable(hdev))
4207  		return 0;
4208  
4209  	memset(events, 0, sizeof(events));
4210  
4211  	if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
4212  		events[0] |= 0x10;	/* LE Long Term Key Request */
4213  
4214  	/* If the controller supports the Connection Parameters Request
4215  	 * Link Layer Procedure, enable the corresponding event.
4216  	 */
4217  	if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
4218  		/* LE Remote Connection Parameter Request */
4219  		events[0] |= 0x20;
4220  
4221  	/* If the controller supports the Data Length Extension
4222  	 * feature, enable the corresponding event.
4223  	 */
4224  	if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
4225  		events[0] |= 0x40;	/* LE Data Length Change */
4226  
4227  	/* If the controller supports the LL Privacy feature or LE Extended Adv,
4228  	 * enable the corresponding event.
4229  	 */
4230  	if (use_enhanced_conn_complete(hdev))
4231  		events[1] |= 0x02;	/* LE Enhanced Connection Complete */
4232  
4233  	/* If the controller supports Extended Scanner Filter
4234  	 * Policies, enable the corresponding event.
4235  	 */
4236  	if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
4237  		events[1] |= 0x04;	/* LE Direct Advertising Report */
4238  
4239  	/* If the controller supports Channel Selection Algorithm #2
4240  	 * feature, enable the corresponding event.
4241  	 */
4242  	if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
4243  		events[2] |= 0x08;	/* LE Channel Selection Algorithm */
4244  
4245  	/* If the controller supports the LE Set Scan Enable command,
4246  	 * enable the corresponding advertising report event.
4247  	 */
4248  	if (hdev->commands[26] & 0x08)
4249  		events[0] |= 0x02;	/* LE Advertising Report */
4250  
4251  	/* If the controller supports the LE Create Connection
4252  	 * command, enable the corresponding event.
4253  	 */
4254  	if (hdev->commands[26] & 0x10)
4255  		events[0] |= 0x01;	/* LE Connection Complete */
4256  
4257  	/* If the controller supports the LE Connection Update
4258  	 * command, enable the corresponding event.
4259  	 */
4260  	if (hdev->commands[27] & 0x04)
4261  		events[0] |= 0x04;	/* LE Connection Update Complete */
4262  
4263  	/* If the controller supports the LE Read Remote Used Features
4264  	 * command, enable the corresponding event.
4265  	 */
4266  	if (hdev->commands[27] & 0x20)
4267  		/* LE Read Remote Used Features Complete */
4268  		events[0] |= 0x08;
4269  
4270  	/* If the controller supports the LE Read Local P-256
4271  	 * Public Key command, enable the corresponding event.
4272  	 */
4273  	if (hdev->commands[34] & 0x02)
4274  		/* LE Read Local P-256 Public Key Complete */
4275  		events[0] |= 0x80;
4276  
4277  	/* If the controller supports the LE Generate DHKey
4278  	 * command, enable the corresponding event.
4279  	 */
4280  	if (hdev->commands[34] & 0x04)
4281  		events[1] |= 0x01;	/* LE Generate DHKey Complete */
4282  
4283  	/* If the controller supports the LE Set Default PHY or
4284  	 * LE Set PHY commands, enable the corresponding event.
4285  	 */
4286  	if (hdev->commands[35] & (0x20 | 0x40))
4287  		events[1] |= 0x08;        /* LE PHY Update Complete */
4288  
4289  	/* If the controller supports LE Set Extended Scan Parameters
4290  	 * and LE Set Extended Scan Enable commands, enable the
4291  	 * corresponding event.
4292  	 */
4293  	if (use_ext_scan(hdev))
4294  		events[1] |= 0x10;	/* LE Extended Advertising Report */
4295  
4296  	/* If the controller supports the LE Extended Advertising
4297  	 * command, enable the corresponding event.
4298  	 */
4299  	if (ext_adv_capable(hdev))
4300  		events[2] |= 0x02;	/* LE Advertising Set Terminated */
4301  
4302  	if (cis_capable(hdev)) {
4303  		events[3] |= 0x01;	/* LE CIS Established */
4304  		if (cis_peripheral_capable(hdev))
4305  			events[3] |= 0x02; /* LE CIS Request */
4306  	}
4307  
4308  	if (bis_capable(hdev)) {
4309  		events[1] |= 0x20;	/* LE PA Report */
4310  		events[1] |= 0x40;	/* LE PA Sync Established */
4311  		events[3] |= 0x04;	/* LE Create BIG Complete */
4312  		events[3] |= 0x08;	/* LE Terminate BIG Complete */
4313  		events[3] |= 0x10;	/* LE BIG Sync Established */
4314  		events[3] |= 0x20;	/* LE BIG Sync Loss */
4315  		events[4] |= 0x02;	/* LE BIG Info Advertising Report */
4316  	}
4317  
4318  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EVENT_MASK,
4319  				     sizeof(events), events, HCI_CMD_TIMEOUT);
4320  }
4321  
4322  /* Read LE Advertising Channel TX Power */
4323  static int hci_le_read_adv_tx_power_sync(struct hci_dev *hdev)
4324  {
4325  	if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) {
4326  		/* The HCI TS spec forbids mixing legacy and extended
4327  		 * advertising commands, and READ_ADV_TX_POWER is a
4328  		 * legacy command. So do not call it if extended adv
4329  		 * is supported; otherwise the controller will return
4330  		 * COMMAND_DISALLOWED for extended commands.
4331  		 */
4332  		return __hci_cmd_sync_status(hdev,
4333  					       HCI_OP_LE_READ_ADV_TX_POWER,
4334  					       0, NULL, HCI_CMD_TIMEOUT);
4335  	}
4336  
4337  	return 0;
4338  }
4339  
4340  /* Read LE Min/Max Tx Power */
4341  static int hci_le_read_tx_power_sync(struct hci_dev *hdev)
4342  {
4343  	if (!(hdev->commands[38] & 0x80) ||
4344  	    test_bit(HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER, &hdev->quirks))
4345  		return 0;
4346  
4347  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_TRANSMIT_POWER,
4348  				     0, NULL, HCI_CMD_TIMEOUT);
4349  }
4350  
4351  /* Read LE Accept List Size */
4352  static int hci_le_read_accept_list_size_sync(struct hci_dev *hdev)
4353  {
4354  	if (!(hdev->commands[26] & 0x40))
4355  		return 0;
4356  
4357  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_ACCEPT_LIST_SIZE,
4358  				     0, NULL, HCI_CMD_TIMEOUT);
4359  }
4360  
4361  /* Clear LE Accept List */
4362  static int hci_le_clear_accept_list_sync(struct hci_dev *hdev)
4363  {
4364  	if (!(hdev->commands[26] & 0x80))
4365  		return 0;
4366  
4367  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_CLEAR_ACCEPT_LIST, 0, NULL,
4368  				     HCI_CMD_TIMEOUT);
4369  }
4370  
4371  /* Read LE Resolving List Size */
4372  static int hci_le_read_resolv_list_size_sync(struct hci_dev *hdev)
4373  {
4374  	if (!(hdev->commands[34] & 0x40))
4375  		return 0;
4376  
4377  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_RESOLV_LIST_SIZE,
4378  				     0, NULL, HCI_CMD_TIMEOUT);
4379  }
4380  
4381  /* Clear LE Resolving List */
4382  static int hci_le_clear_resolv_list_sync(struct hci_dev *hdev)
4383  {
4384  	if (!(hdev->commands[34] & 0x20))
4385  		return 0;
4386  
4387  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL,
4388  				     HCI_CMD_TIMEOUT);
4389  }
4390  
4391  /* Set RPA timeout */
4392  static int hci_le_set_rpa_timeout_sync(struct hci_dev *hdev)
4393  {
4394  	__le16 timeout = cpu_to_le16(hdev->rpa_timeout);
4395  
4396  	if (!(hdev->commands[35] & 0x04) ||
4397  	    test_bit(HCI_QUIRK_BROKEN_SET_RPA_TIMEOUT, &hdev->quirks))
4398  		return 0;
4399  
4400  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_RPA_TIMEOUT,
4401  				     sizeof(timeout), &timeout,
4402  				     HCI_CMD_TIMEOUT);
4403  }
4404  
4405  /* Read LE Maximum Data Length */
4406  static int hci_le_read_max_data_len_sync(struct hci_dev *hdev)
4407  {
4408  	if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT))
4409  		return 0;
4410  
4411  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL,
4412  				     HCI_CMD_TIMEOUT);
4413  }
4414  
4415  /* Read LE Suggested Default Data Length */
4416  static int hci_le_read_def_data_len_sync(struct hci_dev *hdev)
4417  {
4418  	if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT))
4419  		return 0;
4420  
4421  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL,
4422  				     HCI_CMD_TIMEOUT);
4423  }
4424  
4425  /* Read LE Number of Supported Advertising Sets */
4426  static int hci_le_read_num_support_adv_sets_sync(struct hci_dev *hdev)
4427  {
4428  	if (!ext_adv_capable(hdev))
4429  		return 0;
4430  
4431  	return __hci_cmd_sync_status(hdev,
4432  				     HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
4433  				     0, NULL, HCI_CMD_TIMEOUT);
4434  }
4435  
4436  /* Write LE Host Supported */
4437  static int hci_set_le_support_sync(struct hci_dev *hdev)
4438  {
4439  	struct hci_cp_write_le_host_supported cp;
4440  
4441  	/* LE-only devices do not support explicit enablement */
4442  	if (!lmp_bredr_capable(hdev))
4443  		return 0;
4444  
4445  	memset(&cp, 0, sizeof(cp));
4446  
4447  	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
4448  		cp.le = 0x01;
4449  		cp.simul = 0x00;
4450  	}
4451  
4452  	if (cp.le == lmp_host_le_capable(hdev))
4453  		return 0;
4454  
4455  	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED,
4456  				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
4457  }
4458  
4459  /* LE Set Host Feature */
4460  static int hci_le_set_host_feature_sync(struct hci_dev *hdev)
4461  {
4462  	struct hci_cp_le_set_host_feature cp;
4463  
4464  	if (!iso_capable(hdev))
4465  		return 0;
4466  
4467  	memset(&cp, 0, sizeof(cp));
4468  
4469  	/* Isochronous Channels (Host Support) */
4470  	cp.bit_number = 32;
4471  	cp.bit_value = 1;
4472  
4473  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_HOST_FEATURE,
4474  				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
4475  }
4476  
4477  /* LE Controller init stage 3 command sequence */
4478  static const struct hci_init_stage le_init3[] = {
4479  	/* HCI_OP_LE_SET_EVENT_MASK */
4480  	HCI_INIT(hci_le_set_event_mask_sync),
4481  	/* HCI_OP_LE_READ_ADV_TX_POWER */
4482  	HCI_INIT(hci_le_read_adv_tx_power_sync),
4483  	/* HCI_OP_LE_READ_TRANSMIT_POWER */
4484  	HCI_INIT(hci_le_read_tx_power_sync),
4485  	/* HCI_OP_LE_READ_ACCEPT_LIST_SIZE */
4486  	HCI_INIT(hci_le_read_accept_list_size_sync),
4487  	/* HCI_OP_LE_CLEAR_ACCEPT_LIST */
4488  	HCI_INIT(hci_le_clear_accept_list_sync),
4489  	/* HCI_OP_LE_READ_RESOLV_LIST_SIZE */
4490  	HCI_INIT(hci_le_read_resolv_list_size_sync),
4491  	/* HCI_OP_LE_CLEAR_RESOLV_LIST */
4492  	HCI_INIT(hci_le_clear_resolv_list_sync),
4493  	/* HCI_OP_LE_SET_RPA_TIMEOUT */
4494  	HCI_INIT(hci_le_set_rpa_timeout_sync),
4495  	/* HCI_OP_LE_READ_MAX_DATA_LEN */
4496  	HCI_INIT(hci_le_read_max_data_len_sync),
4497  	/* HCI_OP_LE_READ_DEF_DATA_LEN */
4498  	HCI_INIT(hci_le_read_def_data_len_sync),
4499  	/* HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS */
4500  	HCI_INIT(hci_le_read_num_support_adv_sets_sync),
4501  	/* HCI_OP_WRITE_LE_HOST_SUPPORTED */
4502  	HCI_INIT(hci_set_le_support_sync),
4503  	/* HCI_OP_LE_SET_HOST_FEATURE */
4504  	HCI_INIT(hci_le_set_host_feature_sync),
4505  	{}
4506  };
4507  
4508  static int hci_init3_sync(struct hci_dev *hdev)
4509  {
4510  	int err;
4511  
4512  	bt_dev_dbg(hdev, "");
4513  
4514  	err = hci_init_stage_sync(hdev, hci_init3);
4515  	if (err)
4516  		return err;
4517  
4518  	if (lmp_le_capable(hdev))
4519  		return hci_init_stage_sync(hdev, le_init3);
4520  
4521  	return 0;
4522  }
4523  
4524  static int hci_delete_stored_link_key_sync(struct hci_dev *hdev)
4525  {
4526  	struct hci_cp_delete_stored_link_key cp;
4527  
4528  	/* Some Broadcom based Bluetooth controllers do not support the
4529  	 * Delete Stored Link Key command. They are clearly indicating its
4530  	 * absence in the bit mask of supported commands.
4531  	 *
4532  	 * Check the supported commands and only if the command is marked
4533  	 * as supported send it. If not supported assume that the controller
4534  	 * does not have actual support for stored link keys which makes this
4535  	 * command redundant anyway.
4536  	 *
4537  	 * Some controllers indicate that they support handling deleting
4538  	 * stored link keys, but they don't. The quirk lets a driver
4539  	 * just disable this command.
4540  	 */
4541  	if (!(hdev->commands[6] & 0x80) ||
4542  	    test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks))
4543  		return 0;
4544  
4545  	memset(&cp, 0, sizeof(cp));
4546  	bacpy(&cp.bdaddr, BDADDR_ANY);
4547  	cp.delete_all = 0x01;
4548  
4549  	return __hci_cmd_sync_status(hdev, HCI_OP_DELETE_STORED_LINK_KEY,
4550  				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
4551  }
4552  
4553  static int hci_set_event_mask_page_2_sync(struct hci_dev *hdev)
4554  {
4555  	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
4556  	bool changed = false;
4557  
4558  	/* Set event mask page 2 if the HCI command for it is supported */
4559  	if (!(hdev->commands[22] & 0x04))
4560  		return 0;
4561  
4562  	/* If Connectionless Peripheral Broadcast central role is supported,
4563  	 * enable all necessary events for it.
4564  	 */
4565  	if (lmp_cpb_central_capable(hdev)) {
4566  		events[1] |= 0x40;	/* Triggered Clock Capture */
4567  		events[1] |= 0x80;	/* Synchronization Train Complete */
4568  		events[2] |= 0x08;	/* Truncated Page Complete */
4569  		events[2] |= 0x20;	/* CPB Channel Map Change */
4570  		changed = true;
4571  	}
4572  
4573  	/* If Connectionless Peripheral Broadcast peripheral role is supported,
4574  	 * enable all necessary events for it.
4575  	 */
4576  	if (lmp_cpb_peripheral_capable(hdev)) {
4577  		events[2] |= 0x01;	/* Synchronization Train Received */
4578  		events[2] |= 0x02;	/* CPB Receive */
4579  		events[2] |= 0x04;	/* CPB Timeout */
4580  		events[2] |= 0x10;	/* Peripheral Page Response Timeout */
4581  		changed = true;
4582  	}
4583  
4584  	/* Enable Authenticated Payload Timeout Expired event if supported */
4585  	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
4586  		events[2] |= 0x80;
4587  		changed = true;
4588  	}
4589  
4590  	/* Some Broadcom based controllers indicate support for Set Event
4591  	 * Mask Page 2 command, but then actually do not support it. Since
4592  	 * the default value is all bits set to zero, the command is only
4593  	 * required if the event mask has to be changed. In case no change
4594  	 * to the event mask is needed, skip this command.
4595  	 */
4596  	if (!changed)
4597  		return 0;
4598  
4599  	return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_MASK_PAGE_2,
4600  				     sizeof(events), events, HCI_CMD_TIMEOUT);
4601  }
4602  
4603  /* Read local codec list if the HCI command is supported */
4604  static int hci_read_local_codecs_sync(struct hci_dev *hdev)
4605  {
4606  	if (hdev->commands[45] & 0x04)
4607  		hci_read_supported_codecs_v2(hdev);
4608  	else if (hdev->commands[29] & 0x20)
4609  		hci_read_supported_codecs(hdev);
4610  
4611  	return 0;
4612  }
4613  
4614  /* Read local pairing options if the HCI command is supported */
4615  static int hci_read_local_pairing_opts_sync(struct hci_dev *hdev)
4616  {
4617  	if (!(hdev->commands[41] & 0x08))
4618  		return 0;
4619  
4620  	return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_PAIRING_OPTS,
4621  				     0, NULL, HCI_CMD_TIMEOUT);
4622  }
4623  
4624  /* Get MWS transport configuration if the HCI command is supported */
4625  static int hci_get_mws_transport_config_sync(struct hci_dev *hdev)
4626  {
4627  	if (!mws_transport_config_capable(hdev))
4628  		return 0;
4629  
4630  	return __hci_cmd_sync_status(hdev, HCI_OP_GET_MWS_TRANSPORT_CONFIG,
4631  				     0, NULL, HCI_CMD_TIMEOUT);
4632  }
4633  
4634  /* Check for Synchronization Train support */
4635  static int hci_read_sync_train_params_sync(struct hci_dev *hdev)
4636  {
4637  	if (!lmp_sync_train_capable(hdev))
4638  		return 0;
4639  
4640  	return __hci_cmd_sync_status(hdev, HCI_OP_READ_SYNC_TRAIN_PARAMS,
4641  				     0, NULL, HCI_CMD_TIMEOUT);
4642  }
4643  
4644  /* Enable Secure Connections if supported and configured */
4645  static int hci_write_sc_support_1_sync(struct hci_dev *hdev)
4646  {
4647  	u8 support = 0x01;
4648  
4649  	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED) ||
4650  	    !bredr_sc_enabled(hdev))
4651  		return 0;
4652  
4653  	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SC_SUPPORT,
4654  				     sizeof(support), &support,
4655  				     HCI_CMD_TIMEOUT);
4656  }
4657  
4658  /* Set erroneous data reporting, if supported, to match the wideband
4659   * speech setting value
4660   */
4661  static int hci_set_err_data_report_sync(struct hci_dev *hdev)
4662  {
4663  	struct hci_cp_write_def_err_data_reporting cp;
4664  	bool enabled = hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED);
4665  
4666  	if (!(hdev->commands[18] & 0x08) ||
4667  	    !(hdev->features[0][6] & LMP_ERR_DATA_REPORTING) ||
4668  	    test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks))
4669  		return 0;
4670  
4671  	if (enabled == hdev->err_data_reporting)
4672  		return 0;
4673  
4674  	memset(&cp, 0, sizeof(cp));
4675  	cp.err_data_reporting = enabled ? ERR_DATA_REPORTING_ENABLED :
4676  				ERR_DATA_REPORTING_DISABLED;
4677  
4678  	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
4679  				    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
4680  }
4681  
4682  static const struct hci_init_stage hci_init4[] = {
4683  	 /* HCI_OP_DELETE_STORED_LINK_KEY */
4684  	HCI_INIT(hci_delete_stored_link_key_sync),
4685  	/* HCI_OP_SET_EVENT_MASK_PAGE_2 */
4686  	HCI_INIT(hci_set_event_mask_page_2_sync),
4687  	/* HCI_OP_READ_LOCAL_CODECS */
4688  	HCI_INIT(hci_read_local_codecs_sync),
4689  	 /* HCI_OP_READ_LOCAL_PAIRING_OPTS */
4690  	HCI_INIT(hci_read_local_pairing_opts_sync),
4691  	 /* HCI_OP_GET_MWS_TRANSPORT_CONFIG */
4692  	HCI_INIT(hci_get_mws_transport_config_sync),
4693  	 /* HCI_OP_READ_SYNC_TRAIN_PARAMS */
4694  	HCI_INIT(hci_read_sync_train_params_sync),
4695  	/* HCI_OP_WRITE_SC_SUPPORT */
4696  	HCI_INIT(hci_write_sc_support_1_sync),
4697  	/* HCI_OP_WRITE_DEF_ERR_DATA_REPORTING */
4698  	HCI_INIT(hci_set_err_data_report_sync),
4699  	{}
4700  };
4701  
4702  /* Set Suggested Default Data Length to maximum if supported */
4703  static int hci_le_set_write_def_data_len_sync(struct hci_dev *hdev)
4704  {
4705  	struct hci_cp_le_write_def_data_len cp;
4706  
4707  	if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT))
4708  		return 0;
4709  
4710  	memset(&cp, 0, sizeof(cp));
4711  	cp.tx_len = cpu_to_le16(hdev->le_max_tx_len);
4712  	cp.tx_time = cpu_to_le16(hdev->le_max_tx_time);
4713  
4714  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN,
4715  				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
4716  }
4717  
4718  /* Set Default PHY parameters if the command is supported, enabling all
4719   * supported PHYs according to the LE Features bits.
4720   */
4721  static int hci_le_set_default_phy_sync(struct hci_dev *hdev)
4722  {
4723  	struct hci_cp_le_set_default_phy cp;
4724  
4725  	if (!(hdev->commands[35] & 0x20)) {
4726  		/* If the command is not supported it means only 1M PHY is
4727  		 * supported.
4728  		 */
4729  		hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
4730  		hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
4731  		return 0;
4732  	}
4733  
4734  	memset(&cp, 0, sizeof(cp));
4735  	cp.all_phys = 0x00;
4736  	cp.tx_phys = HCI_LE_SET_PHY_1M;
4737  	cp.rx_phys = HCI_LE_SET_PHY_1M;
4738  
4739  	/* Enables 2M PHY if supported */
4740  	if (le_2m_capable(hdev)) {
4741  		cp.tx_phys |= HCI_LE_SET_PHY_2M;
4742  		cp.rx_phys |= HCI_LE_SET_PHY_2M;
4743  	}
4744  
4745  	/* Enables Coded PHY if supported */
4746  	if (le_coded_capable(hdev)) {
4747  		cp.tx_phys |= HCI_LE_SET_PHY_CODED;
4748  		cp.rx_phys |= HCI_LE_SET_PHY_CODED;
4749  	}
4750  
4751  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
4752  				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
4753  }
4754  
4755  static const struct hci_init_stage le_init4[] = {
4756  	/* HCI_OP_LE_WRITE_DEF_DATA_LEN */
4757  	HCI_INIT(hci_le_set_write_def_data_len_sync),
4758  	/* HCI_OP_LE_SET_DEFAULT_PHY */
4759  	HCI_INIT(hci_le_set_default_phy_sync),
4760  	{}
4761  };
4762  
4763  static int hci_init4_sync(struct hci_dev *hdev)
4764  {
4765  	int err;
4766  
4767  	bt_dev_dbg(hdev, "");
4768  
4769  	err = hci_init_stage_sync(hdev, hci_init4);
4770  	if (err)
4771  		return err;
4772  
4773  	if (lmp_le_capable(hdev))
4774  		return hci_init_stage_sync(hdev, le_init4);
4775  
4776  	return 0;
4777  }
4778  
4779  static int hci_init_sync(struct hci_dev *hdev)
4780  {
4781  	int err;
4782  
4783  	err = hci_init1_sync(hdev);
4784  	if (err < 0)
4785  		return err;
4786  
4787  	if (hci_dev_test_flag(hdev, HCI_SETUP))
4788  		hci_debugfs_create_basic(hdev);
4789  
4790  	err = hci_init2_sync(hdev);
4791  	if (err < 0)
4792  		return err;
4793  
4794  	err = hci_init3_sync(hdev);
4795  	if (err < 0)
4796  		return err;
4797  
4798  	err = hci_init4_sync(hdev);
4799  	if (err < 0)
4800  		return err;
4801  
4802  	/* This function is only called when the controller is actually in
4803  	 * configured state. When the controller is marked as unconfigured,
4804  	 * this initialization procedure is not run.
4805  	 *
4806  	 * It means that it is possible that a controller runs through its
4807  	 * setup phase and then discovers missing settings. If that is the
4808   * case, then this function will not be called. It will then only
4809  	 * be called during the config phase.
4810  	 *
4811  	 * So only when in setup phase or config phase, create the debugfs
4812  	 * entries and register the SMP channels.
4813  	 */
4814  	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
4815  	    !hci_dev_test_flag(hdev, HCI_CONFIG))
4816  		return 0;
4817  
4818  	if (hci_dev_test_and_set_flag(hdev, HCI_DEBUGFS_CREATED))
4819  		return 0;
4820  
4821  	hci_debugfs_create_common(hdev);
4822  
4823  	if (lmp_bredr_capable(hdev))
4824  		hci_debugfs_create_bredr(hdev);
4825  
4826  	if (lmp_le_capable(hdev))
4827  		hci_debugfs_create_le(hdev);
4828  
4829  	return 0;
4830  }
4831  
4832  #define HCI_QUIRK_BROKEN(_quirk, _desc) { HCI_QUIRK_BROKEN_##_quirk, _desc }
4833  
4834  static const struct {
4835  	unsigned long quirk;
4836  	const char *desc;
4837  } hci_broken_table[] = {
4838  	HCI_QUIRK_BROKEN(LOCAL_COMMANDS,
4839  			 "HCI Read Local Supported Commands not supported"),
4840  	HCI_QUIRK_BROKEN(STORED_LINK_KEY,
4841  			 "HCI Delete Stored Link Key command is advertised, "
4842  			 "but not supported."),
4843  	HCI_QUIRK_BROKEN(ERR_DATA_REPORTING,
4844  			 "HCI Read Default Erroneous Data Reporting command is "
4845  			 "advertised, but not supported."),
4846  	HCI_QUIRK_BROKEN(READ_TRANSMIT_POWER,
4847  			 "HCI Read Transmit Power Level command is advertised, "
4848  			 "but not supported."),
4849  	HCI_QUIRK_BROKEN(FILTER_CLEAR_ALL,
4850  			 "HCI Set Event Filter command not supported."),
4851  	HCI_QUIRK_BROKEN(ENHANCED_SETUP_SYNC_CONN,
4852  			 "HCI Enhanced Setup Synchronous Connection command is "
4853  			 "advertised, but not supported."),
4854  	HCI_QUIRK_BROKEN(SET_RPA_TIMEOUT,
4855  			 "HCI LE Set Random Private Address Timeout command is "
4856  			 "advertised, but not supported."),
4857  	HCI_QUIRK_BROKEN(EXT_CREATE_CONN,
4858  			 "HCI LE Extended Create Connection command is "
4859  			 "advertised, but not supported."),
4860  	HCI_QUIRK_BROKEN(WRITE_AUTH_PAYLOAD_TIMEOUT,
4861  			 "HCI WRITE AUTH PAYLOAD TIMEOUT command leads "
4862  			 "to unexpected SMP errors when pairing "
4863  			 "and will not be used."),
4864  	HCI_QUIRK_BROKEN(LE_CODED,
4865  			 "HCI LE Coded PHY feature bit is set, "
4866  			 "but its usage is not supported.")
4867  };
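
/* Illustrative sketch (not part of this table): a transport driver
 * typically flags one of these quirks before registering the HCI device,
 * using the same test_bit()/set_bit() idiom seen throughout this file:
 *
 *	set_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks);
 */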
4868  
4869  /* This function handles hdev setup stage:
4870   *
4871   * Calls hdev->setup
4872   * Setup address if HCI_QUIRK_USE_BDADDR_PROPERTY is set.
4873   */
4874  static int hci_dev_setup_sync(struct hci_dev *hdev)
4875  {
4876  	int ret = 0;
4877  	bool invalid_bdaddr;
4878  	size_t i;
4879  
4880  	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
4881  	    !test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks))
4882  		return 0;
4883  
4884  	bt_dev_dbg(hdev, "");
4885  
4886  	hci_sock_dev_event(hdev, HCI_DEV_SETUP);
4887  
4888  	if (hdev->setup)
4889  		ret = hdev->setup(hdev);
4890  
4891  	for (i = 0; i < ARRAY_SIZE(hci_broken_table); i++) {
4892  		if (test_bit(hci_broken_table[i].quirk, &hdev->quirks))
4893  			bt_dev_warn(hdev, "%s", hci_broken_table[i].desc);
4894  	}
4895  
4896  	/* The transport driver can set the quirk to mark the
4897  	 * BD_ADDR invalid before creating the HCI device or in
4898  	 * its setup callback.
4899  	 */
4900  	invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
4901  			 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
4902  	if (!ret) {
4903  		if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks) &&
4904  		    !bacmp(&hdev->public_addr, BDADDR_ANY))
4905  			hci_dev_get_bd_addr_from_property(hdev);
4906  
4907  		if (invalid_bdaddr && bacmp(&hdev->public_addr, BDADDR_ANY) &&
4908  		    hdev->set_bdaddr) {
4909  			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
4910  			if (!ret)
4911  				invalid_bdaddr = false;
4912  		}
4913  	}
4914  
4915  	/* The transport driver can set these quirks before
4916  	 * creating the HCI device or in its setup callback.
4917  	 *
4918  	 * For the invalid BD_ADDR quirk it is possible that
4919  	 * it becomes a valid address if the bootloader does
4920  	 * provide it (see above).
4921  	 *
4922  	 * In case any of them is set, the controller has to
4923  	 * start up as unconfigured.
4924  	 */
4925  	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
4926  	    invalid_bdaddr)
4927  		hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
4928  
4929  	/* For an unconfigured controller it is required to
4930  	 * read at least the version information provided by
4931  	 * the Read Local Version Information command.
4932  	 *
4933  	 * If the set_bdaddr driver callback is provided, then
4934  	 * also the original Bluetooth public device address
4935  	 * will be read using the Read BD Address command.
4936  	 */
4937  	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
4938  		return hci_unconf_init_sync(hdev);
4939  
4940  	return ret;
4941  }
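/* On device-tree based systems the address consumed above via
 * hci_dev_get_bd_addr_from_property() typically comes from the firmware
 * node's "local-bd-address" property, e.g. (illustrative snippet; the
 * property stores the address in little-endian byte order):
 *
 *	&bluetooth {
 *		local-bd-address = [ 66 55 44 33 22 11 ];
 *	};
 */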
4942  
4943  /* This function handles hdev init stage:
4944   *
4945   * Calls hci_dev_setup_sync to perform setup stage
4946   * Calls hci_init_sync to perform HCI command init sequence
4947   */
4948  static int hci_dev_init_sync(struct hci_dev *hdev)
4949  {
4950  	int ret;
4951  
4952  	bt_dev_dbg(hdev, "");
4953  
4954  	atomic_set(&hdev->cmd_cnt, 1);
4955  	set_bit(HCI_INIT, &hdev->flags);
4956  
4957  	ret = hci_dev_setup_sync(hdev);
4958  
4959  	if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
4960  		/* If public address change is configured, ensure that
4961  		 * the address gets programmed. If the driver does not
4962  		 * support changing the public address, fail the power
4963  		 * on procedure.
4964  		 */
4965  		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
4966  		    hdev->set_bdaddr)
4967  			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
4968  		else
4969  			ret = -EADDRNOTAVAIL;
4970  	}
4971  
4972  	if (!ret) {
4973  		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
4974  		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4975  			ret = hci_init_sync(hdev);
4976  			if (!ret && hdev->post_init)
4977  				ret = hdev->post_init(hdev);
4978  		}
4979  	}
4980  
4981  	/* If the HCI Reset command is clearing all diagnostic settings,
4982  	 * then they need to be reprogrammed after the init procedure
4983  	 * completed.
4984  	 */
4985  	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
4986  	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
4987  	    hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
4988  		ret = hdev->set_diag(hdev, true);
4989  
4990  	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4991  		msft_do_open(hdev);
4992  		aosp_do_open(hdev);
4993  	}
4994  
4995  	clear_bit(HCI_INIT, &hdev->flags);
4996  
4997  	return ret;
4998  }
4999  
5000  int hci_dev_open_sync(struct hci_dev *hdev)
5001  {
5002  	int ret;
5003  
5004  	bt_dev_dbg(hdev, "");
5005  
5006  	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
5007  		ret = -ENODEV;
5008  		goto done;
5009  	}
5010  
5011  	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
5012  	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
5013  		/* Check for rfkill but allow the HCI setup stage to
5014  		 * proceed (which in itself doesn't cause any RF activity).
5015  		 */
5016  		if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
5017  			ret = -ERFKILL;
5018  			goto done;
5019  		}
5020  
5021  		/* Check for valid public address or a configured static
5022  		 * random address, but let the HCI setup proceed to
5023  		 * be able to determine if there is a public address
5024  		 * or not.
5025  		 *
5026  		 * In case of user channel usage, it is not important
5027  		 * if a public address or static random address is
5028  		 * available.
5029  		 */
5030  		if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
5031  		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
5032  		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
5033  			ret = -EADDRNOTAVAIL;
5034  			goto done;
5035  		}
5036  	}
5037  
5038  	if (test_bit(HCI_UP, &hdev->flags)) {
5039  		ret = -EALREADY;
5040  		goto done;
5041  	}
5042  
5043  	if (hdev->open(hdev)) {
5044  		ret = -EIO;
5045  		goto done;
5046  	}
5047  
5048  	hci_devcd_reset(hdev);
5049  
5050  	set_bit(HCI_RUNNING, &hdev->flags);
5051  	hci_sock_dev_event(hdev, HCI_DEV_OPEN);
5052  
5053  	ret = hci_dev_init_sync(hdev);
5054  	if (!ret) {
5055  		hci_dev_hold(hdev);
5056  		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
5057  		hci_adv_instances_set_rpa_expired(hdev, true);
5058  		set_bit(HCI_UP, &hdev->flags);
5059  		hci_sock_dev_event(hdev, HCI_DEV_UP);
5060  		hci_leds_update_powered(hdev, true);
5061  		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
5062  		    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
5063  		    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
5064  		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
5065  		    hci_dev_test_flag(hdev, HCI_MGMT)) {
5066  			ret = hci_powered_update_sync(hdev);
5067  			mgmt_power_on(hdev, ret);
5068  		}
5069  	} else {
5070  		/* Init failed, cleanup */
5071  		flush_work(&hdev->tx_work);
5072  
5073  		/* Since hci_rx_work() can wake up new cmd_work, it
5074  		 * should be flushed first to avoid unexpected calls to
5075  		 * hci_cmd_work().
5076  		 */
5077  		flush_work(&hdev->rx_work);
5078  		flush_work(&hdev->cmd_work);
5079  
5080  		skb_queue_purge(&hdev->cmd_q);
5081  		skb_queue_purge(&hdev->rx_q);
5082  
5083  		if (hdev->flush)
5084  			hdev->flush(hdev);
5085  
5086  		if (hdev->sent_cmd) {
5087  			cancel_delayed_work_sync(&hdev->cmd_timer);
5088  			kfree_skb(hdev->sent_cmd);
5089  			hdev->sent_cmd = NULL;
5090  		}
5091  
5092  		if (hdev->req_skb) {
5093  			kfree_skb(hdev->req_skb);
5094  			hdev->req_skb = NULL;
5095  		}
5096  
5097  		clear_bit(HCI_RUNNING, &hdev->flags);
5098  		hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
5099  
5100  		hdev->close(hdev);
5101  		hdev->flags &= BIT(HCI_RAW);
5102  	}
5103  
5104  done:
5105  	return ret;
5106  }
5107  
5108  /* This function requires the caller holds hdev->lock */
5109  static void hci_pend_le_actions_clear(struct hci_dev *hdev)
5110  {
5111  	struct hci_conn_params *p;
5112  
5113  	list_for_each_entry(p, &hdev->le_conn_params, list) {
5114  		hci_pend_le_list_del_init(p);
5115  		if (p->conn) {
5116  			hci_conn_drop(p->conn);
5117  			hci_conn_put(p->conn);
5118  			p->conn = NULL;
5119  		}
5120  	}
5121  
5122  	BT_DBG("All LE pending actions cleared");
5123  }
5124  
5125  static int hci_dev_shutdown(struct hci_dev *hdev)
5126  {
5127  	int err = 0;
5128  	/* Similar to how we first do setup and then set the exclusive access
5129  	 * bit for userspace, we must first unset userchannel and then clean up.
5130  	 * Otherwise, the kernel can't properly use the hci channel to clean up
5131  	 * the controller (some shutdown routines require sending additional
5132  	 * commands to the controller for example).
5133  	 */
5134  	bool was_userchannel =
5135  		hci_dev_test_and_clear_flag(hdev, HCI_USER_CHANNEL);
5136  
5137  	if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
5138  	    test_bit(HCI_UP, &hdev->flags)) {
5139  		/* Execute vendor specific shutdown routine */
5140  		if (hdev->shutdown)
5141  			err = hdev->shutdown(hdev);
5142  	}
5143  
5144  	if (was_userchannel)
5145  		hci_dev_set_flag(hdev, HCI_USER_CHANNEL);
5146  
5147  	return err;
5148  }
5149  
5150  int hci_dev_close_sync(struct hci_dev *hdev)
5151  {
5152  	bool auto_off;
5153  	int err = 0;
5154  
5155  	bt_dev_dbg(hdev, "");
5156  
5157  	cancel_delayed_work(&hdev->power_off);
5158  	cancel_delayed_work(&hdev->ncmd_timer);
5159  	cancel_delayed_work(&hdev->le_scan_disable);
5160  	cancel_delayed_work(&hdev->le_scan_restart);
5161  
5162  	hci_request_cancel_all(hdev);
5163  
5164  	if (hdev->adv_instance_timeout) {
5165  		cancel_delayed_work_sync(&hdev->adv_instance_expire);
5166  		hdev->adv_instance_timeout = 0;
5167  	}
5168  
5169  	err = hci_dev_shutdown(hdev);
5170  
5171  	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
5172  		cancel_delayed_work_sync(&hdev->cmd_timer);
5173  		return err;
5174  	}
5175  
5176  	hci_leds_update_powered(hdev, false);
5177  
5178  	/* Flush RX and TX works */
5179  	flush_work(&hdev->tx_work);
5180  	flush_work(&hdev->rx_work);
5181  
5182  	if (hdev->discov_timeout > 0) {
5183  		hdev->discov_timeout = 0;
5184  		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
5185  		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
5186  	}
5187  
5188  	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
5189  		cancel_delayed_work(&hdev->service_cache);
5190  
5191  	if (hci_dev_test_flag(hdev, HCI_MGMT)) {
5192  		struct adv_info *adv_instance;
5193  
5194  		cancel_delayed_work_sync(&hdev->rpa_expired);
5195  
5196  		list_for_each_entry(adv_instance, &hdev->adv_instances, list)
5197  			cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
5198  	}
5199  
5200  	/* Avoid potential lockdep warnings from the *_flush() calls by
5201  	 * ensuring the workqueue is empty up front.
5202  	 */
5203  	drain_workqueue(hdev->workqueue);
5204  
5205  	hci_dev_lock(hdev);
5206  
5207  	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
5208  
5209  	auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
5210  
5211  	if (!auto_off && !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
5212  	    hci_dev_test_flag(hdev, HCI_MGMT))
5213  		__mgmt_power_off(hdev);
5214  
5215  	hci_inquiry_cache_flush(hdev);
5216  	hci_pend_le_actions_clear(hdev);
5217  	hci_conn_hash_flush(hdev);
5218  	/* Prevent data races on hdev->smp_data or hdev->smp_bredr_data */
5219  	smp_unregister(hdev);
5220  	hci_dev_unlock(hdev);
5221  
5222  	hci_sock_dev_event(hdev, HCI_DEV_DOWN);
5223  
5224  	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
5225  		aosp_do_close(hdev);
5226  		msft_do_close(hdev);
5227  	}
5228  
5229  	if (hdev->flush)
5230  		hdev->flush(hdev);
5231  
5232  	/* Reset device */
5233  	skb_queue_purge(&hdev->cmd_q);
5234  	atomic_set(&hdev->cmd_cnt, 1);
5235  	if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
5236  	    !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
5237  		set_bit(HCI_INIT, &hdev->flags);
5238  		hci_reset_sync(hdev);
5239  		clear_bit(HCI_INIT, &hdev->flags);
5240  	}
5241  
5242  	/* Flush cmd work */
5243  	flush_work(&hdev->cmd_work);
5244  
5245  	/* Drop queues */
5246  	skb_queue_purge(&hdev->rx_q);
5247  	skb_queue_purge(&hdev->cmd_q);
5248  	skb_queue_purge(&hdev->raw_q);
5249  
5250  	/* Drop last sent command */
5251  	if (hdev->sent_cmd) {
5252  		cancel_delayed_work_sync(&hdev->cmd_timer);
5253  		kfree_skb(hdev->sent_cmd);
5254  		hdev->sent_cmd = NULL;
5255  	}
5256  
5257  	/* Drop last request */
5258  	if (hdev->req_skb) {
5259  		kfree_skb(hdev->req_skb);
5260  		hdev->req_skb = NULL;
5261  	}
5262  
5263  	clear_bit(HCI_RUNNING, &hdev->flags);
5264  	hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
5265  
5266  	/* After this point our queues are empty and no tasks are scheduled. */
5267  	hdev->close(hdev);
5268  
5269  	/* Clear flags */
5270  	hdev->flags &= BIT(HCI_RAW);
5271  	hci_dev_clear_volatile_flags(hdev);
5272  
5273  	memset(hdev->eir, 0, sizeof(hdev->eir));
5274  	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
5275  	bacpy(&hdev->random_addr, BDADDR_ANY);
5276  	hci_codec_list_clear(&hdev->local_codecs);
5277  
5278  	hci_dev_put(hdev);
5279  	return err;
5280  }
5281  
5282  /* This function performs the power-on HCI command sequence as follows:
5283   *
5284   * If the controller is already up (HCI_UP), it runs hci_powered_update_sync;
5285   * otherwise it runs hci_dev_open_sync, which is followed by
5286   * hci_powered_update_sync once the init sequence has completed.
5287   */
5288  static int hci_power_on_sync(struct hci_dev *hdev)
5289  {
5290  	int err;
5291  
5292  	if (test_bit(HCI_UP, &hdev->flags) &&
5293  	    hci_dev_test_flag(hdev, HCI_MGMT) &&
5294  	    hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
5295  		cancel_delayed_work(&hdev->power_off);
5296  		return hci_powered_update_sync(hdev);
5297  	}
5298  
5299  	err = hci_dev_open_sync(hdev);
5300  	if (err < 0)
5301  		return err;
5302  
5303  	/* During the HCI setup phase, a few error conditions are
5304  	 * ignored and they need to be checked now. If they are still
5305  	 * valid, it is important to return the device back off.
5306  	 */
5307  	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
5308  	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
5309  	    (!bacmp(&hdev->bdaddr, BDADDR_ANY) &&
5310  	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
5311  		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
5312  		hci_dev_close_sync(hdev);
5313  	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
5314  		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
5315  				   HCI_AUTO_OFF_TIMEOUT);
5316  	}
5317  
5318  	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
5319  		/* For unconfigured devices, set the HCI_RAW flag
5320  		 * so that userspace can easily identify them.
5321  		 */
5322  		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
5323  			set_bit(HCI_RAW, &hdev->flags);
5324  
5325  		/* For fully configured devices, this will send
5326  		 * the Index Added event. For unconfigured devices,
5327  		 * it will send the Unconfigured Index Added event.
5328  		 *
5329  		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
5330  		 * and no event will be sent.
5331  		 */
5332  		mgmt_index_added(hdev);
5333  	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
5334  		/* Once the controller is configured, it is
5335  		 * important to clear the HCI_RAW flag.
5336  		 */
5337  		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
5338  			clear_bit(HCI_RAW, &hdev->flags);
5339  
5340  		/* Powering on the controller with HCI_CONFIG set only
5341  		 * happens with the transition from unconfigured to
5342  		 * configured. This will send the Index Added event.
5343  		 */
5344  		mgmt_index_added(hdev);
5345  	}
5346  
5347  	return 0;
5348  }
5349  
5350  static int hci_remote_name_cancel_sync(struct hci_dev *hdev, bdaddr_t *addr)
5351  {
5352  	struct hci_cp_remote_name_req_cancel cp;
5353  
5354  	memset(&cp, 0, sizeof(cp));
5355  	bacpy(&cp.bdaddr, addr);
5356  
5357  	return __hci_cmd_sync_status(hdev, HCI_OP_REMOTE_NAME_REQ_CANCEL,
5358  				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
5359  }
5360  
5361  int hci_stop_discovery_sync(struct hci_dev *hdev)
5362  {
5363  	struct discovery_state *d = &hdev->discovery;
5364  	struct inquiry_entry *e;
5365  	int err;
5366  
5367  	bt_dev_dbg(hdev, "state %u", hdev->discovery.state);
5368  
5369  	if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
5370  		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
5371  			err = __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY_CANCEL,
5372  						    0, NULL, HCI_CMD_TIMEOUT);
5373  			if (err)
5374  				return err;
5375  		}
5376  
5377  		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
5378  			cancel_delayed_work(&hdev->le_scan_disable);
5379  			cancel_delayed_work(&hdev->le_scan_restart);
5380  
5381  			err = hci_scan_disable_sync(hdev);
5382  			if (err)
5383  				return err;
5384  		}
5385  
5386  	} else {
5387  		err = hci_scan_disable_sync(hdev);
5388  		if (err)
5389  			return err;
5390  	}
5391  
5392  	/* Resume advertising if it was paused */
5393  	if (use_ll_privacy(hdev))
5394  		hci_resume_advertising_sync(hdev);
5395  
5396  	/* No further actions needed for LE-only discovery */
5397  	if (d->type == DISCOV_TYPE_LE)
5398  		return 0;
5399  
5400  	if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
5401  		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
5402  						     NAME_PENDING);
5403  		if (!e)
5404  			return 0;
5405  
5406  		/* Ignore cancel errors since they should not interfere with
5407  		 * stopping the discovery.
5408  		 */
5409  		hci_remote_name_cancel_sync(hdev, &e->data.bdaddr);
5410  	}
5411  
5412  	return 0;
5413  }
5414  
5415  static int hci_disconnect_sync(struct hci_dev *hdev, struct hci_conn *conn,
5416  			       u8 reason)
5417  {
5418  	struct hci_cp_disconnect cp;
5419  
5420  	if (test_bit(HCI_CONN_BIG_CREATED, &conn->flags)) {
5421  		/* This is a BIS connection, hci_conn_del will
5422  		 * do the necessary cleanup.
5423  		 */
5424  		hci_dev_lock(hdev);
5425  		hci_conn_failed(conn, reason);
5426  		hci_dev_unlock(hdev);
5427  
5428  		return 0;
5429  	}
5430  
5431  	memset(&cp, 0, sizeof(cp));
5432  	cp.handle = cpu_to_le16(conn->handle);
5433  	cp.reason = reason;
5434  
5435  	/* Wait for HCI_EV_DISCONN_COMPLETE, not HCI_EV_CMD_STATUS, when the
5436  	 * reason is anything but HCI_ERROR_REMOTE_POWER_OFF. This reason is
5437  	 * used when suspending or powering off, where we don't want to wait
5438  	 * for the peer's response.
5439  	 */
5440  	if (reason != HCI_ERROR_REMOTE_POWER_OFF)
5441  		return __hci_cmd_sync_status_sk(hdev, HCI_OP_DISCONNECT,
5442  						sizeof(cp), &cp,
5443  						HCI_EV_DISCONN_COMPLETE,
5444  						HCI_CMD_TIMEOUT, NULL);
5445  
5446  	return __hci_cmd_sync_status(hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp,
5447  				     HCI_CMD_TIMEOUT);
5448  }
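/* Illustrative callers: a user-initiated disconnect would pass a reason
 * such as HCI_ERROR_REMOTE_USER_TERM and wait for HCI_EV_DISCONN_COMPLETE,
 * whereas hci_power_off_sync() below passes HCI_ERROR_REMOTE_POWER_OFF and
 * only waits for the command status.
 */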
5449  
5450  static int hci_le_connect_cancel_sync(struct hci_dev *hdev,
5451  				      struct hci_conn *conn, u8 reason)
5452  {
5453  	/* Return the reason if still scanning, since the connection will
5454  	 * probably be cleaned up directly.
5455  	 */
5456  	if (test_bit(HCI_CONN_SCANNING, &conn->flags))
5457  		return reason;
5458  
5459  	if (conn->role == HCI_ROLE_SLAVE ||
5460  	    test_and_set_bit(HCI_CONN_CANCEL, &conn->flags))
5461  		return 0;
5462  
5463  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_CREATE_CONN_CANCEL,
5464  				     0, NULL, HCI_CMD_TIMEOUT);
5465  }
5466  
5467  static int hci_connect_cancel_sync(struct hci_dev *hdev, struct hci_conn *conn,
5468  				   u8 reason)
5469  {
5470  	if (conn->type == LE_LINK)
5471  		return hci_le_connect_cancel_sync(hdev, conn, reason);
5472  
5473  	if (conn->type == ISO_LINK) {
5474  		/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
5475  		 * page 1857:
5476  		 *
5477  		 * If this command is issued for a CIS on the Central and the
5478  		 * CIS is successfully terminated before being established,
5479  		 * then an HCI_LE_CIS_Established event shall also be sent for
5480  		 * this CIS with the Status Operation Cancelled by Host (0x44).
5481  		 */
5482  		if (test_bit(HCI_CONN_CREATE_CIS, &conn->flags))
5483  			return hci_disconnect_sync(hdev, conn, reason);
5484  
5485  		/* A CIS for which no Create CIS was sent has nothing to cancel */
5486  		if (bacmp(&conn->dst, BDADDR_ANY))
5487  			return HCI_ERROR_LOCAL_HOST_TERM;
5488  
5489  		/* There is no way to cancel a BIS without terminating the BIG
5490  		 * which is done later on connection cleanup.
5491  		 */
5492  		return 0;
5493  	}
5494  
5495  	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
5496  		return 0;
5497  
5498  	/* Wait for HCI_EV_CONN_COMPLETE, not HCI_EV_CMD_STATUS, when the
5499  	 * reason is anything but HCI_ERROR_REMOTE_POWER_OFF. This reason is
5500  	 * used when suspending or powering off, where we don't want to wait
5501  	 * for the peer's response.
5502  	 */
5503  	if (reason != HCI_ERROR_REMOTE_POWER_OFF)
5504  		return __hci_cmd_sync_status_sk(hdev, HCI_OP_CREATE_CONN_CANCEL,
5505  						6, &conn->dst,
5506  						HCI_EV_CONN_COMPLETE,
5507  						HCI_CMD_TIMEOUT, NULL);
5508  
5509  	return __hci_cmd_sync_status(hdev, HCI_OP_CREATE_CONN_CANCEL,
5510  				     6, &conn->dst, HCI_CMD_TIMEOUT);
5511  }
5512  
5513  static int hci_reject_sco_sync(struct hci_dev *hdev, struct hci_conn *conn,
5514  			       u8 reason)
5515  {
5516  	struct hci_cp_reject_sync_conn_req cp;
5517  
5518  	memset(&cp, 0, sizeof(cp));
5519  	bacpy(&cp.bdaddr, &conn->dst);
5520  	cp.reason = reason;
5521  
5522  	/* SCO rejection has its own limited set of
5523  	 * allowed error values (0x0D-0x0F).
5524  	 */
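	/* For reference, per the Core Spec these are: 0x0D Connection
	 * Rejected due to Limited Resources, 0x0E Connection Rejected due
	 * to Security Reasons, 0x0F Connection Rejected due to Unacceptable
	 * BD_ADDR.
	 */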
5525  	if (reason < 0x0d || reason > 0x0f)
5526  		cp.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
5527  
5528  	return __hci_cmd_sync_status(hdev, HCI_OP_REJECT_SYNC_CONN_REQ,
5529  				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
5530  }
5531  
5532  static int hci_le_reject_cis_sync(struct hci_dev *hdev, struct hci_conn *conn,
5533  				  u8 reason)
5534  {
5535  	struct hci_cp_le_reject_cis cp;
5536  
5537  	memset(&cp, 0, sizeof(cp));
5538  	cp.handle = cpu_to_le16(conn->handle);
5539  	cp.reason = reason;
5540  
5541  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_REJECT_CIS,
5542  				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
5543  }
5544  
5545  static int hci_reject_conn_sync(struct hci_dev *hdev, struct hci_conn *conn,
5546  				u8 reason)
5547  {
5548  	struct hci_cp_reject_conn_req cp;
5549  
5550  	if (conn->type == ISO_LINK)
5551  		return hci_le_reject_cis_sync(hdev, conn, reason);
5552  
5553  	if (conn->type == SCO_LINK || conn->type == ESCO_LINK)
5554  		return hci_reject_sco_sync(hdev, conn, reason);
5555  
5556  	memset(&cp, 0, sizeof(cp));
5557  	bacpy(&cp.bdaddr, &conn->dst);
5558  	cp.reason = reason;
5559  
5560  	return __hci_cmd_sync_status(hdev, HCI_OP_REJECT_CONN_REQ,
5561  				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
5562  }
5563  
5564  int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason)
5565  {
5566  	int err = 0;
5567  	u16 handle = conn->handle;
5568  	bool disconnect = false;
5569  	struct hci_conn *c;
5570  
5571  	switch (conn->state) {
5572  	case BT_CONNECTED:
5573  	case BT_CONFIG:
5574  		err = hci_disconnect_sync(hdev, conn, reason);
5575  		break;
5576  	case BT_CONNECT:
5577  		err = hci_connect_cancel_sync(hdev, conn, reason);
5578  		break;
5579  	case BT_CONNECT2:
5580  		err = hci_reject_conn_sync(hdev, conn, reason);
5581  		break;
5582  	case BT_OPEN:
5583  	case BT_BOUND:
5584  		break;
5585  	default:
5586  		disconnect = true;
5587  		break;
5588  	}
5589  
5590  	hci_dev_lock(hdev);
5591  
5592  	/* Check if the connection has been cleaned up concurrently */
5593  	c = hci_conn_hash_lookup_handle(hdev, handle);
5594  	if (!c || c != conn) {
5595  		err = 0;
5596  		goto unlock;
5597  	}
5598  
5599  	/* Clean up the hci_conn object if it cannot be cancelled, as that
5600  	 * likely means the controller and host stack are out of sync, or,
5601  	 * in the LE case, it was still scanning and can therefore be cleaned
5602  	 * up safely.
5603  	 */
5604  	if (disconnect) {
5605  		conn->state = BT_CLOSED;
5606  		hci_disconn_cfm(conn, reason);
5607  		hci_conn_del(conn);
5608  	} else {
5609  		hci_conn_failed(conn, reason);
5610  	}
5611  
5612  unlock:
5613  	hci_dev_unlock(hdev);
5614  	return err;
5615  }
5616  
5617  static int hci_disconnect_all_sync(struct hci_dev *hdev, u8 reason)
5618  {
5619  	struct list_head *head = &hdev->conn_hash.list;
5620  	struct hci_conn *conn;
5621  
5622  	rcu_read_lock();
5623  	while ((conn = list_first_or_null_rcu(head, struct hci_conn, list))) {
5624  		/* Make sure the connection is not freed while unlocking */
5625  		conn = hci_conn_get(conn);
5626  		rcu_read_unlock();
5627  		/* Disregard possible errors since hci_conn_del will have been
5628  		 * called even if an error occurred: a failure causes
5629  		 * hci_conn_failed to be called, which calls hci_conn_del
5630  		 * internally.
5631  		 */
5632  		hci_abort_conn_sync(hdev, conn, reason);
5633  		hci_conn_put(conn);
5634  		rcu_read_lock();
5635  	}
5636  	rcu_read_unlock();
5637  
5638  	return 0;
5639  }
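/* Note the locking pattern above: the RCU read lock cannot be held across
 * hci_abort_conn_sync(), which issues synchronous HCI commands and may
 * sleep, so each iteration takes a reference to the connection, drops the
 * lock, aborts the connection and then re-acquires the lock.
 */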
5640  
5641  /* This function performs the power-off HCI command sequence as follows:
5642   *
5643   * Clear Advertising
5644   * Stop Discovery
5645   * Disconnect all connections
5646   * hci_dev_close_sync
5647   */
5648  static int hci_power_off_sync(struct hci_dev *hdev)
5649  {
5650  	int err;
5651  
5652  	/* If controller is already down there is nothing to do */
5653  	if (!test_bit(HCI_UP, &hdev->flags))
5654  		return 0;
5655  
5656  	if (test_bit(HCI_ISCAN, &hdev->flags) ||
5657  	    test_bit(HCI_PSCAN, &hdev->flags)) {
5658  		err = hci_write_scan_enable_sync(hdev, 0x00);
5659  		if (err)
5660  			return err;
5661  	}
5662  
5663  	err = hci_clear_adv_sync(hdev, NULL, false);
5664  	if (err)
5665  		return err;
5666  
5667  	err = hci_stop_discovery_sync(hdev);
5668  	if (err)
5669  		return err;
5670  
5671  	/* Terminated due to Power Off */
5672  	err = hci_disconnect_all_sync(hdev, HCI_ERROR_REMOTE_POWER_OFF);
5673  	if (err)
5674  		return err;
5675  
5676  	return hci_dev_close_sync(hdev);
5677  }
5678  
5679  int hci_set_powered_sync(struct hci_dev *hdev, u8 val)
5680  {
5681  	if (val)
5682  		return hci_power_on_sync(hdev);
5683  
5684  	return hci_power_off_sync(hdev);
5685  }
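/* This is expected to be invoked from the mgmt Set Powered handling via
 * the cmd_sync machinery, with val mirroring the requested setting
 * (0x01 power on, 0x00 power off).
 */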
5686  
5687  static int hci_write_iac_sync(struct hci_dev *hdev)
5688  {
5689  	struct hci_cp_write_current_iac_lap cp;
5690  
5691  	if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
5692  		return 0;
5693  
5694  	memset(&cp, 0, sizeof(cp));
5695  
5696  	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
5697  		/* Limited discoverable mode */
5698  		cp.num_iac = min_t(u8, hdev->num_iac, 2);
5699  		cp.iac_lap[0] = 0x00;	/* LIAC */
5700  		cp.iac_lap[1] = 0x8b;
5701  		cp.iac_lap[2] = 0x9e;
5702  		cp.iac_lap[3] = 0x33;	/* GIAC */
5703  		cp.iac_lap[4] = 0x8b;
5704  		cp.iac_lap[5] = 0x9e;
5705  	} else {
5706  		/* General discoverable mode */
5707  		cp.num_iac = 1;
5708  		cp.iac_lap[0] = 0x33;	/* GIAC */
5709  		cp.iac_lap[1] = 0x8b;
5710  		cp.iac_lap[2] = 0x9e;
5711  	}
5712  
5713  	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CURRENT_IAC_LAP,
5714  				     (cp.num_iac * 3) + 1, &cp,
5715  				     HCI_CMD_TIMEOUT);
5716  }
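/* The IAC LAPs written above are the little-endian encodings of GIAC
 * (0x9E8B33) and LIAC (0x9E8B00), which is why each access code appears
 * as three bytes in reverse order.
 */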
5717  
5718  int hci_update_discoverable_sync(struct hci_dev *hdev)
5719  {
5720  	int err = 0;
5721  
5722  	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
5723  		err = hci_write_iac_sync(hdev);
5724  		if (err)
5725  			return err;
5726  
5727  		err = hci_update_scan_sync(hdev);
5728  		if (err)
5729  			return err;
5730  
5731  		err = hci_update_class_sync(hdev);
5732  		if (err)
5733  			return err;
5734  	}
5735  
5736  	/* Advertising instances don't use the global discoverable setting, so
5737  	 * only update AD if advertising was enabled using Set Advertising.
5738  	 */
5739  	if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
5740  		err = hci_update_adv_data_sync(hdev, 0x00);
5741  		if (err)
5742  			return err;
5743  
5744  		/* Discoverable mode affects the local advertising
5745  		 * address in limited privacy mode.
5746  		 */
5747  		if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
5748  			if (ext_adv_capable(hdev))
5749  				err = hci_start_ext_adv_sync(hdev, 0x00);
5750  			else
5751  				err = hci_enable_advertising_sync(hdev);
5752  		}
5753  	}
5754  
5755  	return err;
5756  }
5757  
5758  static int update_discoverable_sync(struct hci_dev *hdev, void *data)
5759  {
5760  	return hci_update_discoverable_sync(hdev);
5761  }
5762  
5763  int hci_update_discoverable(struct hci_dev *hdev)
5764  {
5765  	/* Only queue if it would have any effect */
5766  	if (hdev_is_powered(hdev) &&
5767  	    hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
5768  	    hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
5769  	    hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
5770  		return hci_cmd_sync_queue(hdev, update_discoverable_sync, NULL,
5771  					  NULL);
5772  
5773  	return 0;
5774  }
5775  
5776  int hci_update_connectable_sync(struct hci_dev *hdev)
5777  {
5778  	int err;
5779  
5780  	err = hci_update_scan_sync(hdev);
5781  	if (err)
5782  		return err;
5783  
5784  	/* If BR/EDR is not enabled and we disable advertising as a
5785  	 * by-product of disabling connectable, we need to update the
5786  	 * advertising flags.
5787  	 */
5788  	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
5789  		err = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
5790  
5791  	/* Update the advertising parameters if necessary */
5792  	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
5793  	    !list_empty(&hdev->adv_instances)) {
5794  		if (ext_adv_capable(hdev))
5795  			err = hci_start_ext_adv_sync(hdev,
5796  						     hdev->cur_adv_instance);
5797  		else
5798  			err = hci_enable_advertising_sync(hdev);
5799  
5800  		if (err)
5801  			return err;
5802  	}
5803  
5804  	return hci_update_passive_scan_sync(hdev);
5805  }
5806  
5807  static int hci_inquiry_sync(struct hci_dev *hdev, u8 length)
5808  {
5809  	const u8 giac[3] = { 0x33, 0x8b, 0x9e };
5810  	const u8 liac[3] = { 0x00, 0x8b, 0x9e };
5811  	struct hci_cp_inquiry cp;
5812  
5813  	bt_dev_dbg(hdev, "");
5814  
5815  	if (test_bit(HCI_INQUIRY, &hdev->flags))
5816  		return 0;
5817  
5818  	hci_dev_lock(hdev);
5819  	hci_inquiry_cache_flush(hdev);
5820  	hci_dev_unlock(hdev);
5821  
5822  	memset(&cp, 0, sizeof(cp));
5823  
5824  	if (hdev->discovery.limited)
5825  		memcpy(&cp.lap, liac, sizeof(cp.lap));
5826  	else
5827  		memcpy(&cp.lap, giac, sizeof(cp.lap));
5828  
5829  	cp.length = length;
5830  
5831  	return __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY,
5832  				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
5833  }
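/* The length passed above is in units of 1.28 s per the Core
 * Specification, so a caller using DISCOV_BREDR_INQUIRY_LEN (0x08 at the
 * time of writing) requests roughly 10.24 seconds of inquiry.
 */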
5834  
5835  static int hci_active_scan_sync(struct hci_dev *hdev, uint16_t interval)
5836  {
5837  	u8 own_addr_type;
5838  	/* Accept list is not used for discovery */
5839  	u8 filter_policy = 0x00;
5840  	/* Default is to enable duplicates filter */
5841  	u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
5842  	int err;
5843  
5844  	bt_dev_dbg(hdev, "");
5845  
5846  	/* If controller is scanning, it means the passive scanning is
5847  	 * running. Thus, we should temporarily stop it in order to set the
5848  	 * discovery scanning parameters.
5849  	 */
5850  	err = hci_scan_disable_sync(hdev);
5851  	if (err) {
5852  		bt_dev_err(hdev, "Unable to disable scanning: %d", err);
5853  		return err;
5854  	}
5855  
5856  	cancel_interleave_scan(hdev);
5857  
5858  	/* Pause address resolution for active scan and stop advertising if
5859  	 * privacy is enabled.
5860  	 */
5861  	err = hci_pause_addr_resolution(hdev);
5862  	if (err)
5863  		goto failed;
5864  
5865  	/* All active scans will be done with either a resolvable private
5866  	 * address (when privacy feature has been enabled) or non-resolvable
5867  	 * private address.
5868  	 */
5869  	err = hci_update_random_address_sync(hdev, true, scan_use_rpa(hdev),
5870  					     &own_addr_type);
5871  	if (err < 0)
5872  		own_addr_type = ADDR_LE_DEV_PUBLIC;
5873  
5874  	if (hci_is_adv_monitoring(hdev)) {
5875  		/* Duplicate filter should be disabled when some advertisement
5876  		 * monitor is activated, otherwise AdvMon can only receive one
5877  		 * advertisement for one peer(*) during active scanning, and
5878  		 * might report loss to these peers.
5879  		 *
5880  		 * Note that different controllers have different meanings of
5881  		 * |duplicate|. Some of them consider packets with the same
5882  		 * address as duplicate, and others consider packets with the
5883  		 * same address and the same RSSI as duplicate. Although in the
5884  		 * latter case we would not need to disable the duplicate filter,
5885  		 * active scanning is commonly used only for a short period of
5886  		 * time, so the power impact should be negligible.
5887  		 */
5888  		filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
5889  	}
5890  
5891  	err = hci_start_scan_sync(hdev, LE_SCAN_ACTIVE, interval,
5892  				  hdev->le_scan_window_discovery,
5893  				  own_addr_type, filter_policy, filter_dup);
5894  	if (!err)
5895  		return err;
5896  
5897  failed:
5898  	/* Resume advertising if it was paused */
5899  	if (use_ll_privacy(hdev))
5900  		hci_resume_advertising_sync(hdev);
5901  
5902  	/* Resume passive scanning */
5903  	hci_update_passive_scan_sync(hdev);
5904  	return err;
5905  }
5906  
5907  static int hci_start_interleaved_discovery_sync(struct hci_dev *hdev)
5908  {
5909  	int err;
5910  
5911  	bt_dev_dbg(hdev, "");
5912  
5913  	err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery * 2);
5914  	if (err)
5915  		return err;
5916  
5917  	return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN);
5918  }
5919  
5920  int hci_start_discovery_sync(struct hci_dev *hdev)
5921  {
5922  	unsigned long timeout;
5923  	int err;
5924  
5925  	bt_dev_dbg(hdev, "type %u", hdev->discovery.type);
5926  
5927  	switch (hdev->discovery.type) {
5928  	case DISCOV_TYPE_BREDR:
5929  		return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN);
5930  	case DISCOV_TYPE_INTERLEAVED:
5931  		/* When running simultaneous discovery, the LE scanning time
5932  		 * should occupy the whole discovery time since BR/EDR inquiry
5933  		 * and LE scanning are scheduled by the controller.
5934  		 *
5935  		 * For interleaving discovery in comparison, BR/EDR inquiry
5936  		 * and LE scanning are done sequentially with separate
5937  		 * timeouts.
5938  		 */
5939  		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
5940  			     &hdev->quirks)) {
5941  			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
5942  			/* During simultaneous discovery, we double LE scan
5943  			/* During simultaneous discovery, we double the LE scan
5944  			 * to do BR/EDR inquiry.
5945  			 */
5946  			err = hci_start_interleaved_discovery_sync(hdev);
5947  			break;
5948  		}
5949  
5950  		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
5951  		err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery);
5952  		break;
5953  	case DISCOV_TYPE_LE:
5954  		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
5955  		err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery);
5956  		break;
5957  	default:
5958  		return -EINVAL;
5959  	}
5960  
5961  	if (err)
5962  		return err;
5963  
5964  	bt_dev_dbg(hdev, "timeout %u ms", jiffies_to_msecs(timeout));
5965  
5966  	/* When service discovery is used and the controller has a
5967  	 * strict duplicate filter, it is important to remember the
5968  	 * start and duration of the scan. This is required for
5969  	 * restarting scanning during the discovery phase.
5970  	 */
5971  	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
5972  	    hdev->discovery.result_filtering) {
5973  		hdev->discovery.scan_start = jiffies;
5974  		hdev->discovery.scan_duration = timeout;
5975  	}
5976  
5977  	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
5978  			   timeout);
5979  	return 0;
5980  }
5981  
5982  static void hci_suspend_monitor_sync(struct hci_dev *hdev)
5983  {
5984  	switch (hci_get_adv_monitor_offload_ext(hdev)) {
5985  	case HCI_ADV_MONITOR_EXT_MSFT:
5986  		msft_suspend_sync(hdev);
5987  		break;
5988  	default:
5989  		return;
5990  	}
5991  }
5992  
5993  /* This function disables discovery and marks it as paused */
5994  static int hci_pause_discovery_sync(struct hci_dev *hdev)
5995  {
5996  	int old_state = hdev->discovery.state;
5997  	int err;
5998  
5999  	/* If discovery is already stopped/stopping/paused there is nothing to do */
6000  	if (old_state == DISCOVERY_STOPPED || old_state == DISCOVERY_STOPPING ||
6001  	    hdev->discovery_paused)
6002  		return 0;
6003  
6004  	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
6005  	err = hci_stop_discovery_sync(hdev);
6006  	if (err)
6007  		return err;
6008  
6009  	hdev->discovery_paused = true;
6010  	hdev->discovery_old_state = old_state;
6011  	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
6012  
6013  	return 0;
6014  }
6015  
6016  static int hci_update_event_filter_sync(struct hci_dev *hdev)
6017  {
6018  	struct bdaddr_list_with_flags *b;
6019  	u8 scan = SCAN_DISABLED;
6020  	bool scanning = test_bit(HCI_PSCAN, &hdev->flags);
6021  	int err;
6022  
6023  	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
6024  		return 0;
6025  
6026  	/* Some fake CSR controllers lock up after setting this type of
6027  	 * filter, so avoid sending the request altogether.
6028  	 */
6029  	if (test_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks))
6030  		return 0;
6031  
6032  	/* Always clear event filter when starting */
6033  	hci_clear_event_filter_sync(hdev);
6034  
6035  	list_for_each_entry(b, &hdev->accept_list, list) {
6036  		if (!(b->flags & HCI_CONN_FLAG_REMOTE_WAKEUP))
6037  			continue;
6038  
6039  		bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);
6040  
6041  		err = hci_set_event_filter_sync(hdev, HCI_FLT_CONN_SETUP,
6042  						 HCI_CONN_SETUP_ALLOW_BDADDR,
6043  						 &b->bdaddr,
6044  						 HCI_CONN_SETUP_AUTO_ON);
6045  		if (err)
6046  			bt_dev_dbg(hdev, "Failed to set event filter for %pMR",
6047  				   &b->bdaddr);
6048  		else
6049  			scan = SCAN_PAGE;
6050  	}
6051  
6052  	/* Only write the scan enable when it would actually change state */
6053  	if (!!scan != scanning)
6054  		hci_write_scan_enable_sync(hdev, scan);
6056  
6057  	return 0;
6058  }
6059  
6060  /* This function disables scanning (BR/EDR and LE) and marks it as paused */
6061  static int hci_pause_scan_sync(struct hci_dev *hdev)
6062  {
6063  	if (hdev->scanning_paused)
6064  		return 0;
6065  
6066  	/* Disable page scan if enabled */
6067  	if (test_bit(HCI_PSCAN, &hdev->flags))
6068  		hci_write_scan_enable_sync(hdev, SCAN_DISABLED);
6069  
6070  	hci_scan_disable_sync(hdev);
6071  
6072  	hdev->scanning_paused = true;
6073  
6074  	return 0;
6075  }
6076  
6077  /* This function performs the HCI suspend procedures in the following order:
6078   *
6079   * Pause discovery (active scanning/inquiry)
6080   * Pause Directed Advertising/Advertising
6081   * Pause Scanning (passive scanning in case discovery was not active)
6082   * Disconnect all connections
6083   * Set suspend_status to BT_SUSPEND_DISCONNECT if hdev cannot wakeup
6084   * otherwise:
6085   * Update event mask (only set events that are allowed to wake up the host)
6086   * Update event filter (with devices marked with HCI_CONN_FLAG_REMOTE_WAKEUP)
6087   * Update passive scanning (lower duty cycle)
6088   * Set suspend_status to BT_SUSPEND_CONFIGURE_WAKE
6089   */
6090  int hci_suspend_sync(struct hci_dev *hdev)
6091  {
6092  	int err;
6093  
6094  	/* If already marked as suspended there is nothing to do */
6095  	if (hdev->suspended)
6096  		return 0;
6097  
6098  	/* Mark device as suspended */
6099  	hdev->suspended = true;
6100  
6101  	/* Pause discovery if not already stopped */
6102  	hci_pause_discovery_sync(hdev);
6103  
6104  	/* Pause other advertisements */
6105  	hci_pause_advertising_sync(hdev);
6106  
6107  	/* Suspend monitor filters */
6108  	hci_suspend_monitor_sync(hdev);
6109  
6110  	/* Prevent disconnects from causing scanning to be re-enabled */
6111  	hci_pause_scan_sync(hdev);
6112  
6113  	if (hci_conn_count(hdev)) {
6114  		/* Soft disconnect everything (power off) */
6115  		err = hci_disconnect_all_sync(hdev, HCI_ERROR_REMOTE_POWER_OFF);
6116  		if (err) {
6117  			/* Set state to BT_RUNNING so resume doesn't notify */
6118  			hdev->suspend_state = BT_RUNNING;
6119  			hci_resume_sync(hdev);
6120  			return err;
6121  		}
6122  
6123  		/* Update event mask so only the allowed event can wakeup the
6124  		 * host.
6125  		 */
6126  		hci_set_event_mask_sync(hdev);
6127  	}
6128  
6129  	/* Only configure accept list if disconnect succeeded and wake
6130  	 * isn't being prevented.
6131  	 */
6132  	if (!hdev->wakeup || !hdev->wakeup(hdev)) {
6133  		hdev->suspend_state = BT_SUSPEND_DISCONNECT;
6134  		return 0;
6135  	}
6136  
6137  	/* Unpause to take care of updating scanning params */
6138  	hdev->scanning_paused = false;
6139  
6140  	/* Enable event filter for paired devices */
6141  	hci_update_event_filter_sync(hdev);
6142  
6143  	/* Update LE passive scan if enabled */
6144  	hci_update_passive_scan_sync(hdev);
6145  
6146  	/* Pause scan changes again. */
6147  	hdev->scanning_paused = true;
6148  
6149  	hdev->suspend_state = BT_SUSPEND_CONFIGURE_WAKE;
6150  
6151  	return 0;
6152  }
6153  
6154  /* This function resumes discovery */
6155  static int hci_resume_discovery_sync(struct hci_dev *hdev)
6156  {
6157  	int err;
6158  
6159  	/* If discovery is not paused there is nothing to do */
6160  	if (!hdev->discovery_paused)
6161  		return 0;
6162  
6163  	hdev->discovery_paused = false;
6164  
6165  	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
6166  
6167  	err = hci_start_discovery_sync(hdev);
6168  
6169  	hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED :
6170  				DISCOVERY_FINDING);
6171  
6172  	return err;
6173  }
6174  
6175  static void hci_resume_monitor_sync(struct hci_dev *hdev)
6176  {
6177  	switch (hci_get_adv_monitor_offload_ext(hdev)) {
6178  	case HCI_ADV_MONITOR_EXT_MSFT:
6179  		msft_resume_sync(hdev);
6180  		break;
6181  	default:
6182  		return;
6183  	}
6184  }
6185  
6186  /* This function resumes scanning and resets the paused flag */
6187  static int hci_resume_scan_sync(struct hci_dev *hdev)
6188  {
6189  	if (!hdev->scanning_paused)
6190  		return 0;
6191  
6192  	hdev->scanning_paused = false;
6193  
6194  	hci_update_scan_sync(hdev);
6195  
6196  	/* Reset passive scanning to normal */
6197  	hci_update_passive_scan_sync(hdev);
6198  
6199  	return 0;
6200  }
6201  
6202  /* This function performs the HCI resume procedures in the following order:
6203   *
6204   * Restore event mask
6205   * Clear event filter
6206   * Update passive scanning (normal duty cycle)
6207   * Resume Directed Advertising/Advertising
6208   * Resume discovery (active scanning/inquiry)
6209   */
6210  int hci_resume_sync(struct hci_dev *hdev)
6211  {
6212  	/* If not marked as suspended there is nothing to do */
6213  	if (!hdev->suspended)
6214  		return 0;
6215  
6216  	hdev->suspended = false;
6217  
6218  	/* Restore event mask */
6219  	hci_set_event_mask_sync(hdev);
6220  
6221  	/* Clear any event filters and restore scan state */
6222  	hci_clear_event_filter_sync(hdev);
6223  
6224  	/* Resume scanning */
6225  	hci_resume_scan_sync(hdev);
6226  
6227  	/* Resume monitor filters */
6228  	hci_resume_monitor_sync(hdev);
6229  
6230  	/* Resume other advertisements */
6231  	hci_resume_advertising_sync(hdev);
6232  
6233  	/* Resume discovery */
6234  	hci_resume_discovery_sync(hdev);
6235  
6236  	return 0;
6237  }
6238  
6239  static bool conn_use_rpa(struct hci_conn *conn)
6240  {
6241  	struct hci_dev *hdev = conn->hdev;
6242  
6243  	return hci_dev_test_flag(hdev, HCI_PRIVACY);
6244  }
6245  
6246  static int hci_le_ext_directed_advertising_sync(struct hci_dev *hdev,
6247  						struct hci_conn *conn)
6248  {
6249  	struct hci_cp_le_set_ext_adv_params cp;
6250  	int err;
6251  	bdaddr_t random_addr;
6252  	u8 own_addr_type;
6253  
6254  	err = hci_update_random_address_sync(hdev, false, conn_use_rpa(conn),
6255  					     &own_addr_type);
6256  	if (err)
6257  		return err;
6258  
6259  	/* Set require_privacy to false so that the remote device has a
6260  	 * chance of identifying us.
6261  	 */
6262  	err = hci_get_random_address(hdev, false, conn_use_rpa(conn), NULL,
6263  				     &own_addr_type, &random_addr);
6264  	if (err)
6265  		return err;
6266  
6267  	memset(&cp, 0, sizeof(cp));
6268  
6269  	cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_DIRECT_IND);
6270  	cp.channel_map = hdev->le_adv_channel_map;
6271  	cp.tx_power = HCI_TX_POWER_INVALID;
6272  	cp.primary_phy = HCI_ADV_PHY_1M;
6273  	cp.secondary_phy = HCI_ADV_PHY_1M;
6274  	cp.handle = 0x00; /* Use instance 0 for directed adv */
6275  	cp.own_addr_type = own_addr_type;
6276  	cp.peer_addr_type = conn->dst_type;
6277  	bacpy(&cp.peer_addr, &conn->dst);
6278  
6279  	/* As per Core Spec 5.2 Vol 2, Part E, Sec 7.8.53, the
6280  	 * advertising_event_property LE_LEGACY_ADV_DIRECT_IND
6281  	 * does not support advertising data; if the advertising set already
6282  	 * contains some, the controller shall return the error code 'Invalid
6283  	 * HCI Command Parameters' (0x12).
6284  	 * So the adv set for handle 0x00 has to be removed, since we use
6285  	 * instance 0 for directed adv.
6286  	 */
6287  	err = hci_remove_ext_adv_instance_sync(hdev, cp.handle, NULL);
6288  	if (err)
6289  		return err;
6290  
6291  	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS,
6292  				    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
6293  	if (err)
6294  		return err;
6295  
6296  	/* Check if the random address needs to be updated */
6297  	if (own_addr_type == ADDR_LE_DEV_RANDOM &&
6298  	    bacmp(&random_addr, BDADDR_ANY) &&
6299  	    bacmp(&random_addr, &hdev->random_addr)) {
6300  		err = hci_set_adv_set_random_addr_sync(hdev, 0x00,
6301  						       &random_addr);
6302  		if (err)
6303  			return err;
6304  	}
6305  
6306  	return hci_enable_ext_advertising_sync(hdev, 0x00);
6307  }
6308  
6309  static int hci_le_directed_advertising_sync(struct hci_dev *hdev,
6310  					    struct hci_conn *conn)
6311  {
6312  	struct hci_cp_le_set_adv_param cp;
6313  	u8 status;
6314  	u8 own_addr_type;
6315  	u8 enable;
6316  
6317  	if (ext_adv_capable(hdev))
6318  		return hci_le_ext_directed_advertising_sync(hdev, conn);
6319  
6320  	/* Clear the HCI_LE_ADV bit temporarily so that the
6321  	 * hci_update_random_address knows that it's safe to go ahead
6322  	 * and write a new random address. The flag will be set back on
6323  	 * as soon as the SET_ADV_ENABLE HCI command completes.
6324  	 */
6325  	hci_dev_clear_flag(hdev, HCI_LE_ADV);
6326  
6327  	/* Set require_privacy to false so that the remote device has a
6328  	 * chance of identifying us.
6329  	 */
6330  	status = hci_update_random_address_sync(hdev, false, conn_use_rpa(conn),
6331  						&own_addr_type);
6332  	if (status)
6333  		return status;
6334  
6335  	memset(&cp, 0, sizeof(cp));
6336  
6337  	/* Some controllers might reject command if intervals are not
6338  	 * within range for undirected advertising.
6339  	 * BCM20702A0 is known to be affected by this.
6340  	 */
6341  	cp.min_interval = cpu_to_le16(0x0020);
6342  	cp.max_interval = cpu_to_le16(0x0020);
6343  
6344  	cp.type = LE_ADV_DIRECT_IND;
6345  	cp.own_address_type = own_addr_type;
6346  	cp.direct_addr_type = conn->dst_type;
6347  	bacpy(&cp.direct_addr, &conn->dst);
6348  	cp.channel_map = hdev->le_adv_channel_map;
6349  
6350  	status = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_PARAM,
6351  				       sizeof(cp), &cp, HCI_CMD_TIMEOUT);
6352  	if (status)
6353  		return status;
6354  
6355  	enable = 0x01;
6356  
6357  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE,
6358  				     sizeof(enable), &enable, HCI_CMD_TIMEOUT);
6359  }
6360  
6361  static void set_ext_conn_params(struct hci_conn *conn,
6362  				struct hci_cp_le_ext_conn_param *p)
6363  {
6364  	struct hci_dev *hdev = conn->hdev;
6365  
6366  	memset(p, 0, sizeof(*p));
6367  
6368  	p->scan_interval = cpu_to_le16(hdev->le_scan_int_connect);
6369  	p->scan_window = cpu_to_le16(hdev->le_scan_window_connect);
6370  	p->conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
6371  	p->conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
6372  	p->conn_latency = cpu_to_le16(conn->le_conn_latency);
6373  	p->supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
6374  	p->min_ce_len = cpu_to_le16(0x0000);
6375  	p->max_ce_len = cpu_to_le16(0x0000);
6376  }
6377  
6378  static int hci_le_ext_create_conn_sync(struct hci_dev *hdev,
6379  				       struct hci_conn *conn, u8 own_addr_type)
6380  {
6381  	struct hci_cp_le_ext_create_conn *cp;
6382  	struct hci_cp_le_ext_conn_param *p;
6383  	u8 data[sizeof(*cp) + sizeof(*p) * 3];
6384  	u32 plen;
6385  
6386  	cp = (void *)data;
6387  	p = (void *)cp->data;
6388  
6389  	memset(cp, 0, sizeof(*cp));
6390  
6391  	bacpy(&cp->peer_addr, &conn->dst);
6392  	cp->peer_addr_type = conn->dst_type;
6393  	cp->own_addr_type = own_addr_type;
6394  
6395  	plen = sizeof(*cp);
6396  
6397  	if (scan_1m(hdev)) {
6398  		cp->phys |= LE_SCAN_PHY_1M;
6399  		set_ext_conn_params(conn, p);
6400  
6401  		p++;
6402  		plen += sizeof(*p);
6403  	}
6404  
6405  	if (scan_2m(hdev)) {
6406  		cp->phys |= LE_SCAN_PHY_2M;
6407  		set_ext_conn_params(conn, p);
6408  
6409  		p++;
6410  		plen += sizeof(*p);
6411  	}
6412  
6413  	if (scan_coded(hdev)) {
6414  		cp->phys |= LE_SCAN_PHY_CODED;
6415  		set_ext_conn_params(conn, p);
6416  
6417  		plen += sizeof(*p);
6418  	}
6419  
6420  	return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_EXT_CREATE_CONN,
6421  					plen, data,
6422  					HCI_EV_LE_ENHANCED_CONN_COMPLETE,
6423  					conn->conn_timeout, NULL);
6424  }
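/* The command built above is variable length: one hci_cp_le_ext_conn_param
 * block follows the header for each PHY bit set in cp->phys (1M, 2M and
 * Coded, in that order), which is why plen grows per enabled PHY.
 */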
6425  
6426  static int hci_le_create_conn_sync(struct hci_dev *hdev, void *data)
6427  {
6428  	struct hci_cp_le_create_conn cp;
6429  	struct hci_conn_params *params;
6430  	u8 own_addr_type;
6431  	int err;
6432  	struct hci_conn *conn = data;
6433  
6434  	if (!hci_conn_valid(hdev, conn))
6435  		return -ECANCELED;
6436  
6437  	bt_dev_dbg(hdev, "conn %p", conn);
6438  
6439  	clear_bit(HCI_CONN_SCANNING, &conn->flags);
6440  	conn->state = BT_CONNECT;
6441  
6442  	/* If requested to connect as peripheral, use directed advertising */
6443  	if (conn->role == HCI_ROLE_SLAVE) {
6444  		/* If we're active scanning and simultaneous roles is not
6445  		 * enabled simply reject the attempt.
6446  		 */
6447  		if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6448  		    hdev->le_scan_type == LE_SCAN_ACTIVE &&
6449  		    !hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES)) {
6450  			hci_conn_del(conn);
6451  			return -EBUSY;
6452  		}
6453  
6454  		/* Pause advertising while doing directed advertising. */
6455  		hci_pause_advertising_sync(hdev);
6456  
6457  		err = hci_le_directed_advertising_sync(hdev, conn);
6458  		goto done;
6459  	}
6460  
6461  	/* Disable advertising if simultaneous roles is not in use. */
6462  	if (!hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
6463  		hci_pause_advertising_sync(hdev);
6464  
6465  	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
6466  	if (params) {
6467  		conn->le_conn_min_interval = params->conn_min_interval;
6468  		conn->le_conn_max_interval = params->conn_max_interval;
6469  		conn->le_conn_latency = params->conn_latency;
6470  		conn->le_supv_timeout = params->supervision_timeout;
6471  	} else {
6472  		conn->le_conn_min_interval = hdev->le_conn_min_interval;
6473  		conn->le_conn_max_interval = hdev->le_conn_max_interval;
6474  		conn->le_conn_latency = hdev->le_conn_latency;
6475  		conn->le_supv_timeout = hdev->le_supv_timeout;
6476  	}
6477  
6478  	/* If controller is scanning, we stop it since some controllers are
6479  	 * not able to scan and connect at the same time. Also set the
6480  	 * HCI_LE_SCAN_INTERRUPTED flag so that the command complete
6481  	 * handler for scan disabling knows to set the correct discovery
6482  	 * state.
6483  	 */
6484  	if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
6485  		hci_scan_disable_sync(hdev);
6486  		hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED);
6487  	}
6488  
6489  	/* Update random address, but set require_privacy to false so
6490  	 * that we never connect with a non-resolvable address.
6491  	 */
6492  	err = hci_update_random_address_sync(hdev, false, conn_use_rpa(conn),
6493  					     &own_addr_type);
6494  	if (err)
6495  		goto done;
6496  	/* Send command LE Extended Create Connection if supported */
6497  	if (use_ext_conn(hdev)) {
6498  		err = hci_le_ext_create_conn_sync(hdev, conn, own_addr_type);
6499  		goto done;
6500  	}
6501  
6502  	memset(&cp, 0, sizeof(cp));
6503  
6504  	cp.scan_interval = cpu_to_le16(hdev->le_scan_int_connect);
6505  	cp.scan_window = cpu_to_le16(hdev->le_scan_window_connect);
6506  
6507  	bacpy(&cp.peer_addr, &conn->dst);
6508  	cp.peer_addr_type = conn->dst_type;
6509  	cp.own_address_type = own_addr_type;
6510  	cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
6511  	cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
6512  	cp.conn_latency = cpu_to_le16(conn->le_conn_latency);
6513  	cp.supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
6514  	cp.min_ce_len = cpu_to_le16(0x0000);
6515  	cp.max_ce_len = cpu_to_le16(0x0000);
6516  
6517  	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 2261:
6518  	 *
6519  	 * If this event is unmasked and the HCI_LE_Connection_Complete event
6520  	 * is unmasked, only the HCI_LE_Enhanced_Connection_Complete event is
6521  	 * sent when a new connection has been created.
6522  	 */
6523  	err = __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CREATE_CONN,
6524  				       sizeof(cp), &cp,
6525  				       use_enhanced_conn_complete(hdev) ?
6526  				       HCI_EV_LE_ENHANCED_CONN_COMPLETE :
6527  				       HCI_EV_LE_CONN_COMPLETE,
6528  				       conn->conn_timeout, NULL);
6529  
6530  done:
6531  	if (err == -ETIMEDOUT)
6532  		hci_le_connect_cancel_sync(hdev, conn, 0x00);
6533  
6534  	/* Re-enable advertising after the connection attempt is finished. */
6535  	hci_resume_advertising_sync(hdev);
6536  	return err;
6537  }
6538  
6539  int hci_le_create_cis_sync(struct hci_dev *hdev)
6540  {
6541  	struct {
6542  		struct hci_cp_le_create_cis cp;
6543  		struct hci_cis cis[0x1f];
6544  	} cmd;
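	/* The cis[] array is bounded at 0x1f entries since the spec limits
	 * Num_CIS in LE Create CIS to at most 0x1F.
	 */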
6545  	struct hci_conn *conn;
6546  	u8 cig = BT_ISO_QOS_CIG_UNSET;
6547  
6548  	/* The spec allows only one pending LE Create CIS command at a time. If
6549  	 * the command is pending now, don't do anything. We check for pending
6550  	 * connections after each CIS Established event.
6551  	 *
6552  	 * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
6553  	 * page 2566:
6554  	 *
6555  	 * If the Host issues this command before all the
6556  	 * HCI_LE_CIS_Established events from the previous use of the
6557  	 * command have been generated, the Controller shall return the
6558  	 * error code Command Disallowed (0x0C).
6559  	 *
6560  	 * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
6561  	 * page 2567:
6562  	 *
6563  	 * When the Controller receives the HCI_LE_Create_CIS command, the
6564  	 * Controller sends the HCI_Command_Status event to the Host. An
6565  	 * HCI_LE_CIS_Established event will be generated for each CIS when it
6566  	 * is established or if it is disconnected or considered lost before
6567  	 * being established; until all the events are generated, the command
6568  	 * remains pending.
6569  	 */
6570  
6571  	memset(&cmd, 0, sizeof(cmd));
6572  
6573  	hci_dev_lock(hdev);
6574  
6575  	rcu_read_lock();
6576  
6577  	/* Wait until previous Create CIS has completed */
6578  	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
6579  		if (test_bit(HCI_CONN_CREATE_CIS, &conn->flags))
6580  			goto done;
6581  	}
6582  
6583  	/* Find CIG with all CIS ready */
6584  	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
6585  		struct hci_conn *link;
6586  
6587  		if (hci_conn_check_create_cis(conn))
6588  			continue;
6589  
6590  		cig = conn->iso_qos.ucast.cig;
6591  
6592  		list_for_each_entry_rcu(link, &hdev->conn_hash.list, list) {
6593  			if (hci_conn_check_create_cis(link) > 0 &&
6594  			    link->iso_qos.ucast.cig == cig &&
6595  			    link->state != BT_CONNECTED) {
6596  				cig = BT_ISO_QOS_CIG_UNSET;
6597  				break;
6598  			}
6599  		}
6600  
6601  		if (cig != BT_ISO_QOS_CIG_UNSET)
6602  			break;
6603  	}
6604  
6605  	if (cig == BT_ISO_QOS_CIG_UNSET)
6606  		goto done;
6607  
6608  	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
6609  		struct hci_cis *cis = &cmd.cis[cmd.cp.num_cis];
6610  
6611  		if (hci_conn_check_create_cis(conn) ||
6612  		    conn->iso_qos.ucast.cig != cig)
6613  			continue;
6614  
6615  		set_bit(HCI_CONN_CREATE_CIS, &conn->flags);
6616  		cis->acl_handle = cpu_to_le16(conn->parent->handle);
6617  		cis->cis_handle = cpu_to_le16(conn->handle);
6618  		cmd.cp.num_cis++;
6619  
6620  		if (cmd.cp.num_cis >= ARRAY_SIZE(cmd.cis))
6621  			break;
6622  	}
6623  
6624  done:
6625  	rcu_read_unlock();
6626  
6627  	hci_dev_unlock(hdev);
6628  
6629  	if (!cmd.cp.num_cis)
6630  		return 0;
6631  
6632  	/* Wait for HCI_LE_CIS_Established */
6633  	return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CREATE_CIS,
6634  					sizeof(cmd.cp) + sizeof(cmd.cis[0]) *
6635  					cmd.cp.num_cis, &cmd,
6636  					HCI_EVT_LE_CIS_ESTABLISHED,
6637  					conn->conn_timeout, NULL);
6638  }
6639  
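/* Remove a CIG from the controller. Illustrative usage sketch only
 * ("cig_id" is a hypothetical variable, not a call site in this file):
 *
 *	err = hci_le_remove_cig_sync(hdev, cig_id);
 *	if (err)
 *		bt_dev_err(hdev, "Remove CIG failed: %d", err);
 */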
int hci_le_remove_cig_sync(struct hci_dev *hdev, u8 handle)
{
	struct hci_cp_le_remove_cig cp;

	memset(&cp, 0, sizeof(cp));
	cp.cig_id = handle;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_REMOVE_CIG, sizeof(cp),
				     &cp, HCI_CMD_TIMEOUT);
}

int hci_le_big_terminate_sync(struct hci_dev *hdev, u8 handle)
{
	struct hci_cp_le_big_term_sync cp;

	memset(&cp, 0, sizeof(cp));
	cp.handle = handle;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_BIG_TERM_SYNC,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

int hci_le_pa_terminate_sync(struct hci_dev *hdev, u16 handle)
{
	struct hci_cp_le_pa_term_sync cp;

	memset(&cp, 0, sizeof(cp));
	cp.handle = cpu_to_le16(handle);

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_TERM_SYNC,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

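/* Resolve which own address type (and, where needed, which random address)
 * should be used: a resolvable private address when use_rpa is set, a newly
 * generated non-resolvable private address when privacy is required without
 * an RPA, and the public address otherwise.
 */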
int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
			   bool use_rpa, struct adv_info *adv_instance,
			   u8 *own_addr_type, bdaddr_t *rand_addr)
{
	int err;

	bacpy(rand_addr, BDADDR_ANY);

	/* If privacy is enabled use a resolvable private address. If
	 * current RPA has expired then generate a new one.
	 */
	if (use_rpa) {
		/* If the controller supports LL Privacy, use own address
		 * type 0x03 (ADDR_LE_DEV_RANDOM_RESOLVED).
		 */
		if (use_ll_privacy(hdev))
			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
		else
			*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (adv_instance) {
			if (adv_rpa_valid(adv_instance))
				return 0;
		} else {
			if (rpa_valid(hdev))
				return 0;
		}

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			bt_dev_err(hdev, "failed to generate new RPA");
			return err;
		}

		bacpy(rand_addr, &hdev->rpa);

		return 0;
	}

	/* If privacy is required without a resolvable private address,
	 * use a non-resolvable private address. This is useful for
	 * non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from six random bytes with the two most
			 * significant bits cleared.
			 */
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;

			/* The non-resolvable private address shall not be
			 * equal to the public address.
			 */
			if (bacmp(&hdev->bdaddr, &nrpa))
				break;
		}

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		bacpy(rand_addr, &nrpa);

		return 0;
	}

	/* No privacy so use a public address. */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}

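/* Illustrative sketch only for hci_get_random_address() (hypothetical
 * caller; "connectable", "use_rpa" and "adv" are made-up locals):
 *
 *	u8 own_addr_type;
 *	bdaddr_t rand_addr;
 *	int err;
 *
 *	err = hci_get_random_address(hdev, !connectable, use_rpa, adv,
 *				     &own_addr_type, &rand_addr);
 *	if (err < 0)
 *		return err;
 */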
static int _update_adv_data_sync(struct hci_dev *hdev, void *data)
{
	u8 instance = PTR_UINT(data);

	return hci_update_adv_data_sync(hdev, instance);
}

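/* Queue an advertising data update for the given instance on the cmd_sync
 * work: the instance number travels through the opaque data pointer via
 * UINT_PTR() and is unpacked again by _update_adv_data_sync() above.
 */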
int hci_update_adv_data(struct hci_dev *hdev, u8 instance)
{
	return hci_cmd_sync_queue(hdev, _update_adv_data_sync,
				  UINT_PTR(instance), NULL);
}

static int hci_acl_create_conn_sync(struct hci_dev *hdev, void *data)
{
	struct hci_conn *conn = data;
	struct inquiry_entry *ie;
	struct hci_cp_create_conn cp;
	int err;

	if (!hci_conn_valid(hdev, conn))
		return -ECANCELED;

	/* Many controllers disallow HCI Create Connection while they are
	 * performing an HCI Inquiry, so cancel any ongoing Inquiry before
	 * issuing HCI Create Connection. This may cause the MGMT discovering
	 * state to become false without user space having requested it, but
	 * that is okay since the MGMT Discovery APIs do not promise that
	 * discovery runs forever. Instead, user space monitors the MGMT
	 * discovering state and may request discovery again when the flag
	 * becomes false.
	 */
	if (test_bit(HCI_INQUIRY, &hdev->flags)) {
		err = __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY_CANCEL, 0,
					    NULL, HCI_CMD_TIMEOUT);
		if (err)
			bt_dev_warn(hdev, "Failed to cancel inquiry %d", err);
	}

	conn->state = BT_CONNECT;
	conn->out = true;
	conn->role = HCI_ROLE_MASTER;

	conn->attempt++;

	conn->link_policy = hdev->link_policy;

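	/* Default to page scan repetition mode R2; if the peer is present in
	 * the inquiry cache and the entry is fresh enough, the cached page
	 * scan parameters and clock offset below are used instead.
	 */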
	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.bdaddr, &conn->dst);
	cp.pscan_rep_mode = 0x02;

	ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
	if (ie) {
		if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
			cp.pscan_rep_mode = ie->data.pscan_rep_mode;
			cp.pscan_mode     = ie->data.pscan_mode;
			cp.clock_offset   = ie->data.clock_offset |
					    cpu_to_le16(0x8000);
		}

		memcpy(conn->dev_class, ie->data.dev_class, 3);
	}

	cp.pkt_type = cpu_to_le16(conn->pkt_type);
	if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
		cp.role_switch = 0x01;
	else
		cp.role_switch = 0x00;

	return __hci_cmd_sync_status_sk(hdev, HCI_OP_CREATE_CONN,
					sizeof(cp), &cp,
					HCI_EV_CONN_COMPLETE,
					conn->conn_timeout, NULL);
}

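/* Queue a BR/EDR connection attempt. hci_cmd_sync_queue_once() is used so
 * that the same attempt is not queued twice for one connection.
 */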
int hci_connect_acl_sync(struct hci_dev *hdev, struct hci_conn *conn)
{
	return hci_cmd_sync_queue_once(hdev, hci_acl_create_conn_sync, conn,
				       NULL);
}

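/* Completion callback for the queued LE connection attempt: on success the
 * scan state used for connecting is cleaned up; on failure the connection
 * is failed, but only if it is still the one pending on the controller.
 */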
static void create_le_conn_complete(struct hci_dev *hdev, void *data, int err)
{
	struct hci_conn *conn = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err == -ECANCELED)
		return;

	hci_dev_lock(hdev);

	if (!hci_conn_valid(hdev, conn))
		goto done;

	if (!err) {
		hci_connect_le_scan_cleanup(conn, 0x00);
		goto done;
	}

	/* Check if connection is still pending */
	if (conn != hci_lookup_le_connect(hdev))
		goto done;

	/* Flush to make sure we send create conn cancel command if needed */
	flush_delayed_work(&conn->le_conn_timeout);
	hci_conn_failed(conn, bt_status(err));

done:
	hci_dev_unlock(hdev);
}

int hci_connect_le_sync(struct hci_dev *hdev, struct hci_conn *conn)
{
	return hci_cmd_sync_queue_once(hdev, hci_le_create_conn_sync, conn,
				       create_le_conn_complete);
}

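/* Abort a connection attempt that has been queued but not started yet (the
 * connection must still be in BT_OPEN). Returns 0 when the pending request
 * was dequeued before running, a positive value when no matching request
 * was found, -EINVAL for the wrong state and -ENOENT for an unsupported
 * link type.
 */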
int hci_cancel_connect_sync(struct hci_dev *hdev, struct hci_conn *conn)
{
	if (conn->state != BT_OPEN)
		return -EINVAL;

	switch (conn->type) {
	case ACL_LINK:
		return !hci_cmd_sync_dequeue_once(hdev,
						  hci_acl_create_conn_sync,
						  conn, NULL);
	case LE_LINK:
		return !hci_cmd_sync_dequeue_once(hdev, hci_le_create_conn_sync,
						  conn, create_le_conn_complete);
	}

	return -ENOENT;
}
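
/* Illustrative sketch only (hypothetical caller, not code from this file):
 * pairing the two entry points above, an LE connection attempt that is
 * still sitting in the cmd_sync queue can be aborted before it runs:
 *
 *	err = hci_connect_le_sync(hdev, conn);
 *	...
 *	if (hci_cancel_connect_sync(hdev, conn))
 *		bt_dev_dbg(hdev, "attempt already running or gone");
 */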