/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <linux/sched/signal.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"
#include "msft.h"
#include "eir.h"

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

void hci_req_purge(struct hci_request *req)
{
	skb_queue_purge(&req->cmd_q);
}

bool hci_req_status_pend(struct hci_dev *hdev)
{
	return hdev->req_status == HCI_REQ_PEND;
}

static int req_run(struct hci_request *req, hci_req_complete_t complete,
		   hci_req_complete_skb_t complete_skb)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	if (complete) {
		bt_cb(skb)->hci.req_complete = complete;
	} else if (complete_skb) {
		bt_cb(skb)->hci.req_complete_skb = complete_skb;
		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
	}

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
	return req_run(req, NULL, complete);
}

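/* Typical request lifecycle (illustrative sketch only; real callers live
 * elsewhere in the stack and "scan_cp"/"scan_complete" are hypothetical
 * names):
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(scan_cp),
 *		    &scan_cp);
 *	err = hci_req_run(&req, scan_complete);
 *
 * hci_req_run() returns -ENODATA when nothing was queued, or req->err when
 * command building failed. The callback is attached to the last queued
 * command and runs once the whole request has completed.
 */
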
void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
			   struct sk_buff *skb)
{
	bt_dev_dbg(hdev, "result 0x%2.2x", result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb) {
			kfree_skb(hdev->req_skb);
			hdev->req_skb = skb_get(skb);
		}
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
						     unsigned long opt),
		   unsigned long opt, u32 timeout, u8 *hci_status)
{
	struct hci_request req;
	int err = 0;

	bt_dev_dbg(hdev, "start");

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	err = func(&req, opt);
	if (err) {
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		return err;
	}

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA) {
			if (hci_status)
				*hci_status = 0;
			return 0;
		}

		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;

		return err;
	}

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
					       hdev->req_status != HCI_REQ_PEND,
					       timeout);

	if (err == -ERESTARTSYS)
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		if (hci_status)
			*hci_status = hdev->req_result;
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;

	default:
		err = -ETIMEDOUT;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;
	}

	kfree_skb(hdev->req_skb);
	hdev->req_skb = NULL;
	hdev->req_status = hdev->req_result = 0;

	bt_dev_dbg(hdev, "end: err %d", err);

	return err;
}

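/* Sketch of a synchronous caller (hypothetical names; the builder must
 * match the function pointer type taken by __hci_req_sync() above; compare
 * interleave_scan_work() below for a real in-file user):
 *
 *	static int le_scan_disable_req(struct hci_request *req,
 *				       unsigned long opt)
 *	{
 *		hci_req_add_le_scan_disable(req, false);
 *		return 0;
 *	}
 *
 *	u8 status;
 *	int err = hci_req_sync(hdev, le_scan_disable_req, 0,
 *			       HCI_CMD_TIMEOUT, &status);
 *
 * On success err is 0 and status holds the HCI status byte of the last
 * completed command; on timeout err is -ETIMEDOUT and status is
 * HCI_ERROR_UNSPECIFIED.
 */
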
int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
						  unsigned long opt),
		 unsigned long opt, u32 timeout, u8 *hci_status)
{
	int ret;

	/* Serialize all requests */
	hci_req_sync_lock(hdev);
	/* Check the state after obtaining the lock to protect the HCI_UP
	 * flag against any races from hci_dev_do_close() when the
	 * controller gets removed.
	 */
	if (test_bit(HCI_UP, &hdev->flags))
		ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
	else
		ret = -ENETDOWN;
	hci_req_sync_unlock(hdev);

	return ret;
}

struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
				const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		skb_put_data(skb, param, plen);

	bt_dev_dbg(hdev, "skb len %d", skb->len);

	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
	hci_skb_opcode(skb) = opcode;

	return skb;
}
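
/* The skb built above carries a raw HCI command packet; a minimal sketch
 * of the wire layout (opcode is little endian):
 *
 *	+---------------------------+------------------+
 *	| struct hci_command_hdr    | parameters       |
 *	| opcode (2) | plen (1)     | plen bytes       |
 *	+---------------------------+------------------+
 *
 * The packet type and opcode are additionally stashed in the skb control
 * buffer (hci_skb_pkt_type()/hci_skb_opcode()) for the TX path and for
 * matching the eventual command completion event.
 */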

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	hci_skb_event(skb) = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	bt_dev_dbg(req->hdev, "HCI_REQ-0x%4.4x", opcode);
	hci_req_add_ev(req, opcode, plen, param, 0);
}

static void start_interleave_scan(struct hci_dev *hdev)
{
	hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
	queue_delayed_work(hdev->req_workqueue,
			   &hdev->interleave_scan, 0);
}

static bool is_interleave_scanning(struct hci_dev *hdev)
{
	return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
}

static void cancel_interleave_scan(struct hci_dev *hdev)
{
	bt_dev_dbg(hdev, "cancelling interleave scan");

	cancel_delayed_work_sync(&hdev->interleave_scan);

	hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
}
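
/* Interleave scan state machine, as driven by start_interleave_scan(),
 * interleave_scan_work() and cancel_interleave_scan():
 *
 *	NONE --start--> NO_FILTER <--work toggles--> ALLOWLIST
 *	  ^                 |                            |
 *	  +----- cancel ----+----------------------------+
 */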

/* Return true if interleave scanning was started by this function,
 * otherwise return false.
 */
static bool __hci_update_interleaved_scan(struct hci_dev *hdev)
{
	/* Do interleaved scan only if all of the following are true:
	 * - There is at least one ADV monitor
	 * - At least one pending LE connection or one device to be scanned for
	 * - Monitor offloading is not supported
	 * If so, we should alternate between allowlist scan and one without
	 * any filters to save power.
	 */
	bool use_interleaving = hci_is_adv_monitoring(hdev) &&
				!(list_empty(&hdev->pend_le_conns) &&
				  list_empty(&hdev->pend_le_reports)) &&
				hci_get_adv_monitor_offload_ext(hdev) ==
				    HCI_ADV_MONITOR_EXT_NONE;
	bool is_interleaving = is_interleave_scanning(hdev);

	if (use_interleaving && !is_interleaving) {
		start_interleave_scan(hdev);
		bt_dev_dbg(hdev, "starting interleave scan");
		return true;
	}

	if (!use_interleaving && is_interleaving)
		cancel_interleave_scan(hdev);

	return false;
}

void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
			    &cp);
	} else {
		struct hci_cp_le_set_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	/* Disable address resolution */
	if (hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) {
		__u8 enable = 0x00;

		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
	}
}

static void del_from_accept_list(struct hci_request *req, bdaddr_t *bdaddr,
				 u8 bdaddr_type)
{
	struct hci_cp_le_del_from_accept_list cp;

	cp.bdaddr_type = bdaddr_type;
	bacpy(&cp.bdaddr, bdaddr);

	bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from accept list", &cp.bdaddr,
		   cp.bdaddr_type);
	hci_req_add(req, HCI_OP_LE_DEL_FROM_ACCEPT_LIST, sizeof(cp), &cp);

	if (use_ll_privacy(req->hdev)) {
		struct smp_irk *irk;

		irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type);
		if (irk) {
			struct hci_cp_le_del_from_resolv_list cp;

			cp.bdaddr_type = bdaddr_type;
			bacpy(&cp.bdaddr, bdaddr);

			hci_req_add(req, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
				    sizeof(cp), &cp);
		}
	}
}

/* Adds connection to accept list if needed. On error, returns -1. */
static int add_to_accept_list(struct hci_request *req,
			      struct hci_conn_params *params, u8 *num_entries,
			      bool allow_rpa)
{
	struct hci_cp_le_add_to_accept_list cp;
	struct hci_dev *hdev = req->hdev;

	/* Already in accept list */
	if (hci_bdaddr_list_lookup(&hdev->le_accept_list, &params->addr,
				   params->addr_type))
		return 0;

	/* Select filter policy to accept all advertising */
	if (*num_entries >= hdev->le_accept_list_size)
		return -1;

	/* Accept list can not be used with RPAs */
	if (!allow_rpa &&
	    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) {
		return -1;
	}

	/* During suspend, only wakeable devices can be in accept list */
	if (hdev->suspended &&
	    !(params->flags & HCI_CONN_FLAG_REMOTE_WAKEUP))
		return 0;

	*num_entries += 1;
	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);

	bt_dev_dbg(hdev, "Add %pMR (0x%x) to accept list", &cp.bdaddr,
		   cp.bdaddr_type);
	hci_req_add(req, HCI_OP_LE_ADD_TO_ACCEPT_LIST, sizeof(cp), &cp);

	if (use_ll_privacy(hdev)) {
		struct smp_irk *irk;

		irk = hci_find_irk_by_addr(hdev, &params->addr,
					   params->addr_type);
		if (irk) {
			struct hci_cp_le_add_to_resolv_list cp;

			cp.bdaddr_type = params->addr_type;
			bacpy(&cp.bdaddr, &params->addr);
			memcpy(cp.peer_irk, irk->val, 16);

			if (hci_dev_test_flag(hdev, HCI_PRIVACY))
				memcpy(cp.local_irk, hdev->irk, 16);
			else
				memset(cp.local_irk, 0, 16);

			hci_req_add(req, HCI_OP_LE_ADD_TO_RESOLV_LIST,
				    sizeof(cp), &cp);
		}
	}

	return 0;
}

static u8 update_accept_list(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;
	struct bdaddr_list *b;
	u8 num_entries = 0;
	bool pend_conn, pend_report;
	/* We allow usage of the accept list even with RPAs in suspend. In the
	 * worst case, we won't be able to wake from devices that use the
	 * Privacy 1.2 features. Additionally, once we support Privacy 1.2
	 * and IRK offloading, we can update this to also check for those
	 * conditions.
	 */
	bool allow_rpa = hdev->suspended;

	if (use_ll_privacy(hdev))
		allow_rpa = true;

	/* Go through the current accept list programmed into the
	 * controller one by one and check if that address is still
	 * in the list of pending connections or list of devices to
	 * report. If not present in either list, then queue the
	 * command to remove it from the controller.
	 */
	list_for_each_entry(b, &hdev->le_accept_list, list) {
		pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
						      &b->bdaddr,
						      b->bdaddr_type);
		pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
							&b->bdaddr,
							b->bdaddr_type);

		/* If the device is not likely to connect or report,
		 * remove it from the accept list.
		 */
		if (!pend_conn && !pend_report) {
			del_from_accept_list(req, &b->bdaddr, b->bdaddr_type);
			continue;
		}

		/* Accept list can not be used with RPAs */
		if (!allow_rpa &&
		    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
		    hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
			return 0x00;
		}

		num_entries++;
	}

	/* Since all no-longer-valid accept list entries have been
	 * removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If the list of the devices is larger than the number of
	 * available accept list entries in the controller, then
	 * just abort and return the filter policy value that does
	 * not use the accept list.
	 */
	list_for_each_entry(params, &hdev->pend_le_conns, action) {
		if (add_to_accept_list(req, params, &num_entries, allow_rpa))
			return 0x00;
	}

	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * accept list if there is still space. Abort if space runs out.
	 */
	list_for_each_entry(params, &hdev->pend_le_reports, action) {
		if (add_to_accept_list(req, params, &num_entries, allow_rpa))
			return 0x00;
	}

	/* Use the allowlist unless the following conditions are all true:
	 * - We are not currently suspending
	 * - There are 1 or more ADV monitors registered and it's not offloaded
	 * - Interleaved scanning is not currently using the allowlist
	 */
	if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
	    hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE &&
	    hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
		return 0x00;

	/* Select filter policy to use accept list */
	return 0x01;
}
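
/* Scan filter policy values produced above and adjusted in
 * hci_req_add_le_passive_scan() (per the Core Specification):
 *
 *	0x00	accept all advertisements
 *	0x01	accept only advertisers on the accept list
 *	0x02	as 0x00, plus directed advertising aimed at our RPA
 *	0x03	as 0x01, plus directed advertising aimed at our RPA
 */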

static bool scan_use_rpa(struct hci_dev *hdev)
{
	return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
			       u16 window, u8 own_addr_type, u8 filter_policy,
			       bool filter_dup, bool addr_resolv)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	if (use_ll_privacy(hdev) && addr_resolv) {
		u8 enable = 0x01;

		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
	}

	/* Use ext scanning if set ext scan param and ext scan enable is
	 * supported
	 */
	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_params *ext_param_cp;
		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
		struct hci_cp_le_scan_phy_params *phy_params;
		u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
		u32 plen;

		ext_param_cp = (void *)data;
		phy_params = (void *)ext_param_cp->data;

		memset(ext_param_cp, 0, sizeof(*ext_param_cp));
		ext_param_cp->own_addr_type = own_addr_type;
		ext_param_cp->filter_policy = filter_policy;

		plen = sizeof(*ext_param_cp);

		if (scan_1m(hdev) || scan_2m(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		if (scan_coded(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
			    plen, ext_param_cp);

		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
		ext_enable_cp.enable = LE_SCAN_ENABLE;
		ext_enable_cp.filter_dup = filter_dup;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
			    sizeof(ext_enable_cp), &ext_enable_cp);
	} else {
		struct hci_cp_le_set_scan_param param_cp;
		struct hci_cp_le_set_scan_enable enable_cp;

		memset(&param_cp, 0, sizeof(param_cp));
		param_cp.type = type;
		param_cp.interval = cpu_to_le16(interval);
		param_cp.window = cpu_to_le16(window);
		param_cp.own_address_type = own_addr_type;
		param_cp.filter_policy = filter_policy;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = filter_dup;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
	}
}
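
/* Layout of the variable-length buffer sent with
 * HCI_OP_LE_SET_EXT_SCAN_PARAMS above; one phy params block is appended
 * per bit set in scanning_phys, so plen grows accordingly:
 *
 *	+-------------------------------+-----------+--------------+
 *	| hci_cp_le_set_ext_scan_params | 1M params | Coded params |
 *	+-------------------------------+-----------+--------------+
 */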

static void set_random_addr(struct hci_request *req, bdaddr_t *rpa);
static int hci_update_random_address(struct hci_request *req,
				     bool require_privacy, bool use_rpa,
				     u8 *own_addr_type)
{
	struct hci_dev *hdev = req->hdev;
	int err;

	/* If privacy is enabled use a resolvable private address. If
	 * the current RPA has expired or there is something other than
	 * the current RPA in use, then generate a new one.
	 */
	if (use_rpa) {
		/* If the controller supports LL Privacy, use own address
		 * type 0x03 (resolvable private address, resolved by the
		 * controller).
		 */
		if (use_ll_privacy(hdev))
			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
		else
			*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (rpa_valid(hdev))
			return 0;

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			bt_dev_err(hdev, "failed to generate new RPA");
			return err;
		}

		set_random_addr(req, &hdev->rpa);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use a non-resolvable private address. This is useful for active
	 * scanning and non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from six random bytes with the two most significant
			 * bits cleared.
			 */
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;

			/* The non-resolvable private address shall not be
			 * equal to the public address.
			 */
			if (bacmp(&hdev->bdaddr, &nrpa))
				break;
		}

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		set_random_addr(req, &nrpa);
		return 0;
	}

	/* If forcing static address is in use or there is no public
	 * address use the static address as random address (but skip
	 * the HCI command if the current random address is already the
	 * static one).
	 *
	 * In case BR/EDR has been disabled on a dual-mode controller
	 * and a static address has been configured, then use that
	 * address instead of the public BR/EDR address.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		*own_addr_type = ADDR_LE_DEV_RANDOM;
		if (bacmp(&hdev->static_addr, &hdev->random_addr))
			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);
		return 0;
	}

	/* Neither privacy nor static address is being used so use a
	 * public address.
	 */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}
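
/* Summary of the own-address selection above:
 *
 *	use_rpa:	 ADDR_LE_DEV_RANDOM (or _RANDOM_RESOLVED with LL
 *			 privacy), regenerating the RPA if it has expired
 *	require_privacy: ADDR_LE_DEV_RANDOM with a fresh non-resolvable
 *			 address
 *	static address:	 ADDR_LE_DEV_RANDOM using hdev->static_addr
 *	otherwise:	 ADDR_LE_DEV_PUBLIC
 */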

/* Call hci_req_add_le_scan_disable() first to disable controller-based
 * address resolution before the resolving list can be reconfigured.
 */
void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	u8 filter_policy;
	u16 window, interval;
	/* Default is to enable duplicates filter */
	u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	/* Background scanning should run with address resolution */
	bool addr_resolv = true;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	/* Set require_privacy to false since no SCAN_REQ are sent
	 * during passive scanning. Not using a non-resolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address(req, false, scan_use_rpa(hdev),
				      &own_addr_type))
		return;

	if (hdev->enable_advmon_interleave_scan &&
	    __hci_update_interleaved_scan(hdev))
		return;

	bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);

	/* Adding or removing entries from the accept list must
	 * happen before enabling scanning. The controller does
	 * not allow accept list modification while scanning.
	 */
	filter_policy = update_accept_list(req);

	/* When the controller is using resolvable random addresses and
	 * LE privacy is enabled, controllers that support the Extended
	 * Scanner Filter Policies can also handle directed advertising.
	 *
	 * So instead of using filter policies 0x00 (no accept list)
	 * and 0x01 (accept list enabled) use the new filter policies
	 * 0x02 (no accept list) and 0x03 (accept list enabled).
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
		filter_policy |= 0x02;

	if (hdev->suspended) {
		window = hdev->le_scan_window_suspend;
		interval = hdev->le_scan_int_suspend;
	} else if (hci_is_le_conn_scanning(hdev)) {
		window = hdev->le_scan_window_connect;
		interval = hdev->le_scan_int_connect;
	} else if (hci_is_adv_monitoring(hdev)) {
		window = hdev->le_scan_window_adv_monitor;
		interval = hdev->le_scan_int_adv_monitor;

		/* Disable duplicates filter when scanning for advertisement
		 * monitor for the following reasons.
		 *
		 * For HW pattern filtering (ex. MSFT), Realtek and Qualcomm
		 * controllers ignore RSSI_Sampling_Period when the duplicates
		 * filter is enabled.
		 *
		 * For SW pattern filtering, when we're not doing interleaved
		 * scanning, it is necessary to disable duplicates filter,
		 * otherwise hosts can only receive one advertisement and it's
		 * impossible to know if a peer is still in range.
		 */
		filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
	} else {
		window = hdev->le_scan_window;
		interval = hdev->le_scan_interval;
	}

	bt_dev_dbg(hdev, "LE passive scan with accept list = %d",
		   filter_policy);
	hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
			   own_addr_type, filter_policy, filter_dup,
			   addr_resolv);
}

static int hci_req_add_le_interleaved_scan(struct hci_request *req,
					   unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	int ret = 0;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
		hci_req_add_le_scan_disable(req, false);
	hci_req_add_le_passive_scan(req);

	switch (hdev->interleave_scan_state) {
	case INTERLEAVE_SCAN_ALLOWLIST:
		bt_dev_dbg(hdev, "next state: allowlist");
		hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
		break;
	case INTERLEAVE_SCAN_NO_FILTER:
		bt_dev_dbg(hdev, "next state: no filter");
		hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST;
		break;
	case INTERLEAVE_SCAN_NONE:
		BT_ERR("unexpected error");
		ret = -1;
	}

	hci_dev_unlock(hdev);

	return ret;
}

static void interleave_scan_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    interleave_scan.work);
	u8 status;
	unsigned long timeout;

	if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) {
		timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration);
	} else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) {
		timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration);
	} else {
		bt_dev_err(hdev, "unexpected error");
		return;
	}

	hci_req_sync(hdev, hci_req_add_le_interleaved_scan, 0,
		     HCI_CMD_TIMEOUT, &status);

	/* Don't continue interleaving if it was canceled */
	if (is_interleave_scanning(hdev))
		queue_delayed_work(hdev->req_workqueue,
				   &hdev->interleave_scan, timeout);
}

static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
	struct hci_dev *hdev = req->hdev;

	/* If we're advertising or initiating an LE connection we can't
	 * go ahead and change the random address at this time. This is
	 * because the eventual initiator address used for the
	 * subsequently created connection will be undefined (some
	 * controllers use the new address and others the one we had
	 * when the operation started).
	 *
	 * In this kind of scenario skip the update and let the random
	 * address be updated at the next cycle.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
	    hci_lookup_le_connect(hdev)) {
		bt_dev_dbg(hdev, "Deferring random address update");
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		return;
	}

	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}

void hci_request_setup(struct hci_dev *hdev)
{
	INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work);
}

void hci_request_cancel_all(struct hci_dev *hdev)
{
	hci_cmd_sync_cancel_sync(hdev, ENODEV);

	cancel_interleave_scan(hdev);
}