xref: /openbmc/linux/net/bluetooth/hci_core.c (revision 5af2e235)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2011 ProFUSION Embedded Systems
5 
6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License version 2 as
10    published by the Free Software Foundation;
11 
12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 
21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23    SOFTWARE IS DISCLAIMED.
24 */
25 
26 /* Bluetooth HCI core. */
27 
28 #include <linux/export.h>
29 #include <linux/rfkill.h>
30 #include <linux/debugfs.h>
31 #include <linux/crypto.h>
32 #include <linux/kcov.h>
33 #include <linux/property.h>
34 #include <linux/suspend.h>
35 #include <linux/wait.h>
36 #include <asm/unaligned.h>
37 
38 #include <net/bluetooth/bluetooth.h>
39 #include <net/bluetooth/hci_core.h>
40 #include <net/bluetooth/l2cap.h>
41 #include <net/bluetooth/mgmt.h>
42 
43 #include "hci_request.h"
44 #include "hci_debugfs.h"
45 #include "smp.h"
46 #include "leds.h"
47 #include "msft.h"
48 #include "aosp.h"
49 #include "hci_codec.h"
50 
51 static void hci_rx_work(struct work_struct *work);
52 static void hci_cmd_work(struct work_struct *work);
53 static void hci_tx_work(struct work_struct *work);
54 
55 /* HCI device list */
56 LIST_HEAD(hci_dev_list);
57 DEFINE_RWLOCK(hci_dev_list_lock);
58 
59 /* HCI callback list */
60 LIST_HEAD(hci_cb_list);
61 DEFINE_MUTEX(hci_cb_list_lock);
62 
63 /* HCI ID Numbering */
64 static DEFINE_IDA(hci_index_ida);
65 
66 static int hci_scan_req(struct hci_request *req, unsigned long opt)
67 {
68 	__u8 scan = opt;
69 
70 	BT_DBG("%s %x", req->hdev->name, scan);
71 
72 	/* Inquiry and Page scans */
73 	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
74 	return 0;
75 }
76 
77 static int hci_auth_req(struct hci_request *req, unsigned long opt)
78 {
79 	__u8 auth = opt;
80 
81 	BT_DBG("%s %x", req->hdev->name, auth);
82 
83 	/* Authentication */
84 	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
85 	return 0;
86 }
87 
88 static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
89 {
90 	__u8 encrypt = opt;
91 
92 	BT_DBG("%s %x", req->hdev->name, encrypt);
93 
94 	/* Encryption */
95 	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
96 	return 0;
97 }
98 
99 static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
100 {
101 	__le16 policy = cpu_to_le16(opt);
102 
103 	BT_DBG("%s %x", req->hdev->name, policy);
104 
105 	/* Default link policy */
106 	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
107 	return 0;
108 }
109 
110 /* Get HCI device by index.
111  * Device is held on return. */
112 struct hci_dev *hci_dev_get(int index)
113 {
114 	struct hci_dev *hdev = NULL, *d;
115 
116 	BT_DBG("%d", index);
117 
118 	if (index < 0)
119 		return NULL;
120 
121 	read_lock(&hci_dev_list_lock);
122 	list_for_each_entry(d, &hci_dev_list, list) {
123 		if (d->id == index) {
124 			hdev = hci_dev_hold(d);
125 			break;
126 		}
127 	}
128 	read_unlock(&hci_dev_list_lock);
129 	return hdev;
130 }
131 
132 /* ---- Inquiry support ---- */
133 
134 bool hci_discovery_active(struct hci_dev *hdev)
135 {
136 	struct discovery_state *discov = &hdev->discovery;
137 
138 	switch (discov->state) {
139 	case DISCOVERY_FINDING:
140 	case DISCOVERY_RESOLVING:
141 		return true;
142 
143 	default:
144 		return false;
145 	}
146 }
147 
148 void hci_discovery_set_state(struct hci_dev *hdev, int state)
149 {
150 	int old_state = hdev->discovery.state;
151 
152 	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
153 
154 	if (old_state == state)
155 		return;
156 
157 	hdev->discovery.state = state;
158 
159 	switch (state) {
160 	case DISCOVERY_STOPPED:
161 		hci_update_passive_scan(hdev);
162 
163 		if (old_state != DISCOVERY_STARTING)
164 			mgmt_discovering(hdev, 0);
165 		break;
166 	case DISCOVERY_STARTING:
167 		break;
168 	case DISCOVERY_FINDING:
169 		mgmt_discovering(hdev, 1);
170 		break;
171 	case DISCOVERY_RESOLVING:
172 		break;
173 	case DISCOVERY_STOPPING:
174 		break;
175 	}
176 }
177 
178 void hci_inquiry_cache_flush(struct hci_dev *hdev)
179 {
180 	struct discovery_state *cache = &hdev->discovery;
181 	struct inquiry_entry *p, *n;
182 
183 	list_for_each_entry_safe(p, n, &cache->all, all) {
184 		list_del(&p->all);
185 		kfree(p);
186 	}
187 
188 	INIT_LIST_HEAD(&cache->unknown);
189 	INIT_LIST_HEAD(&cache->resolve);
190 }
191 
192 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
193 					       bdaddr_t *bdaddr)
194 {
195 	struct discovery_state *cache = &hdev->discovery;
196 	struct inquiry_entry *e;
197 
198 	BT_DBG("cache %p, %pMR", cache, bdaddr);
199 
200 	list_for_each_entry(e, &cache->all, all) {
201 		if (!bacmp(&e->data.bdaddr, bdaddr))
202 			return e;
203 	}
204 
205 	return NULL;
206 }
207 
208 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
209 						       bdaddr_t *bdaddr)
210 {
211 	struct discovery_state *cache = &hdev->discovery;
212 	struct inquiry_entry *e;
213 
214 	BT_DBG("cache %p, %pMR", cache, bdaddr);
215 
216 	list_for_each_entry(e, &cache->unknown, list) {
217 		if (!bacmp(&e->data.bdaddr, bdaddr))
218 			return e;
219 	}
220 
221 	return NULL;
222 }
223 
224 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
225 						       bdaddr_t *bdaddr,
226 						       int state)
227 {
228 	struct discovery_state *cache = &hdev->discovery;
229 	struct inquiry_entry *e;
230 
231 	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
232 
233 	list_for_each_entry(e, &cache->resolve, list) {
234 		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
235 			return e;
236 		if (!bacmp(&e->data.bdaddr, bdaddr))
237 			return e;
238 	}
239 
240 	return NULL;
241 }
242 
243 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
244 				      struct inquiry_entry *ie)
245 {
246 	struct discovery_state *cache = &hdev->discovery;
247 	struct list_head *pos = &cache->resolve;
248 	struct inquiry_entry *p;
249 
250 	list_del(&ie->list);
251 
252 	list_for_each_entry(p, &cache->resolve, list) {
253 		if (p->name_state != NAME_PENDING &&
254 		    abs(p->data.rssi) >= abs(ie->data.rssi))
255 			break;
256 		pos = &p->list;
257 	}
258 
259 	list_add(&ie->list, pos);
260 }
261 
262 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
263 			     bool name_known)
264 {
265 	struct discovery_state *cache = &hdev->discovery;
266 	struct inquiry_entry *ie;
267 	u32 flags = 0;
268 
269 	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
270 
271 	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
272 
273 	if (!data->ssp_mode)
274 		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
275 
276 	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
277 	if (ie) {
278 		if (!ie->data.ssp_mode)
279 			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
280 
281 		if (ie->name_state == NAME_NEEDED &&
282 		    data->rssi != ie->data.rssi) {
283 			ie->data.rssi = data->rssi;
284 			hci_inquiry_cache_update_resolve(hdev, ie);
285 		}
286 
287 		goto update;
288 	}
289 
290 	/* Entry not in the cache. Add new one. */
291 	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
292 	if (!ie) {
293 		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
294 		goto done;
295 	}
296 
297 	list_add(&ie->all, &cache->all);
298 
299 	if (name_known) {
300 		ie->name_state = NAME_KNOWN;
301 	} else {
302 		ie->name_state = NAME_NOT_KNOWN;
303 		list_add(&ie->list, &cache->unknown);
304 	}
305 
306 update:
307 	if (name_known && ie->name_state != NAME_KNOWN &&
308 	    ie->name_state != NAME_PENDING) {
309 		ie->name_state = NAME_KNOWN;
310 		list_del(&ie->list);
311 	}
312 
313 	memcpy(&ie->data, data, sizeof(*data));
314 	ie->timestamp = jiffies;
315 	cache->timestamp = jiffies;
316 
317 	if (ie->name_state == NAME_NOT_KNOWN)
318 		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
319 
320 done:
321 	return flags;
322 }
323 
324 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
325 {
326 	struct discovery_state *cache = &hdev->discovery;
327 	struct inquiry_info *info = (struct inquiry_info *) buf;
328 	struct inquiry_entry *e;
329 	int copied = 0;
330 
331 	list_for_each_entry(e, &cache->all, all) {
332 		struct inquiry_data *data = &e->data;
333 
334 		if (copied >= num)
335 			break;
336 
337 		bacpy(&info->bdaddr, &data->bdaddr);
338 		info->pscan_rep_mode	= data->pscan_rep_mode;
339 		info->pscan_period_mode	= data->pscan_period_mode;
340 		info->pscan_mode	= data->pscan_mode;
341 		memcpy(info->dev_class, data->dev_class, 3);
342 		info->clock_offset	= data->clock_offset;
343 
344 		info++;
345 		copied++;
346 	}
347 
348 	BT_DBG("cache %p, copied %d", cache, copied);
349 	return copied;
350 }
351 
352 static int hci_inq_req(struct hci_request *req, unsigned long opt)
353 {
354 	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
355 	struct hci_dev *hdev = req->hdev;
356 	struct hci_cp_inquiry cp;
357 
358 	BT_DBG("%s", hdev->name);
359 
360 	if (test_bit(HCI_INQUIRY, &hdev->flags))
361 		return 0;
362 
363 	/* Start Inquiry */
364 	memcpy(&cp.lap, &ir->lap, 3);
365 	cp.length  = ir->length;
366 	cp.num_rsp = ir->num_rsp;
367 	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
368 
369 	return 0;
370 }
371 
372 int hci_inquiry(void __user *arg)
373 {
374 	__u8 __user *ptr = arg;
375 	struct hci_inquiry_req ir;
376 	struct hci_dev *hdev;
377 	int err = 0, do_inquiry = 0, max_rsp;
378 	long timeo;
379 	__u8 *buf;
380 
381 	if (copy_from_user(&ir, ptr, sizeof(ir)))
382 		return -EFAULT;
383 
384 	hdev = hci_dev_get(ir.dev_id);
385 	if (!hdev)
386 		return -ENODEV;
387 
388 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
389 		err = -EBUSY;
390 		goto done;
391 	}
392 
393 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
394 		err = -EOPNOTSUPP;
395 		goto done;
396 	}
397 
398 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
399 		err = -EOPNOTSUPP;
400 		goto done;
401 	}
402 
403 	/* Restrict maximum inquiry length to 60 seconds */
404 	if (ir.length > 60) {
405 		err = -EINVAL;
406 		goto done;
407 	}
408 
409 	hci_dev_lock(hdev);
410 	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
411 	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
412 		hci_inquiry_cache_flush(hdev);
413 		do_inquiry = 1;
414 	}
415 	hci_dev_unlock(hdev);
416 
417 	timeo = ir.length * msecs_to_jiffies(2000);
418 
419 	if (do_inquiry) {
420 		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
421 				   timeo, NULL);
422 		if (err < 0)
423 			goto done;
424 
425 		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
426 		 * cleared). If it is interrupted by a signal, return -EINTR.
427 		 */
428 		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
429 				TASK_INTERRUPTIBLE)) {
430 			err = -EINTR;
431 			goto done;
432 		}
433 	}
434 
435 	/* For an unlimited number of responses, use a buffer with
436 	 * 255 entries
437 	 */
438 	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
439 
440 	/* cache_dump can't sleep. Therefore we allocate a temporary buffer
441 	 * and then copy it to user space.
442 	 */
443 	buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
444 	if (!buf) {
445 		err = -ENOMEM;
446 		goto done;
447 	}
448 
449 	hci_dev_lock(hdev);
450 	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
451 	hci_dev_unlock(hdev);
452 
453 	BT_DBG("num_rsp %d", ir.num_rsp);
454 
455 	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
456 		ptr += sizeof(ir);
457 		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
458 				 ir.num_rsp))
459 			err = -EFAULT;
460 	} else
461 		err = -EFAULT;
462 
463 	kfree(buf);
464 
465 done:
466 	hci_dev_put(hdev);
467 	return err;
468 }
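
/* Usage sketch (annotation, not part of the kernel build): hci_inquiry() above
 * backs the HCIINQUIRY ioctl on a raw HCI socket.  A minimal userspace caller
 * might look like the following; it assumes the BlueZ userspace headers
 * <bluetooth/bluetooth.h> and <bluetooth/hci.h> for struct hci_inquiry_req,
 * struct inquiry_info, IREQ_CACHE_FLUSH and HCIINQUIRY, and omits error
 * handling:
 *
 *	#include <sys/ioctl.h>
 *	#include <bluetooth/bluetooth.h>
 *	#include <bluetooth/hci.h>
 *
 *	unsigned char buf[sizeof(struct hci_inquiry_req) +
 *			  8 * sizeof(struct inquiry_info)] = { 0 };
 *	struct hci_inquiry_req *ir = (struct hci_inquiry_req *)buf;
 *	int sk = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	ir->dev_id  = 0;			// hci0
 *	ir->flags   = IREQ_CACHE_FLUSH;		// drop stale cache entries
 *	ir->lap[0]  = 0x33;			// GIAC 0x9e8b33
 *	ir->lap[1]  = 0x8b;
 *	ir->lap[2]  = 0x9e;
 *	ir->length  = 8;			// inquiry length, capped at 60 above
 *	ir->num_rsp = 8;			// matches the buffer sizing
 *
 *	if (ioctl(sk, HCIINQUIRY, (unsigned long)buf) == 0) {
 *		struct inquiry_info *info = (struct inquiry_info *)(ir + 1);
 *		// ir->num_rsp now holds the number of entries copied back
 *	}
 */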
469 
470 static int hci_dev_do_open(struct hci_dev *hdev)
471 {
472 	int ret = 0;
473 
474 	BT_DBG("%s %p", hdev->name, hdev);
475 
476 	hci_req_sync_lock(hdev);
477 
478 	ret = hci_dev_open_sync(hdev);
479 
480 	hci_req_sync_unlock(hdev);
481 	return ret;
482 }
483 
484 /* ---- HCI ioctl helpers ---- */
485 
486 int hci_dev_open(__u16 dev)
487 {
488 	struct hci_dev *hdev;
489 	int err;
490 
491 	hdev = hci_dev_get(dev);
492 	if (!hdev)
493 		return -ENODEV;
494 
495 	/* Devices that are marked as unconfigured can only be powered
496 	 * up as user channel. Trying to bring them up as normal devices
497 	 * will result in a failure. Only user channel operation is
498 	 * possible.
499 	 *
500 	 * When this function is called for a user channel, the flag
501 	 * HCI_USER_CHANNEL will be set first before attempting to
502 	 * open the device.
503 	 */
504 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
505 	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
506 		err = -EOPNOTSUPP;
507 		goto done;
508 	}
509 
510 	/* We need to ensure that no other power on/off work is pending
511 	 * before proceeding to call hci_dev_do_open. This is
512 	 * particularly important if the setup procedure has not yet
513 	 * completed.
514 	 */
515 	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
516 		cancel_delayed_work(&hdev->power_off);
517 
518 	/* After this call it is guaranteed that the setup procedure
519 	 * has finished. This means that error conditions like RFKILL
520 	 * or no valid public or static random address apply.
521 	 */
522 	flush_workqueue(hdev->req_workqueue);
523 
524 	/* For controllers not using the management interface and that
525 	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
526 	 * so that pairing works for them. Once the management interface
527 	 * is in use this bit will be cleared again and userspace has
528 	 * to explicitly enable it.
529 	 */
530 	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
531 	    !hci_dev_test_flag(hdev, HCI_MGMT))
532 		hci_dev_set_flag(hdev, HCI_BONDABLE);
533 
534 	err = hci_dev_do_open(hdev);
535 
536 done:
537 	hci_dev_put(hdev);
538 	return err;
539 }
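
/* Usage sketch (annotation, not part of the kernel build): hci_dev_open() and
 * hci_dev_close() are reached from userspace through the HCIDEVUP and
 * HCIDEVDOWN ioctls on a raw HCI socket, which require CAP_NET_ADMIN.  A
 * minimal caller, assuming the BlueZ userspace headers for the ioctl numbers;
 * an already-up controller is typically reported as EALREADY:
 *
 *	int sk = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *	int dev_id = 0;				// hci0
 *
 *	if (ioctl(sk, HCIDEVUP, dev_id) < 0 && errno != EALREADY)
 *		perror("HCIDEVUP");
 *	// ... use the controller ...
 *	ioctl(sk, HCIDEVDOWN, dev_id);
 */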
540 
541 int hci_dev_do_close(struct hci_dev *hdev)
542 {
543 	int err;
544 
545 	BT_DBG("%s %p", hdev->name, hdev);
546 
547 	hci_req_sync_lock(hdev);
548 
549 	err = hci_dev_close_sync(hdev);
550 
551 	hci_req_sync_unlock(hdev);
552 
553 	return err;
554 }
555 
556 int hci_dev_close(__u16 dev)
557 {
558 	struct hci_dev *hdev;
559 	int err;
560 
561 	hdev = hci_dev_get(dev);
562 	if (!hdev)
563 		return -ENODEV;
564 
565 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
566 		err = -EBUSY;
567 		goto done;
568 	}
569 
570 	cancel_work_sync(&hdev->power_on);
571 	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
572 		cancel_delayed_work(&hdev->power_off);
573 
574 	err = hci_dev_do_close(hdev);
575 
576 done:
577 	hci_dev_put(hdev);
578 	return err;
579 }
580 
581 static int hci_dev_do_reset(struct hci_dev *hdev)
582 {
583 	int ret;
584 
585 	BT_DBG("%s %p", hdev->name, hdev);
586 
587 	hci_req_sync_lock(hdev);
588 
589 	/* Drop queues */
590 	skb_queue_purge(&hdev->rx_q);
591 	skb_queue_purge(&hdev->cmd_q);
592 
593 	/* Cancel these to avoid queueing non-chained pending work */
594 	hci_dev_set_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
595 	/* Wait for
596 	 *
597 	 *    if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
598 	 *        queue_delayed_work(&hdev->{cmd,ncmd}_timer)
599 	 *
600 	 * inside RCU section to see the flag or complete scheduling.
601 	 */
602 	synchronize_rcu();
603 	/* Explicitly cancel works in case scheduled after setting the flag. */
604 	cancel_delayed_work(&hdev->cmd_timer);
605 	cancel_delayed_work(&hdev->ncmd_timer);
606 
607 	/* Avoid potential lockdep warnings from the *_flush() calls by
608 	 * ensuring the workqueue is empty up front.
609 	 */
610 	drain_workqueue(hdev->workqueue);
611 
612 	hci_dev_lock(hdev);
613 	hci_inquiry_cache_flush(hdev);
614 	hci_conn_hash_flush(hdev);
615 	hci_dev_unlock(hdev);
616 
617 	if (hdev->flush)
618 		hdev->flush(hdev);
619 
620 	hci_dev_clear_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
621 
622 	atomic_set(&hdev->cmd_cnt, 1);
623 	hdev->acl_cnt = 0;
624 	hdev->sco_cnt = 0;
625 	hdev->le_cnt = 0;
626 	hdev->iso_cnt = 0;
627 
628 	ret = hci_reset_sync(hdev);
629 
630 	hci_req_sync_unlock(hdev);
631 	return ret;
632 }
633 
634 int hci_dev_reset(__u16 dev)
635 {
636 	struct hci_dev *hdev;
637 	int err;
638 
639 	hdev = hci_dev_get(dev);
640 	if (!hdev)
641 		return -ENODEV;
642 
643 	if (!test_bit(HCI_UP, &hdev->flags)) {
644 		err = -ENETDOWN;
645 		goto done;
646 	}
647 
648 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
649 		err = -EBUSY;
650 		goto done;
651 	}
652 
653 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
654 		err = -EOPNOTSUPP;
655 		goto done;
656 	}
657 
658 	err = hci_dev_do_reset(hdev);
659 
660 done:
661 	hci_dev_put(hdev);
662 	return err;
663 }
664 
665 int hci_dev_reset_stat(__u16 dev)
666 {
667 	struct hci_dev *hdev;
668 	int ret = 0;
669 
670 	hdev = hci_dev_get(dev);
671 	if (!hdev)
672 		return -ENODEV;
673 
674 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
675 		ret = -EBUSY;
676 		goto done;
677 	}
678 
679 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
680 		ret = -EOPNOTSUPP;
681 		goto done;
682 	}
683 
684 	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
685 
686 done:
687 	hci_dev_put(hdev);
688 	return ret;
689 }
690 
691 static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan)
692 {
693 	bool conn_changed, discov_changed;
694 
695 	BT_DBG("%s scan 0x%02x", hdev->name, scan);
696 
697 	if ((scan & SCAN_PAGE))
698 		conn_changed = !hci_dev_test_and_set_flag(hdev,
699 							  HCI_CONNECTABLE);
700 	else
701 		conn_changed = hci_dev_test_and_clear_flag(hdev,
702 							   HCI_CONNECTABLE);
703 
704 	if ((scan & SCAN_INQUIRY)) {
705 		discov_changed = !hci_dev_test_and_set_flag(hdev,
706 							    HCI_DISCOVERABLE);
707 	} else {
708 		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
709 		discov_changed = hci_dev_test_and_clear_flag(hdev,
710 							     HCI_DISCOVERABLE);
711 	}
712 
713 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
714 		return;
715 
716 	if (conn_changed || discov_changed) {
717 		/* In case this was disabled through mgmt */
718 		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
719 
720 		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
721 			hci_update_adv_data(hdev, hdev->cur_adv_instance);
722 
723 		mgmt_new_settings(hdev);
724 	}
725 }
726 
727 int hci_dev_cmd(unsigned int cmd, void __user *arg)
728 {
729 	struct hci_dev *hdev;
730 	struct hci_dev_req dr;
731 	int err = 0;
732 
733 	if (copy_from_user(&dr, arg, sizeof(dr)))
734 		return -EFAULT;
735 
736 	hdev = hci_dev_get(dr.dev_id);
737 	if (!hdev)
738 		return -ENODEV;
739 
740 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
741 		err = -EBUSY;
742 		goto done;
743 	}
744 
745 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
746 		err = -EOPNOTSUPP;
747 		goto done;
748 	}
749 
750 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
751 		err = -EOPNOTSUPP;
752 		goto done;
753 	}
754 
755 	switch (cmd) {
756 	case HCISETAUTH:
757 		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
758 				   HCI_INIT_TIMEOUT, NULL);
759 		break;
760 
761 	case HCISETENCRYPT:
762 		if (!lmp_encrypt_capable(hdev)) {
763 			err = -EOPNOTSUPP;
764 			break;
765 		}
766 
767 		if (!test_bit(HCI_AUTH, &hdev->flags)) {
768 			/* Auth must be enabled first */
769 			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
770 					   HCI_INIT_TIMEOUT, NULL);
771 			if (err)
772 				break;
773 		}
774 
775 		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
776 				   HCI_INIT_TIMEOUT, NULL);
777 		break;
778 
779 	case HCISETSCAN:
780 		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
781 				   HCI_INIT_TIMEOUT, NULL);
782 
783 		/* Ensure that the connectable and discoverable states
784 		 * get correctly modified as this was a non-mgmt change.
785 		 */
786 		if (!err)
787 			hci_update_passive_scan_state(hdev, dr.dev_opt);
788 		break;
789 
790 	case HCISETLINKPOL:
791 		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
792 				   HCI_INIT_TIMEOUT, NULL);
793 		break;
794 
795 	case HCISETLINKMODE:
796 		hdev->link_mode = ((__u16) dr.dev_opt) &
797 					(HCI_LM_MASTER | HCI_LM_ACCEPT);
798 		break;
799 
800 	case HCISETPTYPE:
801 		if (hdev->pkt_type == (__u16) dr.dev_opt)
802 			break;
803 
804 		hdev->pkt_type = (__u16) dr.dev_opt;
805 		mgmt_phy_configuration_changed(hdev, NULL);
806 		break;
807 
808 	case HCISETACLMTU:
809 		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
810 		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
811 		break;
812 
813 	case HCISETSCOMTU:
814 		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
815 		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
816 		break;
817 
818 	default:
819 		err = -EINVAL;
820 		break;
821 	}
822 
823 done:
824 	hci_dev_put(hdev);
825 	return err;
826 }
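
/* Usage sketch (annotation, not part of the kernel build): hci_dev_cmd() above
 * serves the legacy HCISET* ioctls.  For example, making a controller both
 * connectable and discoverable via HCISETSCAN; struct hci_dev_req and the
 * SCAN_* constants are assumed to come from the BlueZ userspace headers:
 *
 *	struct hci_dev_req dr = {
 *		.dev_id  = 0,				// hci0
 *		.dev_opt = SCAN_PAGE | SCAN_INQUIRY,	// page + inquiry scan
 *	};
 *	int sk = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	if (ioctl(sk, HCISETSCAN, (unsigned long)&dr) < 0)
 *		perror("HCISETSCAN");
 */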
827 
828 int hci_get_dev_list(void __user *arg)
829 {
830 	struct hci_dev *hdev;
831 	struct hci_dev_list_req *dl;
832 	struct hci_dev_req *dr;
833 	int n = 0, size, err;
834 	__u16 dev_num;
835 
836 	if (get_user(dev_num, (__u16 __user *) arg))
837 		return -EFAULT;
838 
839 	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
840 		return -EINVAL;
841 
842 	size = sizeof(*dl) + dev_num * sizeof(*dr);
843 
844 	dl = kzalloc(size, GFP_KERNEL);
845 	if (!dl)
846 		return -ENOMEM;
847 
848 	dr = dl->dev_req;
849 
850 	read_lock(&hci_dev_list_lock);
851 	list_for_each_entry(hdev, &hci_dev_list, list) {
852 		unsigned long flags = hdev->flags;
853 
854 		/* When auto-off is configured, the transport is running,
855 		 * but in that case still indicate that the
856 		 * device is actually down.
857 		 */
858 		if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
859 			flags &= ~BIT(HCI_UP);
860 
861 		(dr + n)->dev_id  = hdev->id;
862 		(dr + n)->dev_opt = flags;
863 
864 		if (++n >= dev_num)
865 			break;
866 	}
867 	read_unlock(&hci_dev_list_lock);
868 
869 	dl->dev_num = n;
870 	size = sizeof(*dl) + n * sizeof(*dr);
871 
872 	err = copy_to_user(arg, dl, size);
873 	kfree(dl);
874 
875 	return err ? -EFAULT : 0;
876 }
877 
878 int hci_get_dev_info(void __user *arg)
879 {
880 	struct hci_dev *hdev;
881 	struct hci_dev_info di;
882 	unsigned long flags;
883 	int err = 0;
884 
885 	if (copy_from_user(&di, arg, sizeof(di)))
886 		return -EFAULT;
887 
888 	hdev = hci_dev_get(di.dev_id);
889 	if (!hdev)
890 		return -ENODEV;
891 
892 	/* When auto-off is configured, the transport is running,
893 	 * but in that case still indicate that the
894 	 * device is actually down.
895 	 */
896 	if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
897 		flags = hdev->flags & ~BIT(HCI_UP);
898 	else
899 		flags = hdev->flags;
900 
901 	strscpy(di.name, hdev->name, sizeof(di.name));
902 	di.bdaddr   = hdev->bdaddr;
903 	di.type     = (hdev->bus & 0x0f);
904 	di.flags    = flags;
905 	di.pkt_type = hdev->pkt_type;
906 	if (lmp_bredr_capable(hdev)) {
907 		di.acl_mtu  = hdev->acl_mtu;
908 		di.acl_pkts = hdev->acl_pkts;
909 		di.sco_mtu  = hdev->sco_mtu;
910 		di.sco_pkts = hdev->sco_pkts;
911 	} else {
912 		di.acl_mtu  = hdev->le_mtu;
913 		di.acl_pkts = hdev->le_pkts;
914 		di.sco_mtu  = 0;
915 		di.sco_pkts = 0;
916 	}
917 	di.link_policy = hdev->link_policy;
918 	di.link_mode   = hdev->link_mode;
919 
920 	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
921 	memcpy(&di.features, &hdev->features, sizeof(di.features));
922 
923 	if (copy_to_user(arg, &di, sizeof(di)))
924 		err = -EFAULT;
925 
926 	hci_dev_put(hdev);
927 
928 	return err;
929 }
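
/* Usage sketch (annotation, not part of the kernel build): the HCIGETDEVINFO
 * ioctl handled by hci_get_dev_info() above fills a struct hci_dev_info for
 * the dev_id supplied by the caller.  The struct definition and the ba2str()
 * address-formatting helper are assumed to come from the BlueZ userspace
 * headers/library:
 *
 *	struct hci_dev_info di = { .dev_id = 0 };	// hci0
 *	char addr[18];
 *	int sk = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	if (ioctl(sk, HCIGETDEVINFO, (void *)&di) == 0) {
 *		ba2str(&di.bdaddr, addr);
 *		printf("%s %s acl_mtu %u\n", di.name, addr, di.acl_mtu);
 *	}
 */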
930 
931 /* ---- Interface to HCI drivers ---- */
932 
933 static int hci_rfkill_set_block(void *data, bool blocked)
934 {
935 	struct hci_dev *hdev = data;
936 
937 	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
938 
939 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
940 		return -EBUSY;
941 
942 	if (blocked) {
943 		hci_dev_set_flag(hdev, HCI_RFKILLED);
944 		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
945 		    !hci_dev_test_flag(hdev, HCI_CONFIG))
946 			hci_dev_do_close(hdev);
947 	} else {
948 		hci_dev_clear_flag(hdev, HCI_RFKILLED);
949 	}
950 
951 	return 0;
952 }
953 
954 static const struct rfkill_ops hci_rfkill_ops = {
955 	.set_block = hci_rfkill_set_block,
956 };
957 
958 static void hci_power_on(struct work_struct *work)
959 {
960 	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
961 	int err;
962 
963 	BT_DBG("%s", hdev->name);
964 
965 	if (test_bit(HCI_UP, &hdev->flags) &&
966 	    hci_dev_test_flag(hdev, HCI_MGMT) &&
967 	    hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
968 		cancel_delayed_work(&hdev->power_off);
969 		err = hci_powered_update_sync(hdev);
970 		mgmt_power_on(hdev, err);
971 		return;
972 	}
973 
974 	err = hci_dev_do_open(hdev);
975 	if (err < 0) {
976 		hci_dev_lock(hdev);
977 		mgmt_set_powered_failed(hdev, err);
978 		hci_dev_unlock(hdev);
979 		return;
980 	}
981 
982 	/* During the HCI setup phase, a few error conditions are
983 	 * ignored and they need to be checked now. If they are still
984 	 * valid, it is important to turn the device back off.
985 	 */
986 	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
987 	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
988 	    (!bacmp(&hdev->bdaddr, BDADDR_ANY) &&
989 	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
990 		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
991 		hci_dev_do_close(hdev);
992 	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
993 		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
994 				   HCI_AUTO_OFF_TIMEOUT);
995 	}
996 
997 	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
998 		/* For unconfigured devices, set the HCI_RAW flag
999 		 * so that userspace can easily identify them.
1000 		 */
1001 		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1002 			set_bit(HCI_RAW, &hdev->flags);
1003 
1004 		/* For fully configured devices, this will send
1005 		 * the Index Added event. For unconfigured devices,
1006 		 * it will send an Unconfigured Index Added event.
1007 		 *
1008 		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
1009 		 * and no event will be send.
1010 		 * and no event will be sent.
1011 		mgmt_index_added(hdev);
1012 	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
1013 		/* Once the controller is configured, it is important
1014 		 * to clear the HCI_RAW flag.
1015 		 */
1016 		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1017 			clear_bit(HCI_RAW, &hdev->flags);
1018 
1019 		/* Powering on the controller with HCI_CONFIG set only
1020 		 * happens with the transition from unconfigured to
1021 		 * configured. This will send the Index Added event.
1022 		 */
1023 		mgmt_index_added(hdev);
1024 	}
1025 }
1026 
1027 static void hci_power_off(struct work_struct *work)
1028 {
1029 	struct hci_dev *hdev = container_of(work, struct hci_dev,
1030 					    power_off.work);
1031 
1032 	BT_DBG("%s", hdev->name);
1033 
1034 	hci_dev_do_close(hdev);
1035 }
1036 
1037 static void hci_error_reset(struct work_struct *work)
1038 {
1039 	struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
1040 
1041 	hci_dev_hold(hdev);
1042 	BT_DBG("%s", hdev->name);
1043 
1044 	if (hdev->hw_error)
1045 		hdev->hw_error(hdev, hdev->hw_error_code);
1046 	else
1047 		bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
1048 
1049 	if (!hci_dev_do_close(hdev))
1050 		hci_dev_do_open(hdev);
1051 
1052 	hci_dev_put(hdev);
1053 }
1054 
1055 void hci_uuids_clear(struct hci_dev *hdev)
1056 {
1057 	struct bt_uuid *uuid, *tmp;
1058 
1059 	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1060 		list_del(&uuid->list);
1061 		kfree(uuid);
1062 	}
1063 }
1064 
1065 void hci_link_keys_clear(struct hci_dev *hdev)
1066 {
1067 	struct link_key *key, *tmp;
1068 
1069 	list_for_each_entry_safe(key, tmp, &hdev->link_keys, list) {
1070 		list_del_rcu(&key->list);
1071 		kfree_rcu(key, rcu);
1072 	}
1073 }
1074 
1075 void hci_smp_ltks_clear(struct hci_dev *hdev)
1076 {
1077 	struct smp_ltk *k, *tmp;
1078 
1079 	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1080 		list_del_rcu(&k->list);
1081 		kfree_rcu(k, rcu);
1082 	}
1083 }
1084 
1085 void hci_smp_irks_clear(struct hci_dev *hdev)
1086 {
1087 	struct smp_irk *k, *tmp;
1088 
1089 	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
1090 		list_del_rcu(&k->list);
1091 		kfree_rcu(k, rcu);
1092 	}
1093 }
1094 
1095 void hci_blocked_keys_clear(struct hci_dev *hdev)
1096 {
1097 	struct blocked_key *b, *tmp;
1098 
1099 	list_for_each_entry_safe(b, tmp, &hdev->blocked_keys, list) {
1100 		list_del_rcu(&b->list);
1101 		kfree_rcu(b, rcu);
1102 	}
1103 }
1104 
1105 bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
1106 {
1107 	bool blocked = false;
1108 	struct blocked_key *b;
1109 
1110 	rcu_read_lock();
1111 	list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
1112 		if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
1113 			blocked = true;
1114 			break;
1115 		}
1116 	}
1117 
1118 	rcu_read_unlock();
1119 	return blocked;
1120 }
1121 
1122 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1123 {
1124 	struct link_key *k;
1125 
1126 	rcu_read_lock();
1127 	list_for_each_entry_rcu(k, &hdev->link_keys, list) {
1128 		if (bacmp(bdaddr, &k->bdaddr) == 0) {
1129 			rcu_read_unlock();
1130 
1131 			if (hci_is_blocked_key(hdev,
1132 					       HCI_BLOCKED_KEY_TYPE_LINKKEY,
1133 					       k->val)) {
1134 				bt_dev_warn_ratelimited(hdev,
1135 							"Link key blocked for %pMR",
1136 							&k->bdaddr);
1137 				return NULL;
1138 			}
1139 
1140 			return k;
1141 		}
1142 	}
1143 	rcu_read_unlock();
1144 
1145 	return NULL;
1146 }
1147 
1148 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1149 			       u8 key_type, u8 old_key_type)
1150 {
1151 	/* Legacy key */
1152 	if (key_type < 0x03)
1153 		return true;
1154 
1155 	/* Debug keys are insecure so don't store them persistently */
1156 	if (key_type == HCI_LK_DEBUG_COMBINATION)
1157 		return false;
1158 
1159 	/* Changed combination key and there's no previous one */
1160 	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1161 		return false;
1162 
1163 	/* Security mode 3 case */
1164 	if (!conn)
1165 		return true;
1166 
1167 	/* BR/EDR key derived using SC from an LE link */
1168 	if (conn->type == LE_LINK)
1169 		return true;
1170 
1171 	/* Neither local nor remote side had no-bonding as requirement */
1172 	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1173 		return true;
1174 
1175 	/* Local side had dedicated bonding as requirement */
1176 	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1177 		return true;
1178 
1179 	/* Remote side had dedicated bonding as requirement */
1180 	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1181 		return true;
1182 
1183 	/* If none of the above criteria match, then don't store the key
1184 	 * persistently */
1185 	return false;
1186 }
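
/* Worked example of the rules above (annotation, not from the original
 * source): with Secure Simple Pairing, an unauthenticated combination key
 * created while both sides advertised "no bonding" (auth_type and remote_auth
 * 0x00/0x01) falls through every check and is not stored, whereas the same
 * key type is kept as soon as either side asked for dedicated bonding
 * (0x02/0x03) or both sides asked for general bonding (0x04/0x05).  Legacy
 * keys (type < 0x03) and keys derived over an LE link are always stored.
 */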
1187 
1188 static u8 ltk_role(u8 type)
1189 {
1190 	if (type == SMP_LTK)
1191 		return HCI_ROLE_MASTER;
1192 
1193 	return HCI_ROLE_SLAVE;
1194 }
1195 
1196 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1197 			     u8 addr_type, u8 role)
1198 {
1199 	struct smp_ltk *k;
1200 
1201 	rcu_read_lock();
1202 	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
1203 		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
1204 			continue;
1205 
1206 		if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
1207 			rcu_read_unlock();
1208 
1209 			if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
1210 					       k->val)) {
1211 				bt_dev_warn_ratelimited(hdev,
1212 							"LTK blocked for %pMR",
1213 							&k->bdaddr);
1214 				return NULL;
1215 			}
1216 
1217 			return k;
1218 		}
1219 	}
1220 	rcu_read_unlock();
1221 
1222 	return NULL;
1223 }
1224 
1225 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
1226 {
1227 	struct smp_irk *irk_to_return = NULL;
1228 	struct smp_irk *irk;
1229 
1230 	rcu_read_lock();
1231 	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1232 		if (!bacmp(&irk->rpa, rpa)) {
1233 			irk_to_return = irk;
1234 			goto done;
1235 		}
1236 	}
1237 
1238 	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1239 		if (smp_irk_matches(hdev, irk->val, rpa)) {
1240 			bacpy(&irk->rpa, rpa);
1241 			irk_to_return = irk;
1242 			goto done;
1243 		}
1244 	}
1245 
1246 done:
1247 	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
1248 						irk_to_return->val)) {
1249 		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
1250 					&irk_to_return->bdaddr);
1251 		irk_to_return = NULL;
1252 	}
1253 
1254 	rcu_read_unlock();
1255 
1256 	return irk_to_return;
1257 }
1258 
1259 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1260 				     u8 addr_type)
1261 {
1262 	struct smp_irk *irk_to_return = NULL;
1263 	struct smp_irk *irk;
1264 
1265 	/* Identity Address must be public or static random */
1266 	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
1267 		return NULL;
1268 
1269 	rcu_read_lock();
1270 	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1271 		if (addr_type == irk->addr_type &&
1272 		    bacmp(bdaddr, &irk->bdaddr) == 0) {
1273 			irk_to_return = irk;
1274 			goto done;
1275 		}
1276 	}
1277 
1278 done:
1279 
1280 	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
1281 						irk_to_return->val)) {
1282 		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
1283 					&irk_to_return->bdaddr);
1284 		irk_to_return = NULL;
1285 	}
1286 
1287 	rcu_read_unlock();
1288 
1289 	return irk_to_return;
1290 }
1291 
1292 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
1293 				  bdaddr_t *bdaddr, u8 *val, u8 type,
1294 				  u8 pin_len, bool *persistent)
1295 {
1296 	struct link_key *key, *old_key;
1297 	u8 old_key_type;
1298 
1299 	old_key = hci_find_link_key(hdev, bdaddr);
1300 	if (old_key) {
1301 		old_key_type = old_key->type;
1302 		key = old_key;
1303 	} else {
1304 		old_key_type = conn ? conn->key_type : 0xff;
1305 		key = kzalloc(sizeof(*key), GFP_KERNEL);
1306 		if (!key)
1307 			return NULL;
1308 		list_add_rcu(&key->list, &hdev->link_keys);
1309 	}
1310 
1311 	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
1312 
1313 	/* Some buggy controller combinations generate a changed
1314 	 * combination key for legacy pairing even when there's no
1315 	 * previous key */
1316 	if (type == HCI_LK_CHANGED_COMBINATION &&
1317 	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1318 		type = HCI_LK_COMBINATION;
1319 		if (conn)
1320 			conn->key_type = type;
1321 	}
1322 
1323 	bacpy(&key->bdaddr, bdaddr);
1324 	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1325 	key->pin_len = pin_len;
1326 
1327 	if (type == HCI_LK_CHANGED_COMBINATION)
1328 		key->type = old_key_type;
1329 	else
1330 		key->type = type;
1331 
1332 	if (persistent)
1333 		*persistent = hci_persistent_key(hdev, conn, type,
1334 						 old_key_type);
1335 
1336 	return key;
1337 }
1338 
1339 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1340 			    u8 addr_type, u8 type, u8 authenticated,
1341 			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
1342 {
1343 	struct smp_ltk *key, *old_key;
1344 	u8 role = ltk_role(type);
1345 
1346 	old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
1347 	if (old_key)
1348 		key = old_key;
1349 	else {
1350 		key = kzalloc(sizeof(*key), GFP_KERNEL);
1351 		if (!key)
1352 			return NULL;
1353 		list_add_rcu(&key->list, &hdev->long_term_keys);
1354 	}
1355 
1356 	bacpy(&key->bdaddr, bdaddr);
1357 	key->bdaddr_type = addr_type;
1358 	memcpy(key->val, tk, sizeof(key->val));
1359 	key->authenticated = authenticated;
1360 	key->ediv = ediv;
1361 	key->rand = rand;
1362 	key->enc_size = enc_size;
1363 	key->type = type;
1364 
1365 	return key;
1366 }
1367 
1368 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1369 			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
1370 {
1371 	struct smp_irk *irk;
1372 
1373 	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
1374 	if (!irk) {
1375 		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
1376 		if (!irk)
1377 			return NULL;
1378 
1379 		bacpy(&irk->bdaddr, bdaddr);
1380 		irk->addr_type = addr_type;
1381 
1382 		list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
1383 	}
1384 
1385 	memcpy(irk->val, val, 16);
1386 	bacpy(&irk->rpa, rpa);
1387 
1388 	return irk;
1389 }
1390 
1391 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1392 {
1393 	struct link_key *key;
1394 
1395 	key = hci_find_link_key(hdev, bdaddr);
1396 	if (!key)
1397 		return -ENOENT;
1398 
1399 	BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1400 
1401 	list_del_rcu(&key->list);
1402 	kfree_rcu(key, rcu);
1403 
1404 	return 0;
1405 }
1406 
1407 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
1408 {
1409 	struct smp_ltk *k, *tmp;
1410 	int removed = 0;
1411 
1412 	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1413 		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
1414 			continue;
1415 
1416 		BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1417 
1418 		list_del_rcu(&k->list);
1419 		kfree_rcu(k, rcu);
1420 		removed++;
1421 	}
1422 
1423 	return removed ? 0 : -ENOENT;
1424 }
1425 
1426 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
1427 {
1428 	struct smp_irk *k, *tmp;
1429 
1430 	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
1431 		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
1432 			continue;
1433 
1434 		BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1435 
1436 		list_del_rcu(&k->list);
1437 		kfree_rcu(k, rcu);
1438 	}
1439 }
1440 
1441 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1442 {
1443 	struct smp_ltk *k;
1444 	struct smp_irk *irk;
1445 	u8 addr_type;
1446 
1447 	if (type == BDADDR_BREDR) {
1448 		if (hci_find_link_key(hdev, bdaddr))
1449 			return true;
1450 		return false;
1451 	}
1452 
1453 	/* Convert to HCI addr type which struct smp_ltk uses */
1454 	if (type == BDADDR_LE_PUBLIC)
1455 		addr_type = ADDR_LE_DEV_PUBLIC;
1456 	else
1457 		addr_type = ADDR_LE_DEV_RANDOM;
1458 
1459 	irk = hci_get_irk(hdev, bdaddr, addr_type);
1460 	if (irk) {
1461 		bdaddr = &irk->bdaddr;
1462 		addr_type = irk->addr_type;
1463 	}
1464 
1465 	rcu_read_lock();
1466 	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
1467 		if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
1468 			rcu_read_unlock();
1469 			return true;
1470 		}
1471 	}
1472 	rcu_read_unlock();
1473 
1474 	return false;
1475 }
1476 
1477 /* HCI command timer function */
1478 static void hci_cmd_timeout(struct work_struct *work)
1479 {
1480 	struct hci_dev *hdev = container_of(work, struct hci_dev,
1481 					    cmd_timer.work);
1482 
1483 	if (hdev->req_skb) {
1484 		u16 opcode = hci_skb_opcode(hdev->req_skb);
1485 
1486 		bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
1487 
1488 		hci_cmd_sync_cancel_sync(hdev, ETIMEDOUT);
1489 	} else {
1490 		bt_dev_err(hdev, "command tx timeout");
1491 	}
1492 
1493 	if (hdev->cmd_timeout)
1494 		hdev->cmd_timeout(hdev);
1495 
1496 	atomic_set(&hdev->cmd_cnt, 1);
1497 	queue_work(hdev->workqueue, &hdev->cmd_work);
1498 }
1499 
1500 /* HCI ncmd timer function */
1501 static void hci_ncmd_timeout(struct work_struct *work)
1502 {
1503 	struct hci_dev *hdev = container_of(work, struct hci_dev,
1504 					    ncmd_timer.work);
1505 
1506 	bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0");
1507 
1508 	/* During HCI_INIT phase no events can be injected if the ncmd timer
1509 	 * triggers since the procedure has its own timeout handling.
1510 	 */
1511 	if (test_bit(HCI_INIT, &hdev->flags))
1512 		return;
1513 
1514 	/* This is an irrecoverable state, inject hardware error event */
1515 	hci_reset_dev(hdev);
1516 }
1517 
1518 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1519 					  bdaddr_t *bdaddr, u8 bdaddr_type)
1520 {
1521 	struct oob_data *data;
1522 
1523 	list_for_each_entry(data, &hdev->remote_oob_data, list) {
1524 		if (bacmp(bdaddr, &data->bdaddr) != 0)
1525 			continue;
1526 		if (data->bdaddr_type != bdaddr_type)
1527 			continue;
1528 		return data;
1529 	}
1530 
1531 	return NULL;
1532 }
1533 
1534 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
1535 			       u8 bdaddr_type)
1536 {
1537 	struct oob_data *data;
1538 
1539 	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
1540 	if (!data)
1541 		return -ENOENT;
1542 
1543 	BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
1544 
1545 	list_del(&data->list);
1546 	kfree(data);
1547 
1548 	return 0;
1549 }
1550 
1551 void hci_remote_oob_data_clear(struct hci_dev *hdev)
1552 {
1553 	struct oob_data *data, *n;
1554 
1555 	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1556 		list_del(&data->list);
1557 		kfree(data);
1558 	}
1559 }
1560 
1561 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
1562 			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
1563 			    u8 *hash256, u8 *rand256)
1564 {
1565 	struct oob_data *data;
1566 
1567 	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
1568 	if (!data) {
1569 		data = kmalloc(sizeof(*data), GFP_KERNEL);
1570 		if (!data)
1571 			return -ENOMEM;
1572 
1573 		bacpy(&data->bdaddr, bdaddr);
1574 		data->bdaddr_type = bdaddr_type;
1575 		list_add(&data->list, &hdev->remote_oob_data);
1576 	}
1577 
1578 	if (hash192 && rand192) {
1579 		memcpy(data->hash192, hash192, sizeof(data->hash192));
1580 		memcpy(data->rand192, rand192, sizeof(data->rand192));
1581 		if (hash256 && rand256)
1582 			data->present = 0x03;
1583 	} else {
1584 		memset(data->hash192, 0, sizeof(data->hash192));
1585 		memset(data->rand192, 0, sizeof(data->rand192));
1586 		if (hash256 && rand256)
1587 			data->present = 0x02;
1588 		else
1589 			data->present = 0x00;
1590 	}
1591 
1592 	if (hash256 && rand256) {
1593 		memcpy(data->hash256, hash256, sizeof(data->hash256));
1594 		memcpy(data->rand256, rand256, sizeof(data->rand256));
1595 	} else {
1596 		memset(data->hash256, 0, sizeof(data->hash256));
1597 		memset(data->rand256, 0, sizeof(data->rand256));
1598 		if (hash192 && rand192)
1599 			data->present = 0x01;
1600 	}
1601 
1602 	BT_DBG("%s for %pMR", hdev->name, bdaddr);
1603 
1604 	return 0;
1605 }
1606 
1607 /* This function requires the caller holds hdev->lock */
1608 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
1609 {
1610 	struct adv_info *adv_instance;
1611 
1612 	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
1613 		if (adv_instance->instance == instance)
1614 			return adv_instance;
1615 	}
1616 
1617 	return NULL;
1618 }
1619 
1620 /* This function requires the caller holds hdev->lock */
1621 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
1622 {
1623 	struct adv_info *cur_instance;
1624 
1625 	cur_instance = hci_find_adv_instance(hdev, instance);
1626 	if (!cur_instance)
1627 		return NULL;
1628 
1629 	if (cur_instance == list_last_entry(&hdev->adv_instances,
1630 					    struct adv_info, list))
1631 		return list_first_entry(&hdev->adv_instances,
1632 						 struct adv_info, list);
1633 	else
1634 		return list_next_entry(cur_instance, list);
1635 }
1636 
1637 /* This function requires the caller holds hdev->lock */
1638 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
1639 {
1640 	struct adv_info *adv_instance;
1641 
1642 	adv_instance = hci_find_adv_instance(hdev, instance);
1643 	if (!adv_instance)
1644 		return -ENOENT;
1645 
1646 	BT_DBG("%s removing instance %d", hdev->name, instance);
1647 
1648 	if (hdev->cur_adv_instance == instance) {
1649 		if (hdev->adv_instance_timeout) {
1650 			cancel_delayed_work(&hdev->adv_instance_expire);
1651 			hdev->adv_instance_timeout = 0;
1652 		}
1653 		hdev->cur_adv_instance = 0x00;
1654 	}
1655 
1656 	cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1657 
1658 	list_del(&adv_instance->list);
1659 	kfree(adv_instance);
1660 
1661 	hdev->adv_instance_cnt--;
1662 
1663 	return 0;
1664 }
1665 
1666 void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
1667 {
1668 	struct adv_info *adv_instance, *n;
1669 
1670 	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
1671 		adv_instance->rpa_expired = rpa_expired;
1672 }
1673 
1674 /* This function requires the caller holds hdev->lock */
1675 void hci_adv_instances_clear(struct hci_dev *hdev)
1676 {
1677 	struct adv_info *adv_instance, *n;
1678 
1679 	if (hdev->adv_instance_timeout) {
1680 		cancel_delayed_work(&hdev->adv_instance_expire);
1681 		hdev->adv_instance_timeout = 0;
1682 	}
1683 
1684 	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
1685 		cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1686 		list_del(&adv_instance->list);
1687 		kfree(adv_instance);
1688 	}
1689 
1690 	hdev->adv_instance_cnt = 0;
1691 	hdev->cur_adv_instance = 0x00;
1692 }
1693 
1694 static void adv_instance_rpa_expired(struct work_struct *work)
1695 {
1696 	struct adv_info *adv_instance = container_of(work, struct adv_info,
1697 						     rpa_expired_cb.work);
1698 
1699 	BT_DBG("");
1700 
1701 	adv_instance->rpa_expired = true;
1702 }
1703 
1704 /* This function requires the caller holds hdev->lock */
1705 struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance,
1706 				      u32 flags, u16 adv_data_len, u8 *adv_data,
1707 				      u16 scan_rsp_len, u8 *scan_rsp_data,
1708 				      u16 timeout, u16 duration, s8 tx_power,
1709 				      u32 min_interval, u32 max_interval,
1710 				      u8 mesh_handle)
1711 {
1712 	struct adv_info *adv;
1713 
1714 	adv = hci_find_adv_instance(hdev, instance);
1715 	if (adv) {
1716 		memset(adv->adv_data, 0, sizeof(adv->adv_data));
1717 		memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
1718 		memset(adv->per_adv_data, 0, sizeof(adv->per_adv_data));
1719 	} else {
1720 		if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
1721 		    instance < 1 || instance > hdev->le_num_of_adv_sets + 1)
1722 			return ERR_PTR(-EOVERFLOW);
1723 
1724 		adv = kzalloc(sizeof(*adv), GFP_KERNEL);
1725 		if (!adv)
1726 			return ERR_PTR(-ENOMEM);
1727 
1728 		adv->pending = true;
1729 		adv->instance = instance;
1730 		list_add(&adv->list, &hdev->adv_instances);
1731 		hdev->adv_instance_cnt++;
1732 	}
1733 
1734 	adv->flags = flags;
1735 	adv->min_interval = min_interval;
1736 	adv->max_interval = max_interval;
1737 	adv->tx_power = tx_power;
1738 	/* Defining a mesh_handle changes the timing units to ms,
1739 	 * rather than seconds, and ties the instance to the requested
1740 	 * mesh_tx queue.
1741 	 */
1742 	adv->mesh = mesh_handle;
1743 
1744 	hci_set_adv_instance_data(hdev, instance, adv_data_len, adv_data,
1745 				  scan_rsp_len, scan_rsp_data);
1746 
1747 	adv->timeout = timeout;
1748 	adv->remaining_time = timeout;
1749 
1750 	if (duration == 0)
1751 		adv->duration = hdev->def_multi_adv_rotation_duration;
1752 	else
1753 		adv->duration = duration;
1754 
1755 	INIT_DELAYED_WORK(&adv->rpa_expired_cb, adv_instance_rpa_expired);
1756 
1757 	BT_DBG("%s for instance %d", hdev->name, instance);
1758 
1759 	return adv;
1760 }
1761 
1762 /* This function requires the caller holds hdev->lock */
1763 struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance,
1764 				      u32 flags, u8 data_len, u8 *data,
1765 				      u32 min_interval, u32 max_interval)
1766 {
1767 	struct adv_info *adv;
1768 
1769 	adv = hci_add_adv_instance(hdev, instance, flags, 0, NULL, 0, NULL,
1770 				   0, 0, HCI_ADV_TX_POWER_NO_PREFERENCE,
1771 				   min_interval, max_interval, 0);
1772 	if (IS_ERR(adv))
1773 		return adv;
1774 
1775 	adv->periodic = true;
1776 	adv->per_adv_data_len = data_len;
1777 
1778 	if (data)
1779 		memcpy(adv->per_adv_data, data, data_len);
1780 
1781 	return adv;
1782 }
1783 
1784 /* This function requires the caller holds hdev->lock */
1785 int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
1786 			      u16 adv_data_len, u8 *adv_data,
1787 			      u16 scan_rsp_len, u8 *scan_rsp_data)
1788 {
1789 	struct adv_info *adv;
1790 
1791 	adv = hci_find_adv_instance(hdev, instance);
1792 
1793 	/* If advertisement doesn't exist, we can't modify its data */
1794 	if (!adv)
1795 		return -ENOENT;
1796 
1797 	if (adv_data_len && ADV_DATA_CMP(adv, adv_data, adv_data_len)) {
1798 		memset(adv->adv_data, 0, sizeof(adv->adv_data));
1799 		memcpy(adv->adv_data, adv_data, adv_data_len);
1800 		adv->adv_data_len = adv_data_len;
1801 		adv->adv_data_changed = true;
1802 	}
1803 
1804 	if (scan_rsp_len && SCAN_RSP_CMP(adv, scan_rsp_data, scan_rsp_len)) {
1805 		memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
1806 		memcpy(adv->scan_rsp_data, scan_rsp_data, scan_rsp_len);
1807 		adv->scan_rsp_len = scan_rsp_len;
1808 		adv->scan_rsp_changed = true;
1809 	}
1810 
1811 	/* Mark as changed if there are flags which would affect it */
1812 	if (((adv->flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) ||
1813 	    adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1814 		adv->scan_rsp_changed = true;
1815 
1816 	return 0;
1817 }
1818 
1819 /* This function requires the caller holds hdev->lock */
1820 u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance)
1821 {
1822 	u32 flags;
1823 	struct adv_info *adv;
1824 
1825 	if (instance == 0x00) {
1826 		/* Instance 0 always manages the "Tx Power" and "Flags"
1827 		 * fields
1828 		 */
1829 		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
1830 
1831 		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
1832 		 * corresponds to the "connectable" instance flag.
1833 		 */
1834 		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
1835 			flags |= MGMT_ADV_FLAG_CONNECTABLE;
1836 
1837 		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1838 			flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
1839 		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1840 			flags |= MGMT_ADV_FLAG_DISCOV;
1841 
1842 		return flags;
1843 	}
1844 
1845 	adv = hci_find_adv_instance(hdev, instance);
1846 
1847 	/* Return 0 when we got an invalid instance identifier. */
1848 	if (!adv)
1849 		return 0;
1850 
1851 	return adv->flags;
1852 }
1853 
1854 bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
1855 {
1856 	struct adv_info *adv;
1857 
1858 	/* Instance 0x00 always sets the local name */
1859 	if (instance == 0x00)
1860 		return true;
1861 
1862 	adv = hci_find_adv_instance(hdev, instance);
1863 	if (!adv)
1864 		return false;
1865 
1866 	if (adv->flags & MGMT_ADV_FLAG_APPEARANCE ||
1867 	    adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1868 		return true;
1869 
1870 	return adv->scan_rsp_len ? true : false;
1871 }
1872 
1873 /* This function requires the caller holds hdev->lock */
1874 void hci_adv_monitors_clear(struct hci_dev *hdev)
1875 {
1876 	struct adv_monitor *monitor;
1877 	int handle;
1878 
1879 	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
1880 		hci_free_adv_monitor(hdev, monitor);
1881 
1882 	idr_destroy(&hdev->adv_monitors_idr);
1883 }
1884 
1885 /* Frees the monitor structure and does some bookkeeping.
1886  * This function requires the caller holds hdev->lock.
1887  */
1888 void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
1889 {
1890 	struct adv_pattern *pattern;
1891 	struct adv_pattern *tmp;
1892 
1893 	if (!monitor)
1894 		return;
1895 
1896 	list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
1897 		list_del(&pattern->list);
1898 		kfree(pattern);
1899 	}
1900 
1901 	if (monitor->handle)
1902 		idr_remove(&hdev->adv_monitors_idr, monitor->handle);
1903 
1904 	if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) {
1905 		hdev->adv_monitors_cnt--;
1906 		mgmt_adv_monitor_removed(hdev, monitor->handle);
1907 	}
1908 
1909 	kfree(monitor);
1910 }
1911 
1912 /* Assigns a handle to the monitor, and if offloading is supported and power is on,
1913  * also attempts to forward the request to the controller.
1914  * This function requires the caller holds hci_req_sync_lock.
1915  */
hci_add_adv_monitor(struct hci_dev * hdev,struct adv_monitor * monitor)1916 int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
1917 {
1918 	int min, max, handle;
1919 	int status = 0;
1920 
1921 	if (!monitor)
1922 		return -EINVAL;
1923 
1924 	hci_dev_lock(hdev);
1925 
1926 	min = HCI_MIN_ADV_MONITOR_HANDLE;
1927 	max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
1928 	handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
1929 			   GFP_KERNEL);
1930 
1931 	hci_dev_unlock(hdev);
1932 
1933 	if (handle < 0)
1934 		return handle;
1935 
1936 	monitor->handle = handle;
1937 
1938 	if (!hdev_is_powered(hdev))
1939 		return status;
1940 
1941 	switch (hci_get_adv_monitor_offload_ext(hdev)) {
1942 	case HCI_ADV_MONITOR_EXT_NONE:
1943 		bt_dev_dbg(hdev, "add monitor %d status %d",
1944 			   monitor->handle, status);
1945 		/* Message was not forwarded to controller - not an error */
1946 		break;
1947 
1948 	case HCI_ADV_MONITOR_EXT_MSFT:
1949 		status = msft_add_monitor_pattern(hdev, monitor);
1950 		bt_dev_dbg(hdev, "add monitor %d msft status %d",
1951 			   handle, status);
1952 		break;
1953 	}
1954 
1955 	return status;
1956 }
1957 
1958 /* Attempts to remove the monitor from the controller and then frees it. If
1959  * the controller doesn't have a corresponding handle, it is freed anyway.
1960  * This function requires the caller holds hci_req_sync_lock.
1961  */
hci_remove_adv_monitor(struct hci_dev * hdev,struct adv_monitor * monitor)1962 static int hci_remove_adv_monitor(struct hci_dev *hdev,
1963 				  struct adv_monitor *monitor)
1964 {
1965 	int status = 0;
1966 	int handle;
1967 
1968 	switch (hci_get_adv_monitor_offload_ext(hdev)) {
1969 	case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
1970 		bt_dev_dbg(hdev, "remove monitor %d status %d",
1971 			   monitor->handle, status);
1972 		goto free_monitor;
1973 
1974 	case HCI_ADV_MONITOR_EXT_MSFT:
1975 		handle = monitor->handle;
1976 		status = msft_remove_monitor(hdev, monitor);
1977 		bt_dev_dbg(hdev, "remove monitor %d msft status %d",
1978 			   handle, status);
1979 		break;
1980 	}
1981 
1982 	/* In case no matching handle is registered, just free the monitor */
1983 	if (status == -ENOENT)
1984 		goto free_monitor;
1985 
1986 	return status;
1987 
1988 free_monitor:
1989 	if (status == -ENOENT)
1990 		bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
1991 			    monitor->handle);
1992 	hci_free_adv_monitor(hdev, monitor);
1993 
1994 	return status;
1995 }
1996 
1997 /* This function requires the caller holds hci_req_sync_lock */
hci_remove_single_adv_monitor(struct hci_dev * hdev,u16 handle)1998 int hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle)
1999 {
2000 	struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);
2001 
2002 	if (!monitor)
2003 		return -EINVAL;
2004 
2005 	return hci_remove_adv_monitor(hdev, monitor);
2006 }
2007 
2008 /* This function requires the caller holds hci_req_sync_lock */
hci_remove_all_adv_monitor(struct hci_dev * hdev)2009 int hci_remove_all_adv_monitor(struct hci_dev *hdev)
2010 {
2011 	struct adv_monitor *monitor;
2012 	int idr_next_id = 0;
2013 	int status = 0;
2014 
2015 	while (1) {
2016 		monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
2017 		if (!monitor)
2018 			break;
2019 
2020 		status = hci_remove_adv_monitor(hdev, monitor);
2021 		if (status)
2022 			return status;
2023 
2024 		idr_next_id++;
2025 	}
2026 
2027 	return status;
2028 }
2029 
2030 /* This function requires the caller holds hdev->lock */
hci_is_adv_monitoring(struct hci_dev * hdev)2031 bool hci_is_adv_monitoring(struct hci_dev *hdev)
2032 {
2033 	return !idr_is_empty(&hdev->adv_monitors_idr);
2034 }
2035 
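/* Return which offloading extension, if any, the controller supports for
 * advertisement monitoring (currently only the MSFT extension).
 */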
hci_get_adv_monitor_offload_ext(struct hci_dev * hdev)2036 int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
2037 {
2038 	if (msft_monitor_supported(hdev))
2039 		return HCI_ADV_MONITOR_EXT_MSFT;
2040 
2041 	return HCI_ADV_MONITOR_EXT_NONE;
2042 }
2043 
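/* Look up a list entry matching the given bdaddr and address type. The
 * _with_irk and _with_flags variants below do the same for their list types.
 */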
hci_bdaddr_list_lookup(struct list_head * bdaddr_list,bdaddr_t * bdaddr,u8 type)2044 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2045 					 bdaddr_t *bdaddr, u8 type)
2046 {
2047 	struct bdaddr_list *b;
2048 
2049 	list_for_each_entry(b, bdaddr_list, list) {
2050 		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2051 			return b;
2052 	}
2053 
2054 	return NULL;
2055 }
2056 
hci_bdaddr_list_lookup_with_irk(struct list_head * bdaddr_list,bdaddr_t * bdaddr,u8 type)2057 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
2058 				struct list_head *bdaddr_list, bdaddr_t *bdaddr,
2059 				u8 type)
2060 {
2061 	struct bdaddr_list_with_irk *b;
2062 
2063 	list_for_each_entry(b, bdaddr_list, list) {
2064 		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2065 			return b;
2066 	}
2067 
2068 	return NULL;
2069 }
2070 
2071 struct bdaddr_list_with_flags *
hci_bdaddr_list_lookup_with_flags(struct list_head * bdaddr_list,bdaddr_t * bdaddr,u8 type)2072 hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
2073 				  bdaddr_t *bdaddr, u8 type)
2074 {
2075 	struct bdaddr_list_with_flags *b;
2076 
2077 	list_for_each_entry(b, bdaddr_list, list) {
2078 		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2079 			return b;
2080 	}
2081 
2082 	return NULL;
2083 }
2084 
hci_bdaddr_list_clear(struct list_head * bdaddr_list)2085 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2086 {
2087 	struct bdaddr_list *b, *n;
2088 
2089 	list_for_each_entry_safe(b, n, bdaddr_list, list) {
2090 		list_del(&b->list);
2091 		kfree(b);
2092 	}
2093 }
2094 
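/* Add a new bdaddr/type entry to the list. BDADDR_ANY is rejected with
 * -EBADF and duplicate entries with -EEXIST.
 */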
hci_bdaddr_list_add(struct list_head * list,bdaddr_t * bdaddr,u8 type)2095 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2096 {
2097 	struct bdaddr_list *entry;
2098 
2099 	if (!bacmp(bdaddr, BDADDR_ANY))
2100 		return -EBADF;
2101 
2102 	if (hci_bdaddr_list_lookup(list, bdaddr, type))
2103 		return -EEXIST;
2104 
2105 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2106 	if (!entry)
2107 		return -ENOMEM;
2108 
2109 	bacpy(&entry->bdaddr, bdaddr);
2110 	entry->bdaddr_type = type;
2111 
2112 	list_add(&entry->list, list);
2113 
2114 	return 0;
2115 }
2116 
hci_bdaddr_list_add_with_irk(struct list_head * list,bdaddr_t * bdaddr,u8 type,u8 * peer_irk,u8 * local_irk)2117 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2118 					u8 type, u8 *peer_irk, u8 *local_irk)
2119 {
2120 	struct bdaddr_list_with_irk *entry;
2121 
2122 	if (!bacmp(bdaddr, BDADDR_ANY))
2123 		return -EBADF;
2124 
2125 	if (hci_bdaddr_list_lookup(list, bdaddr, type))
2126 		return -EEXIST;
2127 
2128 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2129 	if (!entry)
2130 		return -ENOMEM;
2131 
2132 	bacpy(&entry->bdaddr, bdaddr);
2133 	entry->bdaddr_type = type;
2134 
2135 	if (peer_irk)
2136 		memcpy(entry->peer_irk, peer_irk, 16);
2137 
2138 	if (local_irk)
2139 		memcpy(entry->local_irk, local_irk, 16);
2140 
2141 	list_add(&entry->list, list);
2142 
2143 	return 0;
2144 }
2145 
hci_bdaddr_list_add_with_flags(struct list_head * list,bdaddr_t * bdaddr,u8 type,u32 flags)2146 int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2147 				   u8 type, u32 flags)
2148 {
2149 	struct bdaddr_list_with_flags *entry;
2150 
2151 	if (!bacmp(bdaddr, BDADDR_ANY))
2152 		return -EBADF;
2153 
2154 	if (hci_bdaddr_list_lookup(list, bdaddr, type))
2155 		return -EEXIST;
2156 
2157 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2158 	if (!entry)
2159 		return -ENOMEM;
2160 
2161 	bacpy(&entry->bdaddr, bdaddr);
2162 	entry->bdaddr_type = type;
2163 	entry->flags = flags;
2164 
2165 	list_add(&entry->list, list);
2166 
2167 	return 0;
2168 }
2169 
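/* Remove the matching entry from the list; passing BDADDR_ANY clears the
 * entire list instead.
 */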
hci_bdaddr_list_del(struct list_head * list,bdaddr_t * bdaddr,u8 type)2170 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2171 {
2172 	struct bdaddr_list *entry;
2173 
2174 	if (!bacmp(bdaddr, BDADDR_ANY)) {
2175 		hci_bdaddr_list_clear(list);
2176 		return 0;
2177 	}
2178 
2179 	entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2180 	if (!entry)
2181 		return -ENOENT;
2182 
2183 	list_del(&entry->list);
2184 	kfree(entry);
2185 
2186 	return 0;
2187 }
2188 
hci_bdaddr_list_del_with_irk(struct list_head * list,bdaddr_t * bdaddr,u8 type)2189 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2190 							u8 type)
2191 {
2192 	struct bdaddr_list_with_irk *entry;
2193 
2194 	if (!bacmp(bdaddr, BDADDR_ANY)) {
2195 		hci_bdaddr_list_clear(list);
2196 		return 0;
2197 	}
2198 
2199 	entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
2200 	if (!entry)
2201 		return -ENOENT;
2202 
2203 	list_del(&entry->list);
2204 	kfree(entry);
2205 
2206 	return 0;
2207 }
2208 
hci_bdaddr_list_del_with_flags(struct list_head * list,bdaddr_t * bdaddr,u8 type)2209 int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2210 				   u8 type)
2211 {
2212 	struct bdaddr_list_with_flags *entry;
2213 
2214 	if (!bacmp(bdaddr, BDADDR_ANY)) {
2215 		hci_bdaddr_list_clear(list);
2216 		return 0;
2217 	}
2218 
2219 	entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
2220 	if (!entry)
2221 		return -ENOENT;
2222 
2223 	list_del(&entry->list);
2224 	kfree(entry);
2225 
2226 	return 0;
2227 }
2228 
2229 /* This function requires the caller holds hdev->lock */
hci_conn_params_lookup(struct hci_dev * hdev,bdaddr_t * addr,u8 addr_type)2230 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2231 					       bdaddr_t *addr, u8 addr_type)
2232 {
2233 	struct hci_conn_params *params;
2234 
2235 	list_for_each_entry(params, &hdev->le_conn_params, list) {
2236 		if (bacmp(&params->addr, addr) == 0 &&
2237 		    params->addr_type == addr_type) {
2238 			return params;
2239 		}
2240 	}
2241 
2242 	return NULL;
2243 }
2244 
2245 /* This function requires the caller holds hdev->lock or rcu_read_lock */
hci_pend_le_action_lookup(struct list_head * list,bdaddr_t * addr,u8 addr_type)2246 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2247 						  bdaddr_t *addr, u8 addr_type)
2248 {
2249 	struct hci_conn_params *param;
2250 
2251 	rcu_read_lock();
2252 
2253 	list_for_each_entry_rcu(param, list, action) {
2254 		if (bacmp(&param->addr, addr) == 0 &&
2255 		    param->addr_type == addr_type) {
2256 			rcu_read_unlock();
2257 			return param;
2258 		}
2259 	}
2260 
2261 	rcu_read_unlock();
2262 
2263 	return NULL;
2264 }
2265 
2266 /* This function requires the caller holds hdev->lock */
hci_pend_le_list_del_init(struct hci_conn_params * param)2267 void hci_pend_le_list_del_init(struct hci_conn_params *param)
2268 {
2269 	if (list_empty(&param->action))
2270 		return;
2271 
2272 	list_del_rcu(&param->action);
2273 	synchronize_rcu();
2274 	INIT_LIST_HEAD(&param->action);
2275 }
2276 
2277 /* This function requires the caller holds hdev->lock */
hci_pend_le_list_add(struct hci_conn_params * param,struct list_head * list)2278 void hci_pend_le_list_add(struct hci_conn_params *param,
2279 			  struct list_head *list)
2280 {
2281 	list_add_rcu(&param->action, list);
2282 }
2283 
2284 /* This function requires the caller holds hdev->lock */
hci_conn_params_add(struct hci_dev * hdev,bdaddr_t * addr,u8 addr_type)2285 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2286 					    bdaddr_t *addr, u8 addr_type)
2287 {
2288 	struct hci_conn_params *params;
2289 
2290 	params = hci_conn_params_lookup(hdev, addr, addr_type);
2291 	if (params)
2292 		return params;
2293 
2294 	params = kzalloc(sizeof(*params), GFP_KERNEL);
2295 	if (!params) {
2296 		bt_dev_err(hdev, "out of memory");
2297 		return NULL;
2298 	}
2299 
2300 	bacpy(&params->addr, addr);
2301 	params->addr_type = addr_type;
2302 
2303 	list_add(&params->list, &hdev->le_conn_params);
2304 	INIT_LIST_HEAD(&params->action);
2305 
2306 	params->conn_min_interval = hdev->le_conn_min_interval;
2307 	params->conn_max_interval = hdev->le_conn_max_interval;
2308 	params->conn_latency = hdev->le_conn_latency;
2309 	params->supervision_timeout = hdev->le_supv_timeout;
2310 	params->auto_connect = HCI_AUTO_CONN_DISABLED;
2311 
2312 	BT_DBG("addr %pMR (type %u)", addr, addr_type);
2313 
2314 	return params;
2315 }
2316 
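/* Unlink the connection parameters from any pending action list, drop the
 * reference to a pending connection and free the entry.
 */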
hci_conn_params_free(struct hci_conn_params * params)2317 void hci_conn_params_free(struct hci_conn_params *params)
2318 {
2319 	hci_pend_le_list_del_init(params);
2320 
2321 	if (params->conn) {
2322 		hci_conn_drop(params->conn);
2323 		hci_conn_put(params->conn);
2324 	}
2325 
2326 	list_del(&params->list);
2327 	kfree(params);
2328 }
2329 
2330 /* This function requires the caller holds hdev->lock */
hci_conn_params_del(struct hci_dev * hdev,bdaddr_t * addr,u8 addr_type)2331 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2332 {
2333 	struct hci_conn_params *params;
2334 
2335 	params = hci_conn_params_lookup(hdev, addr, addr_type);
2336 	if (!params)
2337 		return;
2338 
2339 	hci_conn_params_free(params);
2340 
2341 	hci_update_passive_scan(hdev);
2342 
2343 	BT_DBG("addr %pMR (type %u)", addr, addr_type);
2344 }
2345 
2346 /* This function requires the caller holds hdev->lock */
hci_conn_params_clear_disabled(struct hci_dev * hdev)2347 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2348 {
2349 	struct hci_conn_params *params, *tmp;
2350 
2351 	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2352 		if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2353 			continue;
2354 
2355 		/* If trying to establish a one-time connection to a disabled
2356 		 * device, leave the params but mark them for explicit connect only.
2357 		 */
2358 		if (params->explicit_connect) {
2359 			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2360 			continue;
2361 		}
2362 
2363 		hci_conn_params_free(params);
2364 	}
2365 
2366 	BT_DBG("All LE disabled connection parameters were removed");
2367 }
2368 
2369 /* This function requires the caller holds hdev->lock */
hci_conn_params_clear_all(struct hci_dev * hdev)2370 static void hci_conn_params_clear_all(struct hci_dev *hdev)
2371 {
2372 	struct hci_conn_params *params, *tmp;
2373 
2374 	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2375 		hci_conn_params_free(params);
2376 
2377 	BT_DBG("All LE connection parameters were removed");
2378 }
2379 
2380 /* Copy the Identity Address of the controller.
2381  *
2382  * If the controller has a public BD_ADDR, then by default use that one.
2383  * If this is an LE-only controller without a public address, default to
2384  * the static random address.
2385  *
2386  * For debugging purposes it is possible to force controllers with a
2387  * public address to use the static random address instead.
2388  *
2389  * In case BR/EDR has been disabled on a dual-mode controller and
2390  * userspace has configured a static address, then that address
2391  * becomes the identity address instead of the public BR/EDR address.
2392  */
hci_copy_identity_address(struct hci_dev * hdev,bdaddr_t * bdaddr,u8 * bdaddr_type)2393 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2394 			       u8 *bdaddr_type)
2395 {
2396 	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2397 	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2398 	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2399 	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
2400 		bacpy(bdaddr, &hdev->static_addr);
2401 		*bdaddr_type = ADDR_LE_DEV_RANDOM;
2402 	} else {
2403 		bacpy(bdaddr, &hdev->bdaddr);
2404 		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
2405 	}
2406 }
2407 
hci_clear_wake_reason(struct hci_dev * hdev)2408 static void hci_clear_wake_reason(struct hci_dev *hdev)
2409 {
2410 	hci_dev_lock(hdev);
2411 
2412 	hdev->wake_reason = 0;
2413 	bacpy(&hdev->wake_addr, BDADDR_ANY);
2414 	hdev->wake_addr_type = 0;
2415 
2416 	hci_dev_unlock(hdev);
2417 }
2418 
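/* PM notifier callback: suspend the controller before system sleep and
 * resume it afterwards, unless userspace has taken over the device through
 * the user channel.
 */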
hci_suspend_notifier(struct notifier_block * nb,unsigned long action,void * data)2419 static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
2420 				void *data)
2421 {
2422 	struct hci_dev *hdev =
2423 		container_of(nb, struct hci_dev, suspend_notifier);
2424 	int ret = 0;
2425 
2426 	/* Userspace has full control of this device. Do nothing. */
2427 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2428 		return NOTIFY_DONE;
2429 
2430 	/* To avoid a potential race with hci_unregister_dev. */
2431 	hci_dev_hold(hdev);
2432 
2433 	if (action == PM_SUSPEND_PREPARE)
2434 		ret = hci_suspend_dev(hdev);
2435 	else if (action == PM_POST_SUSPEND)
2436 		ret = hci_resume_dev(hdev);
2437 
2438 	if (ret)
2439 		bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
2440 			   action, ret);
2441 
2442 	hci_dev_put(hdev);
2443 	return NOTIFY_DONE;
2444 }
2445 
2446 /* Alloc HCI device */
hci_alloc_dev_priv(int sizeof_priv)2447 struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
2448 {
2449 	struct hci_dev *hdev;
2450 	unsigned int alloc_size;
2451 
2452 	alloc_size = sizeof(*hdev);
2453 	if (sizeof_priv) {
2454 		/* Fixme: May need ALIGN-ment? */
2455 		alloc_size += sizeof_priv;
2456 	}
2457 
2458 	hdev = kzalloc(alloc_size, GFP_KERNEL);
2459 	if (!hdev)
2460 		return NULL;
2461 
2462 	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2463 	hdev->esco_type = (ESCO_HV1);
2464 	hdev->link_mode = (HCI_LM_ACCEPT);
2465 	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
2466 	hdev->io_capability = 0x03;	/* No Input No Output */
2467 	hdev->manufacturer = 0xffff;	/* Default to internal use */
2468 	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2469 	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2470 	hdev->adv_instance_cnt = 0;
2471 	hdev->cur_adv_instance = 0x00;
2472 	hdev->adv_instance_timeout = 0;
2473 
2474 	hdev->advmon_allowlist_duration = 300;
2475 	hdev->advmon_no_filter_duration = 500;
2476 	hdev->enable_advmon_interleave_scan = 0x00;	/* Default to disable */
2477 
2478 	hdev->sniff_max_interval = 800;
2479 	hdev->sniff_min_interval = 80;
2480 
2481 	hdev->le_adv_channel_map = 0x07;
2482 	hdev->le_adv_min_interval = 0x0800;
2483 	hdev->le_adv_max_interval = 0x0800;
2484 	hdev->le_scan_interval = 0x0060;
2485 	hdev->le_scan_window = 0x0030;
2486 	hdev->le_scan_int_suspend = 0x0400;
2487 	hdev->le_scan_window_suspend = 0x0012;
2488 	hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
2489 	hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
2490 	hdev->le_scan_int_adv_monitor = 0x0060;
2491 	hdev->le_scan_window_adv_monitor = 0x0030;
2492 	hdev->le_scan_int_connect = 0x0060;
2493 	hdev->le_scan_window_connect = 0x0060;
2494 	hdev->le_conn_min_interval = 0x0018;
2495 	hdev->le_conn_max_interval = 0x0028;
2496 	hdev->le_conn_latency = 0x0000;
2497 	hdev->le_supv_timeout = 0x002a;
2498 	hdev->le_def_tx_len = 0x001b;
2499 	hdev->le_def_tx_time = 0x0148;
2500 	hdev->le_max_tx_len = 0x001b;
2501 	hdev->le_max_tx_time = 0x0148;
2502 	hdev->le_max_rx_len = 0x001b;
2503 	hdev->le_max_rx_time = 0x0148;
2504 	hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
2505 	hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
2506 	hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
2507 	hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
2508 	hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
2509 	hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
2510 	hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT;
2511 	hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
2512 	hdev->max_le_tx_power = HCI_TX_POWER_INVALID;
2513 
2514 	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
2515 	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
2516 	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
2517 	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
2518 	hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
2519 	hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
2520 
2521 	/* default 1.28 sec page scan */
2522 	hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
2523 	hdev->def_page_scan_int = 0x0800;
2524 	hdev->def_page_scan_window = 0x0012;
2525 
2526 	mutex_init(&hdev->lock);
2527 	mutex_init(&hdev->req_lock);
2528 
2529 	ida_init(&hdev->unset_handle_ida);
2530 
2531 	INIT_LIST_HEAD(&hdev->mesh_pending);
2532 	INIT_LIST_HEAD(&hdev->mgmt_pending);
2533 	INIT_LIST_HEAD(&hdev->reject_list);
2534 	INIT_LIST_HEAD(&hdev->accept_list);
2535 	INIT_LIST_HEAD(&hdev->uuids);
2536 	INIT_LIST_HEAD(&hdev->link_keys);
2537 	INIT_LIST_HEAD(&hdev->long_term_keys);
2538 	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
2539 	INIT_LIST_HEAD(&hdev->remote_oob_data);
2540 	INIT_LIST_HEAD(&hdev->le_accept_list);
2541 	INIT_LIST_HEAD(&hdev->le_resolv_list);
2542 	INIT_LIST_HEAD(&hdev->le_conn_params);
2543 	INIT_LIST_HEAD(&hdev->pend_le_conns);
2544 	INIT_LIST_HEAD(&hdev->pend_le_reports);
2545 	INIT_LIST_HEAD(&hdev->conn_hash.list);
2546 	INIT_LIST_HEAD(&hdev->adv_instances);
2547 	INIT_LIST_HEAD(&hdev->blocked_keys);
2548 	INIT_LIST_HEAD(&hdev->monitored_devices);
2549 
2550 	INIT_LIST_HEAD(&hdev->local_codecs);
2551 	INIT_WORK(&hdev->rx_work, hci_rx_work);
2552 	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2553 	INIT_WORK(&hdev->tx_work, hci_tx_work);
2554 	INIT_WORK(&hdev->power_on, hci_power_on);
2555 	INIT_WORK(&hdev->error_reset, hci_error_reset);
2556 
2557 	hci_cmd_sync_init(hdev);
2558 
2559 	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2560 
2561 	skb_queue_head_init(&hdev->rx_q);
2562 	skb_queue_head_init(&hdev->cmd_q);
2563 	skb_queue_head_init(&hdev->raw_q);
2564 
2565 	init_waitqueue_head(&hdev->req_wait_q);
2566 
2567 	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
2568 	INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);
2569 
2570 	hci_devcd_setup(hdev);
2571 	hci_request_setup(hdev);
2572 
2573 	hci_init_sysfs(hdev);
2574 	discovery_init(hdev);
2575 
2576 	return hdev;
2577 }
2578 EXPORT_SYMBOL(hci_alloc_dev_priv);
2579 
2580 /* Free HCI device */
hci_free_dev(struct hci_dev * hdev)2581 void hci_free_dev(struct hci_dev *hdev)
2582 {
2583 	/* will free via device release */
2584 	put_device(&hdev->dev);
2585 }
2586 EXPORT_SYMBOL(hci_free_dev);
2587 
2588 /* Register HCI device */
hci_register_dev(struct hci_dev * hdev)2589 int hci_register_dev(struct hci_dev *hdev)
2590 {
2591 	int id, error;
2592 
2593 	if (!hdev->open || !hdev->close || !hdev->send)
2594 		return -EINVAL;
2595 
2596 	id = ida_alloc_max(&hci_index_ida, HCI_MAX_ID - 1, GFP_KERNEL);
2597 	if (id < 0)
2598 		return id;
2599 
2600 	error = dev_set_name(&hdev->dev, "hci%u", id);
2601 	if (error)
2602 		return error;
2603 
2604 	hdev->name = dev_name(&hdev->dev);
2605 	hdev->id = id;
2606 
2607 	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2608 
2609 	hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
2610 	if (!hdev->workqueue) {
2611 		error = -ENOMEM;
2612 		goto err;
2613 	}
2614 
2615 	hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
2616 						      hdev->name);
2617 	if (!hdev->req_workqueue) {
2618 		destroy_workqueue(hdev->workqueue);
2619 		error = -ENOMEM;
2620 		goto err;
2621 	}
2622 
2623 	if (!IS_ERR_OR_NULL(bt_debugfs))
2624 		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
2625 
2626 	error = device_add(&hdev->dev);
2627 	if (error < 0)
2628 		goto err_wqueue;
2629 
2630 	hci_leds_init(hdev);
2631 
2632 	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2633 				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2634 				    hdev);
2635 	if (hdev->rfkill) {
2636 		if (rfkill_register(hdev->rfkill) < 0) {
2637 			rfkill_destroy(hdev->rfkill);
2638 			hdev->rfkill = NULL;
2639 		}
2640 	}
2641 
2642 	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2643 		hci_dev_set_flag(hdev, HCI_RFKILLED);
2644 
2645 	hci_dev_set_flag(hdev, HCI_SETUP);
2646 	hci_dev_set_flag(hdev, HCI_AUTO_OFF);
2647 
2648 	/* Assume BR/EDR support until proven otherwise (such as
2649 	 * through reading the supported features during init).
2650 	 */
2651 	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
2652 
2653 	write_lock(&hci_dev_list_lock);
2654 	list_add(&hdev->list, &hci_dev_list);
2655 	write_unlock(&hci_dev_list_lock);
2656 
2657 	/* Devices that are marked for raw-only usage are unconfigured
2658 	 * and should not be included in normal operation.
2659 	 */
2660 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2661 		hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
2662 
2663 	/* Mark the Remote Wakeup connection flag as supported if the driver has a wakeup
2664 	 * callback.
2665 	 */
2666 	if (hdev->wakeup)
2667 		hdev->conn_flags |= HCI_CONN_FLAG_REMOTE_WAKEUP;
2668 
2669 	hci_sock_dev_event(hdev, HCI_DEV_REG);
2670 	hci_dev_hold(hdev);
2671 
2672 	error = hci_register_suspend_notifier(hdev);
2673 	if (error)
2674 		BT_WARN("register suspend notifier failed error:%d\n", error);
2675 
2676 	queue_work(hdev->req_workqueue, &hdev->power_on);
2677 
2678 	idr_init(&hdev->adv_monitors_idr);
2679 	msft_register(hdev);
2680 
2681 	return id;
2682 
2683 err_wqueue:
2684 	debugfs_remove_recursive(hdev->debugfs);
2685 	destroy_workqueue(hdev->workqueue);
2686 	destroy_workqueue(hdev->req_workqueue);
2687 err:
2688 	ida_free(&hci_index_ida, hdev->id);
2689 
2690 	return error;
2691 }
2692 EXPORT_SYMBOL(hci_register_dev);
2693 
2694 /* Unregister HCI device */
hci_unregister_dev(struct hci_dev * hdev)2695 void hci_unregister_dev(struct hci_dev *hdev)
2696 {
2697 	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2698 
2699 	mutex_lock(&hdev->unregister_lock);
2700 	hci_dev_set_flag(hdev, HCI_UNREGISTER);
2701 	mutex_unlock(&hdev->unregister_lock);
2702 
2703 	write_lock(&hci_dev_list_lock);
2704 	list_del(&hdev->list);
2705 	write_unlock(&hci_dev_list_lock);
2706 
2707 	cancel_work_sync(&hdev->power_on);
2708 
2709 	hci_cmd_sync_clear(hdev);
2710 
2711 	hci_unregister_suspend_notifier(hdev);
2712 
2713 	hci_dev_do_close(hdev);
2714 
2715 	if (!test_bit(HCI_INIT, &hdev->flags) &&
2716 	    !hci_dev_test_flag(hdev, HCI_SETUP) &&
2717 	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
2718 		hci_dev_lock(hdev);
2719 		mgmt_index_removed(hdev);
2720 		hci_dev_unlock(hdev);
2721 	}
2722 
2723 	/* mgmt_index_removed should take care of emptying the
2724 	 * pending list */
2725 	BUG_ON(!list_empty(&hdev->mgmt_pending));
2726 
2727 	hci_sock_dev_event(hdev, HCI_DEV_UNREG);
2728 
2729 	if (hdev->rfkill) {
2730 		rfkill_unregister(hdev->rfkill);
2731 		rfkill_destroy(hdev->rfkill);
2732 	}
2733 
2734 	device_del(&hdev->dev);
2735 	/* Actual cleanup is deferred until hci_release_dev(). */
2736 	hci_dev_put(hdev);
2737 }
2738 EXPORT_SYMBOL(hci_unregister_dev);
2739 
2740 /* Release HCI device */
hci_release_dev(struct hci_dev * hdev)2741 void hci_release_dev(struct hci_dev *hdev)
2742 {
2743 	debugfs_remove_recursive(hdev->debugfs);
2744 	kfree_const(hdev->hw_info);
2745 	kfree_const(hdev->fw_info);
2746 
2747 	destroy_workqueue(hdev->workqueue);
2748 	destroy_workqueue(hdev->req_workqueue);
2749 
2750 	hci_dev_lock(hdev);
2751 	hci_bdaddr_list_clear(&hdev->reject_list);
2752 	hci_bdaddr_list_clear(&hdev->accept_list);
2753 	hci_uuids_clear(hdev);
2754 	hci_link_keys_clear(hdev);
2755 	hci_smp_ltks_clear(hdev);
2756 	hci_smp_irks_clear(hdev);
2757 	hci_remote_oob_data_clear(hdev);
2758 	hci_adv_instances_clear(hdev);
2759 	hci_adv_monitors_clear(hdev);
2760 	hci_bdaddr_list_clear(&hdev->le_accept_list);
2761 	hci_bdaddr_list_clear(&hdev->le_resolv_list);
2762 	hci_conn_params_clear_all(hdev);
2763 	hci_discovery_filter_clear(hdev);
2764 	hci_blocked_keys_clear(hdev);
2765 	hci_codec_list_clear(&hdev->local_codecs);
2766 	msft_release(hdev);
2767 	hci_dev_unlock(hdev);
2768 
2769 	ida_destroy(&hdev->unset_handle_ida);
2770 	ida_free(&hci_index_ida, hdev->id);
2771 	kfree_skb(hdev->sent_cmd);
2772 	kfree_skb(hdev->req_skb);
2773 	kfree_skb(hdev->recv_event);
2774 	kfree(hdev);
2775 }
2776 EXPORT_SYMBOL(hci_release_dev);
2777 
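/* Register the PM suspend notifier unless it is already registered or the
 * driver has set HCI_QUIRK_NO_SUSPEND_NOTIFIER.
 */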
hci_register_suspend_notifier(struct hci_dev * hdev)2778 int hci_register_suspend_notifier(struct hci_dev *hdev)
2779 {
2780 	int ret = 0;
2781 
2782 	if (!hdev->suspend_notifier.notifier_call &&
2783 	    !test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
2784 		hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
2785 		ret = register_pm_notifier(&hdev->suspend_notifier);
2786 	}
2787 
2788 	return ret;
2789 }
2790 
hci_unregister_suspend_notifier(struct hci_dev * hdev)2791 int hci_unregister_suspend_notifier(struct hci_dev *hdev)
2792 {
2793 	int ret = 0;
2794 
2795 	if (hdev->suspend_notifier.notifier_call) {
2796 		ret = unregister_pm_notifier(&hdev->suspend_notifier);
2797 		if (!ret)
2798 			hdev->suspend_notifier.notifier_call = NULL;
2799 	}
2800 
2801 	return ret;
2802 }
2803 
2804 /* Cancel ongoing command synchronously:
2805  *
2806  * - Cancel command timer
2807  * - Reset command counter
2808  * - Cancel command request
2809  */
hci_cancel_cmd_sync(struct hci_dev * hdev,int err)2810 static void hci_cancel_cmd_sync(struct hci_dev *hdev, int err)
2811 {
2812 	bt_dev_dbg(hdev, "err 0x%2.2x", err);
2813 
2814 	cancel_delayed_work_sync(&hdev->cmd_timer);
2815 	cancel_delayed_work_sync(&hdev->ncmd_timer);
2816 	atomic_set(&hdev->cmd_cnt, 1);
2817 
2818 	hci_cmd_sync_cancel_sync(hdev, err);
2819 }
2820 
2821 /* Suspend HCI device */
hci_suspend_dev(struct hci_dev * hdev)2822 int hci_suspend_dev(struct hci_dev *hdev)
2823 {
2824 	int ret;
2825 
2826 	bt_dev_dbg(hdev, "");
2827 
2828 	/* Suspend should only be handled when the device is powered. */
2829 	if (!hdev_is_powered(hdev) ||
2830 	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
2831 		return 0;
2832 
2833 	/* If powering down don't attempt to suspend */
2834 	if (mgmt_powering_down(hdev))
2835 		return 0;
2836 
2837 	/* Cancel potentially blocking sync operation before suspend */
2838 	hci_cancel_cmd_sync(hdev, EHOSTDOWN);
2839 
2840 	hci_req_sync_lock(hdev);
2841 	ret = hci_suspend_sync(hdev);
2842 	hci_req_sync_unlock(hdev);
2843 
2844 	hci_clear_wake_reason(hdev);
2845 	mgmt_suspending(hdev, hdev->suspend_state);
2846 
2847 	hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
2848 	return ret;
2849 }
2850 EXPORT_SYMBOL(hci_suspend_dev);
2851 
2852 /* Resume HCI device */
hci_resume_dev(struct hci_dev * hdev)2853 int hci_resume_dev(struct hci_dev *hdev)
2854 {
2855 	int ret;
2856 
2857 	bt_dev_dbg(hdev, "");
2858 
2859 	/* Resume should only be handled when the device is powered. */
2860 	if (!hdev_is_powered(hdev) ||
2861 	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
2862 		return 0;
2863 
2864 	/* If powering down don't attempt to resume */
2865 	if (mgmt_powering_down(hdev))
2866 		return 0;
2867 
2868 	hci_req_sync_lock(hdev);
2869 	ret = hci_resume_sync(hdev);
2870 	hci_req_sync_unlock(hdev);
2871 
2872 	mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
2873 		      hdev->wake_addr_type);
2874 
2875 	hci_sock_dev_event(hdev, HCI_DEV_RESUME);
2876 	return ret;
2877 }
2878 EXPORT_SYMBOL(hci_resume_dev);
2879 
2880 /* Reset HCI device */
hci_reset_dev(struct hci_dev * hdev)2881 int hci_reset_dev(struct hci_dev *hdev)
2882 {
2883 	static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
2884 	struct sk_buff *skb;
2885 
2886 	skb = bt_skb_alloc(3, GFP_ATOMIC);
2887 	if (!skb)
2888 		return -ENOMEM;
2889 
2890 	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
2891 	skb_put_data(skb, hw_err, 3);
2892 
2893 	bt_dev_err(hdev, "Injecting HCI hardware error event");
2894 
2895 	/* Send Hardware Error to upper stack */
2896 	return hci_recv_frame(hdev, skb);
2897 }
2898 EXPORT_SYMBOL(hci_reset_dev);
2899 
2900 /* Receive frame from HCI drivers */
hci_recv_frame(struct hci_dev * hdev,struct sk_buff * skb)2901 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
2902 {
2903 	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
2904 		      && !test_bit(HCI_INIT, &hdev->flags))) {
2905 		kfree_skb(skb);
2906 		return -ENXIO;
2907 	}
2908 
2909 	switch (hci_skb_pkt_type(skb)) {
2910 	case HCI_EVENT_PKT:
2911 		break;
2912 	case HCI_ACLDATA_PKT:
2913 		/* Detect if ISO packet has been sent as ACL */
2914 		if (hci_conn_num(hdev, ISO_LINK)) {
2915 			__u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle);
2916 			__u8 type;
2917 
2918 			type = hci_conn_lookup_type(hdev, hci_handle(handle));
2919 			if (type == ISO_LINK)
2920 				hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
2921 		}
2922 		break;
2923 	case HCI_SCODATA_PKT:
2924 		break;
2925 	case HCI_ISODATA_PKT:
2926 		break;
2927 	default:
2928 		kfree_skb(skb);
2929 		return -EINVAL;
2930 	}
2931 
2932 	/* Incoming skb */
2933 	bt_cb(skb)->incoming = 1;
2934 
2935 	/* Time stamp */
2936 	__net_timestamp(skb);
2937 
2938 	skb_queue_tail(&hdev->rx_q, skb);
2939 	queue_work(hdev->workqueue, &hdev->rx_work);
2940 
2941 	return 0;
2942 }
2943 EXPORT_SYMBOL(hci_recv_frame);
2944 
2945 /* Receive diagnostic message from HCI drivers */
hci_recv_diag(struct hci_dev * hdev,struct sk_buff * skb)2946 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
2947 {
2948 	/* Mark as diagnostic packet */
2949 	hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
2950 
2951 	/* Time stamp */
2952 	__net_timestamp(skb);
2953 
2954 	skb_queue_tail(&hdev->rx_q, skb);
2955 	queue_work(hdev->workqueue, &hdev->rx_work);
2956 
2957 	return 0;
2958 }
2959 EXPORT_SYMBOL(hci_recv_diag);
2960 
hci_set_hw_info(struct hci_dev * hdev,const char * fmt,...)2961 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
2962 {
2963 	va_list vargs;
2964 
2965 	va_start(vargs, fmt);
2966 	kfree_const(hdev->hw_info);
2967 	hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
2968 	va_end(vargs);
2969 }
2970 EXPORT_SYMBOL(hci_set_hw_info);
2971 
hci_set_fw_info(struct hci_dev * hdev,const char * fmt,...)2972 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
2973 {
2974 	va_list vargs;
2975 
2976 	va_start(vargs, fmt);
2977 	kfree_const(hdev->fw_info);
2978 	hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
2979 	va_end(vargs);
2980 }
2981 EXPORT_SYMBOL(hci_set_fw_info);
2982 
2983 /* ---- Interface to upper protocols ---- */
2984 
hci_register_cb(struct hci_cb * cb)2985 int hci_register_cb(struct hci_cb *cb)
2986 {
2987 	BT_DBG("%p name %s", cb, cb->name);
2988 
2989 	mutex_lock(&hci_cb_list_lock);
2990 	list_add_tail(&cb->list, &hci_cb_list);
2991 	mutex_unlock(&hci_cb_list_lock);
2992 
2993 	return 0;
2994 }
2995 EXPORT_SYMBOL(hci_register_cb);
2996 
hci_unregister_cb(struct hci_cb * cb)2997 int hci_unregister_cb(struct hci_cb *cb)
2998 {
2999 	BT_DBG("%p name %s", cb, cb->name);
3000 
3001 	mutex_lock(&hci_cb_list_lock);
3002 	list_del(&cb->list);
3003 	mutex_unlock(&hci_cb_list_lock);
3004 
3005 	return 0;
3006 }
3007 EXPORT_SYMBOL(hci_unregister_cb);
3008 
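/* Hand a single frame to the driver: timestamp it, mirror it to the monitor
 * (and to sockets when in promiscuous mode) and drop it if the device is not
 * running.
 */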
hci_send_frame(struct hci_dev * hdev,struct sk_buff * skb)3009 static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3010 {
3011 	int err;
3012 
3013 	BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
3014 	       skb->len);
3015 
3016 	/* Time stamp */
3017 	__net_timestamp(skb);
3018 
3019 	/* Send copy to monitor */
3020 	hci_send_to_monitor(hdev, skb);
3021 
3022 	if (atomic_read(&hdev->promisc)) {
3023 		/* Send copy to the sockets */
3024 		hci_send_to_sock(hdev, skb);
3025 	}
3026 
3027 	/* Get rid of the skb owner prior to sending to the driver. */
3028 	skb_orphan(skb);
3029 
3030 	if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3031 		kfree_skb(skb);
3032 		return -EINVAL;
3033 	}
3034 
3035 	err = hdev->send(hdev, skb);
3036 	if (err < 0) {
3037 		bt_dev_err(hdev, "sending frame failed (%d)", err);
3038 		kfree_skb(skb);
3039 		return err;
3040 	}
3041 
3042 	return 0;
3043 }
3044 
3045 /* Send HCI command */
hci_send_cmd(struct hci_dev * hdev,__u16 opcode,__u32 plen,const void * param)3046 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3047 		 const void *param)
3048 {
3049 	struct sk_buff *skb;
3050 
3051 	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3052 
3053 	skb = hci_prepare_cmd(hdev, opcode, plen, param);
3054 	if (!skb) {
3055 		bt_dev_err(hdev, "no memory for command");
3056 		return -ENOMEM;
3057 	}
3058 
3059 	/* Stand-alone HCI commands must be flagged as
3060 	 * single-command requests.
3061 	 */
3062 	bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
3063 
3064 	skb_queue_tail(&hdev->cmd_q, skb);
3065 	queue_work(hdev->workqueue, &hdev->cmd_work);
3066 
3067 	return 0;
3068 }
3069 
__hci_cmd_send(struct hci_dev * hdev,u16 opcode,u32 plen,const void * param)3070 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
3071 		   const void *param)
3072 {
3073 	struct sk_buff *skb;
3074 
3075 	if (hci_opcode_ogf(opcode) != 0x3f) {
3076 		/* A controller receiving a command shall respond with either
3077 		 * a Command Status Event or a Command Complete Event.
3078 		 * Therefore, all standard HCI commands must be sent via the
3079 		 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
3080 		 * Some vendors do not comply with this rule for vendor-specific
3081 		 * commands and do not return any event. We want to support
3082 		 * unresponded commands for such cases only.
3083 		 */
3084 		bt_dev_err(hdev, "unresponded command not supported");
3085 		return -EINVAL;
3086 	}
3087 
3088 	skb = hci_prepare_cmd(hdev, opcode, plen, param);
3089 	if (!skb) {
3090 		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
3091 			   opcode);
3092 		return -ENOMEM;
3093 	}
3094 
3095 	hci_send_frame(hdev, skb);
3096 
3097 	return 0;
3098 }
3099 EXPORT_SYMBOL(__hci_cmd_send);
3100 
3101 /* Get data from the previously sent command */
hci_cmd_data(struct sk_buff * skb,__u16 opcode)3102 static void *hci_cmd_data(struct sk_buff *skb, __u16 opcode)
3103 {
3104 	struct hci_command_hdr *hdr;
3105 
3106 	if (!skb || skb->len < HCI_COMMAND_HDR_SIZE)
3107 		return NULL;
3108 
3109 	hdr = (void *)skb->data;
3110 
3111 	if (hdr->opcode != cpu_to_le16(opcode))
3112 		return NULL;
3113 
3114 	return skb->data + HCI_COMMAND_HDR_SIZE;
3115 }
3116 
3117 /* Get data from the previously sent command */
hci_sent_cmd_data(struct hci_dev * hdev,__u16 opcode)3118 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3119 {
3120 	void *data;
3121 
3122 	/* Check if opcode matches last sent command */
3123 	data = hci_cmd_data(hdev->sent_cmd, opcode);
3124 	if (!data)
3125 		/* Check if opcode matches last request */
3126 		data = hci_cmd_data(hdev->req_skb, opcode);
3127 
3128 	return data;
3129 }
3130 
3131 /* Get data from last received event */
hci_recv_event_data(struct hci_dev * hdev,__u8 event)3132 void *hci_recv_event_data(struct hci_dev *hdev, __u8 event)
3133 {
3134 	struct hci_event_hdr *hdr;
3135 	int offset;
3136 
3137 	if (!hdev->recv_event)
3138 		return NULL;
3139 
3140 	hdr = (void *)hdev->recv_event->data;
3141 	offset = sizeof(*hdr);
3142 
3143 	if (hdr->evt != event) {
3144 		/* In case of an LE meta event, check whether the subevent matches */
3145 		if (hdr->evt == HCI_EV_LE_META) {
3146 			struct hci_ev_le_meta *ev;
3147 
3148 			ev = (void *)hdev->recv_event->data + offset;
3149 			offset += sizeof(*ev);
3150 			if (ev->subevent == event)
3151 				goto found;
3152 		}
3153 		return NULL;
3154 	}
3155 
3156 found:
3157 	bt_dev_dbg(hdev, "event 0x%2.2x", event);
3158 
3159 	return hdev->recv_event->data + offset;
3160 }
3161 
3162 /* Send ACL data */
hci_add_acl_hdr(struct sk_buff * skb,__u16 handle,__u16 flags)3163 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3164 {
3165 	struct hci_acl_hdr *hdr;
3166 	int len = skb->len;
3167 
3168 	skb_push(skb, HCI_ACL_HDR_SIZE);
3169 	skb_reset_transport_header(skb);
3170 	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3171 	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3172 	hdr->dlen   = cpu_to_le16(len);
3173 }
3174 
hci_queue_acl(struct hci_chan * chan,struct sk_buff_head * queue,struct sk_buff * skb,__u16 flags)3175 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3176 			  struct sk_buff *skb, __u16 flags)
3177 {
3178 	struct hci_conn *conn = chan->conn;
3179 	struct hci_dev *hdev = conn->hdev;
3180 	struct sk_buff *list;
3181 
3182 	skb->len = skb_headlen(skb);
3183 	skb->data_len = 0;
3184 
3185 	hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3186 
3187 	hci_add_acl_hdr(skb, conn->handle, flags);
3188 
3189 	list = skb_shinfo(skb)->frag_list;
3190 	if (!list) {
3191 		/* Non fragmented */
3192 		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3193 
3194 		skb_queue_tail(queue, skb);
3195 	} else {
3196 		/* Fragmented */
3197 		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3198 
3199 		skb_shinfo(skb)->frag_list = NULL;
3200 
3201 		/* Queue all fragments atomically. We need to use spin_lock_bh
3202 		 * here because with 6LoWPAN links this function can be
3203 		 * called from softirq context, and using a normal spin lock
3204 		 * could cause deadlocks.
3205 		 */
3206 		spin_lock_bh(&queue->lock);
3207 
3208 		__skb_queue_tail(queue, skb);
3209 
3210 		flags &= ~ACL_START;
3211 		flags |= ACL_CONT;
3212 		do {
3213 			skb = list; list = list->next;
3214 
3215 			hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3216 			hci_add_acl_hdr(skb, conn->handle, flags);
3217 
3218 			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3219 
3220 			__skb_queue_tail(queue, skb);
3221 		} while (list);
3222 
3223 		spin_unlock_bh(&queue->lock);
3224 	}
3225 }
3226 
hci_send_acl(struct hci_chan * chan,struct sk_buff * skb,__u16 flags)3227 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3228 {
3229 	struct hci_dev *hdev = chan->conn->hdev;
3230 
3231 	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3232 
3233 	hci_queue_acl(chan, &chan->data_q, skb, flags);
3234 
3235 	queue_work(hdev->workqueue, &hdev->tx_work);
3236 }
3237 
3238 /* Send SCO data */
hci_send_sco(struct hci_conn * conn,struct sk_buff * skb)3239 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3240 {
3241 	struct hci_dev *hdev = conn->hdev;
3242 	struct hci_sco_hdr hdr;
3243 
3244 	BT_DBG("%s len %d", hdev->name, skb->len);
3245 
3246 	hdr.handle = cpu_to_le16(conn->handle);
3247 	hdr.dlen   = skb->len;
3248 
3249 	skb_push(skb, HCI_SCO_HDR_SIZE);
3250 	skb_reset_transport_header(skb);
3251 	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3252 
3253 	hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
3254 
3255 	skb_queue_tail(&conn->data_q, skb);
3256 	queue_work(hdev->workqueue, &hdev->tx_work);
3257 }
3258 
3259 /* Send ISO data */
hci_add_iso_hdr(struct sk_buff * skb,__u16 handle,__u8 flags)3260 static void hci_add_iso_hdr(struct sk_buff *skb, __u16 handle, __u8 flags)
3261 {
3262 	struct hci_iso_hdr *hdr;
3263 	int len = skb->len;
3264 
3265 	skb_push(skb, HCI_ISO_HDR_SIZE);
3266 	skb_reset_transport_header(skb);
3267 	hdr = (struct hci_iso_hdr *)skb_transport_header(skb);
3268 	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3269 	hdr->dlen   = cpu_to_le16(len);
3270 }
3271 
hci_queue_iso(struct hci_conn * conn,struct sk_buff_head * queue,struct sk_buff * skb)3272 static void hci_queue_iso(struct hci_conn *conn, struct sk_buff_head *queue,
3273 			  struct sk_buff *skb)
3274 {
3275 	struct hci_dev *hdev = conn->hdev;
3276 	struct sk_buff *list;
3277 	__u16 flags;
3278 
3279 	skb->len = skb_headlen(skb);
3280 	skb->data_len = 0;
3281 
3282 	hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
3283 
3284 	list = skb_shinfo(skb)->frag_list;
3285 
3286 	flags = hci_iso_flags_pack(list ? ISO_START : ISO_SINGLE, 0x00);
3287 	hci_add_iso_hdr(skb, conn->handle, flags);
3288 
3289 	if (!list) {
3290 		/* Non fragmented */
3291 		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3292 
3293 		skb_queue_tail(queue, skb);
3294 	} else {
3295 		/* Fragmented */
3296 		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3297 
3298 		skb_shinfo(skb)->frag_list = NULL;
3299 
3300 		__skb_queue_tail(queue, skb);
3301 
3302 		do {
3303 			skb = list; list = list->next;
3304 
3305 			hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
3306 			flags = hci_iso_flags_pack(list ? ISO_CONT : ISO_END,
3307 						   0x00);
3308 			hci_add_iso_hdr(skb, conn->handle, flags);
3309 
3310 			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3311 
3312 			__skb_queue_tail(queue, skb);
3313 		} while (list);
3314 	}
3315 }
3316 
hci_send_iso(struct hci_conn * conn,struct sk_buff * skb)3317 void hci_send_iso(struct hci_conn *conn, struct sk_buff *skb)
3318 {
3319 	struct hci_dev *hdev = conn->hdev;
3320 
3321 	BT_DBG("%s len %d", hdev->name, skb->len);
3322 
3323 	hci_queue_iso(conn, &conn->data_q, skb);
3324 
3325 	queue_work(hdev->workqueue, &hdev->tx_work);
3326 }
3327 
3328 /* ---- HCI TX task (outgoing data) ---- */
3329 
3330 /* HCI Connection scheduler */
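/* Compute the TX quota for a connection: the controller's free buffer count
 * for the link type divided by the number of competing connections, with a
 * minimum of one packet.
 */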
hci_quote_sent(struct hci_conn * conn,int num,int * quote)3331 static inline void hci_quote_sent(struct hci_conn *conn, int num, int *quote)
3332 {
3333 	struct hci_dev *hdev;
3334 	int cnt, q;
3335 
3336 	if (!conn) {
3337 		*quote = 0;
3338 		return;
3339 	}
3340 
3341 	hdev = conn->hdev;
3342 
3343 	switch (conn->type) {
3344 	case ACL_LINK:
3345 		cnt = hdev->acl_cnt;
3346 		break;
3347 	case SCO_LINK:
3348 	case ESCO_LINK:
3349 		cnt = hdev->sco_cnt;
3350 		break;
3351 	case LE_LINK:
3352 		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3353 		break;
3354 	case ISO_LINK:
3355 		cnt = hdev->iso_mtu ? hdev->iso_cnt :
3356 			hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3357 		break;
3358 	default:
3359 		cnt = 0;
3360 		bt_dev_err(hdev, "unknown link type %d", conn->type);
3361 	}
3362 
3363 	q = cnt / num;
3364 	*quote = q ? q : 1;
3365 }
3366 
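/* Pick the connection of the given type with queued data and the fewest
 * packets outstanding, and compute its TX quota.
 */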
hci_low_sent(struct hci_dev * hdev,__u8 type,int * quote)3367 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3368 				     int *quote)
3369 {
3370 	struct hci_conn_hash *h = &hdev->conn_hash;
3371 	struct hci_conn *conn = NULL, *c;
3372 	unsigned int num = 0, min = ~0;
3373 
3374 	/* We don't have to lock the device here. Connections are always
3375 	 * added and removed with TX task disabled. */
3376 
3377 	rcu_read_lock();
3378 
3379 	list_for_each_entry_rcu(c, &h->list, list) {
3380 		if (c->type != type || skb_queue_empty(&c->data_q))
3381 			continue;
3382 
3383 		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3384 			continue;
3385 
3386 		num++;
3387 
3388 		if (c->sent < min) {
3389 			min  = c->sent;
3390 			conn = c;
3391 		}
3392 
3393 		if (hci_conn_num(hdev, type) == num)
3394 			break;
3395 	}
3396 
3397 	rcu_read_unlock();
3398 
3399 	hci_quote_sent(conn, num, quote);
3400 
3401 	BT_DBG("conn %p quote %d", conn, *quote);
3402 	return conn;
3403 }
3404 
hci_link_tx_to(struct hci_dev * hdev,__u8 type)3405 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3406 {
3407 	struct hci_conn_hash *h = &hdev->conn_hash;
3408 	struct hci_conn *c;
3409 
3410 	bt_dev_err(hdev, "link tx timeout");
3411 
3412 	rcu_read_lock();
3413 
3414 	/* Kill stalled connections */
3415 	list_for_each_entry_rcu(c, &h->list, list) {
3416 		if (c->type == type && c->sent) {
3417 			bt_dev_err(hdev, "killing stalled connection %pMR",
3418 				   &c->dst);
3419 			/* hci_disconnect might sleep, so we have to release
3420 			 * the RCU read lock before calling it.
3421 			 */
3422 			rcu_read_unlock();
3423 			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3424 			rcu_read_lock();
3425 		}
3426 	}
3427 
3428 	rcu_read_unlock();
3429 }
3430 
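/* Pick the channel with the highest-priority queued data for the given link
 * type; ties are broken in favour of the connection with the fewest packets
 * outstanding.
 */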
hci_chan_sent(struct hci_dev * hdev,__u8 type,int * quote)3431 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3432 				      int *quote)
3433 {
3434 	struct hci_conn_hash *h = &hdev->conn_hash;
3435 	struct hci_chan *chan = NULL;
3436 	unsigned int num = 0, min = ~0, cur_prio = 0;
3437 	struct hci_conn *conn;
3438 	int conn_num = 0;
3439 
3440 	BT_DBG("%s", hdev->name);
3441 
3442 	rcu_read_lock();
3443 
3444 	list_for_each_entry_rcu(conn, &h->list, list) {
3445 		struct hci_chan *tmp;
3446 
3447 		if (conn->type != type)
3448 			continue;
3449 
3450 		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3451 			continue;
3452 
3453 		conn_num++;
3454 
3455 		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3456 			struct sk_buff *skb;
3457 
3458 			if (skb_queue_empty(&tmp->data_q))
3459 				continue;
3460 
3461 			skb = skb_peek(&tmp->data_q);
3462 			if (skb->priority < cur_prio)
3463 				continue;
3464 
3465 			if (skb->priority > cur_prio) {
3466 				num = 0;
3467 				min = ~0;
3468 				cur_prio = skb->priority;
3469 			}
3470 
3471 			num++;
3472 
3473 			if (conn->sent < min) {
3474 				min  = conn->sent;
3475 				chan = tmp;
3476 			}
3477 		}
3478 
3479 		if (hci_conn_num(hdev, type) == conn_num)
3480 			break;
3481 	}
3482 
3483 	rcu_read_unlock();
3484 
3485 	if (!chan)
3486 		return NULL;
3487 
3488 	hci_quote_sent(chan->conn, num, quote);
3489 
3490 	BT_DBG("chan %p quote %d", chan, *quote);
3491 	return chan;
3492 }
3493 
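/* Promote the priority of channels that still have queued data but were not
 * served in the last scheduling round, so that they do not starve.
 */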
hci_prio_recalculate(struct hci_dev * hdev,__u8 type)3494 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3495 {
3496 	struct hci_conn_hash *h = &hdev->conn_hash;
3497 	struct hci_conn *conn;
3498 	int num = 0;
3499 
3500 	BT_DBG("%s", hdev->name);
3501 
3502 	rcu_read_lock();
3503 
3504 	list_for_each_entry_rcu(conn, &h->list, list) {
3505 		struct hci_chan *chan;
3506 
3507 		if (conn->type != type)
3508 			continue;
3509 
3510 		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3511 			continue;
3512 
3513 		num++;
3514 
3515 		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3516 			struct sk_buff *skb;
3517 
3518 			if (chan->sent) {
3519 				chan->sent = 0;
3520 				continue;
3521 			}
3522 
3523 			if (skb_queue_empty(&chan->data_q))
3524 				continue;
3525 
3526 			skb = skb_peek(&chan->data_q);
3527 			if (skb->priority >= HCI_PRIO_MAX - 1)
3528 				continue;
3529 
3530 			skb->priority = HCI_PRIO_MAX - 1;
3531 
3532 			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3533 			       skb->priority);
3534 		}
3535 
3536 		if (hci_conn_num(hdev, type) == num)
3537 			break;
3538 	}
3539 
3540 	rcu_read_unlock();
3541 
3542 }
3543 
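/* If the controller has no free buffers left and nothing has been
 * transmitted for longer than HCI_ACL_TX_TIMEOUT, assume the link is stalled
 * and tear down the stalled connections.
 */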
__check_timeout(struct hci_dev * hdev,unsigned int cnt,u8 type)3544 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type)
3545 {
3546 	unsigned long last_tx;
3547 
3548 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
3549 		return;
3550 
3551 	switch (type) {
3552 	case LE_LINK:
3553 		last_tx = hdev->le_last_tx;
3554 		break;
3555 	default:
3556 		last_tx = hdev->acl_last_tx;
3557 		break;
3558 	}
3559 
3560 	/* tx timeout must be longer than maximum link supervision timeout
3561 	 * (40.9 seconds)
3562 	 */
3563 	if (!cnt && time_after(jiffies, last_tx + HCI_ACL_TX_TIMEOUT))
3564 		hci_link_tx_to(hdev, type);
3565 }
3566 
3567 /* Schedule SCO */
hci_sched_sco(struct hci_dev * hdev)3568 static void hci_sched_sco(struct hci_dev *hdev)
3569 {
3570 	struct hci_conn *conn;
3571 	struct sk_buff *skb;
3572 	int quote;
3573 
3574 	BT_DBG("%s", hdev->name);
3575 
3576 	if (!hci_conn_num(hdev, SCO_LINK))
3577 		return;
3578 
3579 	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3580 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3581 			BT_DBG("skb %p len %d", skb, skb->len);
3582 			hci_send_frame(hdev, skb);
3583 
3584 			conn->sent++;
3585 			if (conn->sent == ~0)
3586 				conn->sent = 0;
3587 		}
3588 	}
3589 }
3590 
hci_sched_esco(struct hci_dev * hdev)3591 static void hci_sched_esco(struct hci_dev *hdev)
3592 {
3593 	struct hci_conn *conn;
3594 	struct sk_buff *skb;
3595 	int quote;
3596 
3597 	BT_DBG("%s", hdev->name);
3598 
3599 	if (!hci_conn_num(hdev, ESCO_LINK))
3600 		return;
3601 
3602 	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3603 						     &quote))) {
3604 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3605 			BT_DBG("skb %p len %d", skb, skb->len);
3606 			hci_send_frame(hdev, skb);
3607 
3608 			conn->sent++;
3609 			if (conn->sent == ~0)
3610 				conn->sent = 0;
3611 		}
3612 	}
3613 }
3614 
hci_sched_acl_pkt(struct hci_dev * hdev)3615 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3616 {
3617 	unsigned int cnt = hdev->acl_cnt;
3618 	struct hci_chan *chan;
3619 	struct sk_buff *skb;
3620 	int quote;
3621 
3622 	__check_timeout(hdev, cnt, ACL_LINK);
3623 
3624 	while (hdev->acl_cnt &&
3625 	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3626 		u32 priority = (skb_peek(&chan->data_q))->priority;
3627 		while (quote-- && (skb = skb_peek(&chan->data_q))) {
3628 			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3629 			       skb->len, skb->priority);
3630 
3631 			/* Stop if priority has changed */
3632 			if (skb->priority < priority)
3633 				break;
3634 
3635 			skb = skb_dequeue(&chan->data_q);
3636 
3637 			hci_conn_enter_active_mode(chan->conn,
3638 						   bt_cb(skb)->force_active);
3639 
3640 			hci_send_frame(hdev, skb);
3641 			hdev->acl_last_tx = jiffies;
3642 
3643 			hdev->acl_cnt--;
3644 			chan->sent++;
3645 			chan->conn->sent++;
3646 
3647 			/* Send pending SCO packets right away */
3648 			hci_sched_sco(hdev);
3649 			hci_sched_esco(hdev);
3650 		}
3651 	}
3652 
3653 	if (cnt != hdev->acl_cnt)
3654 		hci_prio_recalculate(hdev, ACL_LINK);
3655 }
3656 
hci_sched_acl(struct hci_dev * hdev)3657 static void hci_sched_acl(struct hci_dev *hdev)
3658 {
3659 	BT_DBG("%s", hdev->name);
3660 
3661 	/* Nothing to schedule if there are no ACL links */
3662 	if (!hci_conn_num(hdev, ACL_LINK))
3663 		return;
3664 
3665 	hci_sched_acl_pkt(hdev);
3666 }
3667 
hci_sched_le(struct hci_dev * hdev)3668 static void hci_sched_le(struct hci_dev *hdev)
3669 {
3670 	struct hci_chan *chan;
3671 	struct sk_buff *skb;
3672 	int quote, cnt, tmp;
3673 
3674 	BT_DBG("%s", hdev->name);
3675 
3676 	if (!hci_conn_num(hdev, LE_LINK))
3677 		return;
3678 
3679 	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3680 
3681 	__check_timeout(hdev, cnt, LE_LINK);
3682 
3683 	tmp = cnt;
3684 	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3685 		u32 priority = (skb_peek(&chan->data_q))->priority;
3686 		while (quote-- && (skb = skb_peek(&chan->data_q))) {
3687 			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3688 			       skb->len, skb->priority);
3689 
3690 			/* Stop if priority has changed */
3691 			if (skb->priority < priority)
3692 				break;
3693 
3694 			skb = skb_dequeue(&chan->data_q);
3695 
3696 			hci_send_frame(hdev, skb);
3697 			hdev->le_last_tx = jiffies;
3698 
3699 			cnt--;
3700 			chan->sent++;
3701 			chan->conn->sent++;
3702 
3703 			/* Send pending SCO packets right away */
3704 			hci_sched_sco(hdev);
3705 			hci_sched_esco(hdev);
3706 		}
3707 	}
3708 
3709 	if (hdev->le_pkts)
3710 		hdev->le_cnt = cnt;
3711 	else
3712 		hdev->acl_cnt = cnt;
3713 
3714 	if (cnt != tmp)
3715 		hci_prio_recalculate(hdev, LE_LINK);
3716 }
3717 
3718 /* Schedule CIS */
hci_sched_iso(struct hci_dev * hdev)3719 static void hci_sched_iso(struct hci_dev *hdev)
3720 {
3721 	struct hci_conn *conn;
3722 	struct sk_buff *skb;
3723 	int quote, *cnt;
3724 
3725 	BT_DBG("%s", hdev->name);
3726 
3727 	if (!hci_conn_num(hdev, ISO_LINK))
3728 		return;
3729 
3730 	cnt = hdev->iso_pkts ? &hdev->iso_cnt :
3731 		hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt;
3732 	while (*cnt && (conn = hci_low_sent(hdev, ISO_LINK, &quote))) {
3733 		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3734 			BT_DBG("skb %p len %d", skb, skb->len);
3735 			hci_send_frame(hdev, skb);
3736 
3737 			conn->sent++;
3738 			if (conn->sent == ~0)
3739 				conn->sent = 0;
3740 			(*cnt)--;
3741 		}
3742 	}
3743 }
3744 
hci_tx_work(struct work_struct * work)3745 static void hci_tx_work(struct work_struct *work)
3746 {
3747 	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3748 	struct sk_buff *skb;
3749 
3750 	BT_DBG("%s acl %d sco %d le %d iso %d", hdev->name, hdev->acl_cnt,
3751 	       hdev->sco_cnt, hdev->le_cnt, hdev->iso_cnt);
3752 
3753 	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
3754 		/* Schedule queues and send stuff to HCI driver */
3755 		hci_sched_sco(hdev);
3756 		hci_sched_esco(hdev);
3757 		hci_sched_iso(hdev);
3758 		hci_sched_acl(hdev);
3759 		hci_sched_le(hdev);
3760 	}
3761 
3762 	/* Send next queued raw (unknown type) packet */
3763 	while ((skb = skb_dequeue(&hdev->raw_q)))
3764 		hci_send_frame(hdev, skb);
3765 }
3766 
3767 /* ----- HCI RX task (incoming data processing) ----- */
3768 
3769 /* ACL data packet */
hci_acldata_packet(struct hci_dev * hdev,struct sk_buff * skb)3770 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3771 {
3772 	struct hci_acl_hdr *hdr = (void *) skb->data;
3773 	struct hci_conn *conn;
3774 	__u16 handle, flags;
3775 
3776 	skb_pull(skb, HCI_ACL_HDR_SIZE);
3777 
3778 	handle = __le16_to_cpu(hdr->handle);
3779 	flags  = hci_flags(handle);
3780 	handle = hci_handle(handle);
3781 
3782 	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3783 	       handle, flags);
3784 
3785 	hdev->stat.acl_rx++;
3786 
3787 	hci_dev_lock(hdev);
3788 	conn = hci_conn_hash_lookup_handle(hdev, handle);
3789 	hci_dev_unlock(hdev);
3790 
3791 	if (conn) {
3792 		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3793 
3794 		/* Send to upper protocol */
3795 		l2cap_recv_acldata(conn, skb, flags);
3796 		return;
3797 	} else {
3798 		bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
3799 			   handle);
3800 	}
3801 
3802 	kfree_skb(skb);
3803 }
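
/* Illustrative sketch (not part of this file): how the 16-bit handle field
 * of an ACL header splits into the 12-bit connection handle and the packet
 * boundary/broadcast flag bits, mirroring the hci_handle()/hci_flags()
 * helpers used above.
 */
static inline void hci_acl_handle_example(__le16 raw_handle)
{
	__u16 value  = __le16_to_cpu(raw_handle);
	__u16 handle = value & 0x0fff;	/* connection handle */
	__u16 flags  = value >> 12;	/* PB flag (bits 12-13), BC flag (bits 14-15) */

	BT_DBG("handle 0x%4.4x flags 0x%x", handle, flags);
}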
3804 
3805 /* SCO data packet */
3806 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3807 {
3808 	struct hci_sco_hdr *hdr = (void *) skb->data;
3809 	struct hci_conn *conn;
3810 	__u16 handle, flags;
3811 
3812 	skb_pull(skb, HCI_SCO_HDR_SIZE);
3813 
3814 	handle = __le16_to_cpu(hdr->handle);
3815 	flags  = hci_flags(handle);
3816 	handle = hci_handle(handle);
3817 
3818 	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3819 	       handle, flags);
3820 
3821 	hdev->stat.sco_rx++;
3822 
3823 	hci_dev_lock(hdev);
3824 	conn = hci_conn_hash_lookup_handle(hdev, handle);
3825 	hci_dev_unlock(hdev);
3826 
3827 	if (conn) {
3828 		/* Send to upper protocol */
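		/* The low two flag bits carry the packet status reported by
		 * the controller (correctly received, possibly invalid, no
		 * data, partially lost) when erroneous data reporting is
		 * enabled.
		 */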
3829 		hci_skb_pkt_status(skb) = flags & 0x03;
3830 		sco_recv_scodata(conn, skb);
3831 		return;
3832 	} else {
3833 		bt_dev_err_ratelimited(hdev, "SCO packet for unknown connection handle %d",
3834 				       handle);
3835 	}
3836 
3837 	kfree_skb(skb);
3838 }
3839 
3840 static void hci_isodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3841 {
3842 	struct hci_iso_hdr *hdr;
3843 	struct hci_conn *conn;
3844 	__u16 handle, flags;
3845 
3846 	hdr = skb_pull_data(skb, sizeof(*hdr));
3847 	if (!hdr) {
3848 		bt_dev_err(hdev, "ISO packet too small");
3849 		goto drop;
3850 	}
3851 
3852 	handle = __le16_to_cpu(hdr->handle);
3853 	flags  = hci_flags(handle);
3854 	handle = hci_handle(handle);
3855 
3856 	bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
3857 		   handle, flags);
3858 
3859 	hci_dev_lock(hdev);
3860 	conn = hci_conn_hash_lookup_handle(hdev, handle);
3861 	hci_dev_unlock(hdev);
3862 
3863 	if (!conn) {
3864 		bt_dev_err(hdev, "ISO packet for unknown connection handle %d",
3865 			   handle);
3866 		goto drop;
3867 	}
3868 
3869 	/* Send to upper protocol */
3870 	iso_recv(conn, skb, flags);
3871 	return;
3872 
3873 drop:
3874 	kfree_skb(skb);
3875 }
3876 
3877 static bool hci_req_is_complete(struct hci_dev *hdev)
3878 {
3879 	struct sk_buff *skb;
3880 
3881 	skb = skb_peek(&hdev->cmd_q);
3882 	if (!skb)
3883 		return true;
3884 
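	/* Only the first command of a request is tagged with HCI_REQ_START,
	 * so a queue head that starts a new request means the previous
	 * request has been fully sent.
	 */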
3885 	return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
3886 }
3887 
3888 static void hci_resend_last(struct hci_dev *hdev)
3889 {
3890 	struct hci_command_hdr *sent;
3891 	struct sk_buff *skb;
3892 	u16 opcode;
3893 
3894 	if (!hdev->sent_cmd)
3895 		return;
3896 
3897 	sent = (void *) hdev->sent_cmd->data;
3898 	opcode = __le16_to_cpu(sent->opcode);
3899 	if (opcode == HCI_OP_RESET)
3900 		return;
3901 
3902 	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3903 	if (!skb)
3904 		return;
3905 
3906 	skb_queue_head(&hdev->cmd_q, skb);
3907 	queue_work(hdev->workqueue, &hdev->cmd_work);
3908 }
3909 
3910 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
3911 			  hci_req_complete_t *req_complete,
3912 			  hci_req_complete_skb_t *req_complete_skb)
3913 {
3914 	struct sk_buff *skb;
3915 	unsigned long flags;
3916 
3917 	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3918 
3919 	/* If the completed command doesn't match the last one that was
3920 	 * sent, we need to handle it specially.
3921 	 */
3922 	if (!hci_sent_cmd_data(hdev, opcode)) {
3923 		/* Some CSR-based controllers generate a spontaneous
3924 		 * reset complete event during init, so any pending
3925 		 * command will never be completed. In such a case we
3926 		 * need to resend whatever the last sent command
3927 		 * was.
3928 		 */
3929 		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3930 			hci_resend_last(hdev);
3931 
3932 		return;
3933 	}
3934 
3935 	/* If we reach this point, this event matches the last command sent */
3936 	hci_dev_clear_flag(hdev, HCI_CMD_PENDING);
3937 
3938 	/* If the command succeeded and there are still more commands in
3939 	 * this request, the request is not yet complete.
3940 	 */
3941 	if (!status && !hci_req_is_complete(hdev))
3942 		return;
3943 
3944 	skb = hdev->req_skb;
3945 
3946 	/* If this was the last command in a request, the completion
3947 	 * callback is found in hdev->req_skb rather than in the
3948 	 * command queue (hdev->cmd_q).
3949 	 */
3950 	if (skb && bt_cb(skb)->hci.req_flags & HCI_REQ_SKB) {
3951 		*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
3952 		return;
3953 	}
3954 
3955 	if (skb && bt_cb(skb)->hci.req_complete) {
3956 		*req_complete = bt_cb(skb)->hci.req_complete;
3957 		return;
3958 	}
3959 
3960 	/* Remove all pending commands belonging to this request */
3961 	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3962 	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
3963 		if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
3964 			__skb_queue_head(&hdev->cmd_q, skb);
3965 			break;
3966 		}
3967 
3968 		if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
3969 			*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
3970 		else
3971 			*req_complete = bt_cb(skb)->hci.req_complete;
3972 		dev_kfree_skb_irq(skb);
3973 	}
3974 	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3975 }
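
/* Illustrative sketch (an assumption about the caller in hci_event.c, not
 * code from this file): the event handler passes the addresses of two
 * callback pointers to hci_req_cmd_complete() and afterwards invokes
 * whichever one was filled in.
 */
static inline void hci_req_cmd_complete_caller_example(struct hci_dev *hdev,
							u16 opcode, u8 status,
							struct sk_buff *skb)
{
	hci_req_complete_t req_complete = NULL;
	hci_req_complete_skb_t req_complete_skb = NULL;

	hci_req_cmd_complete(hdev, opcode, status, &req_complete,
			     &req_complete_skb);

	if (req_complete)
		req_complete(hdev, status, opcode);
	else if (req_complete_skb)
		req_complete_skb(hdev, status, opcode, skb);
}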
3976 
3977 static void hci_rx_work(struct work_struct *work)
3978 {
3979 	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
3980 	struct sk_buff *skb;
3981 
3982 	BT_DBG("%s", hdev->name);
3983 
3984 	/* The kcov_remote functions are used to collect packet-parsing
3985 	 * coverage information from this background thread and associate
3986 	 * the coverage with the syscall thread that originally injected
3987 	 * the packet. This helps with fuzzing the kernel.
3988 	 */
3989 	for (; (skb = skb_dequeue(&hdev->rx_q)); kcov_remote_stop()) {
3990 		kcov_remote_start_common(skb_get_kcov_handle(skb));
3991 
3992 		/* Send copy to monitor */
3993 		hci_send_to_monitor(hdev, skb);
3994 
3995 		if (atomic_read(&hdev->promisc)) {
3996 			/* Send copy to the sockets */
3997 			hci_send_to_sock(hdev, skb);
3998 		}
3999 
4000 		/* If the device has been opened in HCI_USER_CHANNEL,
4001 		 * userspace has exclusive access to the device.
4002 		 * While the device is in HCI_INIT, however, we still
4003 		 * need to process the packets so that the driver's
4004 		 * setup() can complete.
4005 		 */
4006 		if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
4007 		    !test_bit(HCI_INIT, &hdev->flags)) {
4008 			kfree_skb(skb);
4009 			continue;
4010 		}
4011 
4012 		if (test_bit(HCI_INIT, &hdev->flags)) {
4013 			/* Don't process data packets in this state. */
4014 			switch (hci_skb_pkt_type(skb)) {
4015 			case HCI_ACLDATA_PKT:
4016 			case HCI_SCODATA_PKT:
4017 			case HCI_ISODATA_PKT:
4018 				kfree_skb(skb);
4019 				continue;
4020 			}
4021 		}
4022 
4023 		/* Process frame */
4024 		switch (hci_skb_pkt_type(skb)) {
4025 		case HCI_EVENT_PKT:
4026 			BT_DBG("%s Event packet", hdev->name);
4027 			hci_event_packet(hdev, skb);
4028 			break;
4029 
4030 		case HCI_ACLDATA_PKT:
4031 			BT_DBG("%s ACL data packet", hdev->name);
4032 			hci_acldata_packet(hdev, skb);
4033 			break;
4034 
4035 		case HCI_SCODATA_PKT:
4036 			BT_DBG("%s SCO data packet", hdev->name);
4037 			hci_scodata_packet(hdev, skb);
4038 			break;
4039 
4040 		case HCI_ISODATA_PKT:
4041 			BT_DBG("%s ISO data packet", hdev->name);
4042 			hci_isodata_packet(hdev, skb);
4043 			break;
4044 
4045 		default:
4046 			kfree_skb(skb);
4047 			break;
4048 		}
4049 	}
4050 }
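
/* Illustrative sketch (assumption, driver-side code, not part of this file):
 * a transport driver tags an incoming buffer with its packet type and hands
 * it to hci_recv_frame(), which queues it on rx_q and schedules the
 * hci_rx_work() handler above.
 */
static inline int hci_driver_rx_example(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;	/* or HCI_ACLDATA_PKT, ... */

	return hci_recv_frame(hdev, skb);
}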
4051 
4052 static void hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb)
4053 {
4054 	int err;
4055 
4056 	bt_dev_dbg(hdev, "skb %p", skb);
4057 
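	/* Keep a clone of the frame as sent_cmd so the matching Command
	 * Complete/Status event can be paired with it later (and so it can
	 * be re-sent if needed); drop whatever was kept from the previous
	 * command first.
	 */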
4058 	kfree_skb(hdev->sent_cmd);
4059 
4060 	hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4061 	if (!hdev->sent_cmd) {
4062 		skb_queue_head(&hdev->cmd_q, skb);
4063 		queue_work(hdev->workqueue, &hdev->cmd_work);
4064 		return;
4065 	}
4066 
4067 	err = hci_send_frame(hdev, skb);
4068 	if (err < 0) {
4069 		hci_cmd_sync_cancel_sync(hdev, -err);
4070 		return;
4071 	}
4072 
4073 	if (hci_req_status_pend(hdev) &&
4074 	    !hci_dev_test_and_set_flag(hdev, HCI_CMD_PENDING)) {
4075 		kfree_skb(hdev->req_skb);
4076 		hdev->req_skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4077 	}
4078 
4079 	atomic_dec(&hdev->cmd_cnt);
4080 }
4081 
4082 static void hci_cmd_work(struct work_struct *work)
4083 {
4084 	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4085 	struct sk_buff *skb;
4086 
4087 	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4088 	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4089 
4090 	/* Send queued commands */
4091 	if (atomic_read(&hdev->cmd_cnt)) {
4092 		skb = skb_dequeue(&hdev->cmd_q);
4093 		if (!skb)
4094 			return;
4095 
4096 		hci_send_cmd_sync(hdev, skb);
4097 
4098 		rcu_read_lock();
4099 		if (test_bit(HCI_RESET, &hdev->flags) ||
4100 		    hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
4101 			cancel_delayed_work(&hdev->cmd_timer);
4102 		else
4103 			queue_delayed_work(hdev->workqueue, &hdev->cmd_timer,
4104 					   HCI_CMD_TIMEOUT);
4105 		rcu_read_unlock();
4106 	}
4107 }
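
/* Illustrative sketch (assumption, hypothetical helper): producers append
 * commands to cmd_q and kick cmd_work; hci_cmd_work() above then sends one
 * command at a time, gated by the controller's command credit (cmd_cnt).
 */
static inline void hci_queue_command_example(struct hci_dev *hdev,
					     struct sk_buff *cmd)
{
	skb_queue_tail(&hdev->cmd_q, cmd);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}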
4108