1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
24 */
25
26 /* Bluetooth HCI core. */
27
28 #include <linux/export.h>
29 #include <linux/rfkill.h>
30 #include <linux/debugfs.h>
31 #include <linux/crypto.h>
32 #include <linux/kcov.h>
33 #include <linux/property.h>
34 #include <linux/suspend.h>
35 #include <linux/wait.h>
36 #include <asm/unaligned.h>
37
38 #include <net/bluetooth/bluetooth.h>
39 #include <net/bluetooth/hci_core.h>
40 #include <net/bluetooth/l2cap.h>
41 #include <net/bluetooth/mgmt.h>
42
43 #include "hci_request.h"
44 #include "hci_debugfs.h"
45 #include "smp.h"
46 #include "leds.h"
47 #include "msft.h"
48 #include "aosp.h"
49 #include "hci_codec.h"
50
51 static void hci_rx_work(struct work_struct *work);
52 static void hci_cmd_work(struct work_struct *work);
53 static void hci_tx_work(struct work_struct *work);
54
55 /* HCI device list */
56 LIST_HEAD(hci_dev_list);
57 DEFINE_RWLOCK(hci_dev_list_lock);
58
59 /* HCI callback list */
60 LIST_HEAD(hci_cb_list);
61 DEFINE_MUTEX(hci_cb_list_lock);
62
63 /* HCI ID Numbering */
64 static DEFINE_IDA(hci_index_ida);
65
66 /* Get HCI device by index.
67 * Device is held on return. */
68 static struct hci_dev *__hci_dev_get(int index, int *srcu_index)
69 {
70 struct hci_dev *hdev = NULL, *d;
71
72 BT_DBG("%d", index);
73
74 if (index < 0)
75 return NULL;
76
77 read_lock(&hci_dev_list_lock);
78 list_for_each_entry(d, &hci_dev_list, list) {
79 if (d->id == index) {
80 hdev = hci_dev_hold(d);
81 if (srcu_index)
82 *srcu_index = srcu_read_lock(&d->srcu);
83 break;
84 }
85 }
86 read_unlock(&hci_dev_list_lock);
87 return hdev;
88 }
89
90 struct hci_dev *hci_dev_get(int index)
91 {
92 return __hci_dev_get(index, NULL);
93 }
94
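/* Variant of hci_dev_get() that also enters the device's SRCU read-side
 * critical section via __hci_dev_get(). Both the reference and the SRCU
 * lock are released with hci_dev_put_srcu().
 */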
95 static struct hci_dev *hci_dev_get_srcu(int index, int *srcu_index)
96 {
97 return __hci_dev_get(index, srcu_index);
98 }
99
100 static void hci_dev_put_srcu(struct hci_dev *hdev, int srcu_index)
101 {
102 srcu_read_unlock(&hdev->srcu, srcu_index);
103 hci_dev_put(hdev);
104 }
105
106 /* ---- Inquiry support ---- */
107
108 bool hci_discovery_active(struct hci_dev *hdev)
109 {
110 struct discovery_state *discov = &hdev->discovery;
111
112 switch (discov->state) {
113 case DISCOVERY_FINDING:
114 case DISCOVERY_RESOLVING:
115 return true;
116
117 default:
118 return false;
119 }
120 }
121
122 void hci_discovery_set_state(struct hci_dev *hdev, int state)
123 {
124 int old_state = hdev->discovery.state;
125
126 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
127
128 if (old_state == state)
129 return;
130
131 hdev->discovery.state = state;
132
133 switch (state) {
134 case DISCOVERY_STOPPED:
135 hci_update_passive_scan(hdev);
136
137 if (old_state != DISCOVERY_STARTING)
138 mgmt_discovering(hdev, 0);
139 break;
140 case DISCOVERY_STARTING:
141 break;
142 case DISCOVERY_FINDING:
143 mgmt_discovering(hdev, 1);
144 break;
145 case DISCOVERY_RESOLVING:
146 break;
147 case DISCOVERY_STOPPING:
148 break;
149 }
150 }
151
152 void hci_inquiry_cache_flush(struct hci_dev *hdev)
153 {
154 struct discovery_state *cache = &hdev->discovery;
155 struct inquiry_entry *p, *n;
156
157 list_for_each_entry_safe(p, n, &cache->all, all) {
158 list_del(&p->all);
159 kfree(p);
160 }
161
162 INIT_LIST_HEAD(&cache->unknown);
163 INIT_LIST_HEAD(&cache->resolve);
164 }
165
166 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
167 bdaddr_t *bdaddr)
168 {
169 struct discovery_state *cache = &hdev->discovery;
170 struct inquiry_entry *e;
171
172 BT_DBG("cache %p, %pMR", cache, bdaddr);
173
174 list_for_each_entry(e, &cache->all, all) {
175 if (!bacmp(&e->data.bdaddr, bdaddr))
176 return e;
177 }
178
179 return NULL;
180 }
181
182 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
183 bdaddr_t *bdaddr)
184 {
185 struct discovery_state *cache = &hdev->discovery;
186 struct inquiry_entry *e;
187
188 BT_DBG("cache %p, %pMR", cache, bdaddr);
189
190 list_for_each_entry(e, &cache->unknown, list) {
191 if (!bacmp(&e->data.bdaddr, bdaddr))
192 return e;
193 }
194
195 return NULL;
196 }
197
198 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
199 bdaddr_t *bdaddr,
200 int state)
201 {
202 struct discovery_state *cache = &hdev->discovery;
203 struct inquiry_entry *e;
204
205 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
206
207 list_for_each_entry(e, &cache->resolve, list) {
208 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
209 return e;
210 if (!bacmp(&e->data.bdaddr, bdaddr))
211 return e;
212 }
213
214 return NULL;
215 }
216
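/* Re-insert @ie so that the resolve list stays ordered by signal strength
 * (smallest absolute RSSI first), skipping past entries whose remote name
 * request is already pending.
 */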
217 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
218 struct inquiry_entry *ie)
219 {
220 struct discovery_state *cache = &hdev->discovery;
221 struct list_head *pos = &cache->resolve;
222 struct inquiry_entry *p;
223
224 list_del(&ie->list);
225
226 list_for_each_entry(p, &cache->resolve, list) {
227 if (p->name_state != NAME_PENDING &&
228 abs(p->data.rssi) >= abs(ie->data.rssi))
229 break;
230 pos = &p->list;
231 }
232
233 list_add(&ie->list, pos);
234 }
235
236 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
237 bool name_known)
238 {
239 struct discovery_state *cache = &hdev->discovery;
240 struct inquiry_entry *ie;
241 u32 flags = 0;
242
243 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
244
245 hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
246
247 if (!data->ssp_mode)
248 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
249
250 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
251 if (ie) {
252 if (!ie->data.ssp_mode)
253 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
254
255 if (ie->name_state == NAME_NEEDED &&
256 data->rssi != ie->data.rssi) {
257 ie->data.rssi = data->rssi;
258 hci_inquiry_cache_update_resolve(hdev, ie);
259 }
260
261 goto update;
262 }
263
264 /* Entry not in the cache. Add new one. */
265 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
266 if (!ie) {
267 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
268 goto done;
269 }
270
271 list_add(&ie->all, &cache->all);
272
273 if (name_known) {
274 ie->name_state = NAME_KNOWN;
275 } else {
276 ie->name_state = NAME_NOT_KNOWN;
277 list_add(&ie->list, &cache->unknown);
278 }
279
280 update:
281 if (name_known && ie->name_state != NAME_KNOWN &&
282 ie->name_state != NAME_PENDING) {
283 ie->name_state = NAME_KNOWN;
284 list_del(&ie->list);
285 }
286
287 memcpy(&ie->data, data, sizeof(*data));
288 ie->timestamp = jiffies;
289 cache->timestamp = jiffies;
290
291 if (ie->name_state == NAME_NOT_KNOWN)
292 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
293
294 done:
295 return flags;
296 }
297
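/* Copy up to @num entries from the inquiry cache into @buf as
 * struct inquiry_info records and return how many were copied.
 * Runs under hdev->lock (see hci_inquiry()), so it must not sleep.
 */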
298 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
299 {
300 struct discovery_state *cache = &hdev->discovery;
301 struct inquiry_info *info = (struct inquiry_info *) buf;
302 struct inquiry_entry *e;
303 int copied = 0;
304
305 list_for_each_entry(e, &cache->all, all) {
306 struct inquiry_data *data = &e->data;
307
308 if (copied >= num)
309 break;
310
311 bacpy(&info->bdaddr, &data->bdaddr);
312 info->pscan_rep_mode = data->pscan_rep_mode;
313 info->pscan_period_mode = data->pscan_period_mode;
314 info->pscan_mode = data->pscan_mode;
315 memcpy(info->dev_class, data->dev_class, 3);
316 info->clock_offset = data->clock_offset;
317
318 info++;
319 copied++;
320 }
321
322 BT_DBG("cache %p, copied %d", cache, copied);
323 return copied;
324 }
325
326 static int hci_inq_req(struct hci_request *req, unsigned long opt)
327 {
328 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
329 struct hci_dev *hdev = req->hdev;
330 struct hci_cp_inquiry cp;
331
332 BT_DBG("%s", hdev->name);
333
334 if (test_bit(HCI_INQUIRY, &hdev->flags))
335 return 0;
336
337 /* Start Inquiry */
338 memcpy(&cp.lap, &ir->lap, 3);
339 cp.length = ir->length;
340 cp.num_rsp = ir->num_rsp;
341 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
342
343 return 0;
344 }
345
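/* Handler for the HCIINQUIRY ioctl: flush a stale inquiry cache, run the
 * inquiry synchronously when needed, then copy the cached results back to
 * user space.
 */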
346 int hci_inquiry(void __user *arg)
347 {
348 __u8 __user *ptr = arg;
349 struct hci_inquiry_req ir;
350 struct hci_dev *hdev;
351 int err = 0, do_inquiry = 0, max_rsp;
352 long timeo;
353 __u8 *buf;
354
355 if (copy_from_user(&ir, ptr, sizeof(ir)))
356 return -EFAULT;
357
358 hdev = hci_dev_get(ir.dev_id);
359 if (!hdev)
360 return -ENODEV;
361
362 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
363 err = -EBUSY;
364 goto done;
365 }
366
367 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
368 err = -EOPNOTSUPP;
369 goto done;
370 }
371
372 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
373 err = -EOPNOTSUPP;
374 goto done;
375 }
376
377 /* Restrict maximum inquiry length to 60 seconds */
378 if (ir.length > 60) {
379 err = -EINVAL;
380 goto done;
381 }
382
383 hci_dev_lock(hdev);
384 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
385 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
386 hci_inquiry_cache_flush(hdev);
387 do_inquiry = 1;
388 }
389 hci_dev_unlock(hdev);
390
391 timeo = ir.length * msecs_to_jiffies(2000);
392
393 if (do_inquiry) {
394 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
395 timeo, NULL);
396 if (err < 0)
397 goto done;
398
399 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
400 * cleared). If it is interrupted by a signal, return -EINTR.
401 */
402 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
403 TASK_INTERRUPTIBLE)) {
404 err = -EINTR;
405 goto done;
406 }
407 }
408
409 /* For an unlimited number of responses we will use a buffer with
410 * 255 entries
411 */
412 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
413
414 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
415 * copy it to the user space.
416 */
417 buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
418 if (!buf) {
419 err = -ENOMEM;
420 goto done;
421 }
422
423 hci_dev_lock(hdev);
424 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
425 hci_dev_unlock(hdev);
426
427 BT_DBG("num_rsp %d", ir.num_rsp);
428
429 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
430 ptr += sizeof(ir);
431 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
432 ir.num_rsp))
433 err = -EFAULT;
434 } else
435 err = -EFAULT;
436
437 kfree(buf);
438
439 done:
440 hci_dev_put(hdev);
441 return err;
442 }
443
444 static int hci_dev_do_open(struct hci_dev *hdev)
445 {
446 int ret = 0;
447
448 BT_DBG("%s %p", hdev->name, hdev);
449
450 hci_req_sync_lock(hdev);
451
452 ret = hci_dev_open_sync(hdev);
453
454 hci_req_sync_unlock(hdev);
455 return ret;
456 }
457
458 /* ---- HCI ioctl helpers ---- */
459
460 int hci_dev_open(__u16 dev)
461 {
462 struct hci_dev *hdev;
463 int err;
464
465 hdev = hci_dev_get(dev);
466 if (!hdev)
467 return -ENODEV;
468
469 /* Devices that are marked as unconfigured can only be powered
470 * up as user channel. Trying to bring them up as normal devices
471 * will result in a failure. Only user channel operation is
472 * possible.
473 *
474 * When this function is called for a user channel, the flag
475 * HCI_USER_CHANNEL will be set first before attempting to
476 * open the device.
477 */
478 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
479 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
480 err = -EOPNOTSUPP;
481 goto done;
482 }
483
484 /* We need to ensure that no other power on/off work is pending
485 * before proceeding to call hci_dev_do_open. This is
486 * particularly important if the setup procedure has not yet
487 * completed.
488 */
489 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
490 cancel_delayed_work(&hdev->power_off);
491
492 /* After this call it is guaranteed that the setup procedure
493 * has finished. This means that error conditions like RFKILL
494 * or no valid public or static random address apply.
495 */
496 flush_workqueue(hdev->req_workqueue);
497
498 /* For controllers not using the management interface and that
499 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
500 * so that pairing works for them. Once the management interface
501 * is in use this bit will be cleared again and userspace has
502 * to explicitly enable it.
503 */
504 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
505 !hci_dev_test_flag(hdev, HCI_MGMT))
506 hci_dev_set_flag(hdev, HCI_BONDABLE);
507
508 err = hci_dev_do_open(hdev);
509
510 done:
511 hci_dev_put(hdev);
512 return err;
513 }
514
515 int hci_dev_do_close(struct hci_dev *hdev)
516 {
517 int err;
518
519 BT_DBG("%s %p", hdev->name, hdev);
520
521 hci_req_sync_lock(hdev);
522
523 err = hci_dev_close_sync(hdev);
524
525 hci_req_sync_unlock(hdev);
526
527 return err;
528 }
529
530 int hci_dev_close(__u16 dev)
531 {
532 struct hci_dev *hdev;
533 int err;
534
535 hdev = hci_dev_get(dev);
536 if (!hdev)
537 return -ENODEV;
538
539 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
540 err = -EBUSY;
541 goto done;
542 }
543
544 cancel_work_sync(&hdev->power_on);
545 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
546 cancel_delayed_work(&hdev->power_off);
547
548 err = hci_dev_do_close(hdev);
549
550 done:
551 hci_dev_put(hdev);
552 return err;
553 }
554
555 static int hci_dev_do_reset(struct hci_dev *hdev)
556 {
557 int ret;
558
559 BT_DBG("%s %p", hdev->name, hdev);
560
561 hci_req_sync_lock(hdev);
562
563 /* Drop queues */
564 skb_queue_purge(&hdev->rx_q);
565 skb_queue_purge(&hdev->cmd_q);
566
567 /* Cancel these to avoid queueing non-chained pending work */
568 hci_dev_set_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
569 /* Wait for
570 *
571 * if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
572 * queue_delayed_work(&hdev->{cmd,ncmd}_timer)
573 *
574 * inside RCU section to see the flag or complete scheduling.
575 */
576 synchronize_rcu();
577 /* Explicitly cancel works in case scheduled after setting the flag. */
578 cancel_delayed_work(&hdev->cmd_timer);
579 cancel_delayed_work(&hdev->ncmd_timer);
580
581 /* Avoid potential lockdep warnings from the *_flush() calls by
582 * ensuring the workqueue is empty up front.
583 */
584 drain_workqueue(hdev->workqueue);
585
586 hci_dev_lock(hdev);
587 hci_inquiry_cache_flush(hdev);
588 hci_conn_hash_flush(hdev);
589 hci_dev_unlock(hdev);
590
591 if (hdev->flush)
592 hdev->flush(hdev);
593
594 hci_dev_clear_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
595
596 atomic_set(&hdev->cmd_cnt, 1);
597 hdev->acl_cnt = 0;
598 hdev->sco_cnt = 0;
599 hdev->le_cnt = 0;
600 hdev->iso_cnt = 0;
601
602 ret = hci_reset_sync(hdev);
603
604 hci_req_sync_unlock(hdev);
605 return ret;
606 }
607
608 int hci_dev_reset(__u16 dev)
609 {
610 struct hci_dev *hdev;
611 int err, srcu_index;
612
613 hdev = hci_dev_get_srcu(dev, &srcu_index);
614 if (!hdev)
615 return -ENODEV;
616
617 if (!test_bit(HCI_UP, &hdev->flags)) {
618 err = -ENETDOWN;
619 goto done;
620 }
621
622 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
623 err = -EBUSY;
624 goto done;
625 }
626
627 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
628 err = -EOPNOTSUPP;
629 goto done;
630 }
631
632 err = hci_dev_do_reset(hdev);
633
634 done:
635 hci_dev_put_srcu(hdev, srcu_index);
636 return err;
637 }
638
639 int hci_dev_reset_stat(__u16 dev)
640 {
641 struct hci_dev *hdev;
642 int ret = 0;
643
644 hdev = hci_dev_get(dev);
645 if (!hdev)
646 return -ENODEV;
647
648 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
649 ret = -EBUSY;
650 goto done;
651 }
652
653 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
654 ret = -EOPNOTSUPP;
655 goto done;
656 }
657
658 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
659
660 done:
661 hci_dev_put(hdev);
662 return ret;
663 }
664
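/* Keep the HCI_CONNECTABLE and HCI_DISCOVERABLE flags in sync with a scan
 * mode that was changed through the legacy HCISETSCAN ioctl rather than
 * through the management interface.
 */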
665 static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan)
666 {
667 bool conn_changed, discov_changed;
668
669 BT_DBG("%s scan 0x%02x", hdev->name, scan);
670
671 if ((scan & SCAN_PAGE))
672 conn_changed = !hci_dev_test_and_set_flag(hdev,
673 HCI_CONNECTABLE);
674 else
675 conn_changed = hci_dev_test_and_clear_flag(hdev,
676 HCI_CONNECTABLE);
677
678 if ((scan & SCAN_INQUIRY)) {
679 discov_changed = !hci_dev_test_and_set_flag(hdev,
680 HCI_DISCOVERABLE);
681 } else {
682 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
683 discov_changed = hci_dev_test_and_clear_flag(hdev,
684 HCI_DISCOVERABLE);
685 }
686
687 if (!hci_dev_test_flag(hdev, HCI_MGMT))
688 return;
689
690 if (conn_changed || discov_changed) {
691 /* In case this was disabled through mgmt */
692 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
693
694 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
695 hci_update_adv_data(hdev, hdev->cur_adv_instance);
696
697 mgmt_new_settings(hdev);
698 }
699 }
700
701 int hci_dev_cmd(unsigned int cmd, void __user *arg)
702 {
703 struct hci_dev *hdev;
704 struct hci_dev_req dr;
705 __le16 policy;
706 int err = 0;
707
708 if (copy_from_user(&dr, arg, sizeof(dr)))
709 return -EFAULT;
710
711 hdev = hci_dev_get(dr.dev_id);
712 if (!hdev)
713 return -ENODEV;
714
715 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
716 err = -EBUSY;
717 goto done;
718 }
719
720 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
721 err = -EOPNOTSUPP;
722 goto done;
723 }
724
725 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
726 err = -EOPNOTSUPP;
727 goto done;
728 }
729
730 switch (cmd) {
731 case HCISETAUTH:
732 err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_AUTH_ENABLE,
733 1, &dr.dev_opt, HCI_CMD_TIMEOUT);
734 break;
735
736 case HCISETENCRYPT:
737 if (!lmp_encrypt_capable(hdev)) {
738 err = -EOPNOTSUPP;
739 break;
740 }
741
742 if (!test_bit(HCI_AUTH, &hdev->flags)) {
743 /* Auth must be enabled first */
744 err = hci_cmd_sync_status(hdev,
745 HCI_OP_WRITE_AUTH_ENABLE,
746 1, &dr.dev_opt,
747 HCI_CMD_TIMEOUT);
748 if (err)
749 break;
750 }
751
752 err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_ENCRYPT_MODE,
753 1, &dr.dev_opt, HCI_CMD_TIMEOUT);
754 break;
755
756 case HCISETSCAN:
757 err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE,
758 1, &dr.dev_opt, HCI_CMD_TIMEOUT);
759
760 /* Ensure that the connectable and discoverable states
761 * get correctly modified as this was a non-mgmt change.
762 */
763 if (!err)
764 hci_update_passive_scan_state(hdev, dr.dev_opt);
765 break;
766
767 case HCISETLINKPOL:
768 policy = cpu_to_le16(dr.dev_opt);
769
770 err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
771 2, &policy, HCI_CMD_TIMEOUT);
772 break;
773
774 case HCISETLINKMODE:
775 hdev->link_mode = ((__u16) dr.dev_opt) &
776 (HCI_LM_MASTER | HCI_LM_ACCEPT);
777 break;
778
779 case HCISETPTYPE:
780 if (hdev->pkt_type == (__u16) dr.dev_opt)
781 break;
782
783 hdev->pkt_type = (__u16) dr.dev_opt;
784 mgmt_phy_configuration_changed(hdev, NULL);
785 break;
786
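/* For the MTU ioctls dev_opt packs two 16-bit values: the code below reads
 * the packet count from the first half-word and the MTU from the second.
 */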
787 case HCISETACLMTU:
788 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
789 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
790 break;
791
792 case HCISETSCOMTU:
793 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
794 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
795 break;
796
797 default:
798 err = -EINVAL;
799 break;
800 }
801
802 done:
803 hci_dev_put(hdev);
804 return err;
805 }
806
807 int hci_get_dev_list(void __user *arg)
808 {
809 struct hci_dev *hdev;
810 struct hci_dev_list_req *dl;
811 struct hci_dev_req *dr;
812 int n = 0, size, err;
813 __u16 dev_num;
814
815 if (get_user(dev_num, (__u16 __user *) arg))
816 return -EFAULT;
817
818 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
819 return -EINVAL;
820
821 size = sizeof(*dl) + dev_num * sizeof(*dr);
822
823 dl = kzalloc(size, GFP_KERNEL);
824 if (!dl)
825 return -ENOMEM;
826
827 dr = dl->dev_req;
828
829 read_lock(&hci_dev_list_lock);
830 list_for_each_entry(hdev, &hci_dev_list, list) {
831 unsigned long flags = hdev->flags;
832
833 /* When the auto-off is configured it means the transport
834 * is running, but in that case still indicate that the
835 * device is actually down.
836 */
837 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
838 flags &= ~BIT(HCI_UP);
839
840 (dr + n)->dev_id = hdev->id;
841 (dr + n)->dev_opt = flags;
842
843 if (++n >= dev_num)
844 break;
845 }
846 read_unlock(&hci_dev_list_lock);
847
848 dl->dev_num = n;
849 size = sizeof(*dl) + n * sizeof(*dr);
850
851 err = copy_to_user(arg, dl, size);
852 kfree(dl);
853
854 return err ? -EFAULT : 0;
855 }
856
857 int hci_get_dev_info(void __user *arg)
858 {
859 struct hci_dev *hdev;
860 struct hci_dev_info di;
861 unsigned long flags;
862 int err = 0;
863
864 if (copy_from_user(&di, arg, sizeof(di)))
865 return -EFAULT;
866
867 hdev = hci_dev_get(di.dev_id);
868 if (!hdev)
869 return -ENODEV;
870
871 /* When the auto-off is configured it means the transport
872 * is running, but in that case still indicate that the
873 * device is actually down.
874 */
875 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
876 flags = hdev->flags & ~BIT(HCI_UP);
877 else
878 flags = hdev->flags;
879
880 strscpy(di.name, hdev->name, sizeof(di.name));
881 di.bdaddr = hdev->bdaddr;
882 di.type = (hdev->bus & 0x0f);
883 di.flags = flags;
884 di.pkt_type = hdev->pkt_type;
885 if (lmp_bredr_capable(hdev)) {
886 di.acl_mtu = hdev->acl_mtu;
887 di.acl_pkts = hdev->acl_pkts;
888 di.sco_mtu = hdev->sco_mtu;
889 di.sco_pkts = hdev->sco_pkts;
890 } else {
891 di.acl_mtu = hdev->le_mtu;
892 di.acl_pkts = hdev->le_pkts;
893 di.sco_mtu = 0;
894 di.sco_pkts = 0;
895 }
896 di.link_policy = hdev->link_policy;
897 di.link_mode = hdev->link_mode;
898
899 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
900 memcpy(&di.features, &hdev->features, sizeof(di.features));
901
902 if (copy_to_user(arg, &di, sizeof(di)))
903 err = -EFAULT;
904
905 hci_dev_put(hdev);
906
907 return err;
908 }
909
910 /* ---- Interface to HCI drivers ---- */
911
912 static int hci_rfkill_set_block(void *data, bool blocked)
913 {
914 struct hci_dev *hdev = data;
915
916 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
917
918 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
919 return -EBUSY;
920
921 if (blocked) {
922 hci_dev_set_flag(hdev, HCI_RFKILLED);
923 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
924 !hci_dev_test_flag(hdev, HCI_CONFIG))
925 hci_dev_do_close(hdev);
926 } else {
927 hci_dev_clear_flag(hdev, HCI_RFKILLED);
928 }
929
930 return 0;
931 }
932
933 static const struct rfkill_ops hci_rfkill_ops = {
934 .set_block = hci_rfkill_set_block,
935 };
936
937 static void hci_power_on(struct work_struct *work)
938 {
939 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
940 int err;
941
942 BT_DBG("%s", hdev->name);
943
944 if (test_bit(HCI_UP, &hdev->flags) &&
945 hci_dev_test_flag(hdev, HCI_MGMT) &&
946 hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
947 cancel_delayed_work(&hdev->power_off);
948 err = hci_powered_update_sync(hdev);
949 mgmt_power_on(hdev, err);
950 return;
951 }
952
953 err = hci_dev_do_open(hdev);
954 if (err < 0) {
955 hci_dev_lock(hdev);
956 mgmt_set_powered_failed(hdev, err);
957 hci_dev_unlock(hdev);
958 return;
959 }
960
961 /* During the HCI setup phase, a few error conditions are
962 * ignored and they need to be checked now. If they are still
963 * valid, it is important to turn the device back off.
964 */
965 if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
966 hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
967 (!bacmp(&hdev->bdaddr, BDADDR_ANY) &&
968 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
969 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
970 hci_dev_do_close(hdev);
971 } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
972 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
973 HCI_AUTO_OFF_TIMEOUT);
974 }
975
976 if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
977 /* For unconfigured devices, set the HCI_RAW flag
978 * so that userspace can easily identify them.
979 */
980 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
981 set_bit(HCI_RAW, &hdev->flags);
982
983 /* For fully configured devices, this will send
984 * the Index Added event. For unconfigured devices,
985 * it will send the Unconfigured Index Added event.
986 *
987 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
988 * and no event will be sent.
989 */
990 mgmt_index_added(hdev);
991 } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
992 /* Now that the controller is configured, it is
993 * important to clear the HCI_RAW flag.
994 */
995 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
996 clear_bit(HCI_RAW, &hdev->flags);
997
998 /* Powering on the controller with HCI_CONFIG set only
999 * happens with the transition from unconfigured to
1000 * configured. This will send the Index Added event.
1001 */
1002 mgmt_index_added(hdev);
1003 }
1004 }
1005
1006 static void hci_power_off(struct work_struct *work)
1007 {
1008 struct hci_dev *hdev = container_of(work, struct hci_dev,
1009 power_off.work);
1010
1011 BT_DBG("%s", hdev->name);
1012
1013 hci_dev_do_close(hdev);
1014 }
1015
1016 static void hci_error_reset(struct work_struct *work)
1017 {
1018 struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
1019
1020 hci_dev_hold(hdev);
1021 BT_DBG("%s", hdev->name);
1022
1023 if (hdev->hw_error)
1024 hdev->hw_error(hdev, hdev->hw_error_code);
1025 else
1026 bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
1027
1028 if (!hci_dev_do_close(hdev))
1029 hci_dev_do_open(hdev);
1030
1031 hci_dev_put(hdev);
1032 }
1033
1034 void hci_uuids_clear(struct hci_dev *hdev)
1035 {
1036 struct bt_uuid *uuid, *tmp;
1037
1038 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1039 list_del(&uuid->list);
1040 kfree(uuid);
1041 }
1042 }
1043
1044 void hci_link_keys_clear(struct hci_dev *hdev)
1045 {
1046 struct link_key *key, *tmp;
1047
1048 list_for_each_entry_safe(key, tmp, &hdev->link_keys, list) {
1049 list_del_rcu(&key->list);
1050 kfree_rcu(key, rcu);
1051 }
1052 }
1053
1054 void hci_smp_ltks_clear(struct hci_dev *hdev)
1055 {
1056 struct smp_ltk *k, *tmp;
1057
1058 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1059 list_del_rcu(&k->list);
1060 kfree_rcu(k, rcu);
1061 }
1062 }
1063
1064 void hci_smp_irks_clear(struct hci_dev *hdev)
1065 {
1066 struct smp_irk *k, *tmp;
1067
1068 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
1069 list_del_rcu(&k->list);
1070 kfree_rcu(k, rcu);
1071 }
1072 }
1073
1074 void hci_blocked_keys_clear(struct hci_dev *hdev)
1075 {
1076 struct blocked_key *b, *tmp;
1077
1078 list_for_each_entry_safe(b, tmp, &hdev->blocked_keys, list) {
1079 list_del_rcu(&b->list);
1080 kfree_rcu(b, rcu);
1081 }
1082 }
1083
1084 bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
1085 {
1086 bool blocked = false;
1087 struct blocked_key *b;
1088
1089 rcu_read_lock();
1090 list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
1091 if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
1092 blocked = true;
1093 break;
1094 }
1095 }
1096
1097 rcu_read_unlock();
1098 return blocked;
1099 }
1100
1101 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1102 {
1103 struct link_key *k;
1104
1105 rcu_read_lock();
1106 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
1107 if (bacmp(bdaddr, &k->bdaddr) == 0) {
1108 rcu_read_unlock();
1109
1110 if (hci_is_blocked_key(hdev,
1111 HCI_BLOCKED_KEY_TYPE_LINKKEY,
1112 k->val)) {
1113 bt_dev_warn_ratelimited(hdev,
1114 "Link key blocked for %pMR",
1115 &k->bdaddr);
1116 return NULL;
1117 }
1118
1119 return k;
1120 }
1121 }
1122 rcu_read_unlock();
1123
1124 return NULL;
1125 }
1126
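/* Decide whether a newly created BR/EDR link key should be stored
 * persistently, based on the key type and on the bonding requirements
 * that were negotiated on @conn.
 */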
1127 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1128 u8 key_type, u8 old_key_type)
1129 {
1130 /* Legacy key */
1131 if (key_type < 0x03)
1132 return true;
1133
1134 /* Debug keys are insecure so don't store them persistently */
1135 if (key_type == HCI_LK_DEBUG_COMBINATION)
1136 return false;
1137
1138 /* Changed combination key and there's no previous one */
1139 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1140 return false;
1141
1142 /* Security mode 3 case */
1143 if (!conn)
1144 return true;
1145
1146 /* BR/EDR key derived using SC from an LE link */
1147 if (conn->type == LE_LINK)
1148 return true;
1149
1150 /* Neither local nor remote side had no-bonding as requirement */
1151 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1152 return true;
1153
1154 /* Local side had dedicated bonding as requirement */
1155 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1156 return true;
1157
1158 /* Remote side had dedicated bonding as requirement */
1159 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1160 return true;
1161
1162 /* If none of the above criteria match, then don't store the key
1163 * persistently */
1164 return false;
1165 }
1166
1167 static u8 ltk_role(u8 type)
1168 {
1169 if (type == SMP_LTK)
1170 return HCI_ROLE_MASTER;
1171
1172 return HCI_ROLE_SLAVE;
1173 }
1174
1175 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1176 u8 addr_type, u8 role)
1177 {
1178 struct smp_ltk *k;
1179
1180 rcu_read_lock();
1181 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
1182 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
1183 continue;
1184
1185 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
1186 rcu_read_unlock();
1187
1188 if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
1189 k->val)) {
1190 bt_dev_warn_ratelimited(hdev,
1191 "LTK blocked for %pMR",
1192 &k->bdaddr);
1193 return NULL;
1194 }
1195
1196 return k;
1197 }
1198 }
1199 rcu_read_unlock();
1200
1201 return NULL;
1202 }
1203
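/* Resolve a Resolvable Private Address: first look for a cached RPA match,
 * then run smp_irk_matches() against every stored IRK and cache the RPA in
 * the matching entry. Blocked identity keys are never returned.
 */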
1204 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
1205 {
1206 struct smp_irk *irk_to_return = NULL;
1207 struct smp_irk *irk;
1208
1209 rcu_read_lock();
1210 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1211 if (!bacmp(&irk->rpa, rpa)) {
1212 irk_to_return = irk;
1213 goto done;
1214 }
1215 }
1216
1217 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1218 if (smp_irk_matches(hdev, irk->val, rpa)) {
1219 bacpy(&irk->rpa, rpa);
1220 irk_to_return = irk;
1221 goto done;
1222 }
1223 }
1224
1225 done:
1226 if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
1227 irk_to_return->val)) {
1228 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
1229 &irk_to_return->bdaddr);
1230 irk_to_return = NULL;
1231 }
1232
1233 rcu_read_unlock();
1234
1235 return irk_to_return;
1236 }
1237
1238 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1239 u8 addr_type)
1240 {
1241 struct smp_irk *irk_to_return = NULL;
1242 struct smp_irk *irk;
1243
1244 /* Identity Address must be public or static random */
1245 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
1246 return NULL;
1247
1248 rcu_read_lock();
1249 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1250 if (addr_type == irk->addr_type &&
1251 bacmp(bdaddr, &irk->bdaddr) == 0) {
1252 irk_to_return = irk;
1253 goto done;
1254 }
1255 }
1256
1257 done:
1258
1259 if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
1260 irk_to_return->val)) {
1261 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
1262 &irk_to_return->bdaddr);
1263 irk_to_return = NULL;
1264 }
1265
1266 rcu_read_unlock();
1267
1268 return irk_to_return;
1269 }
1270
1271 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
1272 bdaddr_t *bdaddr, u8 *val, u8 type,
1273 u8 pin_len, bool *persistent)
1274 {
1275 struct link_key *key, *old_key;
1276 u8 old_key_type;
1277
1278 old_key = hci_find_link_key(hdev, bdaddr);
1279 if (old_key) {
1280 old_key_type = old_key->type;
1281 key = old_key;
1282 } else {
1283 old_key_type = conn ? conn->key_type : 0xff;
1284 key = kzalloc(sizeof(*key), GFP_KERNEL);
1285 if (!key)
1286 return NULL;
1287 list_add_rcu(&key->list, &hdev->link_keys);
1288 }
1289
1290 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
1291
1292 /* Some buggy controller combinations generate a changed
1293 * combination key for legacy pairing even when there's no
1294 * previous key */
1295 if (type == HCI_LK_CHANGED_COMBINATION &&
1296 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1297 type = HCI_LK_COMBINATION;
1298 if (conn)
1299 conn->key_type = type;
1300 }
1301
1302 bacpy(&key->bdaddr, bdaddr);
1303 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1304 key->pin_len = pin_len;
1305
1306 if (type == HCI_LK_CHANGED_COMBINATION)
1307 key->type = old_key_type;
1308 else
1309 key->type = type;
1310
1311 if (persistent)
1312 *persistent = hci_persistent_key(hdev, conn, type,
1313 old_key_type);
1314
1315 return key;
1316 }
1317
1318 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1319 u8 addr_type, u8 type, u8 authenticated,
1320 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
1321 {
1322 struct smp_ltk *key, *old_key;
1323 u8 role = ltk_role(type);
1324
1325 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
1326 if (old_key)
1327 key = old_key;
1328 else {
1329 key = kzalloc(sizeof(*key), GFP_KERNEL);
1330 if (!key)
1331 return NULL;
1332 list_add_rcu(&key->list, &hdev->long_term_keys);
1333 }
1334
1335 bacpy(&key->bdaddr, bdaddr);
1336 key->bdaddr_type = addr_type;
1337 memcpy(key->val, tk, sizeof(key->val));
1338 key->authenticated = authenticated;
1339 key->ediv = ediv;
1340 key->rand = rand;
1341 key->enc_size = enc_size;
1342 key->type = type;
1343
1344 return key;
1345 }
1346
1347 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1348 u8 addr_type, u8 val[16], bdaddr_t *rpa)
1349 {
1350 struct smp_irk *irk;
1351
1352 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
1353 if (!irk) {
1354 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
1355 if (!irk)
1356 return NULL;
1357
1358 bacpy(&irk->bdaddr, bdaddr);
1359 irk->addr_type = addr_type;
1360
1361 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
1362 }
1363
1364 memcpy(irk->val, val, 16);
1365 bacpy(&irk->rpa, rpa);
1366
1367 return irk;
1368 }
1369
1370 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1371 {
1372 struct link_key *key;
1373
1374 key = hci_find_link_key(hdev, bdaddr);
1375 if (!key)
1376 return -ENOENT;
1377
1378 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1379
1380 list_del_rcu(&key->list);
1381 kfree_rcu(key, rcu);
1382
1383 return 0;
1384 }
1385
1386 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
1387 {
1388 struct smp_ltk *k, *tmp;
1389 int removed = 0;
1390
1391 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1392 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
1393 continue;
1394
1395 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1396
1397 list_del_rcu(&k->list);
1398 kfree_rcu(k, rcu);
1399 removed++;
1400 }
1401
1402 return removed ? 0 : -ENOENT;
1403 }
1404
1405 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
1406 {
1407 struct smp_irk *k, *tmp;
1408
1409 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
1410 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
1411 continue;
1412
1413 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1414
1415 list_del_rcu(&k->list);
1416 kfree_rcu(k, rcu);
1417 }
1418 }
1419
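/* Check whether we share a bond with @bdaddr: a stored link key for BR/EDR,
 * or a long term key for LE (resolving the address through a matching IRK
 * first, if one exists).
 */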
1420 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1421 {
1422 struct smp_ltk *k;
1423 struct smp_irk *irk;
1424 u8 addr_type;
1425
1426 if (type == BDADDR_BREDR) {
1427 if (hci_find_link_key(hdev, bdaddr))
1428 return true;
1429 return false;
1430 }
1431
1432 /* Convert to HCI addr type which struct smp_ltk uses */
1433 if (type == BDADDR_LE_PUBLIC)
1434 addr_type = ADDR_LE_DEV_PUBLIC;
1435 else
1436 addr_type = ADDR_LE_DEV_RANDOM;
1437
1438 irk = hci_get_irk(hdev, bdaddr, addr_type);
1439 if (irk) {
1440 bdaddr = &irk->bdaddr;
1441 addr_type = irk->addr_type;
1442 }
1443
1444 rcu_read_lock();
1445 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
1446 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
1447 rcu_read_unlock();
1448 return true;
1449 }
1450 }
1451 rcu_read_unlock();
1452
1453 return false;
1454 }
1455
1456 /* HCI command timer function */
1457 static void hci_cmd_timeout(struct work_struct *work)
1458 {
1459 struct hci_dev *hdev = container_of(work, struct hci_dev,
1460 cmd_timer.work);
1461
1462 if (hdev->req_skb) {
1463 u16 opcode = hci_skb_opcode(hdev->req_skb);
1464
1465 bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
1466
1467 hci_cmd_sync_cancel_sync(hdev, ETIMEDOUT);
1468 } else {
1469 bt_dev_err(hdev, "command tx timeout");
1470 }
1471
1472 if (hdev->cmd_timeout)
1473 hdev->cmd_timeout(hdev);
1474
1475 atomic_set(&hdev->cmd_cnt, 1);
1476 queue_work(hdev->workqueue, &hdev->cmd_work);
1477 }
1478
1479 /* HCI ncmd timer function */
1480 static void hci_ncmd_timeout(struct work_struct *work)
1481 {
1482 struct hci_dev *hdev = container_of(work, struct hci_dev,
1483 ncmd_timer.work);
1484
1485 bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0");
1486
1487 /* During HCI_INIT phase no events can be injected if the ncmd timer
1488 * triggers since the procedure has its own timeout handling.
1489 */
1490 if (test_bit(HCI_INIT, &hdev->flags))
1491 return;
1492
1493 /* This is an irrecoverable state, inject hardware error event */
1494 hci_reset_dev(hdev);
1495 }
1496
1497 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1498 bdaddr_t *bdaddr, u8 bdaddr_type)
1499 {
1500 struct oob_data *data;
1501
1502 list_for_each_entry(data, &hdev->remote_oob_data, list) {
1503 if (bacmp(bdaddr, &data->bdaddr) != 0)
1504 continue;
1505 if (data->bdaddr_type != bdaddr_type)
1506 continue;
1507 return data;
1508 }
1509
1510 return NULL;
1511 }
1512
1513 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
1514 u8 bdaddr_type)
1515 {
1516 struct oob_data *data;
1517
1518 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
1519 if (!data)
1520 return -ENOENT;
1521
1522 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
1523
1524 list_del(&data->list);
1525 kfree(data);
1526
1527 return 0;
1528 }
1529
1530 void hci_remote_oob_data_clear(struct hci_dev *hdev)
1531 {
1532 struct oob_data *data, *n;
1533
1534 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1535 list_del(&data->list);
1536 kfree(data);
1537 }
1538 }
1539
1540 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
1541 u8 bdaddr_type, u8 *hash192, u8 *rand192,
1542 u8 *hash256, u8 *rand256)
1543 {
1544 struct oob_data *data;
1545
1546 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
1547 if (!data) {
1548 data = kmalloc(sizeof(*data), GFP_KERNEL);
1549 if (!data)
1550 return -ENOMEM;
1551
1552 bacpy(&data->bdaddr, bdaddr);
1553 data->bdaddr_type = bdaddr_type;
1554 list_add(&data->list, &hdev->remote_oob_data);
1555 }
1556
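/* data->present tracks which OOB values are valid: 0x01 when only the
 * hash192/rand192 pair is set, 0x02 when only hash256/rand256 is set,
 * 0x03 when both pairs are valid.
 */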
1557 if (hash192 && rand192) {
1558 memcpy(data->hash192, hash192, sizeof(data->hash192));
1559 memcpy(data->rand192, rand192, sizeof(data->rand192));
1560 if (hash256 && rand256)
1561 data->present = 0x03;
1562 } else {
1563 memset(data->hash192, 0, sizeof(data->hash192));
1564 memset(data->rand192, 0, sizeof(data->rand192));
1565 if (hash256 && rand256)
1566 data->present = 0x02;
1567 else
1568 data->present = 0x00;
1569 }
1570
1571 if (hash256 && rand256) {
1572 memcpy(data->hash256, hash256, sizeof(data->hash256));
1573 memcpy(data->rand256, rand256, sizeof(data->rand256));
1574 } else {
1575 memset(data->hash256, 0, sizeof(data->hash256));
1576 memset(data->rand256, 0, sizeof(data->rand256));
1577 if (hash192 && rand192)
1578 data->present = 0x01;
1579 }
1580
1581 BT_DBG("%s for %pMR", hdev->name, bdaddr);
1582
1583 return 0;
1584 }
1585
1586 /* This function requires the caller holds hdev->lock */
1587 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
1588 {
1589 struct adv_info *adv_instance;
1590
1591 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
1592 if (adv_instance->instance == instance)
1593 return adv_instance;
1594 }
1595
1596 return NULL;
1597 }
1598
1599 /* This function requires the caller holds hdev->lock */
1600 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
1601 {
1602 struct adv_info *cur_instance;
1603
1604 cur_instance = hci_find_adv_instance(hdev, instance);
1605 if (!cur_instance)
1606 return NULL;
1607
1608 if (cur_instance == list_last_entry(&hdev->adv_instances,
1609 struct adv_info, list))
1610 return list_first_entry(&hdev->adv_instances,
1611 struct adv_info, list);
1612 else
1613 return list_next_entry(cur_instance, list);
1614 }
1615
1616 /* This function requires the caller holds hdev->lock */
1617 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
1618 {
1619 struct adv_info *adv_instance;
1620
1621 adv_instance = hci_find_adv_instance(hdev, instance);
1622 if (!adv_instance)
1623 return -ENOENT;
1624
1625 BT_DBG("%s removing %dMR", hdev->name, instance);
1626
1627 if (hdev->cur_adv_instance == instance) {
1628 if (hdev->adv_instance_timeout) {
1629 cancel_delayed_work(&hdev->adv_instance_expire);
1630 hdev->adv_instance_timeout = 0;
1631 }
1632 hdev->cur_adv_instance = 0x00;
1633 }
1634
1635 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1636
1637 list_del(&adv_instance->list);
1638 kfree(adv_instance);
1639
1640 hdev->adv_instance_cnt--;
1641
1642 return 0;
1643 }
1644
1645 void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
1646 {
1647 struct adv_info *adv_instance, *n;
1648
1649 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
1650 adv_instance->rpa_expired = rpa_expired;
1651 }
1652
1653 /* This function requires the caller holds hdev->lock */
1654 void hci_adv_instances_clear(struct hci_dev *hdev)
1655 {
1656 struct adv_info *adv_instance, *n;
1657
1658 if (hdev->adv_instance_timeout) {
1659 cancel_delayed_work(&hdev->adv_instance_expire);
1660 hdev->adv_instance_timeout = 0;
1661 }
1662
1663 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
1664 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1665 list_del(&adv_instance->list);
1666 kfree(adv_instance);
1667 }
1668
1669 hdev->adv_instance_cnt = 0;
1670 hdev->cur_adv_instance = 0x00;
1671 }
1672
1673 static void adv_instance_rpa_expired(struct work_struct *work)
1674 {
1675 struct adv_info *adv_instance = container_of(work, struct adv_info,
1676 rpa_expired_cb.work);
1677
1678 BT_DBG("");
1679
1680 adv_instance->rpa_expired = true;
1681 }
1682
1683 /* This function requires the caller holds hdev->lock */
1684 struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance,
1685 u32 flags, u16 adv_data_len, u8 *adv_data,
1686 u16 scan_rsp_len, u8 *scan_rsp_data,
1687 u16 timeout, u16 duration, s8 tx_power,
1688 u32 min_interval, u32 max_interval,
1689 u8 mesh_handle)
1690 {
1691 struct adv_info *adv;
1692
1693 adv = hci_find_adv_instance(hdev, instance);
1694 if (adv) {
1695 memset(adv->adv_data, 0, sizeof(adv->adv_data));
1696 memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
1697 memset(adv->per_adv_data, 0, sizeof(adv->per_adv_data));
1698 } else {
1699 if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
1700 instance < 1 || instance > hdev->le_num_of_adv_sets + 1)
1701 return ERR_PTR(-EOVERFLOW);
1702
1703 adv = kzalloc(sizeof(*adv), GFP_KERNEL);
1704 if (!adv)
1705 return ERR_PTR(-ENOMEM);
1706
1707 adv->pending = true;
1708 adv->instance = instance;
1709 list_add(&adv->list, &hdev->adv_instances);
1710 hdev->adv_instance_cnt++;
1711 }
1712
1713 adv->flags = flags;
1714 adv->min_interval = min_interval;
1715 adv->max_interval = max_interval;
1716 adv->tx_power = tx_power;
1717 /* Defining a mesh_handle changes the timing units to ms,
1718 * rather than seconds, and ties the instance to the requested
1719 * mesh_tx queue.
1720 */
1721 adv->mesh = mesh_handle;
1722
1723 hci_set_adv_instance_data(hdev, instance, adv_data_len, adv_data,
1724 scan_rsp_len, scan_rsp_data);
1725
1726 adv->timeout = timeout;
1727 adv->remaining_time = timeout;
1728
1729 if (duration == 0)
1730 adv->duration = hdev->def_multi_adv_rotation_duration;
1731 else
1732 adv->duration = duration;
1733
1734 INIT_DELAYED_WORK(&adv->rpa_expired_cb, adv_instance_rpa_expired);
1735
1736 BT_DBG("%s for %dMR", hdev->name, instance);
1737
1738 return adv;
1739 }
1740
1741 /* This function requires the caller holds hdev->lock */
1742 struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance,
1743 u32 flags, u8 data_len, u8 *data,
1744 u32 min_interval, u32 max_interval)
1745 {
1746 struct adv_info *adv;
1747
1748 adv = hci_add_adv_instance(hdev, instance, flags, 0, NULL, 0, NULL,
1749 0, 0, HCI_ADV_TX_POWER_NO_PREFERENCE,
1750 min_interval, max_interval, 0);
1751 if (IS_ERR(adv))
1752 return adv;
1753
1754 adv->periodic = true;
1755 adv->per_adv_data_len = data_len;
1756
1757 if (data)
1758 memcpy(adv->per_adv_data, data, data_len);
1759
1760 return adv;
1761 }
1762
1763 /* This function requires the caller holds hdev->lock */
1764 int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
1765 u16 adv_data_len, u8 *adv_data,
1766 u16 scan_rsp_len, u8 *scan_rsp_data)
1767 {
1768 struct adv_info *adv;
1769
1770 adv = hci_find_adv_instance(hdev, instance);
1771
1772 /* If advertisement doesn't exist, we can't modify its data */
1773 if (!adv)
1774 return -ENOENT;
1775
1776 if (adv_data_len && ADV_DATA_CMP(adv, adv_data, adv_data_len)) {
1777 memset(adv->adv_data, 0, sizeof(adv->adv_data));
1778 memcpy(adv->adv_data, adv_data, adv_data_len);
1779 adv->adv_data_len = adv_data_len;
1780 adv->adv_data_changed = true;
1781 }
1782
1783 if (scan_rsp_len && SCAN_RSP_CMP(adv, scan_rsp_data, scan_rsp_len)) {
1784 memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
1785 memcpy(adv->scan_rsp_data, scan_rsp_data, scan_rsp_len);
1786 adv->scan_rsp_len = scan_rsp_len;
1787 adv->scan_rsp_changed = true;
1788 }
1789
1790 /* Mark as changed if there are flags which would affect it */
1791 if (((adv->flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) ||
1792 adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1793 adv->scan_rsp_changed = true;
1794
1795 return 0;
1796 }
1797
1798 /* This function requires the caller holds hdev->lock */
1799 u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance)
1800 {
1801 u32 flags;
1802 struct adv_info *adv;
1803
1804 if (instance == 0x00) {
1805 /* Instance 0 always manages the "Tx Power" and "Flags"
1806 * fields
1807 */
1808 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
1809
1810 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
1811 * corresponds to the "connectable" instance flag.
1812 */
1813 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
1814 flags |= MGMT_ADV_FLAG_CONNECTABLE;
1815
1816 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1817 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
1818 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1819 flags |= MGMT_ADV_FLAG_DISCOV;
1820
1821 return flags;
1822 }
1823
1824 adv = hci_find_adv_instance(hdev, instance);
1825
1826 /* Return 0 when we got an invalid instance identifier. */
1827 if (!adv)
1828 return 0;
1829
1830 return adv->flags;
1831 }
1832
1833 bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
1834 {
1835 struct adv_info *adv;
1836
1837 /* Instance 0x00 always sets the local name */
1838 if (instance == 0x00)
1839 return true;
1840
1841 adv = hci_find_adv_instance(hdev, instance);
1842 if (!adv)
1843 return false;
1844
1845 if (adv->flags & MGMT_ADV_FLAG_APPEARANCE ||
1846 adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1847 return true;
1848
1849 return adv->scan_rsp_len ? true : false;
1850 }
1851
1852 /* This function requires the caller holds hdev->lock */
1853 void hci_adv_monitors_clear(struct hci_dev *hdev)
1854 {
1855 struct adv_monitor *monitor;
1856 int handle;
1857
1858 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
1859 hci_free_adv_monitor(hdev, monitor);
1860
1861 idr_destroy(&hdev->adv_monitors_idr);
1862 }
1863
1864 /* Frees the monitor structure and does some bookkeeping.
1865 * This function requires the caller holds hdev->lock.
1866 */
1867 void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
1868 {
1869 struct adv_pattern *pattern;
1870 struct adv_pattern *tmp;
1871
1872 if (!monitor)
1873 return;
1874
1875 list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
1876 list_del(&pattern->list);
1877 kfree(pattern);
1878 }
1879
1880 if (monitor->handle)
1881 idr_remove(&hdev->adv_monitors_idr, monitor->handle);
1882
1883 if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED)
1884 hdev->adv_monitors_cnt--;
1885
1886 kfree(monitor);
1887 }
1888
1889 /* Assigns handle to a monitor, and if offloading is supported and power is on,
1890 * also attempts to forward the request to the controller.
1891 * This function requires the caller holds hci_req_sync_lock.
1892 */
1893 int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
1894 {
1895 int min, max, handle;
1896 int status = 0;
1897
1898 if (!monitor)
1899 return -EINVAL;
1900
1901 hci_dev_lock(hdev);
1902
1903 min = HCI_MIN_ADV_MONITOR_HANDLE;
1904 max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
1905 handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
1906 GFP_KERNEL);
1907
1908 hci_dev_unlock(hdev);
1909
1910 if (handle < 0)
1911 return handle;
1912
1913 monitor->handle = handle;
1914
1915 if (!hdev_is_powered(hdev))
1916 return status;
1917
1918 switch (hci_get_adv_monitor_offload_ext(hdev)) {
1919 case HCI_ADV_MONITOR_EXT_NONE:
1920 bt_dev_dbg(hdev, "add monitor %d status %d",
1921 monitor->handle, status);
1922 /* Message was not forwarded to controller - not an error */
1923 break;
1924
1925 case HCI_ADV_MONITOR_EXT_MSFT:
1926 status = msft_add_monitor_pattern(hdev, monitor);
1927 bt_dev_dbg(hdev, "add monitor %d msft status %d",
1928 handle, status);
1929 break;
1930 }
1931
1932 return status;
1933 }
1934
1935 /* Attempts to tell the controller and free the monitor. If somehow the
1936 * controller doesn't have a corresponding handle, remove anyway.
1937 * This function requires the caller holds hci_req_sync_lock.
1938 */
1939 static int hci_remove_adv_monitor(struct hci_dev *hdev,
1940 struct adv_monitor *monitor)
1941 {
1942 int status = 0;
1943 int handle;
1944
1945 switch (hci_get_adv_monitor_offload_ext(hdev)) {
1946 case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
1947 bt_dev_dbg(hdev, "remove monitor %d status %d",
1948 monitor->handle, status);
1949 goto free_monitor;
1950
1951 case HCI_ADV_MONITOR_EXT_MSFT:
1952 handle = monitor->handle;
1953 status = msft_remove_monitor(hdev, monitor);
1954 bt_dev_dbg(hdev, "remove monitor %d msft status %d",
1955 handle, status);
1956 break;
1957 }
1958
1959 /* In case no matching handle registered, just free the monitor */
1960 if (status == -ENOENT)
1961 goto free_monitor;
1962
1963 return status;
1964
1965 free_monitor:
1966 if (status == -ENOENT)
1967 bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
1968 monitor->handle);
1969 hci_free_adv_monitor(hdev, monitor);
1970
1971 return status;
1972 }
1973
1974 /* This function requires the caller holds hci_req_sync_lock */
1975 int hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle)
1976 {
1977 struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);
1978
1979 if (!monitor)
1980 return -EINVAL;
1981
1982 return hci_remove_adv_monitor(hdev, monitor);
1983 }
1984
1985 /* This function requires the caller holds hci_req_sync_lock */
1986 int hci_remove_all_adv_monitor(struct hci_dev *hdev)
1987 {
1988 struct adv_monitor *monitor;
1989 int idr_next_id = 0;
1990 int status = 0;
1991
1992 while (1) {
1993 monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
1994 if (!monitor)
1995 break;
1996
1997 status = hci_remove_adv_monitor(hdev, monitor);
1998 if (status)
1999 return status;
2000
2001 idr_next_id++;
2002 }
2003
2004 return status;
2005 }
2006
2007 /* This function requires the caller holds hdev->lock */
2008 bool hci_is_adv_monitoring(struct hci_dev *hdev)
2009 {
2010 return !idr_is_empty(&hdev->adv_monitors_idr);
2011 }
2012
2013 int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
2014 {
2015 if (msft_monitor_supported(hdev))
2016 return HCI_ADV_MONITOR_EXT_MSFT;
2017
2018 return HCI_ADV_MONITOR_EXT_NONE;
2019 }
2020
2021 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2022 bdaddr_t *bdaddr, u8 type)
2023 {
2024 struct bdaddr_list *b;
2025
2026 list_for_each_entry(b, bdaddr_list, list) {
2027 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2028 return b;
2029 }
2030
2031 return NULL;
2032 }
2033
2034 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
2035 struct list_head *bdaddr_list, bdaddr_t *bdaddr,
2036 u8 type)
2037 {
2038 struct bdaddr_list_with_irk *b;
2039
2040 list_for_each_entry(b, bdaddr_list, list) {
2041 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2042 return b;
2043 }
2044
2045 return NULL;
2046 }
2047
2048 struct bdaddr_list_with_flags *
2049 hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
2050 bdaddr_t *bdaddr, u8 type)
2051 {
2052 struct bdaddr_list_with_flags *b;
2053
2054 list_for_each_entry(b, bdaddr_list, list) {
2055 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2056 return b;
2057 }
2058
2059 return NULL;
2060 }
2061
2062 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2063 {
2064 struct bdaddr_list *b, *n;
2065
2066 list_for_each_entry_safe(b, n, bdaddr_list, list) {
2067 list_del(&b->list);
2068 kfree(b);
2069 }
2070 }
2071
2072 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2073 {
2074 struct bdaddr_list *entry;
2075
2076 if (!bacmp(bdaddr, BDADDR_ANY))
2077 return -EBADF;
2078
2079 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2080 return -EEXIST;
2081
2082 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2083 if (!entry)
2084 return -ENOMEM;
2085
2086 bacpy(&entry->bdaddr, bdaddr);
2087 entry->bdaddr_type = type;
2088
2089 list_add(&entry->list, list);
2090
2091 return 0;
2092 }
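
/* Illustrative round-trip (assumes hdev->lock is held by the caller): the
 * accept/reject lists initialised in hci_alloc_dev_priv() are plain bdaddr
 * lists, so an add/lookup/del sequence looks roughly like:
 *
 *	err = hci_bdaddr_list_add(&hdev->accept_list, &bdaddr, BDADDR_BREDR);
 *	if (!err && hci_bdaddr_list_lookup(&hdev->accept_list, &bdaddr,
 *					   BDADDR_BREDR))
 *		err = hci_bdaddr_list_del(&hdev->accept_list, &bdaddr,
 *					  BDADDR_BREDR);
 *
 * Passing BDADDR_ANY to hci_bdaddr_list_del() clears the whole list.
 */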
2093
2094 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2095 u8 type, u8 *peer_irk, u8 *local_irk)
2096 {
2097 struct bdaddr_list_with_irk *entry;
2098
2099 if (!bacmp(bdaddr, BDADDR_ANY))
2100 return -EBADF;
2101
2102 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2103 return -EEXIST;
2104
2105 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2106 if (!entry)
2107 return -ENOMEM;
2108
2109 bacpy(&entry->bdaddr, bdaddr);
2110 entry->bdaddr_type = type;
2111
2112 if (peer_irk)
2113 memcpy(entry->peer_irk, peer_irk, 16);
2114
2115 if (local_irk)
2116 memcpy(entry->local_irk, local_irk, 16);
2117
2118 list_add(&entry->list, list);
2119
2120 return 0;
2121 }
2122
2123 int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2124 u8 type, u32 flags)
2125 {
2126 struct bdaddr_list_with_flags *entry;
2127
2128 if (!bacmp(bdaddr, BDADDR_ANY))
2129 return -EBADF;
2130
2131 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2132 return -EEXIST;
2133
2134 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2135 if (!entry)
2136 return -ENOMEM;
2137
2138 bacpy(&entry->bdaddr, bdaddr);
2139 entry->bdaddr_type = type;
2140 entry->flags = flags;
2141
2142 list_add(&entry->list, list);
2143
2144 return 0;
2145 }
2146
2147 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2148 {
2149 struct bdaddr_list *entry;
2150
2151 if (!bacmp(bdaddr, BDADDR_ANY)) {
2152 hci_bdaddr_list_clear(list);
2153 return 0;
2154 }
2155
2156 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2157 if (!entry)
2158 return -ENOENT;
2159
2160 list_del(&entry->list);
2161 kfree(entry);
2162
2163 return 0;
2164 }
2165
2166 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2167 u8 type)
2168 {
2169 struct bdaddr_list_with_irk *entry;
2170
2171 if (!bacmp(bdaddr, BDADDR_ANY)) {
2172 hci_bdaddr_list_clear(list);
2173 return 0;
2174 }
2175
2176 entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
2177 if (!entry)
2178 return -ENOENT;
2179
2180 list_del(&entry->list);
2181 kfree(entry);
2182
2183 return 0;
2184 }
2185
2186 int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2187 u8 type)
2188 {
2189 struct bdaddr_list_with_flags *entry;
2190
2191 if (!bacmp(bdaddr, BDADDR_ANY)) {
2192 hci_bdaddr_list_clear(list);
2193 return 0;
2194 }
2195
2196 entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
2197 if (!entry)
2198 return -ENOENT;
2199
2200 list_del(&entry->list);
2201 kfree(entry);
2202
2203 return 0;
2204 }
2205
2206 /* This function requires the caller holds hdev->lock */
2207 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2208 bdaddr_t *addr, u8 addr_type)
2209 {
2210 struct hci_conn_params *params;
2211
2212 list_for_each_entry(params, &hdev->le_conn_params, list) {
2213 		if (bacmp(&params->addr, addr) == 0 &&
2214 params->addr_type == addr_type) {
2215 return params;
2216 }
2217 }
2218
2219 return NULL;
2220 }
2221
2222 /* This function requires the caller holds hdev->lock or rcu_read_lock */
2223 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2224 bdaddr_t *addr, u8 addr_type)
2225 {
2226 struct hci_conn_params *param;
2227
2228 rcu_read_lock();
2229
2230 list_for_each_entry_rcu(param, list, action) {
2231 		if (bacmp(&param->addr, addr) == 0 &&
2232 param->addr_type == addr_type) {
2233 rcu_read_unlock();
2234 return param;
2235 }
2236 }
2237
2238 rcu_read_unlock();
2239
2240 return NULL;
2241 }
2242
2243 /* This function requires the caller holds hdev->lock */
2244 void hci_pend_le_list_del_init(struct hci_conn_params *param)
2245 {
2246 	if (list_empty(&param->action))
2247 return;
2248
2249 	list_del_rcu(&param->action);
2250 synchronize_rcu();
2251 	INIT_LIST_HEAD(&param->action);
2252 }
2253
2254 /* This function requires the caller holds hdev->lock */
2255 void hci_pend_le_list_add(struct hci_conn_params *param,
2256 struct list_head *list)
2257 {
2258 	list_add_rcu(&param->action, list);
2259 }
2260
2261 /* This function requires the caller holds hdev->lock */
2262 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2263 bdaddr_t *addr, u8 addr_type)
2264 {
2265 struct hci_conn_params *params;
2266
2267 params = hci_conn_params_lookup(hdev, addr, addr_type);
2268 if (params)
2269 return params;
2270
2271 params = kzalloc(sizeof(*params), GFP_KERNEL);
2272 if (!params) {
2273 bt_dev_err(hdev, "out of memory");
2274 return NULL;
2275 }
2276
2277 	bacpy(&params->addr, addr);
2278 params->addr_type = addr_type;
2279
2280 	list_add(&params->list, &hdev->le_conn_params);
2281 	INIT_LIST_HEAD(&params->action);
2282
2283 params->conn_min_interval = hdev->le_conn_min_interval;
2284 params->conn_max_interval = hdev->le_conn_max_interval;
2285 params->conn_latency = hdev->le_conn_latency;
2286 params->supervision_timeout = hdev->le_supv_timeout;
2287 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2288
2289 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2290
2291 return params;
2292 }
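
/* Usage sketch (illustrative only; the auto_connect policy below is just an
 * example, real callers set it according to MGMT policy): with hdev->lock
 * held, auto-connecting a peer combines the helpers above roughly as:
 *
 *	struct hci_conn_params *p;
 *
 *	p = hci_conn_params_add(hdev, &addr, ADDR_LE_DEV_PUBLIC);
 *	if (p) {
 *		p->auto_connect = HCI_AUTO_CONN_ALWAYS;
 *		hci_pend_le_list_add(p, &hdev->pend_le_conns);
 *		hci_update_passive_scan(hdev);
 *	}
 */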
2293
2294 void hci_conn_params_free(struct hci_conn_params *params)
2295 {
2296 hci_pend_le_list_del_init(params);
2297
2298 if (params->conn) {
2299 hci_conn_drop(params->conn);
2300 hci_conn_put(params->conn);
2301 }
2302
2303 	list_del(&params->list);
2304 kfree(params);
2305 }
2306
2307 /* This function requires the caller holds hdev->lock */
2308 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2309 {
2310 struct hci_conn_params *params;
2311
2312 params = hci_conn_params_lookup(hdev, addr, addr_type);
2313 if (!params)
2314 return;
2315
2316 hci_conn_params_free(params);
2317
2318 hci_update_passive_scan(hdev);
2319
2320 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2321 }
2322
2323 /* This function requires the caller holds hdev->lock */
2324 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2325 {
2326 struct hci_conn_params *params, *tmp;
2327
2328 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2329 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2330 continue;
2331
2332 		/* If trying to establish a one-time connection to a disabled
2333 		 * device, leave the params but mark them as one-shot only.
2334 */
2335 if (params->explicit_connect) {
2336 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2337 continue;
2338 }
2339
2340 hci_conn_params_free(params);
2341 }
2342
2343 BT_DBG("All LE disabled connection parameters were removed");
2344 }
2345
2346 /* This function requires the caller holds hdev->lock */
2347 static void hci_conn_params_clear_all(struct hci_dev *hdev)
2348 {
2349 struct hci_conn_params *params, *tmp;
2350
2351 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2352 hci_conn_params_free(params);
2353
2354 BT_DBG("All LE connection parameters were removed");
2355 }
2356
2357 /* Copy the Identity Address of the controller.
2358 *
2359 * If the controller has a public BD_ADDR, then by default use that one.
2360 * If this is a LE only controller without a public address, default to
2361 * the static random address.
2362 *
2363 * For debugging purposes it is possible to force controllers with a
2364 * public address to use the static random address instead.
2365 *
2366 * In case BR/EDR has been disabled on a dual-mode controller and
2367 * userspace has configured a static address, then that address
2368 * becomes the identity address instead of the public BR/EDR address.
2369 */
2370 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2371 u8 *bdaddr_type)
2372 {
2373 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2374 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2375 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2376 bacmp(&hdev->static_addr, BDADDR_ANY))) {
2377 bacpy(bdaddr, &hdev->static_addr);
2378 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2379 } else {
2380 bacpy(bdaddr, &hdev->bdaddr);
2381 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2382 }
2383 }
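
/* Example (illustrative): code that needs the controller's own identity
 * address, e.g. when building an advertising parameter set, can simply do:
 *
 *	bdaddr_t id_addr;
 *	u8 id_addr_type;
 *
 *	hci_copy_identity_address(hdev, &id_addr, &id_addr_type);
 *
 * and then branch on ADDR_LE_DEV_PUBLIC vs ADDR_LE_DEV_RANDOM to decide how
 * the address has to be presented on the air.
 */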
2384
2385 static void hci_clear_wake_reason(struct hci_dev *hdev)
2386 {
2387 hci_dev_lock(hdev);
2388
2389 hdev->wake_reason = 0;
2390 bacpy(&hdev->wake_addr, BDADDR_ANY);
2391 hdev->wake_addr_type = 0;
2392
2393 hci_dev_unlock(hdev);
2394 }
2395
2396 static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
2397 void *data)
2398 {
2399 struct hci_dev *hdev =
2400 container_of(nb, struct hci_dev, suspend_notifier);
2401 int ret = 0;
2402
2403 /* Userspace has full control of this device. Do nothing. */
2404 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2405 return NOTIFY_DONE;
2406
2407 /* To avoid a potential race with hci_unregister_dev. */
2408 hci_dev_hold(hdev);
2409
2410 switch (action) {
2411 case PM_HIBERNATION_PREPARE:
2412 case PM_SUSPEND_PREPARE:
2413 ret = hci_suspend_dev(hdev);
2414 break;
2415 case PM_POST_HIBERNATION:
2416 case PM_POST_SUSPEND:
2417 ret = hci_resume_dev(hdev);
2418 break;
2419 }
2420
2421 if (ret)
2422 bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
2423 action, ret);
2424
2425 hci_dev_put(hdev);
2426 return NOTIFY_DONE;
2427 }
2428
2429 /* Alloc HCI device */
2430 struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
2431 {
2432 struct hci_dev *hdev;
2433 unsigned int alloc_size;
2434
2435 alloc_size = sizeof(*hdev);
2436 if (sizeof_priv) {
2437 /* Fixme: May need ALIGN-ment? */
2438 alloc_size += sizeof_priv;
2439 }
2440
2441 hdev = kzalloc(alloc_size, GFP_KERNEL);
2442 if (!hdev)
2443 return NULL;
2444
2445 if (init_srcu_struct(&hdev->srcu)) {
2446 kfree(hdev);
2447 return NULL;
2448 }
2449
2450 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2451 hdev->esco_type = (ESCO_HV1);
2452 hdev->link_mode = (HCI_LM_ACCEPT);
2453 hdev->num_iac = 0x01; /* One IAC support is mandatory */
2454 hdev->io_capability = 0x03; /* No Input No Output */
2455 hdev->manufacturer = 0xffff; /* Default to internal use */
2456 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2457 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2458 hdev->adv_instance_cnt = 0;
2459 hdev->cur_adv_instance = 0x00;
2460 hdev->adv_instance_timeout = 0;
2461
2462 hdev->advmon_allowlist_duration = 300;
2463 hdev->advmon_no_filter_duration = 500;
2464 hdev->enable_advmon_interleave_scan = 0x00; /* Default to disable */
2465
2466 hdev->sniff_max_interval = 800;
2467 hdev->sniff_min_interval = 80;
2468
2469 hdev->le_adv_channel_map = 0x07;
2470 hdev->le_adv_min_interval = 0x0800;
2471 hdev->le_adv_max_interval = 0x0800;
2472 hdev->le_scan_interval = 0x0060;
2473 hdev->le_scan_window = 0x0030;
2474 hdev->le_scan_int_suspend = 0x0400;
2475 hdev->le_scan_window_suspend = 0x0012;
2476 hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
2477 hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
2478 hdev->le_scan_int_adv_monitor = 0x0060;
2479 hdev->le_scan_window_adv_monitor = 0x0030;
2480 hdev->le_scan_int_connect = 0x0060;
2481 hdev->le_scan_window_connect = 0x0060;
2482 hdev->le_conn_min_interval = 0x0018;
2483 hdev->le_conn_max_interval = 0x0028;
2484 hdev->le_conn_latency = 0x0000;
2485 hdev->le_supv_timeout = 0x002a;
2486 hdev->le_def_tx_len = 0x001b;
2487 hdev->le_def_tx_time = 0x0148;
2488 hdev->le_max_tx_len = 0x001b;
2489 hdev->le_max_tx_time = 0x0148;
2490 hdev->le_max_rx_len = 0x001b;
2491 hdev->le_max_rx_time = 0x0148;
2492 hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
2493 hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
2494 hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
2495 hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
2496 hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
2497 hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
2498 hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT;
2499 hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
2500 hdev->max_le_tx_power = HCI_TX_POWER_INVALID;
2501
2502 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
2503 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
2504 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
2505 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
2506 hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
2507 hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
2508
2509 /* default 1.28 sec page scan */
2510 hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
2511 hdev->def_page_scan_int = 0x0800;
2512 hdev->def_page_scan_window = 0x0012;
2513
2514 mutex_init(&hdev->lock);
2515 mutex_init(&hdev->req_lock);
2516 mutex_init(&hdev->mgmt_pending_lock);
2517
2518 ida_init(&hdev->unset_handle_ida);
2519
2520 INIT_LIST_HEAD(&hdev->mesh_pending);
2521 INIT_LIST_HEAD(&hdev->mgmt_pending);
2522 INIT_LIST_HEAD(&hdev->reject_list);
2523 INIT_LIST_HEAD(&hdev->accept_list);
2524 INIT_LIST_HEAD(&hdev->uuids);
2525 INIT_LIST_HEAD(&hdev->link_keys);
2526 INIT_LIST_HEAD(&hdev->long_term_keys);
2527 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
2528 INIT_LIST_HEAD(&hdev->remote_oob_data);
2529 INIT_LIST_HEAD(&hdev->le_accept_list);
2530 INIT_LIST_HEAD(&hdev->le_resolv_list);
2531 INIT_LIST_HEAD(&hdev->le_conn_params);
2532 INIT_LIST_HEAD(&hdev->pend_le_conns);
2533 INIT_LIST_HEAD(&hdev->pend_le_reports);
2534 INIT_LIST_HEAD(&hdev->conn_hash.list);
2535 INIT_LIST_HEAD(&hdev->adv_instances);
2536 INIT_LIST_HEAD(&hdev->blocked_keys);
2537 INIT_LIST_HEAD(&hdev->monitored_devices);
2538
2539 INIT_LIST_HEAD(&hdev->local_codecs);
2540 INIT_WORK(&hdev->rx_work, hci_rx_work);
2541 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2542 INIT_WORK(&hdev->tx_work, hci_tx_work);
2543 INIT_WORK(&hdev->power_on, hci_power_on);
2544 INIT_WORK(&hdev->error_reset, hci_error_reset);
2545
2546 hci_cmd_sync_init(hdev);
2547
2548 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2549
2550 skb_queue_head_init(&hdev->rx_q);
2551 skb_queue_head_init(&hdev->cmd_q);
2552 skb_queue_head_init(&hdev->raw_q);
2553
2554 init_waitqueue_head(&hdev->req_wait_q);
2555
2556 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
2557 INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);
2558
2559 hci_devcd_setup(hdev);
2560 hci_request_setup(hdev);
2561
2562 hci_init_sysfs(hdev);
2563 discovery_init(hdev);
2564
2565 return hdev;
2566 }
2567 EXPORT_SYMBOL(hci_alloc_dev_priv);
2568
2569 /* Free HCI device */
2570 void hci_free_dev(struct hci_dev *hdev)
2571 {
2572 /* will free via device release */
2573 put_device(&hdev->dev);
2574 }
2575 EXPORT_SYMBOL(hci_free_dev);
2576
2577 /* Register HCI device */
2578 int hci_register_dev(struct hci_dev *hdev)
2579 {
2580 int id, error;
2581
2582 if (!hdev->open || !hdev->close || !hdev->send)
2583 return -EINVAL;
2584
2585 id = ida_alloc_max(&hci_index_ida, HCI_MAX_ID - 1, GFP_KERNEL);
2586 if (id < 0)
2587 return id;
2588
2589 error = dev_set_name(&hdev->dev, "hci%u", id);
2590 if (error)
2591 return error;
2592
2593 hdev->name = dev_name(&hdev->dev);
2594 hdev->id = id;
2595
2596 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2597
2598 hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
2599 if (!hdev->workqueue) {
2600 error = -ENOMEM;
2601 goto err;
2602 }
2603
2604 hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
2605 hdev->name);
2606 if (!hdev->req_workqueue) {
2607 destroy_workqueue(hdev->workqueue);
2608 error = -ENOMEM;
2609 goto err;
2610 }
2611
2612 if (!IS_ERR_OR_NULL(bt_debugfs))
2613 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
2614
2615 error = device_add(&hdev->dev);
2616 if (error < 0)
2617 goto err_wqueue;
2618
2619 hci_leds_init(hdev);
2620
2621 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2622 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2623 hdev);
2624 if (hdev->rfkill) {
2625 if (rfkill_register(hdev->rfkill) < 0) {
2626 rfkill_destroy(hdev->rfkill);
2627 hdev->rfkill = NULL;
2628 }
2629 }
2630
2631 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2632 hci_dev_set_flag(hdev, HCI_RFKILLED);
2633
2634 hci_dev_set_flag(hdev, HCI_SETUP);
2635 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
2636
2637 /* Assume BR/EDR support until proven otherwise (such as
2638 	 * through reading supported features during init).
2639 */
2640 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
2641
2642 write_lock(&hci_dev_list_lock);
2643 list_add(&hdev->list, &hci_dev_list);
2644 write_unlock(&hci_dev_list_lock);
2645
2646 /* Devices that are marked for raw-only usage are unconfigured
2647 * and should not be included in normal operation.
2648 */
2649 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2650 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
2651
2652 /* Mark Remote Wakeup connection flag as supported if driver has wakeup
2653 * callback.
2654 */
2655 if (hdev->wakeup)
2656 hdev->conn_flags |= HCI_CONN_FLAG_REMOTE_WAKEUP;
2657
2658 hci_sock_dev_event(hdev, HCI_DEV_REG);
2659 hci_dev_hold(hdev);
2660
2661 error = hci_register_suspend_notifier(hdev);
2662 if (error)
2663 BT_WARN("register suspend notifier failed error:%d\n", error);
2664
2665 queue_work(hdev->req_workqueue, &hdev->power_on);
2666
2667 idr_init(&hdev->adv_monitors_idr);
2668 msft_register(hdev);
2669
2670 return id;
2671
2672 err_wqueue:
2673 debugfs_remove_recursive(hdev->debugfs);
2674 destroy_workqueue(hdev->workqueue);
2675 destroy_workqueue(hdev->req_workqueue);
2676 err:
2677 ida_free(&hci_index_ida, hdev->id);
2678
2679 return error;
2680 }
2681 EXPORT_SYMBOL(hci_register_dev);
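
/* Driver-side sketch (illustrative; my_open/my_close/my_send are hypothetical
 * transport callbacks): the minimal sequence that satisfies the checks at the
 * top of hci_register_dev() is roughly:
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *
 *	if (!hdev)
 *		return -ENOMEM;
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 *
 * Tear-down is the mirror image: hci_unregister_dev() followed by
 * hci_free_dev().
 */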
2682
2683 /* Unregister HCI device */
2684 void hci_unregister_dev(struct hci_dev *hdev)
2685 {
2686 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2687
2688 mutex_lock(&hdev->unregister_lock);
2689 hci_dev_set_flag(hdev, HCI_UNREGISTER);
2690 mutex_unlock(&hdev->unregister_lock);
2691
2692 write_lock(&hci_dev_list_lock);
2693 list_del(&hdev->list);
2694 write_unlock(&hci_dev_list_lock);
2695
2696 synchronize_srcu(&hdev->srcu);
2697 cleanup_srcu_struct(&hdev->srcu);
2698
2699 cancel_work_sync(&hdev->rx_work);
2700 cancel_work_sync(&hdev->cmd_work);
2701 cancel_work_sync(&hdev->tx_work);
2702 cancel_work_sync(&hdev->power_on);
2703 cancel_work_sync(&hdev->error_reset);
2704
2705 hci_cmd_sync_clear(hdev);
2706
2707 hci_unregister_suspend_notifier(hdev);
2708
2709 hci_dev_do_close(hdev);
2710
2711 if (!test_bit(HCI_INIT, &hdev->flags) &&
2712 !hci_dev_test_flag(hdev, HCI_SETUP) &&
2713 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
2714 hci_dev_lock(hdev);
2715 mgmt_index_removed(hdev);
2716 hci_dev_unlock(hdev);
2717 }
2718
2719 /* mgmt_index_removed should take care of emptying the
2720 * pending list */
2721 BUG_ON(!list_empty(&hdev->mgmt_pending));
2722
2723 hci_sock_dev_event(hdev, HCI_DEV_UNREG);
2724
2725 if (hdev->rfkill) {
2726 rfkill_unregister(hdev->rfkill);
2727 rfkill_destroy(hdev->rfkill);
2728 }
2729
2730 device_del(&hdev->dev);
2731 /* Actual cleanup is deferred until hci_release_dev(). */
2732 hci_dev_put(hdev);
2733 }
2734 EXPORT_SYMBOL(hci_unregister_dev);
2735
2736 /* Release HCI device */
2737 void hci_release_dev(struct hci_dev *hdev)
2738 {
2739 debugfs_remove_recursive(hdev->debugfs);
2740 kfree_const(hdev->hw_info);
2741 kfree_const(hdev->fw_info);
2742
2743 destroy_workqueue(hdev->workqueue);
2744 destroy_workqueue(hdev->req_workqueue);
2745
2746 hci_dev_lock(hdev);
2747 hci_bdaddr_list_clear(&hdev->reject_list);
2748 hci_bdaddr_list_clear(&hdev->accept_list);
2749 hci_uuids_clear(hdev);
2750 hci_link_keys_clear(hdev);
2751 hci_smp_ltks_clear(hdev);
2752 hci_smp_irks_clear(hdev);
2753 hci_remote_oob_data_clear(hdev);
2754 hci_adv_instances_clear(hdev);
2755 hci_adv_monitors_clear(hdev);
2756 hci_bdaddr_list_clear(&hdev->le_accept_list);
2757 hci_bdaddr_list_clear(&hdev->le_resolv_list);
2758 hci_conn_params_clear_all(hdev);
2759 hci_discovery_filter_clear(hdev);
2760 hci_blocked_keys_clear(hdev);
2761 hci_codec_list_clear(&hdev->local_codecs);
2762 msft_release(hdev);
2763 hci_dev_unlock(hdev);
2764
2765 ida_destroy(&hdev->unset_handle_ida);
2766 ida_free(&hci_index_ida, hdev->id);
2767 kfree_skb(hdev->sent_cmd);
2768 kfree_skb(hdev->req_skb);
2769 kfree_skb(hdev->recv_event);
2770 kfree(hdev);
2771 }
2772 EXPORT_SYMBOL(hci_release_dev);
2773
2774 int hci_register_suspend_notifier(struct hci_dev *hdev)
2775 {
2776 int ret = 0;
2777
2778 if (!hdev->suspend_notifier.notifier_call &&
2779 !test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
2780 hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
2781 ret = register_pm_notifier(&hdev->suspend_notifier);
2782 }
2783
2784 return ret;
2785 }
2786
2787 int hci_unregister_suspend_notifier(struct hci_dev *hdev)
2788 {
2789 int ret = 0;
2790
2791 if (hdev->suspend_notifier.notifier_call) {
2792 ret = unregister_pm_notifier(&hdev->suspend_notifier);
2793 if (!ret)
2794 hdev->suspend_notifier.notifier_call = NULL;
2795 }
2796
2797 return ret;
2798 }
2799
2800 /* Cancel ongoing command synchronously:
2801 *
2802 * - Cancel command timer
2803 * - Reset command counter
2804 * - Cancel command request
2805 */
2806 static void hci_cancel_cmd_sync(struct hci_dev *hdev, int err)
2807 {
2808 bt_dev_dbg(hdev, "err 0x%2.2x", err);
2809
2810 cancel_delayed_work_sync(&hdev->cmd_timer);
2811 cancel_delayed_work_sync(&hdev->ncmd_timer);
2812 atomic_set(&hdev->cmd_cnt, 1);
2813
2814 hci_cmd_sync_cancel_sync(hdev, err);
2815 }
2816
2817 /* Suspend HCI device */
2818 int hci_suspend_dev(struct hci_dev *hdev)
2819 {
2820 int ret;
2821
2822 bt_dev_dbg(hdev, "");
2823
2824 	/* Suspend should only take effect when the controller is powered. */
2825 if (!hdev_is_powered(hdev) ||
2826 hci_dev_test_flag(hdev, HCI_UNREGISTER))
2827 return 0;
2828
2829 /* If powering down don't attempt to suspend */
2830 if (mgmt_powering_down(hdev))
2831 return 0;
2832
2833 /* Cancel potentially blocking sync operation before suspend */
2834 hci_cancel_cmd_sync(hdev, EHOSTDOWN);
2835
2836 hci_req_sync_lock(hdev);
2837 ret = hci_suspend_sync(hdev);
2838 hci_req_sync_unlock(hdev);
2839
2840 hci_clear_wake_reason(hdev);
2841 mgmt_suspending(hdev, hdev->suspend_state);
2842
2843 hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
2844 return ret;
2845 }
2846 EXPORT_SYMBOL(hci_suspend_dev);
2847
2848 /* Resume HCI device */
2849 int hci_resume_dev(struct hci_dev *hdev)
2850 {
2851 int ret;
2852
2853 bt_dev_dbg(hdev, "");
2854
2855 	/* Resume should only take effect when the controller is powered. */
2856 if (!hdev_is_powered(hdev) ||
2857 hci_dev_test_flag(hdev, HCI_UNREGISTER))
2858 return 0;
2859
2860 /* If powering down don't attempt to resume */
2861 if (mgmt_powering_down(hdev))
2862 return 0;
2863
2864 hci_req_sync_lock(hdev);
2865 ret = hci_resume_sync(hdev);
2866 hci_req_sync_unlock(hdev);
2867
2868 mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
2869 hdev->wake_addr_type);
2870
2871 hci_sock_dev_event(hdev, HCI_DEV_RESUME);
2872 return ret;
2873 }
2874 EXPORT_SYMBOL(hci_resume_dev);
2875
2876 /* Reset HCI device */
2877 int hci_reset_dev(struct hci_dev *hdev)
2878 {
2879 static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
2880 struct sk_buff *skb;
2881
2882 skb = bt_skb_alloc(3, GFP_ATOMIC);
2883 if (!skb)
2884 return -ENOMEM;
2885
2886 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
2887 skb_put_data(skb, hw_err, 3);
2888
2889 bt_dev_err(hdev, "Injecting HCI hardware error event");
2890
2891 /* Send Hardware Error to upper stack */
2892 return hci_recv_frame(hdev, skb);
2893 }
2894 EXPORT_SYMBOL(hci_reset_dev);
2895
2896 /* Receive frame from HCI drivers */
2897 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
2898 {
2899 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
2900 && !test_bit(HCI_INIT, &hdev->flags))) {
2901 kfree_skb(skb);
2902 return -ENXIO;
2903 }
2904
2905 switch (hci_skb_pkt_type(skb)) {
2906 case HCI_EVENT_PKT:
2907 break;
2908 case HCI_ACLDATA_PKT:
2909 /* Detect if ISO packet has been sent as ACL */
2910 if (hci_conn_num(hdev, ISO_LINK)) {
2911 __u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle);
2912 __u8 type;
2913
2914 type = hci_conn_lookup_type(hdev, hci_handle(handle));
2915 if (type == ISO_LINK)
2916 hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
2917 }
2918 break;
2919 case HCI_SCODATA_PKT:
2920 break;
2921 case HCI_ISODATA_PKT:
2922 break;
2923 default:
2924 kfree_skb(skb);
2925 return -EINVAL;
2926 }
2927
2928 /* Incoming skb */
2929 bt_cb(skb)->incoming = 1;
2930
2931 /* Time stamp */
2932 __net_timestamp(skb);
2933
2934 skb_queue_tail(&hdev->rx_q, skb);
2935 queue_work(hdev->workqueue, &hdev->rx_work);
2936
2937 return 0;
2938 }
2939 EXPORT_SYMBOL(hci_recv_frame);
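
/* Driver RX sketch (illustrative; my_data, buf and len are hypothetical): a
 * transport driver that has reassembled a complete HCI event hands it to the
 * core like this:
 *
 *	struct sk_buff *skb = bt_skb_alloc(len, GFP_ATOMIC);
 *
 *	if (!skb)
 *		return -ENOMEM;
 *	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
 *	skb_put_data(skb, buf, len);
 *	return hci_recv_frame(my_data->hdev, skb);
 *
 * hci_recv_frame() takes ownership of the skb and frees it on error.
 */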
2940
2941 /* Receive diagnostic message from HCI drivers */
2942 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
2943 {
2944 /* Mark as diagnostic packet */
2945 hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
2946
2947 /* Time stamp */
2948 __net_timestamp(skb);
2949
2950 skb_queue_tail(&hdev->rx_q, skb);
2951 queue_work(hdev->workqueue, &hdev->rx_work);
2952
2953 return 0;
2954 }
2955 EXPORT_SYMBOL(hci_recv_diag);
2956
2957 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
2958 {
2959 va_list vargs;
2960
2961 va_start(vargs, fmt);
2962 kfree_const(hdev->hw_info);
2963 hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
2964 va_end(vargs);
2965 }
2966 EXPORT_SYMBOL(hci_set_hw_info);
2967
2968 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
2969 {
2970 va_list vargs;
2971
2972 va_start(vargs, fmt);
2973 kfree_const(hdev->fw_info);
2974 hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
2975 va_end(vargs);
2976 }
2977 EXPORT_SYMBOL(hci_set_fw_info);
2978
2979 /* ---- Interface to upper protocols ---- */
2980
2981 int hci_register_cb(struct hci_cb *cb)
2982 {
2983 BT_DBG("%p name %s", cb, cb->name);
2984
2985 mutex_lock(&hci_cb_list_lock);
2986 list_add_tail(&cb->list, &hci_cb_list);
2987 mutex_unlock(&hci_cb_list_lock);
2988
2989 return 0;
2990 }
2991 EXPORT_SYMBOL(hci_register_cb);
2992
2993 int hci_unregister_cb(struct hci_cb *cb)
2994 {
2995 BT_DBG("%p name %s", cb, cb->name);
2996
2997 mutex_lock(&hci_cb_list_lock);
2998 list_del(&cb->list);
2999 mutex_unlock(&hci_cb_list_lock);
3000
3001 return 0;
3002 }
3003 EXPORT_SYMBOL(hci_unregister_cb);
3004
3005 static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3006 {
3007 int err;
3008
3009 BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
3010 skb->len);
3011
3012 /* Time stamp */
3013 __net_timestamp(skb);
3014
3015 /* Send copy to monitor */
3016 hci_send_to_monitor(hdev, skb);
3017
3018 if (atomic_read(&hdev->promisc)) {
3019 /* Send copy to the sockets */
3020 hci_send_to_sock(hdev, skb);
3021 }
3022
3023 /* Get rid of skb owner, prior to sending to the driver. */
3024 skb_orphan(skb);
3025
3026 if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3027 kfree_skb(skb);
3028 return -EINVAL;
3029 }
3030
3031 err = hdev->send(hdev, skb);
3032 if (err < 0) {
3033 bt_dev_err(hdev, "sending frame failed (%d)", err);
3034 kfree_skb(skb);
3035 return err;
3036 }
3037
3038 return 0;
3039 }
3040
3041 /* Send HCI command */
3042 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3043 const void *param)
3044 {
3045 struct sk_buff *skb;
3046
3047 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3048
3049 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3050 if (!skb) {
3051 bt_dev_err(hdev, "no memory for command");
3052 return -ENOMEM;
3053 }
3054
3055 /* Stand-alone HCI commands must be flagged as
3056 * single-command requests.
3057 */
3058 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
3059
3060 skb_queue_tail(&hdev->cmd_q, skb);
3061 queue_work(hdev->workqueue, &hdev->cmd_work);
3062
3063 return 0;
3064 }
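
/* Example (illustrative): queueing a standard command through the
 * work-based path above, e.g. Read BD_ADDR which has no parameters:
 *
 *	err = hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
 *
 * The reply arrives asynchronously via the event handlers; callers that need
 * to wait for the result should use the hci_cmd_sync helpers instead.
 */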
3065
3066 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
3067 const void *param)
3068 {
3069 struct sk_buff *skb;
3070
3071 if (hci_opcode_ogf(opcode) != 0x3f) {
3072 /* A controller receiving a command shall respond with either
3073 * a Command Status Event or a Command Complete Event.
3074 * Therefore, all standard HCI commands must be sent via the
3075 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
3076 * Some vendors do not comply with this rule for vendor-specific
3077 * commands and do not return any event. We want to support
3078 * unresponded commands for such cases only.
3079 */
3080 bt_dev_err(hdev, "unresponded command not supported");
3081 return -EINVAL;
3082 }
3083
3084 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3085 if (!skb) {
3086 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
3087 opcode);
3088 return -ENOMEM;
3089 }
3090
3091 hci_send_frame(hdev, skb);
3092
3093 return 0;
3094 }
3095 EXPORT_SYMBOL(__hci_cmd_send);
3096
3097 /* Get data from the previously sent command */
3098 static void *hci_cmd_data(struct sk_buff *skb, __u16 opcode)
3099 {
3100 struct hci_command_hdr *hdr;
3101
3102 if (!skb || skb->len < HCI_COMMAND_HDR_SIZE)
3103 return NULL;
3104
3105 hdr = (void *)skb->data;
3106
3107 if (hdr->opcode != cpu_to_le16(opcode))
3108 return NULL;
3109
3110 return skb->data + HCI_COMMAND_HDR_SIZE;
3111 }
3112
3113 /* Get data from the previously sent command */
3114 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3115 {
3116 void *data;
3117
3118 /* Check if opcode matches last sent command */
3119 data = hci_cmd_data(hdev->sent_cmd, opcode);
3120 if (!data)
3121 /* Check if opcode matches last request */
3122 data = hci_cmd_data(hdev->req_skb, opcode);
3123
3124 return data;
3125 }
3126
3127 /* Get data from last received event */
3128 void *hci_recv_event_data(struct hci_dev *hdev, __u8 event)
3129 {
3130 struct hci_event_hdr *hdr;
3131 int offset;
3132
3133 if (!hdev->recv_event)
3134 return NULL;
3135
3136 hdr = (void *)hdev->recv_event->data;
3137 offset = sizeof(*hdr);
3138
3139 if (hdr->evt != event) {
3140 		/* In case of an LE meta event, check for a subevent match */
3141 if (hdr->evt == HCI_EV_LE_META) {
3142 struct hci_ev_le_meta *ev;
3143
3144 ev = (void *)hdev->recv_event->data + offset;
3145 offset += sizeof(*ev);
3146 if (ev->subevent == event)
3147 goto found;
3148 }
3149 return NULL;
3150 }
3151
3152 found:
3153 bt_dev_dbg(hdev, "event 0x%2.2x", event);
3154
3155 return hdev->recv_event->data + offset;
3156 }
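
/* Example (illustrative): inspecting the last received event, whether it is
 * a plain event or an LE meta subevent:
 *
 *	struct hci_ev_conn_complete *cc;
 *
 *	cc = hci_recv_event_data(hdev, HCI_EV_CONN_COMPLETE);
 *	if (cc)
 *		... the connection handle is in cc->handle ...
 *
 * Passing an HCI_EV_LE_* subevent code works the same way thanks to the
 * HCI_EV_LE_META special case above.
 */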
3157
3158 /* Send ACL data */
3159 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3160 {
3161 struct hci_acl_hdr *hdr;
3162 int len = skb->len;
3163
3164 skb_push(skb, HCI_ACL_HDR_SIZE);
3165 skb_reset_transport_header(skb);
3166 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3167 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3168 hdr->dlen = cpu_to_le16(len);
3169 }
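
/* Worked example (illustrative): hci_handle_pack() keeps the 12-bit
 * connection handle in bits 0-11 and puts the packet boundary/broadcast
 * flags in bits 12-15, so handle 0x0001 with ACL_START_NO_FLUSH (0x00)
 * yields a header value of 0x0001, while the same handle with ACL_CONT
 * (0x01) yields 0x1001.
 */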
3170
3171 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3172 struct sk_buff *skb, __u16 flags)
3173 {
3174 struct hci_conn *conn = chan->conn;
3175 struct hci_dev *hdev = conn->hdev;
3176 struct sk_buff *list;
3177
3178 skb->len = skb_headlen(skb);
3179 skb->data_len = 0;
3180
3181 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3182
3183 hci_add_acl_hdr(skb, conn->handle, flags);
3184
3185 list = skb_shinfo(skb)->frag_list;
3186 if (!list) {
3187 /* Non fragmented */
3188 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3189
3190 skb_queue_tail(queue, skb);
3191 } else {
3192 /* Fragmented */
3193 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3194
3195 skb_shinfo(skb)->frag_list = NULL;
3196
3197 /* Queue all fragments atomically. We need to use spin_lock_bh
3198 		 * here because on 6LoWPAN links this function is called from
3199 		 * softirq context, and using a normal spin lock could cause
3200 * deadlocks.
3201 */
3202 spin_lock_bh(&queue->lock);
3203
3204 __skb_queue_tail(queue, skb);
3205
3206 flags &= ~ACL_START;
3207 flags |= ACL_CONT;
3208 do {
3209 skb = list; list = list->next;
3210
3211 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3212 hci_add_acl_hdr(skb, conn->handle, flags);
3213
3214 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3215
3216 __skb_queue_tail(queue, skb);
3217 } while (list);
3218
3219 spin_unlock_bh(&queue->lock);
3220 }
3221 }
3222
3223 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3224 {
3225 struct hci_dev *hdev = chan->conn->hdev;
3226
3227 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3228
3229 hci_queue_acl(chan, &chan->data_q, skb, flags);
3230
3231 queue_work(hdev->workqueue, &hdev->tx_work);
3232 }
3233
3234 /* Send SCO data */
3235 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3236 {
3237 struct hci_dev *hdev = conn->hdev;
3238 struct hci_sco_hdr hdr;
3239
3240 BT_DBG("%s len %d", hdev->name, skb->len);
3241
3242 hdr.handle = cpu_to_le16(conn->handle);
3243 hdr.dlen = skb->len;
3244
3245 skb_push(skb, HCI_SCO_HDR_SIZE);
3246 skb_reset_transport_header(skb);
3247 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3248
3249 hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
3250
3251 skb_queue_tail(&conn->data_q, skb);
3252 queue_work(hdev->workqueue, &hdev->tx_work);
3253 }
3254
3255 /* Send ISO data */
3256 static void hci_add_iso_hdr(struct sk_buff *skb, __u16 handle, __u8 flags)
3257 {
3258 struct hci_iso_hdr *hdr;
3259 int len = skb->len;
3260
3261 skb_push(skb, HCI_ISO_HDR_SIZE);
3262 skb_reset_transport_header(skb);
3263 hdr = (struct hci_iso_hdr *)skb_transport_header(skb);
3264 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3265 hdr->dlen = cpu_to_le16(len);
3266 }
3267
3268 static void hci_queue_iso(struct hci_conn *conn, struct sk_buff_head *queue,
3269 struct sk_buff *skb)
3270 {
3271 struct hci_dev *hdev = conn->hdev;
3272 struct sk_buff *list;
3273 __u16 flags;
3274
3275 skb->len = skb_headlen(skb);
3276 skb->data_len = 0;
3277
3278 hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
3279
3280 list = skb_shinfo(skb)->frag_list;
3281
3282 flags = hci_iso_flags_pack(list ? ISO_START : ISO_SINGLE, 0x00);
3283 hci_add_iso_hdr(skb, conn->handle, flags);
3284
3285 if (!list) {
3286 /* Non fragmented */
3287 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3288
3289 skb_queue_tail(queue, skb);
3290 } else {
3291 /* Fragmented */
3292 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3293
3294 skb_shinfo(skb)->frag_list = NULL;
3295
3296 __skb_queue_tail(queue, skb);
3297
3298 do {
3299 skb = list; list = list->next;
3300
3301 hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
3302 flags = hci_iso_flags_pack(list ? ISO_CONT : ISO_END,
3303 0x00);
3304 hci_add_iso_hdr(skb, conn->handle, flags);
3305
3306 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3307
3308 __skb_queue_tail(queue, skb);
3309 } while (list);
3310 }
3311 }
3312
3313 void hci_send_iso(struct hci_conn *conn, struct sk_buff *skb)
3314 {
3315 struct hci_dev *hdev = conn->hdev;
3316
3317 BT_DBG("%s len %d", hdev->name, skb->len);
3318
3319 hci_queue_iso(conn, &conn->data_q, skb);
3320
3321 queue_work(hdev->workqueue, &hdev->tx_work);
3322 }
3323
3324 /* ---- HCI TX task (outgoing data) ---- */
3325
3326 /* HCI Connection scheduler */
3327 static inline void hci_quote_sent(struct hci_conn *conn, int num, int *quote)
3328 {
3329 struct hci_dev *hdev;
3330 int cnt, q;
3331
3332 if (!conn) {
3333 *quote = 0;
3334 return;
3335 }
3336
3337 hdev = conn->hdev;
3338
3339 switch (conn->type) {
3340 case ACL_LINK:
3341 cnt = hdev->acl_cnt;
3342 break;
3343 case SCO_LINK:
3344 case ESCO_LINK:
3345 cnt = hdev->sco_cnt;
3346 break;
3347 case LE_LINK:
3348 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3349 break;
3350 case ISO_LINK:
3351 cnt = hdev->iso_mtu ? hdev->iso_cnt :
3352 hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3353 break;
3354 default:
3355 cnt = 0;
3356 bt_dev_err(hdev, "unknown link type %d", conn->type);
3357 }
3358
3359 q = cnt / num;
3360 *quote = q ? q : 1;
3361 }
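
/* Worked example (illustrative): with hdev->acl_cnt == 8 free ACL buffers
 * and num == 3 active ACL connections the quote becomes 8 / 3 = 2 packets
 * for the selected connection; when the integer division yields 0 the
 * scheduler still grants one packet, so low-buffer situations cannot starve
 * a link completely.
 */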
3362
3363 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3364 int *quote)
3365 {
3366 struct hci_conn_hash *h = &hdev->conn_hash;
3367 struct hci_conn *conn = NULL, *c;
3368 unsigned int num = 0, min = ~0;
3369
3370 /* We don't have to lock device here. Connections are always
3371 * added and removed with TX task disabled. */
3372
3373 rcu_read_lock();
3374
3375 list_for_each_entry_rcu(c, &h->list, list) {
3376 if (c->type != type || skb_queue_empty(&c->data_q))
3377 continue;
3378
3379 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3380 continue;
3381
3382 num++;
3383
3384 if (c->sent < min) {
3385 min = c->sent;
3386 conn = c;
3387 }
3388
3389 if (hci_conn_num(hdev, type) == num)
3390 break;
3391 }
3392
3393 rcu_read_unlock();
3394
3395 hci_quote_sent(conn, num, quote);
3396
3397 BT_DBG("conn %p quote %d", conn, *quote);
3398 return conn;
3399 }
3400
3401 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3402 {
3403 struct hci_conn_hash *h = &hdev->conn_hash;
3404 struct hci_conn *c;
3405
3406 bt_dev_err(hdev, "link tx timeout");
3407
3408 hci_dev_lock(hdev);
3409
3410 /* Kill stalled connections */
3411 list_for_each_entry(c, &h->list, list) {
3412 if (c->type == type && c->sent) {
3413 bt_dev_err(hdev, "killing stalled connection %pMR",
3414 &c->dst);
3415 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3416 }
3417 }
3418
3419 hci_dev_unlock(hdev);
3420 }
3421
3422 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3423 int *quote)
3424 {
3425 struct hci_conn_hash *h = &hdev->conn_hash;
3426 struct hci_chan *chan = NULL;
3427 unsigned int num = 0, min = ~0, cur_prio = 0;
3428 struct hci_conn *conn;
3429 int conn_num = 0;
3430
3431 BT_DBG("%s", hdev->name);
3432
3433 rcu_read_lock();
3434
3435 list_for_each_entry_rcu(conn, &h->list, list) {
3436 struct hci_chan *tmp;
3437
3438 if (conn->type != type)
3439 continue;
3440
3441 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3442 continue;
3443
3444 conn_num++;
3445
3446 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3447 struct sk_buff *skb;
3448
3449 if (skb_queue_empty(&tmp->data_q))
3450 continue;
3451
3452 skb = skb_peek(&tmp->data_q);
3453 if (skb->priority < cur_prio)
3454 continue;
3455
3456 if (skb->priority > cur_prio) {
3457 num = 0;
3458 min = ~0;
3459 cur_prio = skb->priority;
3460 }
3461
3462 num++;
3463
3464 if (conn->sent < min) {
3465 min = conn->sent;
3466 chan = tmp;
3467 }
3468 }
3469
3470 if (hci_conn_num(hdev, type) == conn_num)
3471 break;
3472 }
3473
3474 rcu_read_unlock();
3475
3476 if (!chan)
3477 return NULL;
3478
3479 hci_quote_sent(chan->conn, num, quote);
3480
3481 BT_DBG("chan %p quote %d", chan, *quote);
3482 return chan;
3483 }
3484
3485 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3486 {
3487 struct hci_conn_hash *h = &hdev->conn_hash;
3488 struct hci_conn *conn;
3489 int num = 0;
3490
3491 BT_DBG("%s", hdev->name);
3492
3493 rcu_read_lock();
3494
3495 list_for_each_entry_rcu(conn, &h->list, list) {
3496 struct hci_chan *chan;
3497
3498 if (conn->type != type)
3499 continue;
3500
3501 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3502 continue;
3503
3504 num++;
3505
3506 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3507 struct sk_buff *skb;
3508
3509 if (chan->sent) {
3510 chan->sent = 0;
3511 continue;
3512 }
3513
3514 if (skb_queue_empty(&chan->data_q))
3515 continue;
3516
3517 skb = skb_peek(&chan->data_q);
3518 if (skb->priority >= HCI_PRIO_MAX - 1)
3519 continue;
3520
3521 skb->priority = HCI_PRIO_MAX - 1;
3522
3523 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3524 skb->priority);
3525 }
3526
3527 if (hci_conn_num(hdev, type) == num)
3528 break;
3529 }
3530
3531 rcu_read_unlock();
3532
3533 }
3534
3535 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type)
3536 {
3537 unsigned long last_tx;
3538
3539 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
3540 return;
3541
3542 switch (type) {
3543 case LE_LINK:
3544 last_tx = hdev->le_last_tx;
3545 break;
3546 default:
3547 last_tx = hdev->acl_last_tx;
3548 break;
3549 }
3550
3551 /* tx timeout must be longer than maximum link supervision timeout
3552 * (40.9 seconds)
3553 */
3554 if (!cnt && time_after(jiffies, last_tx + HCI_ACL_TX_TIMEOUT))
3555 hci_link_tx_to(hdev, type);
3556 }
3557
3558 /* Schedule SCO */
3559 static void hci_sched_sco(struct hci_dev *hdev)
3560 {
3561 struct hci_conn *conn;
3562 struct sk_buff *skb;
3563 int quote;
3564
3565 BT_DBG("%s", hdev->name);
3566
3567 if (!hci_conn_num(hdev, SCO_LINK))
3568 return;
3569
3570 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, "e))) {
3571 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3572 BT_DBG("skb %p len %d", skb, skb->len);
3573 hci_send_frame(hdev, skb);
3574
3575 conn->sent++;
3576 if (conn->sent == ~0)
3577 conn->sent = 0;
3578 }
3579 }
3580 }
3581
3582 static void hci_sched_esco(struct hci_dev *hdev)
3583 {
3584 struct hci_conn *conn;
3585 struct sk_buff *skb;
3586 int quote;
3587
3588 BT_DBG("%s", hdev->name);
3589
3590 if (!hci_conn_num(hdev, ESCO_LINK))
3591 return;
3592
3593 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3594 "e))) {
3595 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3596 BT_DBG("skb %p len %d", skb, skb->len);
3597 hci_send_frame(hdev, skb);
3598
3599 conn->sent++;
3600 if (conn->sent == ~0)
3601 conn->sent = 0;
3602 }
3603 }
3604 }
3605
3606 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3607 {
3608 unsigned int cnt = hdev->acl_cnt;
3609 struct hci_chan *chan;
3610 struct sk_buff *skb;
3611 int quote;
3612
3613 __check_timeout(hdev, cnt, ACL_LINK);
3614
3615 while (hdev->acl_cnt &&
3616 (chan = hci_chan_sent(hdev, ACL_LINK, "e))) {
3617 u32 priority = (skb_peek(&chan->data_q))->priority;
3618 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3619 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3620 skb->len, skb->priority);
3621
3622 /* Stop if priority has changed */
3623 if (skb->priority < priority)
3624 break;
3625
3626 skb = skb_dequeue(&chan->data_q);
3627
3628 hci_conn_enter_active_mode(chan->conn,
3629 bt_cb(skb)->force_active);
3630
3631 hci_send_frame(hdev, skb);
3632 hdev->acl_last_tx = jiffies;
3633
3634 hdev->acl_cnt--;
3635 chan->sent++;
3636 chan->conn->sent++;
3637
3638 /* Send pending SCO packets right away */
3639 hci_sched_sco(hdev);
3640 hci_sched_esco(hdev);
3641 }
3642 }
3643
3644 if (cnt != hdev->acl_cnt)
3645 hci_prio_recalculate(hdev, ACL_LINK);
3646 }
3647
3648 static void hci_sched_acl(struct hci_dev *hdev)
3649 {
3650 BT_DBG("%s", hdev->name);
3651
3652 /* No ACL link over BR/EDR controller */
3653 if (!hci_conn_num(hdev, ACL_LINK))
3654 return;
3655
3656 hci_sched_acl_pkt(hdev);
3657 }
3658
3659 static void hci_sched_le(struct hci_dev *hdev)
3660 {
3661 struct hci_chan *chan;
3662 struct sk_buff *skb;
3663 int quote, *cnt, tmp;
3664
3665 BT_DBG("%s", hdev->name);
3666
3667 if (!hci_conn_num(hdev, LE_LINK))
3668 return;
3669
3670 cnt = hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt;
3671
3672 __check_timeout(hdev, *cnt, LE_LINK);
3673
3674 tmp = *cnt;
3675 while (*cnt && (chan = hci_chan_sent(hdev, LE_LINK, "e))) {
3676 u32 priority = (skb_peek(&chan->data_q))->priority;
3677 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3678 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3679 skb->len, skb->priority);
3680
3681 /* Stop if priority has changed */
3682 if (skb->priority < priority)
3683 break;
3684
3685 skb = skb_dequeue(&chan->data_q);
3686
3687 hci_send_frame(hdev, skb);
3688 hdev->le_last_tx = jiffies;
3689
3690 (*cnt)--;
3691 chan->sent++;
3692 chan->conn->sent++;
3693
3694 /* Send pending SCO packets right away */
3695 hci_sched_sco(hdev);
3696 hci_sched_esco(hdev);
3697 }
3698 }
3699
3700 if (*cnt != tmp)
3701 hci_prio_recalculate(hdev, LE_LINK);
3702 }
3703
3704 /* Schedule CIS */
3705 static void hci_sched_iso(struct hci_dev *hdev)
3706 {
3707 struct hci_conn *conn;
3708 struct sk_buff *skb;
3709 int quote, *cnt;
3710
3711 BT_DBG("%s", hdev->name);
3712
3713 if (!hci_conn_num(hdev, ISO_LINK))
3714 return;
3715
3716 cnt = hdev->iso_pkts ? &hdev->iso_cnt :
3717 hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt;
3718 while (*cnt && (conn = hci_low_sent(hdev, ISO_LINK, "e))) {
3719 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3720 BT_DBG("skb %p len %d", skb, skb->len);
3721 hci_send_frame(hdev, skb);
3722
3723 conn->sent++;
3724 if (conn->sent == ~0)
3725 conn->sent = 0;
3726 (*cnt)--;
3727 }
3728 }
3729 }
3730
3731 static void hci_tx_work(struct work_struct *work)
3732 {
3733 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3734 struct sk_buff *skb;
3735
3736 BT_DBG("%s acl %d sco %d le %d iso %d", hdev->name, hdev->acl_cnt,
3737 hdev->sco_cnt, hdev->le_cnt, hdev->iso_cnt);
3738
3739 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
3740 /* Schedule queues and send stuff to HCI driver */
3741 hci_sched_sco(hdev);
3742 hci_sched_esco(hdev);
3743 hci_sched_iso(hdev);
3744 hci_sched_acl(hdev);
3745 hci_sched_le(hdev);
3746 }
3747
3748 /* Send next queued raw (unknown type) packet */
3749 while ((skb = skb_dequeue(&hdev->raw_q)))
3750 hci_send_frame(hdev, skb);
3751 }
3752
3753 /* ----- HCI RX task (incoming data processing) ----- */
3754
3755 /* ACL data packet */
3756 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3757 {
3758 struct hci_acl_hdr *hdr;
3759 struct hci_conn *conn;
3760 __u16 handle, flags;
3761
3762 hdr = skb_pull_data(skb, sizeof(*hdr));
3763 if (!hdr) {
3764 bt_dev_err(hdev, "ACL packet too small");
3765 goto drop;
3766 }
3767
3768 handle = __le16_to_cpu(hdr->handle);
3769 flags = hci_flags(handle);
3770 handle = hci_handle(handle);
3771
3772 bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
3773 handle, flags);
3774
3775 hdev->stat.acl_rx++;
3776
3777 hci_dev_lock(hdev);
3778 conn = hci_conn_hash_lookup_handle(hdev, handle);
3779 hci_dev_unlock(hdev);
3780
3781 if (conn) {
3782 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3783
3784 /* Send to upper protocol */
3785 l2cap_recv_acldata(conn, skb, flags);
3786 return;
3787 } else {
3788 bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
3789 handle);
3790 }
3791
3792 drop:
3793 kfree_skb(skb);
3794 }
3795
3796 /* SCO data packet */
3797 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3798 {
3799 struct hci_sco_hdr *hdr = (void *) skb->data;
3800 struct hci_conn *conn;
3801 __u16 handle, flags;
3802
3803 skb_pull(skb, HCI_SCO_HDR_SIZE);
3804
3805 handle = __le16_to_cpu(hdr->handle);
3806 flags = hci_flags(handle);
3807 handle = hci_handle(handle);
3808
3809 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3810 handle, flags);
3811
3812 hdev->stat.sco_rx++;
3813
3814 hci_dev_lock(hdev);
3815 conn = hci_conn_hash_lookup_handle(hdev, handle);
3816 hci_dev_unlock(hdev);
3817
3818 if (conn) {
3819 /* Send to upper protocol */
3820 hci_skb_pkt_status(skb) = flags & 0x03;
3821 sco_recv_scodata(conn, skb);
3822 return;
3823 } else {
3824 bt_dev_err_ratelimited(hdev, "SCO packet for unknown connection handle %d",
3825 handle);
3826 }
3827
3828 kfree_skb(skb);
3829 }
3830
3831 static void hci_isodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3832 {
3833 struct hci_iso_hdr *hdr;
3834 struct hci_conn *conn;
3835 __u16 handle, flags;
3836
3837 hdr = skb_pull_data(skb, sizeof(*hdr));
3838 if (!hdr) {
3839 bt_dev_err(hdev, "ISO packet too small");
3840 goto drop;
3841 }
3842
3843 handle = __le16_to_cpu(hdr->handle);
3844 flags = hci_flags(handle);
3845 handle = hci_handle(handle);
3846
3847 bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
3848 handle, flags);
3849
3850 hci_dev_lock(hdev);
3851 conn = hci_conn_hash_lookup_handle(hdev, handle);
3852 hci_dev_unlock(hdev);
3853
3854 if (!conn) {
3855 bt_dev_err(hdev, "ISO packet for unknown connection handle %d",
3856 handle);
3857 goto drop;
3858 }
3859
3860 /* Send to upper protocol */
3861 iso_recv(conn, skb, flags);
3862 return;
3863
3864 drop:
3865 kfree_skb(skb);
3866 }
3867
3868 static bool hci_req_is_complete(struct hci_dev *hdev)
3869 {
3870 struct sk_buff *skb;
3871
3872 skb = skb_peek(&hdev->cmd_q);
3873 if (!skb)
3874 return true;
3875
3876 return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
3877 }
3878
3879 static void hci_resend_last(struct hci_dev *hdev)
3880 {
3881 struct hci_command_hdr *sent;
3882 struct sk_buff *skb;
3883 u16 opcode;
3884
3885 if (!hdev->sent_cmd)
3886 return;
3887
3888 sent = (void *) hdev->sent_cmd->data;
3889 opcode = __le16_to_cpu(sent->opcode);
3890 if (opcode == HCI_OP_RESET)
3891 return;
3892
3893 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3894 if (!skb)
3895 return;
3896
3897 skb_queue_head(&hdev->cmd_q, skb);
3898 queue_work(hdev->workqueue, &hdev->cmd_work);
3899 }
3900
3901 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
3902 hci_req_complete_t *req_complete,
3903 hci_req_complete_skb_t *req_complete_skb)
3904 {
3905 struct sk_buff *skb;
3906 unsigned long flags;
3907
3908 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3909
3910 /* If the completed command doesn't match the last one that was
3911 * sent we need to do special handling of it.
3912 */
3913 if (!hci_sent_cmd_data(hdev, opcode)) {
3914 /* Some CSR based controllers generate a spontaneous
3915 * reset complete event during init and any pending
3916 * command will never be completed. In such a case we
3917 * need to resend whatever was the last sent
3918 * command.
3919 */
3920 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3921 hci_resend_last(hdev);
3922
3923 return;
3924 }
3925
3926 /* If we reach this point this event matches the last command sent */
3927 hci_dev_clear_flag(hdev, HCI_CMD_PENDING);
3928
3929 	/* If the command succeeded and there are still more commands in
3930 	 * this request, the request is not yet complete.
3931 */
3932 if (!status && !hci_req_is_complete(hdev))
3933 return;
3934
3935 skb = hdev->req_skb;
3936
3937 /* If this was the last command in a request the complete
3938 * callback would be found in hdev->req_skb instead of the
3939 * command queue (hdev->cmd_q).
3940 */
3941 if (skb && bt_cb(skb)->hci.req_flags & HCI_REQ_SKB) {
3942 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
3943 return;
3944 }
3945
3946 if (skb && bt_cb(skb)->hci.req_complete) {
3947 *req_complete = bt_cb(skb)->hci.req_complete;
3948 return;
3949 }
3950
3951 /* Remove all pending commands belonging to this request */
3952 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3953 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
3954 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
3955 __skb_queue_head(&hdev->cmd_q, skb);
3956 break;
3957 }
3958
3959 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
3960 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
3961 else
3962 *req_complete = bt_cb(skb)->hci.req_complete;
3963 dev_kfree_skb_irq(skb);
3964 }
3965 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3966 }
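/* A hedged sketch of how a caller such as the Command Complete event handler
 * in hci_event.c is expected to consume the callbacks filled in above; the
 * local variable names here are illustrative, not copied from that file:
 *
 *	hci_req_complete_t req_complete = NULL;
 *	hci_req_complete_skb_t req_complete_skb = NULL;
 *
 *	hci_req_cmd_complete(hdev, opcode, status, &req_complete,
 *			     &req_complete_skb);
 *	if (req_complete)
 *		req_complete(hdev, status, opcode);
 *	else if (req_complete_skb)
 *		req_complete_skb(hdev, status, opcode, skb);
 */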
3967
3968 static void hci_rx_work(struct work_struct *work)
3969 {
3970 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
3971 struct sk_buff *skb;
3972
3973 BT_DBG("%s", hdev->name);
3974
3975 /* The kcov_remote functions are used to collect packet parsing
3976 * coverage information from this background thread and to associate
3977 * that coverage with the syscall thread which originally injected
3978 * the packet. This helps with fuzzing the kernel.
3979 */
3980 for (; (skb = skb_dequeue(&hdev->rx_q)); kcov_remote_stop()) {
3981 kcov_remote_start_common(skb_get_kcov_handle(skb));
3982
3983 /* Send copy to monitor */
3984 hci_send_to_monitor(hdev, skb);
3985
3986 if (atomic_read(&hdev->promisc)) {
3987 /* Send copy to the sockets */
3988 hci_send_to_sock(hdev, skb);
3989 }
3990
3991 /* If the device has been opened in HCI_USER_CHANNEL,
3992 * userspace has exclusive access to the device.
3993 * While the device is in HCI_INIT, we still need to
3994 * process incoming packets so that the driver can
3995 * complete its setup().
3996 */
3997 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
3998 !test_bit(HCI_INIT, &hdev->flags)) {
3999 kfree_skb(skb);
4000 continue;
4001 }
4002
4003 if (test_bit(HCI_INIT, &hdev->flags)) {
4004 /* Don't process data packets in these states. */
4005 switch (hci_skb_pkt_type(skb)) {
4006 case HCI_ACLDATA_PKT:
4007 case HCI_SCODATA_PKT:
4008 case HCI_ISODATA_PKT:
4009 kfree_skb(skb);
4010 continue;
4011 }
4012 }
4013
4014 /* Process frame */
4015 switch (hci_skb_pkt_type(skb)) {
4016 case HCI_EVENT_PKT:
4017 BT_DBG("%s Event packet", hdev->name);
4018 hci_event_packet(hdev, skb);
4019 break;
4020
4021 case HCI_ACLDATA_PKT:
4022 BT_DBG("%s ACL data packet", hdev->name);
4023 hci_acldata_packet(hdev, skb);
4024 break;
4025
4026 case HCI_SCODATA_PKT:
4027 BT_DBG("%s SCO data packet", hdev->name);
4028 hci_scodata_packet(hdev, skb);
4029 break;
4030
4031 case HCI_ISODATA_PKT:
4032 BT_DBG("%s ISO data packet", hdev->name);
4033 hci_isodata_packet(hdev, skb);
4034 break;
4035
4036 default:
4037 kfree_skb(skb);
4038 break;
4039 }
4040 }
4041 }
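/* Packets reach hdev->rx_q from the driver side; the usual entry point is
 * hci_recv_frame(), which drivers call with the packet type already set on
 * the skb. A minimal driver-side sketch (hypothetical, for illustration
 * only; buf/len stand for data received from the transport):
 *
 *	skb = bt_skb_alloc(len, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_put_data(skb, buf, len);
 *	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
 *	return hci_recv_frame(hdev, skb);
 */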
4042
4043 static void hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb)
4044 {
4045 int err;
4046
4047 bt_dev_dbg(hdev, "skb %p", skb);
4048
4049 kfree_skb(hdev->sent_cmd);
4050
4051 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4052 if (!hdev->sent_cmd) {
4053 skb_queue_head(&hdev->cmd_q, skb);
4054 queue_work(hdev->workqueue, &hdev->cmd_work);
4055 return;
4056 }
4057
4058 err = hci_send_frame(hdev, skb);
4059 if (err < 0) {
4060 hci_cmd_sync_cancel_sync(hdev, -err);
4061 return;
4062 }
4063
4064 if (hci_req_status_pend(hdev) &&
4065 !hci_dev_test_and_set_flag(hdev, HCI_CMD_PENDING)) {
4066 kfree_skb(hdev->req_skb);
4067 hdev->req_skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4068 }
4069
4070 atomic_dec(&hdev->cmd_cnt);
4071 }
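/* The two clones above serve different consumers: sent_cmd is what
 * hci_sent_cmd_data() (and hci_resend_last() above) match command events
 * against, while req_skb keeps the completion context of a synchronous
 * request that is still pending. The command credit consumed by the final
 * atomic_dec() is returned when the controller reports free slots via the
 * Num_HCI_Command_Packets field of a Command Complete/Status event.
 */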
4072
4073 static void hci_cmd_work(struct work_struct *work)
4074 {
4075 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4076 struct sk_buff *skb;
4077
4078 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4079 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4080
4081 /* Send queued commands */
4082 if (atomic_read(&hdev->cmd_cnt)) {
4083 skb = skb_dequeue(&hdev->cmd_q);
4084 if (!skb)
4085 return;
4086
4087 hci_send_cmd_sync(hdev, skb);
4088
4089 rcu_read_lock();
4090 if (test_bit(HCI_RESET, &hdev->flags) ||
4091 hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
4092 cancel_delayed_work(&hdev->cmd_timer);
4093 else
4094 queue_delayed_work(hdev->workqueue, &hdev->cmd_timer,
4095 HCI_CMD_TIMEOUT);
4096 rcu_read_unlock();
4097 }
4098 }
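/* A hedged usage note: this work item is how queued commands get pushed to
 * the controller, e.g. hci_send_cmd() tail-queues the skb and then kicks the
 * work, roughly:
 *
 *	skb_queue_tail(&hdev->cmd_q, skb);
 *	queue_work(hdev->workqueue, &hdev->cmd_work);
 *
 * (a sketch of the enqueue pattern used elsewhere in this file), while
 * cmd_timer acts as a watchdog in case the controller never answers the
 * command that was just sent.
 */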
4099