1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
24 */
25
26 /* Bluetooth HCI core. */
27
28 #include <linux/export.h>
29 #include <linux/rfkill.h>
30 #include <linux/debugfs.h>
31 #include <linux/crypto.h>
32 #include <linux/kcov.h>
33 #include <linux/property.h>
34 #include <linux/suspend.h>
35 #include <linux/wait.h>
36 #include <asm/unaligned.h>
37
38 #include <net/bluetooth/bluetooth.h>
39 #include <net/bluetooth/hci_core.h>
40 #include <net/bluetooth/l2cap.h>
41 #include <net/bluetooth/mgmt.h>
42
43 #include "hci_request.h"
44 #include "hci_debugfs.h"
45 #include "smp.h"
46 #include "leds.h"
47 #include "msft.h"
48 #include "aosp.h"
49 #include "hci_codec.h"
50
51 static void hci_rx_work(struct work_struct *work);
52 static void hci_cmd_work(struct work_struct *work);
53 static void hci_tx_work(struct work_struct *work);
54
55 /* HCI device list */
56 LIST_HEAD(hci_dev_list);
57 DEFINE_RWLOCK(hci_dev_list_lock);
58
59 /* HCI callback list */
60 LIST_HEAD(hci_cb_list);
61 DEFINE_MUTEX(hci_cb_list_lock);
62
63 /* HCI ID Numbering */
64 static DEFINE_IDA(hci_index_ida);
65
66 /* Get HCI device by index.
67 * Device is held on return. */
68 struct hci_dev *hci_dev_get(int index)
69 {
70 struct hci_dev *hdev = NULL, *d;
71
72 BT_DBG("%d", index);
73
74 if (index < 0)
75 return NULL;
76
77 read_lock(&hci_dev_list_lock);
78 list_for_each_entry(d, &hci_dev_list, list) {
79 if (d->id == index) {
80 hdev = hci_dev_hold(d);
81 break;
82 }
83 }
84 read_unlock(&hci_dev_list_lock);
85 return hdev;
86 }
87
88 /* ---- Inquiry support ---- */
89
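/* Returns true while device discovery is in progress, i.e. while the
 * discovery state machine is in DISCOVERY_FINDING or DISCOVERY_RESOLVING.
 */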
90 bool hci_discovery_active(struct hci_dev *hdev)
91 {
92 struct discovery_state *discov = &hdev->discovery;
93
94 switch (discov->state) {
95 case DISCOVERY_FINDING:
96 case DISCOVERY_RESOLVING:
97 return true;
98
99 default:
100 return false;
101 }
102 }
103
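/* Update the discovery state machine and, on the STOPPED and FINDING
 * transitions, notify the management interface via mgmt_discovering().
 */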
104 void hci_discovery_set_state(struct hci_dev *hdev, int state)
105 {
106 int old_state = hdev->discovery.state;
107
108 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
109
110 if (old_state == state)
111 return;
112
113 hdev->discovery.state = state;
114
115 switch (state) {
116 case DISCOVERY_STOPPED:
117 hci_update_passive_scan(hdev);
118
119 if (old_state != DISCOVERY_STARTING)
120 mgmt_discovering(hdev, 0);
121 break;
122 case DISCOVERY_STARTING:
123 break;
124 case DISCOVERY_FINDING:
125 mgmt_discovering(hdev, 1);
126 break;
127 case DISCOVERY_RESOLVING:
128 break;
129 case DISCOVERY_STOPPING:
130 break;
131 }
132 }
133
134 void hci_inquiry_cache_flush(struct hci_dev *hdev)
135 {
136 struct discovery_state *cache = &hdev->discovery;
137 struct inquiry_entry *p, *n;
138
139 list_for_each_entry_safe(p, n, &cache->all, all) {
140 list_del(&p->all);
141 kfree(p);
142 }
143
144 INIT_LIST_HEAD(&cache->unknown);
145 INIT_LIST_HEAD(&cache->resolve);
146 }
147
148 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
149 bdaddr_t *bdaddr)
150 {
151 struct discovery_state *cache = &hdev->discovery;
152 struct inquiry_entry *e;
153
154 BT_DBG("cache %p, %pMR", cache, bdaddr);
155
156 list_for_each_entry(e, &cache->all, all) {
157 if (!bacmp(&e->data.bdaddr, bdaddr))
158 return e;
159 }
160
161 return NULL;
162 }
163
164 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
165 bdaddr_t *bdaddr)
166 {
167 struct discovery_state *cache = &hdev->discovery;
168 struct inquiry_entry *e;
169
170 BT_DBG("cache %p, %pMR", cache, bdaddr);
171
172 list_for_each_entry(e, &cache->unknown, list) {
173 if (!bacmp(&e->data.bdaddr, bdaddr))
174 return e;
175 }
176
177 return NULL;
178 }
179
180 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
181 bdaddr_t *bdaddr,
182 int state)
183 {
184 struct discovery_state *cache = &hdev->discovery;
185 struct inquiry_entry *e;
186
187 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
188
189 list_for_each_entry(e, &cache->resolve, list) {
190 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
191 return e;
192 if (!bacmp(&e->data.bdaddr, bdaddr))
193 return e;
194 }
195
196 return NULL;
197 }
198
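/* Re-insert @ie so the resolve list stays ordered by signal strength
 * (entries still pending name resolution are skipped in the comparison).
 */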
199 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
200 struct inquiry_entry *ie)
201 {
202 struct discovery_state *cache = &hdev->discovery;
203 struct list_head *pos = &cache->resolve;
204 struct inquiry_entry *p;
205
206 list_del(&ie->list);
207
208 list_for_each_entry(p, &cache->resolve, list) {
209 if (p->name_state != NAME_PENDING &&
210 abs(p->data.rssi) >= abs(ie->data.rssi))
211 break;
212 pos = &p->list;
213 }
214
215 list_add(&ie->list, pos);
216 }
217
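/* Add or refresh an inquiry cache entry and return the MGMT_DEV_FOUND_*
 * flags that should accompany the corresponding Device Found event.
 */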
218 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
219 bool name_known)
220 {
221 struct discovery_state *cache = &hdev->discovery;
222 struct inquiry_entry *ie;
223 u32 flags = 0;
224
225 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
226
227 hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
228
229 if (!data->ssp_mode)
230 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
231
232 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
233 if (ie) {
234 if (!ie->data.ssp_mode)
235 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
236
237 if (ie->name_state == NAME_NEEDED &&
238 data->rssi != ie->data.rssi) {
239 ie->data.rssi = data->rssi;
240 hci_inquiry_cache_update_resolve(hdev, ie);
241 }
242
243 goto update;
244 }
245
246 /* Entry not in the cache. Add new one. */
247 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
248 if (!ie) {
249 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
250 goto done;
251 }
252
253 list_add(&ie->all, &cache->all);
254
255 if (name_known) {
256 ie->name_state = NAME_KNOWN;
257 } else {
258 ie->name_state = NAME_NOT_KNOWN;
259 list_add(&ie->list, &cache->unknown);
260 }
261
262 update:
263 if (name_known && ie->name_state != NAME_KNOWN &&
264 ie->name_state != NAME_PENDING) {
265 ie->name_state = NAME_KNOWN;
266 list_del(&ie->list);
267 }
268
269 memcpy(&ie->data, data, sizeof(*data));
270 ie->timestamp = jiffies;
271 cache->timestamp = jiffies;
272
273 if (ie->name_state == NAME_NOT_KNOWN)
274 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
275
276 done:
277 return flags;
278 }
279
280 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
281 {
282 struct discovery_state *cache = &hdev->discovery;
283 struct inquiry_info *info = (struct inquiry_info *) buf;
284 struct inquiry_entry *e;
285 int copied = 0;
286
287 list_for_each_entry(e, &cache->all, all) {
288 struct inquiry_data *data = &e->data;
289
290 if (copied >= num)
291 break;
292
293 bacpy(&info->bdaddr, &data->bdaddr);
294 info->pscan_rep_mode = data->pscan_rep_mode;
295 info->pscan_period_mode = data->pscan_period_mode;
296 info->pscan_mode = data->pscan_mode;
297 memcpy(info->dev_class, data->dev_class, 3);
298 info->clock_offset = data->clock_offset;
299
300 info++;
301 copied++;
302 }
303
304 BT_DBG("cache %p, copied %d", cache, copied);
305 return copied;
306 }
307
308 static int hci_inq_req(struct hci_request *req, unsigned long opt)
309 {
310 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
311 struct hci_dev *hdev = req->hdev;
312 struct hci_cp_inquiry cp;
313
314 BT_DBG("%s", hdev->name);
315
316 if (test_bit(HCI_INQUIRY, &hdev->flags))
317 return 0;
318
319 /* Start Inquiry */
320 memcpy(&cp.lap, &ir->lap, 3);
321 cp.length = ir->length;
322 cp.num_rsp = ir->num_rsp;
323 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
324
325 return 0;
326 }
327
328 int hci_inquiry(void __user *arg)
329 {
330 __u8 __user *ptr = arg;
331 struct hci_inquiry_req ir;
332 struct hci_dev *hdev;
333 int err = 0, do_inquiry = 0, max_rsp;
334 long timeo;
335 __u8 *buf;
336
337 if (copy_from_user(&ir, ptr, sizeof(ir)))
338 return -EFAULT;
339
340 hdev = hci_dev_get(ir.dev_id);
341 if (!hdev)
342 return -ENODEV;
343
344 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
345 err = -EBUSY;
346 goto done;
347 }
348
349 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
350 err = -EOPNOTSUPP;
351 goto done;
352 }
353
354 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
355 err = -EOPNOTSUPP;
356 goto done;
357 }
358
359 /* Restrict maximum inquiry length to 60 seconds */
360 if (ir.length > 60) {
361 err = -EINVAL;
362 goto done;
363 }
364
365 hci_dev_lock(hdev);
366 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
367 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
368 hci_inquiry_cache_flush(hdev);
369 do_inquiry = 1;
370 }
371 hci_dev_unlock(hdev);
372
373 timeo = ir.length * msecs_to_jiffies(2000);
374
375 if (do_inquiry) {
376 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
377 timeo, NULL);
378 if (err < 0)
379 goto done;
380
381 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
382 * cleared). If it is interrupted by a signal, return -EINTR.
383 */
384 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
385 TASK_INTERRUPTIBLE)) {
386 err = -EINTR;
387 goto done;
388 }
389 }
390
391 /* For an unlimited number of responses we will use a buffer with
392 * 255 entries
393 */
394 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
395
396 /* cache_dump can't sleep. Therefore we allocate a temporary buffer and then
397 * copy it to the user space.
398 */
399 buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
400 if (!buf) {
401 err = -ENOMEM;
402 goto done;
403 }
404
405 hci_dev_lock(hdev);
406 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
407 hci_dev_unlock(hdev);
408
409 BT_DBG("num_rsp %d", ir.num_rsp);
410
411 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
412 ptr += sizeof(ir);
413 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
414 ir.num_rsp))
415 err = -EFAULT;
416 } else
417 err = -EFAULT;
418
419 kfree(buf);
420
421 done:
422 hci_dev_put(hdev);
423 return err;
424 }
425
426 static int hci_dev_do_open(struct hci_dev *hdev)
427 {
428 int ret = 0;
429
430 BT_DBG("%s %p", hdev->name, hdev);
431
432 hci_req_sync_lock(hdev);
433
434 ret = hci_dev_open_sync(hdev);
435
436 hci_req_sync_unlock(hdev);
437 return ret;
438 }
439
440 /* ---- HCI ioctl helpers ---- */
441
442 int hci_dev_open(__u16 dev)
443 {
444 struct hci_dev *hdev;
445 int err;
446
447 hdev = hci_dev_get(dev);
448 if (!hdev)
449 return -ENODEV;
450
451 /* Devices that are marked as unconfigured can only be powered
452 * up as user channel. Trying to bring them up as normal devices
453 * will result in a failure. Only user channel operation is
454 * possible.
455 *
456 * When this function is called for a user channel, the flag
457 * HCI_USER_CHANNEL will be set first before attempting to
458 * open the device.
459 */
460 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
461 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
462 err = -EOPNOTSUPP;
463 goto done;
464 }
465
466 /* We need to ensure that no other power on/off work is pending
467 * before proceeding to call hci_dev_do_open. This is
468 * particularly important if the setup procedure has not yet
469 * completed.
470 */
471 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
472 cancel_delayed_work(&hdev->power_off);
473
474 /* After this call it is guaranteed that the setup procedure
475 * has finished. This means that error conditions like RFKILL
476 * or no valid public or static random address apply.
477 */
478 flush_workqueue(hdev->req_workqueue);
479
480 /* For controllers not using the management interface and that
481 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
482 * so that pairing works for them. Once the management interface
483 * is in use this bit will be cleared again and userspace has
484 * to explicitly enable it.
485 */
486 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
487 !hci_dev_test_flag(hdev, HCI_MGMT))
488 hci_dev_set_flag(hdev, HCI_BONDABLE);
489
490 err = hci_dev_do_open(hdev);
491
492 done:
493 hci_dev_put(hdev);
494 return err;
495 }
496
497 int hci_dev_do_close(struct hci_dev *hdev)
498 {
499 int err;
500
501 BT_DBG("%s %p", hdev->name, hdev);
502
503 hci_req_sync_lock(hdev);
504
505 err = hci_dev_close_sync(hdev);
506
507 hci_req_sync_unlock(hdev);
508
509 return err;
510 }
511
512 int hci_dev_close(__u16 dev)
513 {
514 struct hci_dev *hdev;
515 int err;
516
517 hdev = hci_dev_get(dev);
518 if (!hdev)
519 return -ENODEV;
520
521 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
522 err = -EBUSY;
523 goto done;
524 }
525
526 cancel_work_sync(&hdev->power_on);
527 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
528 cancel_delayed_work(&hdev->power_off);
529
530 err = hci_dev_do_close(hdev);
531
532 done:
533 hci_dev_put(hdev);
534 return err;
535 }
536
537 static int hci_dev_do_reset(struct hci_dev *hdev)
538 {
539 int ret;
540
541 BT_DBG("%s %p", hdev->name, hdev);
542
543 hci_req_sync_lock(hdev);
544
545 /* Drop queues */
546 skb_queue_purge(&hdev->rx_q);
547 skb_queue_purge(&hdev->cmd_q);
548
549 /* Cancel these to avoid queueing non-chained pending work */
550 hci_dev_set_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
551 /* Wait for
552 *
553 * if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
554 * queue_delayed_work(&hdev->{cmd,ncmd}_timer)
555 *
556 * inside RCU section to see the flag or complete scheduling.
557 */
558 synchronize_rcu();
559 /* Explicitly cancel works in case scheduled after setting the flag. */
560 cancel_delayed_work(&hdev->cmd_timer);
561 cancel_delayed_work(&hdev->ncmd_timer);
562
563 /* Avoid potential lockdep warnings from the *_flush() calls by
564 * ensuring the workqueue is empty up front.
565 */
566 drain_workqueue(hdev->workqueue);
567
568 hci_dev_lock(hdev);
569 hci_inquiry_cache_flush(hdev);
570 hci_conn_hash_flush(hdev);
571 hci_dev_unlock(hdev);
572
573 if (hdev->flush)
574 hdev->flush(hdev);
575
576 hci_dev_clear_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
577
578 atomic_set(&hdev->cmd_cnt, 1);
579 hdev->acl_cnt = 0;
580 hdev->sco_cnt = 0;
581 hdev->le_cnt = 0;
582 hdev->iso_cnt = 0;
583
584 ret = hci_reset_sync(hdev);
585
586 hci_req_sync_unlock(hdev);
587 return ret;
588 }
589
590 int hci_dev_reset(__u16 dev)
591 {
592 struct hci_dev *hdev;
593 int err;
594
595 hdev = hci_dev_get(dev);
596 if (!hdev)
597 return -ENODEV;
598
599 if (!test_bit(HCI_UP, &hdev->flags)) {
600 err = -ENETDOWN;
601 goto done;
602 }
603
604 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
605 err = -EBUSY;
606 goto done;
607 }
608
609 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
610 err = -EOPNOTSUPP;
611 goto done;
612 }
613
614 err = hci_dev_do_reset(hdev);
615
616 done:
617 hci_dev_put(hdev);
618 return err;
619 }
620
621 int hci_dev_reset_stat(__u16 dev)
622 {
623 struct hci_dev *hdev;
624 int ret = 0;
625
626 hdev = hci_dev_get(dev);
627 if (!hdev)
628 return -ENODEV;
629
630 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
631 ret = -EBUSY;
632 goto done;
633 }
634
635 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
636 ret = -EOPNOTSUPP;
637 goto done;
638 }
639
640 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
641
642 done:
643 hci_dev_put(hdev);
644 return ret;
645 }
646
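/* Keep the HCI_CONNECTABLE and HCI_DISCOVERABLE flags in sync with a scan
 * enable value written through the legacy HCISETSCAN ioctl and let the
 * management interface know about the resulting settings change.
 */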
647 static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan)
648 {
649 bool conn_changed, discov_changed;
650
651 BT_DBG("%s scan 0x%02x", hdev->name, scan);
652
653 if ((scan & SCAN_PAGE))
654 conn_changed = !hci_dev_test_and_set_flag(hdev,
655 HCI_CONNECTABLE);
656 else
657 conn_changed = hci_dev_test_and_clear_flag(hdev,
658 HCI_CONNECTABLE);
659
660 if ((scan & SCAN_INQUIRY)) {
661 discov_changed = !hci_dev_test_and_set_flag(hdev,
662 HCI_DISCOVERABLE);
663 } else {
664 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
665 discov_changed = hci_dev_test_and_clear_flag(hdev,
666 HCI_DISCOVERABLE);
667 }
668
669 if (!hci_dev_test_flag(hdev, HCI_MGMT))
670 return;
671
672 if (conn_changed || discov_changed) {
673 /* In case this was disabled through mgmt */
674 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
675
676 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
677 hci_update_adv_data(hdev, hdev->cur_adv_instance);
678
679 mgmt_new_settings(hdev);
680 }
681 }
682
683 int hci_dev_cmd(unsigned int cmd, void __user *arg)
684 {
685 struct hci_dev *hdev;
686 struct hci_dev_req dr;
687 __le16 policy;
688 int err = 0;
689
690 if (copy_from_user(&dr, arg, sizeof(dr)))
691 return -EFAULT;
692
693 hdev = hci_dev_get(dr.dev_id);
694 if (!hdev)
695 return -ENODEV;
696
697 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
698 err = -EBUSY;
699 goto done;
700 }
701
702 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
703 err = -EOPNOTSUPP;
704 goto done;
705 }
706
707 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
708 err = -EOPNOTSUPP;
709 goto done;
710 }
711
712 switch (cmd) {
713 case HCISETAUTH:
714 err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_AUTH_ENABLE,
715 1, &dr.dev_opt, HCI_CMD_TIMEOUT);
716 break;
717
718 case HCISETENCRYPT:
719 if (!lmp_encrypt_capable(hdev)) {
720 err = -EOPNOTSUPP;
721 break;
722 }
723
724 if (!test_bit(HCI_AUTH, &hdev->flags)) {
725 /* Auth must be enabled first */
726 err = hci_cmd_sync_status(hdev,
727 HCI_OP_WRITE_AUTH_ENABLE,
728 1, &dr.dev_opt,
729 HCI_CMD_TIMEOUT);
730 if (err)
731 break;
732 }
733
734 err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_ENCRYPT_MODE,
735 1, &dr.dev_opt, HCI_CMD_TIMEOUT);
736 break;
737
738 case HCISETSCAN:
739 err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE,
740 1, &dr.dev_opt, HCI_CMD_TIMEOUT);
741
742 /* Ensure that the connectable and discoverable states
743 * get correctly modified as this was a non-mgmt change.
744 */
745 if (!err)
746 hci_update_passive_scan_state(hdev, dr.dev_opt);
747 break;
748
749 case HCISETLINKPOL:
750 policy = cpu_to_le16(dr.dev_opt);
751
752 err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
753 2, &policy, HCI_CMD_TIMEOUT);
754 break;
755
756 case HCISETLINKMODE:
757 hdev->link_mode = ((__u16) dr.dev_opt) &
758 (HCI_LM_MASTER | HCI_LM_ACCEPT);
759 break;
760
761 case HCISETPTYPE:
762 if (hdev->pkt_type == (__u16) dr.dev_opt)
763 break;
764
765 hdev->pkt_type = (__u16) dr.dev_opt;
766 mgmt_phy_configuration_changed(hdev, NULL);
767 break;
768
769 case HCISETACLMTU:
770 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
771 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
772 break;
773
774 case HCISETSCOMTU:
775 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
776 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
777 break;
778
779 default:
780 err = -EINVAL;
781 break;
782 }
783
784 done:
785 hci_dev_put(hdev);
786 return err;
787 }
788
789 int hci_get_dev_list(void __user *arg)
790 {
791 struct hci_dev *hdev;
792 struct hci_dev_list_req *dl;
793 struct hci_dev_req *dr;
794 int n = 0, size, err;
795 __u16 dev_num;
796
797 if (get_user(dev_num, (__u16 __user *) arg))
798 return -EFAULT;
799
800 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
801 return -EINVAL;
802
803 size = sizeof(*dl) + dev_num * sizeof(*dr);
804
805 dl = kzalloc(size, GFP_KERNEL);
806 if (!dl)
807 return -ENOMEM;
808
809 dr = dl->dev_req;
810
811 read_lock(&hci_dev_list_lock);
812 list_for_each_entry(hdev, &hci_dev_list, list) {
813 unsigned long flags = hdev->flags;
814
815 /* When the auto-off is configured it means the transport
816 * is running, but in that case still indicate that the
817 * device is actually down.
818 */
819 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
820 flags &= ~BIT(HCI_UP);
821
822 (dr + n)->dev_id = hdev->id;
823 (dr + n)->dev_opt = flags;
824
825 if (++n >= dev_num)
826 break;
827 }
828 read_unlock(&hci_dev_list_lock);
829
830 dl->dev_num = n;
831 size = sizeof(*dl) + n * sizeof(*dr);
832
833 err = copy_to_user(arg, dl, size);
834 kfree(dl);
835
836 return err ? -EFAULT : 0;
837 }
838
839 int hci_get_dev_info(void __user *arg)
840 {
841 struct hci_dev *hdev;
842 struct hci_dev_info di;
843 unsigned long flags;
844 int err = 0;
845
846 if (copy_from_user(&di, arg, sizeof(di)))
847 return -EFAULT;
848
849 hdev = hci_dev_get(di.dev_id);
850 if (!hdev)
851 return -ENODEV;
852
853 /* When the auto-off is configured it means the transport
854 * is running, but in that case still indicate that the
855 * device is actually down.
856 */
857 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
858 flags = hdev->flags & ~BIT(HCI_UP);
859 else
860 flags = hdev->flags;
861
862 strscpy(di.name, hdev->name, sizeof(di.name));
863 di.bdaddr = hdev->bdaddr;
864 di.type = (hdev->bus & 0x0f);
865 di.flags = flags;
866 di.pkt_type = hdev->pkt_type;
867 if (lmp_bredr_capable(hdev)) {
868 di.acl_mtu = hdev->acl_mtu;
869 di.acl_pkts = hdev->acl_pkts;
870 di.sco_mtu = hdev->sco_mtu;
871 di.sco_pkts = hdev->sco_pkts;
872 } else {
873 di.acl_mtu = hdev->le_mtu;
874 di.acl_pkts = hdev->le_pkts;
875 di.sco_mtu = 0;
876 di.sco_pkts = 0;
877 }
878 di.link_policy = hdev->link_policy;
879 di.link_mode = hdev->link_mode;
880
881 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
882 memcpy(&di.features, &hdev->features, sizeof(di.features));
883
884 if (copy_to_user(arg, &di, sizeof(di)))
885 err = -EFAULT;
886
887 hci_dev_put(hdev);
888
889 return err;
890 }
891
892 /* ---- Interface to HCI drivers ---- */
893
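/* rfkill callback: close the device when it gets blocked, unless it is
 * still in setup/config or owned by a user channel.
 */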
894 static int hci_rfkill_set_block(void *data, bool blocked)
895 {
896 struct hci_dev *hdev = data;
897
898 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
899
900 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
901 return -EBUSY;
902
903 if (blocked) {
904 hci_dev_set_flag(hdev, HCI_RFKILLED);
905 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
906 !hci_dev_test_flag(hdev, HCI_CONFIG))
907 hci_dev_do_close(hdev);
908 } else {
909 hci_dev_clear_flag(hdev, HCI_RFKILLED);
910 }
911
912 return 0;
913 }
914
915 static const struct rfkill_ops hci_rfkill_ops = {
916 .set_block = hci_rfkill_set_block,
917 };
918
919 static void hci_power_on(struct work_struct *work)
920 {
921 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
922 int err;
923
924 BT_DBG("%s", hdev->name);
925
926 if (test_bit(HCI_UP, &hdev->flags) &&
927 hci_dev_test_flag(hdev, HCI_MGMT) &&
928 hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
929 cancel_delayed_work(&hdev->power_off);
930 err = hci_powered_update_sync(hdev);
931 mgmt_power_on(hdev, err);
932 return;
933 }
934
935 err = hci_dev_do_open(hdev);
936 if (err < 0) {
937 hci_dev_lock(hdev);
938 mgmt_set_powered_failed(hdev, err);
939 hci_dev_unlock(hdev);
940 return;
941 }
942
943 /* During the HCI setup phase, a few error conditions are
944 * ignored and they need to be checked now. If they are still
945 * valid, it is important to turn the device back off.
946 */
947 if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
948 hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
949 (!bacmp(&hdev->bdaddr, BDADDR_ANY) &&
950 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
951 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
952 hci_dev_do_close(hdev);
953 } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
954 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
955 HCI_AUTO_OFF_TIMEOUT);
956 }
957
958 if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
959 /* For unconfigured devices, set the HCI_RAW flag
960 * so that userspace can easily identify them.
961 */
962 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
963 set_bit(HCI_RAW, &hdev->flags);
964
965 /* For fully configured devices, this will send
966 * the Index Added event. For unconfigured devices,
967 * it will send the Unconfigured Index Added event.
968 *
969 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
970 * and no event will be sent.
971 */
972 mgmt_index_added(hdev);
973 } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
974 /* When the controller is now configured, then it
975 * is important to clear the HCI_RAW flag.
976 */
977 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
978 clear_bit(HCI_RAW, &hdev->flags);
979
980 /* Powering on the controller with HCI_CONFIG set only
981 * happens with the transition from unconfigured to
982 * configured. This will send the Index Added event.
983 */
984 mgmt_index_added(hdev);
985 }
986 }
987
988 static void hci_power_off(struct work_struct *work)
989 {
990 struct hci_dev *hdev = container_of(work, struct hci_dev,
991 power_off.work);
992
993 BT_DBG("%s", hdev->name);
994
995 hci_dev_do_close(hdev);
996 }
997
998 static void hci_error_reset(struct work_struct *work)
999 {
1000 struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
1001
1002 hci_dev_hold(hdev);
1003 BT_DBG("%s", hdev->name);
1004
1005 if (hdev->hw_error)
1006 hdev->hw_error(hdev, hdev->hw_error_code);
1007 else
1008 bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
1009
1010 if (!hci_dev_do_close(hdev))
1011 hci_dev_do_open(hdev);
1012
1013 hci_dev_put(hdev);
1014 }
1015
1016 void hci_uuids_clear(struct hci_dev *hdev)
1017 {
1018 struct bt_uuid *uuid, *tmp;
1019
1020 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1021 list_del(&uuid->list);
1022 kfree(uuid);
1023 }
1024 }
1025
1026 void hci_link_keys_clear(struct hci_dev *hdev)
1027 {
1028 struct link_key *key, *tmp;
1029
1030 list_for_each_entry_safe(key, tmp, &hdev->link_keys, list) {
1031 list_del_rcu(&key->list);
1032 kfree_rcu(key, rcu);
1033 }
1034 }
1035
1036 void hci_smp_ltks_clear(struct hci_dev *hdev)
1037 {
1038 struct smp_ltk *k, *tmp;
1039
1040 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1041 list_del_rcu(&k->list);
1042 kfree_rcu(k, rcu);
1043 }
1044 }
1045
1046 void hci_smp_irks_clear(struct hci_dev *hdev)
1047 {
1048 struct smp_irk *k, *tmp;
1049
1050 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
1051 list_del_rcu(&k->list);
1052 kfree_rcu(k, rcu);
1053 }
1054 }
1055
1056 void hci_blocked_keys_clear(struct hci_dev *hdev)
1057 {
1058 struct blocked_key *b, *tmp;
1059
1060 list_for_each_entry_safe(b, tmp, &hdev->blocked_keys, list) {
1061 list_del_rcu(&b->list);
1062 kfree_rcu(b, rcu);
1063 }
1064 }
1065
1066 bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
1067 {
1068 bool blocked = false;
1069 struct blocked_key *b;
1070
1071 rcu_read_lock();
1072 list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
1073 if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
1074 blocked = true;
1075 break;
1076 }
1077 }
1078
1079 rcu_read_unlock();
1080 return blocked;
1081 }
1082
1083 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1084 {
1085 struct link_key *k;
1086
1087 rcu_read_lock();
1088 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
1089 if (bacmp(bdaddr, &k->bdaddr) == 0) {
1090 rcu_read_unlock();
1091
1092 if (hci_is_blocked_key(hdev,
1093 HCI_BLOCKED_KEY_TYPE_LINKKEY,
1094 k->val)) {
1095 bt_dev_warn_ratelimited(hdev,
1096 "Link key blocked for %pMR",
1097 &k->bdaddr);
1098 return NULL;
1099 }
1100
1101 return k;
1102 }
1103 }
1104 rcu_read_unlock();
1105
1106 return NULL;
1107 }
1108
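/* Decide whether a newly created BR/EDR link key should be stored
 * persistently, based on the key type and the bonding requirements of
 * both sides of the connection.
 */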
1109 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1110 u8 key_type, u8 old_key_type)
1111 {
1112 /* Legacy key */
1113 if (key_type < 0x03)
1114 return true;
1115
1116 /* Debug keys are insecure so don't store them persistently */
1117 if (key_type == HCI_LK_DEBUG_COMBINATION)
1118 return false;
1119
1120 /* Changed combination key and there's no previous one */
1121 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1122 return false;
1123
1124 /* Security mode 3 case */
1125 if (!conn)
1126 return true;
1127
1128 /* BR/EDR key derived using SC from an LE link */
1129 if (conn->type == LE_LINK)
1130 return true;
1131
1132 /* Neither local nor remote side had no-bonding as requirement */
1133 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1134 return true;
1135
1136 /* Local side had dedicated bonding as requirement */
1137 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1138 return true;
1139
1140 /* Remote side had dedicated bonding as requirement */
1141 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1142 return true;
1143
1144 /* If none of the above criteria match, then don't store the key
1145 * persistently */
1146 return false;
1147 }
1148
1149 static u8 ltk_role(u8 type)
1150 {
1151 if (type == SMP_LTK)
1152 return HCI_ROLE_MASTER;
1153
1154 return HCI_ROLE_SLAVE;
1155 }
1156
1157 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1158 u8 addr_type, u8 role)
1159 {
1160 struct smp_ltk *k;
1161
1162 rcu_read_lock();
1163 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
1164 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
1165 continue;
1166
1167 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
1168 rcu_read_unlock();
1169
1170 if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
1171 k->val)) {
1172 bt_dev_warn_ratelimited(hdev,
1173 "LTK blocked for %pMR",
1174 &k->bdaddr);
1175 return NULL;
1176 }
1177
1178 return k;
1179 }
1180 }
1181 rcu_read_unlock();
1182
1183 return NULL;
1184 }
1185
1186 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
1187 {
1188 struct smp_irk *irk_to_return = NULL;
1189 struct smp_irk *irk;
1190
1191 rcu_read_lock();
1192 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1193 if (!bacmp(&irk->rpa, rpa)) {
1194 irk_to_return = irk;
1195 goto done;
1196 }
1197 }
1198
1199 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1200 if (smp_irk_matches(hdev, irk->val, rpa)) {
1201 bacpy(&irk->rpa, rpa);
1202 irk_to_return = irk;
1203 goto done;
1204 }
1205 }
1206
1207 done:
1208 if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
1209 irk_to_return->val)) {
1210 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
1211 &irk_to_return->bdaddr);
1212 irk_to_return = NULL;
1213 }
1214
1215 rcu_read_unlock();
1216
1217 return irk_to_return;
1218 }
1219
1220 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1221 u8 addr_type)
1222 {
1223 struct smp_irk *irk_to_return = NULL;
1224 struct smp_irk *irk;
1225
1226 /* Identity Address must be public or static random */
1227 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
1228 return NULL;
1229
1230 rcu_read_lock();
1231 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1232 if (addr_type == irk->addr_type &&
1233 bacmp(bdaddr, &irk->bdaddr) == 0) {
1234 irk_to_return = irk;
1235 goto done;
1236 }
1237 }
1238
1239 done:
1240
1241 if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
1242 irk_to_return->val)) {
1243 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
1244 &irk_to_return->bdaddr);
1245 irk_to_return = NULL;
1246 }
1247
1248 rcu_read_unlock();
1249
1250 return irk_to_return;
1251 }
1252
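/* Store a new BR/EDR link key, or refresh the existing entry for the same
 * address; *persistent reports whether the key should be stored permanently.
 */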
1253 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
1254 bdaddr_t *bdaddr, u8 *val, u8 type,
1255 u8 pin_len, bool *persistent)
1256 {
1257 struct link_key *key, *old_key;
1258 u8 old_key_type;
1259
1260 old_key = hci_find_link_key(hdev, bdaddr);
1261 if (old_key) {
1262 old_key_type = old_key->type;
1263 key = old_key;
1264 } else {
1265 old_key_type = conn ? conn->key_type : 0xff;
1266 key = kzalloc(sizeof(*key), GFP_KERNEL);
1267 if (!key)
1268 return NULL;
1269 list_add_rcu(&key->list, &hdev->link_keys);
1270 }
1271
1272 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
1273
1274 /* Some buggy controller combinations generate a changed
1275 * combination key for legacy pairing even when there's no
1276 * previous key */
1277 if (type == HCI_LK_CHANGED_COMBINATION &&
1278 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1279 type = HCI_LK_COMBINATION;
1280 if (conn)
1281 conn->key_type = type;
1282 }
1283
1284 bacpy(&key->bdaddr, bdaddr);
1285 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1286 key->pin_len = pin_len;
1287
1288 if (type == HCI_LK_CHANGED_COMBINATION)
1289 key->type = old_key_type;
1290 else
1291 key->type = type;
1292
1293 if (persistent)
1294 *persistent = hci_persistent_key(hdev, conn, type,
1295 old_key_type);
1296
1297 return key;
1298 }
1299
1300 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1301 u8 addr_type, u8 type, u8 authenticated,
1302 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
1303 {
1304 struct smp_ltk *key, *old_key;
1305 u8 role = ltk_role(type);
1306
1307 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
1308 if (old_key)
1309 key = old_key;
1310 else {
1311 key = kzalloc(sizeof(*key), GFP_KERNEL);
1312 if (!key)
1313 return NULL;
1314 list_add_rcu(&key->list, &hdev->long_term_keys);
1315 }
1316
1317 bacpy(&key->bdaddr, bdaddr);
1318 key->bdaddr_type = addr_type;
1319 memcpy(key->val, tk, sizeof(key->val));
1320 key->authenticated = authenticated;
1321 key->ediv = ediv;
1322 key->rand = rand;
1323 key->enc_size = enc_size;
1324 key->type = type;
1325
1326 return key;
1327 }
1328
1329 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1330 u8 addr_type, u8 val[16], bdaddr_t *rpa)
1331 {
1332 struct smp_irk *irk;
1333
1334 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
1335 if (!irk) {
1336 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
1337 if (!irk)
1338 return NULL;
1339
1340 bacpy(&irk->bdaddr, bdaddr);
1341 irk->addr_type = addr_type;
1342
1343 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
1344 }
1345
1346 memcpy(irk->val, val, 16);
1347 bacpy(&irk->rpa, rpa);
1348
1349 return irk;
1350 }
1351
1352 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1353 {
1354 struct link_key *key;
1355
1356 key = hci_find_link_key(hdev, bdaddr);
1357 if (!key)
1358 return -ENOENT;
1359
1360 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1361
1362 list_del_rcu(&key->list);
1363 kfree_rcu(key, rcu);
1364
1365 return 0;
1366 }
1367
1368 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
1369 {
1370 struct smp_ltk *k, *tmp;
1371 int removed = 0;
1372
1373 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1374 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
1375 continue;
1376
1377 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1378
1379 list_del_rcu(&k->list);
1380 kfree_rcu(k, rcu);
1381 removed++;
1382 }
1383
1384 return removed ? 0 : -ENOENT;
1385 }
1386
1387 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
1388 {
1389 struct smp_irk *k, *tmp;
1390
1391 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
1392 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
1393 continue;
1394
1395 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1396
1397 list_del_rcu(&k->list);
1398 kfree_rcu(k, rcu);
1399 }
1400 }
1401
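/* Check whether a bond exists with the given address: a stored link key
 * for BR/EDR, or a stored LTK (after IRK resolution) for LE.
 */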
1402 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1403 {
1404 struct smp_ltk *k;
1405 struct smp_irk *irk;
1406 u8 addr_type;
1407
1408 if (type == BDADDR_BREDR) {
1409 if (hci_find_link_key(hdev, bdaddr))
1410 return true;
1411 return false;
1412 }
1413
1414 /* Convert to HCI addr type which struct smp_ltk uses */
1415 if (type == BDADDR_LE_PUBLIC)
1416 addr_type = ADDR_LE_DEV_PUBLIC;
1417 else
1418 addr_type = ADDR_LE_DEV_RANDOM;
1419
1420 irk = hci_get_irk(hdev, bdaddr, addr_type);
1421 if (irk) {
1422 bdaddr = &irk->bdaddr;
1423 addr_type = irk->addr_type;
1424 }
1425
1426 rcu_read_lock();
1427 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
1428 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
1429 rcu_read_unlock();
1430 return true;
1431 }
1432 }
1433 rcu_read_unlock();
1434
1435 return false;
1436 }
1437
1438 /* HCI command timer function */
1439 static void hci_cmd_timeout(struct work_struct *work)
1440 {
1441 struct hci_dev *hdev = container_of(work, struct hci_dev,
1442 cmd_timer.work);
1443
1444 if (hdev->req_skb) {
1445 u16 opcode = hci_skb_opcode(hdev->req_skb);
1446
1447 bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
1448
1449 hci_cmd_sync_cancel_sync(hdev, ETIMEDOUT);
1450 } else {
1451 bt_dev_err(hdev, "command tx timeout");
1452 }
1453
1454 if (hdev->cmd_timeout)
1455 hdev->cmd_timeout(hdev);
1456
1457 atomic_set(&hdev->cmd_cnt, 1);
1458 queue_work(hdev->workqueue, &hdev->cmd_work);
1459 }
1460
1461 /* HCI ncmd timer function */
1462 static void hci_ncmd_timeout(struct work_struct *work)
1463 {
1464 struct hci_dev *hdev = container_of(work, struct hci_dev,
1465 ncmd_timer.work);
1466
1467 bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0");
1468
1469 /* During HCI_INIT phase no events can be injected if the ncmd timer
1470 * triggers since the procedure has its own timeout handling.
1471 */
1472 if (test_bit(HCI_INIT, &hdev->flags))
1473 return;
1474
1475 /* This is an irrecoverable state, inject hardware error event */
1476 hci_reset_dev(hdev);
1477 }
1478
1479 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1480 bdaddr_t *bdaddr, u8 bdaddr_type)
1481 {
1482 struct oob_data *data;
1483
1484 list_for_each_entry(data, &hdev->remote_oob_data, list) {
1485 if (bacmp(bdaddr, &data->bdaddr) != 0)
1486 continue;
1487 if (data->bdaddr_type != bdaddr_type)
1488 continue;
1489 return data;
1490 }
1491
1492 return NULL;
1493 }
1494
1495 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
1496 u8 bdaddr_type)
1497 {
1498 struct oob_data *data;
1499
1500 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
1501 if (!data)
1502 return -ENOENT;
1503
1504 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
1505
1506 list_del(&data->list);
1507 kfree(data);
1508
1509 return 0;
1510 }
1511
1512 void hci_remote_oob_data_clear(struct hci_dev *hdev)
1513 {
1514 struct oob_data *data, *n;
1515
1516 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1517 list_del(&data->list);
1518 kfree(data);
1519 }
1520 }
1521
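/* Store remote OOB pairing data; data->present encodes which values are
 * valid (0x01 = P-192 only, 0x02 = P-256 only, 0x03 = both).
 */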
1522 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
1523 u8 bdaddr_type, u8 *hash192, u8 *rand192,
1524 u8 *hash256, u8 *rand256)
1525 {
1526 struct oob_data *data;
1527
1528 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
1529 if (!data) {
1530 data = kmalloc(sizeof(*data), GFP_KERNEL);
1531 if (!data)
1532 return -ENOMEM;
1533
1534 bacpy(&data->bdaddr, bdaddr);
1535 data->bdaddr_type = bdaddr_type;
1536 list_add(&data->list, &hdev->remote_oob_data);
1537 }
1538
1539 if (hash192 && rand192) {
1540 memcpy(data->hash192, hash192, sizeof(data->hash192));
1541 memcpy(data->rand192, rand192, sizeof(data->rand192));
1542 if (hash256 && rand256)
1543 data->present = 0x03;
1544 } else {
1545 memset(data->hash192, 0, sizeof(data->hash192));
1546 memset(data->rand192, 0, sizeof(data->rand192));
1547 if (hash256 && rand256)
1548 data->present = 0x02;
1549 else
1550 data->present = 0x00;
1551 }
1552
1553 if (hash256 && rand256) {
1554 memcpy(data->hash256, hash256, sizeof(data->hash256));
1555 memcpy(data->rand256, rand256, sizeof(data->rand256));
1556 } else {
1557 memset(data->hash256, 0, sizeof(data->hash256));
1558 memset(data->rand256, 0, sizeof(data->rand256));
1559 if (hash192 && rand192)
1560 data->present = 0x01;
1561 }
1562
1563 BT_DBG("%s for %pMR", hdev->name, bdaddr);
1564
1565 return 0;
1566 }
1567
1568 /* This function requires the caller holds hdev->lock */
1569 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
1570 {
1571 struct adv_info *adv_instance;
1572
1573 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
1574 if (adv_instance->instance == instance)
1575 return adv_instance;
1576 }
1577
1578 return NULL;
1579 }
1580
1581 /* This function requires the caller holds hdev->lock */
1582 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
1583 {
1584 struct adv_info *cur_instance;
1585
1586 cur_instance = hci_find_adv_instance(hdev, instance);
1587 if (!cur_instance)
1588 return NULL;
1589
1590 if (cur_instance == list_last_entry(&hdev->adv_instances,
1591 struct adv_info, list))
1592 return list_first_entry(&hdev->adv_instances,
1593 struct adv_info, list);
1594 else
1595 return list_next_entry(cur_instance, list);
1596 }
1597
1598 /* This function requires the caller holds hdev->lock */
1599 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
1600 {
1601 struct adv_info *adv_instance;
1602
1603 adv_instance = hci_find_adv_instance(hdev, instance);
1604 if (!adv_instance)
1605 return -ENOENT;
1606
1607 BT_DBG("%s removing %dMR", hdev->name, instance);
1608
1609 if (hdev->cur_adv_instance == instance) {
1610 if (hdev->adv_instance_timeout) {
1611 cancel_delayed_work(&hdev->adv_instance_expire);
1612 hdev->adv_instance_timeout = 0;
1613 }
1614 hdev->cur_adv_instance = 0x00;
1615 }
1616
1617 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1618
1619 list_del(&adv_instance->list);
1620 kfree(adv_instance);
1621
1622 hdev->adv_instance_cnt--;
1623
1624 return 0;
1625 }
1626
1627 void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
1628 {
1629 struct adv_info *adv_instance, *n;
1630
1631 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
1632 adv_instance->rpa_expired = rpa_expired;
1633 }
1634
1635 /* This function requires the caller holds hdev->lock */
1636 void hci_adv_instances_clear(struct hci_dev *hdev)
1637 {
1638 struct adv_info *adv_instance, *n;
1639
1640 if (hdev->adv_instance_timeout) {
1641 cancel_delayed_work(&hdev->adv_instance_expire);
1642 hdev->adv_instance_timeout = 0;
1643 }
1644
1645 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
1646 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1647 list_del(&adv_instance->list);
1648 kfree(adv_instance);
1649 }
1650
1651 hdev->adv_instance_cnt = 0;
1652 hdev->cur_adv_instance = 0x00;
1653 }
1654
1655 static void adv_instance_rpa_expired(struct work_struct *work)
1656 {
1657 struct adv_info *adv_instance = container_of(work, struct adv_info,
1658 rpa_expired_cb.work);
1659
1660 BT_DBG("");
1661
1662 adv_instance->rpa_expired = true;
1663 }
1664
1665 /* This function requires the caller holds hdev->lock */
1666 struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance,
1667 u32 flags, u16 adv_data_len, u8 *adv_data,
1668 u16 scan_rsp_len, u8 *scan_rsp_data,
1669 u16 timeout, u16 duration, s8 tx_power,
1670 u32 min_interval, u32 max_interval,
1671 u8 mesh_handle)
1672 {
1673 struct adv_info *adv;
1674
1675 adv = hci_find_adv_instance(hdev, instance);
1676 if (adv) {
1677 memset(adv->adv_data, 0, sizeof(adv->adv_data));
1678 memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
1679 memset(adv->per_adv_data, 0, sizeof(adv->per_adv_data));
1680 } else {
1681 if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
1682 instance < 1 || instance > hdev->le_num_of_adv_sets + 1)
1683 return ERR_PTR(-EOVERFLOW);
1684
1685 adv = kzalloc(sizeof(*adv), GFP_KERNEL);
1686 if (!adv)
1687 return ERR_PTR(-ENOMEM);
1688
1689 adv->pending = true;
1690 adv->instance = instance;
1691 list_add(&adv->list, &hdev->adv_instances);
1692 hdev->adv_instance_cnt++;
1693 }
1694
1695 adv->flags = flags;
1696 adv->min_interval = min_interval;
1697 adv->max_interval = max_interval;
1698 adv->tx_power = tx_power;
1699 /* Defining a mesh_handle changes the timing units to ms,
1700 * rather than seconds, and ties the instance to the requested
1701 * mesh_tx queue.
1702 */
1703 adv->mesh = mesh_handle;
1704
1705 hci_set_adv_instance_data(hdev, instance, adv_data_len, adv_data,
1706 scan_rsp_len, scan_rsp_data);
1707
1708 adv->timeout = timeout;
1709 adv->remaining_time = timeout;
1710
1711 if (duration == 0)
1712 adv->duration = hdev->def_multi_adv_rotation_duration;
1713 else
1714 adv->duration = duration;
1715
1716 INIT_DELAYED_WORK(&adv->rpa_expired_cb, adv_instance_rpa_expired);
1717
1718 BT_DBG("%s for %dMR", hdev->name, instance);
1719
1720 return adv;
1721 }
1722
1723 /* This function requires the caller holds hdev->lock */
1724 struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance,
1725 u32 flags, u8 data_len, u8 *data,
1726 u32 min_interval, u32 max_interval)
1727 {
1728 struct adv_info *adv;
1729
1730 adv = hci_add_adv_instance(hdev, instance, flags, 0, NULL, 0, NULL,
1731 0, 0, HCI_ADV_TX_POWER_NO_PREFERENCE,
1732 min_interval, max_interval, 0);
1733 if (IS_ERR(adv))
1734 return adv;
1735
1736 adv->periodic = true;
1737 adv->per_adv_data_len = data_len;
1738
1739 if (data)
1740 memcpy(adv->per_adv_data, data, data_len);
1741
1742 return adv;
1743 }
1744
1745 /* This function requires the caller holds hdev->lock */
1746 int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
1747 u16 adv_data_len, u8 *adv_data,
1748 u16 scan_rsp_len, u8 *scan_rsp_data)
1749 {
1750 struct adv_info *adv;
1751
1752 adv = hci_find_adv_instance(hdev, instance);
1753
1754 /* If advertisement doesn't exist, we can't modify its data */
1755 if (!adv)
1756 return -ENOENT;
1757
1758 if (adv_data_len && ADV_DATA_CMP(adv, adv_data, adv_data_len)) {
1759 memset(adv->adv_data, 0, sizeof(adv->adv_data));
1760 memcpy(adv->adv_data, adv_data, adv_data_len);
1761 adv->adv_data_len = adv_data_len;
1762 adv->adv_data_changed = true;
1763 }
1764
1765 if (scan_rsp_len && SCAN_RSP_CMP(adv, scan_rsp_data, scan_rsp_len)) {
1766 memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
1767 memcpy(adv->scan_rsp_data, scan_rsp_data, scan_rsp_len);
1768 adv->scan_rsp_len = scan_rsp_len;
1769 adv->scan_rsp_changed = true;
1770 }
1771
1772 /* Mark as changed if there are flags which would affect it */
1773 if (((adv->flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) ||
1774 adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1775 adv->scan_rsp_changed = true;
1776
1777 return 0;
1778 }
1779
1780 /* This function requires the caller holds hdev->lock */
1781 u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance)
1782 {
1783 u32 flags;
1784 struct adv_info *adv;
1785
1786 if (instance == 0x00) {
1787 /* Instance 0 always manages the "Tx Power" and "Flags"
1788 * fields
1789 */
1790 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
1791
1792 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
1793 * corresponds to the "connectable" instance flag.
1794 */
1795 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
1796 flags |= MGMT_ADV_FLAG_CONNECTABLE;
1797
1798 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1799 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
1800 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1801 flags |= MGMT_ADV_FLAG_DISCOV;
1802
1803 return flags;
1804 }
1805
1806 adv = hci_find_adv_instance(hdev, instance);
1807
1808 /* Return 0 when we got an invalid instance identifier. */
1809 if (!adv)
1810 return 0;
1811
1812 return adv->flags;
1813 }
1814
1815 bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
1816 {
1817 struct adv_info *adv;
1818
1819 /* Instance 0x00 always sets the local name */
1820 if (instance == 0x00)
1821 return true;
1822
1823 adv = hci_find_adv_instance(hdev, instance);
1824 if (!adv)
1825 return false;
1826
1827 if (adv->flags & MGMT_ADV_FLAG_APPEARANCE ||
1828 adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1829 return true;
1830
1831 return adv->scan_rsp_len ? true : false;
1832 }
1833
1834 /* This function requires the caller holds hdev->lock */
1835 void hci_adv_monitors_clear(struct hci_dev *hdev)
1836 {
1837 struct adv_monitor *monitor;
1838 int handle;
1839
1840 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
1841 hci_free_adv_monitor(hdev, monitor);
1842
1843 idr_destroy(&hdev->adv_monitors_idr);
1844 }
1845
1846 /* Frees the monitor structure and does some bookkeeping.
1847 * This function requires the caller holds hdev->lock.
1848 */
1849 void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
1850 {
1851 struct adv_pattern *pattern;
1852 struct adv_pattern *tmp;
1853
1854 if (!monitor)
1855 return;
1856
1857 list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
1858 list_del(&pattern->list);
1859 kfree(pattern);
1860 }
1861
1862 if (monitor->handle)
1863 idr_remove(&hdev->adv_monitors_idr, monitor->handle);
1864
1865 if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED)
1866 hdev->adv_monitors_cnt--;
1867
1868 kfree(monitor);
1869 }
1870
1871 /* Assigns handle to a monitor, and if offloading is supported and power is on,
1872 * also attempts to forward the request to the controller.
1873 * This function requires the caller holds hci_req_sync_lock.
1874 */
1875 int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
1876 {
1877 int min, max, handle;
1878 int status = 0;
1879
1880 if (!monitor)
1881 return -EINVAL;
1882
1883 hci_dev_lock(hdev);
1884
1885 min = HCI_MIN_ADV_MONITOR_HANDLE;
1886 max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
1887 handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
1888 GFP_KERNEL);
1889
1890 hci_dev_unlock(hdev);
1891
1892 if (handle < 0)
1893 return handle;
1894
1895 monitor->handle = handle;
1896
1897 if (!hdev_is_powered(hdev))
1898 return status;
1899
1900 switch (hci_get_adv_monitor_offload_ext(hdev)) {
1901 case HCI_ADV_MONITOR_EXT_NONE:
1902 bt_dev_dbg(hdev, "add monitor %d status %d",
1903 monitor->handle, status);
1904 /* Message was not forwarded to controller - not an error */
1905 break;
1906
1907 case HCI_ADV_MONITOR_EXT_MSFT:
1908 status = msft_add_monitor_pattern(hdev, monitor);
1909 bt_dev_dbg(hdev, "add monitor %d msft status %d",
1910 handle, status);
1911 break;
1912 }
1913
1914 return status;
1915 }
1916
1917 /* Attempts to tell the controller and free the monitor. If somehow the
1918 * controller doesn't have a corresponding handle, remove anyway.
1919 * This function requires the caller holds hci_req_sync_lock.
1920 */
1921 static int hci_remove_adv_monitor(struct hci_dev *hdev,
1922 struct adv_monitor *monitor)
1923 {
1924 int status = 0;
1925 int handle;
1926
1927 switch (hci_get_adv_monitor_offload_ext(hdev)) {
1928 case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
1929 bt_dev_dbg(hdev, "remove monitor %d status %d",
1930 monitor->handle, status);
1931 goto free_monitor;
1932
1933 case HCI_ADV_MONITOR_EXT_MSFT:
1934 handle = monitor->handle;
1935 status = msft_remove_monitor(hdev, monitor);
1936 bt_dev_dbg(hdev, "remove monitor %d msft status %d",
1937 handle, status);
1938 break;
1939 }
1940
1941 /* In case no matching handle is registered, just free the monitor */
1942 if (status == -ENOENT)
1943 goto free_monitor;
1944
1945 return status;
1946
1947 free_monitor:
1948 if (status == -ENOENT)
1949 bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
1950 monitor->handle);
1951 hci_free_adv_monitor(hdev, monitor);
1952
1953 return status;
1954 }
1955
1956 /* This function requires the caller holds hci_req_sync_lock */
1957 int hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle)
1958 {
1959 struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);
1960
1961 if (!monitor)
1962 return -EINVAL;
1963
1964 return hci_remove_adv_monitor(hdev, monitor);
1965 }
1966
1967 /* This function requires the caller holds hci_req_sync_lock */
1968 int hci_remove_all_adv_monitor(struct hci_dev *hdev)
1969 {
1970 struct adv_monitor *monitor;
1971 int idr_next_id = 0;
1972 int status = 0;
1973
1974 while (1) {
1975 monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
1976 if (!monitor)
1977 break;
1978
1979 status = hci_remove_adv_monitor(hdev, monitor);
1980 if (status)
1981 return status;
1982
1983 idr_next_id++;
1984 }
1985
1986 return status;
1987 }
1988
1989 /* This function requires the caller holds hdev->lock */
1990 bool hci_is_adv_monitoring(struct hci_dev *hdev)
1991 {
1992 return !idr_is_empty(&hdev->adv_monitors_idr);
1993 }
1994
1995 int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
1996 {
1997 if (msft_monitor_supported(hdev))
1998 return HCI_ADV_MONITOR_EXT_MSFT;
1999
2000 return HCI_ADV_MONITOR_EXT_NONE;
2001 }
2002
hci_bdaddr_list_lookup(struct list_head * bdaddr_list,bdaddr_t * bdaddr,u8 type)2003 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2004 bdaddr_t *bdaddr, u8 type)
2005 {
2006 struct bdaddr_list *b;
2007
2008 list_for_each_entry(b, bdaddr_list, list) {
2009 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2010 return b;
2011 }
2012
2013 return NULL;
2014 }
2015
hci_bdaddr_list_lookup_with_irk(struct list_head * bdaddr_list,bdaddr_t * bdaddr,u8 type)2016 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
2017 struct list_head *bdaddr_list, bdaddr_t *bdaddr,
2018 u8 type)
2019 {
2020 struct bdaddr_list_with_irk *b;
2021
2022 list_for_each_entry(b, bdaddr_list, list) {
2023 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2024 return b;
2025 }
2026
2027 return NULL;
2028 }
2029
2030 struct bdaddr_list_with_flags *
hci_bdaddr_list_lookup_with_flags(struct list_head * bdaddr_list,bdaddr_t * bdaddr,u8 type)2031 hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
2032 bdaddr_t *bdaddr, u8 type)
2033 {
2034 struct bdaddr_list_with_flags *b;
2035
2036 list_for_each_entry(b, bdaddr_list, list) {
2037 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2038 return b;
2039 }
2040
2041 return NULL;
2042 }
2043
hci_bdaddr_list_clear(struct list_head * bdaddr_list)2044 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2045 {
2046 struct bdaddr_list *b, *n;
2047
2048 list_for_each_entry_safe(b, n, bdaddr_list, list) {
2049 list_del(&b->list);
2050 kfree(b);
2051 }
2052 }
2053
hci_bdaddr_list_add(struct list_head * list,bdaddr_t * bdaddr,u8 type)2054 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2055 {
2056 struct bdaddr_list *entry;
2057
2058 if (!bacmp(bdaddr, BDADDR_ANY))
2059 return -EBADF;
2060
2061 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2062 return -EEXIST;
2063
2064 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2065 if (!entry)
2066 return -ENOMEM;
2067
2068 bacpy(&entry->bdaddr, bdaddr);
2069 entry->bdaddr_type = type;
2070
2071 list_add(&entry->list, list);
2072
2073 return 0;
2074 }
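/* A minimal usage sketch (not part of the original code): callers such as the
 * management interface typically guard these list helpers with hdev->lock and
 * check the return value for -EEXIST. The address and type values below are
 * purely illustrative.
 *
 *	hci_dev_lock(hdev);
 *	err = hci_bdaddr_list_add(&hdev->reject_list, &bdaddr,
 *				  ADDR_LE_DEV_PUBLIC);
 *	if (!err && hci_bdaddr_list_lookup(&hdev->reject_list, &bdaddr,
 *					   ADDR_LE_DEV_PUBLIC))
 *		bt_dev_dbg(hdev, "address added to reject list");
 *	hci_dev_unlock(hdev);
 */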
2075
hci_bdaddr_list_add_with_irk(struct list_head * list,bdaddr_t * bdaddr,u8 type,u8 * peer_irk,u8 * local_irk)2076 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2077 u8 type, u8 *peer_irk, u8 *local_irk)
2078 {
2079 struct bdaddr_list_with_irk *entry;
2080
2081 if (!bacmp(bdaddr, BDADDR_ANY))
2082 return -EBADF;
2083
2084 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2085 return -EEXIST;
2086
2087 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2088 if (!entry)
2089 return -ENOMEM;
2090
2091 bacpy(&entry->bdaddr, bdaddr);
2092 entry->bdaddr_type = type;
2093
2094 if (peer_irk)
2095 memcpy(entry->peer_irk, peer_irk, 16);
2096
2097 if (local_irk)
2098 memcpy(entry->local_irk, local_irk, 16);
2099
2100 list_add(&entry->list, list);
2101
2102 return 0;
2103 }
2104
hci_bdaddr_list_add_with_flags(struct list_head * list,bdaddr_t * bdaddr,u8 type,u32 flags)2105 int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2106 u8 type, u32 flags)
2107 {
2108 struct bdaddr_list_with_flags *entry;
2109
2110 if (!bacmp(bdaddr, BDADDR_ANY))
2111 return -EBADF;
2112
2113 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2114 return -EEXIST;
2115
2116 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2117 if (!entry)
2118 return -ENOMEM;
2119
2120 bacpy(&entry->bdaddr, bdaddr);
2121 entry->bdaddr_type = type;
2122 entry->flags = flags;
2123
2124 list_add(&entry->list, list);
2125
2126 return 0;
2127 }
2128
hci_bdaddr_list_del(struct list_head * list,bdaddr_t * bdaddr,u8 type)2129 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2130 {
2131 struct bdaddr_list *entry;
2132
2133 if (!bacmp(bdaddr, BDADDR_ANY)) {
2134 hci_bdaddr_list_clear(list);
2135 return 0;
2136 }
2137
2138 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2139 if (!entry)
2140 return -ENOENT;
2141
2142 list_del(&entry->list);
2143 kfree(entry);
2144
2145 return 0;
2146 }
2147
hci_bdaddr_list_del_with_irk(struct list_head * list,bdaddr_t * bdaddr,u8 type)2148 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2149 u8 type)
2150 {
2151 struct bdaddr_list_with_irk *entry;
2152
2153 if (!bacmp(bdaddr, BDADDR_ANY)) {
2154 hci_bdaddr_list_clear(list);
2155 return 0;
2156 }
2157
2158 entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
2159 if (!entry)
2160 return -ENOENT;
2161
2162 list_del(&entry->list);
2163 kfree(entry);
2164
2165 return 0;
2166 }
2167
hci_bdaddr_list_del_with_flags(struct list_head * list,bdaddr_t * bdaddr,u8 type)2168 int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2169 u8 type)
2170 {
2171 struct bdaddr_list_with_flags *entry;
2172
2173 if (!bacmp(bdaddr, BDADDR_ANY)) {
2174 hci_bdaddr_list_clear(list);
2175 return 0;
2176 }
2177
2178 entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
2179 if (!entry)
2180 return -ENOENT;
2181
2182 list_del(&entry->list);
2183 kfree(entry);
2184
2185 return 0;
2186 }
2187
2188 /* This function requires the caller holds hdev->lock */
hci_conn_params_lookup(struct hci_dev * hdev,bdaddr_t * addr,u8 addr_type)2189 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2190 bdaddr_t *addr, u8 addr_type)
2191 {
2192 struct hci_conn_params *params;
2193
2194 list_for_each_entry(params, &hdev->le_conn_params, list) {
2195 if (bacmp(&params->addr, addr) == 0 &&
2196 params->addr_type == addr_type) {
2197 return params;
2198 }
2199 }
2200
2201 return NULL;
2202 }
2203
2204 /* This function requires the caller holds hdev->lock or rcu_read_lock */
hci_pend_le_action_lookup(struct list_head * list,bdaddr_t * addr,u8 addr_type)2205 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2206 bdaddr_t *addr, u8 addr_type)
2207 {
2208 struct hci_conn_params *param;
2209
2210 rcu_read_lock();
2211
2212 list_for_each_entry_rcu(param, list, action) {
2213 if (bacmp(&param->addr, addr) == 0 &&
2214 param->addr_type == addr_type) {
2215 rcu_read_unlock();
2216 return param;
2217 }
2218 }
2219
2220 rcu_read_unlock();
2221
2222 return NULL;
2223 }
2224
2225 /* This function requires the caller holds hdev->lock */
hci_pend_le_list_del_init(struct hci_conn_params * param)2226 void hci_pend_le_list_del_init(struct hci_conn_params *param)
2227 {
2228 if (list_empty(&param->action))
2229 return;
2230
2231 list_del_rcu(&param->action);
2232 synchronize_rcu();
2233 INIT_LIST_HEAD(&param->action);
2234 }
2235
2236 /* This function requires the caller holds hdev->lock */
hci_pend_le_list_add(struct hci_conn_params * param,struct list_head * list)2237 void hci_pend_le_list_add(struct hci_conn_params *param,
2238 struct list_head *list)
2239 {
2240 list_add_rcu(&param->action, list);
2241 }
2242
2243 /* This function requires the caller holds hdev->lock */
hci_conn_params_add(struct hci_dev * hdev,bdaddr_t * addr,u8 addr_type)2244 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2245 bdaddr_t *addr, u8 addr_type)
2246 {
2247 struct hci_conn_params *params;
2248
2249 params = hci_conn_params_lookup(hdev, addr, addr_type);
2250 if (params)
2251 return params;
2252
2253 params = kzalloc(sizeof(*params), GFP_KERNEL);
2254 if (!params) {
2255 bt_dev_err(hdev, "out of memory");
2256 return NULL;
2257 }
2258
2259 bacpy(&params->addr, addr);
2260 params->addr_type = addr_type;
2261
2262 list_add(&params->list, &hdev->le_conn_params);
2263 INIT_LIST_HEAD(&params->action);
2264
2265 params->conn_min_interval = hdev->le_conn_min_interval;
2266 params->conn_max_interval = hdev->le_conn_max_interval;
2267 params->conn_latency = hdev->le_conn_latency;
2268 params->supervision_timeout = hdev->le_supv_timeout;
2269 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2270
2271 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2272
2273 return params;
2274 }
2275
hci_conn_params_free(struct hci_conn_params * params)2276 void hci_conn_params_free(struct hci_conn_params *params)
2277 {
2278 hci_pend_le_list_del_init(params);
2279
2280 if (params->conn) {
2281 hci_conn_drop(params->conn);
2282 hci_conn_put(params->conn);
2283 }
2284
2285 list_del(&params->list);
2286 kfree(params);
2287 }
2288
2289 /* This function requires the caller holds hdev->lock */
hci_conn_params_del(struct hci_dev * hdev,bdaddr_t * addr,u8 addr_type)2290 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2291 {
2292 struct hci_conn_params *params;
2293
2294 params = hci_conn_params_lookup(hdev, addr, addr_type);
2295 if (!params)
2296 return;
2297
2298 hci_conn_params_free(params);
2299
2300 hci_update_passive_scan(hdev);
2301
2302 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2303 }
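/* Illustrative sketch (not from the original source): connection parameters
 * are created and tuned under hdev->lock, mirroring the lookup/add/del
 * helpers above. The address is a placeholder, and HCI_AUTO_CONN_ALWAYS is
 * assumed to be the auto-connect policy defined alongside
 * HCI_AUTO_CONN_DISABLED.
 *
 *	hci_dev_lock(hdev);
 *	params = hci_conn_params_add(hdev, &addr, ADDR_LE_DEV_PUBLIC);
 *	if (params)
 *		params->auto_connect = HCI_AUTO_CONN_ALWAYS;
 *	hci_dev_unlock(hdev);
 */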
2304
2305 /* This function requires the caller holds hdev->lock */
hci_conn_params_clear_disabled(struct hci_dev * hdev)2306 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2307 {
2308 struct hci_conn_params *params, *tmp;
2309
2310 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2311 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2312 continue;
2313
2314 /* If we are trying to establish a one-time connection to a disabled
2315 * device, leave the params but mark them as explicit (one-shot) only.
2316 */
2317 if (params->explicit_connect) {
2318 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2319 continue;
2320 }
2321
2322 hci_conn_params_free(params);
2323 }
2324
2325 BT_DBG("All LE disabled connection parameters were removed");
2326 }
2327
2328 /* This function requires the caller holds hdev->lock */
hci_conn_params_clear_all(struct hci_dev * hdev)2329 static void hci_conn_params_clear_all(struct hci_dev *hdev)
2330 {
2331 struct hci_conn_params *params, *tmp;
2332
2333 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2334 hci_conn_params_free(params);
2335
2336 BT_DBG("All LE connection parameters were removed");
2337 }
2338
2339 /* Copy the Identity Address of the controller.
2340 *
2341 * If the controller has a public BD_ADDR, then by default use that one.
2342 * If this is a LE only controller without a public address, default to
2343 * the static random address.
2344 *
2345 * For debugging purposes it is possible to force controllers with a
2346 * public address to use the static random address instead.
2347 *
2348 * In case BR/EDR has been disabled on a dual-mode controller and
2349 * userspace has configured a static address, then that address
2350 * becomes the identity address instead of the public BR/EDR address.
2351 */
hci_copy_identity_address(struct hci_dev * hdev,bdaddr_t * bdaddr,u8 * bdaddr_type)2352 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2353 u8 *bdaddr_type)
2354 {
2355 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2356 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2357 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2358 bacmp(&hdev->static_addr, BDADDR_ANY))) {
2359 bacpy(bdaddr, &hdev->static_addr);
2360 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2361 } else {
2362 bacpy(bdaddr, &hdev->bdaddr);
2363 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2364 }
2365 }
2366
hci_clear_wake_reason(struct hci_dev * hdev)2367 static void hci_clear_wake_reason(struct hci_dev *hdev)
2368 {
2369 hci_dev_lock(hdev);
2370
2371 hdev->wake_reason = 0;
2372 bacpy(&hdev->wake_addr, BDADDR_ANY);
2373 hdev->wake_addr_type = 0;
2374
2375 hci_dev_unlock(hdev);
2376 }
2377
hci_suspend_notifier(struct notifier_block * nb,unsigned long action,void * data)2378 static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
2379 void *data)
2380 {
2381 struct hci_dev *hdev =
2382 container_of(nb, struct hci_dev, suspend_notifier);
2383 int ret = 0;
2384
2385 /* Userspace has full control of this device. Do nothing. */
2386 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2387 return NOTIFY_DONE;
2388
2389 /* To avoid a potential race with hci_unregister_dev. */
2390 hci_dev_hold(hdev);
2391
2392 switch (action) {
2393 case PM_HIBERNATION_PREPARE:
2394 case PM_SUSPEND_PREPARE:
2395 ret = hci_suspend_dev(hdev);
2396 break;
2397 case PM_POST_HIBERNATION:
2398 case PM_POST_SUSPEND:
2399 ret = hci_resume_dev(hdev);
2400 break;
2401 }
2402
2403 if (ret)
2404 bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
2405 action, ret);
2406
2407 hci_dev_put(hdev);
2408 return NOTIFY_DONE;
2409 }
2410
2411 /* Alloc HCI device */
hci_alloc_dev_priv(int sizeof_priv)2412 struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
2413 {
2414 struct hci_dev *hdev;
2415 unsigned int alloc_size;
2416
2417 alloc_size = sizeof(*hdev);
2418 if (sizeof_priv) {
2419 /* Fixme: May need ALIGN-ment? */
2420 alloc_size += sizeof_priv;
2421 }
2422
2423 hdev = kzalloc(alloc_size, GFP_KERNEL);
2424 if (!hdev)
2425 return NULL;
2426
2427 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2428 hdev->esco_type = (ESCO_HV1);
2429 hdev->link_mode = (HCI_LM_ACCEPT);
2430 hdev->num_iac = 0x01; /* One IAC support is mandatory */
2431 hdev->io_capability = 0x03; /* No Input No Output */
2432 hdev->manufacturer = 0xffff; /* Default to internal use */
2433 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2434 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2435 hdev->adv_instance_cnt = 0;
2436 hdev->cur_adv_instance = 0x00;
2437 hdev->adv_instance_timeout = 0;
2438
2439 hdev->advmon_allowlist_duration = 300;
2440 hdev->advmon_no_filter_duration = 500;
2441 hdev->enable_advmon_interleave_scan = 0x00; /* Default to disable */
2442
2443 hdev->sniff_max_interval = 800;
2444 hdev->sniff_min_interval = 80;
2445
2446 hdev->le_adv_channel_map = 0x07;
2447 hdev->le_adv_min_interval = 0x0800;
2448 hdev->le_adv_max_interval = 0x0800;
2449 hdev->le_scan_interval = 0x0060;
2450 hdev->le_scan_window = 0x0030;
2451 hdev->le_scan_int_suspend = 0x0400;
2452 hdev->le_scan_window_suspend = 0x0012;
2453 hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
2454 hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
2455 hdev->le_scan_int_adv_monitor = 0x0060;
2456 hdev->le_scan_window_adv_monitor = 0x0030;
2457 hdev->le_scan_int_connect = 0x0060;
2458 hdev->le_scan_window_connect = 0x0060;
2459 hdev->le_conn_min_interval = 0x0018;
2460 hdev->le_conn_max_interval = 0x0028;
2461 hdev->le_conn_latency = 0x0000;
2462 hdev->le_supv_timeout = 0x002a;
2463 hdev->le_def_tx_len = 0x001b;
2464 hdev->le_def_tx_time = 0x0148;
2465 hdev->le_max_tx_len = 0x001b;
2466 hdev->le_max_tx_time = 0x0148;
2467 hdev->le_max_rx_len = 0x001b;
2468 hdev->le_max_rx_time = 0x0148;
2469 hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
2470 hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
2471 hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
2472 hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
2473 hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
2474 hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
2475 hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT;
2476 hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
2477 hdev->max_le_tx_power = HCI_TX_POWER_INVALID;
2478
2479 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
2480 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
2481 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
2482 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
2483 hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
2484 hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
2485
2486 /* default 1.28 sec page scan */
2487 hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
2488 hdev->def_page_scan_int = 0x0800;
2489 hdev->def_page_scan_window = 0x0012;
2490
2491 mutex_init(&hdev->lock);
2492 mutex_init(&hdev->req_lock);
2493 mutex_init(&hdev->mgmt_pending_lock);
2494
2495 ida_init(&hdev->unset_handle_ida);
2496
2497 INIT_LIST_HEAD(&hdev->mesh_pending);
2498 INIT_LIST_HEAD(&hdev->mgmt_pending);
2499 INIT_LIST_HEAD(&hdev->reject_list);
2500 INIT_LIST_HEAD(&hdev->accept_list);
2501 INIT_LIST_HEAD(&hdev->uuids);
2502 INIT_LIST_HEAD(&hdev->link_keys);
2503 INIT_LIST_HEAD(&hdev->long_term_keys);
2504 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
2505 INIT_LIST_HEAD(&hdev->remote_oob_data);
2506 INIT_LIST_HEAD(&hdev->le_accept_list);
2507 INIT_LIST_HEAD(&hdev->le_resolv_list);
2508 INIT_LIST_HEAD(&hdev->le_conn_params);
2509 INIT_LIST_HEAD(&hdev->pend_le_conns);
2510 INIT_LIST_HEAD(&hdev->pend_le_reports);
2511 INIT_LIST_HEAD(&hdev->conn_hash.list);
2512 INIT_LIST_HEAD(&hdev->adv_instances);
2513 INIT_LIST_HEAD(&hdev->blocked_keys);
2514 INIT_LIST_HEAD(&hdev->monitored_devices);
2515
2516 INIT_LIST_HEAD(&hdev->local_codecs);
2517 INIT_WORK(&hdev->rx_work, hci_rx_work);
2518 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2519 INIT_WORK(&hdev->tx_work, hci_tx_work);
2520 INIT_WORK(&hdev->power_on, hci_power_on);
2521 INIT_WORK(&hdev->error_reset, hci_error_reset);
2522
2523 hci_cmd_sync_init(hdev);
2524
2525 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2526
2527 skb_queue_head_init(&hdev->rx_q);
2528 skb_queue_head_init(&hdev->cmd_q);
2529 skb_queue_head_init(&hdev->raw_q);
2530
2531 init_waitqueue_head(&hdev->req_wait_q);
2532
2533 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
2534 INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);
2535
2536 hci_devcd_setup(hdev);
2537 hci_request_setup(hdev);
2538
2539 hci_init_sysfs(hdev);
2540 discovery_init(hdev);
2541
2542 return hdev;
2543 }
2544 EXPORT_SYMBOL(hci_alloc_dev_priv);
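/* A hedged example of how a driver might use the private-data variant (not
 * part of the original file). hci_get_priv() is assumed here to be the
 * accessor declared alongside this helper in hci_core.h, and my_drv_data is
 * a hypothetical driver structure.
 *
 *	struct my_drv_data { int irq; };
 *	struct hci_dev *hdev = hci_alloc_dev_priv(sizeof(struct my_drv_data));
 *	struct my_drv_data *priv;
 *
 *	if (hdev) {
 *		priv = hci_get_priv(hdev);
 *		priv->irq = -1;
 *	}
 */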
2545
2546 /* Free HCI device */
hci_free_dev(struct hci_dev * hdev)2547 void hci_free_dev(struct hci_dev *hdev)
2548 {
2549 /* will free via device release */
2550 put_device(&hdev->dev);
2551 }
2552 EXPORT_SYMBOL(hci_free_dev);
2553
2554 /* Register HCI device */
hci_register_dev(struct hci_dev * hdev)2555 int hci_register_dev(struct hci_dev *hdev)
2556 {
2557 int id, error;
2558
2559 if (!hdev->open || !hdev->close || !hdev->send)
2560 return -EINVAL;
2561
2562 id = ida_alloc_max(&hci_index_ida, HCI_MAX_ID - 1, GFP_KERNEL);
2563 if (id < 0)
2564 return id;
2565
2566 error = dev_set_name(&hdev->dev, "hci%u", id);
2567 if (error)
2568 return error;
2569
2570 hdev->name = dev_name(&hdev->dev);
2571 hdev->id = id;
2572
2573 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2574
2575 hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
2576 if (!hdev->workqueue) {
2577 error = -ENOMEM;
2578 goto err;
2579 }
2580
2581 hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
2582 hdev->name);
2583 if (!hdev->req_workqueue) {
2584 destroy_workqueue(hdev->workqueue);
2585 error = -ENOMEM;
2586 goto err;
2587 }
2588
2589 if (!IS_ERR_OR_NULL(bt_debugfs))
2590 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
2591
2592 error = device_add(&hdev->dev);
2593 if (error < 0)
2594 goto err_wqueue;
2595
2596 hci_leds_init(hdev);
2597
2598 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2599 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2600 hdev);
2601 if (hdev->rfkill) {
2602 if (rfkill_register(hdev->rfkill) < 0) {
2603 rfkill_destroy(hdev->rfkill);
2604 hdev->rfkill = NULL;
2605 }
2606 }
2607
2608 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2609 hci_dev_set_flag(hdev, HCI_RFKILLED);
2610
2611 hci_dev_set_flag(hdev, HCI_SETUP);
2612 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
2613
2614 /* Assume BR/EDR support until proven otherwise (such as
2615 * through reading supported features during init).
2616 */
2617 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
2618
2619 write_lock(&hci_dev_list_lock);
2620 list_add(&hdev->list, &hci_dev_list);
2621 write_unlock(&hci_dev_list_lock);
2622
2623 /* Devices that are marked for raw-only usage are unconfigured
2624 * and should not be included in normal operation.
2625 */
2626 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2627 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
2628
2629 /* Mark Remote Wakeup connection flag as supported if driver has wakeup
2630 * callback.
2631 */
2632 if (hdev->wakeup)
2633 hdev->conn_flags |= HCI_CONN_FLAG_REMOTE_WAKEUP;
2634
2635 hci_sock_dev_event(hdev, HCI_DEV_REG);
2636 hci_dev_hold(hdev);
2637
2638 error = hci_register_suspend_notifier(hdev);
2639 if (error)
2640 BT_WARN("register suspend notifier failed error:%d\n", error);
2641
2642 queue_work(hdev->req_workqueue, &hdev->power_on);
2643
2644 idr_init(&hdev->adv_monitors_idr);
2645 msft_register(hdev);
2646
2647 return id;
2648
2649 err_wqueue:
2650 debugfs_remove_recursive(hdev->debugfs);
2651 destroy_workqueue(hdev->workqueue);
2652 destroy_workqueue(hdev->req_workqueue);
2653 err:
2654 ida_free(&hci_index_ida, hdev->id);
2655
2656 return error;
2657 }
2658 EXPORT_SYMBOL(hci_register_dev);
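/* Sketch of the expected driver registration flow (illustrative only; the
 * callback names are placeholders). hci_register_dev() rejects a device
 * without open, close and send callbacks, so a typical probe path looks
 * roughly like:
 *
 *	hdev->open  = my_drv_open;
 *	hdev->close = my_drv_close;
 *	hdev->send  = my_drv_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */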
2659
2660 /* Unregister HCI device */
hci_unregister_dev(struct hci_dev * hdev)2661 void hci_unregister_dev(struct hci_dev *hdev)
2662 {
2663 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2664
2665 mutex_lock(&hdev->unregister_lock);
2666 hci_dev_set_flag(hdev, HCI_UNREGISTER);
2667 mutex_unlock(&hdev->unregister_lock);
2668
2669 write_lock(&hci_dev_list_lock);
2670 list_del(&hdev->list);
2671 write_unlock(&hci_dev_list_lock);
2672
2673 cancel_work_sync(&hdev->rx_work);
2674 cancel_work_sync(&hdev->cmd_work);
2675 cancel_work_sync(&hdev->tx_work);
2676 cancel_work_sync(&hdev->power_on);
2677 cancel_work_sync(&hdev->error_reset);
2678
2679 hci_cmd_sync_clear(hdev);
2680
2681 hci_unregister_suspend_notifier(hdev);
2682
2683 hci_dev_do_close(hdev);
2684
2685 if (!test_bit(HCI_INIT, &hdev->flags) &&
2686 !hci_dev_test_flag(hdev, HCI_SETUP) &&
2687 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
2688 hci_dev_lock(hdev);
2689 mgmt_index_removed(hdev);
2690 hci_dev_unlock(hdev);
2691 }
2692
2693 /* mgmt_index_removed should take care of emptying the
2694 * pending list */
2695 BUG_ON(!list_empty(&hdev->mgmt_pending));
2696
2697 hci_sock_dev_event(hdev, HCI_DEV_UNREG);
2698
2699 if (hdev->rfkill) {
2700 rfkill_unregister(hdev->rfkill);
2701 rfkill_destroy(hdev->rfkill);
2702 }
2703
2704 device_del(&hdev->dev);
2705 /* Actual cleanup is deferred until hci_release_dev(). */
2706 hci_dev_put(hdev);
2707 }
2708 EXPORT_SYMBOL(hci_unregister_dev);
2709
2710 /* Release HCI device */
hci_release_dev(struct hci_dev * hdev)2711 void hci_release_dev(struct hci_dev *hdev)
2712 {
2713 debugfs_remove_recursive(hdev->debugfs);
2714 kfree_const(hdev->hw_info);
2715 kfree_const(hdev->fw_info);
2716
2717 destroy_workqueue(hdev->workqueue);
2718 destroy_workqueue(hdev->req_workqueue);
2719
2720 hci_dev_lock(hdev);
2721 hci_bdaddr_list_clear(&hdev->reject_list);
2722 hci_bdaddr_list_clear(&hdev->accept_list);
2723 hci_uuids_clear(hdev);
2724 hci_link_keys_clear(hdev);
2725 hci_smp_ltks_clear(hdev);
2726 hci_smp_irks_clear(hdev);
2727 hci_remote_oob_data_clear(hdev);
2728 hci_adv_instances_clear(hdev);
2729 hci_adv_monitors_clear(hdev);
2730 hci_bdaddr_list_clear(&hdev->le_accept_list);
2731 hci_bdaddr_list_clear(&hdev->le_resolv_list);
2732 hci_conn_params_clear_all(hdev);
2733 hci_discovery_filter_clear(hdev);
2734 hci_blocked_keys_clear(hdev);
2735 hci_codec_list_clear(&hdev->local_codecs);
2736 msft_release(hdev);
2737 hci_dev_unlock(hdev);
2738
2739 ida_destroy(&hdev->unset_handle_ida);
2740 ida_free(&hci_index_ida, hdev->id);
2741 kfree_skb(hdev->sent_cmd);
2742 kfree_skb(hdev->req_skb);
2743 kfree_skb(hdev->recv_event);
2744 kfree(hdev);
2745 }
2746 EXPORT_SYMBOL(hci_release_dev);
2747
hci_register_suspend_notifier(struct hci_dev * hdev)2748 int hci_register_suspend_notifier(struct hci_dev *hdev)
2749 {
2750 int ret = 0;
2751
2752 if (!hdev->suspend_notifier.notifier_call &&
2753 !test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
2754 hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
2755 ret = register_pm_notifier(&hdev->suspend_notifier);
2756 }
2757
2758 return ret;
2759 }
2760
hci_unregister_suspend_notifier(struct hci_dev * hdev)2761 int hci_unregister_suspend_notifier(struct hci_dev *hdev)
2762 {
2763 int ret = 0;
2764
2765 if (hdev->suspend_notifier.notifier_call) {
2766 ret = unregister_pm_notifier(&hdev->suspend_notifier);
2767 if (!ret)
2768 hdev->suspend_notifier.notifier_call = NULL;
2769 }
2770
2771 return ret;
2772 }
2773
2774 /* Cancel ongoing command synchronously:
2775 *
2776 * - Cancel command timer
2777 * - Reset command counter
2778 * - Cancel command request
2779 */
hci_cancel_cmd_sync(struct hci_dev * hdev,int err)2780 static void hci_cancel_cmd_sync(struct hci_dev *hdev, int err)
2781 {
2782 bt_dev_dbg(hdev, "err 0x%2.2x", err);
2783
2784 cancel_delayed_work_sync(&hdev->cmd_timer);
2785 cancel_delayed_work_sync(&hdev->ncmd_timer);
2786 atomic_set(&hdev->cmd_cnt, 1);
2787
2788 hci_cmd_sync_cancel_sync(hdev, err);
2789 }
2790
2791 /* Suspend HCI device */
hci_suspend_dev(struct hci_dev * hdev)2792 int hci_suspend_dev(struct hci_dev *hdev)
2793 {
2794 int ret;
2795
2796 bt_dev_dbg(hdev, "");
2797
2798 /* Suspend should only act when the controller is powered. */
2799 if (!hdev_is_powered(hdev) ||
2800 hci_dev_test_flag(hdev, HCI_UNREGISTER))
2801 return 0;
2802
2803 /* If powering down don't attempt to suspend */
2804 if (mgmt_powering_down(hdev))
2805 return 0;
2806
2807 /* Cancel potentially blocking sync operation before suspend */
2808 hci_cancel_cmd_sync(hdev, EHOSTDOWN);
2809
2810 hci_req_sync_lock(hdev);
2811 ret = hci_suspend_sync(hdev);
2812 hci_req_sync_unlock(hdev);
2813
2814 hci_clear_wake_reason(hdev);
2815 mgmt_suspending(hdev, hdev->suspend_state);
2816
2817 hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
2818 return ret;
2819 }
2820 EXPORT_SYMBOL(hci_suspend_dev);
2821
2822 /* Resume HCI device */
hci_resume_dev(struct hci_dev * hdev)2823 int hci_resume_dev(struct hci_dev *hdev)
2824 {
2825 int ret;
2826
2827 bt_dev_dbg(hdev, "");
2828
2829 /* Resume should only act when the controller is powered. */
2830 if (!hdev_is_powered(hdev) ||
2831 hci_dev_test_flag(hdev, HCI_UNREGISTER))
2832 return 0;
2833
2834 /* If powering down don't attempt to resume */
2835 if (mgmt_powering_down(hdev))
2836 return 0;
2837
2838 hci_req_sync_lock(hdev);
2839 ret = hci_resume_sync(hdev);
2840 hci_req_sync_unlock(hdev);
2841
2842 mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
2843 hdev->wake_addr_type);
2844
2845 hci_sock_dev_event(hdev, HCI_DEV_RESUME);
2846 return ret;
2847 }
2848 EXPORT_SYMBOL(hci_resume_dev);
2849
2850 /* Reset HCI device */
hci_reset_dev(struct hci_dev * hdev)2851 int hci_reset_dev(struct hci_dev *hdev)
2852 {
2853 static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
2854 struct sk_buff *skb;
2855
2856 skb = bt_skb_alloc(3, GFP_ATOMIC);
2857 if (!skb)
2858 return -ENOMEM;
2859
2860 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
2861 skb_put_data(skb, hw_err, 3);
2862
2863 bt_dev_err(hdev, "Injecting HCI hardware error event");
2864
2865 /* Send Hardware Error to upper stack */
2866 return hci_recv_frame(hdev, skb);
2867 }
2868 EXPORT_SYMBOL(hci_reset_dev);
2869
2870 /* Receive frame from HCI drivers */
hci_recv_frame(struct hci_dev * hdev,struct sk_buff * skb)2871 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
2872 {
2873 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
2874 && !test_bit(HCI_INIT, &hdev->flags))) {
2875 kfree_skb(skb);
2876 return -ENXIO;
2877 }
2878
2879 switch (hci_skb_pkt_type(skb)) {
2880 case HCI_EVENT_PKT:
2881 break;
2882 case HCI_ACLDATA_PKT:
2883 /* Detect if ISO packet has been sent as ACL */
2884 if (hci_conn_num(hdev, ISO_LINK)) {
2885 __u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle);
2886 __u8 type;
2887
2888 type = hci_conn_lookup_type(hdev, hci_handle(handle));
2889 if (type == ISO_LINK)
2890 hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
2891 }
2892 break;
2893 case HCI_SCODATA_PKT:
2894 break;
2895 case HCI_ISODATA_PKT:
2896 break;
2897 default:
2898 kfree_skb(skb);
2899 return -EINVAL;
2900 }
2901
2902 /* Incoming skb */
2903 bt_cb(skb)->incoming = 1;
2904
2905 /* Time stamp */
2906 __net_timestamp(skb);
2907
2908 skb_queue_tail(&hdev->rx_q, skb);
2909 queue_work(hdev->workqueue, &hdev->rx_work);
2910
2911 return 0;
2912 }
2913 EXPORT_SYMBOL(hci_recv_frame);
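/* Hedged sketch of the driver-side RX path (not in the original source):
 * a transport driver tags the packet type and hands the skb to the core.
 * "buf" and "len" stand in for whatever the transport actually received.
 *
 *	struct sk_buff *skb = bt_skb_alloc(len, GFP_ATOMIC);
 *
 *	if (skb) {
 *		hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
 *		skb_put_data(skb, buf, len);
 *		if (hci_recv_frame(hdev, skb) < 0)
 *			bt_dev_err(hdev, "failed to queue RX frame");
 *	}
 */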
2914
2915 /* Receive diagnostic message from HCI drivers */
hci_recv_diag(struct hci_dev * hdev,struct sk_buff * skb)2916 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
2917 {
2918 /* Mark as diagnostic packet */
2919 hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
2920
2921 /* Time stamp */
2922 __net_timestamp(skb);
2923
2924 skb_queue_tail(&hdev->rx_q, skb);
2925 queue_work(hdev->workqueue, &hdev->rx_work);
2926
2927 return 0;
2928 }
2929 EXPORT_SYMBOL(hci_recv_diag);
2930
hci_set_hw_info(struct hci_dev * hdev,const char * fmt,...)2931 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
2932 {
2933 va_list vargs;
2934
2935 va_start(vargs, fmt);
2936 kfree_const(hdev->hw_info);
2937 hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
2938 va_end(vargs);
2939 }
2940 EXPORT_SYMBOL(hci_set_hw_info);
2941
hci_set_fw_info(struct hci_dev * hdev,const char * fmt,...)2942 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
2943 {
2944 va_list vargs;
2945
2946 va_start(vargs, fmt);
2947 kfree_const(hdev->fw_info);
2948 hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
2949 va_end(vargs);
2950 }
2951 EXPORT_SYMBOL(hci_set_fw_info);
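/* Example usage (illustrative values only): drivers typically record hardware
 * and firmware identifiers once they are known, e.g.
 *
 *	hci_set_hw_info(hdev, "board rev %u", board_rev);
 *	hci_set_fw_info(hdev, "build %u", fw_build);
 */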
2952
2953 /* ---- Interface to upper protocols ---- */
2954
hci_register_cb(struct hci_cb * cb)2955 int hci_register_cb(struct hci_cb *cb)
2956 {
2957 BT_DBG("%p name %s", cb, cb->name);
2958
2959 mutex_lock(&hci_cb_list_lock);
2960 list_add_tail(&cb->list, &hci_cb_list);
2961 mutex_unlock(&hci_cb_list_lock);
2962
2963 return 0;
2964 }
2965 EXPORT_SYMBOL(hci_register_cb);
2966
hci_unregister_cb(struct hci_cb * cb)2967 int hci_unregister_cb(struct hci_cb *cb)
2968 {
2969 BT_DBG("%p name %s", cb, cb->name);
2970
2971 mutex_lock(&hci_cb_list_lock);
2972 list_del(&cb->list);
2973 mutex_unlock(&hci_cb_list_lock);
2974
2975 return 0;
2976 }
2977 EXPORT_SYMBOL(hci_unregister_cb);
2978
hci_send_frame(struct hci_dev * hdev,struct sk_buff * skb)2979 static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
2980 {
2981 int err;
2982
2983 BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
2984 skb->len);
2985
2986 /* Time stamp */
2987 __net_timestamp(skb);
2988
2989 /* Send copy to monitor */
2990 hci_send_to_monitor(hdev, skb);
2991
2992 if (atomic_read(&hdev->promisc)) {
2993 /* Send copy to the sockets */
2994 hci_send_to_sock(hdev, skb);
2995 }
2996
2997 /* Get rid of skb owner, prior to sending to the driver. */
2998 skb_orphan(skb);
2999
3000 if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3001 kfree_skb(skb);
3002 return -EINVAL;
3003 }
3004
3005 err = hdev->send(hdev, skb);
3006 if (err < 0) {
3007 bt_dev_err(hdev, "sending frame failed (%d)", err);
3008 kfree_skb(skb);
3009 return err;
3010 }
3011
3012 return 0;
3013 }
3014
3015 /* Send HCI command */
hci_send_cmd(struct hci_dev * hdev,__u16 opcode,__u32 plen,const void * param)3016 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3017 const void *param)
3018 {
3019 struct sk_buff *skb;
3020
3021 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3022
3023 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3024 if (!skb) {
3025 bt_dev_err(hdev, "no memory for command");
3026 return -ENOMEM;
3027 }
3028
3029 /* Stand-alone HCI commands must be flagged as
3030 * single-command requests.
3031 */
3032 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
3033
3034 skb_queue_tail(&hdev->cmd_q, skb);
3035 queue_work(hdev->workqueue, &hdev->cmd_work);
3036
3037 return 0;
3038 }
3039
__hci_cmd_send(struct hci_dev * hdev,u16 opcode,u32 plen,const void * param)3040 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
3041 const void *param)
3042 {
3043 struct sk_buff *skb;
3044
3045 if (hci_opcode_ogf(opcode) != 0x3f) {
3046 /* A controller receiving a command shall respond with either
3047 * a Command Status Event or a Command Complete Event.
3048 * Therefore, all standard HCI commands must be sent via the
3049 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
3050 * Some vendors do not comply with this rule for vendor-specific
3051 * commands and do not return any event. We want to support
3052 * unresponded commands for such cases only.
3053 */
3054 bt_dev_err(hdev, "unresponded command not supported");
3055 return -EINVAL;
3056 }
3057
3058 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3059 if (!skb) {
3060 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
3061 opcode);
3062 return -ENOMEM;
3063 }
3064
3065 hci_send_frame(hdev, skb);
3066
3067 return 0;
3068 }
3069 EXPORT_SYMBOL(__hci_cmd_send);
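/* Hedged example: only vendor-specific commands (OGF 0x3f) may be sent this
 * way, since no completion event is expected. The OCF and parameter below are
 * hypothetical; hci_opcode_pack() is assumed to be the opcode helper from
 * hci.h.
 *
 *	u8 param = 0x01;
 *
 *	err = __hci_cmd_send(hdev, hci_opcode_pack(0x3f, 0x0001),
 *			     sizeof(param), &param);
 */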
3070
3071 /* Get data from the previously sent command */
hci_cmd_data(struct sk_buff * skb,__u16 opcode)3072 static void *hci_cmd_data(struct sk_buff *skb, __u16 opcode)
3073 {
3074 struct hci_command_hdr *hdr;
3075
3076 if (!skb || skb->len < HCI_COMMAND_HDR_SIZE)
3077 return NULL;
3078
3079 hdr = (void *)skb->data;
3080
3081 if (hdr->opcode != cpu_to_le16(opcode))
3082 return NULL;
3083
3084 return skb->data + HCI_COMMAND_HDR_SIZE;
3085 }
3086
3087 /* Get data from the previously sent command */
hci_sent_cmd_data(struct hci_dev * hdev,__u16 opcode)3088 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3089 {
3090 void *data;
3091
3092 /* Check if opcode matches last sent command */
3093 data = hci_cmd_data(hdev->sent_cmd, opcode);
3094 if (!data)
3095 /* Check if opcode matches last request */
3096 data = hci_cmd_data(hdev->req_skb, opcode);
3097
3098 return data;
3099 }
3100
3101 /* Get data from last received event */
hci_recv_event_data(struct hci_dev * hdev,__u8 event)3102 void *hci_recv_event_data(struct hci_dev *hdev, __u8 event)
3103 {
3104 struct hci_event_hdr *hdr;
3105 int offset;
3106
3107 if (!hdev->recv_event)
3108 return NULL;
3109
3110 hdr = (void *)hdev->recv_event->data;
3111 offset = sizeof(*hdr);
3112
3113 if (hdr->evt != event) {
3114 /* In case of an LE meta event, check whether the subevent matches */
3115 if (hdr->evt == HCI_EV_LE_META) {
3116 struct hci_ev_le_meta *ev;
3117
3118 ev = (void *)hdev->recv_event->data + offset;
3119 offset += sizeof(*ev);
3120 if (ev->subevent == event)
3121 goto found;
3122 }
3123 return NULL;
3124 }
3125
3126 found:
3127 bt_dev_dbg(hdev, "event 0x%2.2x", event);
3128
3129 return hdev->recv_event->data + offset;
3130 }
3131
3132 /* Send ACL data */
hci_add_acl_hdr(struct sk_buff * skb,__u16 handle,__u16 flags)3133 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3134 {
3135 struct hci_acl_hdr *hdr;
3136 int len = skb->len;
3137
3138 skb_push(skb, HCI_ACL_HDR_SIZE);
3139 skb_reset_transport_header(skb);
3140 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3141 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3142 hdr->dlen = cpu_to_le16(len);
3143 }
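/* Worked example of the header packing above (hci_handle_pack() keeps the low
 * 12 bits for the connection handle and the top 4 bits for the packet
 * boundary/broadcast flags): handle 0x002a with ACL_START (0x02) yields
 * 0x202a on the wire, followed by the little-endian data length.
 */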
3144
hci_queue_acl(struct hci_chan * chan,struct sk_buff_head * queue,struct sk_buff * skb,__u16 flags)3145 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3146 struct sk_buff *skb, __u16 flags)
3147 {
3148 struct hci_conn *conn = chan->conn;
3149 struct hci_dev *hdev = conn->hdev;
3150 struct sk_buff *list;
3151
3152 skb->len = skb_headlen(skb);
3153 skb->data_len = 0;
3154
3155 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3156
3157 hci_add_acl_hdr(skb, conn->handle, flags);
3158
3159 list = skb_shinfo(skb)->frag_list;
3160 if (!list) {
3161 /* Non fragmented */
3162 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3163
3164 skb_queue_tail(queue, skb);
3165 } else {
3166 /* Fragmented */
3167 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3168
3169 skb_shinfo(skb)->frag_list = NULL;
3170
3171 /* Queue all fragments atomically. We need to use spin_lock_bh
3172 * here because of 6LoWPAN links, as there this function is
3173 * called from softirq and using normal spin lock could cause
3174 * deadlocks.
3175 */
3176 spin_lock_bh(&queue->lock);
3177
3178 __skb_queue_tail(queue, skb);
3179
3180 flags &= ~ACL_START;
3181 flags |= ACL_CONT;
3182 do {
3183 skb = list; list = list->next;
3184
3185 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3186 hci_add_acl_hdr(skb, conn->handle, flags);
3187
3188 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3189
3190 __skb_queue_tail(queue, skb);
3191 } while (list);
3192
3193 spin_unlock_bh(&queue->lock);
3194 }
3195 }
3196
hci_send_acl(struct hci_chan * chan,struct sk_buff * skb,__u16 flags)3197 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3198 {
3199 struct hci_dev *hdev = chan->conn->hdev;
3200
3201 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3202
3203 hci_queue_acl(chan, &chan->data_q, skb, flags);
3204
3205 queue_work(hdev->workqueue, &hdev->tx_work);
3206 }
3207
3208 /* Send SCO data */
hci_send_sco(struct hci_conn * conn,struct sk_buff * skb)3209 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3210 {
3211 struct hci_dev *hdev = conn->hdev;
3212 struct hci_sco_hdr hdr;
3213
3214 BT_DBG("%s len %d", hdev->name, skb->len);
3215
3216 hdr.handle = cpu_to_le16(conn->handle);
3217 hdr.dlen = skb->len;
3218
3219 skb_push(skb, HCI_SCO_HDR_SIZE);
3220 skb_reset_transport_header(skb);
3221 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3222
3223 hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
3224
3225 skb_queue_tail(&conn->data_q, skb);
3226 queue_work(hdev->workqueue, &hdev->tx_work);
3227 }
3228
3229 /* Send ISO data */
hci_add_iso_hdr(struct sk_buff * skb,__u16 handle,__u8 flags)3230 static void hci_add_iso_hdr(struct sk_buff *skb, __u16 handle, __u8 flags)
3231 {
3232 struct hci_iso_hdr *hdr;
3233 int len = skb->len;
3234
3235 skb_push(skb, HCI_ISO_HDR_SIZE);
3236 skb_reset_transport_header(skb);
3237 hdr = (struct hci_iso_hdr *)skb_transport_header(skb);
3238 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3239 hdr->dlen = cpu_to_le16(len);
3240 }
3241
hci_queue_iso(struct hci_conn * conn,struct sk_buff_head * queue,struct sk_buff * skb)3242 static void hci_queue_iso(struct hci_conn *conn, struct sk_buff_head *queue,
3243 struct sk_buff *skb)
3244 {
3245 struct hci_dev *hdev = conn->hdev;
3246 struct sk_buff *list;
3247 __u16 flags;
3248
3249 skb->len = skb_headlen(skb);
3250 skb->data_len = 0;
3251
3252 hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
3253
3254 list = skb_shinfo(skb)->frag_list;
3255
3256 flags = hci_iso_flags_pack(list ? ISO_START : ISO_SINGLE, 0x00);
3257 hci_add_iso_hdr(skb, conn->handle, flags);
3258
3259 if (!list) {
3260 /* Non fragmented */
3261 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3262
3263 skb_queue_tail(queue, skb);
3264 } else {
3265 /* Fragmented */
3266 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3267
3268 skb_shinfo(skb)->frag_list = NULL;
3269
3270 __skb_queue_tail(queue, skb);
3271
3272 do {
3273 skb = list; list = list->next;
3274
3275 hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
3276 flags = hci_iso_flags_pack(list ? ISO_CONT : ISO_END,
3277 0x00);
3278 hci_add_iso_hdr(skb, conn->handle, flags);
3279
3280 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3281
3282 __skb_queue_tail(queue, skb);
3283 } while (list);
3284 }
3285 }
3286
hci_send_iso(struct hci_conn * conn,struct sk_buff * skb)3287 void hci_send_iso(struct hci_conn *conn, struct sk_buff *skb)
3288 {
3289 struct hci_dev *hdev = conn->hdev;
3290
3291 BT_DBG("%s len %d", hdev->name, skb->len);
3292
3293 hci_queue_iso(conn, &conn->data_q, skb);
3294
3295 queue_work(hdev->workqueue, &hdev->tx_work);
3296 }
3297
3298 /* ---- HCI TX task (outgoing data) ---- */
3299
3300 /* HCI Connection scheduler */
hci_quote_sent(struct hci_conn * conn,int num,int * quote)3301 static inline void hci_quote_sent(struct hci_conn *conn, int num, int *quote)
3302 {
3303 struct hci_dev *hdev;
3304 int cnt, q;
3305
3306 if (!conn) {
3307 *quote = 0;
3308 return;
3309 }
3310
3311 hdev = conn->hdev;
3312
3313 switch (conn->type) {
3314 case ACL_LINK:
3315 cnt = hdev->acl_cnt;
3316 break;
3317 case SCO_LINK:
3318 case ESCO_LINK:
3319 cnt = hdev->sco_cnt;
3320 break;
3321 case LE_LINK:
3322 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3323 break;
3324 case ISO_LINK:
3325 cnt = hdev->iso_mtu ? hdev->iso_cnt :
3326 hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3327 break;
3328 default:
3329 cnt = 0;
3330 bt_dev_err(hdev, "unknown link type %d", conn->type);
3331 }
3332
3333 q = cnt / num;
3334 *quote = q ? q : 1;
3335 }
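/* Worked example: with cnt == 8 free controller buffers and num == 3
 * connections that have queued data, each connection gets a quote of
 * 8 / 3 == 2 packets per scheduling round; the "q ? q : 1" fallback
 * guarantees at least one packet even when cnt < num.
 */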
3336
hci_low_sent(struct hci_dev * hdev,__u8 type,int * quote)3337 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3338 int *quote)
3339 {
3340 struct hci_conn_hash *h = &hdev->conn_hash;
3341 struct hci_conn *conn = NULL, *c;
3342 unsigned int num = 0, min = ~0;
3343
3344 /* We don't have to lock device here. Connections are always
3345 * added and removed with TX task disabled. */
3346
3347 rcu_read_lock();
3348
3349 list_for_each_entry_rcu(c, &h->list, list) {
3350 if (c->type != type || skb_queue_empty(&c->data_q))
3351 continue;
3352
3353 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3354 continue;
3355
3356 num++;
3357
3358 if (c->sent < min) {
3359 min = c->sent;
3360 conn = c;
3361 }
3362
3363 if (hci_conn_num(hdev, type) == num)
3364 break;
3365 }
3366
3367 rcu_read_unlock();
3368
3369 hci_quote_sent(conn, num, quote);
3370
3371 BT_DBG("conn %p quote %d", conn, *quote);
3372 return conn;
3373 }
3374
hci_link_tx_to(struct hci_dev * hdev,__u8 type)3375 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3376 {
3377 struct hci_conn_hash *h = &hdev->conn_hash;
3378 struct hci_conn *c;
3379
3380 bt_dev_err(hdev, "link tx timeout");
3381
3382 hci_dev_lock(hdev);
3383
3384 /* Kill stalled connections */
3385 list_for_each_entry(c, &h->list, list) {
3386 if (c->type == type && c->sent) {
3387 bt_dev_err(hdev, "killing stalled connection %pMR",
3388 &c->dst);
3389 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3390 }
3391 }
3392
3393 hci_dev_unlock(hdev);
3394 }
3395
hci_chan_sent(struct hci_dev * hdev,__u8 type,int * quote)3396 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3397 int *quote)
3398 {
3399 struct hci_conn_hash *h = &hdev->conn_hash;
3400 struct hci_chan *chan = NULL;
3401 unsigned int num = 0, min = ~0, cur_prio = 0;
3402 struct hci_conn *conn;
3403 int conn_num = 0;
3404
3405 BT_DBG("%s", hdev->name);
3406
3407 rcu_read_lock();
3408
3409 list_for_each_entry_rcu(conn, &h->list, list) {
3410 struct hci_chan *tmp;
3411
3412 if (conn->type != type)
3413 continue;
3414
3415 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3416 continue;
3417
3418 conn_num++;
3419
3420 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3421 struct sk_buff *skb;
3422
3423 if (skb_queue_empty(&tmp->data_q))
3424 continue;
3425
3426 skb = skb_peek(&tmp->data_q);
3427 if (skb->priority < cur_prio)
3428 continue;
3429
3430 if (skb->priority > cur_prio) {
3431 num = 0;
3432 min = ~0;
3433 cur_prio = skb->priority;
3434 }
3435
3436 num++;
3437
3438 if (conn->sent < min) {
3439 min = conn->sent;
3440 chan = tmp;
3441 }
3442 }
3443
3444 if (hci_conn_num(hdev, type) == conn_num)
3445 break;
3446 }
3447
3448 rcu_read_unlock();
3449
3450 if (!chan)
3451 return NULL;
3452
3453 hci_quote_sent(chan->conn, num, quote);
3454
3455 BT_DBG("chan %p quote %d", chan, *quote);
3456 return chan;
3457 }
3458
hci_prio_recalculate(struct hci_dev * hdev,__u8 type)3459 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3460 {
3461 struct hci_conn_hash *h = &hdev->conn_hash;
3462 struct hci_conn *conn;
3463 int num = 0;
3464
3465 BT_DBG("%s", hdev->name);
3466
3467 rcu_read_lock();
3468
3469 list_for_each_entry_rcu(conn, &h->list, list) {
3470 struct hci_chan *chan;
3471
3472 if (conn->type != type)
3473 continue;
3474
3475 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3476 continue;
3477
3478 num++;
3479
3480 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3481 struct sk_buff *skb;
3482
3483 if (chan->sent) {
3484 chan->sent = 0;
3485 continue;
3486 }
3487
3488 if (skb_queue_empty(&chan->data_q))
3489 continue;
3490
3491 skb = skb_peek(&chan->data_q);
3492 if (skb->priority >= HCI_PRIO_MAX - 1)
3493 continue;
3494
3495 skb->priority = HCI_PRIO_MAX - 1;
3496
3497 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3498 skb->priority);
3499 }
3500
3501 if (hci_conn_num(hdev, type) == num)
3502 break;
3503 }
3504
3505 rcu_read_unlock();
3506
3507 }
3508
__check_timeout(struct hci_dev * hdev,unsigned int cnt,u8 type)3509 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type)
3510 {
3511 unsigned long last_tx;
3512
3513 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
3514 return;
3515
3516 switch (type) {
3517 case LE_LINK:
3518 last_tx = hdev->le_last_tx;
3519 break;
3520 default:
3521 last_tx = hdev->acl_last_tx;
3522 break;
3523 }
3524
3525 /* tx timeout must be longer than maximum link supervision timeout
3526 * (40.9 seconds)
3527 */
3528 if (!cnt && time_after(jiffies, last_tx + HCI_ACL_TX_TIMEOUT))
3529 hci_link_tx_to(hdev, type);
3530 }
3531
3532 /* Schedule SCO */
hci_sched_sco(struct hci_dev * hdev)3533 static void hci_sched_sco(struct hci_dev *hdev)
3534 {
3535 struct hci_conn *conn;
3536 struct sk_buff *skb;
3537 int quote;
3538
3539 BT_DBG("%s", hdev->name);
3540
3541 if (!hci_conn_num(hdev, SCO_LINK))
3542 return;
3543
3544 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3545 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3546 BT_DBG("skb %p len %d", skb, skb->len);
3547 hci_send_frame(hdev, skb);
3548
3549 conn->sent++;
3550 if (conn->sent == ~0)
3551 conn->sent = 0;
3552 }
3553 }
3554 }
3555
hci_sched_esco(struct hci_dev * hdev)3556 static void hci_sched_esco(struct hci_dev *hdev)
3557 {
3558 struct hci_conn *conn;
3559 struct sk_buff *skb;
3560 int quote;
3561
3562 BT_DBG("%s", hdev->name);
3563
3564 if (!hci_conn_num(hdev, ESCO_LINK))
3565 return;
3566
3567 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3568 &quote))) {
3569 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3570 BT_DBG("skb %p len %d", skb, skb->len);
3571 hci_send_frame(hdev, skb);
3572
3573 conn->sent++;
3574 if (conn->sent == ~0)
3575 conn->sent = 0;
3576 }
3577 }
3578 }
3579
hci_sched_acl_pkt(struct hci_dev * hdev)3580 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3581 {
3582 unsigned int cnt = hdev->acl_cnt;
3583 struct hci_chan *chan;
3584 struct sk_buff *skb;
3585 int quote;
3586
3587 __check_timeout(hdev, cnt, ACL_LINK);
3588
3589 while (hdev->acl_cnt &&
3590 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3591 u32 priority = (skb_peek(&chan->data_q))->priority;
3592 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3593 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3594 skb->len, skb->priority);
3595
3596 /* Stop if priority has changed */
3597 if (skb->priority < priority)
3598 break;
3599
3600 skb = skb_dequeue(&chan->data_q);
3601
3602 hci_conn_enter_active_mode(chan->conn,
3603 bt_cb(skb)->force_active);
3604
3605 hci_send_frame(hdev, skb);
3606 hdev->acl_last_tx = jiffies;
3607
3608 hdev->acl_cnt--;
3609 chan->sent++;
3610 chan->conn->sent++;
3611
3612 /* Send pending SCO packets right away */
3613 hci_sched_sco(hdev);
3614 hci_sched_esco(hdev);
3615 }
3616 }
3617
3618 if (cnt != hdev->acl_cnt)
3619 hci_prio_recalculate(hdev, ACL_LINK);
3620 }
3621
hci_sched_acl(struct hci_dev * hdev)3622 static void hci_sched_acl(struct hci_dev *hdev)
3623 {
3624 BT_DBG("%s", hdev->name);
3625
3626 /* No ACL link over BR/EDR controller */
3627 if (!hci_conn_num(hdev, ACL_LINK))
3628 return;
3629
3630 hci_sched_acl_pkt(hdev);
3631 }
3632
hci_sched_le(struct hci_dev * hdev)3633 static void hci_sched_le(struct hci_dev *hdev)
3634 {
3635 struct hci_chan *chan;
3636 struct sk_buff *skb;
3637 int quote, *cnt, tmp;
3638
3639 BT_DBG("%s", hdev->name);
3640
3641 if (!hci_conn_num(hdev, LE_LINK))
3642 return;
3643
3644 cnt = hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt;
3645
3646 __check_timeout(hdev, *cnt, LE_LINK);
3647
3648 tmp = *cnt;
3649 while (*cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3650 u32 priority = (skb_peek(&chan->data_q))->priority;
3651 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3652 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3653 skb->len, skb->priority);
3654
3655 /* Stop if priority has changed */
3656 if (skb->priority < priority)
3657 break;
3658
3659 skb = skb_dequeue(&chan->data_q);
3660
3661 hci_send_frame(hdev, skb);
3662 hdev->le_last_tx = jiffies;
3663
3664 (*cnt)--;
3665 chan->sent++;
3666 chan->conn->sent++;
3667
3668 /* Send pending SCO packets right away */
3669 hci_sched_sco(hdev);
3670 hci_sched_esco(hdev);
3671 }
3672 }
3673
3674 if (*cnt != tmp)
3675 hci_prio_recalculate(hdev, LE_LINK);
3676 }
3677
3678 /* Schedule CIS */
hci_sched_iso(struct hci_dev * hdev)3679 static void hci_sched_iso(struct hci_dev *hdev)
3680 {
3681 struct hci_conn *conn;
3682 struct sk_buff *skb;
3683 int quote, *cnt;
3684
3685 BT_DBG("%s", hdev->name);
3686
3687 if (!hci_conn_num(hdev, ISO_LINK))
3688 return;
3689
3690 cnt = hdev->iso_pkts ? &hdev->iso_cnt :
3691 hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt;
3692 while (*cnt && (conn = hci_low_sent(hdev, ISO_LINK, &quote))) {
3693 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3694 BT_DBG("skb %p len %d", skb, skb->len);
3695 hci_send_frame(hdev, skb);
3696
3697 conn->sent++;
3698 if (conn->sent == ~0)
3699 conn->sent = 0;
3700 (*cnt)--;
3701 }
3702 }
3703 }
3704
hci_tx_work(struct work_struct * work)3705 static void hci_tx_work(struct work_struct *work)
3706 {
3707 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3708 struct sk_buff *skb;
3709
3710 BT_DBG("%s acl %d sco %d le %d iso %d", hdev->name, hdev->acl_cnt,
3711 hdev->sco_cnt, hdev->le_cnt, hdev->iso_cnt);
3712
3713 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
3714 /* Schedule queues and send stuff to HCI driver */
3715 hci_sched_sco(hdev);
3716 hci_sched_esco(hdev);
3717 hci_sched_iso(hdev);
3718 hci_sched_acl(hdev);
3719 hci_sched_le(hdev);
3720 }
3721
3722 /* Send next queued raw (unknown type) packet */
3723 while ((skb = skb_dequeue(&hdev->raw_q)))
3724 hci_send_frame(hdev, skb);
3725 }
3726
3727 /* ----- HCI RX task (incoming data processing) ----- */
3728
3729 /* ACL data packet */
hci_acldata_packet(struct hci_dev * hdev,struct sk_buff * skb)3730 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3731 {
3732 struct hci_acl_hdr *hdr;
3733 struct hci_conn *conn;
3734 __u16 handle, flags;
3735
3736 hdr = skb_pull_data(skb, sizeof(*hdr));
3737 if (!hdr) {
3738 bt_dev_err(hdev, "ACL packet too small");
3739 goto drop;
3740 }
3741
3742 handle = __le16_to_cpu(hdr->handle);
3743 flags = hci_flags(handle);
3744 handle = hci_handle(handle);
3745
3746 bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
3747 handle, flags);
3748
3749 hdev->stat.acl_rx++;
3750
3751 hci_dev_lock(hdev);
3752 conn = hci_conn_hash_lookup_handle(hdev, handle);
3753 hci_dev_unlock(hdev);
3754
3755 if (conn) {
3756 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3757
3758 /* Send to upper protocol */
3759 l2cap_recv_acldata(conn, skb, flags);
3760 return;
3761 } else {
3762 bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
3763 handle);
3764 }
3765
3766 drop:
3767 kfree_skb(skb);
3768 }
3769
3770 /* SCO data packet */
hci_scodata_packet(struct hci_dev * hdev,struct sk_buff * skb)3771 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3772 {
3773 struct hci_sco_hdr *hdr = (void *) skb->data;
3774 struct hci_conn *conn;
3775 __u16 handle, flags;
3776
3777 skb_pull(skb, HCI_SCO_HDR_SIZE);
3778
3779 handle = __le16_to_cpu(hdr->handle);
3780 flags = hci_flags(handle);
3781 handle = hci_handle(handle);
3782
3783 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3784 handle, flags);
3785
3786 hdev->stat.sco_rx++;
3787
3788 hci_dev_lock(hdev);
3789 conn = hci_conn_hash_lookup_handle(hdev, handle);
3790 hci_dev_unlock(hdev);
3791
3792 if (conn) {
3793 /* Send to upper protocol */
3794 hci_skb_pkt_status(skb) = flags & 0x03;
3795 sco_recv_scodata(conn, skb);
3796 return;
3797 } else {
3798 bt_dev_err_ratelimited(hdev, "SCO packet for unknown connection handle %d",
3799 handle);
3800 }
3801
3802 kfree_skb(skb);
3803 }
3804
hci_isodata_packet(struct hci_dev * hdev,struct sk_buff * skb)3805 static void hci_isodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3806 {
3807 struct hci_iso_hdr *hdr;
3808 struct hci_conn *conn;
3809 __u16 handle, flags;
3810
3811 hdr = skb_pull_data(skb, sizeof(*hdr));
3812 if (!hdr) {
3813 bt_dev_err(hdev, "ISO packet too small");
3814 goto drop;
3815 }
3816
3817 handle = __le16_to_cpu(hdr->handle);
3818 flags = hci_flags(handle);
3819 handle = hci_handle(handle);
3820
3821 bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
3822 handle, flags);
3823
3824 hci_dev_lock(hdev);
3825 conn = hci_conn_hash_lookup_handle(hdev, handle);
3826 hci_dev_unlock(hdev);
3827
3828 if (!conn) {
3829 bt_dev_err(hdev, "ISO packet for unknown connection handle %d",
3830 handle);
3831 goto drop;
3832 }
3833
3834 /* Send to upper protocol */
3835 iso_recv(conn, skb, flags);
3836 return;
3837
3838 drop:
3839 kfree_skb(skb);
3840 }
3841
hci_req_is_complete(struct hci_dev * hdev)3842 static bool hci_req_is_complete(struct hci_dev *hdev)
3843 {
3844 struct sk_buff *skb;
3845
3846 skb = skb_peek(&hdev->cmd_q);
3847 if (!skb)
3848 return true;
3849
3850 return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
3851 }
3852
hci_resend_last(struct hci_dev * hdev)3853 static void hci_resend_last(struct hci_dev *hdev)
3854 {
3855 struct hci_command_hdr *sent;
3856 struct sk_buff *skb;
3857 u16 opcode;
3858
3859 if (!hdev->sent_cmd)
3860 return;
3861
3862 sent = (void *) hdev->sent_cmd->data;
3863 opcode = __le16_to_cpu(sent->opcode);
3864 if (opcode == HCI_OP_RESET)
3865 return;
3866
3867 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3868 if (!skb)
3869 return;
3870
3871 skb_queue_head(&hdev->cmd_q, skb);
3872 queue_work(hdev->workqueue, &hdev->cmd_work);
3873 }
3874
hci_req_cmd_complete(struct hci_dev * hdev,u16 opcode,u8 status,hci_req_complete_t * req_complete,hci_req_complete_skb_t * req_complete_skb)3875 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
3876 hci_req_complete_t *req_complete,
3877 hci_req_complete_skb_t *req_complete_skb)
3878 {
3879 struct sk_buff *skb;
3880 unsigned long flags;
3881
3882 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3883
3884 /* If the completed command doesn't match the last one that was
3885 * sent we need to do special handling of it.
3886 */
3887 if (!hci_sent_cmd_data(hdev, opcode)) {
3888 /* Some CSR based controllers generate a spontaneous
3889 * reset complete event during init and any pending
3890 * command will never be completed. In such a case we
3891 * need to resend whatever was the last sent
3892 * command.
3893 */
3894 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3895 hci_resend_last(hdev);
3896
3897 return;
3898 }
3899
3900 /* If we reach this point this event matches the last command sent */
3901 hci_dev_clear_flag(hdev, HCI_CMD_PENDING);
3902
3903 /* If the command succeeded and there's still more commands in
3904 * this request the request is not yet complete.
3905 */
3906 if (!status && !hci_req_is_complete(hdev))
3907 return;
3908
3909 skb = hdev->req_skb;
3910
3911 /* If this was the last command in a request the complete
3912 * callback would be found in hdev->req_skb instead of the
3913 * command queue (hdev->cmd_q).
3914 */
3915 if (skb && bt_cb(skb)->hci.req_flags & HCI_REQ_SKB) {
3916 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
3917 return;
3918 }
3919
3920 if (skb && bt_cb(skb)->hci.req_complete) {
3921 *req_complete = bt_cb(skb)->hci.req_complete;
3922 return;
3923 }
3924
3925 /* Remove all pending commands belonging to this request */
3926 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3927 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
3928 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
3929 __skb_queue_head(&hdev->cmd_q, skb);
3930 break;
3931 }
3932
3933 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
3934 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
3935 else
3936 *req_complete = bt_cb(skb)->hci.req_complete;
3937 dev_kfree_skb_irq(skb);
3938 }
3939 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3940 }
3941
hci_rx_work(struct work_struct * work)3942 static void hci_rx_work(struct work_struct *work)
3943 {
3944 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
3945 struct sk_buff *skb;
3946
3947 BT_DBG("%s", hdev->name);
3948
3949 /* The kcov_remote functions are used to collect packet parsing
3950 * coverage information from this background thread and to associate
3951 * the coverage with the syscall thread that originally injected
3952 * the packet. This helps with fuzzing the kernel.
3953 */
3954 for (; (skb = skb_dequeue(&hdev->rx_q)); kcov_remote_stop()) {
3955 kcov_remote_start_common(skb_get_kcov_handle(skb));
3956
3957 /* Send copy to monitor */
3958 hci_send_to_monitor(hdev, skb);
3959
3960 if (atomic_read(&hdev->promisc)) {
3961 /* Send copy to the sockets */
3962 hci_send_to_sock(hdev, skb);
3963 }
3964
3965 /* If the device has been opened in HCI_USER_CHANNEL,
3966 * userspace has exclusive access to the device.
3967 * While the device is in HCI_INIT, we still need to pass
3968 * packets to the driver so that it can
3969 * complete its setup().
3970 */
3971 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
3972 !test_bit(HCI_INIT, &hdev->flags)) {
3973 kfree_skb(skb);
3974 continue;
3975 }
3976
3977 if (test_bit(HCI_INIT, &hdev->flags)) {
3978 /* Don't process data packets in these states. */
3979 switch (hci_skb_pkt_type(skb)) {
3980 case HCI_ACLDATA_PKT:
3981 case HCI_SCODATA_PKT:
3982 case HCI_ISODATA_PKT:
3983 kfree_skb(skb);
3984 continue;
3985 }
3986 }
3987
3988 /* Process frame */
3989 switch (hci_skb_pkt_type(skb)) {
3990 case HCI_EVENT_PKT:
3991 BT_DBG("%s Event packet", hdev->name);
3992 hci_event_packet(hdev, skb);
3993 break;
3994
3995 case HCI_ACLDATA_PKT:
3996 BT_DBG("%s ACL data packet", hdev->name);
3997 hci_acldata_packet(hdev, skb);
3998 break;
3999
4000 case HCI_SCODATA_PKT:
4001 BT_DBG("%s SCO data packet", hdev->name);
4002 hci_scodata_packet(hdev, skb);
4003 break;
4004
4005 case HCI_ISODATA_PKT:
4006 BT_DBG("%s ISO data packet", hdev->name);
4007 hci_isodata_packet(hdev, skb);
4008 break;
4009
4010 default:
4011 kfree_skb(skb);
4012 break;
4013 }
4014 }
4015 }
4016
hci_send_cmd_sync(struct hci_dev * hdev,struct sk_buff * skb)4017 static void hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb)
4018 {
4019 int err;
4020
4021 bt_dev_dbg(hdev, "skb %p", skb);
4022
4023 kfree_skb(hdev->sent_cmd);
4024
4025 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4026 if (!hdev->sent_cmd) {
4027 skb_queue_head(&hdev->cmd_q, skb);
4028 queue_work(hdev->workqueue, &hdev->cmd_work);
4029 return;
4030 }
4031
4032 err = hci_send_frame(hdev, skb);
4033 if (err < 0) {
4034 hci_cmd_sync_cancel_sync(hdev, -err);
4035 return;
4036 }
4037
4038 if (hci_req_status_pend(hdev) &&
4039 !hci_dev_test_and_set_flag(hdev, HCI_CMD_PENDING)) {
4040 kfree_skb(hdev->req_skb);
4041 hdev->req_skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4042 }
4043
4044 atomic_dec(&hdev->cmd_cnt);
4045 }
4046
hci_cmd_work(struct work_struct * work)4047 static void hci_cmd_work(struct work_struct *work)
4048 {
4049 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4050 struct sk_buff *skb;
4051
4052 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4053 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4054
4055 /* Send queued commands */
4056 if (atomic_read(&hdev->cmd_cnt)) {
4057 skb = skb_dequeue(&hdev->cmd_q);
4058 if (!skb)
4059 return;
4060
4061 hci_send_cmd_sync(hdev, skb);
4062
4063 rcu_read_lock();
4064 if (test_bit(HCI_RESET, &hdev->flags) ||
4065 hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
4066 cancel_delayed_work(&hdev->cmd_timer);
4067 else
4068 queue_delayed_work(hdev->workqueue, &hdev->cmd_timer,
4069 HCI_CMD_TIMEOUT);
4070 rcu_read_unlock();
4071 }
4072 }
4073