1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
24 */
25
26 /* Bluetooth HCI core. */
27
28 #include <linux/export.h>
29 #include <linux/rfkill.h>
30 #include <linux/debugfs.h>
31 #include <linux/crypto.h>
32 #include <linux/kcov.h>
33 #include <linux/property.h>
34 #include <linux/suspend.h>
35 #include <linux/wait.h>
36 #include <asm/unaligned.h>
37
38 #include <net/bluetooth/bluetooth.h>
39 #include <net/bluetooth/hci_core.h>
40 #include <net/bluetooth/l2cap.h>
41 #include <net/bluetooth/mgmt.h>
42
43 #include "hci_request.h"
44 #include "hci_debugfs.h"
45 #include "smp.h"
46 #include "leds.h"
47 #include "msft.h"
48 #include "aosp.h"
49 #include "hci_codec.h"
50
51 static void hci_rx_work(struct work_struct *work);
52 static void hci_cmd_work(struct work_struct *work);
53 static void hci_tx_work(struct work_struct *work);
54
55 /* HCI device list */
56 LIST_HEAD(hci_dev_list);
57 DEFINE_RWLOCK(hci_dev_list_lock);
58
59 /* HCI callback list */
60 LIST_HEAD(hci_cb_list);
61 DEFINE_MUTEX(hci_cb_list_lock);
62
63 /* HCI ID Numbering */
64 static DEFINE_IDA(hci_index_ida);
65
66 /* Get HCI device by index.
67 * Device is held on return. */
68 struct hci_dev *hci_dev_get(int index)
69 {
70 struct hci_dev *hdev = NULL, *d;
71
72 BT_DBG("%d", index);
73
74 if (index < 0)
75 return NULL;
76
77 read_lock(&hci_dev_list_lock);
78 list_for_each_entry(d, &hci_dev_list, list) {
79 if (d->id == index) {
80 hdev = hci_dev_hold(d);
81 break;
82 }
83 }
84 read_unlock(&hci_dev_list_lock);
85 return hdev;
86 }
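/* A minimal usage sketch: hci_dev_get() grabs a reference via hci_dev_hold(),
 * so every successful lookup must be balanced with hci_dev_put(), as the
 * ioctl helpers below do (index 0 here is purely illustrative):
 *
 *	struct hci_dev *hdev = hci_dev_get(0);
 *
 *	if (hdev) {
 *		... use hdev ...
 *		hci_dev_put(hdev);
 *	}
 */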
87
88 /* ---- Inquiry support ---- */
89
90 bool hci_discovery_active(struct hci_dev *hdev)
91 {
92 struct discovery_state *discov = &hdev->discovery;
93
94 switch (discov->state) {
95 case DISCOVERY_FINDING:
96 case DISCOVERY_RESOLVING:
97 return true;
98
99 default:
100 return false;
101 }
102 }
103
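/* Moves the discovery state machine and, where needed, notifies userspace:
 * mgmt_discovering() is emitted when discovery actually starts (FINDING) and
 * when it fully stops (STOPPED, unless we never got past STARTING). Entering
 * STOPPED also re-evaluates passive scanning.
 */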
104 void hci_discovery_set_state(struct hci_dev *hdev, int state)
105 {
106 int old_state = hdev->discovery.state;
107
108 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
109
110 if (old_state == state)
111 return;
112
113 hdev->discovery.state = state;
114
115 switch (state) {
116 case DISCOVERY_STOPPED:
117 hci_update_passive_scan(hdev);
118
119 if (old_state != DISCOVERY_STARTING)
120 mgmt_discovering(hdev, 0);
121 break;
122 case DISCOVERY_STARTING:
123 break;
124 case DISCOVERY_FINDING:
125 mgmt_discovering(hdev, 1);
126 break;
127 case DISCOVERY_RESOLVING:
128 break;
129 case DISCOVERY_STOPPING:
130 break;
131 }
132 }
133
134 void hci_inquiry_cache_flush(struct hci_dev *hdev)
135 {
136 struct discovery_state *cache = &hdev->discovery;
137 struct inquiry_entry *p, *n;
138
139 list_for_each_entry_safe(p, n, &cache->all, all) {
140 list_del(&p->all);
141 kfree(p);
142 }
143
144 INIT_LIST_HEAD(&cache->unknown);
145 INIT_LIST_HEAD(&cache->resolve);
146 }
147
148 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
149 bdaddr_t *bdaddr)
150 {
151 struct discovery_state *cache = &hdev->discovery;
152 struct inquiry_entry *e;
153
154 BT_DBG("cache %p, %pMR", cache, bdaddr);
155
156 list_for_each_entry(e, &cache->all, all) {
157 if (!bacmp(&e->data.bdaddr, bdaddr))
158 return e;
159 }
160
161 return NULL;
162 }
163
164 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
165 bdaddr_t *bdaddr)
166 {
167 struct discovery_state *cache = &hdev->discovery;
168 struct inquiry_entry *e;
169
170 BT_DBG("cache %p, %pMR", cache, bdaddr);
171
172 list_for_each_entry(e, &cache->unknown, list) {
173 if (!bacmp(&e->data.bdaddr, bdaddr))
174 return e;
175 }
176
177 return NULL;
178 }
179
180 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
181 bdaddr_t *bdaddr,
182 int state)
183 {
184 struct discovery_state *cache = &hdev->discovery;
185 struct inquiry_entry *e;
186
187 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
188
189 list_for_each_entry(e, &cache->resolve, list) {
190 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
191 return e;
192 if (!bacmp(&e->data.bdaddr, bdaddr))
193 return e;
194 }
195
196 return NULL;
197 }
198
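/* Re-inserts the entry so the resolve list stays roughly ordered by signal
 * strength: entries with a smaller abs(rssi), i.e. a stronger signal, come
 * first and therefore get their names resolved earlier. Entries whose name
 * resolution is already pending are not considered when picking the
 * insertion point.
 */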
199 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
200 struct inquiry_entry *ie)
201 {
202 struct discovery_state *cache = &hdev->discovery;
203 struct list_head *pos = &cache->resolve;
204 struct inquiry_entry *p;
205
206 list_del(&ie->list);
207
208 list_for_each_entry(p, &cache->resolve, list) {
209 if (p->name_state != NAME_PENDING &&
210 abs(p->data.rssi) >= abs(ie->data.rssi))
211 break;
212 pos = &p->list;
213 }
214
215 list_add(&ie->list, pos);
216 }
217
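/* Updates (or creates) the inquiry cache entry for a reported device and
 * returns MGMT "device found" flags: MGMT_DEV_FOUND_LEGACY_PAIRING when the
 * device lacks SSP support, and MGMT_DEV_FOUND_CONFIRM_NAME when the name is
 * still unknown, which appears to let userspace decide whether name
 * resolution should be attempted.
 */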
218 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
219 bool name_known)
220 {
221 struct discovery_state *cache = &hdev->discovery;
222 struct inquiry_entry *ie;
223 u32 flags = 0;
224
225 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
226
227 hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
228
229 if (!data->ssp_mode)
230 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
231
232 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
233 if (ie) {
234 if (!ie->data.ssp_mode)
235 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
236
237 if (ie->name_state == NAME_NEEDED &&
238 data->rssi != ie->data.rssi) {
239 ie->data.rssi = data->rssi;
240 hci_inquiry_cache_update_resolve(hdev, ie);
241 }
242
243 goto update;
244 }
245
246 /* Entry not in the cache. Add new one. */
247 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
248 if (!ie) {
249 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
250 goto done;
251 }
252
253 list_add(&ie->all, &cache->all);
254
255 if (name_known) {
256 ie->name_state = NAME_KNOWN;
257 } else {
258 ie->name_state = NAME_NOT_KNOWN;
259 list_add(&ie->list, &cache->unknown);
260 }
261
262 update:
263 if (name_known && ie->name_state != NAME_KNOWN &&
264 ie->name_state != NAME_PENDING) {
265 ie->name_state = NAME_KNOWN;
266 list_del(&ie->list);
267 }
268
269 memcpy(&ie->data, data, sizeof(*data));
270 ie->timestamp = jiffies;
271 cache->timestamp = jiffies;
272
273 if (ie->name_state == NAME_NOT_KNOWN)
274 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
275
276 done:
277 return flags;
278 }
279
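/* Copies up to @num cached entries into @buf as a flat array of
 * struct inquiry_info (the layout handed back to userspace by the inquiry
 * ioctl) and returns how many were copied. Callers hold hdev->lock, as
 * hci_inquiry() below does.
 */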
280 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
281 {
282 struct discovery_state *cache = &hdev->discovery;
283 struct inquiry_info *info = (struct inquiry_info *) buf;
284 struct inquiry_entry *e;
285 int copied = 0;
286
287 list_for_each_entry(e, &cache->all, all) {
288 struct inquiry_data *data = &e->data;
289
290 if (copied >= num)
291 break;
292
293 bacpy(&info->bdaddr, &data->bdaddr);
294 info->pscan_rep_mode = data->pscan_rep_mode;
295 info->pscan_period_mode = data->pscan_period_mode;
296 info->pscan_mode = data->pscan_mode;
297 memcpy(info->dev_class, data->dev_class, 3);
298 info->clock_offset = data->clock_offset;
299
300 info++;
301 copied++;
302 }
303
304 BT_DBG("cache %p, copied %d", cache, copied);
305 return copied;
306 }
307
308 static int hci_inq_req(struct hci_request *req, unsigned long opt)
309 {
310 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
311 struct hci_dev *hdev = req->hdev;
312 struct hci_cp_inquiry cp;
313
314 BT_DBG("%s", hdev->name);
315
316 if (test_bit(HCI_INQUIRY, &hdev->flags))
317 return 0;
318
319 /* Start Inquiry */
320 memcpy(&cp.lap, &ir->lap, 3);
321 cp.length = ir->length;
322 cp.num_rsp = ir->num_rsp;
323 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
324
325 return 0;
326 }
327
328 int hci_inquiry(void __user *arg)
329 {
330 __u8 __user *ptr = arg;
331 struct hci_inquiry_req ir;
332 struct hci_dev *hdev;
333 int err = 0, do_inquiry = 0, max_rsp;
334 long timeo;
335 __u8 *buf;
336
337 if (copy_from_user(&ir, ptr, sizeof(ir)))
338 return -EFAULT;
339
340 hdev = hci_dev_get(ir.dev_id);
341 if (!hdev)
342 return -ENODEV;
343
344 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
345 err = -EBUSY;
346 goto done;
347 }
348
349 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
350 err = -EOPNOTSUPP;
351 goto done;
352 }
353
354 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
355 err = -EOPNOTSUPP;
356 goto done;
357 }
358
359 /* Restrict maximum inquiry length to 60 seconds */
360 if (ir.length > 60) {
361 err = -EINVAL;
362 goto done;
363 }
364
365 hci_dev_lock(hdev);
366 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
367 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
368 hci_inquiry_cache_flush(hdev);
369 do_inquiry = 1;
370 }
371 hci_dev_unlock(hdev);
372
373 timeo = ir.length * msecs_to_jiffies(2000);
374
375 if (do_inquiry) {
376 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
377 timeo, NULL);
378 if (err < 0)
379 goto done;
380
381 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
382 * cleared). If it is interrupted by a signal, return -EINTR.
383 */
384 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
385 TASK_INTERRUPTIBLE)) {
386 err = -EINTR;
387 goto done;
388 }
389 }
390
391 /* For an unlimited number of responses we will use a buffer with
392 * 255 entries
393 */
394 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
395
396 /* cache_dump can't sleep. Therefore we allocate a temp buffer and then
397 * copy it to user space.
398 */
399 buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
400 if (!buf) {
401 err = -ENOMEM;
402 goto done;
403 }
404
405 hci_dev_lock(hdev);
406 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
407 hci_dev_unlock(hdev);
408
409 BT_DBG("num_rsp %d", ir.num_rsp);
410
411 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
412 ptr += sizeof(ir);
413 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
414 ir.num_rsp))
415 err = -EFAULT;
416 } else
417 err = -EFAULT;
418
419 kfree(buf);
420
421 done:
422 hci_dev_put(hdev);
423 return err;
424 }
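/* For reference, a rough sketch of how userspace typically drives the ioctl
 * handled above. Illustrative only: it assumes the usual uapi/BlueZ
 * definitions (struct hci_inquiry_req, struct inquiry_info, HCIINQUIRY,
 * IREQ_CACHE_FLUSH) from <bluetooth/hci.h>, and error handling is omitted:
 *
 *	int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *	uint8_t buf[sizeof(struct hci_inquiry_req) +
 *		    255 * sizeof(struct inquiry_info)];
 *	struct hci_inquiry_req *ir = (void *)buf;
 *
 *	memset(ir, 0, sizeof(*ir));
 *	ir->dev_id  = 0;                       (hci0)
 *	ir->length  = 8;                       (in units of 1.28 s)
 *	ir->num_rsp = 0;                       (0 = unlimited, capped at 255)
 *	ir->flags   = IREQ_CACHE_FLUSH;
 *	memcpy(ir->lap, (uint8_t[]){ 0x33, 0x8b, 0x9e }, 3);   (GIAC)
 *
 *	if (ioctl(dd, HCIINQUIRY, (unsigned long)buf) >= 0)
 *		read ir->num_rsp inquiry_info entries that follow ir in buf
 */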
425
426 static int hci_dev_do_open(struct hci_dev *hdev)
427 {
428 int ret = 0;
429
430 BT_DBG("%s %p", hdev->name, hdev);
431
432 hci_req_sync_lock(hdev);
433
434 ret = hci_dev_open_sync(hdev);
435
436 hci_req_sync_unlock(hdev);
437 return ret;
438 }
439
440 /* ---- HCI ioctl helpers ---- */
441
442 int hci_dev_open(__u16 dev)
443 {
444 struct hci_dev *hdev;
445 int err;
446
447 hdev = hci_dev_get(dev);
448 if (!hdev)
449 return -ENODEV;
450
451 /* Devices that are marked as unconfigured can only be powered
452 * up as user channel. Trying to bring them up as normal devices
453 * will result in a failure. Only user channel operation is
454 * possible.
455 *
456 * When this function is called for a user channel, the flag
457 * HCI_USER_CHANNEL will be set first before attempting to
458 * open the device.
459 */
460 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
461 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
462 err = -EOPNOTSUPP;
463 goto done;
464 }
465
466 /* We need to ensure that no other power on/off work is pending
467 * before proceeding to call hci_dev_do_open. This is
468 * particularly important if the setup procedure has not yet
469 * completed.
470 */
471 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
472 cancel_delayed_work(&hdev->power_off);
473
474 /* After this call it is guaranteed that the setup procedure
475 * has finished. This means that error conditions like RFKILL
476 * or no valid public or static random address apply.
477 */
478 flush_workqueue(hdev->req_workqueue);
479
480 /* For controllers not using the management interface and that
481 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
482 * so that pairing works for them. Once the management interface
483 * is in use this bit will be cleared again and userspace has
484 * to explicitly enable it.
485 */
486 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
487 !hci_dev_test_flag(hdev, HCI_MGMT))
488 hci_dev_set_flag(hdev, HCI_BONDABLE);
489
490 err = hci_dev_do_open(hdev);
491
492 done:
493 hci_dev_put(hdev);
494 return err;
495 }
496
497 int hci_dev_do_close(struct hci_dev *hdev)
498 {
499 int err;
500
501 BT_DBG("%s %p", hdev->name, hdev);
502
503 hci_req_sync_lock(hdev);
504
505 err = hci_dev_close_sync(hdev);
506
507 hci_req_sync_unlock(hdev);
508
509 return err;
510 }
511
512 int hci_dev_close(__u16 dev)
513 {
514 struct hci_dev *hdev;
515 int err;
516
517 hdev = hci_dev_get(dev);
518 if (!hdev)
519 return -ENODEV;
520
521 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
522 err = -EBUSY;
523 goto done;
524 }
525
526 cancel_work_sync(&hdev->power_on);
527 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
528 cancel_delayed_work(&hdev->power_off);
529
530 err = hci_dev_do_close(hdev);
531
532 done:
533 hci_dev_put(hdev);
534 return err;
535 }
536
537 static int hci_dev_do_reset(struct hci_dev *hdev)
538 {
539 int ret;
540
541 BT_DBG("%s %p", hdev->name, hdev);
542
543 hci_req_sync_lock(hdev);
544
545 /* Drop queues */
546 skb_queue_purge(&hdev->rx_q);
547 skb_queue_purge(&hdev->cmd_q);
548
549 /* Cancel these to avoid queueing non-chained pending work */
550 hci_dev_set_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
551 /* Wait for
552 *
553 * if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
554 * queue_delayed_work(&hdev->{cmd,ncmd}_timer)
555 *
556 * inside RCU section to see the flag or complete scheduling.
557 */
558 synchronize_rcu();
559 /* Explicitly cancel works in case scheduled after setting the flag. */
560 cancel_delayed_work(&hdev->cmd_timer);
561 cancel_delayed_work(&hdev->ncmd_timer);
562
563 /* Avoid potential lockdep warnings from the *_flush() calls by
564 * ensuring the workqueue is empty up front.
565 */
566 drain_workqueue(hdev->workqueue);
567
568 hci_dev_lock(hdev);
569 hci_inquiry_cache_flush(hdev);
570 hci_conn_hash_flush(hdev);
571 hci_dev_unlock(hdev);
572
573 if (hdev->flush)
574 hdev->flush(hdev);
575
576 hci_dev_clear_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
577
578 atomic_set(&hdev->cmd_cnt, 1);
579 hdev->acl_cnt = 0;
580 hdev->sco_cnt = 0;
581 hdev->le_cnt = 0;
582 hdev->iso_cnt = 0;
583
584 ret = hci_reset_sync(hdev);
585
586 hci_req_sync_unlock(hdev);
587 return ret;
588 }
589
590 int hci_dev_reset(__u16 dev)
591 {
592 struct hci_dev *hdev;
593 int err;
594
595 hdev = hci_dev_get(dev);
596 if (!hdev)
597 return -ENODEV;
598
599 if (!test_bit(HCI_UP, &hdev->flags)) {
600 err = -ENETDOWN;
601 goto done;
602 }
603
604 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
605 err = -EBUSY;
606 goto done;
607 }
608
609 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
610 err = -EOPNOTSUPP;
611 goto done;
612 }
613
614 err = hci_dev_do_reset(hdev);
615
616 done:
617 hci_dev_put(hdev);
618 return err;
619 }
620
621 int hci_dev_reset_stat(__u16 dev)
622 {
623 struct hci_dev *hdev;
624 int ret = 0;
625
626 hdev = hci_dev_get(dev);
627 if (!hdev)
628 return -ENODEV;
629
630 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
631 ret = -EBUSY;
632 goto done;
633 }
634
635 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
636 ret = -EOPNOTSUPP;
637 goto done;
638 }
639
640 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
641
642 done:
643 hci_dev_put(hdev);
644 return ret;
645 }
646
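/* Mirrors a raw Write Scan Enable change (from the HCISETSCAN ioctl below)
 * into the mgmt-visible flags: SCAN_PAGE drives HCI_CONNECTABLE and
 * SCAN_INQUIRY drives HCI_DISCOVERABLE. If either flag actually changed
 * while the management interface is in use, the new settings are announced
 * to userspace.
 */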
647 static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan)
648 {
649 bool conn_changed, discov_changed;
650
651 BT_DBG("%s scan 0x%02x", hdev->name, scan);
652
653 if ((scan & SCAN_PAGE))
654 conn_changed = !hci_dev_test_and_set_flag(hdev,
655 HCI_CONNECTABLE);
656 else
657 conn_changed = hci_dev_test_and_clear_flag(hdev,
658 HCI_CONNECTABLE);
659
660 if ((scan & SCAN_INQUIRY)) {
661 discov_changed = !hci_dev_test_and_set_flag(hdev,
662 HCI_DISCOVERABLE);
663 } else {
664 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
665 discov_changed = hci_dev_test_and_clear_flag(hdev,
666 HCI_DISCOVERABLE);
667 }
668
669 if (!hci_dev_test_flag(hdev, HCI_MGMT))
670 return;
671
672 if (conn_changed || discov_changed) {
673 /* In case this was disabled through mgmt */
674 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
675
676 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
677 hci_update_adv_data(hdev, hdev->cur_adv_instance);
678
679 mgmt_new_settings(hdev);
680 }
681 }
682
683 int hci_dev_cmd(unsigned int cmd, void __user *arg)
684 {
685 struct hci_dev *hdev;
686 struct hci_dev_req dr;
687 __le16 policy;
688 int err = 0;
689
690 if (copy_from_user(&dr, arg, sizeof(dr)))
691 return -EFAULT;
692
693 hdev = hci_dev_get(dr.dev_id);
694 if (!hdev)
695 return -ENODEV;
696
697 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
698 err = -EBUSY;
699 goto done;
700 }
701
702 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
703 err = -EOPNOTSUPP;
704 goto done;
705 }
706
707 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
708 err = -EOPNOTSUPP;
709 goto done;
710 }
711
712 switch (cmd) {
713 case HCISETAUTH:
714 err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_AUTH_ENABLE,
715 1, &dr.dev_opt, HCI_CMD_TIMEOUT);
716 break;
717
718 case HCISETENCRYPT:
719 if (!lmp_encrypt_capable(hdev)) {
720 err = -EOPNOTSUPP;
721 break;
722 }
723
724 if (!test_bit(HCI_AUTH, &hdev->flags)) {
725 /* Auth must be enabled first */
726 err = hci_cmd_sync_status(hdev,
727 HCI_OP_WRITE_AUTH_ENABLE,
728 1, &dr.dev_opt,
729 HCI_CMD_TIMEOUT);
730 if (err)
731 break;
732 }
733
734 err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_ENCRYPT_MODE,
735 1, &dr.dev_opt, HCI_CMD_TIMEOUT);
736 break;
737
738 case HCISETSCAN:
739 err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE,
740 1, &dr.dev_opt, HCI_CMD_TIMEOUT);
741
742 /* Ensure that the connectable and discoverable states
743 * get correctly modified as this was a non-mgmt change.
744 */
745 if (!err)
746 hci_update_passive_scan_state(hdev, dr.dev_opt);
747 break;
748
749 case HCISETLINKPOL:
750 policy = cpu_to_le16(dr.dev_opt);
751
752 err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
753 2, &policy, HCI_CMD_TIMEOUT);
754 break;
755
756 case HCISETLINKMODE:
757 hdev->link_mode = ((__u16) dr.dev_opt) &
758 (HCI_LM_MASTER | HCI_LM_ACCEPT);
759 break;
760
761 case HCISETPTYPE:
762 if (hdev->pkt_type == (__u16) dr.dev_opt)
763 break;
764
765 hdev->pkt_type = (__u16) dr.dev_opt;
766 mgmt_phy_configuration_changed(hdev, NULL);
767 break;
768
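	/* For the two MTU ioctls below dev_opt packs a pair of 16-bit values;
	 * judging from the pointer arithmetic, on a little-endian layout the
	 * low half carries the packet count and the high half the MTU. This
	 * is an observation from the code rather than documented uapi
	 * behaviour.
	 */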
769 case HCISETACLMTU:
770 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
771 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
772 break;
773
774 case HCISETSCOMTU:
775 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
776 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
777 break;
778
779 default:
780 err = -EINVAL;
781 break;
782 }
783
784 done:
785 hci_dev_put(hdev);
786 return err;
787 }
788
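/* Implements the device-list ioctl: userspace passes a hci_dev_list_req
 * whose dev_num says how many hci_dev_req slots follow, and we fill in up
 * to that many (dev_id, flags) pairs before copying the possibly shorter
 * result back. Presumably this is the HCIGETDEVLIST path dispatched from
 * the HCI socket ioctl handler.
 */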
789 int hci_get_dev_list(void __user *arg)
790 {
791 struct hci_dev *hdev;
792 struct hci_dev_list_req *dl;
793 struct hci_dev_req *dr;
794 int n = 0, size, err;
795 __u16 dev_num;
796
797 if (get_user(dev_num, (__u16 __user *) arg))
798 return -EFAULT;
799
800 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
801 return -EINVAL;
802
803 size = sizeof(*dl) + dev_num * sizeof(*dr);
804
805 dl = kzalloc(size, GFP_KERNEL);
806 if (!dl)
807 return -ENOMEM;
808
809 dr = dl->dev_req;
810
811 read_lock(&hci_dev_list_lock);
812 list_for_each_entry(hdev, &hci_dev_list, list) {
813 unsigned long flags = hdev->flags;
814
815 /* When the auto-off is configured it means the transport
816 * is running, but in that case still indicate that the
817 * device is actually down.
818 */
819 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
820 flags &= ~BIT(HCI_UP);
821
822 (dr + n)->dev_id = hdev->id;
823 (dr + n)->dev_opt = flags;
824
825 if (++n >= dev_num)
826 break;
827 }
828 read_unlock(&hci_dev_list_lock);
829
830 dl->dev_num = n;
831 size = sizeof(*dl) + n * sizeof(*dr);
832
833 err = copy_to_user(arg, dl, size);
834 kfree(dl);
835
836 return err ? -EFAULT : 0;
837 }
838
839 int hci_get_dev_info(void __user *arg)
840 {
841 struct hci_dev *hdev;
842 struct hci_dev_info di;
843 unsigned long flags;
844 int err = 0;
845
846 if (copy_from_user(&di, arg, sizeof(di)))
847 return -EFAULT;
848
849 hdev = hci_dev_get(di.dev_id);
850 if (!hdev)
851 return -ENODEV;
852
853 /* When the auto-off is configured it means the transport
854 * is running, but in that case still indicate that the
855 * device is actually down.
856 */
857 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
858 flags = hdev->flags & ~BIT(HCI_UP);
859 else
860 flags = hdev->flags;
861
862 strscpy(di.name, hdev->name, sizeof(di.name));
863 di.bdaddr = hdev->bdaddr;
864 di.type = (hdev->bus & 0x0f);
865 di.flags = flags;
866 di.pkt_type = hdev->pkt_type;
867 if (lmp_bredr_capable(hdev)) {
868 di.acl_mtu = hdev->acl_mtu;
869 di.acl_pkts = hdev->acl_pkts;
870 di.sco_mtu = hdev->sco_mtu;
871 di.sco_pkts = hdev->sco_pkts;
872 } else {
873 di.acl_mtu = hdev->le_mtu;
874 di.acl_pkts = hdev->le_pkts;
875 di.sco_mtu = 0;
876 di.sco_pkts = 0;
877 }
878 di.link_policy = hdev->link_policy;
879 di.link_mode = hdev->link_mode;
880
881 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
882 memcpy(&di.features, &hdev->features, sizeof(di.features));
883
884 if (copy_to_user(arg, &di, sizeof(di)))
885 err = -EFAULT;
886
887 hci_dev_put(hdev);
888
889 return err;
890 }
891
892 /* ---- Interface to HCI drivers ---- */
893
894 static int hci_rfkill_set_block(void *data, bool blocked)
895 {
896 struct hci_dev *hdev = data;
897
898 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
899
900 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
901 return -EBUSY;
902
903 if (blocked) {
904 hci_dev_set_flag(hdev, HCI_RFKILLED);
905 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
906 !hci_dev_test_flag(hdev, HCI_CONFIG))
907 hci_dev_do_close(hdev);
908 } else {
909 hci_dev_clear_flag(hdev, HCI_RFKILLED);
910 }
911
912 return 0;
913 }
914
915 static const struct rfkill_ops hci_rfkill_ops = {
916 .set_block = hci_rfkill_set_block,
917 };
918
919 static void hci_power_on(struct work_struct *work)
920 {
921 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
922 int err;
923
924 BT_DBG("%s", hdev->name);
925
926 if (test_bit(HCI_UP, &hdev->flags) &&
927 hci_dev_test_flag(hdev, HCI_MGMT) &&
928 hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
929 cancel_delayed_work(&hdev->power_off);
930 err = hci_powered_update_sync(hdev);
931 mgmt_power_on(hdev, err);
932 return;
933 }
934
935 err = hci_dev_do_open(hdev);
936 if (err < 0) {
937 hci_dev_lock(hdev);
938 mgmt_set_powered_failed(hdev, err);
939 hci_dev_unlock(hdev);
940 return;
941 }
942
943 /* During the HCI setup phase, a few error conditions are
944 * ignored and they need to be checked now. If they are still
945 * valid, it is important to turn the device back off.
946 */
947 if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
948 hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
949 (!bacmp(&hdev->bdaddr, BDADDR_ANY) &&
950 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
951 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
952 hci_dev_do_close(hdev);
953 } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
954 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
955 HCI_AUTO_OFF_TIMEOUT);
956 }
957
958 if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
959 /* For unconfigured devices, set the HCI_RAW flag
960 * so that userspace can easily identify them.
961 */
962 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
963 set_bit(HCI_RAW, &hdev->flags);
964
965 /* For fully configured devices, this will send
966 * the Index Added event. For unconfigured devices,
967 * it will send an Unconfigured Index Added event.
968 *
969 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
970 * and no event will be sent.
971 */
972 mgmt_index_added(hdev);
973 } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
974 /* When the controller is now configured, then it
975 * is important to clear the HCI_RAW flag.
976 */
977 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
978 clear_bit(HCI_RAW, &hdev->flags);
979
980 /* Powering on the controller with HCI_CONFIG set only
981 * happens with the transition from unconfigured to
982 * configured. This will send the Index Added event.
983 */
984 mgmt_index_added(hdev);
985 }
986 }
987
988 static void hci_power_off(struct work_struct *work)
989 {
990 struct hci_dev *hdev = container_of(work, struct hci_dev,
991 power_off.work);
992
993 BT_DBG("%s", hdev->name);
994
995 hci_dev_do_close(hdev);
996 }
997
998 static void hci_error_reset(struct work_struct *work)
999 {
1000 struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
1001
1002 hci_dev_hold(hdev);
1003 BT_DBG("%s", hdev->name);
1004
1005 if (hdev->hw_error)
1006 hdev->hw_error(hdev, hdev->hw_error_code);
1007 else
1008 bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
1009
1010 if (!hci_dev_do_close(hdev))
1011 hci_dev_do_open(hdev);
1012
1013 hci_dev_put(hdev);
1014 }
1015
1016 void hci_uuids_clear(struct hci_dev *hdev)
1017 {
1018 struct bt_uuid *uuid, *tmp;
1019
1020 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1021 list_del(&uuid->list);
1022 kfree(uuid);
1023 }
1024 }
1025
1026 void hci_link_keys_clear(struct hci_dev *hdev)
1027 {
1028 struct link_key *key, *tmp;
1029
1030 list_for_each_entry_safe(key, tmp, &hdev->link_keys, list) {
1031 list_del_rcu(&key->list);
1032 kfree_rcu(key, rcu);
1033 }
1034 }
1035
1036 void hci_smp_ltks_clear(struct hci_dev *hdev)
1037 {
1038 struct smp_ltk *k, *tmp;
1039
1040 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1041 list_del_rcu(&k->list);
1042 kfree_rcu(k, rcu);
1043 }
1044 }
1045
1046 void hci_smp_irks_clear(struct hci_dev *hdev)
1047 {
1048 struct smp_irk *k, *tmp;
1049
1050 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
1051 list_del_rcu(&k->list);
1052 kfree_rcu(k, rcu);
1053 }
1054 }
1055
1056 void hci_blocked_keys_clear(struct hci_dev *hdev)
1057 {
1058 struct blocked_key *b, *tmp;
1059
1060 list_for_each_entry_safe(b, tmp, &hdev->blocked_keys, list) {
1061 list_del_rcu(&b->list);
1062 kfree_rcu(b, rcu);
1063 }
1064 }
1065
1066 bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
1067 {
1068 bool blocked = false;
1069 struct blocked_key *b;
1070
1071 rcu_read_lock();
1072 list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
1073 if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
1074 blocked = true;
1075 break;
1076 }
1077 }
1078
1079 rcu_read_unlock();
1080 return blocked;
1081 }
1082
1083 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1084 {
1085 struct link_key *k;
1086
1087 rcu_read_lock();
1088 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
1089 if (bacmp(bdaddr, &k->bdaddr) == 0) {
1090 rcu_read_unlock();
1091
1092 if (hci_is_blocked_key(hdev,
1093 HCI_BLOCKED_KEY_TYPE_LINKKEY,
1094 k->val)) {
1095 bt_dev_warn_ratelimited(hdev,
1096 "Link key blocked for %pMR",
1097 &k->bdaddr);
1098 return NULL;
1099 }
1100
1101 return k;
1102 }
1103 }
1104 rcu_read_unlock();
1105
1106 return NULL;
1107 }
1108
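/* Decides whether a freshly created BR/EDR link key should be treated as
 * persistent (and, presumably, handed to userspace for storage). Roughly:
 * legacy and SC/LE-derived keys are persistent, debug keys and
 * changed-combination keys without a known predecessor are not, and the
 * remaining cases depend on whether either side asked for bonding.
 */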
1109 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1110 u8 key_type, u8 old_key_type)
1111 {
1112 /* Legacy key */
1113 if (key_type < 0x03)
1114 return true;
1115
1116 /* Debug keys are insecure so don't store them persistently */
1117 if (key_type == HCI_LK_DEBUG_COMBINATION)
1118 return false;
1119
1120 /* Changed combination key and there's no previous one */
1121 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1122 return false;
1123
1124 /* Security mode 3 case */
1125 if (!conn)
1126 return true;
1127
1128 /* BR/EDR key derived using SC from an LE link */
1129 if (conn->type == LE_LINK)
1130 return true;
1131
1132 /* Neither the local nor the remote side had no-bonding as a requirement */
1133 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1134 return true;
1135
1136 /* Local side had dedicated bonding as requirement */
1137 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1138 return true;
1139
1140 /* Remote side had dedicated bonding as requirement */
1141 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1142 return true;
1143
1144 /* If none of the above criteria match, then don't store the key
1145 * persistently */
1146 return false;
1147 }
1148
1149 static u8 ltk_role(u8 type)
1150 {
1151 if (type == SMP_LTK)
1152 return HCI_ROLE_MASTER;
1153
1154 return HCI_ROLE_SLAVE;
1155 }
1156
1157 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1158 u8 addr_type, u8 role)
1159 {
1160 struct smp_ltk *k;
1161
1162 rcu_read_lock();
1163 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
1164 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
1165 continue;
1166
1167 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
1168 rcu_read_unlock();
1169
1170 if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
1171 k->val)) {
1172 bt_dev_warn_ratelimited(hdev,
1173 "LTK blocked for %pMR",
1174 &k->bdaddr);
1175 return NULL;
1176 }
1177
1178 return k;
1179 }
1180 }
1181 rcu_read_unlock();
1182
1183 return NULL;
1184 }
1185
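/* Resolves a Resolvable Private Address to a stored IRK in two passes:
 * first a cheap match against the RPA cached in each entry, then a
 * cryptographic check via smp_irk_matches(), caching the RPA on success.
 * Blocked IRKs are filtered out before returning.
 */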
1186 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
1187 {
1188 struct smp_irk *irk_to_return = NULL;
1189 struct smp_irk *irk;
1190
1191 rcu_read_lock();
1192 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1193 if (!bacmp(&irk->rpa, rpa)) {
1194 irk_to_return = irk;
1195 goto done;
1196 }
1197 }
1198
1199 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1200 if (smp_irk_matches(hdev, irk->val, rpa)) {
1201 bacpy(&irk->rpa, rpa);
1202 irk_to_return = irk;
1203 goto done;
1204 }
1205 }
1206
1207 done:
1208 if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
1209 irk_to_return->val)) {
1210 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
1211 &irk_to_return->bdaddr);
1212 irk_to_return = NULL;
1213 }
1214
1215 rcu_read_unlock();
1216
1217 return irk_to_return;
1218 }
1219
1220 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1221 u8 addr_type)
1222 {
1223 struct smp_irk *irk_to_return = NULL;
1224 struct smp_irk *irk;
1225
1226 /* Identity Address must be public or static random */
1227 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
1228 return NULL;
1229
1230 rcu_read_lock();
1231 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1232 if (addr_type == irk->addr_type &&
1233 bacmp(bdaddr, &irk->bdaddr) == 0) {
1234 irk_to_return = irk;
1235 goto done;
1236 }
1237 }
1238
1239 done:
1240
1241 if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
1242 irk_to_return->val)) {
1243 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
1244 &irk_to_return->bdaddr);
1245 irk_to_return = NULL;
1246 }
1247
1248 rcu_read_unlock();
1249
1250 return irk_to_return;
1251 }
1252
1253 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
1254 bdaddr_t *bdaddr, u8 *val, u8 type,
1255 u8 pin_len, bool *persistent)
1256 {
1257 struct link_key *key, *old_key;
1258 u8 old_key_type;
1259
1260 old_key = hci_find_link_key(hdev, bdaddr);
1261 if (old_key) {
1262 old_key_type = old_key->type;
1263 key = old_key;
1264 } else {
1265 old_key_type = conn ? conn->key_type : 0xff;
1266 key = kzalloc(sizeof(*key), GFP_KERNEL);
1267 if (!key)
1268 return NULL;
1269 list_add_rcu(&key->list, &hdev->link_keys);
1270 }
1271
1272 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
1273
1274 /* Some buggy controller combinations generate a changed
1275 * combination key for legacy pairing even when there's no
1276 * previous key */
1277 if (type == HCI_LK_CHANGED_COMBINATION &&
1278 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1279 type = HCI_LK_COMBINATION;
1280 if (conn)
1281 conn->key_type = type;
1282 }
1283
1284 bacpy(&key->bdaddr, bdaddr);
1285 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1286 key->pin_len = pin_len;
1287
1288 if (type == HCI_LK_CHANGED_COMBINATION)
1289 key->type = old_key_type;
1290 else
1291 key->type = type;
1292
1293 if (persistent)
1294 *persistent = hci_persistent_key(hdev, conn, type,
1295 old_key_type);
1296
1297 return key;
1298 }
1299
1300 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1301 u8 addr_type, u8 type, u8 authenticated,
1302 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
1303 {
1304 struct smp_ltk *key, *old_key;
1305 u8 role = ltk_role(type);
1306
1307 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
1308 if (old_key)
1309 key = old_key;
1310 else {
1311 key = kzalloc(sizeof(*key), GFP_KERNEL);
1312 if (!key)
1313 return NULL;
1314 list_add_rcu(&key->list, &hdev->long_term_keys);
1315 }
1316
1317 bacpy(&key->bdaddr, bdaddr);
1318 key->bdaddr_type = addr_type;
1319 memcpy(key->val, tk, sizeof(key->val));
1320 key->authenticated = authenticated;
1321 key->ediv = ediv;
1322 key->rand = rand;
1323 key->enc_size = enc_size;
1324 key->type = type;
1325
1326 return key;
1327 }
1328
1329 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1330 u8 addr_type, u8 val[16], bdaddr_t *rpa)
1331 {
1332 struct smp_irk *irk;
1333
1334 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
1335 if (!irk) {
1336 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
1337 if (!irk)
1338 return NULL;
1339
1340 bacpy(&irk->bdaddr, bdaddr);
1341 irk->addr_type = addr_type;
1342
1343 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
1344 }
1345
1346 memcpy(irk->val, val, 16);
1347 bacpy(&irk->rpa, rpa);
1348
1349 return irk;
1350 }
1351
1352 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1353 {
1354 struct link_key *key;
1355
1356 key = hci_find_link_key(hdev, bdaddr);
1357 if (!key)
1358 return -ENOENT;
1359
1360 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1361
1362 list_del_rcu(&key->list);
1363 kfree_rcu(key, rcu);
1364
1365 return 0;
1366 }
1367
1368 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
1369 {
1370 struct smp_ltk *k, *tmp;
1371 int removed = 0;
1372
1373 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1374 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
1375 continue;
1376
1377 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1378
1379 list_del_rcu(&k->list);
1380 kfree_rcu(k, rcu);
1381 removed++;
1382 }
1383
1384 return removed ? 0 : -ENOENT;
1385 }
1386
1387 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
1388 {
1389 struct smp_irk *k, *tmp;
1390
1391 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
1392 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
1393 continue;
1394
1395 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1396
1397 list_del_rcu(&k->list);
1398 kfree_rcu(k, rcu);
1399 }
1400 }
1401
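/* Reports whether pairing material exists for the given address: a link key
 * for BR/EDR, or an LTK for LE. For LE the address is first mapped to its
 * identity address through a matching IRK, so a lookup by RPA still finds
 * keys stored under the identity address.
 */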
1402 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1403 {
1404 struct smp_ltk *k;
1405 struct smp_irk *irk;
1406 u8 addr_type;
1407
1408 if (type == BDADDR_BREDR) {
1409 if (hci_find_link_key(hdev, bdaddr))
1410 return true;
1411 return false;
1412 }
1413
1414 /* Convert to HCI addr type which struct smp_ltk uses */
1415 if (type == BDADDR_LE_PUBLIC)
1416 addr_type = ADDR_LE_DEV_PUBLIC;
1417 else
1418 addr_type = ADDR_LE_DEV_RANDOM;
1419
1420 irk = hci_get_irk(hdev, bdaddr, addr_type);
1421 if (irk) {
1422 bdaddr = &irk->bdaddr;
1423 addr_type = irk->addr_type;
1424 }
1425
1426 rcu_read_lock();
1427 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
1428 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
1429 rcu_read_unlock();
1430 return true;
1431 }
1432 }
1433 rcu_read_unlock();
1434
1435 return false;
1436 }
1437
1438 /* HCI command timer function */
1439 static void hci_cmd_timeout(struct work_struct *work)
1440 {
1441 struct hci_dev *hdev = container_of(work, struct hci_dev,
1442 cmd_timer.work);
1443
1444 if (hdev->req_skb) {
1445 u16 opcode = hci_skb_opcode(hdev->req_skb);
1446
1447 bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
1448
1449 hci_cmd_sync_cancel_sync(hdev, ETIMEDOUT);
1450 } else {
1451 bt_dev_err(hdev, "command tx timeout");
1452 }
1453
1454 if (hdev->cmd_timeout)
1455 hdev->cmd_timeout(hdev);
1456
1457 atomic_set(&hdev->cmd_cnt, 1);
1458 queue_work(hdev->workqueue, &hdev->cmd_work);
1459 }
1460
1461 /* HCI ncmd timer function */
1462 static void hci_ncmd_timeout(struct work_struct *work)
1463 {
1464 struct hci_dev *hdev = container_of(work, struct hci_dev,
1465 ncmd_timer.work);
1466
1467 bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0");
1468
1469 /* During HCI_INIT phase no events can be injected if the ncmd timer
1470 * triggers since the procedure has its own timeout handling.
1471 */
1472 if (test_bit(HCI_INIT, &hdev->flags))
1473 return;
1474
1475 /* This is an irrecoverable state, inject hardware error event */
1476 hci_reset_dev(hdev);
1477 }
1478
1479 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1480 bdaddr_t *bdaddr, u8 bdaddr_type)
1481 {
1482 struct oob_data *data;
1483
1484 list_for_each_entry(data, &hdev->remote_oob_data, list) {
1485 if (bacmp(bdaddr, &data->bdaddr) != 0)
1486 continue;
1487 if (data->bdaddr_type != bdaddr_type)
1488 continue;
1489 return data;
1490 }
1491
1492 return NULL;
1493 }
1494
1495 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
1496 u8 bdaddr_type)
1497 {
1498 struct oob_data *data;
1499
1500 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
1501 if (!data)
1502 return -ENOENT;
1503
1504 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
1505
1506 list_del(&data->list);
1507 kfree(data);
1508
1509 return 0;
1510 }
1511
1512 void hci_remote_oob_data_clear(struct hci_dev *hdev)
1513 {
1514 struct oob_data *data, *n;
1515
1516 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1517 list_del(&data->list);
1518 kfree(data);
1519 }
1520 }
1521
1522 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
1523 u8 bdaddr_type, u8 *hash192, u8 *rand192,
1524 u8 *hash256, u8 *rand256)
1525 {
1526 struct oob_data *data;
1527
1528 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
1529 if (!data) {
1530 data = kmalloc(sizeof(*data), GFP_KERNEL);
1531 if (!data)
1532 return -ENOMEM;
1533
1534 bacpy(&data->bdaddr, bdaddr);
1535 data->bdaddr_type = bdaddr_type;
1536 list_add(&data->list, &hdev->remote_oob_data);
1537 }
1538
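	/* data->present is effectively a two-bit mask describing which OOB
	 * values are valid: bit 0 for the P-192 hash/rand pair and bit 1 for
	 * the P-256 pair, which yields the 0x00-0x03 values assigned below.
	 */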
1539 if (hash192 && rand192) {
1540 memcpy(data->hash192, hash192, sizeof(data->hash192));
1541 memcpy(data->rand192, rand192, sizeof(data->rand192));
1542 if (hash256 && rand256)
1543 data->present = 0x03;
1544 } else {
1545 memset(data->hash192, 0, sizeof(data->hash192));
1546 memset(data->rand192, 0, sizeof(data->rand192));
1547 if (hash256 && rand256)
1548 data->present = 0x02;
1549 else
1550 data->present = 0x00;
1551 }
1552
1553 if (hash256 && rand256) {
1554 memcpy(data->hash256, hash256, sizeof(data->hash256));
1555 memcpy(data->rand256, rand256, sizeof(data->rand256));
1556 } else {
1557 memset(data->hash256, 0, sizeof(data->hash256));
1558 memset(data->rand256, 0, sizeof(data->rand256));
1559 if (hash192 && rand192)
1560 data->present = 0x01;
1561 }
1562
1563 BT_DBG("%s for %pMR", hdev->name, bdaddr);
1564
1565 return 0;
1566 }
1567
1568 /* This function requires the caller holds hdev->lock */
1569 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
1570 {
1571 struct adv_info *adv_instance;
1572
1573 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
1574 if (adv_instance->instance == instance)
1575 return adv_instance;
1576 }
1577
1578 return NULL;
1579 }
1580
1581 /* This function requires the caller holds hdev->lock */
1582 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
1583 {
1584 struct adv_info *cur_instance;
1585
1586 cur_instance = hci_find_adv_instance(hdev, instance);
1587 if (!cur_instance)
1588 return NULL;
1589
1590 if (cur_instance == list_last_entry(&hdev->adv_instances,
1591 struct adv_info, list))
1592 return list_first_entry(&hdev->adv_instances,
1593 struct adv_info, list);
1594 else
1595 return list_next_entry(cur_instance, list);
1596 }
1597
1598 /* This function requires the caller holds hdev->lock */
1599 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
1600 {
1601 struct adv_info *adv_instance;
1602
1603 adv_instance = hci_find_adv_instance(hdev, instance);
1604 if (!adv_instance)
1605 return -ENOENT;
1606
1607 BT_DBG("%s removing %dMR", hdev->name, instance);
1608
1609 if (hdev->cur_adv_instance == instance) {
1610 if (hdev->adv_instance_timeout) {
1611 cancel_delayed_work(&hdev->adv_instance_expire);
1612 hdev->adv_instance_timeout = 0;
1613 }
1614 hdev->cur_adv_instance = 0x00;
1615 }
1616
1617 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1618
1619 list_del(&adv_instance->list);
1620 kfree(adv_instance);
1621
1622 hdev->adv_instance_cnt--;
1623
1624 return 0;
1625 }
1626
1627 void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
1628 {
1629 struct adv_info *adv_instance, *n;
1630
1631 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
1632 adv_instance->rpa_expired = rpa_expired;
1633 }
1634
1635 /* This function requires the caller holds hdev->lock */
1636 void hci_adv_instances_clear(struct hci_dev *hdev)
1637 {
1638 struct adv_info *adv_instance, *n;
1639
1640 if (hdev->adv_instance_timeout) {
1641 cancel_delayed_work(&hdev->adv_instance_expire);
1642 hdev->adv_instance_timeout = 0;
1643 }
1644
1645 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
1646 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1647 list_del(&adv_instance->list);
1648 kfree(adv_instance);
1649 }
1650
1651 hdev->adv_instance_cnt = 0;
1652 hdev->cur_adv_instance = 0x00;
1653 }
1654
1655 static void adv_instance_rpa_expired(struct work_struct *work)
1656 {
1657 struct adv_info *adv_instance = container_of(work, struct adv_info,
1658 rpa_expired_cb.work);
1659
1660 BT_DBG("");
1661
1662 adv_instance->rpa_expired = true;
1663 }
1664
1665 /* This function requires the caller holds hdev->lock */
1666 struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance,
1667 u32 flags, u16 adv_data_len, u8 *adv_data,
1668 u16 scan_rsp_len, u8 *scan_rsp_data,
1669 u16 timeout, u16 duration, s8 tx_power,
1670 u32 min_interval, u32 max_interval,
1671 u8 mesh_handle)
1672 {
1673 struct adv_info *adv;
1674
1675 adv = hci_find_adv_instance(hdev, instance);
1676 if (adv) {
1677 memset(adv->adv_data, 0, sizeof(adv->adv_data));
1678 memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
1679 memset(adv->per_adv_data, 0, sizeof(adv->per_adv_data));
1680 } else {
1681 if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
1682 instance < 1 || instance > hdev->le_num_of_adv_sets + 1)
1683 return ERR_PTR(-EOVERFLOW);
1684
1685 adv = kzalloc(sizeof(*adv), GFP_KERNEL);
1686 if (!adv)
1687 return ERR_PTR(-ENOMEM);
1688
1689 adv->pending = true;
1690 adv->instance = instance;
1691 list_add(&adv->list, &hdev->adv_instances);
1692 hdev->adv_instance_cnt++;
1693 }
1694
1695 adv->flags = flags;
1696 adv->min_interval = min_interval;
1697 adv->max_interval = max_interval;
1698 adv->tx_power = tx_power;
1699 /* Defining a mesh_handle changes the timing units to ms,
1700 * rather than seconds, and ties the instance to the requested
1701 * mesh_tx queue.
1702 */
1703 adv->mesh = mesh_handle;
1704
1705 hci_set_adv_instance_data(hdev, instance, adv_data_len, adv_data,
1706 scan_rsp_len, scan_rsp_data);
1707
1708 adv->timeout = timeout;
1709 adv->remaining_time = timeout;
1710
1711 if (duration == 0)
1712 adv->duration = hdev->def_multi_adv_rotation_duration;
1713 else
1714 adv->duration = duration;
1715
1716 INIT_DELAYED_WORK(&adv->rpa_expired_cb, adv_instance_rpa_expired);
1717
1718 BT_DBG("%s for %dMR", hdev->name, instance);
1719
1720 return adv;
1721 }
1722
1723 /* This function requires the caller holds hdev->lock */
1724 struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance,
1725 u32 flags, u8 data_len, u8 *data,
1726 u32 min_interval, u32 max_interval)
1727 {
1728 struct adv_info *adv;
1729
1730 adv = hci_add_adv_instance(hdev, instance, flags, 0, NULL, 0, NULL,
1731 0, 0, HCI_ADV_TX_POWER_NO_PREFERENCE,
1732 min_interval, max_interval, 0);
1733 if (IS_ERR(adv))
1734 return adv;
1735
1736 adv->periodic = true;
1737 adv->per_adv_data_len = data_len;
1738
1739 if (data)
1740 memcpy(adv->per_adv_data, data, data_len);
1741
1742 return adv;
1743 }
1744
1745 /* This function requires the caller holds hdev->lock */
1746 int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
1747 u16 adv_data_len, u8 *adv_data,
1748 u16 scan_rsp_len, u8 *scan_rsp_data)
1749 {
1750 struct adv_info *adv;
1751
1752 adv = hci_find_adv_instance(hdev, instance);
1753
1754 /* If advertisement doesn't exist, we can't modify its data */
1755 if (!adv)
1756 return -ENOENT;
1757
1758 if (adv_data_len && ADV_DATA_CMP(adv, adv_data, adv_data_len)) {
1759 memset(adv->adv_data, 0, sizeof(adv->adv_data));
1760 memcpy(adv->adv_data, adv_data, adv_data_len);
1761 adv->adv_data_len = adv_data_len;
1762 adv->adv_data_changed = true;
1763 }
1764
1765 if (scan_rsp_len && SCAN_RSP_CMP(adv, scan_rsp_data, scan_rsp_len)) {
1766 memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
1767 memcpy(adv->scan_rsp_data, scan_rsp_data, scan_rsp_len);
1768 adv->scan_rsp_len = scan_rsp_len;
1769 adv->scan_rsp_changed = true;
1770 }
1771
1772 /* Mark as changed if there are flags which would affect it */
1773 if (((adv->flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) ||
1774 adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1775 adv->scan_rsp_changed = true;
1776
1777 return 0;
1778 }
1779
1780 /* This function requires the caller holds hdev->lock */
1781 u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance)
1782 {
1783 u32 flags;
1784 struct adv_info *adv;
1785
1786 if (instance == 0x00) {
1787 /* Instance 0 always manages the "Tx Power" and "Flags"
1788 * fields
1789 */
1790 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
1791
1792 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
1793 * corresponds to the "connectable" instance flag.
1794 */
1795 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
1796 flags |= MGMT_ADV_FLAG_CONNECTABLE;
1797
1798 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1799 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
1800 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1801 flags |= MGMT_ADV_FLAG_DISCOV;
1802
1803 return flags;
1804 }
1805
1806 adv = hci_find_adv_instance(hdev, instance);
1807
1808 /* Return 0 when we got an invalid instance identifier. */
1809 if (!adv)
1810 return 0;
1811
1812 return adv->flags;
1813 }
1814
1815 bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
1816 {
1817 struct adv_info *adv;
1818
1819 /* Instance 0x00 always sets the local name */
1820 if (instance == 0x00)
1821 return true;
1822
1823 adv = hci_find_adv_instance(hdev, instance);
1824 if (!adv)
1825 return false;
1826
1827 if (adv->flags & MGMT_ADV_FLAG_APPEARANCE ||
1828 adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1829 return true;
1830
1831 return adv->scan_rsp_len ? true : false;
1832 }
1833
1834 /* This function requires the caller holds hdev->lock */
1835 void hci_adv_monitors_clear(struct hci_dev *hdev)
1836 {
1837 struct adv_monitor *monitor;
1838 int handle;
1839
1840 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
1841 hci_free_adv_monitor(hdev, monitor);
1842
1843 idr_destroy(&hdev->adv_monitors_idr);
1844 }
1845
1846 /* Frees the monitor structure and does some bookkeeping.
1847 * This function requires the caller holds hdev->lock.
1848 */
1849 void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
1850 {
1851 struct adv_pattern *pattern;
1852 struct adv_pattern *tmp;
1853
1854 if (!monitor)
1855 return;
1856
1857 list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
1858 list_del(&pattern->list);
1859 kfree(pattern);
1860 }
1861
1862 if (monitor->handle)
1863 idr_remove(&hdev->adv_monitors_idr, monitor->handle);
1864
1865 if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) {
1866 hdev->adv_monitors_cnt--;
1867 mgmt_adv_monitor_removed(hdev, monitor->handle);
1868 }
1869
1870 kfree(monitor);
1871 }
1872
1873 /* Assigns handle to a monitor, and if offloading is supported and power is on,
1874 * also attempts to forward the request to the controller.
1875 * This function requires the caller holds hci_req_sync_lock.
1876 */
1877 int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
1878 {
1879 int min, max, handle;
1880 int status = 0;
1881
1882 if (!monitor)
1883 return -EINVAL;
1884
1885 hci_dev_lock(hdev);
1886
1887 min = HCI_MIN_ADV_MONITOR_HANDLE;
1888 max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
1889 handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
1890 GFP_KERNEL);
1891
1892 hci_dev_unlock(hdev);
1893
1894 if (handle < 0)
1895 return handle;
1896
1897 monitor->handle = handle;
1898
1899 if (!hdev_is_powered(hdev))
1900 return status;
1901
1902 switch (hci_get_adv_monitor_offload_ext(hdev)) {
1903 case HCI_ADV_MONITOR_EXT_NONE:
1904 bt_dev_dbg(hdev, "add monitor %d status %d",
1905 monitor->handle, status);
1906 /* Message was not forwarded to controller - not an error */
1907 break;
1908
1909 case HCI_ADV_MONITOR_EXT_MSFT:
1910 status = msft_add_monitor_pattern(hdev, monitor);
1911 bt_dev_dbg(hdev, "add monitor %d msft status %d",
1912 handle, status);
1913 break;
1914 }
1915
1916 return status;
1917 }
1918
1919 /* Attempts to tell the controller and free the monitor. If somehow the
1920 * controller doesn't have a corresponding handle, remove anyway.
1921 * This function requires the caller holds hci_req_sync_lock.
1922 */
1923 static int hci_remove_adv_monitor(struct hci_dev *hdev,
1924 struct adv_monitor *monitor)
1925 {
1926 int status = 0;
1927 int handle;
1928
1929 switch (hci_get_adv_monitor_offload_ext(hdev)) {
1930 case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
1931 bt_dev_dbg(hdev, "remove monitor %d status %d",
1932 monitor->handle, status);
1933 goto free_monitor;
1934
1935 case HCI_ADV_MONITOR_EXT_MSFT:
1936 handle = monitor->handle;
1937 status = msft_remove_monitor(hdev, monitor);
1938 bt_dev_dbg(hdev, "remove monitor %d msft status %d",
1939 handle, status);
1940 break;
1941 }
1942
1943 /* In case no matching handle registered, just free the monitor */
1944 if (status == -ENOENT)
1945 goto free_monitor;
1946
1947 return status;
1948
1949 free_monitor:
1950 if (status == -ENOENT)
1951 bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
1952 monitor->handle);
1953 hci_free_adv_monitor(hdev, monitor);
1954
1955 return status;
1956 }
1957
1958 /* This function requires the caller holds hci_req_sync_lock */
1959 int hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle)
1960 {
1961 struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);
1962
1963 if (!monitor)
1964 return -EINVAL;
1965
1966 return hci_remove_adv_monitor(hdev, monitor);
1967 }
1968
1969 /* This function requires the caller holds hci_req_sync_lock */
1970 int hci_remove_all_adv_monitor(struct hci_dev *hdev)
1971 {
1972 struct adv_monitor *monitor;
1973 int idr_next_id = 0;
1974 int status = 0;
1975
1976 while (1) {
1977 monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
1978 if (!monitor)
1979 break;
1980
1981 status = hci_remove_adv_monitor(hdev, monitor);
1982 if (status)
1983 return status;
1984
1985 idr_next_id++;
1986 }
1987
1988 return status;
1989 }
1990
1991 /* This function requires the caller holds hdev->lock */
1992 bool hci_is_adv_monitoring(struct hci_dev *hdev)
1993 {
1994 return !idr_is_empty(&hdev->adv_monitors_idr);
1995 }
1996
1997 int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
1998 {
1999 if (msft_monitor_supported(hdev))
2000 return HCI_ADV_MONITOR_EXT_MSFT;
2001
2002 return HCI_ADV_MONITOR_EXT_NONE;
2003 }
2004
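/* Look up an entry matching both the address and the address type in a
 * plain bdaddr_list. Returns the entry or NULL if no match is found.
 */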
2005 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2006 bdaddr_t *bdaddr, u8 type)
2007 {
2008 struct bdaddr_list *b;
2009
2010 list_for_each_entry(b, bdaddr_list, list) {
2011 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2012 return b;
2013 }
2014
2015 return NULL;
2016 }
2017
2018 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
2019 struct list_head *bdaddr_list, bdaddr_t *bdaddr,
2020 u8 type)
2021 {
2022 struct bdaddr_list_with_irk *b;
2023
2024 list_for_each_entry(b, bdaddr_list, list) {
2025 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2026 return b;
2027 }
2028
2029 return NULL;
2030 }
2031
2032 struct bdaddr_list_with_flags *
2033 hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
2034 bdaddr_t *bdaddr, u8 type)
2035 {
2036 struct bdaddr_list_with_flags *b;
2037
2038 list_for_each_entry(b, bdaddr_list, list) {
2039 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2040 return b;
2041 }
2042
2043 return NULL;
2044 }
2045
2046 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2047 {
2048 struct bdaddr_list *b, *n;
2049
2050 list_for_each_entry_safe(b, n, bdaddr_list, list) {
2051 list_del(&b->list);
2052 kfree(b);
2053 }
2054 }
2055
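/* Add an address/type pair to a bdaddr_list. BDADDR_ANY is rejected with
 * -EBADF and duplicates with -EEXIST; the entry is allocated with GFP_KERNEL.
 */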
2056 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2057 {
2058 struct bdaddr_list *entry;
2059
2060 if (!bacmp(bdaddr, BDADDR_ANY))
2061 return -EBADF;
2062
2063 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2064 return -EEXIST;
2065
2066 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2067 if (!entry)
2068 return -ENOMEM;
2069
2070 bacpy(&entry->bdaddr, bdaddr);
2071 entry->bdaddr_type = type;
2072
2073 list_add(&entry->list, list);
2074
2075 return 0;
2076 }
2077
2078 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2079 u8 type, u8 *peer_irk, u8 *local_irk)
2080 {
2081 struct bdaddr_list_with_irk *entry;
2082
2083 if (!bacmp(bdaddr, BDADDR_ANY))
2084 return -EBADF;
2085
2086 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2087 return -EEXIST;
2088
2089 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2090 if (!entry)
2091 return -ENOMEM;
2092
2093 bacpy(&entry->bdaddr, bdaddr);
2094 entry->bdaddr_type = type;
2095
2096 if (peer_irk)
2097 memcpy(entry->peer_irk, peer_irk, 16);
2098
2099 if (local_irk)
2100 memcpy(entry->local_irk, local_irk, 16);
2101
2102 list_add(&entry->list, list);
2103
2104 return 0;
2105 }
2106
2107 int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2108 u8 type, u32 flags)
2109 {
2110 struct bdaddr_list_with_flags *entry;
2111
2112 if (!bacmp(bdaddr, BDADDR_ANY))
2113 return -EBADF;
2114
2115 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2116 return -EEXIST;
2117
2118 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2119 if (!entry)
2120 return -ENOMEM;
2121
2122 bacpy(&entry->bdaddr, bdaddr);
2123 entry->bdaddr_type = type;
2124 entry->flags = flags;
2125
2126 list_add(&entry->list, list);
2127
2128 return 0;
2129 }
2130
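/* Remove an address/type pair from a bdaddr_list. Passing BDADDR_ANY clears
 * the whole list; -ENOENT is returned if no matching entry exists.
 */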
2131 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2132 {
2133 struct bdaddr_list *entry;
2134
2135 if (!bacmp(bdaddr, BDADDR_ANY)) {
2136 hci_bdaddr_list_clear(list);
2137 return 0;
2138 }
2139
2140 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2141 if (!entry)
2142 return -ENOENT;
2143
2144 list_del(&entry->list);
2145 kfree(entry);
2146
2147 return 0;
2148 }
2149
2150 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2151 u8 type)
2152 {
2153 struct bdaddr_list_with_irk *entry;
2154
2155 if (!bacmp(bdaddr, BDADDR_ANY)) {
2156 hci_bdaddr_list_clear(list);
2157 return 0;
2158 }
2159
2160 entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
2161 if (!entry)
2162 return -ENOENT;
2163
2164 list_del(&entry->list);
2165 kfree(entry);
2166
2167 return 0;
2168 }
2169
2170 int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2171 u8 type)
2172 {
2173 struct bdaddr_list_with_flags *entry;
2174
2175 if (!bacmp(bdaddr, BDADDR_ANY)) {
2176 hci_bdaddr_list_clear(list);
2177 return 0;
2178 }
2179
2180 entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
2181 if (!entry)
2182 return -ENOENT;
2183
2184 list_del(&entry->list);
2185 kfree(entry);
2186
2187 return 0;
2188 }
2189
2190 /* This function requires the caller holds hdev->lock */
2191 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2192 bdaddr_t *addr, u8 addr_type)
2193 {
2194 struct hci_conn_params *params;
2195
2196 list_for_each_entry(params, &hdev->le_conn_params, list) {
2197 		if (bacmp(&params->addr, addr) == 0 &&
2198 params->addr_type == addr_type) {
2199 return params;
2200 }
2201 }
2202
2203 return NULL;
2204 }
2205
2206 /* This function requires the caller holds hdev->lock or rcu_read_lock */
2207 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2208 bdaddr_t *addr, u8 addr_type)
2209 {
2210 struct hci_conn_params *param;
2211
2212 rcu_read_lock();
2213
2214 list_for_each_entry_rcu(param, list, action) {
2215 		if (bacmp(&param->addr, addr) == 0 &&
2216 param->addr_type == addr_type) {
2217 rcu_read_unlock();
2218 return param;
2219 }
2220 }
2221
2222 rcu_read_unlock();
2223
2224 return NULL;
2225 }
2226
2227 /* This function requires the caller holds hdev->lock */
2228 void hci_pend_le_list_del_init(struct hci_conn_params *param)
2229 {
2230 	if (list_empty(&param->action))
2231 return;
2232
2233 	list_del_rcu(&param->action);
2234 synchronize_rcu();
2235 	INIT_LIST_HEAD(&param->action);
2236 }
2237
2238 /* This function requires the caller holds hdev->lock */
2239 void hci_pend_le_list_add(struct hci_conn_params *param,
2240 struct list_head *list)
2241 {
2242 	list_add_rcu(&param->action, list);
2243 }
2244
2245 /* This function requires the caller holds hdev->lock */
2246 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2247 bdaddr_t *addr, u8 addr_type)
2248 {
2249 struct hci_conn_params *params;
2250
2251 params = hci_conn_params_lookup(hdev, addr, addr_type);
2252 if (params)
2253 return params;
2254
2255 params = kzalloc(sizeof(*params), GFP_KERNEL);
2256 if (!params) {
2257 bt_dev_err(hdev, "out of memory");
2258 return NULL;
2259 }
2260
2261 	bacpy(&params->addr, addr);
2262 params->addr_type = addr_type;
2263
2264 	list_add(&params->list, &hdev->le_conn_params);
2265 	INIT_LIST_HEAD(&params->action);
2266
2267 params->conn_min_interval = hdev->le_conn_min_interval;
2268 params->conn_max_interval = hdev->le_conn_max_interval;
2269 params->conn_latency = hdev->le_conn_latency;
2270 params->supervision_timeout = hdev->le_supv_timeout;
2271 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2272
2273 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2274
2275 return params;
2276 }
2277
2278 void hci_conn_params_free(struct hci_conn_params *params)
2279 {
2280 hci_pend_le_list_del_init(params);
2281
2282 if (params->conn) {
2283 hci_conn_drop(params->conn);
2284 hci_conn_put(params->conn);
2285 }
2286
2287 	list_del(&params->list);
2288 kfree(params);
2289 }
2290
2291 /* This function requires the caller holds hdev->lock */
2292 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2293 {
2294 struct hci_conn_params *params;
2295
2296 params = hci_conn_params_lookup(hdev, addr, addr_type);
2297 if (!params)
2298 return;
2299
2300 hci_conn_params_free(params);
2301
2302 hci_update_passive_scan(hdev);
2303
2304 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2305 }
2306
2307 /* This function requires the caller holds hdev->lock */
2308 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2309 {
2310 struct hci_conn_params *params, *tmp;
2311
2312 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2313 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2314 continue;
2315
2316 /* If trying to establish one time connection to disabled
2317 * device, leave the params, but mark them as just once.
2318 */
2319 if (params->explicit_connect) {
2320 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2321 continue;
2322 }
2323
2324 hci_conn_params_free(params);
2325 }
2326
2327 BT_DBG("All LE disabled connection parameters were removed");
2328 }
2329
2330 /* This function requires the caller holds hdev->lock */
2331 static void hci_conn_params_clear_all(struct hci_dev *hdev)
2332 {
2333 struct hci_conn_params *params, *tmp;
2334
2335 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2336 hci_conn_params_free(params);
2337
2338 BT_DBG("All LE connection parameters were removed");
2339 }
2340
2341 /* Copy the Identity Address of the controller.
2342 *
2343 * If the controller has a public BD_ADDR, then by default use that one.
2344 * If this is a LE only controller without a public address, default to
2345 * the static random address.
2346 *
2347 * For debugging purposes it is possible to force controllers with a
2348 * public address to use the static random address instead.
2349 *
2350 * In case BR/EDR has been disabled on a dual-mode controller and
2351 * userspace has configured a static address, then that address
2352 * becomes the identity address instead of the public BR/EDR address.
2353 */
2354 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2355 u8 *bdaddr_type)
2356 {
2357 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2358 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2359 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2360 bacmp(&hdev->static_addr, BDADDR_ANY))) {
2361 bacpy(bdaddr, &hdev->static_addr);
2362 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2363 } else {
2364 bacpy(bdaddr, &hdev->bdaddr);
2365 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2366 }
2367 }
2368
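/* Reset the stored wake reason and wake address under hdev->lock; called
 * from hci_suspend_dev() before the new suspend state is reported.
 */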
2369 static void hci_clear_wake_reason(struct hci_dev *hdev)
2370 {
2371 hci_dev_lock(hdev);
2372
2373 hdev->wake_reason = 0;
2374 bacpy(&hdev->wake_addr, BDADDR_ANY);
2375 hdev->wake_addr_type = 0;
2376
2377 hci_dev_unlock(hdev);
2378 }
2379
2380 static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
2381 void *data)
2382 {
2383 struct hci_dev *hdev =
2384 container_of(nb, struct hci_dev, suspend_notifier);
2385 int ret = 0;
2386
2387 /* Userspace has full control of this device. Do nothing. */
2388 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2389 return NOTIFY_DONE;
2390
2391 /* To avoid a potential race with hci_unregister_dev. */
2392 hci_dev_hold(hdev);
2393
2394 switch (action) {
2395 case PM_HIBERNATION_PREPARE:
2396 case PM_SUSPEND_PREPARE:
2397 ret = hci_suspend_dev(hdev);
2398 break;
2399 case PM_POST_HIBERNATION:
2400 case PM_POST_SUSPEND:
2401 ret = hci_resume_dev(hdev);
2402 break;
2403 }
2404
2405 if (ret)
2406 bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
2407 action, ret);
2408
2409 hci_dev_put(hdev);
2410 return NOTIFY_DONE;
2411 }
2412
2413 /* Alloc HCI device */
2414 struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
2415 {
2416 struct hci_dev *hdev;
2417 unsigned int alloc_size;
2418
2419 alloc_size = sizeof(*hdev);
2420 if (sizeof_priv) {
2421 /* Fixme: May need ALIGN-ment? */
2422 alloc_size += sizeof_priv;
2423 }
2424
2425 hdev = kzalloc(alloc_size, GFP_KERNEL);
2426 if (!hdev)
2427 return NULL;
2428
2429 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2430 hdev->esco_type = (ESCO_HV1);
2431 hdev->link_mode = (HCI_LM_ACCEPT);
2432 hdev->num_iac = 0x01; /* One IAC support is mandatory */
2433 hdev->io_capability = 0x03; /* No Input No Output */
2434 hdev->manufacturer = 0xffff; /* Default to internal use */
2435 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2436 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2437 hdev->adv_instance_cnt = 0;
2438 hdev->cur_adv_instance = 0x00;
2439 hdev->adv_instance_timeout = 0;
2440
2441 hdev->advmon_allowlist_duration = 300;
2442 hdev->advmon_no_filter_duration = 500;
2443 hdev->enable_advmon_interleave_scan = 0x00; /* Default to disable */
2444
2445 hdev->sniff_max_interval = 800;
2446 hdev->sniff_min_interval = 80;
2447
2448 hdev->le_adv_channel_map = 0x07;
2449 hdev->le_adv_min_interval = 0x0800;
2450 hdev->le_adv_max_interval = 0x0800;
2451 hdev->le_scan_interval = 0x0060;
2452 hdev->le_scan_window = 0x0030;
2453 hdev->le_scan_int_suspend = 0x0400;
2454 hdev->le_scan_window_suspend = 0x0012;
2455 hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
2456 hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
2457 hdev->le_scan_int_adv_monitor = 0x0060;
2458 hdev->le_scan_window_adv_monitor = 0x0030;
2459 hdev->le_scan_int_connect = 0x0060;
2460 hdev->le_scan_window_connect = 0x0060;
2461 hdev->le_conn_min_interval = 0x0018;
2462 hdev->le_conn_max_interval = 0x0028;
2463 hdev->le_conn_latency = 0x0000;
2464 hdev->le_supv_timeout = 0x002a;
2465 hdev->le_def_tx_len = 0x001b;
2466 hdev->le_def_tx_time = 0x0148;
2467 hdev->le_max_tx_len = 0x001b;
2468 hdev->le_max_tx_time = 0x0148;
2469 hdev->le_max_rx_len = 0x001b;
2470 hdev->le_max_rx_time = 0x0148;
2471 hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
2472 hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
2473 hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
2474 hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
2475 hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
2476 hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
2477 hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT;
2478 hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
2479 hdev->max_le_tx_power = HCI_TX_POWER_INVALID;
2480
2481 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
2482 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
2483 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
2484 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
2485 hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
2486 hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
2487
2488 /* default 1.28 sec page scan */
2489 hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
2490 hdev->def_page_scan_int = 0x0800;
2491 hdev->def_page_scan_window = 0x0012;
2492
2493 mutex_init(&hdev->lock);
2494 mutex_init(&hdev->req_lock);
2495
2496 ida_init(&hdev->unset_handle_ida);
2497
2498 INIT_LIST_HEAD(&hdev->mesh_pending);
2499 INIT_LIST_HEAD(&hdev->mgmt_pending);
2500 INIT_LIST_HEAD(&hdev->reject_list);
2501 INIT_LIST_HEAD(&hdev->accept_list);
2502 INIT_LIST_HEAD(&hdev->uuids);
2503 INIT_LIST_HEAD(&hdev->link_keys);
2504 INIT_LIST_HEAD(&hdev->long_term_keys);
2505 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
2506 INIT_LIST_HEAD(&hdev->remote_oob_data);
2507 INIT_LIST_HEAD(&hdev->le_accept_list);
2508 INIT_LIST_HEAD(&hdev->le_resolv_list);
2509 INIT_LIST_HEAD(&hdev->le_conn_params);
2510 INIT_LIST_HEAD(&hdev->pend_le_conns);
2511 INIT_LIST_HEAD(&hdev->pend_le_reports);
2512 INIT_LIST_HEAD(&hdev->conn_hash.list);
2513 INIT_LIST_HEAD(&hdev->adv_instances);
2514 INIT_LIST_HEAD(&hdev->blocked_keys);
2515 INIT_LIST_HEAD(&hdev->monitored_devices);
2516
2517 INIT_LIST_HEAD(&hdev->local_codecs);
2518 INIT_WORK(&hdev->rx_work, hci_rx_work);
2519 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2520 INIT_WORK(&hdev->tx_work, hci_tx_work);
2521 INIT_WORK(&hdev->power_on, hci_power_on);
2522 INIT_WORK(&hdev->error_reset, hci_error_reset);
2523
2524 hci_cmd_sync_init(hdev);
2525
2526 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2527
2528 skb_queue_head_init(&hdev->rx_q);
2529 skb_queue_head_init(&hdev->cmd_q);
2530 skb_queue_head_init(&hdev->raw_q);
2531
2532 init_waitqueue_head(&hdev->req_wait_q);
2533
2534 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
2535 INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);
2536
2537 hci_devcd_setup(hdev);
2538 hci_request_setup(hdev);
2539
2540 hci_init_sysfs(hdev);
2541 discovery_init(hdev);
2542
2543 return hdev;
2544 }
2545 EXPORT_SYMBOL(hci_alloc_dev_priv);
2546
2547 /* Free HCI device */
2548 void hci_free_dev(struct hci_dev *hdev)
2549 {
2550 /* will free via device release */
2551 put_device(&hdev->dev);
2552 }
2553 EXPORT_SYMBOL(hci_free_dev);
2554
2555 /* Register HCI device */
2556 int hci_register_dev(struct hci_dev *hdev)
2557 {
2558 int id, error;
2559
2560 if (!hdev->open || !hdev->close || !hdev->send)
2561 return -EINVAL;
2562
2563 id = ida_alloc_max(&hci_index_ida, HCI_MAX_ID - 1, GFP_KERNEL);
2564 if (id < 0)
2565 return id;
2566
2567 error = dev_set_name(&hdev->dev, "hci%u", id);
2568 if (error)
2569 return error;
2570
2571 hdev->name = dev_name(&hdev->dev);
2572 hdev->id = id;
2573
2574 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2575
2576 hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
2577 if (!hdev->workqueue) {
2578 error = -ENOMEM;
2579 goto err;
2580 }
2581
2582 hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
2583 hdev->name);
2584 if (!hdev->req_workqueue) {
2585 destroy_workqueue(hdev->workqueue);
2586 error = -ENOMEM;
2587 goto err;
2588 }
2589
2590 if (!IS_ERR_OR_NULL(bt_debugfs))
2591 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
2592
2593 error = device_add(&hdev->dev);
2594 if (error < 0)
2595 goto err_wqueue;
2596
2597 hci_leds_init(hdev);
2598
2599 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2600 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2601 hdev);
2602 if (hdev->rfkill) {
2603 if (rfkill_register(hdev->rfkill) < 0) {
2604 rfkill_destroy(hdev->rfkill);
2605 hdev->rfkill = NULL;
2606 }
2607 }
2608
2609 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2610 hci_dev_set_flag(hdev, HCI_RFKILLED);
2611
2612 hci_dev_set_flag(hdev, HCI_SETUP);
2613 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
2614
2615 /* Assume BR/EDR support until proven otherwise (such as
2616 	 * through reading supported features during init).
2617 */
2618 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
2619
2620 write_lock(&hci_dev_list_lock);
2621 list_add(&hdev->list, &hci_dev_list);
2622 write_unlock(&hci_dev_list_lock);
2623
2624 /* Devices that are marked for raw-only usage are unconfigured
2625 * and should not be included in normal operation.
2626 */
2627 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2628 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
2629
2630 	/* Mark the Remote Wakeup connection flag as supported if the driver
2631 	 * has a wakeup callback.
2632 */
2633 if (hdev->wakeup)
2634 hdev->conn_flags |= HCI_CONN_FLAG_REMOTE_WAKEUP;
2635
2636 hci_sock_dev_event(hdev, HCI_DEV_REG);
2637 hci_dev_hold(hdev);
2638
2639 error = hci_register_suspend_notifier(hdev);
2640 if (error)
2641 BT_WARN("register suspend notifier failed error:%d\n", error);
2642
2643 queue_work(hdev->req_workqueue, &hdev->power_on);
2644
2645 idr_init(&hdev->adv_monitors_idr);
2646 msft_register(hdev);
2647
2648 return id;
2649
2650 err_wqueue:
2651 debugfs_remove_recursive(hdev->debugfs);
2652 destroy_workqueue(hdev->workqueue);
2653 destroy_workqueue(hdev->req_workqueue);
2654 err:
2655 ida_free(&hci_index_ida, hdev->id);
2656
2657 return error;
2658 }
2659 EXPORT_SYMBOL(hci_register_dev);
2660
2661 /* Unregister HCI device */
2662 void hci_unregister_dev(struct hci_dev *hdev)
2663 {
2664 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2665
2666 mutex_lock(&hdev->unregister_lock);
2667 hci_dev_set_flag(hdev, HCI_UNREGISTER);
2668 mutex_unlock(&hdev->unregister_lock);
2669
2670 write_lock(&hci_dev_list_lock);
2671 list_del(&hdev->list);
2672 write_unlock(&hci_dev_list_lock);
2673
2674 cancel_work_sync(&hdev->rx_work);
2675 cancel_work_sync(&hdev->cmd_work);
2676 cancel_work_sync(&hdev->tx_work);
2677 cancel_work_sync(&hdev->power_on);
2678 cancel_work_sync(&hdev->error_reset);
2679
2680 hci_cmd_sync_clear(hdev);
2681
2682 hci_unregister_suspend_notifier(hdev);
2683
2684 hci_dev_do_close(hdev);
2685
2686 if (!test_bit(HCI_INIT, &hdev->flags) &&
2687 !hci_dev_test_flag(hdev, HCI_SETUP) &&
2688 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
2689 hci_dev_lock(hdev);
2690 mgmt_index_removed(hdev);
2691 hci_dev_unlock(hdev);
2692 }
2693
2694 /* mgmt_index_removed should take care of emptying the
2695 * pending list */
2696 BUG_ON(!list_empty(&hdev->mgmt_pending));
2697
2698 hci_sock_dev_event(hdev, HCI_DEV_UNREG);
2699
2700 if (hdev->rfkill) {
2701 rfkill_unregister(hdev->rfkill);
2702 rfkill_destroy(hdev->rfkill);
2703 }
2704
2705 device_del(&hdev->dev);
2706 /* Actual cleanup is deferred until hci_release_dev(). */
2707 hci_dev_put(hdev);
2708 }
2709 EXPORT_SYMBOL(hci_unregister_dev);
2710
2711 /* Release HCI device */
2712 void hci_release_dev(struct hci_dev *hdev)
2713 {
2714 debugfs_remove_recursive(hdev->debugfs);
2715 kfree_const(hdev->hw_info);
2716 kfree_const(hdev->fw_info);
2717
2718 destroy_workqueue(hdev->workqueue);
2719 destroy_workqueue(hdev->req_workqueue);
2720
2721 hci_dev_lock(hdev);
2722 hci_bdaddr_list_clear(&hdev->reject_list);
2723 hci_bdaddr_list_clear(&hdev->accept_list);
2724 hci_uuids_clear(hdev);
2725 hci_link_keys_clear(hdev);
2726 hci_smp_ltks_clear(hdev);
2727 hci_smp_irks_clear(hdev);
2728 hci_remote_oob_data_clear(hdev);
2729 hci_adv_instances_clear(hdev);
2730 hci_adv_monitors_clear(hdev);
2731 hci_bdaddr_list_clear(&hdev->le_accept_list);
2732 hci_bdaddr_list_clear(&hdev->le_resolv_list);
2733 hci_conn_params_clear_all(hdev);
2734 hci_discovery_filter_clear(hdev);
2735 hci_blocked_keys_clear(hdev);
2736 hci_codec_list_clear(&hdev->local_codecs);
2737 msft_release(hdev);
2738 hci_dev_unlock(hdev);
2739
2740 ida_destroy(&hdev->unset_handle_ida);
2741 ida_free(&hci_index_ida, hdev->id);
2742 kfree_skb(hdev->sent_cmd);
2743 kfree_skb(hdev->req_skb);
2744 kfree_skb(hdev->recv_event);
2745 kfree(hdev);
2746 }
2747 EXPORT_SYMBOL(hci_release_dev);
2748
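/* Register the PM suspend notifier unless it is already set up or the
 * driver opted out via HCI_QUIRK_NO_SUSPEND_NOTIFIER.
 */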
2749 int hci_register_suspend_notifier(struct hci_dev *hdev)
2750 {
2751 int ret = 0;
2752
2753 if (!hdev->suspend_notifier.notifier_call &&
2754 !test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
2755 hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
2756 ret = register_pm_notifier(&hdev->suspend_notifier);
2757 }
2758
2759 return ret;
2760 }
2761
2762 int hci_unregister_suspend_notifier(struct hci_dev *hdev)
2763 {
2764 int ret = 0;
2765
2766 if (hdev->suspend_notifier.notifier_call) {
2767 ret = unregister_pm_notifier(&hdev->suspend_notifier);
2768 if (!ret)
2769 hdev->suspend_notifier.notifier_call = NULL;
2770 }
2771
2772 return ret;
2773 }
2774
2775 /* Cancel ongoing command synchronously:
2776 *
2777 * - Cancel command timer
2778 * - Reset command counter
2779 * - Cancel command request
2780 */
2781 static void hci_cancel_cmd_sync(struct hci_dev *hdev, int err)
2782 {
2783 bt_dev_dbg(hdev, "err 0x%2.2x", err);
2784
2785 cancel_delayed_work_sync(&hdev->cmd_timer);
2786 cancel_delayed_work_sync(&hdev->ncmd_timer);
2787 atomic_set(&hdev->cmd_cnt, 1);
2788
2789 hci_cmd_sync_cancel_sync(hdev, err);
2790 }
2791
2792 /* Suspend HCI device */
2793 int hci_suspend_dev(struct hci_dev *hdev)
2794 {
2795 int ret;
2796
2797 bt_dev_dbg(hdev, "");
2798
2799 	/* Suspend should only act when the device is powered. */
2800 if (!hdev_is_powered(hdev) ||
2801 hci_dev_test_flag(hdev, HCI_UNREGISTER))
2802 return 0;
2803
2804 /* If powering down don't attempt to suspend */
2805 if (mgmt_powering_down(hdev))
2806 return 0;
2807
2808 /* Cancel potentially blocking sync operation before suspend */
2809 hci_cancel_cmd_sync(hdev, EHOSTDOWN);
2810
2811 hci_req_sync_lock(hdev);
2812 ret = hci_suspend_sync(hdev);
2813 hci_req_sync_unlock(hdev);
2814
2815 hci_clear_wake_reason(hdev);
2816 mgmt_suspending(hdev, hdev->suspend_state);
2817
2818 hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
2819 return ret;
2820 }
2821 EXPORT_SYMBOL(hci_suspend_dev);
2822
2823 /* Resume HCI device */
2824 int hci_resume_dev(struct hci_dev *hdev)
2825 {
2826 int ret;
2827
2828 bt_dev_dbg(hdev, "");
2829
2830 	/* Resume should only act when the device is powered. */
2831 if (!hdev_is_powered(hdev) ||
2832 hci_dev_test_flag(hdev, HCI_UNREGISTER))
2833 return 0;
2834
2835 /* If powering down don't attempt to resume */
2836 if (mgmt_powering_down(hdev))
2837 return 0;
2838
2839 hci_req_sync_lock(hdev);
2840 ret = hci_resume_sync(hdev);
2841 hci_req_sync_unlock(hdev);
2842
2843 mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
2844 hdev->wake_addr_type);
2845
2846 hci_sock_dev_event(hdev, HCI_DEV_RESUME);
2847 return ret;
2848 }
2849 EXPORT_SYMBOL(hci_resume_dev);
2850
2851 /* Reset HCI device */
2852 int hci_reset_dev(struct hci_dev *hdev)
2853 {
2854 static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
2855 struct sk_buff *skb;
2856
2857 skb = bt_skb_alloc(3, GFP_ATOMIC);
2858 if (!skb)
2859 return -ENOMEM;
2860
2861 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
2862 skb_put_data(skb, hw_err, 3);
2863
2864 bt_dev_err(hdev, "Injecting HCI hardware error event");
2865
2866 /* Send Hardware Error to upper stack */
2867 return hci_recv_frame(hdev, skb);
2868 }
2869 EXPORT_SYMBOL(hci_reset_dev);
2870
2871 /* Receive frame from HCI drivers */
2872 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
2873 {
2874 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
2875 && !test_bit(HCI_INIT, &hdev->flags))) {
2876 kfree_skb(skb);
2877 return -ENXIO;
2878 }
2879
2880 switch (hci_skb_pkt_type(skb)) {
2881 case HCI_EVENT_PKT:
2882 break;
2883 case HCI_ACLDATA_PKT:
2884 /* Detect if ISO packet has been sent as ACL */
2885 if (hci_conn_num(hdev, ISO_LINK)) {
2886 __u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle);
2887 __u8 type;
2888
2889 type = hci_conn_lookup_type(hdev, hci_handle(handle));
2890 if (type == ISO_LINK)
2891 hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
2892 }
2893 break;
2894 case HCI_SCODATA_PKT:
2895 break;
2896 case HCI_ISODATA_PKT:
2897 break;
2898 default:
2899 kfree_skb(skb);
2900 return -EINVAL;
2901 }
2902
2903 /* Incoming skb */
2904 bt_cb(skb)->incoming = 1;
2905
2906 /* Time stamp */
2907 __net_timestamp(skb);
2908
2909 skb_queue_tail(&hdev->rx_q, skb);
2910 queue_work(hdev->workqueue, &hdev->rx_work);
2911
2912 return 0;
2913 }
2914 EXPORT_SYMBOL(hci_recv_frame);
2915
2916 /* Receive diagnostic message from HCI drivers */
2917 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
2918 {
2919 /* Mark as diagnostic packet */
2920 hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
2921
2922 /* Time stamp */
2923 __net_timestamp(skb);
2924
2925 skb_queue_tail(&hdev->rx_q, skb);
2926 queue_work(hdev->workqueue, &hdev->rx_work);
2927
2928 return 0;
2929 }
2930 EXPORT_SYMBOL(hci_recv_diag);
2931
2932 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
2933 {
2934 va_list vargs;
2935
2936 va_start(vargs, fmt);
2937 kfree_const(hdev->hw_info);
2938 hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
2939 va_end(vargs);
2940 }
2941 EXPORT_SYMBOL(hci_set_hw_info);
2942
2943 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
2944 {
2945 va_list vargs;
2946
2947 va_start(vargs, fmt);
2948 kfree_const(hdev->fw_info);
2949 hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
2950 va_end(vargs);
2951 }
2952 EXPORT_SYMBOL(hci_set_fw_info);
2953
2954 /* ---- Interface to upper protocols ---- */
2955
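/* Upper protocols (such as L2CAP) register a struct hci_cb here to receive
 * connection related callbacks; the list is protected by hci_cb_list_lock.
 */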
2956 int hci_register_cb(struct hci_cb *cb)
2957 {
2958 BT_DBG("%p name %s", cb, cb->name);
2959
2960 mutex_lock(&hci_cb_list_lock);
2961 list_add_tail(&cb->list, &hci_cb_list);
2962 mutex_unlock(&hci_cb_list_lock);
2963
2964 return 0;
2965 }
2966 EXPORT_SYMBOL(hci_register_cb);
2967
2968 int hci_unregister_cb(struct hci_cb *cb)
2969 {
2970 BT_DBG("%p name %s", cb, cb->name);
2971
2972 mutex_lock(&hci_cb_list_lock);
2973 list_del(&cb->list);
2974 mutex_unlock(&hci_cb_list_lock);
2975
2976 return 0;
2977 }
2978 EXPORT_SYMBOL(hci_unregister_cb);
2979
2980 static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
2981 {
2982 int err;
2983
2984 BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
2985 skb->len);
2986
2987 /* Time stamp */
2988 __net_timestamp(skb);
2989
2990 /* Send copy to monitor */
2991 hci_send_to_monitor(hdev, skb);
2992
2993 if (atomic_read(&hdev->promisc)) {
2994 /* Send copy to the sockets */
2995 hci_send_to_sock(hdev, skb);
2996 }
2997
2998 /* Get rid of skb owner, prior to sending to the driver. */
2999 skb_orphan(skb);
3000
3001 if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3002 kfree_skb(skb);
3003 return -EINVAL;
3004 }
3005
3006 err = hdev->send(hdev, skb);
3007 if (err < 0) {
3008 bt_dev_err(hdev, "sending frame failed (%d)", err);
3009 kfree_skb(skb);
3010 return err;
3011 }
3012
3013 return 0;
3014 }
3015
3016 /* Send HCI command */
3017 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3018 const void *param)
3019 {
3020 struct sk_buff *skb;
3021
3022 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3023
3024 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3025 if (!skb) {
3026 bt_dev_err(hdev, "no memory for command");
3027 return -ENOMEM;
3028 }
3029
3030 /* Stand-alone HCI commands must be flagged as
3031 * single-command requests.
3032 */
3033 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
3034
3035 skb_queue_tail(&hdev->cmd_q, skb);
3036 queue_work(hdev->workqueue, &hdev->cmd_work);
3037
3038 return 0;
3039 }
3040
3041 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
3042 const void *param)
3043 {
3044 struct sk_buff *skb;
3045
3046 if (hci_opcode_ogf(opcode) != 0x3f) {
3047 /* A controller receiving a command shall respond with either
3048 * a Command Status Event or a Command Complete Event.
3049 * Therefore, all standard HCI commands must be sent via the
3050 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
3051 * Some vendors do not comply with this rule for vendor-specific
3052 * commands and do not return any event. We want to support
3053 * unresponded commands for such cases only.
3054 */
3055 bt_dev_err(hdev, "unresponded command not supported");
3056 return -EINVAL;
3057 }
3058
3059 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3060 if (!skb) {
3061 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
3062 opcode);
3063 return -ENOMEM;
3064 }
3065
3066 hci_send_frame(hdev, skb);
3067
3068 return 0;
3069 }
3070 EXPORT_SYMBOL(__hci_cmd_send);
3071
3072 /* Get data from the previously sent command */
3073 static void *hci_cmd_data(struct sk_buff *skb, __u16 opcode)
3074 {
3075 struct hci_command_hdr *hdr;
3076
3077 if (!skb || skb->len < HCI_COMMAND_HDR_SIZE)
3078 return NULL;
3079
3080 hdr = (void *)skb->data;
3081
3082 if (hdr->opcode != cpu_to_le16(opcode))
3083 return NULL;
3084
3085 return skb->data + HCI_COMMAND_HDR_SIZE;
3086 }
3087
3088 /* Get data from the previously sent command */
3089 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3090 {
3091 void *data;
3092
3093 /* Check if opcode matches last sent command */
3094 data = hci_cmd_data(hdev->sent_cmd, opcode);
3095 if (!data)
3096 /* Check if opcode matches last request */
3097 data = hci_cmd_data(hdev->req_skb, opcode);
3098
3099 return data;
3100 }
3101
3102 /* Get data from last received event */
3103 void *hci_recv_event_data(struct hci_dev *hdev, __u8 event)
3104 {
3105 struct hci_event_hdr *hdr;
3106 int offset;
3107
3108 if (!hdev->recv_event)
3109 return NULL;
3110
3111 hdr = (void *)hdev->recv_event->data;
3112 offset = sizeof(*hdr);
3113
3114 if (hdr->evt != event) {
3115 		/* In case of an LE meta event, check whether the subevent matches */
3116 if (hdr->evt == HCI_EV_LE_META) {
3117 struct hci_ev_le_meta *ev;
3118
3119 ev = (void *)hdev->recv_event->data + offset;
3120 offset += sizeof(*ev);
3121 if (ev->subevent == event)
3122 goto found;
3123 }
3124 return NULL;
3125 }
3126
3127 found:
3128 bt_dev_dbg(hdev, "event 0x%2.2x", event);
3129
3130 return hdev->recv_event->data + offset;
3131 }
3132
3133 /* Send ACL data */
3134 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3135 {
3136 struct hci_acl_hdr *hdr;
3137 int len = skb->len;
3138
3139 skb_push(skb, HCI_ACL_HDR_SIZE);
3140 skb_reset_transport_header(skb);
3141 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3142 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3143 hdr->dlen = cpu_to_le16(len);
3144 }
3145
3146 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3147 struct sk_buff *skb, __u16 flags)
3148 {
3149 struct hci_conn *conn = chan->conn;
3150 struct hci_dev *hdev = conn->hdev;
3151 struct sk_buff *list;
3152
3153 skb->len = skb_headlen(skb);
3154 skb->data_len = 0;
3155
3156 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3157
3158 hci_add_acl_hdr(skb, conn->handle, flags);
3159
3160 list = skb_shinfo(skb)->frag_list;
3161 if (!list) {
3162 /* Non fragmented */
3163 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3164
3165 skb_queue_tail(queue, skb);
3166 } else {
3167 /* Fragmented */
3168 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3169
3170 skb_shinfo(skb)->frag_list = NULL;
3171
3172 		/* Queue all fragments atomically. spin_lock_bh is needed here
3173 		 * because with 6LoWPAN links this function can be called from
3174 		 * softirq context, and taking a normal spin lock there could
3175 		 * cause deadlocks.
3176 		 */
3177 spin_lock_bh(&queue->lock);
3178
3179 __skb_queue_tail(queue, skb);
3180
3181 flags &= ~ACL_START;
3182 flags |= ACL_CONT;
3183 do {
3184 skb = list; list = list->next;
3185
3186 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3187 hci_add_acl_hdr(skb, conn->handle, flags);
3188
3189 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3190
3191 __skb_queue_tail(queue, skb);
3192 } while (list);
3193
3194 spin_unlock_bh(&queue->lock);
3195 }
3196 }
3197
3198 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3199 {
3200 struct hci_dev *hdev = chan->conn->hdev;
3201
3202 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3203
3204 hci_queue_acl(chan, &chan->data_q, skb, flags);
3205
3206 queue_work(hdev->workqueue, &hdev->tx_work);
3207 }
3208
3209 /* Send SCO data */
3210 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3211 {
3212 struct hci_dev *hdev = conn->hdev;
3213 struct hci_sco_hdr hdr;
3214
3215 BT_DBG("%s len %d", hdev->name, skb->len);
3216
3217 hdr.handle = cpu_to_le16(conn->handle);
3218 hdr.dlen = skb->len;
3219
3220 skb_push(skb, HCI_SCO_HDR_SIZE);
3221 skb_reset_transport_header(skb);
3222 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3223
3224 hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
3225
3226 skb_queue_tail(&conn->data_q, skb);
3227 queue_work(hdev->workqueue, &hdev->tx_work);
3228 }
3229
3230 /* Send ISO data */
3231 static void hci_add_iso_hdr(struct sk_buff *skb, __u16 handle, __u8 flags)
3232 {
3233 struct hci_iso_hdr *hdr;
3234 int len = skb->len;
3235
3236 skb_push(skb, HCI_ISO_HDR_SIZE);
3237 skb_reset_transport_header(skb);
3238 hdr = (struct hci_iso_hdr *)skb_transport_header(skb);
3239 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3240 hdr->dlen = cpu_to_le16(len);
3241 }
3242
3243 static void hci_queue_iso(struct hci_conn *conn, struct sk_buff_head *queue,
3244 struct sk_buff *skb)
3245 {
3246 struct hci_dev *hdev = conn->hdev;
3247 struct sk_buff *list;
3248 __u16 flags;
3249
3250 skb->len = skb_headlen(skb);
3251 skb->data_len = 0;
3252
3253 hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
3254
3255 list = skb_shinfo(skb)->frag_list;
3256
3257 flags = hci_iso_flags_pack(list ? ISO_START : ISO_SINGLE, 0x00);
3258 hci_add_iso_hdr(skb, conn->handle, flags);
3259
3260 if (!list) {
3261 /* Non fragmented */
3262 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3263
3264 skb_queue_tail(queue, skb);
3265 } else {
3266 /* Fragmented */
3267 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3268
3269 skb_shinfo(skb)->frag_list = NULL;
3270
3271 __skb_queue_tail(queue, skb);
3272
3273 do {
3274 skb = list; list = list->next;
3275
3276 hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
3277 flags = hci_iso_flags_pack(list ? ISO_CONT : ISO_END,
3278 0x00);
3279 hci_add_iso_hdr(skb, conn->handle, flags);
3280
3281 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3282
3283 __skb_queue_tail(queue, skb);
3284 } while (list);
3285 }
3286 }
3287
3288 void hci_send_iso(struct hci_conn *conn, struct sk_buff *skb)
3289 {
3290 struct hci_dev *hdev = conn->hdev;
3291
3292 BT_DBG("%s len %d", hdev->name, skb->len);
3293
3294 hci_queue_iso(conn, &conn->data_q, skb);
3295
3296 queue_work(hdev->workqueue, &hdev->tx_work);
3297 }
3298
3299 /* ---- HCI TX task (outgoing data) ---- */
3300
3301 /* HCI Connection scheduler */
3302 static inline void hci_quote_sent(struct hci_conn *conn, int num, int *quote)
3303 {
3304 struct hci_dev *hdev;
3305 int cnt, q;
3306
3307 if (!conn) {
3308 *quote = 0;
3309 return;
3310 }
3311
3312 hdev = conn->hdev;
3313
3314 switch (conn->type) {
3315 case ACL_LINK:
3316 cnt = hdev->acl_cnt;
3317 break;
3318 case SCO_LINK:
3319 case ESCO_LINK:
3320 cnt = hdev->sco_cnt;
3321 break;
3322 case LE_LINK:
3323 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3324 break;
3325 case ISO_LINK:
3326 cnt = hdev->iso_mtu ? hdev->iso_cnt :
3327 hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3328 break;
3329 default:
3330 cnt = 0;
3331 bt_dev_err(hdev, "unknown link type %d", conn->type);
3332 }
3333
3334 q = cnt / num;
3335 *quote = q ? q : 1;
3336 }
3337
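/* Pick the connection of the given link type with the fewest outstanding
 * packets that still has queued data, and compute its TX quota.
 */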
3338 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3339 int *quote)
3340 {
3341 struct hci_conn_hash *h = &hdev->conn_hash;
3342 struct hci_conn *conn = NULL, *c;
3343 unsigned int num = 0, min = ~0;
3344
3345 	/* We don't have to lock the device here. Connections are always
3346 	 * added and removed with the TX task disabled. */
3347
3348 rcu_read_lock();
3349
3350 list_for_each_entry_rcu(c, &h->list, list) {
3351 if (c->type != type || skb_queue_empty(&c->data_q))
3352 continue;
3353
3354 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3355 continue;
3356
3357 num++;
3358
3359 if (c->sent < min) {
3360 min = c->sent;
3361 conn = c;
3362 }
3363
3364 if (hci_conn_num(hdev, type) == num)
3365 break;
3366 }
3367
3368 rcu_read_unlock();
3369
3370 hci_quote_sent(conn, num, quote);
3371
3372 BT_DBG("conn %p quote %d", conn, *quote);
3373 return conn;
3374 }
3375
3376 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3377 {
3378 struct hci_conn_hash *h = &hdev->conn_hash;
3379 struct hci_conn *c;
3380
3381 bt_dev_err(hdev, "link tx timeout");
3382
3383 rcu_read_lock();
3384
3385 /* Kill stalled connections */
3386 list_for_each_entry_rcu(c, &h->list, list) {
3387 if (c->type == type && c->sent) {
3388 bt_dev_err(hdev, "killing stalled connection %pMR",
3389 &c->dst);
3390 /* hci_disconnect might sleep, so, we have to release
3391 * the RCU read lock before calling it.
3392 */
3393 rcu_read_unlock();
3394 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3395 rcu_read_lock();
3396 }
3397 }
3398
3399 rcu_read_unlock();
3400 }
3401
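/* Select the HCI channel to service next: only the highest priority among
 * the queued skbs is considered, and of those the connection with the fewest
 * packets in flight wins. The quota is derived from the controller buffers.
 */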
3402 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3403 int *quote)
3404 {
3405 struct hci_conn_hash *h = &hdev->conn_hash;
3406 struct hci_chan *chan = NULL;
3407 unsigned int num = 0, min = ~0, cur_prio = 0;
3408 struct hci_conn *conn;
3409 int conn_num = 0;
3410
3411 BT_DBG("%s", hdev->name);
3412
3413 rcu_read_lock();
3414
3415 list_for_each_entry_rcu(conn, &h->list, list) {
3416 struct hci_chan *tmp;
3417
3418 if (conn->type != type)
3419 continue;
3420
3421 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3422 continue;
3423
3424 conn_num++;
3425
3426 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3427 struct sk_buff *skb;
3428
3429 if (skb_queue_empty(&tmp->data_q))
3430 continue;
3431
3432 skb = skb_peek(&tmp->data_q);
3433 if (skb->priority < cur_prio)
3434 continue;
3435
3436 if (skb->priority > cur_prio) {
3437 num = 0;
3438 min = ~0;
3439 cur_prio = skb->priority;
3440 }
3441
3442 num++;
3443
3444 if (conn->sent < min) {
3445 min = conn->sent;
3446 chan = tmp;
3447 }
3448 }
3449
3450 if (hci_conn_num(hdev, type) == conn_num)
3451 break;
3452 }
3453
3454 rcu_read_unlock();
3455
3456 if (!chan)
3457 return NULL;
3458
3459 hci_quote_sent(chan->conn, num, quote);
3460
3461 BT_DBG("chan %p quote %d", chan, *quote);
3462 return chan;
3463 }
3464
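/* Promote starved channels: any channel of this link type that has queued
 * data but sent nothing in the last round gets its head skb bumped to
 * HCI_PRIO_MAX - 1 so it is not starved by higher priority traffic.
 */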
3465 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3466 {
3467 struct hci_conn_hash *h = &hdev->conn_hash;
3468 struct hci_conn *conn;
3469 int num = 0;
3470
3471 BT_DBG("%s", hdev->name);
3472
3473 rcu_read_lock();
3474
3475 list_for_each_entry_rcu(conn, &h->list, list) {
3476 struct hci_chan *chan;
3477
3478 if (conn->type != type)
3479 continue;
3480
3481 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3482 continue;
3483
3484 num++;
3485
3486 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3487 struct sk_buff *skb;
3488
3489 if (chan->sent) {
3490 chan->sent = 0;
3491 continue;
3492 }
3493
3494 if (skb_queue_empty(&chan->data_q))
3495 continue;
3496
3497 skb = skb_peek(&chan->data_q);
3498 if (skb->priority >= HCI_PRIO_MAX - 1)
3499 continue;
3500
3501 skb->priority = HCI_PRIO_MAX - 1;
3502
3503 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3504 skb->priority);
3505 }
3506
3507 if (hci_conn_num(hdev, type) == num)
3508 break;
3509 }
3510
3511 rcu_read_unlock();
3512
3513 }
3514
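/* If the controller has returned no buffer credits and nothing has been
 * transmitted for HCI_ACL_TX_TIMEOUT, assume the link is stalled and kill
 * the affected connections.
 */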
3515 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type)
3516 {
3517 unsigned long last_tx;
3518
3519 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
3520 return;
3521
3522 switch (type) {
3523 case LE_LINK:
3524 last_tx = hdev->le_last_tx;
3525 break;
3526 default:
3527 last_tx = hdev->acl_last_tx;
3528 break;
3529 }
3530
3531 /* tx timeout must be longer than maximum link supervision timeout
3532 * (40.9 seconds)
3533 */
3534 if (!cnt && time_after(jiffies, last_tx + HCI_ACL_TX_TIMEOUT))
3535 hci_link_tx_to(hdev, type);
3536 }
3537
3538 /* Schedule SCO */
3539 static void hci_sched_sco(struct hci_dev *hdev)
3540 {
3541 struct hci_conn *conn;
3542 struct sk_buff *skb;
3543 int quote;
3544
3545 BT_DBG("%s", hdev->name);
3546
3547 if (!hci_conn_num(hdev, SCO_LINK))
3548 return;
3549
3550 	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3551 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3552 BT_DBG("skb %p len %d", skb, skb->len);
3553 hci_send_frame(hdev, skb);
3554
3555 conn->sent++;
3556 if (conn->sent == ~0)
3557 conn->sent = 0;
3558 }
3559 }
3560 }
3561
3562 static void hci_sched_esco(struct hci_dev *hdev)
3563 {
3564 struct hci_conn *conn;
3565 struct sk_buff *skb;
3566 int quote;
3567
3568 BT_DBG("%s", hdev->name);
3569
3570 if (!hci_conn_num(hdev, ESCO_LINK))
3571 return;
3572
3573 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3574 						     &quote))) {
3575 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3576 BT_DBG("skb %p len %d", skb, skb->len);
3577 hci_send_frame(hdev, skb);
3578
3579 conn->sent++;
3580 if (conn->sent == ~0)
3581 conn->sent = 0;
3582 }
3583 }
3584 }
3585
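/* Drain ACL data while controller buffer credits (acl_cnt) remain, serving
 * channels in priority order and interleaving pending SCO/eSCO traffic after
 * each frame.
 */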
3586 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3587 {
3588 unsigned int cnt = hdev->acl_cnt;
3589 struct hci_chan *chan;
3590 struct sk_buff *skb;
3591 int quote;
3592
3593 __check_timeout(hdev, cnt, ACL_LINK);
3594
3595 while (hdev->acl_cnt &&
3596 	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3597 u32 priority = (skb_peek(&chan->data_q))->priority;
3598 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3599 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3600 skb->len, skb->priority);
3601
3602 /* Stop if priority has changed */
3603 if (skb->priority < priority)
3604 break;
3605
3606 skb = skb_dequeue(&chan->data_q);
3607
3608 hci_conn_enter_active_mode(chan->conn,
3609 bt_cb(skb)->force_active);
3610
3611 hci_send_frame(hdev, skb);
3612 hdev->acl_last_tx = jiffies;
3613
3614 hdev->acl_cnt--;
3615 chan->sent++;
3616 chan->conn->sent++;
3617
3618 /* Send pending SCO packets right away */
3619 hci_sched_sco(hdev);
3620 hci_sched_esco(hdev);
3621 }
3622 }
3623
3624 if (cnt != hdev->acl_cnt)
3625 hci_prio_recalculate(hdev, ACL_LINK);
3626 }
3627
3628 static void hci_sched_acl(struct hci_dev *hdev)
3629 {
3630 BT_DBG("%s", hdev->name);
3631
3632 	/* Nothing to schedule if there are no ACL links */
3633 if (!hci_conn_num(hdev, ACL_LINK))
3634 return;
3635
3636 hci_sched_acl_pkt(hdev);
3637 }
3638
3639 static void hci_sched_le(struct hci_dev *hdev)
3640 {
3641 struct hci_chan *chan;
3642 struct sk_buff *skb;
3643 int quote, *cnt, tmp;
3644
3645 BT_DBG("%s", hdev->name);
3646
3647 if (!hci_conn_num(hdev, LE_LINK))
3648 return;
3649
3650 cnt = hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt;
3651
3652 __check_timeout(hdev, *cnt, LE_LINK);
3653
3654 tmp = *cnt;
3655 	while (*cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3656 u32 priority = (skb_peek(&chan->data_q))->priority;
3657 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3658 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3659 skb->len, skb->priority);
3660
3661 /* Stop if priority has changed */
3662 if (skb->priority < priority)
3663 break;
3664
3665 skb = skb_dequeue(&chan->data_q);
3666
3667 hci_send_frame(hdev, skb);
3668 hdev->le_last_tx = jiffies;
3669
3670 (*cnt)--;
3671 chan->sent++;
3672 chan->conn->sent++;
3673
3674 /* Send pending SCO packets right away */
3675 hci_sched_sco(hdev);
3676 hci_sched_esco(hdev);
3677 }
3678 }
3679
3680 if (*cnt != tmp)
3681 hci_prio_recalculate(hdev, LE_LINK);
3682 }
3683
3684 /* Schedule CIS */
3685 static void hci_sched_iso(struct hci_dev *hdev)
3686 {
3687 struct hci_conn *conn;
3688 struct sk_buff *skb;
3689 int quote, *cnt;
3690
3691 BT_DBG("%s", hdev->name);
3692
3693 if (!hci_conn_num(hdev, ISO_LINK))
3694 return;
3695
3696 cnt = hdev->iso_pkts ? &hdev->iso_cnt :
3697 hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt;
3698 	while (*cnt && (conn = hci_low_sent(hdev, ISO_LINK, &quote))) {
3699 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3700 BT_DBG("skb %p len %d", skb, skb->len);
3701 hci_send_frame(hdev, skb);
3702
3703 conn->sent++;
3704 if (conn->sent == ~0)
3705 conn->sent = 0;
3706 (*cnt)--;
3707 }
3708 }
3709 }
3710
3711 static void hci_tx_work(struct work_struct *work)
3712 {
3713 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3714 struct sk_buff *skb;
3715
3716 BT_DBG("%s acl %d sco %d le %d iso %d", hdev->name, hdev->acl_cnt,
3717 hdev->sco_cnt, hdev->le_cnt, hdev->iso_cnt);
3718
3719 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
3720 /* Schedule queues and send stuff to HCI driver */
3721 hci_sched_sco(hdev);
3722 hci_sched_esco(hdev);
3723 hci_sched_iso(hdev);
3724 hci_sched_acl(hdev);
3725 hci_sched_le(hdev);
3726 }
3727
3728 /* Send next queued raw (unknown type) packet */
3729 while ((skb = skb_dequeue(&hdev->raw_q)))
3730 hci_send_frame(hdev, skb);
3731 }
3732
3733 /* ----- HCI RX task (incoming data processing) ----- */
3734
3735 /* ACL data packet */
3736 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3737 {
3738 struct hci_acl_hdr *hdr = (void *) skb->data;
3739 struct hci_conn *conn;
3740 __u16 handle, flags;
3741
3742 skb_pull(skb, HCI_ACL_HDR_SIZE);
3743
3744 handle = __le16_to_cpu(hdr->handle);
3745 flags = hci_flags(handle);
3746 handle = hci_handle(handle);
3747
3748 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3749 handle, flags);
3750
3751 hdev->stat.acl_rx++;
3752
3753 hci_dev_lock(hdev);
3754 conn = hci_conn_hash_lookup_handle(hdev, handle);
3755 hci_dev_unlock(hdev);
3756
3757 if (conn) {
3758 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3759
3760 /* Send to upper protocol */
3761 l2cap_recv_acldata(conn, skb, flags);
3762 return;
3763 } else {
3764 bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
3765 handle);
3766 }
3767
3768 kfree_skb(skb);
3769 }
3770
3771 /* SCO data packet */
3772 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3773 {
3774 struct hci_sco_hdr *hdr = (void *) skb->data;
3775 struct hci_conn *conn;
3776 __u16 handle, flags;
3777
3778 skb_pull(skb, HCI_SCO_HDR_SIZE);
3779
3780 handle = __le16_to_cpu(hdr->handle);
3781 flags = hci_flags(handle);
3782 handle = hci_handle(handle);
3783
3784 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3785 handle, flags);
3786
3787 hdev->stat.sco_rx++;
3788
3789 hci_dev_lock(hdev);
3790 conn = hci_conn_hash_lookup_handle(hdev, handle);
3791 hci_dev_unlock(hdev);
3792
3793 if (conn) {
3794 /* Send to upper protocol */
3795 hci_skb_pkt_status(skb) = flags & 0x03;
3796 sco_recv_scodata(conn, skb);
3797 return;
3798 } else {
3799 bt_dev_err_ratelimited(hdev, "SCO packet for unknown connection handle %d",
3800 handle);
3801 }
3802
3803 kfree_skb(skb);
3804 }
3805
3806 static void hci_isodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3807 {
3808 struct hci_iso_hdr *hdr;
3809 struct hci_conn *conn;
3810 __u16 handle, flags;
3811
3812 hdr = skb_pull_data(skb, sizeof(*hdr));
3813 if (!hdr) {
3814 bt_dev_err(hdev, "ISO packet too small");
3815 goto drop;
3816 }
3817
3818 handle = __le16_to_cpu(hdr->handle);
3819 flags = hci_flags(handle);
3820 handle = hci_handle(handle);
3821
3822 bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
3823 handle, flags);
3824
3825 hci_dev_lock(hdev);
3826 conn = hci_conn_hash_lookup_handle(hdev, handle);
3827 hci_dev_unlock(hdev);
3828
3829 if (!conn) {
3830 bt_dev_err(hdev, "ISO packet for unknown connection handle %d",
3831 handle);
3832 goto drop;
3833 }
3834
3835 /* Send to upper protocol */
3836 iso_recv(conn, skb, flags);
3837 return;
3838
3839 drop:
3840 kfree_skb(skb);
3841 }
3842
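/* A request is complete when the command queue is empty or the next queued
 * command starts a new request (HCI_REQ_START).
 */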
3843 static bool hci_req_is_complete(struct hci_dev *hdev)
3844 {
3845 struct sk_buff *skb;
3846
3847 skb = skb_peek(&hdev->cmd_q);
3848 if (!skb)
3849 return true;
3850
3851 return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
3852 }
3853
3854 static void hci_resend_last(struct hci_dev *hdev)
3855 {
3856 struct hci_command_hdr *sent;
3857 struct sk_buff *skb;
3858 u16 opcode;
3859
3860 if (!hdev->sent_cmd)
3861 return;
3862
3863 sent = (void *) hdev->sent_cmd->data;
3864 opcode = __le16_to_cpu(sent->opcode);
3865 if (opcode == HCI_OP_RESET)
3866 return;
3867
3868 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3869 if (!skb)
3870 return;
3871
3872 skb_queue_head(&hdev->cmd_q, skb);
3873 queue_work(hdev->workqueue, &hdev->cmd_work);
3874 }
3875
3876 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
3877 hci_req_complete_t *req_complete,
3878 hci_req_complete_skb_t *req_complete_skb)
3879 {
3880 struct sk_buff *skb;
3881 unsigned long flags;
3882
3883 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3884
3885 /* If the completed command doesn't match the last one that was
3886 * sent we need to do special handling of it.
3887 */
3888 if (!hci_sent_cmd_data(hdev, opcode)) {
3889 /* Some CSR based controllers generate a spontaneous
3890 * reset complete event during init and any pending
3891 * command will never be completed. In such a case we
3892 * need to resend whatever was the last sent
3893 * command.
3894 */
3895 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3896 hci_resend_last(hdev);
3897
3898 return;
3899 }
3900
3901 /* If we reach this point this event matches the last command sent */
3902 hci_dev_clear_flag(hdev, HCI_CMD_PENDING);
3903
3904 /* If the command succeeded and there's still more commands in
3905 * this request the request is not yet complete.
3906 */
3907 if (!status && !hci_req_is_complete(hdev))
3908 return;
3909
3910 skb = hdev->req_skb;
3911
3912 /* If this was the last command in a request the complete
3913 * callback would be found in hdev->req_skb instead of the
3914 * command queue (hdev->cmd_q).
3915 */
3916 if (skb && bt_cb(skb)->hci.req_flags & HCI_REQ_SKB) {
3917 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
3918 return;
3919 }
3920
3921 if (skb && bt_cb(skb)->hci.req_complete) {
3922 *req_complete = bt_cb(skb)->hci.req_complete;
3923 return;
3924 }
3925
3926 /* Remove all pending commands belonging to this request */
3927 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3928 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
3929 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
3930 __skb_queue_head(&hdev->cmd_q, skb);
3931 break;
3932 }
3933
3934 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
3935 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
3936 else
3937 *req_complete = bt_cb(skb)->hci.req_complete;
3938 dev_kfree_skb_irq(skb);
3939 }
3940 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3941 }
3942
3943 static void hci_rx_work(struct work_struct *work)
3944 {
3945 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
3946 struct sk_buff *skb;
3947
3948 BT_DBG("%s", hdev->name);
3949
3950 	/* The kcov_remote functions are used to collect packet parsing
3951 	 * coverage information in this background thread and to associate
3952 	 * it with the thread of the syscall that originally injected the
3953 	 * packet. This helps with fuzzing the kernel.
3954 	 */
3955 for (; (skb = skb_dequeue(&hdev->rx_q)); kcov_remote_stop()) {
3956 kcov_remote_start_common(skb_get_kcov_handle(skb));
3957
3958 /* Send copy to monitor */
3959 hci_send_to_monitor(hdev, skb);
3960
3961 if (atomic_read(&hdev->promisc)) {
3962 /* Send copy to the sockets */
3963 hci_send_to_sock(hdev, skb);
3964 }
3965
3966 		/* If the device has been opened in HCI_USER_CHANNEL,
3967 		 * userspace has exclusive access to the device.
3968 		 * While the device is in HCI_INIT state, packets still need
3969 		 * to be processed so the driver can complete its setup().
3970 		 */
3972 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
3973 !test_bit(HCI_INIT, &hdev->flags)) {
3974 kfree_skb(skb);
3975 continue;
3976 }
3977
3978 if (test_bit(HCI_INIT, &hdev->flags)) {
3979 			/* Don't process data packets in this state. */
3980 switch (hci_skb_pkt_type(skb)) {
3981 case HCI_ACLDATA_PKT:
3982 case HCI_SCODATA_PKT:
3983 case HCI_ISODATA_PKT:
3984 kfree_skb(skb);
3985 continue;
3986 }
3987 }
3988
3989 /* Process frame */
3990 switch (hci_skb_pkt_type(skb)) {
3991 case HCI_EVENT_PKT:
3992 BT_DBG("%s Event packet", hdev->name);
3993 hci_event_packet(hdev, skb);
3994 break;
3995
3996 case HCI_ACLDATA_PKT:
3997 BT_DBG("%s ACL data packet", hdev->name);
3998 hci_acldata_packet(hdev, skb);
3999 break;
4000
4001 case HCI_SCODATA_PKT:
4002 BT_DBG("%s SCO data packet", hdev->name);
4003 hci_scodata_packet(hdev, skb);
4004 break;
4005
4006 case HCI_ISODATA_PKT:
4007 BT_DBG("%s ISO data packet", hdev->name);
4008 hci_isodata_packet(hdev, skb);
4009 break;
4010
4011 default:
4012 kfree_skb(skb);
4013 break;
4014 }
4015 }
4016 }
4017
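/* Send the next queued command to the driver, keeping a clone in
 * hdev->sent_cmd (and in hdev->req_skb when a request status is pending) so
 * the response can be matched later.
 */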
4018 static void hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb)
4019 {
4020 int err;
4021
4022 bt_dev_dbg(hdev, "skb %p", skb);
4023
4024 kfree_skb(hdev->sent_cmd);
4025
4026 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4027 if (!hdev->sent_cmd) {
4028 skb_queue_head(&hdev->cmd_q, skb);
4029 queue_work(hdev->workqueue, &hdev->cmd_work);
4030 return;
4031 }
4032
4033 err = hci_send_frame(hdev, skb);
4034 if (err < 0) {
4035 hci_cmd_sync_cancel_sync(hdev, -err);
4036 return;
4037 }
4038
4039 if (hci_req_status_pend(hdev) &&
4040 !hci_dev_test_and_set_flag(hdev, HCI_CMD_PENDING)) {
4041 kfree_skb(hdev->req_skb);
4042 hdev->req_skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4043 }
4044
4045 atomic_dec(&hdev->cmd_cnt);
4046 }
4047
4048 static void hci_cmd_work(struct work_struct *work)
4049 {
4050 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4051 struct sk_buff *skb;
4052
4053 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4054 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4055
4056 /* Send queued commands */
4057 if (atomic_read(&hdev->cmd_cnt)) {
4058 skb = skb_dequeue(&hdev->cmd_q);
4059 if (!skb)
4060 return;
4061
4062 hci_send_cmd_sync(hdev, skb);
4063
4064 rcu_read_lock();
4065 if (test_bit(HCI_RESET, &hdev->flags) ||
4066 hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
4067 cancel_delayed_work(&hdev->cmd_timer);
4068 else
4069 queue_delayed_work(hdev->workqueue, &hdev->cmd_timer,
4070 HCI_CMD_TIMEOUT);
4071 rcu_read_unlock();
4072 }
4073 }
4074