1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
24 */
25
26 /* Bluetooth HCI core. */
27
28 #include <linux/export.h>
29 #include <linux/rfkill.h>
30 #include <linux/debugfs.h>
31 #include <linux/crypto.h>
32 #include <linux/kcov.h>
33 #include <linux/property.h>
34 #include <linux/suspend.h>
35 #include <linux/wait.h>
36 #include <asm/unaligned.h>
37
38 #include <net/bluetooth/bluetooth.h>
39 #include <net/bluetooth/hci_core.h>
40 #include <net/bluetooth/l2cap.h>
41 #include <net/bluetooth/mgmt.h>
42
43 #include "hci_request.h"
44 #include "hci_debugfs.h"
45 #include "smp.h"
46 #include "leds.h"
47 #include "msft.h"
48 #include "aosp.h"
49 #include "hci_codec.h"
50
51 static void hci_rx_work(struct work_struct *work);
52 static void hci_cmd_work(struct work_struct *work);
53 static void hci_tx_work(struct work_struct *work);
54
55 /* HCI device list */
56 LIST_HEAD(hci_dev_list);
57 DEFINE_RWLOCK(hci_dev_list_lock);
58
59 /* HCI callback list */
60 LIST_HEAD(hci_cb_list);
61
62 /* HCI ID Numbering */
63 static DEFINE_IDA(hci_index_ida);
64
65 /* Get HCI device by index.
66 * Device is held on return. */
67 struct hci_dev *hci_dev_get(int index)
68 {
69 struct hci_dev *hdev = NULL, *d;
70
71 BT_DBG("%d", index);
72
73 if (index < 0)
74 return NULL;
75
76 read_lock(&hci_dev_list_lock);
77 list_for_each_entry(d, &hci_dev_list, list) {
78 if (d->id == index) {
79 hdev = hci_dev_hold(d);
80 break;
81 }
82 }
83 read_unlock(&hci_dev_list_lock);
84 return hdev;
85 }
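
/* Illustrative sketch (not part of the original file): hci_dev_get() returns
 * the device with a reference held, so every successful lookup must be
 * balanced with hci_dev_put(). The helper name below is hypothetical and
 * only demonstrates that pattern.
 */
static void __maybe_unused hci_dev_get_usage_example(int index)
{
	struct hci_dev *hdev = hci_dev_get(index);

	if (!hdev)
		return;

	/* ... use hdev while the reference is held ... */

	hci_dev_put(hdev);
}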
86
87 /* ---- Inquiry support ---- */
88
89 bool hci_discovery_active(struct hci_dev *hdev)
90 {
91 struct discovery_state *discov = &hdev->discovery;
92
93 switch (discov->state) {
94 case DISCOVERY_FINDING:
95 case DISCOVERY_RESOLVING:
96 return true;
97
98 default:
99 return false;
100 }
101 }
102
103 void hci_discovery_set_state(struct hci_dev *hdev, int state)
104 {
105 int old_state = hdev->discovery.state;
106
107 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
108
109 if (old_state == state)
110 return;
111
112 hdev->discovery.state = state;
113
114 switch (state) {
115 case DISCOVERY_STOPPED:
116 hci_update_passive_scan(hdev);
117
118 if (old_state != DISCOVERY_STARTING)
119 mgmt_discovering(hdev, 0);
120 break;
121 case DISCOVERY_STARTING:
122 break;
123 case DISCOVERY_FINDING:
124 mgmt_discovering(hdev, 1);
125 break;
126 case DISCOVERY_RESOLVING:
127 break;
128 case DISCOVERY_STOPPING:
129 break;
130 }
131 }
132
133 void hci_inquiry_cache_flush(struct hci_dev *hdev)
134 {
135 struct discovery_state *cache = &hdev->discovery;
136 struct inquiry_entry *p, *n;
137
138 list_for_each_entry_safe(p, n, &cache->all, all) {
139 list_del(&p->all);
140 kfree(p);
141 }
142
143 INIT_LIST_HEAD(&cache->unknown);
144 INIT_LIST_HEAD(&cache->resolve);
145 }
146
147 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
148 bdaddr_t *bdaddr)
149 {
150 struct discovery_state *cache = &hdev->discovery;
151 struct inquiry_entry *e;
152
153 BT_DBG("cache %p, %pMR", cache, bdaddr);
154
155 list_for_each_entry(e, &cache->all, all) {
156 if (!bacmp(&e->data.bdaddr, bdaddr))
157 return e;
158 }
159
160 return NULL;
161 }
162
163 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
164 bdaddr_t *bdaddr)
165 {
166 struct discovery_state *cache = &hdev->discovery;
167 struct inquiry_entry *e;
168
169 BT_DBG("cache %p, %pMR", cache, bdaddr);
170
171 list_for_each_entry(e, &cache->unknown, list) {
172 if (!bacmp(&e->data.bdaddr, bdaddr))
173 return e;
174 }
175
176 return NULL;
177 }
178
179 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
180 bdaddr_t *bdaddr,
181 int state)
182 {
183 struct discovery_state *cache = &hdev->discovery;
184 struct inquiry_entry *e;
185
186 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
187
188 list_for_each_entry(e, &cache->resolve, list) {
189 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
190 return e;
191 if (!bacmp(&e->data.bdaddr, bdaddr))
192 return e;
193 }
194
195 return NULL;
196 }
197
198 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
199 struct inquiry_entry *ie)
200 {
201 struct discovery_state *cache = &hdev->discovery;
202 struct list_head *pos = &cache->resolve;
203 struct inquiry_entry *p;
204
205 list_del(&ie->list);
206
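	/* Keep the resolve list sorted by |RSSI| in ascending order, i.e.
	 * strongest signal first, so names are resolved for the closest
	 * devices first; entries already pending name resolution never
	 * trigger the break and are skipped over.
	 */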
207 list_for_each_entry(p, &cache->resolve, list) {
208 if (p->name_state != NAME_PENDING &&
209 abs(p->data.rssi) >= abs(ie->data.rssi))
210 break;
211 pos = &p->list;
212 }
213
214 list_add(&ie->list, pos);
215 }
216
217 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
218 bool name_known)
219 {
220 struct discovery_state *cache = &hdev->discovery;
221 struct inquiry_entry *ie;
222 u32 flags = 0;
223
224 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
225
226 hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
227
228 if (!data->ssp_mode)
229 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
230
231 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
232 if (ie) {
233 if (!ie->data.ssp_mode)
234 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
235
236 if (ie->name_state == NAME_NEEDED &&
237 data->rssi != ie->data.rssi) {
238 ie->data.rssi = data->rssi;
239 hci_inquiry_cache_update_resolve(hdev, ie);
240 }
241
242 goto update;
243 }
244
245 /* Entry not in the cache. Add new one. */
246 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
247 if (!ie) {
248 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
249 goto done;
250 }
251
252 list_add(&ie->all, &cache->all);
253
254 if (name_known) {
255 ie->name_state = NAME_KNOWN;
256 } else {
257 ie->name_state = NAME_NOT_KNOWN;
258 list_add(&ie->list, &cache->unknown);
259 }
260
261 update:
262 if (name_known && ie->name_state != NAME_KNOWN &&
263 ie->name_state != NAME_PENDING) {
264 ie->name_state = NAME_KNOWN;
265 list_del(&ie->list);
266 }
267
268 memcpy(&ie->data, data, sizeof(*data));
269 ie->timestamp = jiffies;
270 cache->timestamp = jiffies;
271
272 if (ie->name_state == NAME_NOT_KNOWN)
273 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
274
275 done:
276 return flags;
277 }
278
279 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
280 {
281 struct discovery_state *cache = &hdev->discovery;
282 struct inquiry_info *info = (struct inquiry_info *) buf;
283 struct inquiry_entry *e;
284 int copied = 0;
285
286 list_for_each_entry(e, &cache->all, all) {
287 struct inquiry_data *data = &e->data;
288
289 if (copied >= num)
290 break;
291
292 bacpy(&info->bdaddr, &data->bdaddr);
293 info->pscan_rep_mode = data->pscan_rep_mode;
294 info->pscan_period_mode = data->pscan_period_mode;
295 info->pscan_mode = data->pscan_mode;
296 memcpy(info->dev_class, data->dev_class, 3);
297 info->clock_offset = data->clock_offset;
298
299 info++;
300 copied++;
301 }
302
303 BT_DBG("cache %p, copied %d", cache, copied);
304 return copied;
305 }
306
307 static int hci_inq_req(struct hci_request *req, unsigned long opt)
308 {
309 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
310 struct hci_dev *hdev = req->hdev;
311 struct hci_cp_inquiry cp;
312
313 BT_DBG("%s", hdev->name);
314
315 if (test_bit(HCI_INQUIRY, &hdev->flags))
316 return 0;
317
318 /* Start Inquiry */
319 memcpy(&cp.lap, &ir->lap, 3);
320 cp.length = ir->length;
321 cp.num_rsp = ir->num_rsp;
322 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
323
324 return 0;
325 }
326
327 int hci_inquiry(void __user *arg)
328 {
329 __u8 __user *ptr = arg;
330 struct hci_inquiry_req ir;
331 struct hci_dev *hdev;
332 int err = 0, do_inquiry = 0, max_rsp;
333 long timeo;
334 __u8 *buf;
335
336 if (copy_from_user(&ir, ptr, sizeof(ir)))
337 return -EFAULT;
338
339 hdev = hci_dev_get(ir.dev_id);
340 if (!hdev)
341 return -ENODEV;
342
343 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
344 err = -EBUSY;
345 goto done;
346 }
347
348 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
349 err = -EOPNOTSUPP;
350 goto done;
351 }
352
353 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
354 err = -EOPNOTSUPP;
355 goto done;
356 }
357
358 /* Restrict maximum inquiry length to 60 seconds */
359 if (ir.length > 60) {
360 err = -EINVAL;
361 goto done;
362 }
363
364 hci_dev_lock(hdev);
365 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
366 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
367 hci_inquiry_cache_flush(hdev);
368 do_inquiry = 1;
369 }
370 hci_dev_unlock(hdev);
371
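	/* ir.length is expressed in units of 1.28 s in the HCI specification;
	 * budgeting 2 s per unit leaves some margin over the nominal
	 * inquiry duration.
	 */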
372 timeo = ir.length * msecs_to_jiffies(2000);
373
374 if (do_inquiry) {
375 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
376 timeo, NULL);
377 if (err < 0)
378 goto done;
379
380 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
381 * cleared). If it is interrupted by a signal, return -EINTR.
382 */
383 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
384 TASK_INTERRUPTIBLE)) {
385 err = -EINTR;
386 goto done;
387 }
388 }
389
390 /* For an unlimited number of responses, use a buffer with
391 * 255 entries.
392 */
393 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
394
395 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
396 * copy it to the user space.
397 */
398 buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
399 if (!buf) {
400 err = -ENOMEM;
401 goto done;
402 }
403
404 hci_dev_lock(hdev);
405 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
406 hci_dev_unlock(hdev);
407
408 BT_DBG("num_rsp %d", ir.num_rsp);
409
410 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
411 ptr += sizeof(ir);
412 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
413 ir.num_rsp))
414 err = -EFAULT;
415 } else
416 err = -EFAULT;
417
418 kfree(buf);
419
420 done:
421 hci_dev_put(hdev);
422 return err;
423 }
424
425 static int hci_dev_do_open(struct hci_dev *hdev)
426 {
427 int ret = 0;
428
429 BT_DBG("%s %p", hdev->name, hdev);
430
431 hci_req_sync_lock(hdev);
432
433 ret = hci_dev_open_sync(hdev);
434
435 hci_req_sync_unlock(hdev);
436 return ret;
437 }
438
439 /* ---- HCI ioctl helpers ---- */
440
441 int hci_dev_open(__u16 dev)
442 {
443 struct hci_dev *hdev;
444 int err;
445
446 hdev = hci_dev_get(dev);
447 if (!hdev)
448 return -ENODEV;
449
450 /* Devices that are marked as unconfigured can only be powered
451 * up as user channel. Trying to bring them up as normal devices
452 * will result in a failure. Only user channel operation is
453 * possible.
454 *
455 * When this function is called for a user channel, the flag
456 * HCI_USER_CHANNEL will be set first before attempting to
457 * open the device.
458 */
459 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
460 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
461 err = -EOPNOTSUPP;
462 goto done;
463 }
464
465 /* We need to ensure that no other power on/off work is pending
466 * before proceeding to call hci_dev_do_open. This is
467 * particularly important if the setup procedure has not yet
468 * completed.
469 */
470 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
471 cancel_delayed_work(&hdev->power_off);
472
473 /* After this call it is guaranteed that the setup procedure
474 * has finished. This means that error conditions like RFKILL
475 * or no valid public or static random address apply.
476 */
477 flush_workqueue(hdev->req_workqueue);
478
479 /* For controllers not using the management interface and that
480 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
481 * so that pairing works for them. Once the management interface
482 * is in use this bit will be cleared again and userspace has
483 * to explicitly enable it.
484 */
485 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
486 !hci_dev_test_flag(hdev, HCI_MGMT))
487 hci_dev_set_flag(hdev, HCI_BONDABLE);
488
489 err = hci_dev_do_open(hdev);
490
491 done:
492 hci_dev_put(hdev);
493 return err;
494 }
495
496 int hci_dev_do_close(struct hci_dev *hdev)
497 {
498 int err;
499
500 BT_DBG("%s %p", hdev->name, hdev);
501
502 hci_req_sync_lock(hdev);
503
504 err = hci_dev_close_sync(hdev);
505
506 hci_req_sync_unlock(hdev);
507
508 return err;
509 }
510
511 int hci_dev_close(__u16 dev)
512 {
513 struct hci_dev *hdev;
514 int err;
515
516 hdev = hci_dev_get(dev);
517 if (!hdev)
518 return -ENODEV;
519
520 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
521 err = -EBUSY;
522 goto done;
523 }
524
525 cancel_work_sync(&hdev->power_on);
526 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
527 cancel_delayed_work(&hdev->power_off);
528
529 err = hci_dev_do_close(hdev);
530
531 done:
532 hci_dev_put(hdev);
533 return err;
534 }
535
536 static int hci_dev_do_reset(struct hci_dev *hdev)
537 {
538 int ret;
539
540 BT_DBG("%s %p", hdev->name, hdev);
541
542 hci_req_sync_lock(hdev);
543
544 /* Drop queues */
545 skb_queue_purge(&hdev->rx_q);
546 skb_queue_purge(&hdev->cmd_q);
547
548 /* Cancel these to avoid queueing non-chained pending work */
549 hci_dev_set_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
550 /* Wait for
551 *
552 * if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
553 * queue_delayed_work(&hdev->{cmd,ncmd}_timer)
554 *
555 * inside RCU section to see the flag or complete scheduling.
556 */
557 synchronize_rcu();
558 /* Explicitly cancel works in case scheduled after setting the flag. */
559 cancel_delayed_work(&hdev->cmd_timer);
560 cancel_delayed_work(&hdev->ncmd_timer);
561
562 /* Avoid potential lockdep warnings from the *_flush() calls by
563 * ensuring the workqueue is empty up front.
564 */
565 drain_workqueue(hdev->workqueue);
566
567 hci_dev_lock(hdev);
568 hci_inquiry_cache_flush(hdev);
569 hci_conn_hash_flush(hdev);
570 hci_dev_unlock(hdev);
571
572 if (hdev->flush)
573 hdev->flush(hdev);
574
575 hci_dev_clear_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
576
577 atomic_set(&hdev->cmd_cnt, 1);
578 hdev->acl_cnt = 0;
579 hdev->sco_cnt = 0;
580 hdev->le_cnt = 0;
581 hdev->iso_cnt = 0;
582
583 ret = hci_reset_sync(hdev);
584
585 hci_req_sync_unlock(hdev);
586 return ret;
587 }
588
589 int hci_dev_reset(__u16 dev)
590 {
591 struct hci_dev *hdev;
592 int err;
593
594 hdev = hci_dev_get(dev);
595 if (!hdev)
596 return -ENODEV;
597
598 if (!test_bit(HCI_UP, &hdev->flags)) {
599 err = -ENETDOWN;
600 goto done;
601 }
602
603 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
604 err = -EBUSY;
605 goto done;
606 }
607
608 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
609 err = -EOPNOTSUPP;
610 goto done;
611 }
612
613 err = hci_dev_do_reset(hdev);
614
615 done:
616 hci_dev_put(hdev);
617 return err;
618 }
619
620 int hci_dev_reset_stat(__u16 dev)
621 {
622 struct hci_dev *hdev;
623 int ret = 0;
624
625 hdev = hci_dev_get(dev);
626 if (!hdev)
627 return -ENODEV;
628
629 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
630 ret = -EBUSY;
631 goto done;
632 }
633
634 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
635 ret = -EOPNOTSUPP;
636 goto done;
637 }
638
639 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
640
641 done:
642 hci_dev_put(hdev);
643 return ret;
644 }
645
646 static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan)
647 {
648 bool conn_changed, discov_changed;
649
650 BT_DBG("%s scan 0x%02x", hdev->name, scan);
651
652 if ((scan & SCAN_PAGE))
653 conn_changed = !hci_dev_test_and_set_flag(hdev,
654 HCI_CONNECTABLE);
655 else
656 conn_changed = hci_dev_test_and_clear_flag(hdev,
657 HCI_CONNECTABLE);
658
659 if ((scan & SCAN_INQUIRY)) {
660 discov_changed = !hci_dev_test_and_set_flag(hdev,
661 HCI_DISCOVERABLE);
662 } else {
663 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
664 discov_changed = hci_dev_test_and_clear_flag(hdev,
665 HCI_DISCOVERABLE);
666 }
667
668 if (!hci_dev_test_flag(hdev, HCI_MGMT))
669 return;
670
671 if (conn_changed || discov_changed) {
672 /* In case this was disabled through mgmt */
673 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
674
675 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
676 hci_update_adv_data(hdev, hdev->cur_adv_instance);
677
678 mgmt_new_settings(hdev);
679 }
680 }
681
682 int hci_dev_cmd(unsigned int cmd, void __user *arg)
683 {
684 struct hci_dev *hdev;
685 struct hci_dev_req dr;
686 __le16 policy;
687 int err = 0;
688
689 if (copy_from_user(&dr, arg, sizeof(dr)))
690 return -EFAULT;
691
692 hdev = hci_dev_get(dr.dev_id);
693 if (!hdev)
694 return -ENODEV;
695
696 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
697 err = -EBUSY;
698 goto done;
699 }
700
701 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
702 err = -EOPNOTSUPP;
703 goto done;
704 }
705
706 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
707 err = -EOPNOTSUPP;
708 goto done;
709 }
710
711 switch (cmd) {
712 case HCISETAUTH:
713 err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_AUTH_ENABLE,
714 1, &dr.dev_opt, HCI_CMD_TIMEOUT);
715 break;
716
717 case HCISETENCRYPT:
718 if (!lmp_encrypt_capable(hdev)) {
719 err = -EOPNOTSUPP;
720 break;
721 }
722
723 if (!test_bit(HCI_AUTH, &hdev->flags)) {
724 /* Auth must be enabled first */
725 err = hci_cmd_sync_status(hdev,
726 HCI_OP_WRITE_AUTH_ENABLE,
727 1, &dr.dev_opt,
728 HCI_CMD_TIMEOUT);
729 if (err)
730 break;
731 }
732
733 err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_ENCRYPT_MODE,
734 1, &dr.dev_opt, HCI_CMD_TIMEOUT);
735 break;
736
737 case HCISETSCAN:
738 err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE,
739 1, &dr.dev_opt, HCI_CMD_TIMEOUT);
740
741 /* Ensure that the connectable and discoverable states
742 * get correctly modified as this was a non-mgmt change.
743 */
744 if (!err)
745 hci_update_passive_scan_state(hdev, dr.dev_opt);
746 break;
747
748 case HCISETLINKPOL:
749 policy = cpu_to_le16(dr.dev_opt);
750
751 err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
752 2, &policy, HCI_CMD_TIMEOUT);
753 break;
754
755 case HCISETLINKMODE:
756 hdev->link_mode = ((__u16) dr.dev_opt) &
757 (HCI_LM_MASTER | HCI_LM_ACCEPT);
758 break;
759
760 case HCISETPTYPE:
761 if (hdev->pkt_type == (__u16) dr.dev_opt)
762 break;
763
764 hdev->pkt_type = (__u16) dr.dev_opt;
765 mgmt_phy_configuration_changed(hdev, NULL);
766 break;
767
768 case HCISETACLMTU:
769 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
770 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
771 break;
772
773 case HCISETSCOMTU:
774 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
775 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
776 break;
777
778 default:
779 err = -EINVAL;
780 break;
781 }
782
783 done:
784 hci_dev_put(hdev);
785 return err;
786 }
787
788 int hci_get_dev_list(void __user *arg)
789 {
790 struct hci_dev *hdev;
791 struct hci_dev_list_req *dl;
792 struct hci_dev_req *dr;
793 int n = 0, size, err;
794 __u16 dev_num;
795
796 if (get_user(dev_num, (__u16 __user *) arg))
797 return -EFAULT;
798
799 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
800 return -EINVAL;
801
802 size = sizeof(*dl) + dev_num * sizeof(*dr);
803
804 dl = kzalloc(size, GFP_KERNEL);
805 if (!dl)
806 return -ENOMEM;
807
808 dr = dl->dev_req;
809
810 read_lock(&hci_dev_list_lock);
811 list_for_each_entry(hdev, &hci_dev_list, list) {
812 unsigned long flags = hdev->flags;
813
814 /* When the auto-off is configured it means the transport
815 * is running, but in that case still indicate that the
816 * device is actually down.
817 */
818 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
819 flags &= ~BIT(HCI_UP);
820
821 (dr + n)->dev_id = hdev->id;
822 (dr + n)->dev_opt = flags;
823
824 if (++n >= dev_num)
825 break;
826 }
827 read_unlock(&hci_dev_list_lock);
828
829 dl->dev_num = n;
830 size = sizeof(*dl) + n * sizeof(*dr);
831
832 err = copy_to_user(arg, dl, size);
833 kfree(dl);
834
835 return err ? -EFAULT : 0;
836 }
837
838 int hci_get_dev_info(void __user *arg)
839 {
840 struct hci_dev *hdev;
841 struct hci_dev_info di;
842 unsigned long flags;
843 int err = 0;
844
845 if (copy_from_user(&di, arg, sizeof(di)))
846 return -EFAULT;
847
848 hdev = hci_dev_get(di.dev_id);
849 if (!hdev)
850 return -ENODEV;
851
852 /* When the auto-off is configured it means the transport
853 * is running, but in that case still indicate that the
854 * device is actually down.
855 */
856 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
857 flags = hdev->flags & ~BIT(HCI_UP);
858 else
859 flags = hdev->flags;
860
861 strscpy(di.name, hdev->name, sizeof(di.name));
862 di.bdaddr = hdev->bdaddr;
863 di.type = (hdev->bus & 0x0f);
864 di.flags = flags;
865 di.pkt_type = hdev->pkt_type;
866 if (lmp_bredr_capable(hdev)) {
867 di.acl_mtu = hdev->acl_mtu;
868 di.acl_pkts = hdev->acl_pkts;
869 di.sco_mtu = hdev->sco_mtu;
870 di.sco_pkts = hdev->sco_pkts;
871 } else {
872 di.acl_mtu = hdev->le_mtu;
873 di.acl_pkts = hdev->le_pkts;
874 di.sco_mtu = 0;
875 di.sco_pkts = 0;
876 }
877 di.link_policy = hdev->link_policy;
878 di.link_mode = hdev->link_mode;
879
880 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
881 memcpy(&di.features, &hdev->features, sizeof(di.features));
882
883 if (copy_to_user(arg, &di, sizeof(di)))
884 err = -EFAULT;
885
886 hci_dev_put(hdev);
887
888 return err;
889 }
890
891 /* ---- Interface to HCI drivers ---- */
892
893 static int hci_rfkill_set_block(void *data, bool blocked)
894 {
895 struct hci_dev *hdev = data;
896
897 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
898
899 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
900 return -EBUSY;
901
902 if (blocked) {
903 hci_dev_set_flag(hdev, HCI_RFKILLED);
904 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
905 !hci_dev_test_flag(hdev, HCI_CONFIG))
906 hci_dev_do_close(hdev);
907 } else {
908 hci_dev_clear_flag(hdev, HCI_RFKILLED);
909 }
910
911 return 0;
912 }
913
914 static const struct rfkill_ops hci_rfkill_ops = {
915 .set_block = hci_rfkill_set_block,
916 };
917
918 static void hci_power_on(struct work_struct *work)
919 {
920 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
921 int err;
922
923 BT_DBG("%s", hdev->name);
924
925 if (test_bit(HCI_UP, &hdev->flags) &&
926 hci_dev_test_flag(hdev, HCI_MGMT) &&
927 hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
928 cancel_delayed_work(&hdev->power_off);
929 err = hci_powered_update_sync(hdev);
930 mgmt_power_on(hdev, err);
931 return;
932 }
933
934 err = hci_dev_do_open(hdev);
935 if (err < 0) {
936 hci_dev_lock(hdev);
937 mgmt_set_powered_failed(hdev, err);
938 hci_dev_unlock(hdev);
939 return;
940 }
941
942 /* During the HCI setup phase, a few error conditions are
943 * ignored and they need to be checked now. If they are still
944 * valid, it is important to turn the device back off.
945 */
946 if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
947 hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
948 (!bacmp(&hdev->bdaddr, BDADDR_ANY) &&
949 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
950 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
951 hci_dev_do_close(hdev);
952 } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
953 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
954 HCI_AUTO_OFF_TIMEOUT);
955 }
956
957 if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
958 /* For unconfigured devices, set the HCI_RAW flag
959 * so that userspace can easily identify them.
960 */
961 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
962 set_bit(HCI_RAW, &hdev->flags);
963
964 /* For fully configured devices, this will send
965 * the Index Added event. For unconfigured devices,
966 * it will send an Unconfigured Index Added event.
967 *
968 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
969 * and no event will be sent.
970 */
971 mgmt_index_added(hdev);
972 } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
973 /* Now that the controller is configured, it is
974 * important to clear the HCI_RAW flag.
975 */
976 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
977 clear_bit(HCI_RAW, &hdev->flags);
978
979 /* Powering on the controller with HCI_CONFIG set only
980 * happens with the transition from unconfigured to
981 * configured. This will send the Index Added event.
982 */
983 mgmt_index_added(hdev);
984 }
985 }
986
987 static void hci_power_off(struct work_struct *work)
988 {
989 struct hci_dev *hdev = container_of(work, struct hci_dev,
990 power_off.work);
991
992 BT_DBG("%s", hdev->name);
993
994 hci_dev_do_close(hdev);
995 }
996
997 static void hci_error_reset(struct work_struct *work)
998 {
999 struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
1000
1001 hci_dev_hold(hdev);
1002 BT_DBG("%s", hdev->name);
1003
1004 if (hdev->hw_error)
1005 hdev->hw_error(hdev, hdev->hw_error_code);
1006 else
1007 bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
1008
1009 if (!hci_dev_do_close(hdev))
1010 hci_dev_do_open(hdev);
1011
1012 hci_dev_put(hdev);
1013 }
1014
1015 void hci_uuids_clear(struct hci_dev *hdev)
1016 {
1017 struct bt_uuid *uuid, *tmp;
1018
1019 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1020 list_del(&uuid->list);
1021 kfree(uuid);
1022 }
1023 }
1024
1025 void hci_link_keys_clear(struct hci_dev *hdev)
1026 {
1027 struct link_key *key, *tmp;
1028
1029 list_for_each_entry_safe(key, tmp, &hdev->link_keys, list) {
1030 list_del_rcu(&key->list);
1031 kfree_rcu(key, rcu);
1032 }
1033 }
1034
1035 void hci_smp_ltks_clear(struct hci_dev *hdev)
1036 {
1037 struct smp_ltk *k, *tmp;
1038
1039 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1040 list_del_rcu(&k->list);
1041 kfree_rcu(k, rcu);
1042 }
1043 }
1044
1045 void hci_smp_irks_clear(struct hci_dev *hdev)
1046 {
1047 struct smp_irk *k, *tmp;
1048
1049 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
1050 list_del_rcu(&k->list);
1051 kfree_rcu(k, rcu);
1052 }
1053 }
1054
1055 void hci_blocked_keys_clear(struct hci_dev *hdev)
1056 {
1057 struct blocked_key *b, *tmp;
1058
1059 list_for_each_entry_safe(b, tmp, &hdev->blocked_keys, list) {
1060 list_del_rcu(&b->list);
1061 kfree_rcu(b, rcu);
1062 }
1063 }
1064
1065 bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
1066 {
1067 bool blocked = false;
1068 struct blocked_key *b;
1069
1070 rcu_read_lock();
1071 list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
1072 if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
1073 blocked = true;
1074 break;
1075 }
1076 }
1077
1078 rcu_read_unlock();
1079 return blocked;
1080 }
1081
1082 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1083 {
1084 struct link_key *k;
1085
1086 rcu_read_lock();
1087 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
1088 if (bacmp(bdaddr, &k->bdaddr) == 0) {
1089 rcu_read_unlock();
1090
1091 if (hci_is_blocked_key(hdev,
1092 HCI_BLOCKED_KEY_TYPE_LINKKEY,
1093 k->val)) {
1094 bt_dev_warn_ratelimited(hdev,
1095 "Link key blocked for %pMR",
1096 &k->bdaddr);
1097 return NULL;
1098 }
1099
1100 return k;
1101 }
1102 }
1103 rcu_read_unlock();
1104
1105 return NULL;
1106 }
1107
1108 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1109 u8 key_type, u8 old_key_type)
1110 {
1111 /* Legacy key */
1112 if (key_type < 0x03)
1113 return true;
1114
1115 /* Debug keys are insecure so don't store them persistently */
1116 if (key_type == HCI_LK_DEBUG_COMBINATION)
1117 return false;
1118
1119 /* Changed combination key and there's no previous one */
1120 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1121 return false;
1122
1123 /* Security mode 3 case */
1124 if (!conn)
1125 return true;
1126
1127 /* BR/EDR key derived using SC from an LE link */
1128 if (conn->type == LE_LINK)
1129 return true;
1130
1131 /* Neither local nor remote side had no-bonding as requirement */
1132 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1133 return true;
1134
1135 /* Local side had dedicated bonding as requirement */
1136 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1137 return true;
1138
1139 /* Remote side had dedicated bonding as requirement */
1140 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1141 return true;
1142
1143 /* If none of the above criteria match, then don't store the key
1144 * persistently */
1145 return false;
1146 }
1147
1148 static u8 ltk_role(u8 type)
1149 {
1150 if (type == SMP_LTK)
1151 return HCI_ROLE_MASTER;
1152
1153 return HCI_ROLE_SLAVE;
1154 }
1155
1156 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1157 u8 addr_type, u8 role)
1158 {
1159 struct smp_ltk *k;
1160
1161 rcu_read_lock();
1162 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
1163 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
1164 continue;
1165
1166 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
1167 rcu_read_unlock();
1168
1169 if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
1170 k->val)) {
1171 bt_dev_warn_ratelimited(hdev,
1172 "LTK blocked for %pMR",
1173 &k->bdaddr);
1174 return NULL;
1175 }
1176
1177 return k;
1178 }
1179 }
1180 rcu_read_unlock();
1181
1182 return NULL;
1183 }
1184
1185 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
1186 {
1187 struct smp_irk *irk_to_return = NULL;
1188 struct smp_irk *irk;
1189
1190 rcu_read_lock();
1191 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1192 if (!bacmp(&irk->rpa, rpa)) {
1193 irk_to_return = irk;
1194 goto done;
1195 }
1196 }
1197
1198 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1199 if (smp_irk_matches(hdev, irk->val, rpa)) {
1200 bacpy(&irk->rpa, rpa);
1201 irk_to_return = irk;
1202 goto done;
1203 }
1204 }
1205
1206 done:
1207 if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
1208 irk_to_return->val)) {
1209 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
1210 &irk_to_return->bdaddr);
1211 irk_to_return = NULL;
1212 }
1213
1214 rcu_read_unlock();
1215
1216 return irk_to_return;
1217 }
1218
1219 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1220 u8 addr_type)
1221 {
1222 struct smp_irk *irk_to_return = NULL;
1223 struct smp_irk *irk;
1224
1225 /* Identity Address must be public or static random; a static random
	 * address has its two most significant bits set, hence the 0xc0 check.
	 */
1226 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
1227 return NULL;
1228
1229 rcu_read_lock();
1230 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1231 if (addr_type == irk->addr_type &&
1232 bacmp(bdaddr, &irk->bdaddr) == 0) {
1233 irk_to_return = irk;
1234 goto done;
1235 }
1236 }
1237
1238 done:
1239
1240 if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
1241 irk_to_return->val)) {
1242 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
1243 &irk_to_return->bdaddr);
1244 irk_to_return = NULL;
1245 }
1246
1247 rcu_read_unlock();
1248
1249 return irk_to_return;
1250 }
1251
1252 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
1253 bdaddr_t *bdaddr, u8 *val, u8 type,
1254 u8 pin_len, bool *persistent)
1255 {
1256 struct link_key *key, *old_key;
1257 u8 old_key_type;
1258
1259 old_key = hci_find_link_key(hdev, bdaddr);
1260 if (old_key) {
1261 old_key_type = old_key->type;
1262 key = old_key;
1263 } else {
1264 old_key_type = conn ? conn->key_type : 0xff;
1265 key = kzalloc(sizeof(*key), GFP_KERNEL);
1266 if (!key)
1267 return NULL;
1268 list_add_rcu(&key->list, &hdev->link_keys);
1269 }
1270
1271 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
1272
1273 /* Some buggy controller combinations generate a changed
1274 * combination key for legacy pairing even when there's no
1275 * previous key */
1276 if (type == HCI_LK_CHANGED_COMBINATION &&
1277 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1278 type = HCI_LK_COMBINATION;
1279 if (conn)
1280 conn->key_type = type;
1281 }
1282
1283 bacpy(&key->bdaddr, bdaddr);
1284 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1285 key->pin_len = pin_len;
1286
1287 if (type == HCI_LK_CHANGED_COMBINATION)
1288 key->type = old_key_type;
1289 else
1290 key->type = type;
1291
1292 if (persistent)
1293 *persistent = hci_persistent_key(hdev, conn, type,
1294 old_key_type);
1295
1296 return key;
1297 }
1298
1299 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1300 u8 addr_type, u8 type, u8 authenticated,
1301 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
1302 {
1303 struct smp_ltk *key, *old_key;
1304 u8 role = ltk_role(type);
1305
1306 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
1307 if (old_key)
1308 key = old_key;
1309 else {
1310 key = kzalloc(sizeof(*key), GFP_KERNEL);
1311 if (!key)
1312 return NULL;
1313 list_add_rcu(&key->list, &hdev->long_term_keys);
1314 }
1315
1316 bacpy(&key->bdaddr, bdaddr);
1317 key->bdaddr_type = addr_type;
1318 memcpy(key->val, tk, sizeof(key->val));
1319 key->authenticated = authenticated;
1320 key->ediv = ediv;
1321 key->rand = rand;
1322 key->enc_size = enc_size;
1323 key->type = type;
1324
1325 return key;
1326 }
1327
1328 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1329 u8 addr_type, u8 val[16], bdaddr_t *rpa)
1330 {
1331 struct smp_irk *irk;
1332
1333 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
1334 if (!irk) {
1335 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
1336 if (!irk)
1337 return NULL;
1338
1339 bacpy(&irk->bdaddr, bdaddr);
1340 irk->addr_type = addr_type;
1341
1342 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
1343 }
1344
1345 memcpy(irk->val, val, 16);
1346 bacpy(&irk->rpa, rpa);
1347
1348 return irk;
1349 }
1350
1351 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1352 {
1353 struct link_key *key;
1354
1355 key = hci_find_link_key(hdev, bdaddr);
1356 if (!key)
1357 return -ENOENT;
1358
1359 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1360
1361 list_del_rcu(&key->list);
1362 kfree_rcu(key, rcu);
1363
1364 return 0;
1365 }
1366
1367 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
1368 {
1369 struct smp_ltk *k, *tmp;
1370 int removed = 0;
1371
1372 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1373 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
1374 continue;
1375
1376 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1377
1378 list_del_rcu(&k->list);
1379 kfree_rcu(k, rcu);
1380 removed++;
1381 }
1382
1383 return removed ? 0 : -ENOENT;
1384 }
1385
1386 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
1387 {
1388 struct smp_irk *k, *tmp;
1389
1390 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
1391 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
1392 continue;
1393
1394 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1395
1396 list_del_rcu(&k->list);
1397 kfree_rcu(k, rcu);
1398 }
1399 }
1400
1401 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1402 {
1403 struct smp_ltk *k;
1404 struct smp_irk *irk;
1405 u8 addr_type;
1406
1407 if (type == BDADDR_BREDR) {
1408 if (hci_find_link_key(hdev, bdaddr))
1409 return true;
1410 return false;
1411 }
1412
1413 /* Convert to HCI addr type which struct smp_ltk uses */
1414 if (type == BDADDR_LE_PUBLIC)
1415 addr_type = ADDR_LE_DEV_PUBLIC;
1416 else
1417 addr_type = ADDR_LE_DEV_RANDOM;
1418
1419 irk = hci_get_irk(hdev, bdaddr, addr_type);
1420 if (irk) {
1421 bdaddr = &irk->bdaddr;
1422 addr_type = irk->addr_type;
1423 }
1424
1425 rcu_read_lock();
1426 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
1427 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
1428 rcu_read_unlock();
1429 return true;
1430 }
1431 }
1432 rcu_read_unlock();
1433
1434 return false;
1435 }
1436
1437 /* HCI command timer function */
1438 static void hci_cmd_timeout(struct work_struct *work)
1439 {
1440 struct hci_dev *hdev = container_of(work, struct hci_dev,
1441 cmd_timer.work);
1442
1443 if (hdev->req_skb) {
1444 u16 opcode = hci_skb_opcode(hdev->req_skb);
1445
1446 bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
1447
1448 hci_cmd_sync_cancel_sync(hdev, ETIMEDOUT);
1449 } else {
1450 bt_dev_err(hdev, "command tx timeout");
1451 }
1452
1453 if (hdev->cmd_timeout)
1454 hdev->cmd_timeout(hdev);
1455
1456 atomic_set(&hdev->cmd_cnt, 1);
1457 queue_work(hdev->workqueue, &hdev->cmd_work);
1458 }
1459
1460 /* HCI ncmd timer function */
1461 static void hci_ncmd_timeout(struct work_struct *work)
1462 {
1463 struct hci_dev *hdev = container_of(work, struct hci_dev,
1464 ncmd_timer.work);
1465
1466 bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0");
1467
1468 /* During HCI_INIT phase no events can be injected if the ncmd timer
1469 * triggers since the procedure has its own timeout handling.
1470 */
1471 if (test_bit(HCI_INIT, &hdev->flags))
1472 return;
1473
1474 /* This is an irrecoverable state, inject hardware error event */
1475 hci_reset_dev(hdev);
1476 }
1477
1478 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1479 bdaddr_t *bdaddr, u8 bdaddr_type)
1480 {
1481 struct oob_data *data;
1482
1483 list_for_each_entry(data, &hdev->remote_oob_data, list) {
1484 if (bacmp(bdaddr, &data->bdaddr) != 0)
1485 continue;
1486 if (data->bdaddr_type != bdaddr_type)
1487 continue;
1488 return data;
1489 }
1490
1491 return NULL;
1492 }
1493
1494 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
1495 u8 bdaddr_type)
1496 {
1497 struct oob_data *data;
1498
1499 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
1500 if (!data)
1501 return -ENOENT;
1502
1503 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
1504
1505 list_del(&data->list);
1506 kfree(data);
1507
1508 return 0;
1509 }
1510
1511 void hci_remote_oob_data_clear(struct hci_dev *hdev)
1512 {
1513 struct oob_data *data, *n;
1514
1515 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1516 list_del(&data->list);
1517 kfree(data);
1518 }
1519 }
1520
1521 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
1522 u8 bdaddr_type, u8 *hash192, u8 *rand192,
1523 u8 *hash256, u8 *rand256)
1524 {
1525 struct oob_data *data;
1526
1527 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
1528 if (!data) {
1529 data = kmalloc(sizeof(*data), GFP_KERNEL);
1530 if (!data)
1531 return -ENOMEM;
1532
1533 bacpy(&data->bdaddr, bdaddr);
1534 data->bdaddr_type = bdaddr_type;
1535 list_add(&data->list, &hdev->remote_oob_data);
1536 }
1537
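	/* data->present is a bitmask of which OOB values are valid:
	 * 0x01 = P-192 values only, 0x02 = P-256 values only, 0x03 = both.
	 */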
1538 if (hash192 && rand192) {
1539 memcpy(data->hash192, hash192, sizeof(data->hash192));
1540 memcpy(data->rand192, rand192, sizeof(data->rand192));
1541 if (hash256 && rand256)
1542 data->present = 0x03;
1543 } else {
1544 memset(data->hash192, 0, sizeof(data->hash192));
1545 memset(data->rand192, 0, sizeof(data->rand192));
1546 if (hash256 && rand256)
1547 data->present = 0x02;
1548 else
1549 data->present = 0x00;
1550 }
1551
1552 if (hash256 && rand256) {
1553 memcpy(data->hash256, hash256, sizeof(data->hash256));
1554 memcpy(data->rand256, rand256, sizeof(data->rand256));
1555 } else {
1556 memset(data->hash256, 0, sizeof(data->hash256));
1557 memset(data->rand256, 0, sizeof(data->rand256));
1558 if (hash192 && rand192)
1559 data->present = 0x01;
1560 }
1561
1562 BT_DBG("%s for %pMR", hdev->name, bdaddr);
1563
1564 return 0;
1565 }
1566
1567 /* This function requires the caller holds hdev->lock */
1568 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
1569 {
1570 struct adv_info *adv_instance;
1571
1572 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
1573 if (adv_instance->instance == instance)
1574 return adv_instance;
1575 }
1576
1577 return NULL;
1578 }
1579
1580 /* This function requires the caller holds hdev->lock */
1581 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
1582 {
1583 struct adv_info *cur_instance;
1584
1585 cur_instance = hci_find_adv_instance(hdev, instance);
1586 if (!cur_instance)
1587 return NULL;
1588
1589 if (cur_instance == list_last_entry(&hdev->adv_instances,
1590 struct adv_info, list))
1591 return list_first_entry(&hdev->adv_instances,
1592 struct adv_info, list);
1593 else
1594 return list_next_entry(cur_instance, list);
1595 }
1596
1597 /* This function requires the caller holds hdev->lock */
1598 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
1599 {
1600 struct adv_info *adv_instance;
1601
1602 adv_instance = hci_find_adv_instance(hdev, instance);
1603 if (!adv_instance)
1604 return -ENOENT;
1605
1606 BT_DBG("%s removing %dMR", hdev->name, instance);
1607
1608 if (hdev->cur_adv_instance == instance) {
1609 if (hdev->adv_instance_timeout) {
1610 cancel_delayed_work(&hdev->adv_instance_expire);
1611 hdev->adv_instance_timeout = 0;
1612 }
1613 hdev->cur_adv_instance = 0x00;
1614 }
1615
1616 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1617
1618 list_del(&adv_instance->list);
1619 kfree(adv_instance);
1620
1621 hdev->adv_instance_cnt--;
1622
1623 return 0;
1624 }
1625
1626 void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
1627 {
1628 struct adv_info *adv_instance, *n;
1629
1630 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
1631 adv_instance->rpa_expired = rpa_expired;
1632 }
1633
1634 /* This function requires the caller holds hdev->lock */
1635 void hci_adv_instances_clear(struct hci_dev *hdev)
1636 {
1637 struct adv_info *adv_instance, *n;
1638
1639 if (hdev->adv_instance_timeout) {
1640 cancel_delayed_work(&hdev->adv_instance_expire);
1641 hdev->adv_instance_timeout = 0;
1642 }
1643
1644 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
1645 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1646 list_del(&adv_instance->list);
1647 kfree(adv_instance);
1648 }
1649
1650 hdev->adv_instance_cnt = 0;
1651 hdev->cur_adv_instance = 0x00;
1652 }
1653
1654 static void adv_instance_rpa_expired(struct work_struct *work)
1655 {
1656 struct adv_info *adv_instance = container_of(work, struct adv_info,
1657 rpa_expired_cb.work);
1658
1659 BT_DBG("");
1660
1661 adv_instance->rpa_expired = true;
1662 }
1663
1664 /* This function requires the caller holds hdev->lock */
1665 struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance,
1666 u32 flags, u16 adv_data_len, u8 *adv_data,
1667 u16 scan_rsp_len, u8 *scan_rsp_data,
1668 u16 timeout, u16 duration, s8 tx_power,
1669 u32 min_interval, u32 max_interval,
1670 u8 mesh_handle)
1671 {
1672 struct adv_info *adv;
1673
1674 adv = hci_find_adv_instance(hdev, instance);
1675 if (adv) {
1676 memset(adv->adv_data, 0, sizeof(adv->adv_data));
1677 memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
1678 memset(adv->per_adv_data, 0, sizeof(adv->per_adv_data));
1679 } else {
1680 if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
1681 instance < 1 || instance > hdev->le_num_of_adv_sets + 1)
1682 return ERR_PTR(-EOVERFLOW);
1683
1684 adv = kzalloc(sizeof(*adv), GFP_KERNEL);
1685 if (!adv)
1686 return ERR_PTR(-ENOMEM);
1687
1688 adv->pending = true;
1689 adv->instance = instance;
1690 list_add(&adv->list, &hdev->adv_instances);
1691 hdev->adv_instance_cnt++;
1692 }
1693
1694 adv->flags = flags;
1695 adv->min_interval = min_interval;
1696 adv->max_interval = max_interval;
1697 adv->tx_power = tx_power;
1698 /* Defining a mesh_handle changes the timing units to ms,
1699 * rather than seconds, and ties the instance to the requested
1700 * mesh_tx queue.
1701 */
1702 adv->mesh = mesh_handle;
1703
1704 hci_set_adv_instance_data(hdev, instance, adv_data_len, adv_data,
1705 scan_rsp_len, scan_rsp_data);
1706
1707 adv->timeout = timeout;
1708 adv->remaining_time = timeout;
1709
1710 if (duration == 0)
1711 adv->duration = hdev->def_multi_adv_rotation_duration;
1712 else
1713 adv->duration = duration;
1714
1715 INIT_DELAYED_WORK(&adv->rpa_expired_cb, adv_instance_rpa_expired);
1716
1717 BT_DBG("%s for %dMR", hdev->name, instance);
1718
1719 return adv;
1720 }
1721
1722 /* This function requires the caller holds hdev->lock */
1723 struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance,
1724 u32 flags, u8 data_len, u8 *data,
1725 u32 min_interval, u32 max_interval)
1726 {
1727 struct adv_info *adv;
1728
1729 adv = hci_add_adv_instance(hdev, instance, flags, 0, NULL, 0, NULL,
1730 0, 0, HCI_ADV_TX_POWER_NO_PREFERENCE,
1731 min_interval, max_interval, 0);
1732 if (IS_ERR(adv))
1733 return adv;
1734
1735 adv->periodic = true;
1736 adv->per_adv_data_len = data_len;
1737
1738 if (data)
1739 memcpy(adv->per_adv_data, data, data_len);
1740
1741 return adv;
1742 }
1743
1744 /* This function requires the caller holds hdev->lock */
1745 int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
1746 u16 adv_data_len, u8 *adv_data,
1747 u16 scan_rsp_len, u8 *scan_rsp_data)
1748 {
1749 struct adv_info *adv;
1750
1751 adv = hci_find_adv_instance(hdev, instance);
1752
1753 /* If advertisement doesn't exist, we can't modify its data */
1754 if (!adv)
1755 return -ENOENT;
1756
1757 if (adv_data_len && ADV_DATA_CMP(adv, adv_data, adv_data_len)) {
1758 memset(adv->adv_data, 0, sizeof(adv->adv_data));
1759 memcpy(adv->adv_data, adv_data, adv_data_len);
1760 adv->adv_data_len = adv_data_len;
1761 adv->adv_data_changed = true;
1762 }
1763
1764 if (scan_rsp_len && SCAN_RSP_CMP(adv, scan_rsp_data, scan_rsp_len)) {
1765 memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
1766 memcpy(adv->scan_rsp_data, scan_rsp_data, scan_rsp_len);
1767 adv->scan_rsp_len = scan_rsp_len;
1768 adv->scan_rsp_changed = true;
1769 }
1770
1771 /* Mark as changed if there are flags which would affect it */
1772 if (((adv->flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) ||
1773 adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1774 adv->scan_rsp_changed = true;
1775
1776 return 0;
1777 }
1778
1779 /* This function requires the caller holds hdev->lock */
1780 u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance)
1781 {
1782 u32 flags;
1783 struct adv_info *adv;
1784
1785 if (instance == 0x00) {
1786 /* Instance 0 always manages the "Tx Power" and "Flags"
1787 * fields
1788 */
1789 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
1790
1791 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
1792 * corresponds to the "connectable" instance flag.
1793 */
1794 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
1795 flags |= MGMT_ADV_FLAG_CONNECTABLE;
1796
1797 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1798 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
1799 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1800 flags |= MGMT_ADV_FLAG_DISCOV;
1801
1802 return flags;
1803 }
1804
1805 adv = hci_find_adv_instance(hdev, instance);
1806
1807 /* Return 0 when we got an invalid instance identifier. */
1808 if (!adv)
1809 return 0;
1810
1811 return adv->flags;
1812 }
1813
1814 bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
1815 {
1816 struct adv_info *adv;
1817
1818 /* Instance 0x00 always set local name */
1819 if (instance == 0x00)
1820 return true;
1821
1822 adv = hci_find_adv_instance(hdev, instance);
1823 if (!adv)
1824 return false;
1825
1826 if (adv->flags & MGMT_ADV_FLAG_APPEARANCE ||
1827 adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1828 return true;
1829
1830 return adv->scan_rsp_len ? true : false;
1831 }
1832
1833 /* This function requires the caller holds hdev->lock */
1834 void hci_adv_monitors_clear(struct hci_dev *hdev)
1835 {
1836 struct adv_monitor *monitor;
1837 int handle;
1838
1839 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
1840 hci_free_adv_monitor(hdev, monitor);
1841
1842 idr_destroy(&hdev->adv_monitors_idr);
1843 }
1844
1845 /* Frees the monitor structure and does some bookkeeping.
1846 * This function requires the caller holds hdev->lock.
1847 */
1848 void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
1849 {
1850 struct adv_pattern *pattern;
1851 struct adv_pattern *tmp;
1852
1853 if (!monitor)
1854 return;
1855
1856 list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
1857 list_del(&pattern->list);
1858 kfree(pattern);
1859 }
1860
1861 if (monitor->handle)
1862 idr_remove(&hdev->adv_monitors_idr, monitor->handle);
1863
1864 if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) {
1865 hdev->adv_monitors_cnt--;
1866 mgmt_adv_monitor_removed(hdev, monitor->handle);
1867 }
1868
1869 kfree(monitor);
1870 }
1871
1872 /* Assigns handle to a monitor, and if offloading is supported and power is on,
1873 * also attempts to forward the request to the controller.
1874 * This function requires the caller holds hci_req_sync_lock.
1875 */
1876 int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
1877 {
1878 int min, max, handle;
1879 int status = 0;
1880
1881 if (!monitor)
1882 return -EINVAL;
1883
1884 hci_dev_lock(hdev);
1885
1886 min = HCI_MIN_ADV_MONITOR_HANDLE;
1887 max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
1888 handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
1889 GFP_KERNEL);
1890
1891 hci_dev_unlock(hdev);
1892
1893 if (handle < 0)
1894 return handle;
1895
1896 monitor->handle = handle;
1897
1898 if (!hdev_is_powered(hdev))
1899 return status;
1900
1901 switch (hci_get_adv_monitor_offload_ext(hdev)) {
1902 case HCI_ADV_MONITOR_EXT_NONE:
1903 bt_dev_dbg(hdev, "add monitor %d status %d",
1904 monitor->handle, status);
1905 /* Message was not forwarded to controller - not an error */
1906 break;
1907
1908 case HCI_ADV_MONITOR_EXT_MSFT:
1909 status = msft_add_monitor_pattern(hdev, monitor);
1910 bt_dev_dbg(hdev, "add monitor %d msft status %d",
1911 handle, status);
1912 break;
1913 }
1914
1915 return status;
1916 }
1917
1918 /* Attempts to tell the controller and free the monitor. If somehow the
1919 * controller doesn't have a corresponding handle, remove anyway.
1920 * This function requires the caller holds hci_req_sync_lock.
1921 */
1922 static int hci_remove_adv_monitor(struct hci_dev *hdev,
1923 struct adv_monitor *monitor)
1924 {
1925 int status = 0;
1926 int handle;
1927
1928 switch (hci_get_adv_monitor_offload_ext(hdev)) {
1929 case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
1930 bt_dev_dbg(hdev, "remove monitor %d status %d",
1931 monitor->handle, status);
1932 goto free_monitor;
1933
1934 case HCI_ADV_MONITOR_EXT_MSFT:
1935 handle = monitor->handle;
1936 status = msft_remove_monitor(hdev, monitor);
1937 bt_dev_dbg(hdev, "remove monitor %d msft status %d",
1938 handle, status);
1939 break;
1940 }
1941
1942 /* In case no matching handle registered, just free the monitor */
1943 if (status == -ENOENT)
1944 goto free_monitor;
1945
1946 return status;
1947
1948 free_monitor:
1949 if (status == -ENOENT)
1950 bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
1951 monitor->handle);
1952 hci_free_adv_monitor(hdev, monitor);
1953
1954 return status;
1955 }
1956
1957 /* This function requires the caller holds hci_req_sync_lock */
1958 int hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle)
1959 {
1960 struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);
1961
1962 if (!monitor)
1963 return -EINVAL;
1964
1965 return hci_remove_adv_monitor(hdev, monitor);
1966 }
1967
1968 /* This function requires the caller holds hci_req_sync_lock */
1969 int hci_remove_all_adv_monitor(struct hci_dev *hdev)
1970 {
1971 struct adv_monitor *monitor;
1972 int idr_next_id = 0;
1973 int status = 0;
1974
1975 while (1) {
1976 monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
1977 if (!monitor)
1978 break;
1979
1980 status = hci_remove_adv_monitor(hdev, monitor);
1981 if (status)
1982 return status;
1983
1984 idr_next_id++;
1985 }
1986
1987 return status;
1988 }
1989
1990 /* This function requires the caller holds hdev->lock */
1991 bool hci_is_adv_monitoring(struct hci_dev *hdev)
1992 {
1993 return !idr_is_empty(&hdev->adv_monitors_idr);
1994 }
1995
1996 int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
1997 {
1998 if (msft_monitor_supported(hdev))
1999 return HCI_ADV_MONITOR_EXT_MSFT;
2000
2001 return HCI_ADV_MONITOR_EXT_NONE;
2002 }
2003
2004 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2005 bdaddr_t *bdaddr, u8 type)
2006 {
2007 struct bdaddr_list *b;
2008
2009 list_for_each_entry(b, bdaddr_list, list) {
2010 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2011 return b;
2012 }
2013
2014 return NULL;
2015 }
2016
2017 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
2018 struct list_head *bdaddr_list, bdaddr_t *bdaddr,
2019 u8 type)
2020 {
2021 struct bdaddr_list_with_irk *b;
2022
2023 list_for_each_entry(b, bdaddr_list, list) {
2024 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2025 return b;
2026 }
2027
2028 return NULL;
2029 }
2030
2031 struct bdaddr_list_with_flags *
2032 hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
2033 bdaddr_t *bdaddr, u8 type)
2034 {
2035 struct bdaddr_list_with_flags *b;
2036
2037 list_for_each_entry(b, bdaddr_list, list) {
2038 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2039 return b;
2040 }
2041
2042 return NULL;
2043 }
2044
2045 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2046 {
2047 struct bdaddr_list *b, *n;
2048
2049 list_for_each_entry_safe(b, n, bdaddr_list, list) {
2050 list_del(&b->list);
2051 kfree(b);
2052 }
2053 }
2054
2055 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2056 {
2057 struct bdaddr_list *entry;
2058
2059 if (!bacmp(bdaddr, BDADDR_ANY))
2060 return -EBADF;
2061
2062 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2063 return -EEXIST;
2064
2065 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2066 if (!entry)
2067 return -ENOMEM;
2068
2069 bacpy(&entry->bdaddr, bdaddr);
2070 entry->bdaddr_type = type;
2071
2072 list_add(&entry->list, list);
2073
2074 return 0;
2075 }
2076
2077 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2078 u8 type, u8 *peer_irk, u8 *local_irk)
2079 {
2080 struct bdaddr_list_with_irk *entry;
2081
2082 if (!bacmp(bdaddr, BDADDR_ANY))
2083 return -EBADF;
2084
2085 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2086 return -EEXIST;
2087
2088 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2089 if (!entry)
2090 return -ENOMEM;
2091
2092 bacpy(&entry->bdaddr, bdaddr);
2093 entry->bdaddr_type = type;
2094
2095 if (peer_irk)
2096 memcpy(entry->peer_irk, peer_irk, 16);
2097
2098 if (local_irk)
2099 memcpy(entry->local_irk, local_irk, 16);
2100
2101 list_add(&entry->list, list);
2102
2103 return 0;
2104 }
2105
2106 int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2107 u8 type, u32 flags)
2108 {
2109 struct bdaddr_list_with_flags *entry;
2110
2111 if (!bacmp(bdaddr, BDADDR_ANY))
2112 return -EBADF;
2113
2114 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2115 return -EEXIST;
2116
2117 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2118 if (!entry)
2119 return -ENOMEM;
2120
2121 bacpy(&entry->bdaddr, bdaddr);
2122 entry->bdaddr_type = type;
2123 entry->flags = flags;
2124
2125 list_add(&entry->list, list);
2126
2127 return 0;
2128 }
2129
2130 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2131 {
2132 struct bdaddr_list *entry;
2133
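/* BDADDR_ANY acts as a wildcard here: clear the whole list instead
 * of looking up a single entry.
 */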
2134 if (!bacmp(bdaddr, BDADDR_ANY)) {
2135 hci_bdaddr_list_clear(list);
2136 return 0;
2137 }
2138
2139 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2140 if (!entry)
2141 return -ENOENT;
2142
2143 list_del(&entry->list);
2144 kfree(entry);
2145
2146 return 0;
2147 }
2148
2149 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2150 u8 type)
2151 {
2152 struct bdaddr_list_with_irk *entry;
2153
2154 if (!bacmp(bdaddr, BDADDR_ANY)) {
2155 hci_bdaddr_list_clear(list);
2156 return 0;
2157 }
2158
2159 entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
2160 if (!entry)
2161 return -ENOENT;
2162
2163 list_del(&entry->list);
2164 kfree(entry);
2165
2166 return 0;
2167 }
2168
2169 int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2170 u8 type)
2171 {
2172 struct bdaddr_list_with_flags *entry;
2173
2174 if (!bacmp(bdaddr, BDADDR_ANY)) {
2175 hci_bdaddr_list_clear(list);
2176 return 0;
2177 }
2178
2179 entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
2180 if (!entry)
2181 return -ENOENT;
2182
2183 list_del(&entry->list);
2184 kfree(entry);
2185
2186 return 0;
2187 }
2188
2189 /* This function requires the caller holds hdev->lock */
2190 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2191 bdaddr_t *addr, u8 addr_type)
2192 {
2193 struct hci_conn_params *params;
2194
2195 list_for_each_entry(params, &hdev->le_conn_params, list) {
2196 if (bacmp(&params->addr, addr) == 0 &&
2197 params->addr_type == addr_type) {
2198 return params;
2199 }
2200 }
2201
2202 return NULL;
2203 }
2204
2205 /* This function requires the caller holds hdev->lock or rcu_read_lock */
2206 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2207 bdaddr_t *addr, u8 addr_type)
2208 {
2209 struct hci_conn_params *param;
2210
2211 rcu_read_lock();
2212
2213 list_for_each_entry_rcu(param, list, action) {
2214 if (bacmp(&param->addr, addr) == 0 &&
2215 param->addr_type == addr_type) {
2216 rcu_read_unlock();
2217 return param;
2218 }
2219 }
2220
2221 rcu_read_unlock();
2222
2223 return NULL;
2224 }
2225
2226 /* This function requires the caller holds hdev->lock */
2227 void hci_pend_le_list_del_init(struct hci_conn_params *param)
2228 {
2229 if (list_empty(&param->action))
2230 return;
2231
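/* Wait for any hci_pend_le_action_lookup() RCU readers still
 * traversing the old linkage before reinitializing the node.
 */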
2232 list_del_rcu(&param->action);
2233 synchronize_rcu();
2234 INIT_LIST_HEAD(&param->action);
2235 }
2236
2237 /* This function requires the caller holds hdev->lock */
2238 void hci_pend_le_list_add(struct hci_conn_params *param,
2239 struct list_head *list)
2240 {
2241 list_add_rcu(&param->action, list);
2242 }
2243
2244 /* This function requires the caller holds hdev->lock */
2245 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2246 bdaddr_t *addr, u8 addr_type)
2247 {
2248 struct hci_conn_params *params;
2249
2250 params = hci_conn_params_lookup(hdev, addr, addr_type);
2251 if (params)
2252 return params;
2253
2254 params = kzalloc(sizeof(*params), GFP_KERNEL);
2255 if (!params) {
2256 bt_dev_err(hdev, "out of memory");
2257 return NULL;
2258 }
2259
2260 bacpy(&params->addr, addr);
2261 params->addr_type = addr_type;
2262
2263 list_add(&params->list, &hdev->le_conn_params);
2264 INIT_LIST_HEAD(&params->action);
2265
2266 params->conn_min_interval = hdev->le_conn_min_interval;
2267 params->conn_max_interval = hdev->le_conn_max_interval;
2268 params->conn_latency = hdev->le_conn_latency;
2269 params->supervision_timeout = hdev->le_supv_timeout;
2270 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2271
2272 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2273
2274 return params;
2275 }
2276
2277 void hci_conn_params_free(struct hci_conn_params *params)
2278 {
2279 hci_pend_le_list_del_init(params);
2280
2281 if (params->conn) {
2282 hci_conn_drop(params->conn);
2283 hci_conn_put(params->conn);
2284 }
2285
2286 list_del(&params->list);
2287 kfree(params);
2288 }
2289
2290 /* This function requires the caller holds hdev->lock */
2291 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2292 {
2293 struct hci_conn_params *params;
2294
2295 params = hci_conn_params_lookup(hdev, addr, addr_type);
2296 if (!params)
2297 return;
2298
2299 hci_conn_params_free(params);
2300
2301 hci_update_passive_scan(hdev);
2302
2303 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2304 }
2305
2306 /* This function requires the caller holds hdev->lock */
2307 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2308 {
2309 struct hci_conn_params *params, *tmp;
2310
2311 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2312 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2313 continue;
2314
2315 /* If trying to establish a one-time connection to a disabled
2316 * device, leave the params but mark them for explicit connect only.
2317 */
2318 if (params->explicit_connect) {
2319 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2320 continue;
2321 }
2322
2323 hci_conn_params_free(params);
2324 }
2325
2326 BT_DBG("All LE disabled connection parameters were removed");
2327 }
2328
2329 /* This function requires the caller holds hdev->lock */
2330 static void hci_conn_params_clear_all(struct hci_dev *hdev)
2331 {
2332 struct hci_conn_params *params, *tmp;
2333
2334 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2335 hci_conn_params_free(params);
2336
2337 BT_DBG("All LE connection parameters were removed");
2338 }
2339
2340 /* Copy the Identity Address of the controller.
2341 *
2342 * If the controller has a public BD_ADDR, then by default use that one.
2343 * If this is a LE only controller without a public address, default to
2344 * the static random address.
2345 *
2346 * For debugging purposes it is possible to force controllers with a
2347 * public address to use the static random address instead.
2348 *
2349 * In case BR/EDR has been disabled on a dual-mode controller and
2350 * userspace has configured a static address, then that address
2351 * becomes the identity address instead of the public BR/EDR address.
2352 */
2353 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2354 u8 *bdaddr_type)
2355 {
2356 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2357 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2358 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2359 bacmp(&hdev->static_addr, BDADDR_ANY))) {
2360 bacpy(bdaddr, &hdev->static_addr);
2361 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2362 } else {
2363 bacpy(bdaddr, &hdev->bdaddr);
2364 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2365 }
2366 }
2367
2368 static void hci_clear_wake_reason(struct hci_dev *hdev)
2369 {
2370 hci_dev_lock(hdev);
2371
2372 hdev->wake_reason = 0;
2373 bacpy(&hdev->wake_addr, BDADDR_ANY);
2374 hdev->wake_addr_type = 0;
2375
2376 hci_dev_unlock(hdev);
2377 }
2378
2379 static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
2380 void *data)
2381 {
2382 struct hci_dev *hdev =
2383 container_of(nb, struct hci_dev, suspend_notifier);
2384 int ret = 0;
2385
2386 /* Userspace has full control of this device. Do nothing. */
2387 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2388 return NOTIFY_DONE;
2389
2390 /* To avoid a potential race with hci_unregister_dev. */
2391 hci_dev_hold(hdev);
2392
2393 switch (action) {
2394 case PM_HIBERNATION_PREPARE:
2395 case PM_SUSPEND_PREPARE:
2396 ret = hci_suspend_dev(hdev);
2397 break;
2398 case PM_POST_HIBERNATION:
2399 case PM_POST_SUSPEND:
2400 ret = hci_resume_dev(hdev);
2401 break;
2402 }
2403
2404 if (ret)
2405 bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
2406 action, ret);
2407
2408 hci_dev_put(hdev);
2409 return NOTIFY_DONE;
2410 }
2411
2412 /* Alloc HCI device */
2413 struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
2414 {
2415 struct hci_dev *hdev;
2416 unsigned int alloc_size;
2417
2418 alloc_size = sizeof(*hdev);
2419 if (sizeof_priv) {
2420 /* Fixme: May need ALIGN-ment? */
2421 alloc_size += sizeof_priv;
2422 }
2423
2424 hdev = kzalloc(alloc_size, GFP_KERNEL);
2425 if (!hdev)
2426 return NULL;
2427
2428 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2429 hdev->esco_type = (ESCO_HV1);
2430 hdev->link_mode = (HCI_LM_ACCEPT);
2431 hdev->num_iac = 0x01; /* One IAC support is mandatory */
2432 hdev->io_capability = 0x03; /* No Input No Output */
2433 hdev->manufacturer = 0xffff; /* Default to internal use */
2434 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2435 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2436 hdev->adv_instance_cnt = 0;
2437 hdev->cur_adv_instance = 0x00;
2438 hdev->adv_instance_timeout = 0;
2439
2440 hdev->advmon_allowlist_duration = 300;
2441 hdev->advmon_no_filter_duration = 500;
2442 hdev->enable_advmon_interleave_scan = 0x00; /* Default to disable */
2443
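/* The timing defaults below are raw controller units: sniff,
 * advertising and scan intervals are in 0.625 ms slots, connection
 * intervals in 1.25 ms units and the supervision timeout in
 * 10 ms units.
 */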
2444 hdev->sniff_max_interval = 800;
2445 hdev->sniff_min_interval = 80;
2446
2447 hdev->le_adv_channel_map = 0x07;
2448 hdev->le_adv_min_interval = 0x0800;
2449 hdev->le_adv_max_interval = 0x0800;
2450 hdev->le_scan_interval = 0x0060;
2451 hdev->le_scan_window = 0x0030;
2452 hdev->le_scan_int_suspend = 0x0400;
2453 hdev->le_scan_window_suspend = 0x0012;
2454 hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
2455 hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
2456 hdev->le_scan_int_adv_monitor = 0x0060;
2457 hdev->le_scan_window_adv_monitor = 0x0030;
2458 hdev->le_scan_int_connect = 0x0060;
2459 hdev->le_scan_window_connect = 0x0060;
2460 hdev->le_conn_min_interval = 0x0018;
2461 hdev->le_conn_max_interval = 0x0028;
2462 hdev->le_conn_latency = 0x0000;
2463 hdev->le_supv_timeout = 0x002a;
2464 hdev->le_def_tx_len = 0x001b;
2465 hdev->le_def_tx_time = 0x0148;
2466 hdev->le_max_tx_len = 0x001b;
2467 hdev->le_max_tx_time = 0x0148;
2468 hdev->le_max_rx_len = 0x001b;
2469 hdev->le_max_rx_time = 0x0148;
2470 hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
2471 hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
2472 hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
2473 hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
2474 hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
2475 hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
2476 hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT;
2477 hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
2478 hdev->max_le_tx_power = HCI_TX_POWER_INVALID;
2479
2480 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
2481 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
2482 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
2483 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
2484 hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
2485 hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
2486
2487 /* default 1.28 sec page scan */
2488 hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
2489 hdev->def_page_scan_int = 0x0800;
2490 hdev->def_page_scan_window = 0x0012;
2491
2492 mutex_init(&hdev->lock);
2493 mutex_init(&hdev->req_lock);
2494
2495 ida_init(&hdev->unset_handle_ida);
2496
2497 INIT_LIST_HEAD(&hdev->mesh_pending);
2498 INIT_LIST_HEAD(&hdev->mgmt_pending);
2499 INIT_LIST_HEAD(&hdev->reject_list);
2500 INIT_LIST_HEAD(&hdev->accept_list);
2501 INIT_LIST_HEAD(&hdev->uuids);
2502 INIT_LIST_HEAD(&hdev->link_keys);
2503 INIT_LIST_HEAD(&hdev->long_term_keys);
2504 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
2505 INIT_LIST_HEAD(&hdev->remote_oob_data);
2506 INIT_LIST_HEAD(&hdev->le_accept_list);
2507 INIT_LIST_HEAD(&hdev->le_resolv_list);
2508 INIT_LIST_HEAD(&hdev->le_conn_params);
2509 INIT_LIST_HEAD(&hdev->pend_le_conns);
2510 INIT_LIST_HEAD(&hdev->pend_le_reports);
2511 INIT_LIST_HEAD(&hdev->conn_hash.list);
2512 INIT_LIST_HEAD(&hdev->adv_instances);
2513 INIT_LIST_HEAD(&hdev->blocked_keys);
2514 INIT_LIST_HEAD(&hdev->monitored_devices);
2515
2516 INIT_LIST_HEAD(&hdev->local_codecs);
2517 INIT_WORK(&hdev->rx_work, hci_rx_work);
2518 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2519 INIT_WORK(&hdev->tx_work, hci_tx_work);
2520 INIT_WORK(&hdev->power_on, hci_power_on);
2521 INIT_WORK(&hdev->error_reset, hci_error_reset);
2522
2523 hci_cmd_sync_init(hdev);
2524
2525 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2526
2527 skb_queue_head_init(&hdev->rx_q);
2528 skb_queue_head_init(&hdev->cmd_q);
2529 skb_queue_head_init(&hdev->raw_q);
2530
2531 init_waitqueue_head(&hdev->req_wait_q);
2532
2533 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
2534 INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);
2535
2536 hci_devcd_setup(hdev);
2537 hci_request_setup(hdev);
2538
2539 hci_init_sysfs(hdev);
2540 discovery_init(hdev);
2541
2542 return hdev;
2543 }
2544 EXPORT_SYMBOL(hci_alloc_dev_priv);
2545
2546 /* Free HCI device */
2547 void hci_free_dev(struct hci_dev *hdev)
2548 {
2549 /* will free via device release */
2550 put_device(&hdev->dev);
2551 }
2552 EXPORT_SYMBOL(hci_free_dev);
2553
2554 /* Register HCI device */
2555 int hci_register_dev(struct hci_dev *hdev)
2556 {
2557 int id, error;
2558
2559 if (!hdev->open || !hdev->close || !hdev->send)
2560 return -EINVAL;
2561
2562 id = ida_alloc_max(&hci_index_ida, HCI_MAX_ID - 1, GFP_KERNEL);
2563 if (id < 0)
2564 return id;
2565
2566 error = dev_set_name(&hdev->dev, "hci%u", id);
2567 if (error)
2568 return error;
2569
2570 hdev->name = dev_name(&hdev->dev);
2571 hdev->id = id;
2572
2573 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2574
2575 hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
2576 if (!hdev->workqueue) {
2577 error = -ENOMEM;
2578 goto err;
2579 }
2580
2581 hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
2582 hdev->name);
2583 if (!hdev->req_workqueue) {
2584 destroy_workqueue(hdev->workqueue);
2585 error = -ENOMEM;
2586 goto err;
2587 }
2588
2589 if (!IS_ERR_OR_NULL(bt_debugfs))
2590 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
2591
2592 error = device_add(&hdev->dev);
2593 if (error < 0)
2594 goto err_wqueue;
2595
2596 hci_leds_init(hdev);
2597
2598 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2599 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2600 hdev);
2601 if (hdev->rfkill) {
2602 if (rfkill_register(hdev->rfkill) < 0) {
2603 rfkill_destroy(hdev->rfkill);
2604 hdev->rfkill = NULL;
2605 }
2606 }
2607
2608 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2609 hci_dev_set_flag(hdev, HCI_RFKILLED);
2610
2611 hci_dev_set_flag(hdev, HCI_SETUP);
2612 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
2613
2614 /* Assume BR/EDR support until proven otherwise (such as
2615 * through reading supported features during init).
2616 */
2617 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
2618
2619 write_lock(&hci_dev_list_lock);
2620 list_add(&hdev->list, &hci_dev_list);
2621 write_unlock(&hci_dev_list_lock);
2622
2623 /* Devices that are marked for raw-only usage are unconfigured
2624 * and should not be included in normal operation.
2625 */
2626 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2627 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
2628
2629 /* Mark Remote Wakeup connection flag as supported if driver has wakeup
2630 * callback.
2631 */
2632 if (hdev->wakeup)
2633 hdev->conn_flags |= HCI_CONN_FLAG_REMOTE_WAKEUP;
2634
2635 hci_sock_dev_event(hdev, HCI_DEV_REG);
2636 hci_dev_hold(hdev);
2637
2638 error = hci_register_suspend_notifier(hdev);
2639 if (error)
2640 BT_WARN("register suspend notifier failed error:%d\n", error);
2641
2642 queue_work(hdev->req_workqueue, &hdev->power_on);
2643
2644 idr_init(&hdev->adv_monitors_idr);
2645 msft_register(hdev);
2646
2647 return id;
2648
2649 err_wqueue:
2650 debugfs_remove_recursive(hdev->debugfs);
2651 destroy_workqueue(hdev->workqueue);
2652 destroy_workqueue(hdev->req_workqueue);
2653 err:
2654 ida_free(&hci_index_ida, hdev->id);
2655
2656 return error;
2657 }
2658 EXPORT_SYMBOL(hci_register_dev);
2659
2660 /* Unregister HCI device */
2661 void hci_unregister_dev(struct hci_dev *hdev)
2662 {
2663 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2664
2665 mutex_lock(&hdev->unregister_lock);
2666 hci_dev_set_flag(hdev, HCI_UNREGISTER);
2667 mutex_unlock(&hdev->unregister_lock);
2668
2669 write_lock(&hci_dev_list_lock);
2670 list_del(&hdev->list);
2671 write_unlock(&hci_dev_list_lock);
2672
2673 cancel_work_sync(&hdev->rx_work);
2674 cancel_work_sync(&hdev->cmd_work);
2675 cancel_work_sync(&hdev->tx_work);
2676 cancel_work_sync(&hdev->power_on);
2677 cancel_work_sync(&hdev->error_reset);
2678
2679 hci_cmd_sync_clear(hdev);
2680
2681 hci_unregister_suspend_notifier(hdev);
2682
2683 hci_dev_do_close(hdev);
2684
2685 if (!test_bit(HCI_INIT, &hdev->flags) &&
2686 !hci_dev_test_flag(hdev, HCI_SETUP) &&
2687 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
2688 hci_dev_lock(hdev);
2689 mgmt_index_removed(hdev);
2690 hci_dev_unlock(hdev);
2691 }
2692
2693 /* mgmt_index_removed should take care of emptying the
2694 * pending list */
2695 BUG_ON(!list_empty(&hdev->mgmt_pending));
2696
2697 hci_sock_dev_event(hdev, HCI_DEV_UNREG);
2698
2699 if (hdev->rfkill) {
2700 rfkill_unregister(hdev->rfkill);
2701 rfkill_destroy(hdev->rfkill);
2702 }
2703
2704 device_del(&hdev->dev);
2705 /* Actual cleanup is deferred until hci_release_dev(). */
2706 hci_dev_put(hdev);
2707 }
2708 EXPORT_SYMBOL(hci_unregister_dev);
2709
2710 /* Release HCI device */
2711 void hci_release_dev(struct hci_dev *hdev)
2712 {
2713 debugfs_remove_recursive(hdev->debugfs);
2714 kfree_const(hdev->hw_info);
2715 kfree_const(hdev->fw_info);
2716
2717 destroy_workqueue(hdev->workqueue);
2718 destroy_workqueue(hdev->req_workqueue);
2719
2720 hci_dev_lock(hdev);
2721 hci_bdaddr_list_clear(&hdev->reject_list);
2722 hci_bdaddr_list_clear(&hdev->accept_list);
2723 hci_uuids_clear(hdev);
2724 hci_link_keys_clear(hdev);
2725 hci_smp_ltks_clear(hdev);
2726 hci_smp_irks_clear(hdev);
2727 hci_remote_oob_data_clear(hdev);
2728 hci_adv_instances_clear(hdev);
2729 hci_adv_monitors_clear(hdev);
2730 hci_bdaddr_list_clear(&hdev->le_accept_list);
2731 hci_bdaddr_list_clear(&hdev->le_resolv_list);
2732 hci_conn_params_clear_all(hdev);
2733 hci_discovery_filter_clear(hdev);
2734 hci_blocked_keys_clear(hdev);
2735 hci_codec_list_clear(&hdev->local_codecs);
2736 msft_release(hdev);
2737 hci_dev_unlock(hdev);
2738
2739 ida_destroy(&hdev->unset_handle_ida);
2740 ida_free(&hci_index_ida, hdev->id);
2741 kfree_skb(hdev->sent_cmd);
2742 kfree_skb(hdev->req_skb);
2743 kfree_skb(hdev->recv_event);
2744 kfree(hdev);
2745 }
2746 EXPORT_SYMBOL(hci_release_dev);
2747
2748 int hci_register_suspend_notifier(struct hci_dev *hdev)
2749 {
2750 int ret = 0;
2751
2752 if (!hdev->suspend_notifier.notifier_call &&
2753 !test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
2754 hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
2755 ret = register_pm_notifier(&hdev->suspend_notifier);
2756 }
2757
2758 return ret;
2759 }
2760
2761 int hci_unregister_suspend_notifier(struct hci_dev *hdev)
2762 {
2763 int ret = 0;
2764
2765 if (hdev->suspend_notifier.notifier_call) {
2766 ret = unregister_pm_notifier(&hdev->suspend_notifier);
2767 if (!ret)
2768 hdev->suspend_notifier.notifier_call = NULL;
2769 }
2770
2771 return ret;
2772 }
2773
2774 /* Cancel ongoing command synchronously:
2775 *
2776 * - Cancel command timer
2777 * - Reset command counter
2778 * - Cancel command request
2779 */
2780 static void hci_cancel_cmd_sync(struct hci_dev *hdev, int err)
2781 {
2782 bt_dev_dbg(hdev, "err 0x%2.2x", err);
2783
2784 cancel_delayed_work_sync(&hdev->cmd_timer);
2785 cancel_delayed_work_sync(&hdev->ncmd_timer);
2786 atomic_set(&hdev->cmd_cnt, 1);
2787
2788 hci_cmd_sync_cancel_sync(hdev, err);
2789 }
2790
2791 /* Suspend HCI device */
2792 int hci_suspend_dev(struct hci_dev *hdev)
2793 {
2794 int ret;
2795
2796 bt_dev_dbg(hdev, "");
2797
2798 /* Suspend should only act when the device is powered. */
2799 if (!hdev_is_powered(hdev) ||
2800 hci_dev_test_flag(hdev, HCI_UNREGISTER))
2801 return 0;
2802
2803 /* If powering down don't attempt to suspend */
2804 if (mgmt_powering_down(hdev))
2805 return 0;
2806
2807 /* Cancel potentially blocking sync operation before suspend */
2808 hci_cancel_cmd_sync(hdev, EHOSTDOWN);
2809
2810 hci_req_sync_lock(hdev);
2811 ret = hci_suspend_sync(hdev);
2812 hci_req_sync_unlock(hdev);
2813
2814 hci_clear_wake_reason(hdev);
2815 mgmt_suspending(hdev, hdev->suspend_state);
2816
2817 hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
2818 return ret;
2819 }
2820 EXPORT_SYMBOL(hci_suspend_dev);
2821
2822 /* Resume HCI device */
2823 int hci_resume_dev(struct hci_dev *hdev)
2824 {
2825 int ret;
2826
2827 bt_dev_dbg(hdev, "");
2828
2829 /* Resume should only act when the device is powered. */
2830 if (!hdev_is_powered(hdev) ||
2831 hci_dev_test_flag(hdev, HCI_UNREGISTER))
2832 return 0;
2833
2834 /* If powering down don't attempt to resume */
2835 if (mgmt_powering_down(hdev))
2836 return 0;
2837
2838 hci_req_sync_lock(hdev);
2839 ret = hci_resume_sync(hdev);
2840 hci_req_sync_unlock(hdev);
2841
2842 mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
2843 hdev->wake_addr_type);
2844
2845 hci_sock_dev_event(hdev, HCI_DEV_RESUME);
2846 return ret;
2847 }
2848 EXPORT_SYMBOL(hci_resume_dev);
2849
2850 /* Reset HCI device */
2851 int hci_reset_dev(struct hci_dev *hdev)
2852 {
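/* Three-byte Hardware Error event: event code, parameter total
 * length (0x01) and a hardware code of 0x00.
 */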
2853 static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
2854 struct sk_buff *skb;
2855
2856 skb = bt_skb_alloc(3, GFP_ATOMIC);
2857 if (!skb)
2858 return -ENOMEM;
2859
2860 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
2861 skb_put_data(skb, hw_err, 3);
2862
2863 bt_dev_err(hdev, "Injecting HCI hardware error event");
2864
2865 /* Send Hardware Error to upper stack */
2866 return hci_recv_frame(hdev, skb);
2867 }
2868 EXPORT_SYMBOL(hci_reset_dev);
2869
2870 /* Receive frame from HCI drivers */
2871 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
2872 {
2873 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
2874 && !test_bit(HCI_INIT, &hdev->flags))) {
2875 kfree_skb(skb);
2876 return -ENXIO;
2877 }
2878
2879 switch (hci_skb_pkt_type(skb)) {
2880 case HCI_EVENT_PKT:
2881 break;
2882 case HCI_ACLDATA_PKT:
2883 /* Detect if ISO packet has been sent as ACL */
2884 if (hci_conn_num(hdev, ISO_LINK)) {
2885 __u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle);
2886 __u8 type;
2887
2888 type = hci_conn_lookup_type(hdev, hci_handle(handle));
2889 if (type == ISO_LINK)
2890 hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
2891 }
2892 break;
2893 case HCI_SCODATA_PKT:
2894 break;
2895 case HCI_ISODATA_PKT:
2896 break;
2897 default:
2898 kfree_skb(skb);
2899 return -EINVAL;
2900 }
2901
2902 /* Incoming skb */
2903 bt_cb(skb)->incoming = 1;
2904
2905 /* Time stamp */
2906 __net_timestamp(skb);
2907
2908 skb_queue_tail(&hdev->rx_q, skb);
2909 queue_work(hdev->workqueue, &hdev->rx_work);
2910
2911 return 0;
2912 }
2913 EXPORT_SYMBOL(hci_recv_frame);
2914
2915 /* Receive diagnostic message from HCI drivers */
2916 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
2917 {
2918 /* Mark as diagnostic packet */
2919 hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
2920
2921 /* Time stamp */
2922 __net_timestamp(skb);
2923
2924 skb_queue_tail(&hdev->rx_q, skb);
2925 queue_work(hdev->workqueue, &hdev->rx_work);
2926
2927 return 0;
2928 }
2929 EXPORT_SYMBOL(hci_recv_diag);
2930
2931 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
2932 {
2933 va_list vargs;
2934
2935 va_start(vargs, fmt);
2936 kfree_const(hdev->hw_info);
2937 hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
2938 va_end(vargs);
2939 }
2940 EXPORT_SYMBOL(hci_set_hw_info);
2941
2942 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
2943 {
2944 va_list vargs;
2945
2946 va_start(vargs, fmt);
2947 kfree_const(hdev->fw_info);
2948 hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
2949 va_end(vargs);
2950 }
2951 EXPORT_SYMBOL(hci_set_fw_info);
2952
2953 /* ---- Interface to upper protocols ---- */
2954
2955 int hci_register_cb(struct hci_cb *cb)
2956 {
2957 BT_DBG("%p name %s", cb, cb->name);
2958
2959 list_add_tail_rcu(&cb->list, &hci_cb_list);
2960
2961 return 0;
2962 }
2963 EXPORT_SYMBOL(hci_register_cb);
2964
2965 int hci_unregister_cb(struct hci_cb *cb)
2966 {
2967 BT_DBG("%p name %s", cb, cb->name);
2968
2969 list_del_rcu(&cb->list);
2970 synchronize_rcu();
2971
2972 return 0;
2973 }
2974 EXPORT_SYMBOL(hci_unregister_cb);
2975
2976 static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
2977 {
2978 int err;
2979
2980 BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
2981 skb->len);
2982
2983 /* Time stamp */
2984 __net_timestamp(skb);
2985
2986 /* Send copy to monitor */
2987 hci_send_to_monitor(hdev, skb);
2988
2989 if (atomic_read(&hdev->promisc)) {
2990 /* Send copy to the sockets */
2991 hci_send_to_sock(hdev, skb);
2992 }
2993
2994 /* Get rid of skb owner, prior to sending to the driver. */
2995 skb_orphan(skb);
2996
2997 if (!test_bit(HCI_RUNNING, &hdev->flags)) {
2998 kfree_skb(skb);
2999 return -EINVAL;
3000 }
3001
3002 err = hdev->send(hdev, skb);
3003 if (err < 0) {
3004 bt_dev_err(hdev, "sending frame failed (%d)", err);
3005 kfree_skb(skb);
3006 return err;
3007 }
3008
3009 return 0;
3010 }
3011
3012 /* Send HCI command */
3013 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3014 const void *param)
3015 {
3016 struct sk_buff *skb;
3017
3018 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3019
3020 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3021 if (!skb) {
3022 bt_dev_err(hdev, "no memory for command");
3023 return -ENOMEM;
3024 }
3025
3026 /* Stand-alone HCI commands must be flagged as
3027 * single-command requests.
3028 */
3029 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
3030
3031 skb_queue_tail(&hdev->cmd_q, skb);
3032 queue_work(hdev->workqueue, &hdev->cmd_work);
3033
3034 return 0;
3035 }
3036
3037 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
3038 const void *param)
3039 {
3040 struct sk_buff *skb;
3041
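/* OGF 0x3f is the vendor-specific command group. */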
3042 if (hci_opcode_ogf(opcode) != 0x3f) {
3043 /* A controller receiving a command shall respond with either
3044 * a Command Status Event or a Command Complete Event.
3045 * Therefore, all standard HCI commands must be sent via the
3046 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
3047 * Some vendors do not comply with this rule for vendor-specific
3048 * commands and do not return any event. We want to support
3049 * unresponded commands for such cases only.
3050 */
3051 bt_dev_err(hdev, "unresponded command not supported");
3052 return -EINVAL;
3053 }
3054
3055 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3056 if (!skb) {
3057 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
3058 opcode);
3059 return -ENOMEM;
3060 }
3061
3062 hci_send_frame(hdev, skb);
3063
3064 return 0;
3065 }
3066 EXPORT_SYMBOL(__hci_cmd_send);
3067
3068 /* Get data from the previously sent command */
3069 static void *hci_cmd_data(struct sk_buff *skb, __u16 opcode)
3070 {
3071 struct hci_command_hdr *hdr;
3072
3073 if (!skb || skb->len < HCI_COMMAND_HDR_SIZE)
3074 return NULL;
3075
3076 hdr = (void *)skb->data;
3077
3078 if (hdr->opcode != cpu_to_le16(opcode))
3079 return NULL;
3080
3081 return skb->data + HCI_COMMAND_HDR_SIZE;
3082 }
3083
3084 /* Get data from the previously sent command */
3085 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3086 {
3087 void *data;
3088
3089 /* Check if opcode matches last sent command */
3090 data = hci_cmd_data(hdev->sent_cmd, opcode);
3091 if (!data)
3092 /* Check if opcode matches last request */
3093 data = hci_cmd_data(hdev->req_skb, opcode);
3094
3095 return data;
3096 }
3097
3098 /* Get data from last received event */
3099 void *hci_recv_event_data(struct hci_dev *hdev, __u8 event)
3100 {
3101 struct hci_event_hdr *hdr;
3102 int offset;
3103
3104 if (!hdev->recv_event)
3105 return NULL;
3106
3107 hdr = (void *)hdev->recv_event->data;
3108 offset = sizeof(*hdr);
3109
3110 if (hdr->evt != event) {
3111 /* In case of an LE meta event, check whether the subevent matches */
3112 if (hdr->evt == HCI_EV_LE_META) {
3113 struct hci_ev_le_meta *ev;
3114
3115 ev = (void *)hdev->recv_event->data + offset;
3116 offset += sizeof(*ev);
3117 if (ev->subevent == event)
3118 goto found;
3119 }
3120 return NULL;
3121 }
3122
3123 found:
3124 bt_dev_dbg(hdev, "event 0x%2.2x", event);
3125
3126 return hdev->recv_event->data + offset;
3127 }
3128
3129 /* Send ACL data */
3130 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3131 {
3132 struct hci_acl_hdr *hdr;
3133 int len = skb->len;
3134
3135 skb_push(skb, HCI_ACL_HDR_SIZE);
3136 skb_reset_transport_header(skb);
3137 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3138 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3139 hdr->dlen = cpu_to_le16(len);
3140 }
3141
3142 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3143 struct sk_buff *skb, __u16 flags)
3144 {
3145 struct hci_conn *conn = chan->conn;
3146 struct hci_dev *hdev = conn->hdev;
3147 struct sk_buff *list;
3148
3149 skb->len = skb_headlen(skb);
3150 skb->data_len = 0;
3151
3152 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3153
3154 hci_add_acl_hdr(skb, conn->handle, flags);
3155
3156 list = skb_shinfo(skb)->frag_list;
3157 if (!list) {
3158 /* Non fragmented */
3159 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3160
3161 skb_queue_tail(queue, skb);
3162 } else {
3163 /* Fragmented */
3164 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3165
3166 skb_shinfo(skb)->frag_list = NULL;
3167
3168 /* Queue all fragments atomically. spin_lock_bh() is needed here
3169 * because with 6LoWPAN links this function can be called from
3170 * softirq context, and using a plain spin lock could cause
3171 * deadlocks.
3172 */
3173 spin_lock_bh(&queue->lock);
3174
3175 __skb_queue_tail(queue, skb);
3176
3177 flags &= ~ACL_START;
3178 flags |= ACL_CONT;
3179 do {
3180 skb = list; list = list->next;
3181
3182 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3183 hci_add_acl_hdr(skb, conn->handle, flags);
3184
3185 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3186
3187 __skb_queue_tail(queue, skb);
3188 } while (list);
3189
3190 spin_unlock_bh(&queue->lock);
3191 }
3192 }
3193
3194 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3195 {
3196 struct hci_dev *hdev = chan->conn->hdev;
3197
3198 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3199
3200 hci_queue_acl(chan, &chan->data_q, skb, flags);
3201
3202 queue_work(hdev->workqueue, &hdev->tx_work);
3203 }
3204
3205 /* Send SCO data */
3206 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3207 {
3208 struct hci_dev *hdev = conn->hdev;
3209 struct hci_sco_hdr hdr;
3210
3211 BT_DBG("%s len %d", hdev->name, skb->len);
3212
3213 hdr.handle = cpu_to_le16(conn->handle);
3214 hdr.dlen = skb->len;
3215
3216 skb_push(skb, HCI_SCO_HDR_SIZE);
3217 skb_reset_transport_header(skb);
3218 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3219
3220 hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
3221
3222 skb_queue_tail(&conn->data_q, skb);
3223 queue_work(hdev->workqueue, &hdev->tx_work);
3224 }
3225
3226 /* Send ISO data */
3227 static void hci_add_iso_hdr(struct sk_buff *skb, __u16 handle, __u8 flags)
3228 {
3229 struct hci_iso_hdr *hdr;
3230 int len = skb->len;
3231
3232 skb_push(skb, HCI_ISO_HDR_SIZE);
3233 skb_reset_transport_header(skb);
3234 hdr = (struct hci_iso_hdr *)skb_transport_header(skb);
3235 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3236 hdr->dlen = cpu_to_le16(len);
3237 }
3238
3239 static void hci_queue_iso(struct hci_conn *conn, struct sk_buff_head *queue,
3240 struct sk_buff *skb)
3241 {
3242 struct hci_dev *hdev = conn->hdev;
3243 struct sk_buff *list;
3244 __u16 flags;
3245
3246 skb->len = skb_headlen(skb);
3247 skb->data_len = 0;
3248
3249 hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
3250
3251 list = skb_shinfo(skb)->frag_list;
3252
3253 flags = hci_iso_flags_pack(list ? ISO_START : ISO_SINGLE, 0x00);
3254 hci_add_iso_hdr(skb, conn->handle, flags);
3255
3256 if (!list) {
3257 /* Non fragmented */
3258 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3259
3260 skb_queue_tail(queue, skb);
3261 } else {
3262 /* Fragmented */
3263 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3264
3265 skb_shinfo(skb)->frag_list = NULL;
3266
3267 __skb_queue_tail(queue, skb);
3268
3269 do {
3270 skb = list; list = list->next;
3271
3272 hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
3273 flags = hci_iso_flags_pack(list ? ISO_CONT : ISO_END,
3274 0x00);
3275 hci_add_iso_hdr(skb, conn->handle, flags);
3276
3277 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3278
3279 __skb_queue_tail(queue, skb);
3280 } while (list);
3281 }
3282 }
3283
3284 void hci_send_iso(struct hci_conn *conn, struct sk_buff *skb)
3285 {
3286 struct hci_dev *hdev = conn->hdev;
3287
3288 BT_DBG("%s len %d", hdev->name, skb->len);
3289
3290 hci_queue_iso(conn, &conn->data_q, skb);
3291
3292 queue_work(hdev->workqueue, &hdev->tx_work);
3293 }
3294
3295 /* ---- HCI TX task (outgoing data) ---- */
3296
3297 /* HCI Connection scheduler */
3298 static inline void hci_quote_sent(struct hci_conn *conn, int num, int *quote)
3299 {
3300 struct hci_dev *hdev;
3301 int cnt, q;
3302
3303 if (!conn) {
3304 *quote = 0;
3305 return;
3306 }
3307
3308 hdev = conn->hdev;
3309
3310 switch (conn->type) {
3311 case ACL_LINK:
3312 cnt = hdev->acl_cnt;
3313 break;
3314 case SCO_LINK:
3315 case ESCO_LINK:
3316 cnt = hdev->sco_cnt;
3317 break;
3318 case LE_LINK:
3319 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3320 break;
3321 case ISO_LINK:
3322 cnt = hdev->iso_mtu ? hdev->iso_cnt :
3323 hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3324 break;
3325 default:
3326 cnt = 0;
3327 bt_dev_err(hdev, "unknown link type %d", conn->type);
3328 }
3329
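/* Split the available buffer credits evenly across the active
 * connections, but always allow at least one packet per quote.
 */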
3330 q = cnt / num;
3331 *quote = q ? q : 1;
3332 }
3333
3334 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3335 int *quote)
3336 {
3337 struct hci_conn_hash *h = &hdev->conn_hash;
3338 struct hci_conn *conn = NULL, *c;
3339 unsigned int num = 0, min = ~0;
3340
3341 /* We don't have to lock device here. Connections are always
3342 * added and removed with TX task disabled. */
3343
3344 rcu_read_lock();
3345
3346 list_for_each_entry_rcu(c, &h->list, list) {
3347 if (c->type != type || skb_queue_empty(&c->data_q))
3348 continue;
3349
3350 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3351 continue;
3352
3353 num++;
3354
3355 if (c->sent < min) {
3356 min = c->sent;
3357 conn = c;
3358 }
3359
3360 if (hci_conn_num(hdev, type) == num)
3361 break;
3362 }
3363
3364 rcu_read_unlock();
3365
3366 hci_quote_sent(conn, num, quote);
3367
3368 BT_DBG("conn %p quote %d", conn, *quote);
3369 return conn;
3370 }
3371
3372 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3373 {
3374 struct hci_conn_hash *h = &hdev->conn_hash;
3375 struct hci_conn *c;
3376
3377 bt_dev_err(hdev, "link tx timeout");
3378
3379 rcu_read_lock();
3380
3381 /* Kill stalled connections */
3382 list_for_each_entry_rcu(c, &h->list, list) {
3383 if (c->type == type && c->sent) {
3384 bt_dev_err(hdev, "killing stalled connection %pMR",
3385 &c->dst);
3386 /* hci_disconnect might sleep, so, we have to release
3387 * the RCU read lock before calling it.
3388 */
3389 rcu_read_unlock();
3390 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3391 rcu_read_lock();
3392 }
3393 }
3394
3395 rcu_read_unlock();
3396 }
3397
3398 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3399 int *quote)
3400 {
3401 struct hci_conn_hash *h = &hdev->conn_hash;
3402 struct hci_chan *chan = NULL;
3403 unsigned int num = 0, min = ~0, cur_prio = 0;
3404 struct hci_conn *conn;
3405 int conn_num = 0;
3406
3407 BT_DBG("%s", hdev->name);
3408
3409 rcu_read_lock();
3410
3411 list_for_each_entry_rcu(conn, &h->list, list) {
3412 struct hci_chan *tmp;
3413
3414 if (conn->type != type)
3415 continue;
3416
3417 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3418 continue;
3419
3420 conn_num++;
3421
3422 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3423 struct sk_buff *skb;
3424
3425 if (skb_queue_empty(&tmp->data_q))
3426 continue;
3427
3428 skb = skb_peek(&tmp->data_q);
3429 if (skb->priority < cur_prio)
3430 continue;
3431
3432 if (skb->priority > cur_prio) {
3433 num = 0;
3434 min = ~0;
3435 cur_prio = skb->priority;
3436 }
3437
3438 num++;
3439
3440 if (conn->sent < min) {
3441 min = conn->sent;
3442 chan = tmp;
3443 }
3444 }
3445
3446 if (hci_conn_num(hdev, type) == conn_num)
3447 break;
3448 }
3449
3450 rcu_read_unlock();
3451
3452 if (!chan)
3453 return NULL;
3454
3455 hci_quote_sent(chan->conn, num, quote);
3456
3457 BT_DBG("chan %p quote %d", chan, *quote);
3458 return chan;
3459 }
3460
3461 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3462 {
3463 struct hci_conn_hash *h = &hdev->conn_hash;
3464 struct hci_conn *conn;
3465 int num = 0;
3466
3467 BT_DBG("%s", hdev->name);
3468
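/* Channels that transmitted in the last round get their counter
 * reset; channels that were starved get the head of their queue
 * promoted to just below HCI_PRIO_MAX so they are preferred by the
 * next scheduling pass.
 */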
3469 rcu_read_lock();
3470
3471 list_for_each_entry_rcu(conn, &h->list, list) {
3472 struct hci_chan *chan;
3473
3474 if (conn->type != type)
3475 continue;
3476
3477 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3478 continue;
3479
3480 num++;
3481
3482 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3483 struct sk_buff *skb;
3484
3485 if (chan->sent) {
3486 chan->sent = 0;
3487 continue;
3488 }
3489
3490 if (skb_queue_empty(&chan->data_q))
3491 continue;
3492
3493 skb = skb_peek(&chan->data_q);
3494 if (skb->priority >= HCI_PRIO_MAX - 1)
3495 continue;
3496
3497 skb->priority = HCI_PRIO_MAX - 1;
3498
3499 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3500 skb->priority);
3501 }
3502
3503 if (hci_conn_num(hdev, type) == num)
3504 break;
3505 }
3506
3507 rcu_read_unlock();
3508
3509 }
3510
3511 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type)
3512 {
3513 unsigned long last_tx;
3514
3515 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
3516 return;
3517
3518 switch (type) {
3519 case LE_LINK:
3520 last_tx = hdev->le_last_tx;
3521 break;
3522 default:
3523 last_tx = hdev->acl_last_tx;
3524 break;
3525 }
3526
3527 /* tx timeout must be longer than maximum link supervision timeout
3528 * (40.9 seconds)
3529 */
3530 if (!cnt && time_after(jiffies, last_tx + HCI_ACL_TX_TIMEOUT))
3531 hci_link_tx_to(hdev, type);
3532 }
3533
3534 /* Schedule SCO */
3535 static void hci_sched_sco(struct hci_dev *hdev)
3536 {
3537 struct hci_conn *conn;
3538 struct sk_buff *skb;
3539 int quote;
3540
3541 BT_DBG("%s", hdev->name);
3542
3543 if (!hci_conn_num(hdev, SCO_LINK))
3544 return;
3545
3546 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3547 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3548 BT_DBG("skb %p len %d", skb, skb->len);
3549 hci_send_frame(hdev, skb);
3550
3551 conn->sent++;
3552 if (conn->sent == ~0)
3553 conn->sent = 0;
3554 }
3555 }
3556 }
3557
3558 static void hci_sched_esco(struct hci_dev *hdev)
3559 {
3560 struct hci_conn *conn;
3561 struct sk_buff *skb;
3562 int quote;
3563
3564 BT_DBG("%s", hdev->name);
3565
3566 if (!hci_conn_num(hdev, ESCO_LINK))
3567 return;
3568
3569 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3570 &quote))) {
3571 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3572 BT_DBG("skb %p len %d", skb, skb->len);
3573 hci_send_frame(hdev, skb);
3574
3575 conn->sent++;
3576 if (conn->sent == ~0)
3577 conn->sent = 0;
3578 }
3579 }
3580 }
3581
3582 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3583 {
3584 unsigned int cnt = hdev->acl_cnt;
3585 struct hci_chan *chan;
3586 struct sk_buff *skb;
3587 int quote;
3588
3589 __check_timeout(hdev, cnt, ACL_LINK);
3590
3591 while (hdev->acl_cnt &&
3592 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3593 u32 priority = (skb_peek(&chan->data_q))->priority;
3594 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3595 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3596 skb->len, skb->priority);
3597
3598 /* Stop if priority has changed */
3599 if (skb->priority < priority)
3600 break;
3601
3602 skb = skb_dequeue(&chan->data_q);
3603
3604 hci_conn_enter_active_mode(chan->conn,
3605 bt_cb(skb)->force_active);
3606
3607 hci_send_frame(hdev, skb);
3608 hdev->acl_last_tx = jiffies;
3609
3610 hdev->acl_cnt--;
3611 chan->sent++;
3612 chan->conn->sent++;
3613
3614 /* Send pending SCO packets right away */
3615 hci_sched_sco(hdev);
3616 hci_sched_esco(hdev);
3617 }
3618 }
3619
3620 if (cnt != hdev->acl_cnt)
3621 hci_prio_recalculate(hdev, ACL_LINK);
3622 }
3623
3624 static void hci_sched_acl(struct hci_dev *hdev)
3625 {
3626 BT_DBG("%s", hdev->name);
3627
3628 /* No ACL link over BR/EDR controller */
3629 if (!hci_conn_num(hdev, ACL_LINK))
3630 return;
3631
3632 hci_sched_acl_pkt(hdev);
3633 }
3634
3635 static void hci_sched_le(struct hci_dev *hdev)
3636 {
3637 struct hci_chan *chan;
3638 struct sk_buff *skb;
3639 int quote, *cnt, tmp;
3640
3641 BT_DBG("%s", hdev->name);
3642
3643 if (!hci_conn_num(hdev, LE_LINK))
3644 return;
3645
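/* Controllers without a dedicated LE buffer pool (le_pkts == 0)
 * share the BR/EDR ACL buffers for LE traffic.
 */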
3646 cnt = hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt;
3647
3648 __check_timeout(hdev, *cnt, LE_LINK);
3649
3650 tmp = *cnt;
3651 while (*cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3652 u32 priority = (skb_peek(&chan->data_q))->priority;
3653 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3654 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3655 skb->len, skb->priority);
3656
3657 /* Stop if priority has changed */
3658 if (skb->priority < priority)
3659 break;
3660
3661 skb = skb_dequeue(&chan->data_q);
3662
3663 hci_send_frame(hdev, skb);
3664 hdev->le_last_tx = jiffies;
3665
3666 (*cnt)--;
3667 chan->sent++;
3668 chan->conn->sent++;
3669
3670 /* Send pending SCO packets right away */
3671 hci_sched_sco(hdev);
3672 hci_sched_esco(hdev);
3673 }
3674 }
3675
3676 if (*cnt != tmp)
3677 hci_prio_recalculate(hdev, LE_LINK);
3678 }
3679
3680 /* Schedule CIS */
3681 static void hci_sched_iso(struct hci_dev *hdev)
3682 {
3683 struct hci_conn *conn;
3684 struct sk_buff *skb;
3685 int quote, *cnt;
3686
3687 BT_DBG("%s", hdev->name);
3688
3689 if (!hci_conn_num(hdev, ISO_LINK))
3690 return;
3691
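/* Prefer dedicated ISO buffers, falling back to LE and finally to
 * ACL buffers when the controller does not report them.
 */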
3692 cnt = hdev->iso_pkts ? &hdev->iso_cnt :
3693 hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt;
3694 while (*cnt && (conn = hci_low_sent(hdev, ISO_LINK, &quote))) {
3695 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3696 BT_DBG("skb %p len %d", skb, skb->len);
3697 hci_send_frame(hdev, skb);
3698
3699 conn->sent++;
3700 if (conn->sent == ~0)
3701 conn->sent = 0;
3702 (*cnt)--;
3703 }
3704 }
3705 }
3706
3707 static void hci_tx_work(struct work_struct *work)
3708 {
3709 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3710 struct sk_buff *skb;
3711
3712 BT_DBG("%s acl %d sco %d le %d iso %d", hdev->name, hdev->acl_cnt,
3713 hdev->sco_cnt, hdev->le_cnt, hdev->iso_cnt);
3714
3715 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
3716 /* Schedule queues and send stuff to HCI driver */
3717 hci_sched_sco(hdev);
3718 hci_sched_esco(hdev);
3719 hci_sched_iso(hdev);
3720 hci_sched_acl(hdev);
3721 hci_sched_le(hdev);
3722 }
3723
3724 /* Send next queued raw (unknown type) packet */
3725 while ((skb = skb_dequeue(&hdev->raw_q)))
3726 hci_send_frame(hdev, skb);
3727 }
3728
3729 /* ----- HCI RX task (incoming data processing) ----- */
3730
3731 /* ACL data packet */
3732 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3733 {
3734 struct hci_acl_hdr *hdr;
3735 struct hci_conn *conn;
3736 __u16 handle, flags;
3737
3738 hdr = skb_pull_data(skb, sizeof(*hdr));
3739 if (!hdr) {
3740 bt_dev_err(hdev, "ACL packet too small");
3741 goto drop;
3742 }
3743
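/* The 16-bit handle field carries the 12-bit connection handle
 * plus the Packet Boundary and Broadcast flags in the upper bits.
 */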
3744 handle = __le16_to_cpu(hdr->handle);
3745 flags = hci_flags(handle);
3746 handle = hci_handle(handle);
3747
3748 bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
3749 handle, flags);
3750
3751 hdev->stat.acl_rx++;
3752
3753 hci_dev_lock(hdev);
3754 conn = hci_conn_hash_lookup_handle(hdev, handle);
3755 hci_dev_unlock(hdev);
3756
3757 if (conn) {
3758 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3759
3760 /* Send to upper protocol */
3761 l2cap_recv_acldata(conn, skb, flags);
3762 return;
3763 } else {
3764 bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
3765 handle);
3766 }
3767
3768 drop:
3769 kfree_skb(skb);
3770 }
3771
3772 /* SCO data packet */
3773 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3774 {
3775 struct hci_sco_hdr *hdr = (void *) skb->data;
3776 struct hci_conn *conn;
3777 __u16 handle, flags;
3778
3779 skb_pull(skb, HCI_SCO_HDR_SIZE);
3780
3781 handle = __le16_to_cpu(hdr->handle);
3782 flags = hci_flags(handle);
3783 handle = hci_handle(handle);
3784
3785 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3786 handle, flags);
3787
3788 hdev->stat.sco_rx++;
3789
3790 hci_dev_lock(hdev);
3791 conn = hci_conn_hash_lookup_handle(hdev, handle);
3792 hci_dev_unlock(hdev);
3793
3794 if (conn) {
3795 /* Send to upper protocol */
3796 hci_skb_pkt_status(skb) = flags & 0x03;
3797 sco_recv_scodata(conn, skb);
3798 return;
3799 } else {
3800 bt_dev_err_ratelimited(hdev, "SCO packet for unknown connection handle %d",
3801 handle);
3802 }
3803
3804 kfree_skb(skb);
3805 }
3806
3807 static void hci_isodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3808 {
3809 struct hci_iso_hdr *hdr;
3810 struct hci_conn *conn;
3811 __u16 handle, flags;
3812
3813 hdr = skb_pull_data(skb, sizeof(*hdr));
3814 if (!hdr) {
3815 bt_dev_err(hdev, "ISO packet too small");
3816 goto drop;
3817 }
3818
3819 handle = __le16_to_cpu(hdr->handle);
3820 flags = hci_flags(handle);
3821 handle = hci_handle(handle);
3822
3823 bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
3824 handle, flags);
3825
3826 hci_dev_lock(hdev);
3827 conn = hci_conn_hash_lookup_handle(hdev, handle);
3828 hci_dev_unlock(hdev);
3829
3830 if (!conn) {
3831 bt_dev_err(hdev, "ISO packet for unknown connection handle %d",
3832 handle);
3833 goto drop;
3834 }
3835
3836 /* Send to upper protocol */
3837 iso_recv(conn, skb, flags);
3838 return;
3839
3840 drop:
3841 kfree_skb(skb);
3842 }
3843
3844 static bool hci_req_is_complete(struct hci_dev *hdev)
3845 {
3846 struct sk_buff *skb;
3847
3848 skb = skb_peek(&hdev->cmd_q);
3849 if (!skb)
3850 return true;
3851
3852 return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
3853 }
3854
3855 static void hci_resend_last(struct hci_dev *hdev)
3856 {
3857 struct hci_command_hdr *sent;
3858 struct sk_buff *skb;
3859 u16 opcode;
3860
3861 if (!hdev->sent_cmd)
3862 return;
3863
3864 sent = (void *) hdev->sent_cmd->data;
3865 opcode = __le16_to_cpu(sent->opcode);
3866 if (opcode == HCI_OP_RESET)
3867 return;
3868
3869 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3870 if (!skb)
3871 return;
3872
3873 skb_queue_head(&hdev->cmd_q, skb);
3874 queue_work(hdev->workqueue, &hdev->cmd_work);
3875 }
3876
3877 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
3878 hci_req_complete_t *req_complete,
3879 hci_req_complete_skb_t *req_complete_skb)
3880 {
3881 struct sk_buff *skb;
3882 unsigned long flags;
3883
3884 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3885
3886 /* If the completed command doesn't match the last one that was
3887 * sent we need to do special handling of it.
3888 */
3889 if (!hci_sent_cmd_data(hdev, opcode)) {
3890 /* Some CSR based controllers generate a spontaneous
3891 * reset complete event during init and any pending
3892 * command will never be completed. In such a case we
3893 * need to resend whatever was the last sent
3894 * command.
3895 */
3896 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3897 hci_resend_last(hdev);
3898
3899 return;
3900 }
3901
3902 /* If we reach this point this event matches the last command sent */
3903 hci_dev_clear_flag(hdev, HCI_CMD_PENDING);
3904
3905 /* If the command succeeded and there are still more commands in
3906 * this request, the request is not yet complete.
3907 */
3908 if (!status && !hci_req_is_complete(hdev))
3909 return;
3910
3911 skb = hdev->req_skb;
3912
3913 /* If this was the last command in a request the complete
3914 * callback would be found in hdev->req_skb instead of the
3915 * command queue (hdev->cmd_q).
3916 */
3917 if (skb && bt_cb(skb)->hci.req_flags & HCI_REQ_SKB) {
3918 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
3919 return;
3920 }
3921
3922 if (skb && bt_cb(skb)->hci.req_complete) {
3923 *req_complete = bt_cb(skb)->hci.req_complete;
3924 return;
3925 }
3926
3927 /* Remove all pending commands belonging to this request */
3928 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3929 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
3930 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
3931 __skb_queue_head(&hdev->cmd_q, skb);
3932 break;
3933 }
3934
3935 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
3936 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
3937 else
3938 *req_complete = bt_cb(skb)->hci.req_complete;
3939 dev_kfree_skb_irq(skb);
3940 }
3941 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3942 }
3943
3944 static void hci_rx_work(struct work_struct *work)
3945 {
3946 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
3947 struct sk_buff *skb;
3948
3949 BT_DBG("%s", hdev->name);
3950
3951 /* The kcov_remote functions are used to collect packet parsing
3952 * coverage from this background thread and to associate that
3953 * coverage with the syscall thread which originally injected the
3954 * packet. This helps with fuzzing the kernel.
3955 */
3956 for (; (skb = skb_dequeue(&hdev->rx_q)); kcov_remote_stop()) {
3957 kcov_remote_start_common(skb_get_kcov_handle(skb));
3958
3959 /* Send copy to monitor */
3960 hci_send_to_monitor(hdev, skb);
3961
3962 if (atomic_read(&hdev->promisc)) {
3963 /* Send copy to the sockets */
3964 hci_send_to_sock(hdev, skb);
3965 }
3966
3967 /* If the device has been opened in HCI_USER_CHANNEL,
3968 * userspace has exclusive access to the device.
3969 * While the device is in HCI_INIT, we still need to
3970 * process incoming packets so that the driver can
3971 * complete its setup().
3972 */
3973 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
3974 !test_bit(HCI_INIT, &hdev->flags)) {
3975 kfree_skb(skb);
3976 continue;
3977 }
3978
3979 if (test_bit(HCI_INIT, &hdev->flags)) {
3980 /* Don't process data packets in these states. */
3981 switch (hci_skb_pkt_type(skb)) {
3982 case HCI_ACLDATA_PKT:
3983 case HCI_SCODATA_PKT:
3984 case HCI_ISODATA_PKT:
3985 kfree_skb(skb);
3986 continue;
3987 }
3988 }
3989
3990 /* Process frame */
3991 switch (hci_skb_pkt_type(skb)) {
3992 case HCI_EVENT_PKT:
3993 BT_DBG("%s Event packet", hdev->name);
3994 hci_event_packet(hdev, skb);
3995 break;
3996
3997 case HCI_ACLDATA_PKT:
3998 BT_DBG("%s ACL data packet", hdev->name);
3999 hci_acldata_packet(hdev, skb);
4000 break;
4001
4002 case HCI_SCODATA_PKT:
4003 BT_DBG("%s SCO data packet", hdev->name);
4004 hci_scodata_packet(hdev, skb);
4005 break;
4006
4007 case HCI_ISODATA_PKT:
4008 BT_DBG("%s ISO data packet", hdev->name);
4009 hci_isodata_packet(hdev, skb);
4010 break;
4011
4012 default:
4013 kfree_skb(skb);
4014 break;
4015 }
4016 }
4017 }
4018
4019 static void hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb)
4020 {
4021 int err;
4022
4023 bt_dev_dbg(hdev, "skb %p", skb);
4024
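/* Keep a clone of the command so hci_sent_cmd_data() can later
 * match the completion event against the opcode that was sent.
 * If cloning fails, requeue the command and retry from the
 * command workqueue.
 */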
4025 kfree_skb(hdev->sent_cmd);
4026
4027 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4028 if (!hdev->sent_cmd) {
4029 skb_queue_head(&hdev->cmd_q, skb);
4030 queue_work(hdev->workqueue, &hdev->cmd_work);
4031 return;
4032 }
4033
4034 err = hci_send_frame(hdev, skb);
4035 if (err < 0) {
4036 hci_cmd_sync_cancel_sync(hdev, -err);
4037 return;
4038 }
4039
4040 if (hci_req_status_pend(hdev) &&
4041 !hci_dev_test_and_set_flag(hdev, HCI_CMD_PENDING)) {
4042 kfree_skb(hdev->req_skb);
4043 hdev->req_skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4044 }
4045
4046 atomic_dec(&hdev->cmd_cnt);
4047 }
4048
4049 static void hci_cmd_work(struct work_struct *work)
4050 {
4051 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4052 struct sk_buff *skb;
4053
4054 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4055 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4056
4057 /* Send queued commands */
4058 if (atomic_read(&hdev->cmd_cnt)) {
4059 skb = skb_dequeue(&hdev->cmd_q);
4060 if (!skb)
4061 return;
4062
4063 hci_send_cmd_sync(hdev, skb);
4064
4065 rcu_read_lock();
4066 if (test_bit(HCI_RESET, &hdev->flags) ||
4067 hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
4068 cancel_delayed_work(&hdev->cmd_timer);
4069 else
4070 queue_delayed_work(hdev->workqueue, &hdev->cmd_timer,
4071 HCI_CMD_TIMEOUT);
4072 rcu_read_unlock();
4073 }
4074 }
4075