1 /* 2 BlueZ - Bluetooth protocol stack for Linux 3 Copyright (C) 2000-2001 Qualcomm Incorporated 4 Copyright (C) 2011 ProFUSION Embedded Systems 5 6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> 7 8 This program is free software; you can redistribute it and/or modify 9 it under the terms of the GNU General Public License version 2 as 10 published by the Free Software Foundation; 11 12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. 15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY 16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES 17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 20 21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, 22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS 23 SOFTWARE IS DISCLAIMED. 24 */ 25 26 /* Bluetooth HCI core. */ 27 28 #include <linux/export.h> 29 #include <linux/rfkill.h> 30 #include <linux/debugfs.h> 31 #include <linux/crypto.h> 32 #include <linux/kcov.h> 33 #include <linux/property.h> 34 #include <linux/suspend.h> 35 #include <linux/wait.h> 36 #include <asm/unaligned.h> 37 38 #include <net/bluetooth/bluetooth.h> 39 #include <net/bluetooth/hci_core.h> 40 #include <net/bluetooth/l2cap.h> 41 #include <net/bluetooth/mgmt.h> 42 43 #include "hci_request.h" 44 #include "hci_debugfs.h" 45 #include "smp.h" 46 #include "leds.h" 47 #include "msft.h" 48 #include "aosp.h" 49 #include "hci_codec.h" 50 51 static void hci_rx_work(struct work_struct *work); 52 static void hci_cmd_work(struct work_struct *work); 53 static void hci_tx_work(struct work_struct *work); 54 55 /* HCI device list */ 56 LIST_HEAD(hci_dev_list); 57 DEFINE_RWLOCK(hci_dev_list_lock); 58 59 /* HCI callback list */ 60 LIST_HEAD(hci_cb_list); 61 DEFINE_MUTEX(hci_cb_list_lock); 62 63 /* HCI ID Numbering */ 64 static DEFINE_IDA(hci_index_ida); 65 66 /* Get HCI device by index. 67 * Device is held on return. 
*/ 68 struct hci_dev *hci_dev_get(int index) 69 { 70 struct hci_dev *hdev = NULL, *d; 71 72 BT_DBG("%d", index); 73 74 if (index < 0) 75 return NULL; 76 77 read_lock(&hci_dev_list_lock); 78 list_for_each_entry(d, &hci_dev_list, list) { 79 if (d->id == index) { 80 hdev = hci_dev_hold(d); 81 break; 82 } 83 } 84 read_unlock(&hci_dev_list_lock); 85 return hdev; 86 } 87 88 /* ---- Inquiry support ---- */ 89 90 bool hci_discovery_active(struct hci_dev *hdev) 91 { 92 struct discovery_state *discov = &hdev->discovery; 93 94 switch (discov->state) { 95 case DISCOVERY_FINDING: 96 case DISCOVERY_RESOLVING: 97 return true; 98 99 default: 100 return false; 101 } 102 } 103 104 void hci_discovery_set_state(struct hci_dev *hdev, int state) 105 { 106 int old_state = hdev->discovery.state; 107 108 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state); 109 110 if (old_state == state) 111 return; 112 113 hdev->discovery.state = state; 114 115 switch (state) { 116 case DISCOVERY_STOPPED: 117 hci_update_passive_scan(hdev); 118 119 if (old_state != DISCOVERY_STARTING) 120 mgmt_discovering(hdev, 0); 121 break; 122 case DISCOVERY_STARTING: 123 break; 124 case DISCOVERY_FINDING: 125 mgmt_discovering(hdev, 1); 126 break; 127 case DISCOVERY_RESOLVING: 128 break; 129 case DISCOVERY_STOPPING: 130 break; 131 } 132 } 133 134 void hci_inquiry_cache_flush(struct hci_dev *hdev) 135 { 136 struct discovery_state *cache = &hdev->discovery; 137 struct inquiry_entry *p, *n; 138 139 list_for_each_entry_safe(p, n, &cache->all, all) { 140 list_del(&p->all); 141 kfree(p); 142 } 143 144 INIT_LIST_HEAD(&cache->unknown); 145 INIT_LIST_HEAD(&cache->resolve); 146 } 147 148 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, 149 bdaddr_t *bdaddr) 150 { 151 struct discovery_state *cache = &hdev->discovery; 152 struct inquiry_entry *e; 153 154 BT_DBG("cache %p, %pMR", cache, bdaddr); 155 156 list_for_each_entry(e, &cache->all, all) { 157 if (!bacmp(&e->data.bdaddr, bdaddr)) 158 return e; 159 } 160 161 return NULL; 162 } 163 164 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev, 165 bdaddr_t *bdaddr) 166 { 167 struct discovery_state *cache = &hdev->discovery; 168 struct inquiry_entry *e; 169 170 BT_DBG("cache %p, %pMR", cache, bdaddr); 171 172 list_for_each_entry(e, &cache->unknown, list) { 173 if (!bacmp(&e->data.bdaddr, bdaddr)) 174 return e; 175 } 176 177 return NULL; 178 } 179 180 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev, 181 bdaddr_t *bdaddr, 182 int state) 183 { 184 struct discovery_state *cache = &hdev->discovery; 185 struct inquiry_entry *e; 186 187 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state); 188 189 list_for_each_entry(e, &cache->resolve, list) { 190 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state) 191 return e; 192 if (!bacmp(&e->data.bdaddr, bdaddr)) 193 return e; 194 } 195 196 return NULL; 197 } 198 199 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev, 200 struct inquiry_entry *ie) 201 { 202 struct discovery_state *cache = &hdev->discovery; 203 struct list_head *pos = &cache->resolve; 204 struct inquiry_entry *p; 205 206 list_del(&ie->list); 207 208 list_for_each_entry(p, &cache->resolve, list) { 209 if (p->name_state != NAME_PENDING && 210 abs(p->data.rssi) >= abs(ie->data.rssi)) 211 break; 212 pos = &p->list; 213 } 214 215 list_add(&ie->list, pos); 216 } 217 218 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data, 219 bool name_known) 220 { 221 struct discovery_state *cache 
= &hdev->discovery; 222 struct inquiry_entry *ie; 223 u32 flags = 0; 224 225 BT_DBG("cache %p, %pMR", cache, &data->bdaddr); 226 227 hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR); 228 229 if (!data->ssp_mode) 230 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING; 231 232 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr); 233 if (ie) { 234 if (!ie->data.ssp_mode) 235 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING; 236 237 if (ie->name_state == NAME_NEEDED && 238 data->rssi != ie->data.rssi) { 239 ie->data.rssi = data->rssi; 240 hci_inquiry_cache_update_resolve(hdev, ie); 241 } 242 243 goto update; 244 } 245 246 /* Entry not in the cache. Add new one. */ 247 ie = kzalloc(sizeof(*ie), GFP_KERNEL); 248 if (!ie) { 249 flags |= MGMT_DEV_FOUND_CONFIRM_NAME; 250 goto done; 251 } 252 253 list_add(&ie->all, &cache->all); 254 255 if (name_known) { 256 ie->name_state = NAME_KNOWN; 257 } else { 258 ie->name_state = NAME_NOT_KNOWN; 259 list_add(&ie->list, &cache->unknown); 260 } 261 262 update: 263 if (name_known && ie->name_state != NAME_KNOWN && 264 ie->name_state != NAME_PENDING) { 265 ie->name_state = NAME_KNOWN; 266 list_del(&ie->list); 267 } 268 269 memcpy(&ie->data, data, sizeof(*data)); 270 ie->timestamp = jiffies; 271 cache->timestamp = jiffies; 272 273 if (ie->name_state == NAME_NOT_KNOWN) 274 flags |= MGMT_DEV_FOUND_CONFIRM_NAME; 275 276 done: 277 return flags; 278 } 279 280 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf) 281 { 282 struct discovery_state *cache = &hdev->discovery; 283 struct inquiry_info *info = (struct inquiry_info *) buf; 284 struct inquiry_entry *e; 285 int copied = 0; 286 287 list_for_each_entry(e, &cache->all, all) { 288 struct inquiry_data *data = &e->data; 289 290 if (copied >= num) 291 break; 292 293 bacpy(&info->bdaddr, &data->bdaddr); 294 info->pscan_rep_mode = data->pscan_rep_mode; 295 info->pscan_period_mode = data->pscan_period_mode; 296 info->pscan_mode = data->pscan_mode; 297 memcpy(info->dev_class, data->dev_class, 3); 298 info->clock_offset = data->clock_offset; 299 300 info++; 301 copied++; 302 } 303 304 BT_DBG("cache %p, copied %d", cache, copied); 305 return copied; 306 } 307 308 static int hci_inq_req(struct hci_request *req, unsigned long opt) 309 { 310 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt; 311 struct hci_dev *hdev = req->hdev; 312 struct hci_cp_inquiry cp; 313 314 BT_DBG("%s", hdev->name); 315 316 if (test_bit(HCI_INQUIRY, &hdev->flags)) 317 return 0; 318 319 /* Start Inquiry */ 320 memcpy(&cp.lap, &ir->lap, 3); 321 cp.length = ir->length; 322 cp.num_rsp = ir->num_rsp; 323 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp); 324 325 return 0; 326 } 327 328 int hci_inquiry(void __user *arg) 329 { 330 __u8 __user *ptr = arg; 331 struct hci_inquiry_req ir; 332 struct hci_dev *hdev; 333 int err = 0, do_inquiry = 0, max_rsp; 334 long timeo; 335 __u8 *buf; 336 337 if (copy_from_user(&ir, ptr, sizeof(ir))) 338 return -EFAULT; 339 340 hdev = hci_dev_get(ir.dev_id); 341 if (!hdev) 342 return -ENODEV; 343 344 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { 345 err = -EBUSY; 346 goto done; 347 } 348 349 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { 350 err = -EOPNOTSUPP; 351 goto done; 352 } 353 354 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) { 355 err = -EOPNOTSUPP; 356 goto done; 357 } 358 359 /* Restrict maximum inquiry length to 60 seconds */ 360 if (ir.length > 60) { 361 err = -EINVAL; 362 goto done; 363 } 364 365 hci_dev_lock(hdev); 366 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX || 367 
inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) { 368 hci_inquiry_cache_flush(hdev); 369 do_inquiry = 1; 370 } 371 hci_dev_unlock(hdev); 372 373 timeo = ir.length * msecs_to_jiffies(2000); 374 375 if (do_inquiry) { 376 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir, 377 timeo, NULL); 378 if (err < 0) 379 goto done; 380 381 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is 382 * cleared). If it is interrupted by a signal, return -EINTR. 383 */ 384 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, 385 TASK_INTERRUPTIBLE)) { 386 err = -EINTR; 387 goto done; 388 } 389 } 390 391 /* for unlimited number of responses we will use buffer with 392 * 255 entries 393 */ 394 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp; 395 396 /* cache_dump can't sleep. Therefore we allocate temp buffer and then 397 * copy it to the user space. 398 */ 399 buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL); 400 if (!buf) { 401 err = -ENOMEM; 402 goto done; 403 } 404 405 hci_dev_lock(hdev); 406 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf); 407 hci_dev_unlock(hdev); 408 409 BT_DBG("num_rsp %d", ir.num_rsp); 410 411 if (!copy_to_user(ptr, &ir, sizeof(ir))) { 412 ptr += sizeof(ir); 413 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) * 414 ir.num_rsp)) 415 err = -EFAULT; 416 } else 417 err = -EFAULT; 418 419 kfree(buf); 420 421 done: 422 hci_dev_put(hdev); 423 return err; 424 } 425 426 static int hci_dev_do_open(struct hci_dev *hdev) 427 { 428 int ret = 0; 429 430 BT_DBG("%s %p", hdev->name, hdev); 431 432 hci_req_sync_lock(hdev); 433 434 ret = hci_dev_open_sync(hdev); 435 436 hci_req_sync_unlock(hdev); 437 return ret; 438 } 439 440 /* ---- HCI ioctl helpers ---- */ 441 442 int hci_dev_open(__u16 dev) 443 { 444 struct hci_dev *hdev; 445 int err; 446 447 hdev = hci_dev_get(dev); 448 if (!hdev) 449 return -ENODEV; 450 451 /* Devices that are marked as unconfigured can only be powered 452 * up as user channel. Trying to bring them up as normal devices 453 * will result into a failure. Only user channel operation is 454 * possible. 455 * 456 * When this function is called for a user channel, the flag 457 * HCI_USER_CHANNEL will be set first before attempting to 458 * open the device. 459 */ 460 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) && 461 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { 462 err = -EOPNOTSUPP; 463 goto done; 464 } 465 466 /* We need to ensure that no other power on/off work is pending 467 * before proceeding to call hci_dev_do_open. This is 468 * particularly important if the setup procedure has not yet 469 * completed. 470 */ 471 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) 472 cancel_delayed_work(&hdev->power_off); 473 474 /* After this call it is guaranteed that the setup procedure 475 * has finished. This means that error conditions like RFKILL 476 * or no valid public or static random address apply. 477 */ 478 flush_workqueue(hdev->req_workqueue); 479 480 /* For controllers not using the management interface and that 481 * are brought up using legacy ioctl, set the HCI_BONDABLE bit 482 * so that pairing works for them. Once the management interface 483 * is in use this bit will be cleared again and userspace has 484 * to explicitly enable it. 
485 */ 486 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && 487 !hci_dev_test_flag(hdev, HCI_MGMT)) 488 hci_dev_set_flag(hdev, HCI_BONDABLE); 489 490 err = hci_dev_do_open(hdev); 491 492 done: 493 hci_dev_put(hdev); 494 return err; 495 } 496 497 int hci_dev_do_close(struct hci_dev *hdev) 498 { 499 int err; 500 501 BT_DBG("%s %p", hdev->name, hdev); 502 503 hci_req_sync_lock(hdev); 504 505 err = hci_dev_close_sync(hdev); 506 507 hci_req_sync_unlock(hdev); 508 509 return err; 510 } 511 512 int hci_dev_close(__u16 dev) 513 { 514 struct hci_dev *hdev; 515 int err; 516 517 hdev = hci_dev_get(dev); 518 if (!hdev) 519 return -ENODEV; 520 521 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { 522 err = -EBUSY; 523 goto done; 524 } 525 526 cancel_work_sync(&hdev->power_on); 527 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) 528 cancel_delayed_work(&hdev->power_off); 529 530 err = hci_dev_do_close(hdev); 531 532 done: 533 hci_dev_put(hdev); 534 return err; 535 } 536 537 static int hci_dev_do_reset(struct hci_dev *hdev) 538 { 539 int ret; 540 541 BT_DBG("%s %p", hdev->name, hdev); 542 543 hci_req_sync_lock(hdev); 544 545 /* Drop queues */ 546 skb_queue_purge(&hdev->rx_q); 547 skb_queue_purge(&hdev->cmd_q); 548 549 /* Cancel these to avoid queueing non-chained pending work */ 550 hci_dev_set_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE); 551 /* Wait for 552 * 553 * if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE)) 554 * queue_delayed_work(&hdev->{cmd,ncmd}_timer) 555 * 556 * inside RCU section to see the flag or complete scheduling. 557 */ 558 synchronize_rcu(); 559 /* Explicitly cancel works in case scheduled after setting the flag. */ 560 cancel_delayed_work(&hdev->cmd_timer); 561 cancel_delayed_work(&hdev->ncmd_timer); 562 563 /* Avoid potential lockdep warnings from the *_flush() calls by 564 * ensuring the workqueue is empty up front. 
565 */ 566 drain_workqueue(hdev->workqueue); 567 568 hci_dev_lock(hdev); 569 hci_inquiry_cache_flush(hdev); 570 hci_conn_hash_flush(hdev); 571 hci_dev_unlock(hdev); 572 573 if (hdev->flush) 574 hdev->flush(hdev); 575 576 hci_dev_clear_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE); 577 578 atomic_set(&hdev->cmd_cnt, 1); 579 hdev->acl_cnt = 0; 580 hdev->sco_cnt = 0; 581 hdev->le_cnt = 0; 582 hdev->iso_cnt = 0; 583 584 ret = hci_reset_sync(hdev); 585 586 hci_req_sync_unlock(hdev); 587 return ret; 588 } 589 590 int hci_dev_reset(__u16 dev) 591 { 592 struct hci_dev *hdev; 593 int err; 594 595 hdev = hci_dev_get(dev); 596 if (!hdev) 597 return -ENODEV; 598 599 if (!test_bit(HCI_UP, &hdev->flags)) { 600 err = -ENETDOWN; 601 goto done; 602 } 603 604 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { 605 err = -EBUSY; 606 goto done; 607 } 608 609 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { 610 err = -EOPNOTSUPP; 611 goto done; 612 } 613 614 err = hci_dev_do_reset(hdev); 615 616 done: 617 hci_dev_put(hdev); 618 return err; 619 } 620 621 int hci_dev_reset_stat(__u16 dev) 622 { 623 struct hci_dev *hdev; 624 int ret = 0; 625 626 hdev = hci_dev_get(dev); 627 if (!hdev) 628 return -ENODEV; 629 630 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { 631 ret = -EBUSY; 632 goto done; 633 } 634 635 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { 636 ret = -EOPNOTSUPP; 637 goto done; 638 } 639 640 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats)); 641 642 done: 643 hci_dev_put(hdev); 644 return ret; 645 } 646 647 static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan) 648 { 649 bool conn_changed, discov_changed; 650 651 BT_DBG("%s scan 0x%02x", hdev->name, scan); 652 653 if ((scan & SCAN_PAGE)) 654 conn_changed = !hci_dev_test_and_set_flag(hdev, 655 HCI_CONNECTABLE); 656 else 657 conn_changed = hci_dev_test_and_clear_flag(hdev, 658 HCI_CONNECTABLE); 659 660 if ((scan & SCAN_INQUIRY)) { 661 discov_changed = !hci_dev_test_and_set_flag(hdev, 662 HCI_DISCOVERABLE); 663 } else { 664 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE); 665 discov_changed = hci_dev_test_and_clear_flag(hdev, 666 HCI_DISCOVERABLE); 667 } 668 669 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 670 return; 671 672 if (conn_changed || discov_changed) { 673 /* In case this was disabled through mgmt */ 674 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED); 675 676 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) 677 hci_update_adv_data(hdev, hdev->cur_adv_instance); 678 679 mgmt_new_settings(hdev); 680 } 681 } 682 683 int hci_dev_cmd(unsigned int cmd, void __user *arg) 684 { 685 struct hci_dev *hdev; 686 struct hci_dev_req dr; 687 __le16 policy; 688 int err = 0; 689 690 if (copy_from_user(&dr, arg, sizeof(dr))) 691 return -EFAULT; 692 693 hdev = hci_dev_get(dr.dev_id); 694 if (!hdev) 695 return -ENODEV; 696 697 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { 698 err = -EBUSY; 699 goto done; 700 } 701 702 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { 703 err = -EOPNOTSUPP; 704 goto done; 705 } 706 707 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) { 708 err = -EOPNOTSUPP; 709 goto done; 710 } 711 712 switch (cmd) { 713 case HCISETAUTH: 714 err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_AUTH_ENABLE, 715 1, &dr.dev_opt, HCI_CMD_TIMEOUT); 716 break; 717 718 case HCISETENCRYPT: 719 if (!lmp_encrypt_capable(hdev)) { 720 err = -EOPNOTSUPP; 721 break; 722 } 723 724 if (!test_bit(HCI_AUTH, &hdev->flags)) { 725 /* Auth must be enabled first */ 726 err = __hci_cmd_sync_status(hdev, 727 HCI_OP_WRITE_AUTH_ENABLE, 728 1, &dr.dev_opt, 729 HCI_CMD_TIMEOUT); 
730 if (err) 731 break; 732 } 733 734 err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 735 1, &dr.dev_opt, 736 HCI_CMD_TIMEOUT); 737 break; 738 739 case HCISETSCAN: 740 err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE, 741 1, &dr.dev_opt, 742 HCI_CMD_TIMEOUT); 743 744 /* Ensure that the connectable and discoverable states 745 * get correctly modified as this was a non-mgmt change. 746 */ 747 if (!err) 748 hci_update_passive_scan_state(hdev, dr.dev_opt); 749 break; 750 751 case HCISETLINKPOL: 752 policy = cpu_to_le16(dr.dev_opt); 753 754 err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 755 2, &policy, 756 HCI_CMD_TIMEOUT); 757 break; 758 759 case HCISETLINKMODE: 760 hdev->link_mode = ((__u16) dr.dev_opt) & 761 (HCI_LM_MASTER | HCI_LM_ACCEPT); 762 break; 763 764 case HCISETPTYPE: 765 if (hdev->pkt_type == (__u16) dr.dev_opt) 766 break; 767 768 hdev->pkt_type = (__u16) dr.dev_opt; 769 mgmt_phy_configuration_changed(hdev, NULL); 770 break; 771 772 case HCISETACLMTU: 773 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1); 774 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0); 775 break; 776 777 case HCISETSCOMTU: 778 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1); 779 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0); 780 break; 781 782 default: 783 err = -EINVAL; 784 break; 785 } 786 787 done: 788 hci_dev_put(hdev); 789 return err; 790 } 791 792 int hci_get_dev_list(void __user *arg) 793 { 794 struct hci_dev *hdev; 795 struct hci_dev_list_req *dl; 796 struct hci_dev_req *dr; 797 int n = 0, size, err; 798 __u16 dev_num; 799 800 if (get_user(dev_num, (__u16 __user *) arg)) 801 return -EFAULT; 802 803 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr)) 804 return -EINVAL; 805 806 size = sizeof(*dl) + dev_num * sizeof(*dr); 807 808 dl = kzalloc(size, GFP_KERNEL); 809 if (!dl) 810 return -ENOMEM; 811 812 dr = dl->dev_req; 813 814 read_lock(&hci_dev_list_lock); 815 list_for_each_entry(hdev, &hci_dev_list, list) { 816 unsigned long flags = hdev->flags; 817 818 /* When the auto-off is configured it means the transport 819 * is running, but in that case still indicate that the 820 * device is actually down. 821 */ 822 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) 823 flags &= ~BIT(HCI_UP); 824 825 (dr + n)->dev_id = hdev->id; 826 (dr + n)->dev_opt = flags; 827 828 if (++n >= dev_num) 829 break; 830 } 831 read_unlock(&hci_dev_list_lock); 832 833 dl->dev_num = n; 834 size = sizeof(*dl) + n * sizeof(*dr); 835 836 err = copy_to_user(arg, dl, size); 837 kfree(dl); 838 839 return err ? -EFAULT : 0; 840 } 841 842 int hci_get_dev_info(void __user *arg) 843 { 844 struct hci_dev *hdev; 845 struct hci_dev_info di; 846 unsigned long flags; 847 int err = 0; 848 849 if (copy_from_user(&di, arg, sizeof(di))) 850 return -EFAULT; 851 852 hdev = hci_dev_get(di.dev_id); 853 if (!hdev) 854 return -ENODEV; 855 856 /* When the auto-off is configured it means the transport 857 * is running, but in that case still indicate that the 858 * device is actually down. 
859 */ 860 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) 861 flags = hdev->flags & ~BIT(HCI_UP); 862 else 863 flags = hdev->flags; 864 865 strscpy(di.name, hdev->name, sizeof(di.name)); 866 di.bdaddr = hdev->bdaddr; 867 di.type = (hdev->bus & 0x0f); 868 di.flags = flags; 869 di.pkt_type = hdev->pkt_type; 870 if (lmp_bredr_capable(hdev)) { 871 di.acl_mtu = hdev->acl_mtu; 872 di.acl_pkts = hdev->acl_pkts; 873 di.sco_mtu = hdev->sco_mtu; 874 di.sco_pkts = hdev->sco_pkts; 875 } else { 876 di.acl_mtu = hdev->le_mtu; 877 di.acl_pkts = hdev->le_pkts; 878 di.sco_mtu = 0; 879 di.sco_pkts = 0; 880 } 881 di.link_policy = hdev->link_policy; 882 di.link_mode = hdev->link_mode; 883 884 memcpy(&di.stat, &hdev->stat, sizeof(di.stat)); 885 memcpy(&di.features, &hdev->features, sizeof(di.features)); 886 887 if (copy_to_user(arg, &di, sizeof(di))) 888 err = -EFAULT; 889 890 hci_dev_put(hdev); 891 892 return err; 893 } 894 895 /* ---- Interface to HCI drivers ---- */ 896 897 static int hci_rfkill_set_block(void *data, bool blocked) 898 { 899 struct hci_dev *hdev = data; 900 901 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked); 902 903 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) 904 return -EBUSY; 905 906 if (blocked) { 907 hci_dev_set_flag(hdev, HCI_RFKILLED); 908 if (!hci_dev_test_flag(hdev, HCI_SETUP) && 909 !hci_dev_test_flag(hdev, HCI_CONFIG)) 910 hci_dev_do_close(hdev); 911 } else { 912 hci_dev_clear_flag(hdev, HCI_RFKILLED); 913 } 914 915 return 0; 916 } 917 918 static const struct rfkill_ops hci_rfkill_ops = { 919 .set_block = hci_rfkill_set_block, 920 }; 921 922 static void hci_power_on(struct work_struct *work) 923 { 924 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on); 925 int err; 926 927 BT_DBG("%s", hdev->name); 928 929 if (test_bit(HCI_UP, &hdev->flags) && 930 hci_dev_test_flag(hdev, HCI_MGMT) && 931 hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) { 932 cancel_delayed_work(&hdev->power_off); 933 err = hci_powered_update_sync(hdev); 934 mgmt_power_on(hdev, err); 935 return; 936 } 937 938 err = hci_dev_do_open(hdev); 939 if (err < 0) { 940 hci_dev_lock(hdev); 941 mgmt_set_powered_failed(hdev, err); 942 hci_dev_unlock(hdev); 943 return; 944 } 945 946 /* During the HCI setup phase, a few error conditions are 947 * ignored and they need to be checked now. If they are still 948 * valid, it is important to turn the device back off. 949 */ 950 if (hci_dev_test_flag(hdev, HCI_RFKILLED) || 951 hci_dev_test_flag(hdev, HCI_UNCONFIGURED) || 952 (!bacmp(&hdev->bdaddr, BDADDR_ANY) && 953 !bacmp(&hdev->static_addr, BDADDR_ANY))) { 954 hci_dev_clear_flag(hdev, HCI_AUTO_OFF); 955 hci_dev_do_close(hdev); 956 } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) { 957 queue_delayed_work(hdev->req_workqueue, &hdev->power_off, 958 HCI_AUTO_OFF_TIMEOUT); 959 } 960 961 if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) { 962 /* For unconfigured devices, set the HCI_RAW flag 963 * so that userspace can easily identify them. 964 */ 965 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) 966 set_bit(HCI_RAW, &hdev->flags); 967 968 /* For fully configured devices, this will send 969 * the Index Added event. For unconfigured devices, 970 * it will send Unconfigued Index Added event. 971 * 972 * Devices with HCI_QUIRK_RAW_DEVICE are ignored 973 * and no event will be send. 974 */ 975 mgmt_index_added(hdev); 976 } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) { 977 /* When the controller is now configured, then it 978 * is important to clear the HCI_RAW flag. 
979 */ 980 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) 981 clear_bit(HCI_RAW, &hdev->flags); 982 983 /* Powering on the controller with HCI_CONFIG set only 984 * happens with the transition from unconfigured to 985 * configured. This will send the Index Added event. 986 */ 987 mgmt_index_added(hdev); 988 } 989 } 990 991 static void hci_power_off(struct work_struct *work) 992 { 993 struct hci_dev *hdev = container_of(work, struct hci_dev, 994 power_off.work); 995 996 BT_DBG("%s", hdev->name); 997 998 hci_dev_do_close(hdev); 999 } 1000 1001 static void hci_error_reset(struct work_struct *work) 1002 { 1003 struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset); 1004 1005 hci_dev_hold(hdev); 1006 BT_DBG("%s", hdev->name); 1007 1008 if (hdev->hw_error) 1009 hdev->hw_error(hdev, hdev->hw_error_code); 1010 else 1011 bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code); 1012 1013 if (!hci_dev_do_close(hdev)) 1014 hci_dev_do_open(hdev); 1015 1016 hci_dev_put(hdev); 1017 } 1018 1019 void hci_uuids_clear(struct hci_dev *hdev) 1020 { 1021 struct bt_uuid *uuid, *tmp; 1022 1023 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) { 1024 list_del(&uuid->list); 1025 kfree(uuid); 1026 } 1027 } 1028 1029 void hci_link_keys_clear(struct hci_dev *hdev) 1030 { 1031 struct link_key *key, *tmp; 1032 1033 list_for_each_entry_safe(key, tmp, &hdev->link_keys, list) { 1034 list_del_rcu(&key->list); 1035 kfree_rcu(key, rcu); 1036 } 1037 } 1038 1039 void hci_smp_ltks_clear(struct hci_dev *hdev) 1040 { 1041 struct smp_ltk *k, *tmp; 1042 1043 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) { 1044 list_del_rcu(&k->list); 1045 kfree_rcu(k, rcu); 1046 } 1047 } 1048 1049 void hci_smp_irks_clear(struct hci_dev *hdev) 1050 { 1051 struct smp_irk *k, *tmp; 1052 1053 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) { 1054 list_del_rcu(&k->list); 1055 kfree_rcu(k, rcu); 1056 } 1057 } 1058 1059 void hci_blocked_keys_clear(struct hci_dev *hdev) 1060 { 1061 struct blocked_key *b, *tmp; 1062 1063 list_for_each_entry_safe(b, tmp, &hdev->blocked_keys, list) { 1064 list_del_rcu(&b->list); 1065 kfree_rcu(b, rcu); 1066 } 1067 } 1068 1069 bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16]) 1070 { 1071 bool blocked = false; 1072 struct blocked_key *b; 1073 1074 rcu_read_lock(); 1075 list_for_each_entry_rcu(b, &hdev->blocked_keys, list) { 1076 if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) { 1077 blocked = true; 1078 break; 1079 } 1080 } 1081 1082 rcu_read_unlock(); 1083 return blocked; 1084 } 1085 1086 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr) 1087 { 1088 struct link_key *k; 1089 1090 rcu_read_lock(); 1091 list_for_each_entry_rcu(k, &hdev->link_keys, list) { 1092 if (bacmp(bdaddr, &k->bdaddr) == 0) { 1093 rcu_read_unlock(); 1094 1095 if (hci_is_blocked_key(hdev, 1096 HCI_BLOCKED_KEY_TYPE_LINKKEY, 1097 k->val)) { 1098 bt_dev_warn_ratelimited(hdev, 1099 "Link key blocked for %pMR", 1100 &k->bdaddr); 1101 return NULL; 1102 } 1103 1104 return k; 1105 } 1106 } 1107 rcu_read_unlock(); 1108 1109 return NULL; 1110 } 1111 1112 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn, 1113 u8 key_type, u8 old_key_type) 1114 { 1115 /* Legacy key */ 1116 if (key_type < 0x03) 1117 return true; 1118 1119 /* Debug keys are insecure so don't store them persistently */ 1120 if (key_type == HCI_LK_DEBUG_COMBINATION) 1121 return false; 1122 1123 /* Changed combination key and there's no previous one */ 1124 if 
(key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff) 1125 return false; 1126 1127 /* Security mode 3 case */ 1128 if (!conn) 1129 return true; 1130 1131 /* BR/EDR key derived using SC from an LE link */ 1132 if (conn->type == LE_LINK) 1133 return true; 1134 1135 /* Neither local nor remote side had no-bonding as requirement */ 1136 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01) 1137 return true; 1138 1139 /* Local side had dedicated bonding as requirement */ 1140 if (conn->auth_type == 0x02 || conn->auth_type == 0x03) 1141 return true; 1142 1143 /* Remote side had dedicated bonding as requirement */ 1144 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) 1145 return true; 1146 1147 /* If none of the above criteria match, then don't store the key 1148 * persistently */ 1149 return false; 1150 } 1151 1152 static u8 ltk_role(u8 type) 1153 { 1154 if (type == SMP_LTK) 1155 return HCI_ROLE_MASTER; 1156 1157 return HCI_ROLE_SLAVE; 1158 } 1159 1160 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, 1161 u8 addr_type, u8 role) 1162 { 1163 struct smp_ltk *k; 1164 1165 rcu_read_lock(); 1166 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) { 1167 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr)) 1168 continue; 1169 1170 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) { 1171 rcu_read_unlock(); 1172 1173 if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK, 1174 k->val)) { 1175 bt_dev_warn_ratelimited(hdev, 1176 "LTK blocked for %pMR", 1177 &k->bdaddr); 1178 return NULL; 1179 } 1180 1181 return k; 1182 } 1183 } 1184 rcu_read_unlock(); 1185 1186 return NULL; 1187 } 1188 1189 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa) 1190 { 1191 struct smp_irk *irk_to_return = NULL; 1192 struct smp_irk *irk; 1193 1194 rcu_read_lock(); 1195 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) { 1196 if (!bacmp(&irk->rpa, rpa)) { 1197 irk_to_return = irk; 1198 goto done; 1199 } 1200 } 1201 1202 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) { 1203 if (smp_irk_matches(hdev, irk->val, rpa)) { 1204 bacpy(&irk->rpa, rpa); 1205 irk_to_return = irk; 1206 goto done; 1207 } 1208 } 1209 1210 done: 1211 if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK, 1212 irk_to_return->val)) { 1213 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR", 1214 &irk_to_return->bdaddr); 1215 irk_to_return = NULL; 1216 } 1217 1218 rcu_read_unlock(); 1219 1220 return irk_to_return; 1221 } 1222 1223 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr, 1224 u8 addr_type) 1225 { 1226 struct smp_irk *irk_to_return = NULL; 1227 struct smp_irk *irk; 1228 1229 /* Identity Address must be public or static random */ 1230 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0) 1231 return NULL; 1232 1233 rcu_read_lock(); 1234 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) { 1235 if (addr_type == irk->addr_type && 1236 bacmp(bdaddr, &irk->bdaddr) == 0) { 1237 irk_to_return = irk; 1238 goto done; 1239 } 1240 } 1241 1242 done: 1243 1244 if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK, 1245 irk_to_return->val)) { 1246 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR", 1247 &irk_to_return->bdaddr); 1248 irk_to_return = NULL; 1249 } 1250 1251 rcu_read_unlock(); 1252 1253 return irk_to_return; 1254 } 1255 1256 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, 1257 bdaddr_t *bdaddr, u8 
*val, u8 type, 1258 u8 pin_len, bool *persistent) 1259 { 1260 struct link_key *key, *old_key; 1261 u8 old_key_type; 1262 1263 old_key = hci_find_link_key(hdev, bdaddr); 1264 if (old_key) { 1265 old_key_type = old_key->type; 1266 key = old_key; 1267 } else { 1268 old_key_type = conn ? conn->key_type : 0xff; 1269 key = kzalloc(sizeof(*key), GFP_KERNEL); 1270 if (!key) 1271 return NULL; 1272 list_add_rcu(&key->list, &hdev->link_keys); 1273 } 1274 1275 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type); 1276 1277 /* Some buggy controller combinations generate a changed 1278 * combination key for legacy pairing even when there's no 1279 * previous key */ 1280 if (type == HCI_LK_CHANGED_COMBINATION && 1281 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) { 1282 type = HCI_LK_COMBINATION; 1283 if (conn) 1284 conn->key_type = type; 1285 } 1286 1287 bacpy(&key->bdaddr, bdaddr); 1288 memcpy(key->val, val, HCI_LINK_KEY_SIZE); 1289 key->pin_len = pin_len; 1290 1291 if (type == HCI_LK_CHANGED_COMBINATION) 1292 key->type = old_key_type; 1293 else 1294 key->type = type; 1295 1296 if (persistent) 1297 *persistent = hci_persistent_key(hdev, conn, type, 1298 old_key_type); 1299 1300 return key; 1301 } 1302 1303 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, 1304 u8 addr_type, u8 type, u8 authenticated, 1305 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand) 1306 { 1307 struct smp_ltk *key, *old_key; 1308 u8 role = ltk_role(type); 1309 1310 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role); 1311 if (old_key) 1312 key = old_key; 1313 else { 1314 key = kzalloc(sizeof(*key), GFP_KERNEL); 1315 if (!key) 1316 return NULL; 1317 list_add_rcu(&key->list, &hdev->long_term_keys); 1318 } 1319 1320 bacpy(&key->bdaddr, bdaddr); 1321 key->bdaddr_type = addr_type; 1322 memcpy(key->val, tk, sizeof(key->val)); 1323 key->authenticated = authenticated; 1324 key->ediv = ediv; 1325 key->rand = rand; 1326 key->enc_size = enc_size; 1327 key->type = type; 1328 1329 return key; 1330 } 1331 1332 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, 1333 u8 addr_type, u8 val[16], bdaddr_t *rpa) 1334 { 1335 struct smp_irk *irk; 1336 1337 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type); 1338 if (!irk) { 1339 irk = kzalloc(sizeof(*irk), GFP_KERNEL); 1340 if (!irk) 1341 return NULL; 1342 1343 bacpy(&irk->bdaddr, bdaddr); 1344 irk->addr_type = addr_type; 1345 1346 list_add_rcu(&irk->list, &hdev->identity_resolving_keys); 1347 } 1348 1349 memcpy(irk->val, val, 16); 1350 bacpy(&irk->rpa, rpa); 1351 1352 return irk; 1353 } 1354 1355 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr) 1356 { 1357 struct link_key *key; 1358 1359 key = hci_find_link_key(hdev, bdaddr); 1360 if (!key) 1361 return -ENOENT; 1362 1363 BT_DBG("%s removing %pMR", hdev->name, bdaddr); 1364 1365 list_del_rcu(&key->list); 1366 kfree_rcu(key, rcu); 1367 1368 return 0; 1369 } 1370 1371 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type) 1372 { 1373 struct smp_ltk *k, *tmp; 1374 int removed = 0; 1375 1376 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) { 1377 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type) 1378 continue; 1379 1380 BT_DBG("%s removing %pMR", hdev->name, bdaddr); 1381 1382 list_del_rcu(&k->list); 1383 kfree_rcu(k, rcu); 1384 removed++; 1385 } 1386 1387 return removed ? 
0 : -ENOENT; 1388 } 1389 1390 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type) 1391 { 1392 struct smp_irk *k, *tmp; 1393 1394 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) { 1395 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type) 1396 continue; 1397 1398 BT_DBG("%s removing %pMR", hdev->name, bdaddr); 1399 1400 list_del_rcu(&k->list); 1401 kfree_rcu(k, rcu); 1402 } 1403 } 1404 1405 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type) 1406 { 1407 struct smp_ltk *k; 1408 struct smp_irk *irk; 1409 u8 addr_type; 1410 1411 if (type == BDADDR_BREDR) { 1412 if (hci_find_link_key(hdev, bdaddr)) 1413 return true; 1414 return false; 1415 } 1416 1417 /* Convert to HCI addr type which struct smp_ltk uses */ 1418 if (type == BDADDR_LE_PUBLIC) 1419 addr_type = ADDR_LE_DEV_PUBLIC; 1420 else 1421 addr_type = ADDR_LE_DEV_RANDOM; 1422 1423 irk = hci_get_irk(hdev, bdaddr, addr_type); 1424 if (irk) { 1425 bdaddr = &irk->bdaddr; 1426 addr_type = irk->addr_type; 1427 } 1428 1429 rcu_read_lock(); 1430 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) { 1431 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) { 1432 rcu_read_unlock(); 1433 return true; 1434 } 1435 } 1436 rcu_read_unlock(); 1437 1438 return false; 1439 } 1440 1441 /* HCI command timer function */ 1442 static void hci_cmd_timeout(struct work_struct *work) 1443 { 1444 struct hci_dev *hdev = container_of(work, struct hci_dev, 1445 cmd_timer.work); 1446 1447 if (hdev->req_skb) { 1448 u16 opcode = hci_skb_opcode(hdev->req_skb); 1449 1450 bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode); 1451 1452 hci_cmd_sync_cancel_sync(hdev, ETIMEDOUT); 1453 } else { 1454 bt_dev_err(hdev, "command tx timeout"); 1455 } 1456 1457 if (hdev->cmd_timeout) 1458 hdev->cmd_timeout(hdev); 1459 1460 atomic_set(&hdev->cmd_cnt, 1); 1461 queue_work(hdev->workqueue, &hdev->cmd_work); 1462 } 1463 1464 /* HCI ncmd timer function */ 1465 static void hci_ncmd_timeout(struct work_struct *work) 1466 { 1467 struct hci_dev *hdev = container_of(work, struct hci_dev, 1468 ncmd_timer.work); 1469 1470 bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0"); 1471 1472 /* During HCI_INIT phase no events can be injected if the ncmd timer 1473 * triggers since the procedure has its own timeout handling. 
1474 */ 1475 if (test_bit(HCI_INIT, &hdev->flags)) 1476 return; 1477 1478 /* This is an irrecoverable state, inject hardware error event */ 1479 hci_reset_dev(hdev); 1480 } 1481 1482 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev, 1483 bdaddr_t *bdaddr, u8 bdaddr_type) 1484 { 1485 struct oob_data *data; 1486 1487 list_for_each_entry(data, &hdev->remote_oob_data, list) { 1488 if (bacmp(bdaddr, &data->bdaddr) != 0) 1489 continue; 1490 if (data->bdaddr_type != bdaddr_type) 1491 continue; 1492 return data; 1493 } 1494 1495 return NULL; 1496 } 1497 1498 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, 1499 u8 bdaddr_type) 1500 { 1501 struct oob_data *data; 1502 1503 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type); 1504 if (!data) 1505 return -ENOENT; 1506 1507 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type); 1508 1509 list_del(&data->list); 1510 kfree(data); 1511 1512 return 0; 1513 } 1514 1515 void hci_remote_oob_data_clear(struct hci_dev *hdev) 1516 { 1517 struct oob_data *data, *n; 1518 1519 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) { 1520 list_del(&data->list); 1521 kfree(data); 1522 } 1523 } 1524 1525 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, 1526 u8 bdaddr_type, u8 *hash192, u8 *rand192, 1527 u8 *hash256, u8 *rand256) 1528 { 1529 struct oob_data *data; 1530 1531 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type); 1532 if (!data) { 1533 data = kmalloc(sizeof(*data), GFP_KERNEL); 1534 if (!data) 1535 return -ENOMEM; 1536 1537 bacpy(&data->bdaddr, bdaddr); 1538 data->bdaddr_type = bdaddr_type; 1539 list_add(&data->list, &hdev->remote_oob_data); 1540 } 1541 1542 if (hash192 && rand192) { 1543 memcpy(data->hash192, hash192, sizeof(data->hash192)); 1544 memcpy(data->rand192, rand192, sizeof(data->rand192)); 1545 if (hash256 && rand256) 1546 data->present = 0x03; 1547 } else { 1548 memset(data->hash192, 0, sizeof(data->hash192)); 1549 memset(data->rand192, 0, sizeof(data->rand192)); 1550 if (hash256 && rand256) 1551 data->present = 0x02; 1552 else 1553 data->present = 0x00; 1554 } 1555 1556 if (hash256 && rand256) { 1557 memcpy(data->hash256, hash256, sizeof(data->hash256)); 1558 memcpy(data->rand256, rand256, sizeof(data->rand256)); 1559 } else { 1560 memset(data->hash256, 0, sizeof(data->hash256)); 1561 memset(data->rand256, 0, sizeof(data->rand256)); 1562 if (hash192 && rand192) 1563 data->present = 0x01; 1564 } 1565 1566 BT_DBG("%s for %pMR", hdev->name, bdaddr); 1567 1568 return 0; 1569 } 1570 1571 /* This function requires the caller holds hdev->lock */ 1572 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance) 1573 { 1574 struct adv_info *adv_instance; 1575 1576 list_for_each_entry(adv_instance, &hdev->adv_instances, list) { 1577 if (adv_instance->instance == instance) 1578 return adv_instance; 1579 } 1580 1581 return NULL; 1582 } 1583 1584 /* This function requires the caller holds hdev->lock */ 1585 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance) 1586 { 1587 struct adv_info *cur_instance; 1588 1589 cur_instance = hci_find_adv_instance(hdev, instance); 1590 if (!cur_instance) 1591 return NULL; 1592 1593 if (cur_instance == list_last_entry(&hdev->adv_instances, 1594 struct adv_info, list)) 1595 return list_first_entry(&hdev->adv_instances, 1596 struct adv_info, list); 1597 else 1598 return list_next_entry(cur_instance, list); 1599 } 1600 1601 /* This function requires the caller holds hdev->lock */ 1602 int 
hci_remove_adv_instance(struct hci_dev *hdev, u8 instance) 1603 { 1604 struct adv_info *adv_instance; 1605 1606 adv_instance = hci_find_adv_instance(hdev, instance); 1607 if (!adv_instance) 1608 return -ENOENT; 1609 1610 BT_DBG("%s removing %dMR", hdev->name, instance); 1611 1612 if (hdev->cur_adv_instance == instance) { 1613 if (hdev->adv_instance_timeout) { 1614 cancel_delayed_work(&hdev->adv_instance_expire); 1615 hdev->adv_instance_timeout = 0; 1616 } 1617 hdev->cur_adv_instance = 0x00; 1618 } 1619 1620 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb); 1621 1622 list_del(&adv_instance->list); 1623 kfree(adv_instance); 1624 1625 hdev->adv_instance_cnt--; 1626 1627 return 0; 1628 } 1629 1630 void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired) 1631 { 1632 struct adv_info *adv_instance, *n; 1633 1634 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) 1635 adv_instance->rpa_expired = rpa_expired; 1636 } 1637 1638 /* This function requires the caller holds hdev->lock */ 1639 void hci_adv_instances_clear(struct hci_dev *hdev) 1640 { 1641 struct adv_info *adv_instance, *n; 1642 1643 if (hdev->adv_instance_timeout) { 1644 cancel_delayed_work(&hdev->adv_instance_expire); 1645 hdev->adv_instance_timeout = 0; 1646 } 1647 1648 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) { 1649 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb); 1650 list_del(&adv_instance->list); 1651 kfree(adv_instance); 1652 } 1653 1654 hdev->adv_instance_cnt = 0; 1655 hdev->cur_adv_instance = 0x00; 1656 } 1657 1658 static void adv_instance_rpa_expired(struct work_struct *work) 1659 { 1660 struct adv_info *adv_instance = container_of(work, struct adv_info, 1661 rpa_expired_cb.work); 1662 1663 BT_DBG(""); 1664 1665 adv_instance->rpa_expired = true; 1666 } 1667 1668 /* This function requires the caller holds hdev->lock */ 1669 struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance, 1670 u32 flags, u16 adv_data_len, u8 *adv_data, 1671 u16 scan_rsp_len, u8 *scan_rsp_data, 1672 u16 timeout, u16 duration, s8 tx_power, 1673 u32 min_interval, u32 max_interval, 1674 u8 mesh_handle) 1675 { 1676 struct adv_info *adv; 1677 1678 adv = hci_find_adv_instance(hdev, instance); 1679 if (adv) { 1680 memset(adv->adv_data, 0, sizeof(adv->adv_data)); 1681 memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data)); 1682 memset(adv->per_adv_data, 0, sizeof(adv->per_adv_data)); 1683 } else { 1684 if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets || 1685 instance < 1 || instance > hdev->le_num_of_adv_sets + 1) 1686 return ERR_PTR(-EOVERFLOW); 1687 1688 adv = kzalloc(sizeof(*adv), GFP_KERNEL); 1689 if (!adv) 1690 return ERR_PTR(-ENOMEM); 1691 1692 adv->pending = true; 1693 adv->instance = instance; 1694 list_add(&adv->list, &hdev->adv_instances); 1695 hdev->adv_instance_cnt++; 1696 } 1697 1698 adv->flags = flags; 1699 adv->min_interval = min_interval; 1700 adv->max_interval = max_interval; 1701 adv->tx_power = tx_power; 1702 /* Defining a mesh_handle changes the timing units to ms, 1703 * rather than seconds, and ties the instance to the requested 1704 * mesh_tx queue. 
1705 */ 1706 adv->mesh = mesh_handle; 1707 1708 hci_set_adv_instance_data(hdev, instance, adv_data_len, adv_data, 1709 scan_rsp_len, scan_rsp_data); 1710 1711 adv->timeout = timeout; 1712 adv->remaining_time = timeout; 1713 1714 if (duration == 0) 1715 adv->duration = hdev->def_multi_adv_rotation_duration; 1716 else 1717 adv->duration = duration; 1718 1719 INIT_DELAYED_WORK(&adv->rpa_expired_cb, adv_instance_rpa_expired); 1720 1721 BT_DBG("%s for %dMR", hdev->name, instance); 1722 1723 return adv; 1724 } 1725 1726 /* This function requires the caller holds hdev->lock */ 1727 struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance, 1728 u32 flags, u8 data_len, u8 *data, 1729 u32 min_interval, u32 max_interval) 1730 { 1731 struct adv_info *adv; 1732 1733 adv = hci_add_adv_instance(hdev, instance, flags, 0, NULL, 0, NULL, 1734 0, 0, HCI_ADV_TX_POWER_NO_PREFERENCE, 1735 min_interval, max_interval, 0); 1736 if (IS_ERR(adv)) 1737 return adv; 1738 1739 adv->periodic = true; 1740 adv->per_adv_data_len = data_len; 1741 1742 if (data) 1743 memcpy(adv->per_adv_data, data, data_len); 1744 1745 return adv; 1746 } 1747 1748 /* This function requires the caller holds hdev->lock */ 1749 int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance, 1750 u16 adv_data_len, u8 *adv_data, 1751 u16 scan_rsp_len, u8 *scan_rsp_data) 1752 { 1753 struct adv_info *adv; 1754 1755 adv = hci_find_adv_instance(hdev, instance); 1756 1757 /* If advertisement doesn't exist, we can't modify its data */ 1758 if (!adv) 1759 return -ENOENT; 1760 1761 if (adv_data_len && ADV_DATA_CMP(adv, adv_data, adv_data_len)) { 1762 memset(adv->adv_data, 0, sizeof(adv->adv_data)); 1763 memcpy(adv->adv_data, adv_data, adv_data_len); 1764 adv->adv_data_len = adv_data_len; 1765 adv->adv_data_changed = true; 1766 } 1767 1768 if (scan_rsp_len && SCAN_RSP_CMP(adv, scan_rsp_data, scan_rsp_len)) { 1769 memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data)); 1770 memcpy(adv->scan_rsp_data, scan_rsp_data, scan_rsp_len); 1771 adv->scan_rsp_len = scan_rsp_len; 1772 adv->scan_rsp_changed = true; 1773 } 1774 1775 /* Mark as changed if there are flags which would affect it */ 1776 if (((adv->flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) || 1777 adv->flags & MGMT_ADV_FLAG_LOCAL_NAME) 1778 adv->scan_rsp_changed = true; 1779 1780 return 0; 1781 } 1782 1783 /* This function requires the caller holds hdev->lock */ 1784 u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance) 1785 { 1786 u32 flags; 1787 struct adv_info *adv; 1788 1789 if (instance == 0x00) { 1790 /* Instance 0 always manages the "Tx Power" and "Flags" 1791 * fields 1792 */ 1793 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS; 1794 1795 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting 1796 * corresponds to the "connectable" instance flag. 1797 */ 1798 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) 1799 flags |= MGMT_ADV_FLAG_CONNECTABLE; 1800 1801 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) 1802 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV; 1803 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) 1804 flags |= MGMT_ADV_FLAG_DISCOV; 1805 1806 return flags; 1807 } 1808 1809 adv = hci_find_adv_instance(hdev, instance); 1810 1811 /* Return 0 when we got an invalid instance identifier. 
*/ 1812 if (!adv) 1813 return 0; 1814 1815 return adv->flags; 1816 } 1817 1818 bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance) 1819 { 1820 struct adv_info *adv; 1821 1822 /* Instance 0x00 always set local name */ 1823 if (instance == 0x00) 1824 return true; 1825 1826 adv = hci_find_adv_instance(hdev, instance); 1827 if (!adv) 1828 return false; 1829 1830 if (adv->flags & MGMT_ADV_FLAG_APPEARANCE || 1831 adv->flags & MGMT_ADV_FLAG_LOCAL_NAME) 1832 return true; 1833 1834 return adv->scan_rsp_len ? true : false; 1835 } 1836 1837 /* This function requires the caller holds hdev->lock */ 1838 void hci_adv_monitors_clear(struct hci_dev *hdev) 1839 { 1840 struct adv_monitor *monitor; 1841 int handle; 1842 1843 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle) 1844 hci_free_adv_monitor(hdev, monitor); 1845 1846 idr_destroy(&hdev->adv_monitors_idr); 1847 } 1848 1849 /* Frees the monitor structure and do some bookkeepings. 1850 * This function requires the caller holds hdev->lock. 1851 */ 1852 void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor) 1853 { 1854 struct adv_pattern *pattern; 1855 struct adv_pattern *tmp; 1856 1857 if (!monitor) 1858 return; 1859 1860 list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) { 1861 list_del(&pattern->list); 1862 kfree(pattern); 1863 } 1864 1865 if (monitor->handle) 1866 idr_remove(&hdev->adv_monitors_idr, monitor->handle); 1867 1868 if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) { 1869 hdev->adv_monitors_cnt--; 1870 mgmt_adv_monitor_removed(hdev, monitor->handle); 1871 } 1872 1873 kfree(monitor); 1874 } 1875 1876 /* Assigns handle to a monitor, and if offloading is supported and power is on, 1877 * also attempts to forward the request to the controller. 1878 * This function requires the caller holds hci_req_sync_lock. 1879 */ 1880 int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor) 1881 { 1882 int min, max, handle; 1883 int status = 0; 1884 1885 if (!monitor) 1886 return -EINVAL; 1887 1888 hci_dev_lock(hdev); 1889 1890 min = HCI_MIN_ADV_MONITOR_HANDLE; 1891 max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES; 1892 handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max, 1893 GFP_KERNEL); 1894 1895 hci_dev_unlock(hdev); 1896 1897 if (handle < 0) 1898 return handle; 1899 1900 monitor->handle = handle; 1901 1902 if (!hdev_is_powered(hdev)) 1903 return status; 1904 1905 switch (hci_get_adv_monitor_offload_ext(hdev)) { 1906 case HCI_ADV_MONITOR_EXT_NONE: 1907 bt_dev_dbg(hdev, "add monitor %d status %d", 1908 monitor->handle, status); 1909 /* Message was not forwarded to controller - not an error */ 1910 break; 1911 1912 case HCI_ADV_MONITOR_EXT_MSFT: 1913 status = msft_add_monitor_pattern(hdev, monitor); 1914 bt_dev_dbg(hdev, "add monitor %d msft status %d", 1915 handle, status); 1916 break; 1917 } 1918 1919 return status; 1920 } 1921 1922 /* Attempts to tell the controller and free the monitor. If somehow the 1923 * controller doesn't have a corresponding handle, remove anyway. 1924 * This function requires the caller holds hci_req_sync_lock. 
1925 */ 1926 static int hci_remove_adv_monitor(struct hci_dev *hdev, 1927 struct adv_monitor *monitor) 1928 { 1929 int status = 0; 1930 int handle; 1931 1932 switch (hci_get_adv_monitor_offload_ext(hdev)) { 1933 case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */ 1934 bt_dev_dbg(hdev, "remove monitor %d status %d", 1935 monitor->handle, status); 1936 goto free_monitor; 1937 1938 case HCI_ADV_MONITOR_EXT_MSFT: 1939 handle = monitor->handle; 1940 status = msft_remove_monitor(hdev, monitor); 1941 bt_dev_dbg(hdev, "remove monitor %d msft status %d", 1942 handle, status); 1943 break; 1944 } 1945 1946 /* In case no matching handle registered, just free the monitor */ 1947 if (status == -ENOENT) 1948 goto free_monitor; 1949 1950 return status; 1951 1952 free_monitor: 1953 if (status == -ENOENT) 1954 bt_dev_warn(hdev, "Removing monitor with no matching handle %d", 1955 monitor->handle); 1956 hci_free_adv_monitor(hdev, monitor); 1957 1958 return status; 1959 } 1960 1961 /* This function requires the caller holds hci_req_sync_lock */ 1962 int hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle) 1963 { 1964 struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle); 1965 1966 if (!monitor) 1967 return -EINVAL; 1968 1969 return hci_remove_adv_monitor(hdev, monitor); 1970 } 1971 1972 /* This function requires the caller holds hci_req_sync_lock */ 1973 int hci_remove_all_adv_monitor(struct hci_dev *hdev) 1974 { 1975 struct adv_monitor *monitor; 1976 int idr_next_id = 0; 1977 int status = 0; 1978 1979 while (1) { 1980 monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id); 1981 if (!monitor) 1982 break; 1983 1984 status = hci_remove_adv_monitor(hdev, monitor); 1985 if (status) 1986 return status; 1987 1988 idr_next_id++; 1989 } 1990 1991 return status; 1992 } 1993 1994 /* This function requires the caller holds hdev->lock */ 1995 bool hci_is_adv_monitoring(struct hci_dev *hdev) 1996 { 1997 return !idr_is_empty(&hdev->adv_monitors_idr); 1998 } 1999 2000 int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev) 2001 { 2002 if (msft_monitor_supported(hdev)) 2003 return HCI_ADV_MONITOR_EXT_MSFT; 2004 2005 return HCI_ADV_MONITOR_EXT_NONE; 2006 } 2007 2008 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list, 2009 bdaddr_t *bdaddr, u8 type) 2010 { 2011 struct bdaddr_list *b; 2012 2013 list_for_each_entry(b, bdaddr_list, list) { 2014 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type) 2015 return b; 2016 } 2017 2018 return NULL; 2019 } 2020 2021 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk( 2022 struct list_head *bdaddr_list, bdaddr_t *bdaddr, 2023 u8 type) 2024 { 2025 struct bdaddr_list_with_irk *b; 2026 2027 list_for_each_entry(b, bdaddr_list, list) { 2028 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type) 2029 return b; 2030 } 2031 2032 return NULL; 2033 } 2034 2035 struct bdaddr_list_with_flags * 2036 hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list, 2037 bdaddr_t *bdaddr, u8 type) 2038 { 2039 struct bdaddr_list_with_flags *b; 2040 2041 list_for_each_entry(b, bdaddr_list, list) { 2042 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type) 2043 return b; 2044 } 2045 2046 return NULL; 2047 } 2048 2049 void hci_bdaddr_list_clear(struct list_head *bdaddr_list) 2050 { 2051 struct bdaddr_list *b, *n; 2052 2053 list_for_each_entry_safe(b, n, bdaddr_list, list) { 2054 list_del(&b->list); 2055 kfree(b); 2056 } 2057 } 2058 2059 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type) 
2060 { 2061 struct bdaddr_list *entry; 2062 2063 if (!bacmp(bdaddr, BDADDR_ANY)) 2064 return -EBADF; 2065 2066 if (hci_bdaddr_list_lookup(list, bdaddr, type)) 2067 return -EEXIST; 2068 2069 entry = kzalloc(sizeof(*entry), GFP_KERNEL); 2070 if (!entry) 2071 return -ENOMEM; 2072 2073 bacpy(&entry->bdaddr, bdaddr); 2074 entry->bdaddr_type = type; 2075 2076 list_add(&entry->list, list); 2077 2078 return 0; 2079 } 2080 2081 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr, 2082 u8 type, u8 *peer_irk, u8 *local_irk) 2083 { 2084 struct bdaddr_list_with_irk *entry; 2085 2086 if (!bacmp(bdaddr, BDADDR_ANY)) 2087 return -EBADF; 2088 2089 if (hci_bdaddr_list_lookup(list, bdaddr, type)) 2090 return -EEXIST; 2091 2092 entry = kzalloc(sizeof(*entry), GFP_KERNEL); 2093 if (!entry) 2094 return -ENOMEM; 2095 2096 bacpy(&entry->bdaddr, bdaddr); 2097 entry->bdaddr_type = type; 2098 2099 if (peer_irk) 2100 memcpy(entry->peer_irk, peer_irk, 16); 2101 2102 if (local_irk) 2103 memcpy(entry->local_irk, local_irk, 16); 2104 2105 list_add(&entry->list, list); 2106 2107 return 0; 2108 } 2109 2110 int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr, 2111 u8 type, u32 flags) 2112 { 2113 struct bdaddr_list_with_flags *entry; 2114 2115 if (!bacmp(bdaddr, BDADDR_ANY)) 2116 return -EBADF; 2117 2118 if (hci_bdaddr_list_lookup(list, bdaddr, type)) 2119 return -EEXIST; 2120 2121 entry = kzalloc(sizeof(*entry), GFP_KERNEL); 2122 if (!entry) 2123 return -ENOMEM; 2124 2125 bacpy(&entry->bdaddr, bdaddr); 2126 entry->bdaddr_type = type; 2127 entry->flags = flags; 2128 2129 list_add(&entry->list, list); 2130 2131 return 0; 2132 } 2133 2134 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type) 2135 { 2136 struct bdaddr_list *entry; 2137 2138 if (!bacmp(bdaddr, BDADDR_ANY)) { 2139 hci_bdaddr_list_clear(list); 2140 return 0; 2141 } 2142 2143 entry = hci_bdaddr_list_lookup(list, bdaddr, type); 2144 if (!entry) 2145 return -ENOENT; 2146 2147 list_del(&entry->list); 2148 kfree(entry); 2149 2150 return 0; 2151 } 2152 2153 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr, 2154 u8 type) 2155 { 2156 struct bdaddr_list_with_irk *entry; 2157 2158 if (!bacmp(bdaddr, BDADDR_ANY)) { 2159 hci_bdaddr_list_clear(list); 2160 return 0; 2161 } 2162 2163 entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type); 2164 if (!entry) 2165 return -ENOENT; 2166 2167 list_del(&entry->list); 2168 kfree(entry); 2169 2170 return 0; 2171 } 2172 2173 int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr, 2174 u8 type) 2175 { 2176 struct bdaddr_list_with_flags *entry; 2177 2178 if (!bacmp(bdaddr, BDADDR_ANY)) { 2179 hci_bdaddr_list_clear(list); 2180 return 0; 2181 } 2182 2183 entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type); 2184 if (!entry) 2185 return -ENOENT; 2186 2187 list_del(&entry->list); 2188 kfree(entry); 2189 2190 return 0; 2191 } 2192 2193 /* This function requires the caller holds hdev->lock */ 2194 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev, 2195 bdaddr_t *addr, u8 addr_type) 2196 { 2197 struct hci_conn_params *params; 2198 2199 list_for_each_entry(params, &hdev->le_conn_params, list) { 2200 if (bacmp(¶ms->addr, addr) == 0 && 2201 params->addr_type == addr_type) { 2202 return params; 2203 } 2204 } 2205 2206 return NULL; 2207 } 2208 2209 /* This function requires the caller holds hdev->lock or rcu_read_lock */ 2210 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list, 2211 
bdaddr_t *addr, u8 addr_type) 2212 { 2213 struct hci_conn_params *param; 2214 2215 rcu_read_lock(); 2216 2217 list_for_each_entry_rcu(param, list, action) { 2218 if (bacmp(&param->addr, addr) == 0 && 2219 param->addr_type == addr_type) { 2220 rcu_read_unlock(); 2221 return param; 2222 } 2223 } 2224 2225 rcu_read_unlock(); 2226 2227 return NULL; 2228 } 2229 2230 /* This function requires the caller holds hdev->lock */ 2231 void hci_pend_le_list_del_init(struct hci_conn_params *param) 2232 { 2233 if (list_empty(&param->action)) 2234 return; 2235 2236 list_del_rcu(&param->action); 2237 synchronize_rcu(); 2238 INIT_LIST_HEAD(&param->action); 2239 } 2240 2241 /* This function requires the caller holds hdev->lock */ 2242 void hci_pend_le_list_add(struct hci_conn_params *param, 2243 struct list_head *list) 2244 { 2245 list_add_rcu(&param->action, list); 2246 } 2247 2248 /* This function requires the caller holds hdev->lock */ 2249 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev, 2250 bdaddr_t *addr, u8 addr_type) 2251 { 2252 struct hci_conn_params *params; 2253 2254 params = hci_conn_params_lookup(hdev, addr, addr_type); 2255 if (params) 2256 return params; 2257 2258 params = kzalloc(sizeof(*params), GFP_KERNEL); 2259 if (!params) { 2260 bt_dev_err(hdev, "out of memory"); 2261 return NULL; 2262 } 2263 2264 bacpy(&params->addr, addr); 2265 params->addr_type = addr_type; 2266 2267 list_add(&params->list, &hdev->le_conn_params); 2268 INIT_LIST_HEAD(&params->action); 2269 2270 params->conn_min_interval = hdev->le_conn_min_interval; 2271 params->conn_max_interval = hdev->le_conn_max_interval; 2272 params->conn_latency = hdev->le_conn_latency; 2273 params->supervision_timeout = hdev->le_supv_timeout; 2274 params->auto_connect = HCI_AUTO_CONN_DISABLED; 2275 2276 BT_DBG("addr %pMR (type %u)", addr, addr_type); 2277 2278 return params; 2279 } 2280 2281 void hci_conn_params_free(struct hci_conn_params *params) 2282 { 2283 hci_pend_le_list_del_init(params); 2284 2285 if (params->conn) { 2286 hci_conn_drop(params->conn); 2287 hci_conn_put(params->conn); 2288 } 2289 2290 list_del(&params->list); 2291 kfree(params); 2292 } 2293 2294 /* This function requires the caller holds hdev->lock */ 2295 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type) 2296 { 2297 struct hci_conn_params *params; 2298 2299 params = hci_conn_params_lookup(hdev, addr, addr_type); 2300 if (!params) 2301 return; 2302 2303 hci_conn_params_free(params); 2304 2305 hci_update_passive_scan(hdev); 2306 2307 BT_DBG("addr %pMR (type %u)", addr, addr_type); 2308 } 2309 2310 /* This function requires the caller holds hdev->lock */ 2311 void hci_conn_params_clear_disabled(struct hci_dev *hdev) 2312 { 2313 struct hci_conn_params *params, *tmp; 2314 2315 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) { 2316 if (params->auto_connect != HCI_AUTO_CONN_DISABLED) 2317 continue; 2318 2319 /* If trying to establish one time connection to disabled 2320 * device, leave the params, but mark them as just once.
2321 */ 2322 if (params->explicit_connect) { 2323 params->auto_connect = HCI_AUTO_CONN_EXPLICIT; 2324 continue; 2325 } 2326 2327 hci_conn_params_free(params); 2328 } 2329 2330 BT_DBG("All LE disabled connection parameters were removed"); 2331 } 2332 2333 /* This function requires the caller holds hdev->lock */ 2334 static void hci_conn_params_clear_all(struct hci_dev *hdev) 2335 { 2336 struct hci_conn_params *params, *tmp; 2337 2338 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) 2339 hci_conn_params_free(params); 2340 2341 BT_DBG("All LE connection parameters were removed"); 2342 } 2343 2344 /* Copy the Identity Address of the controller. 2345 * 2346 * If the controller has a public BD_ADDR, then by default use that one. 2347 * If this is a LE only controller without a public address, default to 2348 * the static random address. 2349 * 2350 * For debugging purposes it is possible to force controllers with a 2351 * public address to use the static random address instead. 2352 * 2353 * In case BR/EDR has been disabled on a dual-mode controller and 2354 * userspace has configured a static address, then that address 2355 * becomes the identity address instead of the public BR/EDR address. 2356 */ 2357 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr, 2358 u8 *bdaddr_type) 2359 { 2360 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) || 2361 !bacmp(&hdev->bdaddr, BDADDR_ANY) || 2362 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) && 2363 bacmp(&hdev->static_addr, BDADDR_ANY))) { 2364 bacpy(bdaddr, &hdev->static_addr); 2365 *bdaddr_type = ADDR_LE_DEV_RANDOM; 2366 } else { 2367 bacpy(bdaddr, &hdev->bdaddr); 2368 *bdaddr_type = ADDR_LE_DEV_PUBLIC; 2369 } 2370 } 2371 2372 static void hci_clear_wake_reason(struct hci_dev *hdev) 2373 { 2374 hci_dev_lock(hdev); 2375 2376 hdev->wake_reason = 0; 2377 bacpy(&hdev->wake_addr, BDADDR_ANY); 2378 hdev->wake_addr_type = 0; 2379 2380 hci_dev_unlock(hdev); 2381 } 2382 2383 static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action, 2384 void *data) 2385 { 2386 struct hci_dev *hdev = 2387 container_of(nb, struct hci_dev, suspend_notifier); 2388 int ret = 0; 2389 2390 /* Userspace has full control of this device. Do nothing. */ 2391 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) 2392 return NOTIFY_DONE; 2393 2394 /* To avoid a potential race with hci_unregister_dev. */ 2395 hci_dev_hold(hdev); 2396 2397 if (action == PM_SUSPEND_PREPARE) 2398 ret = hci_suspend_dev(hdev); 2399 else if (action == PM_POST_SUSPEND) 2400 ret = hci_resume_dev(hdev); 2401 2402 if (ret) 2403 bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d", 2404 action, ret); 2405 2406 hci_dev_put(hdev); 2407 return NOTIFY_DONE; 2408 } 2409 2410 /* Alloc HCI device */ 2411 struct hci_dev *hci_alloc_dev_priv(int sizeof_priv) 2412 { 2413 struct hci_dev *hdev; 2414 unsigned int alloc_size; 2415 2416 alloc_size = sizeof(*hdev); 2417 if (sizeof_priv) { 2418 /* Fixme: May need ALIGN-ment? 
*/ 2419 alloc_size += sizeof_priv; 2420 } 2421 2422 hdev = kzalloc(alloc_size, GFP_KERNEL); 2423 if (!hdev) 2424 return NULL; 2425 2426 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1); 2427 hdev->esco_type = (ESCO_HV1); 2428 hdev->link_mode = (HCI_LM_ACCEPT); 2429 hdev->num_iac = 0x01; /* One IAC support is mandatory */ 2430 hdev->io_capability = 0x03; /* No Input No Output */ 2431 hdev->manufacturer = 0xffff; /* Default to internal use */ 2432 hdev->inq_tx_power = HCI_TX_POWER_INVALID; 2433 hdev->adv_tx_power = HCI_TX_POWER_INVALID; 2434 hdev->adv_instance_cnt = 0; 2435 hdev->cur_adv_instance = 0x00; 2436 hdev->adv_instance_timeout = 0; 2437 2438 hdev->advmon_allowlist_duration = 300; 2439 hdev->advmon_no_filter_duration = 500; 2440 hdev->enable_advmon_interleave_scan = 0x00; /* Default to disable */ 2441 2442 hdev->sniff_max_interval = 800; 2443 hdev->sniff_min_interval = 80; 2444 2445 hdev->le_adv_channel_map = 0x07; 2446 hdev->le_adv_min_interval = 0x0800; 2447 hdev->le_adv_max_interval = 0x0800; 2448 hdev->le_scan_interval = 0x0060; 2449 hdev->le_scan_window = 0x0030; 2450 hdev->le_scan_int_suspend = 0x0400; 2451 hdev->le_scan_window_suspend = 0x0012; 2452 hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT; 2453 hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN; 2454 hdev->le_scan_int_adv_monitor = 0x0060; 2455 hdev->le_scan_window_adv_monitor = 0x0030; 2456 hdev->le_scan_int_connect = 0x0060; 2457 hdev->le_scan_window_connect = 0x0060; 2458 hdev->le_conn_min_interval = 0x0018; 2459 hdev->le_conn_max_interval = 0x0028; 2460 hdev->le_conn_latency = 0x0000; 2461 hdev->le_supv_timeout = 0x002a; 2462 hdev->le_def_tx_len = 0x001b; 2463 hdev->le_def_tx_time = 0x0148; 2464 hdev->le_max_tx_len = 0x001b; 2465 hdev->le_max_tx_time = 0x0148; 2466 hdev->le_max_rx_len = 0x001b; 2467 hdev->le_max_rx_time = 0x0148; 2468 hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE; 2469 hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE; 2470 hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M; 2471 hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M; 2472 hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES; 2473 hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION; 2474 hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT; 2475 hdev->min_le_tx_power = HCI_TX_POWER_INVALID; 2476 hdev->max_le_tx_power = HCI_TX_POWER_INVALID; 2477 2478 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT; 2479 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT; 2480 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE; 2481 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE; 2482 hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT; 2483 hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE; 2484 2485 /* default 1.28 sec page scan */ 2486 hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD; 2487 hdev->def_page_scan_int = 0x0800; 2488 hdev->def_page_scan_window = 0x0012; 2489 2490 mutex_init(&hdev->lock); 2491 mutex_init(&hdev->req_lock); 2492 2493 ida_init(&hdev->unset_handle_ida); 2494 2495 INIT_LIST_HEAD(&hdev->mesh_pending); 2496 INIT_LIST_HEAD(&hdev->mgmt_pending); 2497 INIT_LIST_HEAD(&hdev->reject_list); 2498 INIT_LIST_HEAD(&hdev->accept_list); 2499 INIT_LIST_HEAD(&hdev->uuids); 2500 INIT_LIST_HEAD(&hdev->link_keys); 2501 INIT_LIST_HEAD(&hdev->long_term_keys); 2502 INIT_LIST_HEAD(&hdev->identity_resolving_keys); 2503 INIT_LIST_HEAD(&hdev->remote_oob_data); 2504 INIT_LIST_HEAD(&hdev->le_accept_list); 2505 INIT_LIST_HEAD(&hdev->le_resolv_list); 2506 INIT_LIST_HEAD(&hdev->le_conn_params); 2507 INIT_LIST_HEAD(&hdev->pend_le_conns); 2508 
INIT_LIST_HEAD(&hdev->pend_le_reports); 2509 INIT_LIST_HEAD(&hdev->conn_hash.list); 2510 INIT_LIST_HEAD(&hdev->adv_instances); 2511 INIT_LIST_HEAD(&hdev->blocked_keys); 2512 INIT_LIST_HEAD(&hdev->monitored_devices); 2513 2514 INIT_LIST_HEAD(&hdev->local_codecs); 2515 INIT_WORK(&hdev->rx_work, hci_rx_work); 2516 INIT_WORK(&hdev->cmd_work, hci_cmd_work); 2517 INIT_WORK(&hdev->tx_work, hci_tx_work); 2518 INIT_WORK(&hdev->power_on, hci_power_on); 2519 INIT_WORK(&hdev->error_reset, hci_error_reset); 2520 2521 hci_cmd_sync_init(hdev); 2522 2523 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off); 2524 2525 skb_queue_head_init(&hdev->rx_q); 2526 skb_queue_head_init(&hdev->cmd_q); 2527 skb_queue_head_init(&hdev->raw_q); 2528 2529 init_waitqueue_head(&hdev->req_wait_q); 2530 2531 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout); 2532 INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout); 2533 2534 hci_devcd_setup(hdev); 2535 hci_request_setup(hdev); 2536 2537 hci_init_sysfs(hdev); 2538 discovery_init(hdev); 2539 2540 return hdev; 2541 } 2542 EXPORT_SYMBOL(hci_alloc_dev_priv); 2543 2544 /* Free HCI device */ 2545 void hci_free_dev(struct hci_dev *hdev) 2546 { 2547 /* will free via device release */ 2548 put_device(&hdev->dev); 2549 } 2550 EXPORT_SYMBOL(hci_free_dev); 2551 2552 /* Register HCI device */ 2553 int hci_register_dev(struct hci_dev *hdev) 2554 { 2555 int id, error; 2556 2557 if (!hdev->open || !hdev->close || !hdev->send) 2558 return -EINVAL; 2559 2560 id = ida_alloc_max(&hci_index_ida, HCI_MAX_ID - 1, GFP_KERNEL); 2561 if (id < 0) 2562 return id; 2563 2564 error = dev_set_name(&hdev->dev, "hci%u", id); 2565 if (error) 2566 return error; 2567 2568 hdev->name = dev_name(&hdev->dev); 2569 hdev->id = id; 2570 2571 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); 2572 2573 hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name); 2574 if (!hdev->workqueue) { 2575 error = -ENOMEM; 2576 goto err; 2577 } 2578 2579 hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, 2580 hdev->name); 2581 if (!hdev->req_workqueue) { 2582 destroy_workqueue(hdev->workqueue); 2583 error = -ENOMEM; 2584 goto err; 2585 } 2586 2587 if (!IS_ERR_OR_NULL(bt_debugfs)) 2588 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs); 2589 2590 error = device_add(&hdev->dev); 2591 if (error < 0) 2592 goto err_wqueue; 2593 2594 hci_leds_init(hdev); 2595 2596 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev, 2597 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, 2598 hdev); 2599 if (hdev->rfkill) { 2600 if (rfkill_register(hdev->rfkill) < 0) { 2601 rfkill_destroy(hdev->rfkill); 2602 hdev->rfkill = NULL; 2603 } 2604 } 2605 2606 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) 2607 hci_dev_set_flag(hdev, HCI_RFKILLED); 2608 2609 hci_dev_set_flag(hdev, HCI_SETUP); 2610 hci_dev_set_flag(hdev, HCI_AUTO_OFF); 2611 2612 /* Assume BR/EDR support until proven otherwise (such as 2613 * through reading supported features during init. 2614 */ 2615 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED); 2616 2617 write_lock(&hci_dev_list_lock); 2618 list_add(&hdev->list, &hci_dev_list); 2619 write_unlock(&hci_dev_list_lock); 2620 2621 /* Devices that are marked for raw-only usage are unconfigured 2622 * and should not be included in normal operation. 2623 */ 2624 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) 2625 hci_dev_set_flag(hdev, HCI_UNCONFIGURED); 2626 2627 /* Mark Remote Wakeup connection flag as supported if driver has wakeup 2628 * callback. 
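 * As an illustration (xx_wakeup is a hypothetical driver callback name): a
 * driver that can wake the host would assign hdev->wakeup = xx_wakeup before
 * calling hci_register_dev(), and only then is the flag below advertised.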
2629 */ 2630 if (hdev->wakeup) 2631 hdev->conn_flags |= HCI_CONN_FLAG_REMOTE_WAKEUP; 2632 2633 hci_sock_dev_event(hdev, HCI_DEV_REG); 2634 hci_dev_hold(hdev); 2635 2636 error = hci_register_suspend_notifier(hdev); 2637 if (error) 2638 BT_WARN("register suspend notifier failed error:%d\n", error); 2639 2640 queue_work(hdev->req_workqueue, &hdev->power_on); 2641 2642 idr_init(&hdev->adv_monitors_idr); 2643 msft_register(hdev); 2644 2645 return id; 2646 2647 err_wqueue: 2648 debugfs_remove_recursive(hdev->debugfs); 2649 destroy_workqueue(hdev->workqueue); 2650 destroy_workqueue(hdev->req_workqueue); 2651 err: 2652 ida_free(&hci_index_ida, hdev->id); 2653 2654 return error; 2655 } 2656 EXPORT_SYMBOL(hci_register_dev); 2657 2658 /* Unregister HCI device */ 2659 void hci_unregister_dev(struct hci_dev *hdev) 2660 { 2661 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); 2662 2663 mutex_lock(&hdev->unregister_lock); 2664 hci_dev_set_flag(hdev, HCI_UNREGISTER); 2665 mutex_unlock(&hdev->unregister_lock); 2666 2667 write_lock(&hci_dev_list_lock); 2668 list_del(&hdev->list); 2669 write_unlock(&hci_dev_list_lock); 2670 2671 cancel_work_sync(&hdev->rx_work); 2672 cancel_work_sync(&hdev->cmd_work); 2673 cancel_work_sync(&hdev->tx_work); 2674 cancel_work_sync(&hdev->power_on); 2675 cancel_work_sync(&hdev->error_reset); 2676 2677 hci_cmd_sync_clear(hdev); 2678 2679 hci_unregister_suspend_notifier(hdev); 2680 2681 hci_dev_do_close(hdev); 2682 2683 if (!test_bit(HCI_INIT, &hdev->flags) && 2684 !hci_dev_test_flag(hdev, HCI_SETUP) && 2685 !hci_dev_test_flag(hdev, HCI_CONFIG)) { 2686 hci_dev_lock(hdev); 2687 mgmt_index_removed(hdev); 2688 hci_dev_unlock(hdev); 2689 } 2690 2691 /* mgmt_index_removed should take care of emptying the 2692 * pending list */ 2693 BUG_ON(!list_empty(&hdev->mgmt_pending)); 2694 2695 hci_sock_dev_event(hdev, HCI_DEV_UNREG); 2696 2697 if (hdev->rfkill) { 2698 rfkill_unregister(hdev->rfkill); 2699 rfkill_destroy(hdev->rfkill); 2700 } 2701 2702 device_del(&hdev->dev); 2703 /* Actual cleanup is deferred until hci_release_dev(). 
*/ 2704 hci_dev_put(hdev); 2705 } 2706 EXPORT_SYMBOL(hci_unregister_dev); 2707 2708 /* Release HCI device */ 2709 void hci_release_dev(struct hci_dev *hdev) 2710 { 2711 debugfs_remove_recursive(hdev->debugfs); 2712 kfree_const(hdev->hw_info); 2713 kfree_const(hdev->fw_info); 2714 2715 destroy_workqueue(hdev->workqueue); 2716 destroy_workqueue(hdev->req_workqueue); 2717 2718 hci_dev_lock(hdev); 2719 hci_bdaddr_list_clear(&hdev->reject_list); 2720 hci_bdaddr_list_clear(&hdev->accept_list); 2721 hci_uuids_clear(hdev); 2722 hci_link_keys_clear(hdev); 2723 hci_smp_ltks_clear(hdev); 2724 hci_smp_irks_clear(hdev); 2725 hci_remote_oob_data_clear(hdev); 2726 hci_adv_instances_clear(hdev); 2727 hci_adv_monitors_clear(hdev); 2728 hci_bdaddr_list_clear(&hdev->le_accept_list); 2729 hci_bdaddr_list_clear(&hdev->le_resolv_list); 2730 hci_conn_params_clear_all(hdev); 2731 hci_discovery_filter_clear(hdev); 2732 hci_blocked_keys_clear(hdev); 2733 hci_codec_list_clear(&hdev->local_codecs); 2734 msft_release(hdev); 2735 hci_dev_unlock(hdev); 2736 2737 ida_destroy(&hdev->unset_handle_ida); 2738 ida_free(&hci_index_ida, hdev->id); 2739 kfree_skb(hdev->sent_cmd); 2740 kfree_skb(hdev->req_skb); 2741 kfree_skb(hdev->recv_event); 2742 kfree(hdev); 2743 } 2744 EXPORT_SYMBOL(hci_release_dev); 2745 2746 int hci_register_suspend_notifier(struct hci_dev *hdev) 2747 { 2748 int ret = 0; 2749 2750 if (!hdev->suspend_notifier.notifier_call && 2751 !test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) { 2752 hdev->suspend_notifier.notifier_call = hci_suspend_notifier; 2753 ret = register_pm_notifier(&hdev->suspend_notifier); 2754 } 2755 2756 return ret; 2757 } 2758 2759 int hci_unregister_suspend_notifier(struct hci_dev *hdev) 2760 { 2761 int ret = 0; 2762 2763 if (hdev->suspend_notifier.notifier_call) { 2764 ret = unregister_pm_notifier(&hdev->suspend_notifier); 2765 if (!ret) 2766 hdev->suspend_notifier.notifier_call = NULL; 2767 } 2768 2769 return ret; 2770 } 2771 2772 /* Cancel ongoing command synchronously: 2773 * 2774 * - Cancel command timer 2775 * - Reset command counter 2776 * - Cancel command request 2777 */ 2778 static void hci_cancel_cmd_sync(struct hci_dev *hdev, int err) 2779 { 2780 bt_dev_dbg(hdev, "err 0x%2.2x", err); 2781 2782 cancel_delayed_work_sync(&hdev->cmd_timer); 2783 cancel_delayed_work_sync(&hdev->ncmd_timer); 2784 atomic_set(&hdev->cmd_cnt, 1); 2785 2786 hci_cmd_sync_cancel_sync(hdev, err); 2787 } 2788 2789 /* Suspend HCI device */ 2790 int hci_suspend_dev(struct hci_dev *hdev) 2791 { 2792 int ret; 2793 2794 bt_dev_dbg(hdev, ""); 2795 2796 /* Suspend should only act on when powered. */ 2797 if (!hdev_is_powered(hdev) || 2798 hci_dev_test_flag(hdev, HCI_UNREGISTER)) 2799 return 0; 2800 2801 /* If powering down don't attempt to suspend */ 2802 if (mgmt_powering_down(hdev)) 2803 return 0; 2804 2805 /* Cancel potentially blocking sync operation before suspend */ 2806 hci_cancel_cmd_sync(hdev, EHOSTDOWN); 2807 2808 hci_req_sync_lock(hdev); 2809 ret = hci_suspend_sync(hdev); 2810 hci_req_sync_unlock(hdev); 2811 2812 hci_clear_wake_reason(hdev); 2813 mgmt_suspending(hdev, hdev->suspend_state); 2814 2815 hci_sock_dev_event(hdev, HCI_DEV_SUSPEND); 2816 return ret; 2817 } 2818 EXPORT_SYMBOL(hci_suspend_dev); 2819 2820 /* Resume HCI device */ 2821 int hci_resume_dev(struct hci_dev *hdev) 2822 { 2823 int ret; 2824 2825 bt_dev_dbg(hdev, ""); 2826 2827 /* Resume should only act on when powered. 
*/ 2828 if (!hdev_is_powered(hdev) || 2829 hci_dev_test_flag(hdev, HCI_UNREGISTER)) 2830 return 0; 2831 2832 /* If powering down don't attempt to resume */ 2833 if (mgmt_powering_down(hdev)) 2834 return 0; 2835 2836 hci_req_sync_lock(hdev); 2837 ret = hci_resume_sync(hdev); 2838 hci_req_sync_unlock(hdev); 2839 2840 mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr, 2841 hdev->wake_addr_type); 2842 2843 hci_sock_dev_event(hdev, HCI_DEV_RESUME); 2844 return ret; 2845 } 2846 EXPORT_SYMBOL(hci_resume_dev); 2847 2848 /* Reset HCI device */ 2849 int hci_reset_dev(struct hci_dev *hdev) 2850 { 2851 static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 }; 2852 struct sk_buff *skb; 2853 2854 skb = bt_skb_alloc(3, GFP_ATOMIC); 2855 if (!skb) 2856 return -ENOMEM; 2857 2858 hci_skb_pkt_type(skb) = HCI_EVENT_PKT; 2859 skb_put_data(skb, hw_err, 3); 2860 2861 bt_dev_err(hdev, "Injecting HCI hardware error event"); 2862 2863 /* Send Hardware Error to upper stack */ 2864 return hci_recv_frame(hdev, skb); 2865 } 2866 EXPORT_SYMBOL(hci_reset_dev); 2867 2868 /* Receive frame from HCI drivers */ 2869 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb) 2870 { 2871 if (!hdev || (!test_bit(HCI_UP, &hdev->flags) 2872 && !test_bit(HCI_INIT, &hdev->flags))) { 2873 kfree_skb(skb); 2874 return -ENXIO; 2875 } 2876 2877 switch (hci_skb_pkt_type(skb)) { 2878 case HCI_EVENT_PKT: 2879 break; 2880 case HCI_ACLDATA_PKT: 2881 /* Detect if ISO packet has been sent as ACL */ 2882 if (hci_conn_num(hdev, ISO_LINK)) { 2883 __u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle); 2884 __u8 type; 2885 2886 type = hci_conn_lookup_type(hdev, hci_handle(handle)); 2887 if (type == ISO_LINK) 2888 hci_skb_pkt_type(skb) = HCI_ISODATA_PKT; 2889 } 2890 break; 2891 case HCI_SCODATA_PKT: 2892 break; 2893 case HCI_ISODATA_PKT: 2894 break; 2895 default: 2896 kfree_skb(skb); 2897 return -EINVAL; 2898 } 2899 2900 /* Incoming skb */ 2901 bt_cb(skb)->incoming = 1; 2902 2903 /* Time stamp */ 2904 __net_timestamp(skb); 2905 2906 skb_queue_tail(&hdev->rx_q, skb); 2907 queue_work(hdev->workqueue, &hdev->rx_work); 2908 2909 return 0; 2910 } 2911 EXPORT_SYMBOL(hci_recv_frame); 2912 2913 /* Receive diagnostic message from HCI drivers */ 2914 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb) 2915 { 2916 /* Mark as diagnostic packet */ 2917 hci_skb_pkt_type(skb) = HCI_DIAG_PKT; 2918 2919 /* Time stamp */ 2920 __net_timestamp(skb); 2921 2922 skb_queue_tail(&hdev->rx_q, skb); 2923 queue_work(hdev->workqueue, &hdev->rx_work); 2924 2925 return 0; 2926 } 2927 EXPORT_SYMBOL(hci_recv_diag); 2928 2929 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...) 2930 { 2931 va_list vargs; 2932 2933 va_start(vargs, fmt); 2934 kfree_const(hdev->hw_info); 2935 hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs); 2936 va_end(vargs); 2937 } 2938 EXPORT_SYMBOL(hci_set_hw_info); 2939 2940 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...) 
2941 { 2942 va_list vargs; 2943 2944 va_start(vargs, fmt); 2945 kfree_const(hdev->fw_info); 2946 hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs); 2947 va_end(vargs); 2948 } 2949 EXPORT_SYMBOL(hci_set_fw_info); 2950 2951 /* ---- Interface to upper protocols ---- */ 2952 2953 int hci_register_cb(struct hci_cb *cb) 2954 { 2955 BT_DBG("%p name %s", cb, cb->name); 2956 2957 mutex_lock(&hci_cb_list_lock); 2958 list_add_tail(&cb->list, &hci_cb_list); 2959 mutex_unlock(&hci_cb_list_lock); 2960 2961 return 0; 2962 } 2963 EXPORT_SYMBOL(hci_register_cb); 2964 2965 int hci_unregister_cb(struct hci_cb *cb) 2966 { 2967 BT_DBG("%p name %s", cb, cb->name); 2968 2969 mutex_lock(&hci_cb_list_lock); 2970 list_del(&cb->list); 2971 mutex_unlock(&hci_cb_list_lock); 2972 2973 return 0; 2974 } 2975 EXPORT_SYMBOL(hci_unregister_cb); 2976 2977 static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) 2978 { 2979 int err; 2980 2981 BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb), 2982 skb->len); 2983 2984 /* Time stamp */ 2985 __net_timestamp(skb); 2986 2987 /* Send copy to monitor */ 2988 hci_send_to_monitor(hdev, skb); 2989 2990 if (atomic_read(&hdev->promisc)) { 2991 /* Send copy to the sockets */ 2992 hci_send_to_sock(hdev, skb); 2993 } 2994 2995 /* Get rid of skb owner, prior to sending to the driver. */ 2996 skb_orphan(skb); 2997 2998 if (!test_bit(HCI_RUNNING, &hdev->flags)) { 2999 kfree_skb(skb); 3000 return -EINVAL; 3001 } 3002 3003 err = hdev->send(hdev, skb); 3004 if (err < 0) { 3005 bt_dev_err(hdev, "sending frame failed (%d)", err); 3006 kfree_skb(skb); 3007 return err; 3008 } 3009 3010 return 0; 3011 } 3012 3013 /* Send HCI command */ 3014 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, 3015 const void *param) 3016 { 3017 struct sk_buff *skb; 3018 3019 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen); 3020 3021 skb = hci_prepare_cmd(hdev, opcode, plen, param); 3022 if (!skb) { 3023 bt_dev_err(hdev, "no memory for command"); 3024 return -ENOMEM; 3025 } 3026 3027 /* Stand-alone HCI commands must be flagged as 3028 * single-command requests. 3029 */ 3030 bt_cb(skb)->hci.req_flags |= HCI_REQ_START; 3031 3032 skb_queue_tail(&hdev->cmd_q, skb); 3033 queue_work(hdev->workqueue, &hdev->cmd_work); 3034 3035 return 0; 3036 } 3037 3038 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen, 3039 const void *param) 3040 { 3041 struct sk_buff *skb; 3042 3043 if (hci_opcode_ogf(opcode) != 0x3f) { 3044 /* A controller receiving a command shall respond with either 3045 * a Command Status Event or a Command Complete Event. 3046 * Therefore, all standard HCI commands must be sent via the 3047 * standard API, using hci_send_cmd or hci_cmd_sync helpers. 3048 * Some vendors do not comply with this rule for vendor-specific 3049 * commands and do not return any event. We want to support 3050 * unresponded commands for such cases only. 
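 * As a hedged illustration (the OCF below is hypothetical), a driver could
 * fire such a fire-and-forget vendor command with something like
 * __hci_cmd_send(hdev, hci_opcode_pack(0x3f, 0x001e), sizeof(param), &param)
 * and must not expect any Command Complete or Command Status event back.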
3051 */ 3052 bt_dev_err(hdev, "unresponded command not supported"); 3053 return -EINVAL; 3054 } 3055 3056 skb = hci_prepare_cmd(hdev, opcode, plen, param); 3057 if (!skb) { 3058 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)", 3059 opcode); 3060 return -ENOMEM; 3061 } 3062 3063 hci_send_frame(hdev, skb); 3064 3065 return 0; 3066 } 3067 EXPORT_SYMBOL(__hci_cmd_send); 3068 3069 /* Get data from the previously sent command */ 3070 static void *hci_cmd_data(struct sk_buff *skb, __u16 opcode) 3071 { 3072 struct hci_command_hdr *hdr; 3073 3074 if (!skb || skb->len < HCI_COMMAND_HDR_SIZE) 3075 return NULL; 3076 3077 hdr = (void *)skb->data; 3078 3079 if (hdr->opcode != cpu_to_le16(opcode)) 3080 return NULL; 3081 3082 return skb->data + HCI_COMMAND_HDR_SIZE; 3083 } 3084 3085 /* Get data from the previously sent command */ 3086 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode) 3087 { 3088 void *data; 3089 3090 /* Check if opcode matches last sent command */ 3091 data = hci_cmd_data(hdev->sent_cmd, opcode); 3092 if (!data) 3093 /* Check if opcode matches last request */ 3094 data = hci_cmd_data(hdev->req_skb, opcode); 3095 3096 return data; 3097 } 3098 3099 /* Get data from last received event */ 3100 void *hci_recv_event_data(struct hci_dev *hdev, __u8 event) 3101 { 3102 struct hci_event_hdr *hdr; 3103 int offset; 3104 3105 if (!hdev->recv_event) 3106 return NULL; 3107 3108 hdr = (void *)hdev->recv_event->data; 3109 offset = sizeof(*hdr); 3110 3111 if (hdr->evt != event) { 3112 /* In case of LE metaevent check the subevent match */ 3113 if (hdr->evt == HCI_EV_LE_META) { 3114 struct hci_ev_le_meta *ev; 3115 3116 ev = (void *)hdev->recv_event->data + offset; 3117 offset += sizeof(*ev); 3118 if (ev->subevent == event) 3119 goto found; 3120 } 3121 return NULL; 3122 } 3123 3124 found: 3125 bt_dev_dbg(hdev, "event 0x%2.2x", event); 3126 3127 return hdev->recv_event->data + offset; 3128 } 3129 3130 /* Send ACL data */ 3131 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags) 3132 { 3133 struct hci_acl_hdr *hdr; 3134 int len = skb->len; 3135 3136 skb_push(skb, HCI_ACL_HDR_SIZE); 3137 skb_reset_transport_header(skb); 3138 hdr = (struct hci_acl_hdr *)skb_transport_header(skb); 3139 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags)); 3140 hdr->dlen = cpu_to_le16(len); 3141 } 3142 3143 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue, 3144 struct sk_buff *skb, __u16 flags) 3145 { 3146 struct hci_conn *conn = chan->conn; 3147 struct hci_dev *hdev = conn->hdev; 3148 struct sk_buff *list; 3149 3150 skb->len = skb_headlen(skb); 3151 skb->data_len = 0; 3152 3153 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT; 3154 3155 hci_add_acl_hdr(skb, conn->handle, flags); 3156 3157 list = skb_shinfo(skb)->frag_list; 3158 if (!list) { 3159 /* Non fragmented */ 3160 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len); 3161 3162 skb_queue_tail(queue, skb); 3163 } else { 3164 /* Fragmented */ 3165 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len); 3166 3167 skb_shinfo(skb)->frag_list = NULL; 3168 3169 /* Queue all fragments atomically. We need to use spin_lock_bh 3170 * here because of 6LoWPAN links, as there this function is 3171 * called from softirq and using normal spin lock could cause 3172 * deadlocks. 
3173 */ 3174 spin_lock_bh(&queue->lock); 3175 3176 __skb_queue_tail(queue, skb); 3177 3178 flags &= ~ACL_START; 3179 flags |= ACL_CONT; 3180 do { 3181 skb = list; list = list->next; 3182 3183 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT; 3184 hci_add_acl_hdr(skb, conn->handle, flags); 3185 3186 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len); 3187 3188 __skb_queue_tail(queue, skb); 3189 } while (list); 3190 3191 spin_unlock_bh(&queue->lock); 3192 } 3193 } 3194 3195 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags) 3196 { 3197 struct hci_dev *hdev = chan->conn->hdev; 3198 3199 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags); 3200 3201 hci_queue_acl(chan, &chan->data_q, skb, flags); 3202 3203 queue_work(hdev->workqueue, &hdev->tx_work); 3204 } 3205 3206 /* Send SCO data */ 3207 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb) 3208 { 3209 struct hci_dev *hdev = conn->hdev; 3210 struct hci_sco_hdr hdr; 3211 3212 BT_DBG("%s len %d", hdev->name, skb->len); 3213 3214 hdr.handle = cpu_to_le16(conn->handle); 3215 hdr.dlen = skb->len; 3216 3217 skb_push(skb, HCI_SCO_HDR_SIZE); 3218 skb_reset_transport_header(skb); 3219 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE); 3220 3221 hci_skb_pkt_type(skb) = HCI_SCODATA_PKT; 3222 3223 skb_queue_tail(&conn->data_q, skb); 3224 queue_work(hdev->workqueue, &hdev->tx_work); 3225 } 3226 3227 /* Send ISO data */ 3228 static void hci_add_iso_hdr(struct sk_buff *skb, __u16 handle, __u8 flags) 3229 { 3230 struct hci_iso_hdr *hdr; 3231 int len = skb->len; 3232 3233 skb_push(skb, HCI_ISO_HDR_SIZE); 3234 skb_reset_transport_header(skb); 3235 hdr = (struct hci_iso_hdr *)skb_transport_header(skb); 3236 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags)); 3237 hdr->dlen = cpu_to_le16(len); 3238 } 3239 3240 static void hci_queue_iso(struct hci_conn *conn, struct sk_buff_head *queue, 3241 struct sk_buff *skb) 3242 { 3243 struct hci_dev *hdev = conn->hdev; 3244 struct sk_buff *list; 3245 __u16 flags; 3246 3247 skb->len = skb_headlen(skb); 3248 skb->data_len = 0; 3249 3250 hci_skb_pkt_type(skb) = HCI_ISODATA_PKT; 3251 3252 list = skb_shinfo(skb)->frag_list; 3253 3254 flags = hci_iso_flags_pack(list ? ISO_START : ISO_SINGLE, 0x00); 3255 hci_add_iso_hdr(skb, conn->handle, flags); 3256 3257 if (!list) { 3258 /* Non fragmented */ 3259 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len); 3260 3261 skb_queue_tail(queue, skb); 3262 } else { 3263 /* Fragmented */ 3264 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len); 3265 3266 skb_shinfo(skb)->frag_list = NULL; 3267 3268 __skb_queue_tail(queue, skb); 3269 3270 do { 3271 skb = list; list = list->next; 3272 3273 hci_skb_pkt_type(skb) = HCI_ISODATA_PKT; 3274 flags = hci_iso_flags_pack(list ? 
ISO_CONT : ISO_END, 3275 0x00); 3276 hci_add_iso_hdr(skb, conn->handle, flags); 3277 3278 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len); 3279 3280 __skb_queue_tail(queue, skb); 3281 } while (list); 3282 } 3283 } 3284 3285 void hci_send_iso(struct hci_conn *conn, struct sk_buff *skb) 3286 { 3287 struct hci_dev *hdev = conn->hdev; 3288 3289 BT_DBG("%s len %d", hdev->name, skb->len); 3290 3291 hci_queue_iso(conn, &conn->data_q, skb); 3292 3293 queue_work(hdev->workqueue, &hdev->tx_work); 3294 } 3295 3296 /* ---- HCI TX task (outgoing data) ---- */ 3297 3298 /* HCI Connection scheduler */ 3299 static inline void hci_quote_sent(struct hci_conn *conn, int num, int *quote) 3300 { 3301 struct hci_dev *hdev; 3302 int cnt, q; 3303 3304 if (!conn) { 3305 *quote = 0; 3306 return; 3307 } 3308 3309 hdev = conn->hdev; 3310 3311 switch (conn->type) { 3312 case ACL_LINK: 3313 cnt = hdev->acl_cnt; 3314 break; 3315 case SCO_LINK: 3316 case ESCO_LINK: 3317 cnt = hdev->sco_cnt; 3318 break; 3319 case LE_LINK: 3320 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt; 3321 break; 3322 case ISO_LINK: 3323 cnt = hdev->iso_mtu ? hdev->iso_cnt : 3324 hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt; 3325 break; 3326 default: 3327 cnt = 0; 3328 bt_dev_err(hdev, "unknown link type %d", conn->type); 3329 } 3330 3331 q = cnt / num; 3332 *quote = q ? q : 1; 3333 } 3334 3335 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, 3336 int *quote) 3337 { 3338 struct hci_conn_hash *h = &hdev->conn_hash; 3339 struct hci_conn *conn = NULL, *c; 3340 unsigned int num = 0, min = ~0; 3341 3342 /* We don't have to lock device here. Connections are always 3343 * added and removed with TX task disabled. */ 3344 3345 rcu_read_lock(); 3346 3347 list_for_each_entry_rcu(c, &h->list, list) { 3348 if (c->type != type || skb_queue_empty(&c->data_q)) 3349 continue; 3350 3351 if (c->state != BT_CONNECTED && c->state != BT_CONFIG) 3352 continue; 3353 3354 num++; 3355 3356 if (c->sent < min) { 3357 min = c->sent; 3358 conn = c; 3359 } 3360 3361 if (hci_conn_num(hdev, type) == num) 3362 break; 3363 } 3364 3365 rcu_read_unlock(); 3366 3367 hci_quote_sent(conn, num, quote); 3368 3369 BT_DBG("conn %p quote %d", conn, *quote); 3370 return conn; 3371 } 3372 3373 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type) 3374 { 3375 struct hci_conn_hash *h = &hdev->conn_hash; 3376 struct hci_conn *c; 3377 3378 bt_dev_err(hdev, "link tx timeout"); 3379 3380 rcu_read_lock(); 3381 3382 /* Kill stalled connections */ 3383 list_for_each_entry_rcu(c, &h->list, list) { 3384 if (c->type == type && c->sent) { 3385 bt_dev_err(hdev, "killing stalled connection %pMR", 3386 &c->dst); 3387 /* hci_disconnect might sleep, so, we have to release 3388 * the RCU read lock before calling it. 
3389 */ 3390 rcu_read_unlock(); 3391 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM); 3392 rcu_read_lock(); 3393 } 3394 } 3395 3396 rcu_read_unlock(); 3397 } 3398 3399 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type, 3400 int *quote) 3401 { 3402 struct hci_conn_hash *h = &hdev->conn_hash; 3403 struct hci_chan *chan = NULL; 3404 unsigned int num = 0, min = ~0, cur_prio = 0; 3405 struct hci_conn *conn; 3406 int conn_num = 0; 3407 3408 BT_DBG("%s", hdev->name); 3409 3410 rcu_read_lock(); 3411 3412 list_for_each_entry_rcu(conn, &h->list, list) { 3413 struct hci_chan *tmp; 3414 3415 if (conn->type != type) 3416 continue; 3417 3418 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG) 3419 continue; 3420 3421 conn_num++; 3422 3423 list_for_each_entry_rcu(tmp, &conn->chan_list, list) { 3424 struct sk_buff *skb; 3425 3426 if (skb_queue_empty(&tmp->data_q)) 3427 continue; 3428 3429 skb = skb_peek(&tmp->data_q); 3430 if (skb->priority < cur_prio) 3431 continue; 3432 3433 if (skb->priority > cur_prio) { 3434 num = 0; 3435 min = ~0; 3436 cur_prio = skb->priority; 3437 } 3438 3439 num++; 3440 3441 if (conn->sent < min) { 3442 min = conn->sent; 3443 chan = tmp; 3444 } 3445 } 3446 3447 if (hci_conn_num(hdev, type) == conn_num) 3448 break; 3449 } 3450 3451 rcu_read_unlock(); 3452 3453 if (!chan) 3454 return NULL; 3455 3456 hci_quote_sent(chan->conn, num, quote); 3457 3458 BT_DBG("chan %p quote %d", chan, *quote); 3459 return chan; 3460 } 3461 3462 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type) 3463 { 3464 struct hci_conn_hash *h = &hdev->conn_hash; 3465 struct hci_conn *conn; 3466 int num = 0; 3467 3468 BT_DBG("%s", hdev->name); 3469 3470 rcu_read_lock(); 3471 3472 list_for_each_entry_rcu(conn, &h->list, list) { 3473 struct hci_chan *chan; 3474 3475 if (conn->type != type) 3476 continue; 3477 3478 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG) 3479 continue; 3480 3481 num++; 3482 3483 list_for_each_entry_rcu(chan, &conn->chan_list, list) { 3484 struct sk_buff *skb; 3485 3486 if (chan->sent) { 3487 chan->sent = 0; 3488 continue; 3489 } 3490 3491 if (skb_queue_empty(&chan->data_q)) 3492 continue; 3493 3494 skb = skb_peek(&chan->data_q); 3495 if (skb->priority >= HCI_PRIO_MAX - 1) 3496 continue; 3497 3498 skb->priority = HCI_PRIO_MAX - 1; 3499 3500 BT_DBG("chan %p skb %p promoted to %d", chan, skb, 3501 skb->priority); 3502 } 3503 3504 if (hci_conn_num(hdev, type) == num) 3505 break; 3506 } 3507 3508 rcu_read_unlock(); 3509 3510 } 3511 3512 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type) 3513 { 3514 unsigned long last_tx; 3515 3516 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) 3517 return; 3518 3519 switch (type) { 3520 case LE_LINK: 3521 last_tx = hdev->le_last_tx; 3522 break; 3523 default: 3524 last_tx = hdev->acl_last_tx; 3525 break; 3526 } 3527 3528 /* tx timeout must be longer than maximum link supervision timeout 3529 * (40.9 seconds) 3530 */ 3531 if (!cnt && time_after(jiffies, last_tx + HCI_ACL_TX_TIMEOUT)) 3532 hci_link_tx_to(hdev, type); 3533 } 3534 3535 /* Schedule SCO */ 3536 static void hci_sched_sco(struct hci_dev *hdev) 3537 { 3538 struct hci_conn *conn; 3539 struct sk_buff *skb; 3540 int quote; 3541 3542 BT_DBG("%s", hdev->name); 3543 3544 if (!hci_conn_num(hdev, SCO_LINK)) 3545 return; 3546 3547 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, "e))) { 3548 while (quote-- && (skb = skb_dequeue(&conn->data_q))) { 3549 BT_DBG("skb %p len %d", skb, skb->len); 3550 hci_send_frame(hdev, skb); 3551 
3552 conn->sent++; 3553 if (conn->sent == ~0) 3554 conn->sent = 0; 3555 } 3556 } 3557 } 3558 3559 static void hci_sched_esco(struct hci_dev *hdev) 3560 { 3561 struct hci_conn *conn; 3562 struct sk_buff *skb; 3563 int quote; 3564 3565 BT_DBG("%s", hdev->name); 3566 3567 if (!hci_conn_num(hdev, ESCO_LINK)) 3568 return; 3569 3570 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, 3571 "e))) { 3572 while (quote-- && (skb = skb_dequeue(&conn->data_q))) { 3573 BT_DBG("skb %p len %d", skb, skb->len); 3574 hci_send_frame(hdev, skb); 3575 3576 conn->sent++; 3577 if (conn->sent == ~0) 3578 conn->sent = 0; 3579 } 3580 } 3581 } 3582 3583 static void hci_sched_acl_pkt(struct hci_dev *hdev) 3584 { 3585 unsigned int cnt = hdev->acl_cnt; 3586 struct hci_chan *chan; 3587 struct sk_buff *skb; 3588 int quote; 3589 3590 __check_timeout(hdev, cnt, ACL_LINK); 3591 3592 while (hdev->acl_cnt && 3593 (chan = hci_chan_sent(hdev, ACL_LINK, "e))) { 3594 u32 priority = (skb_peek(&chan->data_q))->priority; 3595 while (quote-- && (skb = skb_peek(&chan->data_q))) { 3596 BT_DBG("chan %p skb %p len %d priority %u", chan, skb, 3597 skb->len, skb->priority); 3598 3599 /* Stop if priority has changed */ 3600 if (skb->priority < priority) 3601 break; 3602 3603 skb = skb_dequeue(&chan->data_q); 3604 3605 hci_conn_enter_active_mode(chan->conn, 3606 bt_cb(skb)->force_active); 3607 3608 hci_send_frame(hdev, skb); 3609 hdev->acl_last_tx = jiffies; 3610 3611 hdev->acl_cnt--; 3612 chan->sent++; 3613 chan->conn->sent++; 3614 3615 /* Send pending SCO packets right away */ 3616 hci_sched_sco(hdev); 3617 hci_sched_esco(hdev); 3618 } 3619 } 3620 3621 if (cnt != hdev->acl_cnt) 3622 hci_prio_recalculate(hdev, ACL_LINK); 3623 } 3624 3625 static void hci_sched_acl(struct hci_dev *hdev) 3626 { 3627 BT_DBG("%s", hdev->name); 3628 3629 /* No ACL link over BR/EDR controller */ 3630 if (!hci_conn_num(hdev, ACL_LINK)) 3631 return; 3632 3633 hci_sched_acl_pkt(hdev); 3634 } 3635 3636 static void hci_sched_le(struct hci_dev *hdev) 3637 { 3638 struct hci_chan *chan; 3639 struct sk_buff *skb; 3640 int quote, cnt, tmp; 3641 3642 BT_DBG("%s", hdev->name); 3643 3644 if (!hci_conn_num(hdev, LE_LINK)) 3645 return; 3646 3647 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt; 3648 3649 __check_timeout(hdev, cnt, LE_LINK); 3650 3651 tmp = cnt; 3652 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, "e))) { 3653 u32 priority = (skb_peek(&chan->data_q))->priority; 3654 while (quote-- && (skb = skb_peek(&chan->data_q))) { 3655 BT_DBG("chan %p skb %p len %d priority %u", chan, skb, 3656 skb->len, skb->priority); 3657 3658 /* Stop if priority has changed */ 3659 if (skb->priority < priority) 3660 break; 3661 3662 skb = skb_dequeue(&chan->data_q); 3663 3664 hci_send_frame(hdev, skb); 3665 hdev->le_last_tx = jiffies; 3666 3667 cnt--; 3668 chan->sent++; 3669 chan->conn->sent++; 3670 3671 /* Send pending SCO packets right away */ 3672 hci_sched_sco(hdev); 3673 hci_sched_esco(hdev); 3674 } 3675 } 3676 3677 if (hdev->le_pkts) 3678 hdev->le_cnt = cnt; 3679 else 3680 hdev->acl_cnt = cnt; 3681 3682 if (cnt != tmp) 3683 hci_prio_recalculate(hdev, LE_LINK); 3684 } 3685 3686 /* Schedule CIS */ 3687 static void hci_sched_iso(struct hci_dev *hdev) 3688 { 3689 struct hci_conn *conn; 3690 struct sk_buff *skb; 3691 int quote, *cnt; 3692 3693 BT_DBG("%s", hdev->name); 3694 3695 if (!hci_conn_num(hdev, ISO_LINK)) 3696 return; 3697 3698 cnt = hdev->iso_pkts ? &hdev->iso_cnt : 3699 hdev->le_pkts ? 
&hdev->le_cnt : &hdev->acl_cnt; 3700 while (*cnt && (conn = hci_low_sent(hdev, ISO_LINK, "e))) { 3701 while (quote-- && (skb = skb_dequeue(&conn->data_q))) { 3702 BT_DBG("skb %p len %d", skb, skb->len); 3703 hci_send_frame(hdev, skb); 3704 3705 conn->sent++; 3706 if (conn->sent == ~0) 3707 conn->sent = 0; 3708 (*cnt)--; 3709 } 3710 } 3711 } 3712 3713 static void hci_tx_work(struct work_struct *work) 3714 { 3715 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work); 3716 struct sk_buff *skb; 3717 3718 BT_DBG("%s acl %d sco %d le %d iso %d", hdev->name, hdev->acl_cnt, 3719 hdev->sco_cnt, hdev->le_cnt, hdev->iso_cnt); 3720 3721 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { 3722 /* Schedule queues and send stuff to HCI driver */ 3723 hci_sched_sco(hdev); 3724 hci_sched_esco(hdev); 3725 hci_sched_iso(hdev); 3726 hci_sched_acl(hdev); 3727 hci_sched_le(hdev); 3728 } 3729 3730 /* Send next queued raw (unknown type) packet */ 3731 while ((skb = skb_dequeue(&hdev->raw_q))) 3732 hci_send_frame(hdev, skb); 3733 } 3734 3735 /* ----- HCI RX task (incoming data processing) ----- */ 3736 3737 /* ACL data packet */ 3738 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb) 3739 { 3740 struct hci_acl_hdr *hdr = (void *) skb->data; 3741 struct hci_conn *conn; 3742 __u16 handle, flags; 3743 3744 skb_pull(skb, HCI_ACL_HDR_SIZE); 3745 3746 handle = __le16_to_cpu(hdr->handle); 3747 flags = hci_flags(handle); 3748 handle = hci_handle(handle); 3749 3750 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len, 3751 handle, flags); 3752 3753 hdev->stat.acl_rx++; 3754 3755 hci_dev_lock(hdev); 3756 conn = hci_conn_hash_lookup_handle(hdev, handle); 3757 hci_dev_unlock(hdev); 3758 3759 if (conn) { 3760 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF); 3761 3762 /* Send to upper protocol */ 3763 l2cap_recv_acldata(conn, skb, flags); 3764 return; 3765 } else { 3766 bt_dev_err(hdev, "ACL packet for unknown connection handle %d", 3767 handle); 3768 } 3769 3770 kfree_skb(skb); 3771 } 3772 3773 /* SCO data packet */ 3774 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb) 3775 { 3776 struct hci_sco_hdr *hdr = (void *) skb->data; 3777 struct hci_conn *conn; 3778 __u16 handle, flags; 3779 3780 skb_pull(skb, HCI_SCO_HDR_SIZE); 3781 3782 handle = __le16_to_cpu(hdr->handle); 3783 flags = hci_flags(handle); 3784 handle = hci_handle(handle); 3785 3786 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len, 3787 handle, flags); 3788 3789 hdev->stat.sco_rx++; 3790 3791 hci_dev_lock(hdev); 3792 conn = hci_conn_hash_lookup_handle(hdev, handle); 3793 hci_dev_unlock(hdev); 3794 3795 if (conn) { 3796 /* Send to upper protocol */ 3797 hci_skb_pkt_status(skb) = flags & 0x03; 3798 sco_recv_scodata(conn, skb); 3799 return; 3800 } else { 3801 bt_dev_err_ratelimited(hdev, "SCO packet for unknown connection handle %d", 3802 handle); 3803 } 3804 3805 kfree_skb(skb); 3806 } 3807 3808 static void hci_isodata_packet(struct hci_dev *hdev, struct sk_buff *skb) 3809 { 3810 struct hci_iso_hdr *hdr; 3811 struct hci_conn *conn; 3812 __u16 handle, flags; 3813 3814 hdr = skb_pull_data(skb, sizeof(*hdr)); 3815 if (!hdr) { 3816 bt_dev_err(hdev, "ISO packet too small"); 3817 goto drop; 3818 } 3819 3820 handle = __le16_to_cpu(hdr->handle); 3821 flags = hci_flags(handle); 3822 handle = hci_handle(handle); 3823 3824 bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len, 3825 handle, flags); 3826 3827 hci_dev_lock(hdev); 3828 conn = 
hci_conn_hash_lookup_handle(hdev, handle); 3829 hci_dev_unlock(hdev); 3830 3831 if (!conn) { 3832 bt_dev_err(hdev, "ISO packet for unknown connection handle %d", 3833 handle); 3834 goto drop; 3835 } 3836 3837 /* Send to upper protocol */ 3838 iso_recv(conn, skb, flags); 3839 return; 3840 3841 drop: 3842 kfree_skb(skb); 3843 } 3844 3845 static bool hci_req_is_complete(struct hci_dev *hdev) 3846 { 3847 struct sk_buff *skb; 3848 3849 skb = skb_peek(&hdev->cmd_q); 3850 if (!skb) 3851 return true; 3852 3853 return (bt_cb(skb)->hci.req_flags & HCI_REQ_START); 3854 } 3855 3856 static void hci_resend_last(struct hci_dev *hdev) 3857 { 3858 struct hci_command_hdr *sent; 3859 struct sk_buff *skb; 3860 u16 opcode; 3861 3862 if (!hdev->sent_cmd) 3863 return; 3864 3865 sent = (void *) hdev->sent_cmd->data; 3866 opcode = __le16_to_cpu(sent->opcode); 3867 if (opcode == HCI_OP_RESET) 3868 return; 3869 3870 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL); 3871 if (!skb) 3872 return; 3873 3874 skb_queue_head(&hdev->cmd_q, skb); 3875 queue_work(hdev->workqueue, &hdev->cmd_work); 3876 } 3877 3878 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status, 3879 hci_req_complete_t *req_complete, 3880 hci_req_complete_skb_t *req_complete_skb) 3881 { 3882 struct sk_buff *skb; 3883 unsigned long flags; 3884 3885 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status); 3886 3887 /* If the completed command doesn't match the last one that was 3888 * sent we need to do special handling of it. 3889 */ 3890 if (!hci_sent_cmd_data(hdev, opcode)) { 3891 /* Some CSR based controllers generate a spontaneous 3892 * reset complete event during init and any pending 3893 * command will never be completed. In such a case we 3894 * need to resend whatever was the last sent 3895 * command. 3896 */ 3897 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET) 3898 hci_resend_last(hdev); 3899 3900 return; 3901 } 3902 3903 /* If we reach this point this event matches the last command sent */ 3904 hci_dev_clear_flag(hdev, HCI_CMD_PENDING); 3905 3906 /* If the command succeeded and there's still more commands in 3907 * this request the request is not yet complete. 3908 */ 3909 if (!status && !hci_req_is_complete(hdev)) 3910 return; 3911 3912 skb = hdev->req_skb; 3913 3914 /* If this was the last command in a request the complete 3915 * callback would be found in hdev->req_skb instead of the 3916 * command queue (hdev->cmd_q). 
3917 */ 3918 if (skb && bt_cb(skb)->hci.req_flags & HCI_REQ_SKB) { 3919 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb; 3920 return; 3921 } 3922 3923 if (skb && bt_cb(skb)->hci.req_complete) { 3924 *req_complete = bt_cb(skb)->hci.req_complete; 3925 return; 3926 } 3927 3928 /* Remove all pending commands belonging to this request */ 3929 spin_lock_irqsave(&hdev->cmd_q.lock, flags); 3930 while ((skb = __skb_dequeue(&hdev->cmd_q))) { 3931 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) { 3932 __skb_queue_head(&hdev->cmd_q, skb); 3933 break; 3934 } 3935 3936 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB) 3937 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb; 3938 else 3939 *req_complete = bt_cb(skb)->hci.req_complete; 3940 dev_kfree_skb_irq(skb); 3941 } 3942 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags); 3943 } 3944 3945 static void hci_rx_work(struct work_struct *work) 3946 { 3947 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work); 3948 struct sk_buff *skb; 3949 3950 BT_DBG("%s", hdev->name); 3951 3952 /* The kcov_remote functions are used to collect packet parsing 3953 * coverage information from this background thread and to associate 3954 * the coverage with the syscall's thread which originally injected 3955 * the packet. This helps fuzzing the kernel. 3956 */ 3957 for (; (skb = skb_dequeue(&hdev->rx_q)); kcov_remote_stop()) { 3958 kcov_remote_start_common(skb_get_kcov_handle(skb)); 3959 3960 /* Send copy to monitor */ 3961 hci_send_to_monitor(hdev, skb); 3962 3963 if (atomic_read(&hdev->promisc)) { 3964 /* Send copy to the sockets */ 3965 hci_send_to_sock(hdev, skb); 3966 } 3967 3968 /* If the device has been opened in HCI_USER_CHANNEL, 3969 * userspace has exclusive access to the device. 3970 * While the device is in HCI_INIT, we still need to process 3971 * the data packets in order for the driver 3972 * to complete its setup(). 3973 */ 3974 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && 3975 !test_bit(HCI_INIT, &hdev->flags)) { 3976 kfree_skb(skb); 3977 continue; 3978 } 3979 3980 if (test_bit(HCI_INIT, &hdev->flags)) { 3981 /* Don't process data packets in this state.
*/ 3982 switch (hci_skb_pkt_type(skb)) { 3983 case HCI_ACLDATA_PKT: 3984 case HCI_SCODATA_PKT: 3985 case HCI_ISODATA_PKT: 3986 kfree_skb(skb); 3987 continue; 3988 } 3989 } 3990 3991 /* Process frame */ 3992 switch (hci_skb_pkt_type(skb)) { 3993 case HCI_EVENT_PKT: 3994 BT_DBG("%s Event packet", hdev->name); 3995 hci_event_packet(hdev, skb); 3996 break; 3997 3998 case HCI_ACLDATA_PKT: 3999 BT_DBG("%s ACL data packet", hdev->name); 4000 hci_acldata_packet(hdev, skb); 4001 break; 4002 4003 case HCI_SCODATA_PKT: 4004 BT_DBG("%s SCO data packet", hdev->name); 4005 hci_scodata_packet(hdev, skb); 4006 break; 4007 4008 case HCI_ISODATA_PKT: 4009 BT_DBG("%s ISO data packet", hdev->name); 4010 hci_isodata_packet(hdev, skb); 4011 break; 4012 4013 default: 4014 kfree_skb(skb); 4015 break; 4016 } 4017 } 4018 } 4019 4020 static void hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb) 4021 { 4022 int err; 4023 4024 bt_dev_dbg(hdev, "skb %p", skb); 4025 4026 kfree_skb(hdev->sent_cmd); 4027 4028 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL); 4029 if (!hdev->sent_cmd) { 4030 skb_queue_head(&hdev->cmd_q, skb); 4031 queue_work(hdev->workqueue, &hdev->cmd_work); 4032 return; 4033 } 4034 4035 err = hci_send_frame(hdev, skb); 4036 if (err < 0) { 4037 hci_cmd_sync_cancel_sync(hdev, -err); 4038 return; 4039 } 4040 4041 if (hci_req_status_pend(hdev) && 4042 !hci_dev_test_and_set_flag(hdev, HCI_CMD_PENDING)) { 4043 kfree_skb(hdev->req_skb); 4044 hdev->req_skb = skb_clone(hdev->sent_cmd, GFP_KERNEL); 4045 } 4046 4047 atomic_dec(&hdev->cmd_cnt); 4048 } 4049 4050 static void hci_cmd_work(struct work_struct *work) 4051 { 4052 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work); 4053 struct sk_buff *skb; 4054 4055 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name, 4056 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q)); 4057 4058 /* Send queued commands */ 4059 if (atomic_read(&hdev->cmd_cnt)) { 4060 skb = skb_dequeue(&hdev->cmd_q); 4061 if (!skb) 4062 return; 4063 4064 hci_send_cmd_sync(hdev, skb); 4065 4066 rcu_read_lock(); 4067 if (test_bit(HCI_RESET, &hdev->flags) || 4068 hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE)) 4069 cancel_delayed_work(&hdev->cmd_timer); 4070 else 4071 queue_delayed_work(hdev->workqueue, &hdev->cmd_timer, 4072 HCI_CMD_TIMEOUT); 4073 rcu_read_unlock(); 4074 } 4075 } 4076
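/* Usage sketch for the driver-facing API above (illustrative; xx_open,
 * xx_close and xx_send are hypothetical driver callbacks, error handling
 * trimmed):
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = xx_open;
 *	hdev->close = xx_close;
 *	hdev->send  = xx_send;		(open/close/send are mandatory;
 *					 hci_register_dev() rejects the
 *					 device without them)
 *
 *	err = hci_register_dev(hdev);	(returns the hciX index or a
 *					 negative error)
 *	if (err < 0)
 *		hci_free_dev(hdev);
 *
 * Received packets are handed to the core with hci_recv_frame() after
 * hci_skb_pkt_type(skb) has been set, and teardown is hci_unregister_dev()
 * followed by hci_free_dev().
 */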