/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <linux/kcov.h>
#include <linux/property.h>
#include <linux/suspend.h>
#include <linux/wait.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"
#include "leds.h"
#include "msft.h"
#include "aosp.h"
#include "hci_codec.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

static int hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	return 0;
}

static int hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
	return 0;
}

static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
	return 0;
}

static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
	return 0;
}

/* Get HCI device by index.
 * Device is held on return.
 */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_passive_scan(hdev);

		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
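	/* Illustrative note (not part of the original source): the bits
	 * returned here are MGMT_DEV_FOUND_* flags which callers are
	 * expected to forward when reporting the device over the
	 * management interface, roughly (sketch, exact reporting call and
	 * arguments depend on the caller):
	 *
	 *	flags = hci_inquiry_cache_update(hdev, &data, name_known);
	 *	mgmt_device_found(hdev, &data.bdaddr, ACL_LINK, ..., flags,
	 *			  eir, eir_len, ...);
	 */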
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static int hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return 0;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

	return 0;
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_PRIMARY) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* Restrict maximum inquiry length to 60 seconds */
	if (ir.length > 60) {
		err = -EINVAL;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo, NULL);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
				TASK_INTERRUPTIBLE)) {
			err = -EINTR;
			goto done;
		}
	}

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	ret = hci_dev_open_sync(hdev);

	hci_req_sync_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result in a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    !hci_dev_test_flag(hdev, HCI_MGMT))
		hci_dev_set_flag(hdev, HCI_BONDABLE);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_do_close(struct hci_dev *hdev)
{
	int err;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	err = hci_dev_close_sync(hdev);

	hci_req_sync_unlock(hdev);

	return err;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	cancel_work_sync(&hdev->power_on);
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

static int hci_dev_do_reset(struct hci_dev *hdev)
{
	int ret;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Cancel these to avoid queueing non-chained pending work */
	hci_dev_set_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
	/* Wait for
	 *
	 *	if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
	 *		queue_delayed_work(&hdev->{cmd,ncmd}_timer)
	 *
	 * inside RCU section to see the flag or complete scheduling.
	 */
	synchronize_rcu();
	/* Explicitly cancel works in case scheduled after setting the flag. */
	cancel_delayed_work(&hdev->cmd_timer);
	cancel_delayed_work(&hdev->ncmd_timer);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	hci_dev_clear_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0;
	hdev->sco_cnt = 0;
	hdev->le_cnt = 0;
	hdev->iso_cnt = 0;

	ret = hci_reset_sync(hdev);

	hci_req_sync_unlock(hdev);
	return ret;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	err = hci_dev_do_reset(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		ret = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan)
{
	bool conn_changed, discov_changed;

	BT_DBG("%s scan 0x%02x", hdev->name, scan);

	if ((scan & SCAN_PAGE))
		conn_changed = !hci_dev_test_and_set_flag(hdev,
							  HCI_CONNECTABLE);
	else
		conn_changed = hci_dev_test_and_clear_flag(hdev,
							   HCI_CONNECTABLE);

	if ((scan & SCAN_INQUIRY)) {
		discov_changed = !hci_dev_test_and_set_flag(hdev,
							    HCI_DISCOVERABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		discov_changed = hci_dev_test_and_clear_flag(hdev,
							     HCI_DISCOVERABLE);
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	if (conn_changed || discov_changed) {
		/* In case this was disabled through mgmt */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
			hci_update_adv_data(hdev, hdev->cur_adv_instance);

		mgmt_new_settings(hdev);
	}
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_PRIMARY) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT, NULL);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_passive_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		if (hdev->pkt_type == (__u16) dr.dev_opt)
			break;

		hdev->pkt_type = (__u16) dr.dev_opt;
		mgmt_phy_configuration_changed(hdev, NULL);
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When the auto-off is configured it means the transport
		 * is running, but in that case still indicate that the
		 * device is actually down.
		 */
		if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
			flags &= ~BIT(HCI_UP);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	unsigned long flags;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* When the auto-off is configured it means the transport
	 * is running, but in that case still indicate that the
	 * device is actually down.
	 */
	if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
		flags = hdev->flags & ~BIT(HCI_UP);
	else
		flags = hdev->flags;

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags    = flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (blocked) {
		hci_dev_set_flag(hdev, HCI_RFKILLED);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG))
			hci_dev_do_close(hdev);
	} else {
		hci_dev_clear_flag(hdev, HCI_RFKILLED);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_UP, &hdev->flags) &&
	    hci_dev_test_flag(hdev, HCI_MGMT) &&
	    hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
		cancel_delayed_work(&hdev->power_off);
		err = hci_powered_update_sync(hdev);
		mgmt_power_on(hdev, err);
		return;
	}

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		hci_dev_lock(hdev);
		mgmt_set_powered_failed(hdev, err);
		hci_dev_unlock(hdev);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
	    (hdev->dev_type == HCI_PRIMARY &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
		hci_dev_do_close(hdev);
	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send the Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
		/* Now that the controller is configured, it is
		 * important to clear the HCI_RAW flag.
		 */
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_error_reset(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);

	BT_DBG("%s", hdev->name);

	if (hdev->hw_error)
		hdev->hw_error(hdev, hdev->hw_error_code);
	else
		bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);

	if (hci_dev_do_close(hdev))
		return;

	hci_dev_do_open(hdev);
}

void hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}
}

void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct link_key *key, *tmp;

	list_for_each_entry_safe(key, tmp, &hdev->link_keys, list) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
	}
}

void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

void hci_blocked_keys_clear(struct hci_dev *hdev)
{
	struct blocked_key *b, *tmp;

	list_for_each_entry_safe(b, tmp, &hdev->blocked_keys, list) {
		list_del_rcu(&b->list);
		kfree_rcu(b, rcu);
	}
}

bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
{
	bool blocked = false;
	struct blocked_key *b;

	rcu_read_lock();
	list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
		if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
			blocked = true;
			break;
		}
	}

	rcu_read_unlock();
	return blocked;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->link_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) == 0) {
			rcu_read_unlock();

			if (hci_is_blocked_key(hdev,
					       HCI_BLOCKED_KEY_TYPE_LINKKEY,
					       k->val)) {
				bt_dev_warn_ratelimited(hdev,
							"Link key blocked for %pMR",
							&k->bdaddr);
				return NULL;
			}

			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* BR/EDR key derived using SC from an LE link */
	if (conn->type == LE_LINK)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}

static u8 ltk_role(u8 type)
{
	if (type == SMP_LTK)
		return HCI_ROLE_MASTER;

	return HCI_ROLE_SLAVE;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 addr_type, u8 role)
{
	struct smp_ltk *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
			continue;

		if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
			rcu_read_unlock();

			if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
					       k->val)) {
				bt_dev_warn_ratelimited(hdev,
							"LTK blocked for %pMR",
							&k->bdaddr);
				return NULL;
			}

			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}

struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk_to_return = NULL;
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa)) {
			irk_to_return = irk;
			goto done;
		}
	}

	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			irk_to_return = irk;
			goto done;
		}
	}

done:
	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
						irk_to_return->val)) {
		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
					&irk_to_return->bdaddr);
		irk_to_return = NULL;
	}

	rcu_read_unlock();

	return irk_to_return;
}

struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_irk *irk_to_return = NULL;
	struct smp_irk *irk;

	/* Identity Address must be public or static random */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0) {
			irk_to_return = irk;
			goto done;
		}
	}

done:

	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
						irk_to_return->val)) {
		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
					&irk_to_return->bdaddr);
		irk_to_return = NULL;
	}
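	/* Note (added for clarity, not in the original source): the
	 * blocked-key check above has to run before rcu_read_unlock()
	 * because irk_to_return points into the RCU-protected IRK list.
	 */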

	rcu_read_unlock();

	return irk_to_return;
}

struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}

struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
{
	struct smp_ltk *key, *old_key;
	u8 role = ltk_role(type);

	old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->rand = rand;
	key->enc_size = enc_size;
	key->type = type;

	return key;
}

struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
	struct smp_irk *irk;

	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
	if (!irk) {
		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
		if (!irk)
			return NULL;

		bacpy(&irk->bdaddr, bdaddr);
		irk->addr_type = addr_type;

		list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
	}

	memcpy(irk->val, val, 16);
	bacpy(&irk->rpa, rpa);

	return irk;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del_rcu(&key->list);
	kfree_rcu(key, rcu);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct smp_ltk *k, *tmp;
	int removed = 0;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
		removed++;
	}

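	/* Note (added for clarity, not in the original source): the loop
	 * above may remove more than one LTK for the address (e.g. both
	 * central and peripheral keys); -ENOENT is only returned when no
	 * key matched at all.
	 */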
	return removed ? 0 : -ENOENT;
}

void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
	struct smp_irk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct smp_ltk *k;
	struct smp_irk *irk;
	u8 addr_type;

	if (type == BDADDR_BREDR) {
		if (hci_find_link_key(hdev, bdaddr))
			return true;
		return false;
	}

	/* Convert to HCI addr type which struct smp_ltk uses */
	if (type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

	irk = hci_get_irk(hdev, bdaddr, addr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		addr_type = irk->addr_type;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();

	return false;
}

/* HCI command timer function */
static void hci_cmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    cmd_timer.work);

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
	} else {
		bt_dev_err(hdev, "command tx timeout");
	}

	if (hdev->cmd_timeout)
		hdev->cmd_timeout(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

/* HCI ncmd timer function */
static void hci_ncmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    ncmd_timer.work);

	bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0");

	/* During HCI_INIT phase no events can be injected if the ncmd timer
	 * triggers since the procedure has its own timeout handling.
	 */
	if (test_bit(HCI_INIT, &hdev->flags))
		return;

	/* This is an irrecoverable state, inject hardware error event */
	hci_reset_dev(hdev);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list) {
		if (bacmp(bdaddr, &data->bdaddr) != 0)
			continue;
		if (data->bdaddr_type != bdaddr_type)
			continue;
		return data;
	}

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 bdaddr_type)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);

	list_del(&data->list);
	kfree(data);

	return 0;
}

void hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
			    u8 *hash256, u8 *rand256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		data->bdaddr_type = bdaddr_type;
		list_add(&data->list, &hdev->remote_oob_data);
	}

	if (hash192 && rand192) {
		memcpy(data->hash192, hash192, sizeof(data->hash192));
		memcpy(data->rand192, rand192, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x03;
	} else {
		memset(data->hash192, 0, sizeof(data->hash192));
		memset(data->rand192, 0, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x02;
		else
			data->present = 0x00;
	}

	if (hash256 && rand256) {
		memcpy(data->hash256, hash256, sizeof(data->hash256));
		memcpy(data->rand256, rand256, sizeof(data->rand256));
	} else {
		memset(data->hash256, 0, sizeof(data->hash256));
		memset(data->rand256, 0, sizeof(data->rand256));
		if (hash192 && rand192)
			data->present = 0x01;
	}

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

/* This function requires the caller holds hdev->lock */
struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		if (adv_instance->instance == instance)
			return adv_instance;
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *cur_instance;

	cur_instance = hci_find_adv_instance(hdev, instance);
	if (!cur_instance)
		return NULL;

	if (cur_instance == list_last_entry(&hdev->adv_instances,
					    struct adv_info, list))
		return list_first_entry(&hdev->adv_instances,
					struct adv_info, list);
	else
		return list_next_entry(cur_instance, list);
}

/* This function requires the caller holds hdev->lock */
int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return -ENOENT;

	BT_DBG("%s removing %dMR", hdev->name, instance);

	if (hdev->cur_adv_instance == instance) {
		if (hdev->adv_instance_timeout) {
			cancel_delayed_work(&hdev->adv_instance_expire);
			hdev->adv_instance_timeout = 0;
		}
		hdev->cur_adv_instance = 0x00;
	}

	cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);

	list_del(&adv_instance->list);
	kfree(adv_instance);

	hdev->adv_instance_cnt--;

	return 0;
}

void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
{
	struct adv_info *adv_instance, *n;

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
		adv_instance->rpa_expired = rpa_expired;
}

/* This function requires the caller holds hdev->lock */
void hci_adv_instances_clear(struct hci_dev *hdev)
{
	struct adv_info *adv_instance, *n;

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
		cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
		list_del(&adv_instance->list);
		kfree(adv_instance);
	}

	hdev->adv_instance_cnt = 0;
	hdev->cur_adv_instance = 0x00;
}

static void adv_instance_rpa_expired(struct work_struct *work)
{
	struct adv_info *adv_instance = container_of(work, struct adv_info,
						     rpa_expired_cb.work);

	BT_DBG("");

	adv_instance->rpa_expired = true;
}

/* This function requires the caller holds hdev->lock */
struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance,
				      u32 flags, u16 adv_data_len, u8 *adv_data,
				      u16 scan_rsp_len, u8 *scan_rsp_data,
				      u16 timeout, u16 duration, s8 tx_power,
				      u32 min_interval, u32 max_interval,
				      u8 mesh_handle)
{
	struct adv_info *adv;

	adv = hci_find_adv_instance(hdev, instance);
	if (adv) {
		memset(adv->adv_data, 0, sizeof(adv->adv_data));
		memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
		memset(adv->per_adv_data, 0, sizeof(adv->per_adv_data));
	} else {
		if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
		    instance < 1 || instance > hdev->le_num_of_adv_sets + 1)
			return ERR_PTR(-EOVERFLOW);

		adv = kzalloc(sizeof(*adv), GFP_KERNEL);
		if (!adv)
			return ERR_PTR(-ENOMEM);

		adv->pending = true;
		adv->instance = instance;
		list_add(&adv->list, &hdev->adv_instances);
		hdev->adv_instance_cnt++;
	}

	adv->flags = flags;
	adv->min_interval = min_interval;
	adv->max_interval = max_interval;
	adv->tx_power = tx_power;
	/* Defining a mesh_handle changes the timing units to ms,
	 * rather than seconds, and ties the instance to the requested
	 * mesh_tx queue.
	 */
	adv->mesh = mesh_handle;

	hci_set_adv_instance_data(hdev, instance, adv_data_len, adv_data,
				  scan_rsp_len, scan_rsp_data);

	adv->timeout = timeout;
	adv->remaining_time = timeout;

	if (duration == 0)
		adv->duration = hdev->def_multi_adv_rotation_duration;
	else
		adv->duration = duration;

	INIT_DELAYED_WORK(&adv->rpa_expired_cb, adv_instance_rpa_expired);

	BT_DBG("%s for %dMR", hdev->name, instance);

	return adv;
}

/* This function requires the caller holds hdev->lock */
struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance,
				      u32 flags, u8 data_len, u8 *data,
				      u32 min_interval, u32 max_interval)
{
	struct adv_info *adv;

	adv = hci_add_adv_instance(hdev, instance, flags, 0, NULL, 0, NULL,
				   0, 0, HCI_ADV_TX_POWER_NO_PREFERENCE,
				   min_interval, max_interval, 0);
	if (IS_ERR(adv))
		return adv;

	adv->periodic = true;
	adv->per_adv_data_len = data_len;

	if (data)
		memcpy(adv->per_adv_data, data, data_len);

	return adv;
}

/* This function requires the caller holds hdev->lock */
int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
			      u16 adv_data_len, u8 *adv_data,
			      u16 scan_rsp_len, u8 *scan_rsp_data)
{
	struct adv_info *adv;

	adv = hci_find_adv_instance(hdev, instance);

	/* If advertisement doesn't exist, we can't modify its data */
	if (!adv)
		return -ENOENT;

	if (adv_data_len && ADV_DATA_CMP(adv, adv_data, adv_data_len)) {
		memset(adv->adv_data, 0, sizeof(adv->adv_data));
		memcpy(adv->adv_data, adv_data, adv_data_len);
		adv->adv_data_len = adv_data_len;
		adv->adv_data_changed = true;
	}

	if (scan_rsp_len && SCAN_RSP_CMP(adv, scan_rsp_data, scan_rsp_len)) {
		memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
		memcpy(adv->scan_rsp_data, scan_rsp_data, scan_rsp_len);
		adv->scan_rsp_len = scan_rsp_len;
		adv->scan_rsp_changed = true;
	}

	/* Mark as changed if there are flags which would affect it */
	if (((adv->flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) ||
	    adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
		adv->scan_rsp_changed = true;

	return 0;
}

/* This function requires the caller holds hdev->lock */
u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
	u32 flags;
	struct adv_info *adv;

	if (instance == 0x00) {
		/* Instance 0 always manages the "Tx Power" and "Flags"
		 * fields
		 */
		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
		 * corresponds to the "connectable" instance flag.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
			flags |= MGMT_ADV_FLAG_CONNECTABLE;

		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_DISCOV;

		return flags;
	}

	adv = hci_find_adv_instance(hdev, instance);

	/* Return 0 when we got an invalid instance identifier.
	 */
	if (!adv)
		return 0;

	return adv->flags;
}

bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv;

	/* Instance 0x00 always set local name */
	if (instance == 0x00)
		return true;

	adv = hci_find_adv_instance(hdev, instance);
	if (!adv)
		return false;

	if (adv->flags & MGMT_ADV_FLAG_APPEARANCE ||
	    adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
		return true;

	return adv->scan_rsp_len ? true : false;
}

/* This function requires the caller holds hdev->lock */
void hci_adv_monitors_clear(struct hci_dev *hdev)
{
	struct adv_monitor *monitor;
	int handle;

	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
		hci_free_adv_monitor(hdev, monitor);

	idr_destroy(&hdev->adv_monitors_idr);
}

/* Frees the monitor structure and does some bookkeeping.
 * This function requires the caller holds hdev->lock.
 */
void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
{
	struct adv_pattern *pattern;
	struct adv_pattern *tmp;

	if (!monitor)
		return;

	list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
		list_del(&pattern->list);
		kfree(pattern);
	}

	if (monitor->handle)
		idr_remove(&hdev->adv_monitors_idr, monitor->handle);

	if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) {
		hdev->adv_monitors_cnt--;
		mgmt_adv_monitor_removed(hdev, monitor->handle);
	}

	kfree(monitor);
}

/* Assigns handle to a monitor, and if offloading is supported and power is on,
 * also attempts to forward the request to the controller.
 * This function requires the caller holds hci_req_sync_lock.
 */
int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
{
	int min, max, handle;
	int status = 0;

	if (!monitor)
		return -EINVAL;

	hci_dev_lock(hdev);

	min = HCI_MIN_ADV_MONITOR_HANDLE;
	max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
	handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
			   GFP_KERNEL);

	hci_dev_unlock(hdev);

	if (handle < 0)
		return handle;

	monitor->handle = handle;

	if (!hdev_is_powered(hdev))
		return status;

	switch (hci_get_adv_monitor_offload_ext(hdev)) {
	case HCI_ADV_MONITOR_EXT_NONE:
		bt_dev_dbg(hdev, "add monitor %d status %d",
			   monitor->handle, status);
		/* Message was not forwarded to controller - not an error */
		break;

	case HCI_ADV_MONITOR_EXT_MSFT:
		status = msft_add_monitor_pattern(hdev, monitor);
		bt_dev_dbg(hdev, "add monitor %d msft status %d",
			   handle, status);
		break;
	}

	return status;
}

/* Attempts to tell the controller and free the monitor. If somehow the
 * controller doesn't have a corresponding handle, remove anyway.
 * This function requires the caller holds hci_req_sync_lock.
 */
static int hci_remove_adv_monitor(struct hci_dev *hdev,
				  struct adv_monitor *monitor)
{
	int status = 0;
	int handle;

	switch (hci_get_adv_monitor_offload_ext(hdev)) {
	case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
		bt_dev_dbg(hdev, "remove monitor %d status %d",
			   monitor->handle, status);
		goto free_monitor;

	case HCI_ADV_MONITOR_EXT_MSFT:
		handle = monitor->handle;
		status = msft_remove_monitor(hdev, monitor);
		bt_dev_dbg(hdev, "remove monitor %d msft status %d",
			   handle, status);
		break;
	}

	/* In case no matching handle registered, just free the monitor */
	if (status == -ENOENT)
		goto free_monitor;

	return status;

free_monitor:
	if (status == -ENOENT)
		bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
			    monitor->handle);
	hci_free_adv_monitor(hdev, monitor);

	return status;
}

/* This function requires the caller holds hci_req_sync_lock */
int hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle)
{
	struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);

	if (!monitor)
		return -EINVAL;

	return hci_remove_adv_monitor(hdev, monitor);
}

/* This function requires the caller holds hci_req_sync_lock */
int hci_remove_all_adv_monitor(struct hci_dev *hdev)
{
	struct adv_monitor *monitor;
	int idr_next_id = 0;
	int status = 0;

	while (1) {
		monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
		if (!monitor)
			break;

		status = hci_remove_adv_monitor(hdev, monitor);
		if (status)
			return status;

		idr_next_id++;
	}

	return status;
}

/* This function requires the caller holds hdev->lock */
bool hci_is_adv_monitoring(struct hci_dev *hdev)
{
	return !idr_is_empty(&hdev->adv_monitors_idr);
}

int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
{
	if (msft_monitor_supported(hdev))
		return HCI_ADV_MONITOR_EXT_MSFT;

	return HCI_ADV_MONITOR_EXT_NONE;
}

struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
					   bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, bdaddr_list, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
				struct list_head *bdaddr_list, bdaddr_t *bdaddr,
				u8 type)
{
	struct bdaddr_list_with_irk *b;

	list_for_each_entry(b, bdaddr_list, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

struct bdaddr_list_with_flags *
hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
				  bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list_with_flags *b;

	list_for_each_entry(b, bdaddr_list, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
{
	struct bdaddr_list *b, *n;

	list_for_each_entry_safe(b, n, bdaddr_list, list) {
		list_del(&b->list);
		kfree(b);
	}
}

int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_bdaddr_list_lookup(list, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, list);

	return 0;
}

int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
				 u8 type, u8 *peer_irk, u8 *local_irk)
{
	struct bdaddr_list_with_irk *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_bdaddr_list_lookup(list, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	if (peer_irk)
		memcpy(entry->peer_irk, peer_irk, 16);

	if (local_irk)
		memcpy(entry->local_irk, local_irk, 16);

	list_add(&entry->list, list);

	return 0;
}

int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
				   u8 type, u32 flags)
{
	struct bdaddr_list_with_flags *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_bdaddr_list_lookup(list, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;
	entry->flags = flags;

	list_add(&entry->list, list);

	return 0;
}

int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_bdaddr_list_clear(list);
		return 0;
	}

	entry = hci_bdaddr_list_lookup(list, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return 0;
}

int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
				 u8 type)
{
	struct bdaddr_list_with_irk *entry;

	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_bdaddr_list_clear(list);
		return 0;
	}

	entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return 0;
}

int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
				   u8 type)
{
	struct bdaddr_list_with_flags *entry;

	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_bdaddr_list_clear(list);
		return 0;
	}

	entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return 0;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
					       bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	list_for_each_entry(params, &hdev->le_conn_params, list) {
		if (bacmp(&params->addr, addr) == 0 &&
		    params->addr_type == addr_type) {
			return params;
		}
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock or rcu_read_lock */
struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
bdaddr_t *addr, u8 addr_type) 2257 { 2258 struct hci_conn_params *param; 2259 2260 rcu_read_lock(); 2261 2262 list_for_each_entry_rcu(param, list, action) { 2263 if (bacmp(&param->addr, addr) == 0 && 2264 param->addr_type == addr_type) { 2265 rcu_read_unlock(); 2266 return param; 2267 } 2268 } 2269 2270 rcu_read_unlock(); 2271 2272 return NULL; 2273 } 2274 2275 /* This function requires the caller holds hdev->lock */ 2276 void hci_pend_le_list_del_init(struct hci_conn_params *param) 2277 { 2278 if (list_empty(&param->action)) 2279 return; 2280 2281 list_del_rcu(&param->action); 2282 synchronize_rcu(); 2283 INIT_LIST_HEAD(&param->action); 2284 } 2285 2286 /* This function requires the caller holds hdev->lock */ 2287 void hci_pend_le_list_add(struct hci_conn_params *param, 2288 struct list_head *list) 2289 { 2290 list_add_rcu(&param->action, list); 2291 } 2292 2293 /* This function requires the caller holds hdev->lock */ 2294 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev, 2295 bdaddr_t *addr, u8 addr_type) 2296 { 2297 struct hci_conn_params *params; 2298 2299 params = hci_conn_params_lookup(hdev, addr, addr_type); 2300 if (params) 2301 return params; 2302 2303 params = kzalloc(sizeof(*params), GFP_KERNEL); 2304 if (!params) { 2305 bt_dev_err(hdev, "out of memory"); 2306 return NULL; 2307 } 2308 2309 bacpy(&params->addr, addr); 2310 params->addr_type = addr_type; 2311 2312 list_add(&params->list, &hdev->le_conn_params); 2313 INIT_LIST_HEAD(&params->action); 2314 2315 params->conn_min_interval = hdev->le_conn_min_interval; 2316 params->conn_max_interval = hdev->le_conn_max_interval; 2317 params->conn_latency = hdev->le_conn_latency; 2318 params->supervision_timeout = hdev->le_supv_timeout; 2319 params->auto_connect = HCI_AUTO_CONN_DISABLED; 2320 2321 BT_DBG("addr %pMR (type %u)", addr, addr_type); 2322 2323 return params; 2324 } 2325 2326 void hci_conn_params_free(struct hci_conn_params *params) 2327 { 2328 hci_pend_le_list_del_init(params); 2329 2330 if (params->conn) { 2331 hci_conn_drop(params->conn); 2332 hci_conn_put(params->conn); 2333 } 2334 2335 list_del(&params->list); 2336 kfree(params); 2337 } 2338 2339 /* This function requires the caller holds hdev->lock */ 2340 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type) 2341 { 2342 struct hci_conn_params *params; 2343 2344 params = hci_conn_params_lookup(hdev, addr, addr_type); 2345 if (!params) 2346 return; 2347 2348 hci_conn_params_free(params); 2349 2350 hci_update_passive_scan(hdev); 2351 2352 BT_DBG("addr %pMR (type %u)", addr, addr_type); 2353 } 2354 2355 /* This function requires the caller holds hdev->lock */ 2356 void hci_conn_params_clear_disabled(struct hci_dev *hdev) 2357 { 2358 struct hci_conn_params *params, *tmp; 2359 2360 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) { 2361 if (params->auto_connect != HCI_AUTO_CONN_DISABLED) 2362 continue; 2363 2364 /* If trying to establish one time connection to disabled 2365 * device, leave the params, but mark them as just once.
2366 */ 2367 if (params->explicit_connect) { 2368 params->auto_connect = HCI_AUTO_CONN_EXPLICIT; 2369 continue; 2370 } 2371 2372 hci_conn_params_free(params); 2373 } 2374 2375 BT_DBG("All LE disabled connection parameters were removed"); 2376 } 2377 2378 /* This function requires the caller holds hdev->lock */ 2379 static void hci_conn_params_clear_all(struct hci_dev *hdev) 2380 { 2381 struct hci_conn_params *params, *tmp; 2382 2383 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) 2384 hci_conn_params_free(params); 2385 2386 BT_DBG("All LE connection parameters were removed"); 2387 } 2388 2389 /* Copy the Identity Address of the controller. 2390 * 2391 * If the controller has a public BD_ADDR, then by default use that one. 2392 * If this is a LE only controller without a public address, default to 2393 * the static random address. 2394 * 2395 * For debugging purposes it is possible to force controllers with a 2396 * public address to use the static random address instead. 2397 * 2398 * In case BR/EDR has been disabled on a dual-mode controller and 2399 * userspace has configured a static address, then that address 2400 * becomes the identity address instead of the public BR/EDR address. 2401 */ 2402 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr, 2403 u8 *bdaddr_type) 2404 { 2405 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) || 2406 !bacmp(&hdev->bdaddr, BDADDR_ANY) || 2407 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) && 2408 bacmp(&hdev->static_addr, BDADDR_ANY))) { 2409 bacpy(bdaddr, &hdev->static_addr); 2410 *bdaddr_type = ADDR_LE_DEV_RANDOM; 2411 } else { 2412 bacpy(bdaddr, &hdev->bdaddr); 2413 *bdaddr_type = ADDR_LE_DEV_PUBLIC; 2414 } 2415 } 2416 2417 static void hci_clear_wake_reason(struct hci_dev *hdev) 2418 { 2419 hci_dev_lock(hdev); 2420 2421 hdev->wake_reason = 0; 2422 bacpy(&hdev->wake_addr, BDADDR_ANY); 2423 hdev->wake_addr_type = 0; 2424 2425 hci_dev_unlock(hdev); 2426 } 2427 2428 static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action, 2429 void *data) 2430 { 2431 struct hci_dev *hdev = 2432 container_of(nb, struct hci_dev, suspend_notifier); 2433 int ret = 0; 2434 2435 /* Userspace has full control of this device. Do nothing. */ 2436 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) 2437 return NOTIFY_DONE; 2438 2439 /* To avoid a potential race with hci_unregister_dev. */ 2440 hci_dev_hold(hdev); 2441 2442 if (action == PM_SUSPEND_PREPARE) 2443 ret = hci_suspend_dev(hdev); 2444 else if (action == PM_POST_SUSPEND) 2445 ret = hci_resume_dev(hdev); 2446 2447 if (ret) 2448 bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d", 2449 action, ret); 2450 2451 hci_dev_put(hdev); 2452 return NOTIFY_DONE; 2453 } 2454 2455 /* Alloc HCI device */ 2456 struct hci_dev *hci_alloc_dev_priv(int sizeof_priv) 2457 { 2458 struct hci_dev *hdev; 2459 unsigned int alloc_size; 2460 2461 alloc_size = sizeof(*hdev); 2462 if (sizeof_priv) { 2463 /* Fixme: May need ALIGN-ment? 
*/ 2464 alloc_size += sizeof_priv; 2465 } 2466 2467 hdev = kzalloc(alloc_size, GFP_KERNEL); 2468 if (!hdev) 2469 return NULL; 2470 2471 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1); 2472 hdev->esco_type = (ESCO_HV1); 2473 hdev->link_mode = (HCI_LM_ACCEPT); 2474 hdev->num_iac = 0x01; /* One IAC support is mandatory */ 2475 hdev->io_capability = 0x03; /* No Input No Output */ 2476 hdev->manufacturer = 0xffff; /* Default to internal use */ 2477 hdev->inq_tx_power = HCI_TX_POWER_INVALID; 2478 hdev->adv_tx_power = HCI_TX_POWER_INVALID; 2479 hdev->adv_instance_cnt = 0; 2480 hdev->cur_adv_instance = 0x00; 2481 hdev->adv_instance_timeout = 0; 2482 2483 hdev->advmon_allowlist_duration = 300; 2484 hdev->advmon_no_filter_duration = 500; 2485 hdev->enable_advmon_interleave_scan = 0x00; /* Default to disable */ 2486 2487 hdev->sniff_max_interval = 800; 2488 hdev->sniff_min_interval = 80; 2489 2490 hdev->le_adv_channel_map = 0x07; 2491 hdev->le_adv_min_interval = 0x0800; 2492 hdev->le_adv_max_interval = 0x0800; 2493 hdev->le_scan_interval = 0x0060; 2494 hdev->le_scan_window = 0x0030; 2495 hdev->le_scan_int_suspend = 0x0400; 2496 hdev->le_scan_window_suspend = 0x0012; 2497 hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT; 2498 hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN; 2499 hdev->le_scan_int_adv_monitor = 0x0060; 2500 hdev->le_scan_window_adv_monitor = 0x0030; 2501 hdev->le_scan_int_connect = 0x0060; 2502 hdev->le_scan_window_connect = 0x0060; 2503 hdev->le_conn_min_interval = 0x0018; 2504 hdev->le_conn_max_interval = 0x0028; 2505 hdev->le_conn_latency = 0x0000; 2506 hdev->le_supv_timeout = 0x002a; 2507 hdev->le_def_tx_len = 0x001b; 2508 hdev->le_def_tx_time = 0x0148; 2509 hdev->le_max_tx_len = 0x001b; 2510 hdev->le_max_tx_time = 0x0148; 2511 hdev->le_max_rx_len = 0x001b; 2512 hdev->le_max_rx_time = 0x0148; 2513 hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE; 2514 hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE; 2515 hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M; 2516 hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M; 2517 hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES; 2518 hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION; 2519 hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT; 2520 hdev->min_le_tx_power = HCI_TX_POWER_INVALID; 2521 hdev->max_le_tx_power = HCI_TX_POWER_INVALID; 2522 2523 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT; 2524 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT; 2525 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE; 2526 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE; 2527 hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT; 2528 hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE; 2529 2530 /* default 1.28 sec page scan */ 2531 hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD; 2532 hdev->def_page_scan_int = 0x0800; 2533 hdev->def_page_scan_window = 0x0012; 2534 2535 mutex_init(&hdev->lock); 2536 mutex_init(&hdev->req_lock); 2537 2538 INIT_LIST_HEAD(&hdev->mesh_pending); 2539 INIT_LIST_HEAD(&hdev->mgmt_pending); 2540 INIT_LIST_HEAD(&hdev->reject_list); 2541 INIT_LIST_HEAD(&hdev->accept_list); 2542 INIT_LIST_HEAD(&hdev->uuids); 2543 INIT_LIST_HEAD(&hdev->link_keys); 2544 INIT_LIST_HEAD(&hdev->long_term_keys); 2545 INIT_LIST_HEAD(&hdev->identity_resolving_keys); 2546 INIT_LIST_HEAD(&hdev->remote_oob_data); 2547 INIT_LIST_HEAD(&hdev->le_accept_list); 2548 INIT_LIST_HEAD(&hdev->le_resolv_list); 2549 INIT_LIST_HEAD(&hdev->le_conn_params); 2550 INIT_LIST_HEAD(&hdev->pend_le_conns); 2551 INIT_LIST_HEAD(&hdev->pend_le_reports); 2552 
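/* Note: pend_le_conns and pend_le_reports hold hci_conn_params entries
 * linked through their action member (see hci_pend_le_list_add() and
 * hci_pend_le_action_lookup() above); the entries themselves are owned
 * by le_conn_params through their list member.
 */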
INIT_LIST_HEAD(&hdev->conn_hash.list); 2553 INIT_LIST_HEAD(&hdev->adv_instances); 2554 INIT_LIST_HEAD(&hdev->blocked_keys); 2555 INIT_LIST_HEAD(&hdev->monitored_devices); 2556 2557 INIT_LIST_HEAD(&hdev->local_codecs); 2558 INIT_WORK(&hdev->rx_work, hci_rx_work); 2559 INIT_WORK(&hdev->cmd_work, hci_cmd_work); 2560 INIT_WORK(&hdev->tx_work, hci_tx_work); 2561 INIT_WORK(&hdev->power_on, hci_power_on); 2562 INIT_WORK(&hdev->error_reset, hci_error_reset); 2563 2564 hci_cmd_sync_init(hdev); 2565 2566 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off); 2567 2568 skb_queue_head_init(&hdev->rx_q); 2569 skb_queue_head_init(&hdev->cmd_q); 2570 skb_queue_head_init(&hdev->raw_q); 2571 2572 init_waitqueue_head(&hdev->req_wait_q); 2573 2574 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout); 2575 INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout); 2576 2577 hci_devcd_setup(hdev); 2578 hci_request_setup(hdev); 2579 2580 hci_init_sysfs(hdev); 2581 discovery_init(hdev); 2582 2583 return hdev; 2584 } 2585 EXPORT_SYMBOL(hci_alloc_dev_priv); 2586 2587 /* Free HCI device */ 2588 void hci_free_dev(struct hci_dev *hdev) 2589 { 2590 /* will free via device release */ 2591 put_device(&hdev->dev); 2592 } 2593 EXPORT_SYMBOL(hci_free_dev); 2594 2595 /* Register HCI device */ 2596 int hci_register_dev(struct hci_dev *hdev) 2597 { 2598 int id, error; 2599 2600 if (!hdev->open || !hdev->close || !hdev->send) 2601 return -EINVAL; 2602 2603 /* Do not allow HCI_AMP devices to register at index 0, 2604 * so the index can be used as the AMP controller ID. 2605 */ 2606 switch (hdev->dev_type) { 2607 case HCI_PRIMARY: 2608 id = ida_simple_get(&hci_index_ida, 0, HCI_MAX_ID, GFP_KERNEL); 2609 break; 2610 case HCI_AMP: 2611 id = ida_simple_get(&hci_index_ida, 1, HCI_MAX_ID, GFP_KERNEL); 2612 break; 2613 default: 2614 return -EINVAL; 2615 } 2616 2617 if (id < 0) 2618 return id; 2619 2620 snprintf(hdev->name, sizeof(hdev->name), "hci%d", id); 2621 hdev->id = id; 2622 2623 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); 2624 2625 hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name); 2626 if (!hdev->workqueue) { 2627 error = -ENOMEM; 2628 goto err; 2629 } 2630 2631 hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, 2632 hdev->name); 2633 if (!hdev->req_workqueue) { 2634 destroy_workqueue(hdev->workqueue); 2635 error = -ENOMEM; 2636 goto err; 2637 } 2638 2639 if (!IS_ERR_OR_NULL(bt_debugfs)) 2640 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs); 2641 2642 dev_set_name(&hdev->dev, "%s", hdev->name); 2643 2644 error = device_add(&hdev->dev); 2645 if (error < 0) 2646 goto err_wqueue; 2647 2648 hci_leds_init(hdev); 2649 2650 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev, 2651 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, 2652 hdev); 2653 if (hdev->rfkill) { 2654 if (rfkill_register(hdev->rfkill) < 0) { 2655 rfkill_destroy(hdev->rfkill); 2656 hdev->rfkill = NULL; 2657 } 2658 } 2659 2660 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) 2661 hci_dev_set_flag(hdev, HCI_RFKILLED); 2662 2663 hci_dev_set_flag(hdev, HCI_SETUP); 2664 hci_dev_set_flag(hdev, HCI_AUTO_OFF); 2665 2666 if (hdev->dev_type == HCI_PRIMARY) { 2667 /* Assume BR/EDR support until proven otherwise (such as 2668 * through reading supported features during init. 
2669 */ 2670 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED); 2671 } 2672 2673 write_lock(&hci_dev_list_lock); 2674 list_add(&hdev->list, &hci_dev_list); 2675 write_unlock(&hci_dev_list_lock); 2676 2677 /* Devices that are marked for raw-only usage are unconfigured 2678 * and should not be included in normal operation. 2679 */ 2680 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) 2681 hci_dev_set_flag(hdev, HCI_UNCONFIGURED); 2682 2683 /* Mark Remote Wakeup connection flag as supported if driver has wakeup 2684 * callback. 2685 */ 2686 if (hdev->wakeup) 2687 hdev->conn_flags |= HCI_CONN_FLAG_REMOTE_WAKEUP; 2688 2689 hci_sock_dev_event(hdev, HCI_DEV_REG); 2690 hci_dev_hold(hdev); 2691 2692 error = hci_register_suspend_notifier(hdev); 2693 if (error) 2694 BT_WARN("register suspend notifier failed error:%d\n", error); 2695 2696 queue_work(hdev->req_workqueue, &hdev->power_on); 2697 2698 idr_init(&hdev->adv_monitors_idr); 2699 msft_register(hdev); 2700 2701 return id; 2702 2703 err_wqueue: 2704 debugfs_remove_recursive(hdev->debugfs); 2705 destroy_workqueue(hdev->workqueue); 2706 destroy_workqueue(hdev->req_workqueue); 2707 err: 2708 ida_simple_remove(&hci_index_ida, hdev->id); 2709 2710 return error; 2711 } 2712 EXPORT_SYMBOL(hci_register_dev); 2713 2714 /* Unregister HCI device */ 2715 void hci_unregister_dev(struct hci_dev *hdev) 2716 { 2717 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); 2718 2719 mutex_lock(&hdev->unregister_lock); 2720 hci_dev_set_flag(hdev, HCI_UNREGISTER); 2721 mutex_unlock(&hdev->unregister_lock); 2722 2723 write_lock(&hci_dev_list_lock); 2724 list_del(&hdev->list); 2725 write_unlock(&hci_dev_list_lock); 2726 2727 cancel_work_sync(&hdev->power_on); 2728 2729 hci_cmd_sync_clear(hdev); 2730 2731 hci_unregister_suspend_notifier(hdev); 2732 2733 msft_unregister(hdev); 2734 2735 hci_dev_do_close(hdev); 2736 2737 if (!test_bit(HCI_INIT, &hdev->flags) && 2738 !hci_dev_test_flag(hdev, HCI_SETUP) && 2739 !hci_dev_test_flag(hdev, HCI_CONFIG)) { 2740 hci_dev_lock(hdev); 2741 mgmt_index_removed(hdev); 2742 hci_dev_unlock(hdev); 2743 } 2744 2745 /* mgmt_index_removed should take care of emptying the 2746 * pending list */ 2747 BUG_ON(!list_empty(&hdev->mgmt_pending)); 2748 2749 hci_sock_dev_event(hdev, HCI_DEV_UNREG); 2750 2751 if (hdev->rfkill) { 2752 rfkill_unregister(hdev->rfkill); 2753 rfkill_destroy(hdev->rfkill); 2754 } 2755 2756 device_del(&hdev->dev); 2757 /* Actual cleanup is deferred until hci_release_dev(). 
*/ 2758 hci_dev_put(hdev); 2759 } 2760 EXPORT_SYMBOL(hci_unregister_dev); 2761 2762 /* Release HCI device */ 2763 void hci_release_dev(struct hci_dev *hdev) 2764 { 2765 debugfs_remove_recursive(hdev->debugfs); 2766 kfree_const(hdev->hw_info); 2767 kfree_const(hdev->fw_info); 2768 2769 destroy_workqueue(hdev->workqueue); 2770 destroy_workqueue(hdev->req_workqueue); 2771 2772 hci_dev_lock(hdev); 2773 hci_bdaddr_list_clear(&hdev->reject_list); 2774 hci_bdaddr_list_clear(&hdev->accept_list); 2775 hci_uuids_clear(hdev); 2776 hci_link_keys_clear(hdev); 2777 hci_smp_ltks_clear(hdev); 2778 hci_smp_irks_clear(hdev); 2779 hci_remote_oob_data_clear(hdev); 2780 hci_adv_instances_clear(hdev); 2781 hci_adv_monitors_clear(hdev); 2782 hci_bdaddr_list_clear(&hdev->le_accept_list); 2783 hci_bdaddr_list_clear(&hdev->le_resolv_list); 2784 hci_conn_params_clear_all(hdev); 2785 hci_discovery_filter_clear(hdev); 2786 hci_blocked_keys_clear(hdev); 2787 hci_dev_unlock(hdev); 2788 2789 ida_simple_remove(&hci_index_ida, hdev->id); 2790 kfree_skb(hdev->sent_cmd); 2791 kfree_skb(hdev->recv_event); 2792 kfree(hdev); 2793 } 2794 EXPORT_SYMBOL(hci_release_dev); 2795 2796 int hci_register_suspend_notifier(struct hci_dev *hdev) 2797 { 2798 int ret = 0; 2799 2800 if (!hdev->suspend_notifier.notifier_call && 2801 !test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) { 2802 hdev->suspend_notifier.notifier_call = hci_suspend_notifier; 2803 ret = register_pm_notifier(&hdev->suspend_notifier); 2804 } 2805 2806 return ret; 2807 } 2808 2809 int hci_unregister_suspend_notifier(struct hci_dev *hdev) 2810 { 2811 int ret = 0; 2812 2813 if (hdev->suspend_notifier.notifier_call) { 2814 ret = unregister_pm_notifier(&hdev->suspend_notifier); 2815 if (!ret) 2816 hdev->suspend_notifier.notifier_call = NULL; 2817 } 2818 2819 return ret; 2820 } 2821 2822 /* Suspend HCI device */ 2823 int hci_suspend_dev(struct hci_dev *hdev) 2824 { 2825 int ret; 2826 2827 bt_dev_dbg(hdev, ""); 2828 2829 /* Suspend should only act on when powered. */ 2830 if (!hdev_is_powered(hdev) || 2831 hci_dev_test_flag(hdev, HCI_UNREGISTER)) 2832 return 0; 2833 2834 /* If powering down don't attempt to suspend */ 2835 if (mgmt_powering_down(hdev)) 2836 return 0; 2837 2838 /* Cancel potentially blocking sync operation before suspend */ 2839 __hci_cmd_sync_cancel(hdev, -EHOSTDOWN); 2840 2841 hci_req_sync_lock(hdev); 2842 ret = hci_suspend_sync(hdev); 2843 hci_req_sync_unlock(hdev); 2844 2845 hci_clear_wake_reason(hdev); 2846 mgmt_suspending(hdev, hdev->suspend_state); 2847 2848 hci_sock_dev_event(hdev, HCI_DEV_SUSPEND); 2849 return ret; 2850 } 2851 EXPORT_SYMBOL(hci_suspend_dev); 2852 2853 /* Resume HCI device */ 2854 int hci_resume_dev(struct hci_dev *hdev) 2855 { 2856 int ret; 2857 2858 bt_dev_dbg(hdev, ""); 2859 2860 /* Resume should only act on when powered. 
*/ 2861 if (!hdev_is_powered(hdev) || 2862 hci_dev_test_flag(hdev, HCI_UNREGISTER)) 2863 return 0; 2864 2865 /* If powering down don't attempt to resume */ 2866 if (mgmt_powering_down(hdev)) 2867 return 0; 2868 2869 hci_req_sync_lock(hdev); 2870 ret = hci_resume_sync(hdev); 2871 hci_req_sync_unlock(hdev); 2872 2873 mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr, 2874 hdev->wake_addr_type); 2875 2876 hci_sock_dev_event(hdev, HCI_DEV_RESUME); 2877 return ret; 2878 } 2879 EXPORT_SYMBOL(hci_resume_dev); 2880 2881 /* Reset HCI device */ 2882 int hci_reset_dev(struct hci_dev *hdev) 2883 { 2884 static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 }; 2885 struct sk_buff *skb; 2886 2887 skb = bt_skb_alloc(3, GFP_ATOMIC); 2888 if (!skb) 2889 return -ENOMEM; 2890 2891 hci_skb_pkt_type(skb) = HCI_EVENT_PKT; 2892 skb_put_data(skb, hw_err, 3); 2893 2894 bt_dev_err(hdev, "Injecting HCI hardware error event"); 2895 2896 /* Send Hardware Error to upper stack */ 2897 return hci_recv_frame(hdev, skb); 2898 } 2899 EXPORT_SYMBOL(hci_reset_dev); 2900 2901 /* Receive frame from HCI drivers */ 2902 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb) 2903 { 2904 if (!hdev || (!test_bit(HCI_UP, &hdev->flags) 2905 && !test_bit(HCI_INIT, &hdev->flags))) { 2906 kfree_skb(skb); 2907 return -ENXIO; 2908 } 2909 2910 switch (hci_skb_pkt_type(skb)) { 2911 case HCI_EVENT_PKT: 2912 break; 2913 case HCI_ACLDATA_PKT: 2914 /* Detect if ISO packet has been sent as ACL */ 2915 if (hci_conn_num(hdev, ISO_LINK)) { 2916 __u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle); 2917 __u8 type; 2918 2919 type = hci_conn_lookup_type(hdev, hci_handle(handle)); 2920 if (type == ISO_LINK) 2921 hci_skb_pkt_type(skb) = HCI_ISODATA_PKT; 2922 } 2923 break; 2924 case HCI_SCODATA_PKT: 2925 break; 2926 case HCI_ISODATA_PKT: 2927 break; 2928 default: 2929 kfree_skb(skb); 2930 return -EINVAL; 2931 } 2932 2933 /* Incoming skb */ 2934 bt_cb(skb)->incoming = 1; 2935 2936 /* Time stamp */ 2937 __net_timestamp(skb); 2938 2939 skb_queue_tail(&hdev->rx_q, skb); 2940 queue_work(hdev->workqueue, &hdev->rx_work); 2941 2942 return 0; 2943 } 2944 EXPORT_SYMBOL(hci_recv_frame); 2945 2946 /* Receive diagnostic message from HCI drivers */ 2947 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb) 2948 { 2949 /* Mark as diagnostic packet */ 2950 hci_skb_pkt_type(skb) = HCI_DIAG_PKT; 2951 2952 /* Time stamp */ 2953 __net_timestamp(skb); 2954 2955 skb_queue_tail(&hdev->rx_q, skb); 2956 queue_work(hdev->workqueue, &hdev->rx_work); 2957 2958 return 0; 2959 } 2960 EXPORT_SYMBOL(hci_recv_diag); 2961 2962 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...) 2963 { 2964 va_list vargs; 2965 2966 va_start(vargs, fmt); 2967 kfree_const(hdev->hw_info); 2968 hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs); 2969 va_end(vargs); 2970 } 2971 EXPORT_SYMBOL(hci_set_hw_info); 2972 2973 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...) 
2974 { 2975 va_list vargs; 2976 2977 va_start(vargs, fmt); 2978 kfree_const(hdev->fw_info); 2979 hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs); 2980 va_end(vargs); 2981 } 2982 EXPORT_SYMBOL(hci_set_fw_info); 2983 2984 /* ---- Interface to upper protocols ---- */ 2985 2986 int hci_register_cb(struct hci_cb *cb) 2987 { 2988 BT_DBG("%p name %s", cb, cb->name); 2989 2990 mutex_lock(&hci_cb_list_lock); 2991 list_add_tail(&cb->list, &hci_cb_list); 2992 mutex_unlock(&hci_cb_list_lock); 2993 2994 return 0; 2995 } 2996 EXPORT_SYMBOL(hci_register_cb); 2997 2998 int hci_unregister_cb(struct hci_cb *cb) 2999 { 3000 BT_DBG("%p name %s", cb, cb->name); 3001 3002 mutex_lock(&hci_cb_list_lock); 3003 list_del(&cb->list); 3004 mutex_unlock(&hci_cb_list_lock); 3005 3006 return 0; 3007 } 3008 EXPORT_SYMBOL(hci_unregister_cb); 3009 3010 static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) 3011 { 3012 int err; 3013 3014 BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb), 3015 skb->len); 3016 3017 /* Time stamp */ 3018 __net_timestamp(skb); 3019 3020 /* Send copy to monitor */ 3021 hci_send_to_monitor(hdev, skb); 3022 3023 if (atomic_read(&hdev->promisc)) { 3024 /* Send copy to the sockets */ 3025 hci_send_to_sock(hdev, skb); 3026 } 3027 3028 /* Get rid of skb owner, prior to sending to the driver. */ 3029 skb_orphan(skb); 3030 3031 if (!test_bit(HCI_RUNNING, &hdev->flags)) { 3032 kfree_skb(skb); 3033 return -EINVAL; 3034 } 3035 3036 err = hdev->send(hdev, skb); 3037 if (err < 0) { 3038 bt_dev_err(hdev, "sending frame failed (%d)", err); 3039 kfree_skb(skb); 3040 return err; 3041 } 3042 3043 return 0; 3044 } 3045 3046 /* Send HCI command */ 3047 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, 3048 const void *param) 3049 { 3050 struct sk_buff *skb; 3051 3052 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen); 3053 3054 skb = hci_prepare_cmd(hdev, opcode, plen, param); 3055 if (!skb) { 3056 bt_dev_err(hdev, "no memory for command"); 3057 return -ENOMEM; 3058 } 3059 3060 /* Stand-alone HCI commands must be flagged as 3061 * single-command requests. 3062 */ 3063 bt_cb(skb)->hci.req_flags |= HCI_REQ_START; 3064 3065 skb_queue_tail(&hdev->cmd_q, skb); 3066 queue_work(hdev->workqueue, &hdev->cmd_work); 3067 3068 return 0; 3069 } 3070 3071 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen, 3072 const void *param) 3073 { 3074 struct sk_buff *skb; 3075 3076 if (hci_opcode_ogf(opcode) != 0x3f) { 3077 /* A controller receiving a command shall respond with either 3078 * a Command Status Event or a Command Complete Event. 3079 * Therefore, all standard HCI commands must be sent via the 3080 * standard API, using hci_send_cmd or hci_cmd_sync helpers. 3081 * Some vendors do not comply with this rule for vendor-specific 3082 * commands and do not return any event. We want to support 3083 * unresponded commands for such cases only. 
3084 */ 3085 bt_dev_err(hdev, "unresponded command not supported"); 3086 return -EINVAL; 3087 } 3088 3089 skb = hci_prepare_cmd(hdev, opcode, plen, param); 3090 if (!skb) { 3091 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)", 3092 opcode); 3093 return -ENOMEM; 3094 } 3095 3096 hci_send_frame(hdev, skb); 3097 3098 return 0; 3099 } 3100 EXPORT_SYMBOL(__hci_cmd_send); 3101 3102 /* Get data from the previously sent command */ 3103 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode) 3104 { 3105 struct hci_command_hdr *hdr; 3106 3107 if (!hdev->sent_cmd) 3108 return NULL; 3109 3110 hdr = (void *) hdev->sent_cmd->data; 3111 3112 if (hdr->opcode != cpu_to_le16(opcode)) 3113 return NULL; 3114 3115 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode); 3116 3117 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE; 3118 } 3119 3120 /* Get data from last received event */ 3121 void *hci_recv_event_data(struct hci_dev *hdev, __u8 event) 3122 { 3123 struct hci_event_hdr *hdr; 3124 int offset; 3125 3126 if (!hdev->recv_event) 3127 return NULL; 3128 3129 hdr = (void *)hdev->recv_event->data; 3130 offset = sizeof(*hdr); 3131 3132 if (hdr->evt != event) { 3133 /* In case of LE metaevent check the subevent match */ 3134 if (hdr->evt == HCI_EV_LE_META) { 3135 struct hci_ev_le_meta *ev; 3136 3137 ev = (void *)hdev->recv_event->data + offset; 3138 offset += sizeof(*ev); 3139 if (ev->subevent == event) 3140 goto found; 3141 } 3142 return NULL; 3143 } 3144 3145 found: 3146 bt_dev_dbg(hdev, "event 0x%2.2x", event); 3147 3148 return hdev->recv_event->data + offset; 3149 } 3150 3151 /* Send ACL data */ 3152 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags) 3153 { 3154 struct hci_acl_hdr *hdr; 3155 int len = skb->len; 3156 3157 skb_push(skb, HCI_ACL_HDR_SIZE); 3158 skb_reset_transport_header(skb); 3159 hdr = (struct hci_acl_hdr *)skb_transport_header(skb); 3160 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags)); 3161 hdr->dlen = cpu_to_le16(len); 3162 } 3163 3164 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue, 3165 struct sk_buff *skb, __u16 flags) 3166 { 3167 struct hci_conn *conn = chan->conn; 3168 struct hci_dev *hdev = conn->hdev; 3169 struct sk_buff *list; 3170 3171 skb->len = skb_headlen(skb); 3172 skb->data_len = 0; 3173 3174 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT; 3175 3176 switch (hdev->dev_type) { 3177 case HCI_PRIMARY: 3178 hci_add_acl_hdr(skb, conn->handle, flags); 3179 break; 3180 case HCI_AMP: 3181 hci_add_acl_hdr(skb, chan->handle, flags); 3182 break; 3183 default: 3184 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type); 3185 return; 3186 } 3187 3188 list = skb_shinfo(skb)->frag_list; 3189 if (!list) { 3190 /* Non fragmented */ 3191 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len); 3192 3193 skb_queue_tail(queue, skb); 3194 } else { 3195 /* Fragmented */ 3196 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len); 3197 3198 skb_shinfo(skb)->frag_list = NULL; 3199 3200 /* Queue all fragments atomically. We need to use spin_lock_bh 3201 * here because of 6LoWPAN links, as there this function is 3202 * called from softirq and using normal spin lock could cause 3203 * deadlocks. 
3204 */ 3205 spin_lock_bh(&queue->lock); 3206 3207 __skb_queue_tail(queue, skb); 3208 3209 flags &= ~ACL_START; 3210 flags |= ACL_CONT; 3211 do { 3212 skb = list; list = list->next; 3213 3214 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT; 3215 hci_add_acl_hdr(skb, conn->handle, flags); 3216 3217 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len); 3218 3219 __skb_queue_tail(queue, skb); 3220 } while (list); 3221 3222 spin_unlock_bh(&queue->lock); 3223 } 3224 } 3225 3226 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags) 3227 { 3228 struct hci_dev *hdev = chan->conn->hdev; 3229 3230 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags); 3231 3232 hci_queue_acl(chan, &chan->data_q, skb, flags); 3233 3234 queue_work(hdev->workqueue, &hdev->tx_work); 3235 } 3236 3237 /* Send SCO data */ 3238 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb) 3239 { 3240 struct hci_dev *hdev = conn->hdev; 3241 struct hci_sco_hdr hdr; 3242 3243 BT_DBG("%s len %d", hdev->name, skb->len); 3244 3245 hdr.handle = cpu_to_le16(conn->handle); 3246 hdr.dlen = skb->len; 3247 3248 skb_push(skb, HCI_SCO_HDR_SIZE); 3249 skb_reset_transport_header(skb); 3250 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE); 3251 3252 hci_skb_pkt_type(skb) = HCI_SCODATA_PKT; 3253 3254 skb_queue_tail(&conn->data_q, skb); 3255 queue_work(hdev->workqueue, &hdev->tx_work); 3256 } 3257 3258 /* Send ISO data */ 3259 static void hci_add_iso_hdr(struct sk_buff *skb, __u16 handle, __u8 flags) 3260 { 3261 struct hci_iso_hdr *hdr; 3262 int len = skb->len; 3263 3264 skb_push(skb, HCI_ISO_HDR_SIZE); 3265 skb_reset_transport_header(skb); 3266 hdr = (struct hci_iso_hdr *)skb_transport_header(skb); 3267 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags)); 3268 hdr->dlen = cpu_to_le16(len); 3269 } 3270 3271 static void hci_queue_iso(struct hci_conn *conn, struct sk_buff_head *queue, 3272 struct sk_buff *skb) 3273 { 3274 struct hci_dev *hdev = conn->hdev; 3275 struct sk_buff *list; 3276 __u16 flags; 3277 3278 skb->len = skb_headlen(skb); 3279 skb->data_len = 0; 3280 3281 hci_skb_pkt_type(skb) = HCI_ISODATA_PKT; 3282 3283 list = skb_shinfo(skb)->frag_list; 3284 3285 flags = hci_iso_flags_pack(list ? ISO_START : ISO_SINGLE, 0x00); 3286 hci_add_iso_hdr(skb, conn->handle, flags); 3287 3288 if (!list) { 3289 /* Non fragmented */ 3290 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len); 3291 3292 skb_queue_tail(queue, skb); 3293 } else { 3294 /* Fragmented */ 3295 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len); 3296 3297 skb_shinfo(skb)->frag_list = NULL; 3298 3299 __skb_queue_tail(queue, skb); 3300 3301 do { 3302 skb = list; list = list->next; 3303 3304 hci_skb_pkt_type(skb) = HCI_ISODATA_PKT; 3305 flags = hci_iso_flags_pack(list ? 
ISO_CONT : ISO_END, 3306 0x00); 3307 hci_add_iso_hdr(skb, conn->handle, flags); 3308 3309 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len); 3310 3311 __skb_queue_tail(queue, skb); 3312 } while (list); 3313 } 3314 } 3315 3316 void hci_send_iso(struct hci_conn *conn, struct sk_buff *skb) 3317 { 3318 struct hci_dev *hdev = conn->hdev; 3319 3320 BT_DBG("%s len %d", hdev->name, skb->len); 3321 3322 hci_queue_iso(conn, &conn->data_q, skb); 3323 3324 queue_work(hdev->workqueue, &hdev->tx_work); 3325 } 3326 3327 /* ---- HCI TX task (outgoing data) ---- */ 3328 3329 /* HCI Connection scheduler */ 3330 static inline void hci_quote_sent(struct hci_conn *conn, int num, int *quote) 3331 { 3332 struct hci_dev *hdev; 3333 int cnt, q; 3334 3335 if (!conn) { 3336 *quote = 0; 3337 return; 3338 } 3339 3340 hdev = conn->hdev; 3341 3342 switch (conn->type) { 3343 case ACL_LINK: 3344 cnt = hdev->acl_cnt; 3345 break; 3346 case AMP_LINK: 3347 cnt = hdev->block_cnt; 3348 break; 3349 case SCO_LINK: 3350 case ESCO_LINK: 3351 cnt = hdev->sco_cnt; 3352 break; 3353 case LE_LINK: 3354 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt; 3355 break; 3356 case ISO_LINK: 3357 cnt = hdev->iso_mtu ? hdev->iso_cnt : 3358 hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt; 3359 break; 3360 default: 3361 cnt = 0; 3362 bt_dev_err(hdev, "unknown link type %d", conn->type); 3363 } 3364 3365 q = cnt / num; 3366 *quote = q ? q : 1; 3367 } 3368 3369 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, 3370 int *quote) 3371 { 3372 struct hci_conn_hash *h = &hdev->conn_hash; 3373 struct hci_conn *conn = NULL, *c; 3374 unsigned int num = 0, min = ~0; 3375 3376 /* We don't have to lock device here. Connections are always 3377 * added and removed with TX task disabled. */ 3378 3379 rcu_read_lock(); 3380 3381 list_for_each_entry_rcu(c, &h->list, list) { 3382 if (c->type != type || skb_queue_empty(&c->data_q)) 3383 continue; 3384 3385 if (c->state != BT_CONNECTED && c->state != BT_CONFIG) 3386 continue; 3387 3388 num++; 3389 3390 if (c->sent < min) { 3391 min = c->sent; 3392 conn = c; 3393 } 3394 3395 if (hci_conn_num(hdev, type) == num) 3396 break; 3397 } 3398 3399 rcu_read_unlock(); 3400 3401 hci_quote_sent(conn, num, quote); 3402 3403 BT_DBG("conn %p quote %d", conn, *quote); 3404 return conn; 3405 } 3406 3407 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type) 3408 { 3409 struct hci_conn_hash *h = &hdev->conn_hash; 3410 struct hci_conn *c; 3411 3412 bt_dev_err(hdev, "link tx timeout"); 3413 3414 rcu_read_lock(); 3415 3416 /* Kill stalled connections */ 3417 list_for_each_entry_rcu(c, &h->list, list) { 3418 if (c->type == type && c->sent) { 3419 bt_dev_err(hdev, "killing stalled connection %pMR", 3420 &c->dst); 3421 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM); 3422 } 3423 } 3424 3425 rcu_read_unlock(); 3426 } 3427 3428 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type, 3429 int *quote) 3430 { 3431 struct hci_conn_hash *h = &hdev->conn_hash; 3432 struct hci_chan *chan = NULL; 3433 unsigned int num = 0, min = ~0, cur_prio = 0; 3434 struct hci_conn *conn; 3435 int conn_num = 0; 3436 3437 BT_DBG("%s", hdev->name); 3438 3439 rcu_read_lock(); 3440 3441 list_for_each_entry_rcu(conn, &h->list, list) { 3442 struct hci_chan *tmp; 3443 3444 if (conn->type != type) 3445 continue; 3446 3447 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG) 3448 continue; 3449 3450 conn_num++; 3451 3452 list_for_each_entry_rcu(tmp, &conn->chan_list, list) { 3453 struct sk_buff *skb; 3454 3455 if 
(skb_queue_empty(&tmp->data_q)) 3456 continue; 3457 3458 skb = skb_peek(&tmp->data_q); 3459 if (skb->priority < cur_prio) 3460 continue; 3461 3462 if (skb->priority > cur_prio) { 3463 num = 0; 3464 min = ~0; 3465 cur_prio = skb->priority; 3466 } 3467 3468 num++; 3469 3470 if (conn->sent < min) { 3471 min = conn->sent; 3472 chan = tmp; 3473 } 3474 } 3475 3476 if (hci_conn_num(hdev, type) == conn_num) 3477 break; 3478 } 3479 3480 rcu_read_unlock(); 3481 3482 if (!chan) 3483 return NULL; 3484 3485 hci_quote_sent(chan->conn, num, quote); 3486 3487 BT_DBG("chan %p quote %d", chan, *quote); 3488 return chan; 3489 } 3490 3491 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type) 3492 { 3493 struct hci_conn_hash *h = &hdev->conn_hash; 3494 struct hci_conn *conn; 3495 int num = 0; 3496 3497 BT_DBG("%s", hdev->name); 3498 3499 rcu_read_lock(); 3500 3501 list_for_each_entry_rcu(conn, &h->list, list) { 3502 struct hci_chan *chan; 3503 3504 if (conn->type != type) 3505 continue; 3506 3507 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG) 3508 continue; 3509 3510 num++; 3511 3512 list_for_each_entry_rcu(chan, &conn->chan_list, list) { 3513 struct sk_buff *skb; 3514 3515 if (chan->sent) { 3516 chan->sent = 0; 3517 continue; 3518 } 3519 3520 if (skb_queue_empty(&chan->data_q)) 3521 continue; 3522 3523 skb = skb_peek(&chan->data_q); 3524 if (skb->priority >= HCI_PRIO_MAX - 1) 3525 continue; 3526 3527 skb->priority = HCI_PRIO_MAX - 1; 3528 3529 BT_DBG("chan %p skb %p promoted to %d", chan, skb, 3530 skb->priority); 3531 } 3532 3533 if (hci_conn_num(hdev, type) == num) 3534 break; 3535 } 3536 3537 rcu_read_unlock(); 3538 3539 } 3540 3541 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb) 3542 { 3543 /* Calculate count of blocks used by this packet */ 3544 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len); 3545 } 3546 3547 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type) 3548 { 3549 unsigned long last_tx; 3550 3551 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) 3552 return; 3553 3554 switch (type) { 3555 case LE_LINK: 3556 last_tx = hdev->le_last_tx; 3557 break; 3558 default: 3559 last_tx = hdev->acl_last_tx; 3560 break; 3561 } 3562 3563 /* tx timeout must be longer than maximum link supervision timeout 3564 * (40.9 seconds) 3565 */ 3566 if (!cnt && time_after(jiffies, last_tx + HCI_ACL_TX_TIMEOUT)) 3567 hci_link_tx_to(hdev, type); 3568 } 3569 3570 /* Schedule SCO */ 3571 static void hci_sched_sco(struct hci_dev *hdev) 3572 { 3573 struct hci_conn *conn; 3574 struct sk_buff *skb; 3575 int quote; 3576 3577 BT_DBG("%s", hdev->name); 3578 3579 if (!hci_conn_num(hdev, SCO_LINK)) 3580 return; 3581 3582 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) { 3583 while (quote-- && (skb = skb_dequeue(&conn->data_q))) { 3584 BT_DBG("skb %p len %d", skb, skb->len); 3585 hci_send_frame(hdev, skb); 3586 3587 conn->sent++; 3588 if (conn->sent == ~0) 3589 conn->sent = 0; 3590 } 3591 } 3592 } 3593 3594 static void hci_sched_esco(struct hci_dev *hdev) 3595 { 3596 struct hci_conn *conn; 3597 struct sk_buff *skb; 3598 int quote; 3599 3600 BT_DBG("%s", hdev->name); 3601 3602 if (!hci_conn_num(hdev, ESCO_LINK)) 3603 return; 3604 3605 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, 3606 &quote))) { 3607 while (quote-- && (skb = skb_dequeue(&conn->data_q))) { 3608 BT_DBG("skb %p len %d", skb, skb->len); 3609 hci_send_frame(hdev, skb); 3610 3611 conn->sent++; 3612 if (conn->sent == ~0) 3613 conn->sent = 0; 3614 } 3615 }
3616 } 3617 3618 static void hci_sched_acl_pkt(struct hci_dev *hdev) 3619 { 3620 unsigned int cnt = hdev->acl_cnt; 3621 struct hci_chan *chan; 3622 struct sk_buff *skb; 3623 int quote; 3624 3625 __check_timeout(hdev, cnt, ACL_LINK); 3626 3627 while (hdev->acl_cnt && 3628 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) { 3629 u32 priority = (skb_peek(&chan->data_q))->priority; 3630 while (quote-- && (skb = skb_peek(&chan->data_q))) { 3631 BT_DBG("chan %p skb %p len %d priority %u", chan, skb, 3632 skb->len, skb->priority); 3633 3634 /* Stop if priority has changed */ 3635 if (skb->priority < priority) 3636 break; 3637 3638 skb = skb_dequeue(&chan->data_q); 3639 3640 hci_conn_enter_active_mode(chan->conn, 3641 bt_cb(skb)->force_active); 3642 3643 hci_send_frame(hdev, skb); 3644 hdev->acl_last_tx = jiffies; 3645 3646 hdev->acl_cnt--; 3647 chan->sent++; 3648 chan->conn->sent++; 3649 3650 /* Send pending SCO packets right away */ 3651 hci_sched_sco(hdev); 3652 hci_sched_esco(hdev); 3653 } 3654 } 3655 3656 if (cnt != hdev->acl_cnt) 3657 hci_prio_recalculate(hdev, ACL_LINK); 3658 } 3659 3660 static void hci_sched_acl_blk(struct hci_dev *hdev) 3661 { 3662 unsigned int cnt = hdev->block_cnt; 3663 struct hci_chan *chan; 3664 struct sk_buff *skb; 3665 int quote; 3666 u8 type; 3667 3668 BT_DBG("%s", hdev->name); 3669 3670 if (hdev->dev_type == HCI_AMP) 3671 type = AMP_LINK; 3672 else 3673 type = ACL_LINK; 3674 3675 __check_timeout(hdev, cnt, type); 3676 3677 while (hdev->block_cnt > 0 && 3678 (chan = hci_chan_sent(hdev, type, &quote))) { 3679 u32 priority = (skb_peek(&chan->data_q))->priority; 3680 while (quote > 0 && (skb = skb_peek(&chan->data_q))) { 3681 int blocks; 3682 3683 BT_DBG("chan %p skb %p len %d priority %u", chan, skb, 3684 skb->len, skb->priority); 3685 3686 /* Stop if priority has changed */ 3687 if (skb->priority < priority) 3688 break; 3689 3690 skb = skb_dequeue(&chan->data_q); 3691 3692 blocks = __get_blocks(hdev, skb); 3693 if (blocks > hdev->block_cnt) 3694 return; 3695 3696 hci_conn_enter_active_mode(chan->conn, 3697 bt_cb(skb)->force_active); 3698 3699 hci_send_frame(hdev, skb); 3700 hdev->acl_last_tx = jiffies; 3701 3702 hdev->block_cnt -= blocks; 3703 quote -= blocks; 3704 3705 chan->sent += blocks; 3706 chan->conn->sent += blocks; 3707 } 3708 } 3709 3710 if (cnt != hdev->block_cnt) 3711 hci_prio_recalculate(hdev, type); 3712 } 3713 3714 static void hci_sched_acl(struct hci_dev *hdev) 3715 { 3716 BT_DBG("%s", hdev->name); 3717 3718 /* No ACL link over BR/EDR controller */ 3719 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY) 3720 return; 3721 3722 /* No AMP link over AMP controller */ 3723 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP) 3724 return; 3725 3726 switch (hdev->flow_ctl_mode) { 3727 case HCI_FLOW_CTL_MODE_PACKET_BASED: 3728 hci_sched_acl_pkt(hdev); 3729 break; 3730 3731 case HCI_FLOW_CTL_MODE_BLOCK_BASED: 3732 hci_sched_acl_blk(hdev); 3733 break; 3734 } 3735 } 3736 3737 static void hci_sched_le(struct hci_dev *hdev) 3738 { 3739 struct hci_chan *chan; 3740 struct sk_buff *skb; 3741 int quote, cnt, tmp; 3742 3743 BT_DBG("%s", hdev->name); 3744 3745 if (!hci_conn_num(hdev, LE_LINK)) 3746 return; 3747 3748 cnt = hdev->le_pkts ?
hdev->le_cnt : hdev->acl_cnt; 3749 3750 __check_timeout(hdev, cnt, LE_LINK); 3751 3752 tmp = cnt; 3753 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) { 3754 u32 priority = (skb_peek(&chan->data_q))->priority; 3755 while (quote-- && (skb = skb_peek(&chan->data_q))) { 3756 BT_DBG("chan %p skb %p len %d priority %u", chan, skb, 3757 skb->len, skb->priority); 3758 3759 /* Stop if priority has changed */ 3760 if (skb->priority < priority) 3761 break; 3762 3763 skb = skb_dequeue(&chan->data_q); 3764 3765 hci_send_frame(hdev, skb); 3766 hdev->le_last_tx = jiffies; 3767 3768 cnt--; 3769 chan->sent++; 3770 chan->conn->sent++; 3771 3772 /* Send pending SCO packets right away */ 3773 hci_sched_sco(hdev); 3774 hci_sched_esco(hdev); 3775 } 3776 } 3777 3778 if (hdev->le_pkts) 3779 hdev->le_cnt = cnt; 3780 else 3781 hdev->acl_cnt = cnt; 3782 3783 if (cnt != tmp) 3784 hci_prio_recalculate(hdev, LE_LINK); 3785 } 3786 3787 /* Schedule CIS */ 3788 static void hci_sched_iso(struct hci_dev *hdev) 3789 { 3790 struct hci_conn *conn; 3791 struct sk_buff *skb; 3792 int quote, *cnt; 3793 3794 BT_DBG("%s", hdev->name); 3795 3796 if (!hci_conn_num(hdev, ISO_LINK)) 3797 return; 3798 3799 cnt = hdev->iso_pkts ? &hdev->iso_cnt : 3800 hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt; 3801 while (*cnt && (conn = hci_low_sent(hdev, ISO_LINK, &quote))) { 3802 while (quote-- && (skb = skb_dequeue(&conn->data_q))) { 3803 BT_DBG("skb %p len %d", skb, skb->len); 3804 hci_send_frame(hdev, skb); 3805 3806 conn->sent++; 3807 if (conn->sent == ~0) 3808 conn->sent = 0; 3809 (*cnt)--; 3810 } 3811 } 3812 } 3813 3814 static void hci_tx_work(struct work_struct *work) 3815 { 3816 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work); 3817 struct sk_buff *skb; 3818 3819 BT_DBG("%s acl %d sco %d le %d iso %d", hdev->name, hdev->acl_cnt, 3820 hdev->sco_cnt, hdev->le_cnt, hdev->iso_cnt); 3821 3822 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { 3823 /* Schedule queues and send stuff to HCI driver */ 3824 hci_sched_sco(hdev); 3825 hci_sched_esco(hdev); 3826 hci_sched_iso(hdev); 3827 hci_sched_acl(hdev); 3828 hci_sched_le(hdev); 3829 } 3830 3831 /* Send next queued raw (unknown type) packet */ 3832 while ((skb = skb_dequeue(&hdev->raw_q))) 3833 hci_send_frame(hdev, skb); 3834 } 3835 3836 /* ----- HCI RX task (incoming data processing) ----- */ 3837 3838 /* ACL data packet */ 3839 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb) 3840 { 3841 struct hci_acl_hdr *hdr = (void *) skb->data; 3842 struct hci_conn *conn; 3843 __u16 handle, flags; 3844 3845 skb_pull(skb, HCI_ACL_HDR_SIZE); 3846 3847 handle = __le16_to_cpu(hdr->handle); 3848 flags = hci_flags(handle); 3849 handle = hci_handle(handle); 3850 3851 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len, 3852 handle, flags); 3853 3854 hdev->stat.acl_rx++; 3855 3856 hci_dev_lock(hdev); 3857 conn = hci_conn_hash_lookup_handle(hdev, handle); 3858 hci_dev_unlock(hdev); 3859 3860 if (conn) { 3861 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF); 3862 3863 /* Send to upper protocol */ 3864 l2cap_recv_acldata(conn, skb, flags); 3865 return; 3866 } else { 3867 bt_dev_err(hdev, "ACL packet for unknown connection handle %d", 3868 handle); 3869 } 3870 3871 kfree_skb(skb); 3872 } 3873 3874 /* SCO data packet */ 3875 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb) 3876 { 3877 struct hci_sco_hdr *hdr = (void *) skb->data; 3878 struct hci_conn *conn; 3879 __u16 handle, flags; 3880 3881 skb_pull(skb,
HCI_SCO_HDR_SIZE); 3882 3883 handle = __le16_to_cpu(hdr->handle); 3884 flags = hci_flags(handle); 3885 handle = hci_handle(handle); 3886 3887 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len, 3888 handle, flags); 3889 3890 hdev->stat.sco_rx++; 3891 3892 hci_dev_lock(hdev); 3893 conn = hci_conn_hash_lookup_handle(hdev, handle); 3894 hci_dev_unlock(hdev); 3895 3896 if (conn) { 3897 /* Send to upper protocol */ 3898 hci_skb_pkt_status(skb) = flags & 0x03; 3899 sco_recv_scodata(conn, skb); 3900 return; 3901 } else { 3902 bt_dev_err_ratelimited(hdev, "SCO packet for unknown connection handle %d", 3903 handle); 3904 } 3905 3906 kfree_skb(skb); 3907 } 3908 3909 static void hci_isodata_packet(struct hci_dev *hdev, struct sk_buff *skb) 3910 { 3911 struct hci_iso_hdr *hdr; 3912 struct hci_conn *conn; 3913 __u16 handle, flags; 3914 3915 hdr = skb_pull_data(skb, sizeof(*hdr)); 3916 if (!hdr) { 3917 bt_dev_err(hdev, "ISO packet too small"); 3918 goto drop; 3919 } 3920 3921 handle = __le16_to_cpu(hdr->handle); 3922 flags = hci_flags(handle); 3923 handle = hci_handle(handle); 3924 3925 bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len, 3926 handle, flags); 3927 3928 hci_dev_lock(hdev); 3929 conn = hci_conn_hash_lookup_handle(hdev, handle); 3930 hci_dev_unlock(hdev); 3931 3932 if (!conn) { 3933 bt_dev_err(hdev, "ISO packet for unknown connection handle %d", 3934 handle); 3935 goto drop; 3936 } 3937 3938 /* Send to upper protocol */ 3939 iso_recv(conn, skb, flags); 3940 return; 3941 3942 drop: 3943 kfree_skb(skb); 3944 } 3945 3946 static bool hci_req_is_complete(struct hci_dev *hdev) 3947 { 3948 struct sk_buff *skb; 3949 3950 skb = skb_peek(&hdev->cmd_q); 3951 if (!skb) 3952 return true; 3953 3954 return (bt_cb(skb)->hci.req_flags & HCI_REQ_START); 3955 } 3956 3957 static void hci_resend_last(struct hci_dev *hdev) 3958 { 3959 struct hci_command_hdr *sent; 3960 struct sk_buff *skb; 3961 u16 opcode; 3962 3963 if (!hdev->sent_cmd) 3964 return; 3965 3966 sent = (void *) hdev->sent_cmd->data; 3967 opcode = __le16_to_cpu(sent->opcode); 3968 if (opcode == HCI_OP_RESET) 3969 return; 3970 3971 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL); 3972 if (!skb) 3973 return; 3974 3975 skb_queue_head(&hdev->cmd_q, skb); 3976 queue_work(hdev->workqueue, &hdev->cmd_work); 3977 } 3978 3979 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status, 3980 hci_req_complete_t *req_complete, 3981 hci_req_complete_skb_t *req_complete_skb) 3982 { 3983 struct sk_buff *skb; 3984 unsigned long flags; 3985 3986 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status); 3987 3988 /* If the completed command doesn't match the last one that was 3989 * sent we need to do special handling of it. 3990 */ 3991 if (!hci_sent_cmd_data(hdev, opcode)) { 3992 /* Some CSR based controllers generate a spontaneous 3993 * reset complete event during init and any pending 3994 * command will never be completed. In such a case we 3995 * need to resend whatever was the last sent 3996 * command. 3997 */ 3998 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET) 3999 hci_resend_last(hdev); 4000 4001 return; 4002 } 4003 4004 /* If we reach this point this event matches the last command sent */ 4005 hci_dev_clear_flag(hdev, HCI_CMD_PENDING); 4006 4007 /* If the command succeeded and there's still more commands in 4008 * this request the request is not yet complete. 
4009 */ 4010 if (!status && !hci_req_is_complete(hdev)) 4011 return; 4012 4013 /* If this was the last command in a request the complete 4014 * callback would be found in hdev->sent_cmd instead of the 4015 * command queue (hdev->cmd_q). 4016 */ 4017 if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) { 4018 *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb; 4019 return; 4020 } 4021 4022 if (bt_cb(hdev->sent_cmd)->hci.req_complete) { 4023 *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete; 4024 return; 4025 } 4026 4027 /* Remove all pending commands belonging to this request */ 4028 spin_lock_irqsave(&hdev->cmd_q.lock, flags); 4029 while ((skb = __skb_dequeue(&hdev->cmd_q))) { 4030 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) { 4031 __skb_queue_head(&hdev->cmd_q, skb); 4032 break; 4033 } 4034 4035 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB) 4036 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb; 4037 else 4038 *req_complete = bt_cb(skb)->hci.req_complete; 4039 dev_kfree_skb_irq(skb); 4040 } 4041 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags); 4042 } 4043 4044 static void hci_rx_work(struct work_struct *work) 4045 { 4046 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work); 4047 struct sk_buff *skb; 4048 4049 BT_DBG("%s", hdev->name); 4050 4051 /* The kcov_remote functions used for collecting packet parsing 4052 * coverage information from this background thread and associate 4053 * the coverage with the syscall's thread which originally injected 4054 * the packet. This helps fuzzing the kernel. 4055 */ 4056 for (; (skb = skb_dequeue(&hdev->rx_q)); kcov_remote_stop()) { 4057 kcov_remote_start_common(skb_get_kcov_handle(skb)); 4058 4059 /* Send copy to monitor */ 4060 hci_send_to_monitor(hdev, skb); 4061 4062 if (atomic_read(&hdev->promisc)) { 4063 /* Send copy to the sockets */ 4064 hci_send_to_sock(hdev, skb); 4065 } 4066 4067 /* If the device has been opened in HCI_USER_CHANNEL, 4068 * the userspace has exclusive access to device. 4069 * When device is HCI_INIT, we still need to process 4070 * the data packets to the driver in order 4071 * to complete its setup(). 4072 */ 4073 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && 4074 !test_bit(HCI_INIT, &hdev->flags)) { 4075 kfree_skb(skb); 4076 continue; 4077 } 4078 4079 if (test_bit(HCI_INIT, &hdev->flags)) { 4080 /* Don't process data packets in this states. 
*/ 4081 switch (hci_skb_pkt_type(skb)) { 4082 case HCI_ACLDATA_PKT: 4083 case HCI_SCODATA_PKT: 4084 case HCI_ISODATA_PKT: 4085 kfree_skb(skb); 4086 continue; 4087 } 4088 } 4089 4090 /* Process frame */ 4091 switch (hci_skb_pkt_type(skb)) { 4092 case HCI_EVENT_PKT: 4093 BT_DBG("%s Event packet", hdev->name); 4094 hci_event_packet(hdev, skb); 4095 break; 4096 4097 case HCI_ACLDATA_PKT: 4098 BT_DBG("%s ACL data packet", hdev->name); 4099 hci_acldata_packet(hdev, skb); 4100 break; 4101 4102 case HCI_SCODATA_PKT: 4103 BT_DBG("%s SCO data packet", hdev->name); 4104 hci_scodata_packet(hdev, skb); 4105 break; 4106 4107 case HCI_ISODATA_PKT: 4108 BT_DBG("%s ISO data packet", hdev->name); 4109 hci_isodata_packet(hdev, skb); 4110 break; 4111 4112 default: 4113 kfree_skb(skb); 4114 break; 4115 } 4116 } 4117 } 4118 4119 static void hci_cmd_work(struct work_struct *work) 4120 { 4121 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work); 4122 struct sk_buff *skb; 4123 4124 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name, 4125 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q)); 4126 4127 /* Send queued commands */ 4128 if (atomic_read(&hdev->cmd_cnt)) { 4129 skb = skb_dequeue(&hdev->cmd_q); 4130 if (!skb) 4131 return; 4132 4133 kfree_skb(hdev->sent_cmd); 4134 4135 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL); 4136 if (hdev->sent_cmd) { 4137 int res; 4138 if (hci_req_status_pend(hdev)) 4139 hci_dev_set_flag(hdev, HCI_CMD_PENDING); 4140 atomic_dec(&hdev->cmd_cnt); 4141 4142 res = hci_send_frame(hdev, skb); 4143 if (res < 0) 4144 __hci_cmd_sync_cancel(hdev, -res); 4145 4146 rcu_read_lock(); 4147 if (test_bit(HCI_RESET, &hdev->flags) || 4148 hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE)) 4149 cancel_delayed_work(&hdev->cmd_timer); 4150 else 4151 queue_delayed_work(hdev->workqueue, &hdev->cmd_timer, 4152 HCI_CMD_TIMEOUT); 4153 rcu_read_unlock(); 4154 } else { 4155 skb_queue_head(&hdev->cmd_q, skb); 4156 queue_work(hdev->workqueue, &hdev->cmd_work); 4157 } 4158 } 4159 } 4160
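
/*
 * Illustrative sketch (not part of hci_core.c): roughly how a minimal
 * transport driver plugs into the core above. As hci_register_dev()
 * enforces, only the open, close and send callbacks are mandatory;
 * frames coming back from the hardware are handed to hci_recv_frame().
 * All "sample_*" names are hypothetical, and the block is kept under
 * "#if 0" because it is illustration only.
 */
#if 0
static int sample_open(struct hci_dev *hdev)
{
	/* Bring up the transport; nothing to do in this sketch */
	return 0;
}

static int sample_close(struct hci_dev *hdev)
{
	return 0;
}

static int sample_send(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* A real driver would push the frame to its hardware here;
	 * the sketch just consumes it.
	 */
	kfree_skb(skb);
	return 0;
}

/* Feed bytes received from the wire back into the core as an event */
static int sample_rx_event(struct hci_dev *hdev, const void *data, size_t len)
{
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
	skb_put_data(skb, data, len);

	return hci_recv_frame(hdev, skb);
}

static struct hci_dev *sample_setup(void)
{
	struct hci_dev *hdev = hci_alloc_dev();

	if (!hdev)
		return NULL;

	hdev->bus = HCI_VIRTUAL;
	hdev->open = sample_open;
	hdev->close = sample_close;
	hdev->send = sample_send;

	if (hci_register_dev(hdev) < 0) {
		hci_free_dev(hdev);
		return NULL;
	}

	return hdev;
}

static void sample_teardown(struct hci_dev *hdev)
{
	hci_unregister_dev(hdev);
	hci_free_dev(hdev);
}
#endif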
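
/*
 * Illustrative sketch (not part of hci_core.c): how an upper protocol
 * hooks into the core through hci_register_cb()/hci_unregister_cb()
 * above. The .name, .connect_cfm and .disconn_cfm members are assumed
 * to follow struct hci_cb as declared in hci_core.h; "sample_proto" is
 * a hypothetical protocol used only for illustration.
 */
#if 0
static void sample_proto_connect_cfm(struct hci_conn *conn, __u8 status)
{
	BT_DBG("conn %p status 0x%2.2x", conn, status);
}

static void sample_proto_disconn_cfm(struct hci_conn *conn, __u8 reason)
{
	BT_DBG("conn %p reason 0x%2.2x", conn, reason);
}

static struct hci_cb sample_proto_cb = {
	.name		= "sample_proto",
	.connect_cfm	= sample_proto_connect_cfm,
	.disconn_cfm	= sample_proto_disconn_cfm,
};

static int sample_proto_register(void)
{
	return hci_register_cb(&sample_proto_cb);
}

static void sample_proto_unregister(void)
{
	hci_unregister_cb(&sample_proto_cb);
}
#endif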
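
/*
 * Worked example for the handle/flag packing used by hci_add_acl_hdr(),
 * hci_add_iso_hdr() and the RX handlers above: the 12-bit connection
 * handle occupies the low bits and the 4-bit packet boundary/broadcast
 * flags the high bits of the 16-bit header field. Assuming handle
 * 0x002a sent with ACL_START (0x2):
 *
 *   packed             = (0x002a & 0x0fff) | (0x2 << 12) = 0x202a
 *   hci_handle(packed) = 0x202a & 0x0fff                 = 0x002a
 *   hci_flags(packed)  = 0x202a >> 12                    = 0x2
 */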