1 /* 2 BlueZ - Bluetooth protocol stack for Linux 3 Copyright (C) 2000-2001 Qualcomm Incorporated 4 Copyright (C) 2011 ProFUSION Embedded Systems 5 6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> 7 8 This program is free software; you can redistribute it and/or modify 9 it under the terms of the GNU General Public License version 2 as 10 published by the Free Software Foundation; 11 12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. 15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY 16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES 17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 20 21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, 22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS 23 SOFTWARE IS DISCLAIMED. 24 */ 25 26 /* Bluetooth HCI core. */ 27 28 #include <linux/jiffies.h> 29 #include <linux/module.h> 30 #include <linux/kmod.h> 31 32 #include <linux/types.h> 33 #include <linux/errno.h> 34 #include <linux/kernel.h> 35 #include <linux/sched.h> 36 #include <linux/slab.h> 37 #include <linux/poll.h> 38 #include <linux/fcntl.h> 39 #include <linux/init.h> 40 #include <linux/skbuff.h> 41 #include <linux/workqueue.h> 42 #include <linux/interrupt.h> 43 #include <linux/rfkill.h> 44 #include <linux/timer.h> 45 #include <linux/crypto.h> 46 #include <net/sock.h> 47 48 #include <asm/system.h> 49 #include <linux/uaccess.h> 50 #include <asm/unaligned.h> 51 52 #include <net/bluetooth/bluetooth.h> 53 #include <net/bluetooth/hci_core.h> 54 55 #define AUTO_OFF_TIMEOUT 2000 56 57 static void hci_rx_work(struct work_struct *work); 58 static void hci_cmd_work(struct work_struct *work); 59 static void hci_tx_work(struct work_struct *work); 60 61 /* HCI device list */ 62 LIST_HEAD(hci_dev_list); 63 DEFINE_RWLOCK(hci_dev_list_lock); 64 65 /* HCI callback list */ 66 LIST_HEAD(hci_cb_list); 67 DEFINE_RWLOCK(hci_cb_list_lock); 68 69 /* ---- HCI notifications ---- */ 70 71 static void hci_notify(struct hci_dev *hdev, int event) 72 { 73 hci_sock_dev_event(hdev, event); 74 } 75 76 /* ---- HCI requests ---- */ 77 78 void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result) 79 { 80 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result); 81 82 /* If this is the init phase check if the completed command matches 83 * the last init command, and if not just return. 84 */ 85 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) { 86 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data; 87 struct sk_buff *skb; 88 89 /* Some CSR based controllers generate a spontaneous 90 * reset complete event during init and any pending 91 * command will never be completed. In such a case we 92 * need to resend whatever was the last sent 93 * command. 94 */ 95 96 if (cmd != HCI_OP_RESET || sent->opcode == HCI_OP_RESET) 97 return; 98 99 skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC); 100 if (skb) { 101 skb_queue_head(&hdev->cmd_q, skb); 102 queue_work(hdev->workqueue, &hdev->cmd_work); 103 } 104 105 return; 106 } 107 108 if (hdev->req_status == HCI_REQ_PEND) { 109 hdev->req_result = result; 110 hdev->req_status = HCI_REQ_DONE; 111 wake_up_interruptible(&hdev->req_wait_q); 112 } 113 } 114 115 static void hci_req_cancel(struct hci_dev *hdev, int err) 116 { 117 BT_DBG("%s err 0x%2.2x", hdev->name, err); 118 119 if (hdev->req_status == HCI_REQ_PEND) { 120 hdev->req_result = err; 121 hdev->req_status = HCI_REQ_CANCELED; 122 wake_up_interruptible(&hdev->req_wait_q); 123 } 124 } 125 126 /* Execute request and wait for completion. */ 127 static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt), 128 unsigned long opt, __u32 timeout) 129 { 130 DECLARE_WAITQUEUE(wait, current); 131 int err = 0; 132 133 BT_DBG("%s start", hdev->name); 134 135 hdev->req_status = HCI_REQ_PEND; 136 137 add_wait_queue(&hdev->req_wait_q, &wait); 138 set_current_state(TASK_INTERRUPTIBLE); 139 140 req(hdev, opt); 141 schedule_timeout(timeout); 142 143 remove_wait_queue(&hdev->req_wait_q, &wait); 144 145 if (signal_pending(current)) 146 return -EINTR; 147 148 switch (hdev->req_status) { 149 case HCI_REQ_DONE: 150 err = -bt_to_errno(hdev->req_result); 151 break; 152 153 case HCI_REQ_CANCELED: 154 err = -hdev->req_result; 155 break; 156 157 default: 158 err = -ETIMEDOUT; 159 break; 160 } 161 162 hdev->req_status = hdev->req_result = 0; 163 164 BT_DBG("%s end: err %d", hdev->name, err); 165 166 return err; 167 } 168 169 static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt), 170 unsigned long opt, __u32 timeout) 171 { 172 int ret; 173 174 if (!test_bit(HCI_UP, &hdev->flags)) 175 return -ENETDOWN; 176 177 /* Serialize all requests */ 178 hci_req_lock(hdev); 179 ret = __hci_request(hdev, req, opt, timeout); 180 hci_req_unlock(hdev); 181 182 return ret; 183 } 184 185 static void hci_reset_req(struct hci_dev *hdev, unsigned long opt) 186 { 187 BT_DBG("%s %ld", hdev->name, opt); 188 189 /* Reset device */ 190 set_bit(HCI_RESET, &hdev->flags); 191 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL); 192 } 193 194 static void bredr_init(struct hci_dev *hdev) 195 { 196 struct hci_cp_delete_stored_link_key cp; 197 __le16 param; 198 __u8 flt_type; 199 200 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED; 201 202 /* Mandatory initialization */ 203 204 /* Reset */ 205 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) { 206 set_bit(HCI_RESET, &hdev->flags); 207 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL); 208 } 209 210 /* Read Local Supported Features */ 211 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL); 212 213 /* Read Local Version */ 214 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL); 215 216 /* Read Buffer Size (ACL mtu, max pkt, etc.) */ 217 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL); 218 219 /* Read BD Address */ 220 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL); 221 222 /* Read Class of Device */ 223 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL); 224 225 /* Read Local Name */ 226 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL); 227 228 /* Read Voice Setting */ 229 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL); 230 231 /* Optional initialization */ 232 233 /* Clear Event Filters */ 234 flt_type = HCI_FLT_CLEAR_ALL; 235 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type); 236 237 /* Connection accept timeout ~20 secs */ 238 param = cpu_to_le16(0x7d00); 239 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, ¶m); 240 241 bacpy(&cp.bdaddr, BDADDR_ANY); 242 cp.delete_all = 1; 243 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp); 244 } 245 246 static void amp_init(struct hci_dev *hdev) 247 { 248 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED; 249 250 /* Reset */ 251 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL); 252 253 /* Read Local Version */ 254 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL); 255 } 256 257 static void hci_init_req(struct hci_dev *hdev, unsigned long opt) 258 { 259 struct sk_buff *skb; 260 261 BT_DBG("%s %ld", hdev->name, opt); 262 263 /* Driver initialization */ 264 265 /* Special commands */ 266 while ((skb = skb_dequeue(&hdev->driver_init))) { 267 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT; 268 skb->dev = (void *) hdev; 269 270 skb_queue_tail(&hdev->cmd_q, skb); 271 queue_work(hdev->workqueue, &hdev->cmd_work); 272 } 273 skb_queue_purge(&hdev->driver_init); 274 275 switch (hdev->dev_type) { 276 case HCI_BREDR: 277 bredr_init(hdev); 278 break; 279 280 case HCI_AMP: 281 amp_init(hdev); 282 break; 283 284 default: 285 BT_ERR("Unknown device type %d", hdev->dev_type); 286 break; 287 } 288 289 } 290 291 static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt) 292 { 293 BT_DBG("%s", hdev->name); 294 295 /* Read LE buffer size */ 296 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL); 297 } 298 299 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt) 300 { 301 __u8 scan = opt; 302 303 BT_DBG("%s %x", hdev->name, scan); 304 305 /* Inquiry and Page scans */ 306 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); 307 } 308 309 static void hci_auth_req(struct hci_dev *hdev, unsigned long opt) 310 { 311 __u8 auth = opt; 312 313 BT_DBG("%s %x", hdev->name, auth); 314 315 /* Authentication */ 316 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth); 317 } 318 319 static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt) 320 { 321 __u8 encrypt = opt; 322 323 BT_DBG("%s %x", hdev->name, encrypt); 324 325 /* Encryption */ 326 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt); 327 } 328 329 static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt) 330 { 331 __le16 policy = cpu_to_le16(opt); 332 333 BT_DBG("%s %x", hdev->name, policy); 334 335 /* Default link policy */ 336 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy); 337 } 338 339 /* Get HCI device by index. 340 * Device is held on return. */ 341 struct hci_dev *hci_dev_get(int index) 342 { 343 struct hci_dev *hdev = NULL, *d; 344 345 BT_DBG("%d", index); 346 347 if (index < 0) 348 return NULL; 349 350 read_lock(&hci_dev_list_lock); 351 list_for_each_entry(d, &hci_dev_list, list) { 352 if (d->id == index) { 353 hdev = hci_dev_hold(d); 354 break; 355 } 356 } 357 read_unlock(&hci_dev_list_lock); 358 return hdev; 359 } 360 361 /* ---- Inquiry support ---- */ 362 363 bool hci_discovery_active(struct hci_dev *hdev) 364 { 365 struct discovery_state *discov = &hdev->discovery; 366 367 switch (discov->state) { 368 case DISCOVERY_FINDING: 369 case DISCOVERY_RESOLVING: 370 return true; 371 372 default: 373 return false; 374 } 375 } 376 377 void hci_discovery_set_state(struct hci_dev *hdev, int state) 378 { 379 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state); 380 381 if (hdev->discovery.state == state) 382 return; 383 384 switch (state) { 385 case DISCOVERY_STOPPED: 386 if (hdev->discovery.state != DISCOVERY_STARTING) 387 mgmt_discovering(hdev, 0); 388 hdev->discovery.type = 0; 389 break; 390 case DISCOVERY_STARTING: 391 break; 392 case DISCOVERY_FINDING: 393 mgmt_discovering(hdev, 1); 394 break; 395 case DISCOVERY_RESOLVING: 396 break; 397 case DISCOVERY_STOPPING: 398 break; 399 } 400 401 hdev->discovery.state = state; 402 } 403 404 static void inquiry_cache_flush(struct hci_dev *hdev) 405 { 406 struct discovery_state *cache = &hdev->discovery; 407 struct inquiry_entry *p, *n; 408 409 list_for_each_entry_safe(p, n, &cache->all, all) { 410 list_del(&p->all); 411 kfree(p); 412 } 413 414 INIT_LIST_HEAD(&cache->unknown); 415 INIT_LIST_HEAD(&cache->resolve); 416 } 417 418 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr) 419 { 420 struct discovery_state *cache = &hdev->discovery; 421 struct inquiry_entry *e; 422 423 BT_DBG("cache %p, %s", cache, batostr(bdaddr)); 424 425 list_for_each_entry(e, &cache->all, all) { 426 if (!bacmp(&e->data.bdaddr, bdaddr)) 427 return e; 428 } 429 430 return NULL; 431 } 432 433 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev, 434 bdaddr_t *bdaddr) 435 { 436 struct discovery_state *cache = &hdev->discovery; 437 struct inquiry_entry *e; 438 439 BT_DBG("cache %p, %s", cache, batostr(bdaddr)); 440 441 list_for_each_entry(e, &cache->unknown, list) { 442 if (!bacmp(&e->data.bdaddr, bdaddr)) 443 return e; 444 } 445 446 return NULL; 447 } 448 449 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev, 450 bdaddr_t *bdaddr, 451 int state) 452 { 453 struct discovery_state *cache = &hdev->discovery; 454 struct inquiry_entry *e; 455 456 BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state); 457 458 list_for_each_entry(e, &cache->resolve, list) { 459 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state) 460 return e; 461 if (!bacmp(&e->data.bdaddr, bdaddr)) 462 return e; 463 } 464 465 return NULL; 466 } 467 468 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev, 469 struct inquiry_entry *ie) 470 { 471 struct discovery_state *cache = &hdev->discovery; 472 struct list_head *pos = &cache->resolve; 473 struct inquiry_entry *p; 474 475 list_del(&ie->list); 476 477 list_for_each_entry(p, &cache->resolve, list) { 478 if (p->name_state != NAME_PENDING && 479 abs(p->data.rssi) >= abs(ie->data.rssi)) 480 break; 481 pos = &p->list; 482 } 483 484 list_add(&ie->list, pos); 485 } 486 487 bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data, 488 bool name_known, bool *ssp) 489 { 490 struct discovery_state *cache = &hdev->discovery; 491 struct inquiry_entry *ie; 492 493 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr)); 494 495 if (ssp) 496 *ssp = data->ssp_mode; 497 498 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr); 499 if (ie) { 500 if (ie->data.ssp_mode && ssp) 501 *ssp = true; 502 503 if (ie->name_state == NAME_NEEDED && 504 data->rssi != ie->data.rssi) { 505 ie->data.rssi = data->rssi; 506 hci_inquiry_cache_update_resolve(hdev, ie); 507 } 508 509 goto update; 510 } 511 512 /* Entry not in the cache. Add new one. */ 513 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC); 514 if (!ie) 515 return false; 516 517 list_add(&ie->all, &cache->all); 518 519 if (name_known) { 520 ie->name_state = NAME_KNOWN; 521 } else { 522 ie->name_state = NAME_NOT_KNOWN; 523 list_add(&ie->list, &cache->unknown); 524 } 525 526 update: 527 if (name_known && ie->name_state != NAME_KNOWN && 528 ie->name_state != NAME_PENDING) { 529 ie->name_state = NAME_KNOWN; 530 list_del(&ie->list); 531 } 532 533 memcpy(&ie->data, data, sizeof(*data)); 534 ie->timestamp = jiffies; 535 cache->timestamp = jiffies; 536 537 if (ie->name_state == NAME_NOT_KNOWN) 538 return false; 539 540 return true; 541 } 542 543 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf) 544 { 545 struct discovery_state *cache = &hdev->discovery; 546 struct inquiry_info *info = (struct inquiry_info *) buf; 547 struct inquiry_entry *e; 548 int copied = 0; 549 550 list_for_each_entry(e, &cache->all, all) { 551 struct inquiry_data *data = &e->data; 552 553 if (copied >= num) 554 break; 555 556 bacpy(&info->bdaddr, &data->bdaddr); 557 info->pscan_rep_mode = data->pscan_rep_mode; 558 info->pscan_period_mode = data->pscan_period_mode; 559 info->pscan_mode = data->pscan_mode; 560 memcpy(info->dev_class, data->dev_class, 3); 561 info->clock_offset = data->clock_offset; 562 563 info++; 564 copied++; 565 } 566 567 BT_DBG("cache %p, copied %d", cache, copied); 568 return copied; 569 } 570 571 static void hci_inq_req(struct hci_dev *hdev, unsigned long opt) 572 { 573 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt; 574 struct hci_cp_inquiry cp; 575 576 BT_DBG("%s", hdev->name); 577 578 if (test_bit(HCI_INQUIRY, &hdev->flags)) 579 return; 580 581 /* Start Inquiry */ 582 memcpy(&cp.lap, &ir->lap, 3); 583 cp.length = ir->length; 584 cp.num_rsp = ir->num_rsp; 585 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp); 586 } 587 588 int hci_inquiry(void __user *arg) 589 { 590 __u8 __user *ptr = arg; 591 struct hci_inquiry_req ir; 592 struct hci_dev *hdev; 593 int err = 0, do_inquiry = 0, max_rsp; 594 long timeo; 595 __u8 *buf; 596 597 if (copy_from_user(&ir, ptr, sizeof(ir))) 598 return -EFAULT; 599 600 hdev = hci_dev_get(ir.dev_id); 601 if (!hdev) 602 return -ENODEV; 603 604 hci_dev_lock(hdev); 605 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX || 606 inquiry_cache_empty(hdev) || 607 ir.flags & IREQ_CACHE_FLUSH) { 608 inquiry_cache_flush(hdev); 609 do_inquiry = 1; 610 } 611 hci_dev_unlock(hdev); 612 613 timeo = ir.length * msecs_to_jiffies(2000); 614 615 if (do_inquiry) { 616 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo); 617 if (err < 0) 618 goto done; 619 } 620 621 /* for unlimited number of responses we will use buffer with 255 entries */ 622 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp; 623 624 /* cache_dump can't sleep. Therefore we allocate temp buffer and then 625 * copy it to the user space. 626 */ 627 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL); 628 if (!buf) { 629 err = -ENOMEM; 630 goto done; 631 } 632 633 hci_dev_lock(hdev); 634 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf); 635 hci_dev_unlock(hdev); 636 637 BT_DBG("num_rsp %d", ir.num_rsp); 638 639 if (!copy_to_user(ptr, &ir, sizeof(ir))) { 640 ptr += sizeof(ir); 641 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) * 642 ir.num_rsp)) 643 err = -EFAULT; 644 } else 645 err = -EFAULT; 646 647 kfree(buf); 648 649 done: 650 hci_dev_put(hdev); 651 return err; 652 } 653 654 /* ---- HCI ioctl helpers ---- */ 655 656 int hci_dev_open(__u16 dev) 657 { 658 struct hci_dev *hdev; 659 int ret = 0; 660 661 hdev = hci_dev_get(dev); 662 if (!hdev) 663 return -ENODEV; 664 665 BT_DBG("%s %p", hdev->name, hdev); 666 667 hci_req_lock(hdev); 668 669 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) { 670 ret = -ERFKILL; 671 goto done; 672 } 673 674 if (test_bit(HCI_UP, &hdev->flags)) { 675 ret = -EALREADY; 676 goto done; 677 } 678 679 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) 680 set_bit(HCI_RAW, &hdev->flags); 681 682 /* Treat all non BR/EDR controllers as raw devices if 683 enable_hs is not set */ 684 if (hdev->dev_type != HCI_BREDR && !enable_hs) 685 set_bit(HCI_RAW, &hdev->flags); 686 687 if (hdev->open(hdev)) { 688 ret = -EIO; 689 goto done; 690 } 691 692 if (!test_bit(HCI_RAW, &hdev->flags)) { 693 atomic_set(&hdev->cmd_cnt, 1); 694 set_bit(HCI_INIT, &hdev->flags); 695 hdev->init_last_cmd = 0; 696 697 ret = __hci_request(hdev, hci_init_req, 0, 698 msecs_to_jiffies(HCI_INIT_TIMEOUT)); 699 700 if (lmp_host_le_capable(hdev)) 701 ret = __hci_request(hdev, hci_le_init_req, 0, 702 msecs_to_jiffies(HCI_INIT_TIMEOUT)); 703 704 clear_bit(HCI_INIT, &hdev->flags); 705 } 706 707 if (!ret) { 708 hci_dev_hold(hdev); 709 set_bit(HCI_UP, &hdev->flags); 710 hci_notify(hdev, HCI_DEV_UP); 711 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) { 712 hci_dev_lock(hdev); 713 mgmt_powered(hdev, 1); 714 hci_dev_unlock(hdev); 715 } 716 } else { 717 /* Init failed, cleanup */ 718 flush_work(&hdev->tx_work); 719 flush_work(&hdev->cmd_work); 720 flush_work(&hdev->rx_work); 721 722 skb_queue_purge(&hdev->cmd_q); 723 skb_queue_purge(&hdev->rx_q); 724 725 if (hdev->flush) 726 hdev->flush(hdev); 727 728 if (hdev->sent_cmd) { 729 kfree_skb(hdev->sent_cmd); 730 hdev->sent_cmd = NULL; 731 } 732 733 hdev->close(hdev); 734 hdev->flags = 0; 735 } 736 737 done: 738 hci_req_unlock(hdev); 739 hci_dev_put(hdev); 740 return ret; 741 } 742 743 static int hci_dev_do_close(struct hci_dev *hdev) 744 { 745 BT_DBG("%s %p", hdev->name, hdev); 746 747 cancel_work_sync(&hdev->le_scan); 748 749 hci_req_cancel(hdev, ENODEV); 750 hci_req_lock(hdev); 751 752 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) { 753 del_timer_sync(&hdev->cmd_timer); 754 hci_req_unlock(hdev); 755 return 0; 756 } 757 758 /* Flush RX and TX works */ 759 flush_work(&hdev->tx_work); 760 flush_work(&hdev->rx_work); 761 762 if (hdev->discov_timeout > 0) { 763 cancel_delayed_work(&hdev->discov_off); 764 hdev->discov_timeout = 0; 765 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags); 766 } 767 768 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) 769 cancel_delayed_work(&hdev->service_cache); 770 771 cancel_delayed_work_sync(&hdev->le_scan_disable); 772 773 hci_dev_lock(hdev); 774 inquiry_cache_flush(hdev); 775 hci_conn_hash_flush(hdev); 776 hci_dev_unlock(hdev); 777 778 hci_notify(hdev, HCI_DEV_DOWN); 779 780 if (hdev->flush) 781 hdev->flush(hdev); 782 783 /* Reset device */ 784 skb_queue_purge(&hdev->cmd_q); 785 atomic_set(&hdev->cmd_cnt, 1); 786 if (!test_bit(HCI_RAW, &hdev->flags) && 787 test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) { 788 set_bit(HCI_INIT, &hdev->flags); 789 __hci_request(hdev, hci_reset_req, 0, 790 msecs_to_jiffies(250)); 791 clear_bit(HCI_INIT, &hdev->flags); 792 } 793 794 /* flush cmd work */ 795 flush_work(&hdev->cmd_work); 796 797 /* Drop queues */ 798 skb_queue_purge(&hdev->rx_q); 799 skb_queue_purge(&hdev->cmd_q); 800 skb_queue_purge(&hdev->raw_q); 801 802 /* Drop last sent command */ 803 if (hdev->sent_cmd) { 804 del_timer_sync(&hdev->cmd_timer); 805 kfree_skb(hdev->sent_cmd); 806 hdev->sent_cmd = NULL; 807 } 808 809 /* After this point our queues are empty 810 * and no tasks are scheduled. */ 811 hdev->close(hdev); 812 813 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) { 814 hci_dev_lock(hdev); 815 mgmt_powered(hdev, 0); 816 hci_dev_unlock(hdev); 817 } 818 819 /* Clear flags */ 820 hdev->flags = 0; 821 822 memset(hdev->eir, 0, sizeof(hdev->eir)); 823 memset(hdev->dev_class, 0, sizeof(hdev->dev_class)); 824 825 hci_req_unlock(hdev); 826 827 hci_dev_put(hdev); 828 return 0; 829 } 830 831 int hci_dev_close(__u16 dev) 832 { 833 struct hci_dev *hdev; 834 int err; 835 836 hdev = hci_dev_get(dev); 837 if (!hdev) 838 return -ENODEV; 839 840 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) 841 cancel_delayed_work(&hdev->power_off); 842 843 err = hci_dev_do_close(hdev); 844 845 hci_dev_put(hdev); 846 return err; 847 } 848 849 int hci_dev_reset(__u16 dev) 850 { 851 struct hci_dev *hdev; 852 int ret = 0; 853 854 hdev = hci_dev_get(dev); 855 if (!hdev) 856 return -ENODEV; 857 858 hci_req_lock(hdev); 859 860 if (!test_bit(HCI_UP, &hdev->flags)) 861 goto done; 862 863 /* Drop queues */ 864 skb_queue_purge(&hdev->rx_q); 865 skb_queue_purge(&hdev->cmd_q); 866 867 hci_dev_lock(hdev); 868 inquiry_cache_flush(hdev); 869 hci_conn_hash_flush(hdev); 870 hci_dev_unlock(hdev); 871 872 if (hdev->flush) 873 hdev->flush(hdev); 874 875 atomic_set(&hdev->cmd_cnt, 1); 876 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0; 877 878 if (!test_bit(HCI_RAW, &hdev->flags)) 879 ret = __hci_request(hdev, hci_reset_req, 0, 880 msecs_to_jiffies(HCI_INIT_TIMEOUT)); 881 882 done: 883 hci_req_unlock(hdev); 884 hci_dev_put(hdev); 885 return ret; 886 } 887 888 int hci_dev_reset_stat(__u16 dev) 889 { 890 struct hci_dev *hdev; 891 int ret = 0; 892 893 hdev = hci_dev_get(dev); 894 if (!hdev) 895 return -ENODEV; 896 897 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats)); 898 899 hci_dev_put(hdev); 900 901 return ret; 902 } 903 904 int hci_dev_cmd(unsigned int cmd, void __user *arg) 905 { 906 struct hci_dev *hdev; 907 struct hci_dev_req dr; 908 int err = 0; 909 910 if (copy_from_user(&dr, arg, sizeof(dr))) 911 return -EFAULT; 912 913 hdev = hci_dev_get(dr.dev_id); 914 if (!hdev) 915 return -ENODEV; 916 917 switch (cmd) { 918 case HCISETAUTH: 919 err = hci_request(hdev, hci_auth_req, dr.dev_opt, 920 msecs_to_jiffies(HCI_INIT_TIMEOUT)); 921 break; 922 923 case HCISETENCRYPT: 924 if (!lmp_encrypt_capable(hdev)) { 925 err = -EOPNOTSUPP; 926 break; 927 } 928 929 if (!test_bit(HCI_AUTH, &hdev->flags)) { 930 /* Auth must be enabled first */ 931 err = hci_request(hdev, hci_auth_req, dr.dev_opt, 932 msecs_to_jiffies(HCI_INIT_TIMEOUT)); 933 if (err) 934 break; 935 } 936 937 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt, 938 msecs_to_jiffies(HCI_INIT_TIMEOUT)); 939 break; 940 941 case HCISETSCAN: 942 err = hci_request(hdev, hci_scan_req, dr.dev_opt, 943 msecs_to_jiffies(HCI_INIT_TIMEOUT)); 944 break; 945 946 case HCISETLINKPOL: 947 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt, 948 msecs_to_jiffies(HCI_INIT_TIMEOUT)); 949 break; 950 951 case HCISETLINKMODE: 952 hdev->link_mode = ((__u16) dr.dev_opt) & 953 (HCI_LM_MASTER | HCI_LM_ACCEPT); 954 break; 955 956 case HCISETPTYPE: 957 hdev->pkt_type = (__u16) dr.dev_opt; 958 break; 959 960 case HCISETACLMTU: 961 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1); 962 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0); 963 break; 964 965 case HCISETSCOMTU: 966 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1); 967 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0); 968 break; 969 970 default: 971 err = -EINVAL; 972 break; 973 } 974 975 hci_dev_put(hdev); 976 return err; 977 } 978 979 int hci_get_dev_list(void __user *arg) 980 { 981 struct hci_dev *hdev; 982 struct hci_dev_list_req *dl; 983 struct hci_dev_req *dr; 984 int n = 0, size, err; 985 __u16 dev_num; 986 987 if (get_user(dev_num, (__u16 __user *) arg)) 988 return -EFAULT; 989 990 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr)) 991 return -EINVAL; 992 993 size = sizeof(*dl) + dev_num * sizeof(*dr); 994 995 dl = kzalloc(size, GFP_KERNEL); 996 if (!dl) 997 return -ENOMEM; 998 999 dr = dl->dev_req; 1000 1001 read_lock(&hci_dev_list_lock); 1002 list_for_each_entry(hdev, &hci_dev_list, list) { 1003 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) 1004 cancel_delayed_work(&hdev->power_off); 1005 1006 if (!test_bit(HCI_MGMT, &hdev->dev_flags)) 1007 set_bit(HCI_PAIRABLE, &hdev->dev_flags); 1008 1009 (dr + n)->dev_id = hdev->id; 1010 (dr + n)->dev_opt = hdev->flags; 1011 1012 if (++n >= dev_num) 1013 break; 1014 } 1015 read_unlock(&hci_dev_list_lock); 1016 1017 dl->dev_num = n; 1018 size = sizeof(*dl) + n * sizeof(*dr); 1019 1020 err = copy_to_user(arg, dl, size); 1021 kfree(dl); 1022 1023 return err ? -EFAULT : 0; 1024 } 1025 1026 int hci_get_dev_info(void __user *arg) 1027 { 1028 struct hci_dev *hdev; 1029 struct hci_dev_info di; 1030 int err = 0; 1031 1032 if (copy_from_user(&di, arg, sizeof(di))) 1033 return -EFAULT; 1034 1035 hdev = hci_dev_get(di.dev_id); 1036 if (!hdev) 1037 return -ENODEV; 1038 1039 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) 1040 cancel_delayed_work_sync(&hdev->power_off); 1041 1042 if (!test_bit(HCI_MGMT, &hdev->dev_flags)) 1043 set_bit(HCI_PAIRABLE, &hdev->dev_flags); 1044 1045 strcpy(di.name, hdev->name); 1046 di.bdaddr = hdev->bdaddr; 1047 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4); 1048 di.flags = hdev->flags; 1049 di.pkt_type = hdev->pkt_type; 1050 di.acl_mtu = hdev->acl_mtu; 1051 di.acl_pkts = hdev->acl_pkts; 1052 di.sco_mtu = hdev->sco_mtu; 1053 di.sco_pkts = hdev->sco_pkts; 1054 di.link_policy = hdev->link_policy; 1055 di.link_mode = hdev->link_mode; 1056 1057 memcpy(&di.stat, &hdev->stat, sizeof(di.stat)); 1058 memcpy(&di.features, &hdev->features, sizeof(di.features)); 1059 1060 if (copy_to_user(arg, &di, sizeof(di))) 1061 err = -EFAULT; 1062 1063 hci_dev_put(hdev); 1064 1065 return err; 1066 } 1067 1068 /* ---- Interface to HCI drivers ---- */ 1069 1070 static int hci_rfkill_set_block(void *data, bool blocked) 1071 { 1072 struct hci_dev *hdev = data; 1073 1074 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked); 1075 1076 if (!blocked) 1077 return 0; 1078 1079 hci_dev_do_close(hdev); 1080 1081 return 0; 1082 } 1083 1084 static const struct rfkill_ops hci_rfkill_ops = { 1085 .set_block = hci_rfkill_set_block, 1086 }; 1087 1088 /* Alloc HCI device */ 1089 struct hci_dev *hci_alloc_dev(void) 1090 { 1091 struct hci_dev *hdev; 1092 1093 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL); 1094 if (!hdev) 1095 return NULL; 1096 1097 hci_init_sysfs(hdev); 1098 skb_queue_head_init(&hdev->driver_init); 1099 1100 return hdev; 1101 } 1102 EXPORT_SYMBOL(hci_alloc_dev); 1103 1104 /* Free HCI device */ 1105 void hci_free_dev(struct hci_dev *hdev) 1106 { 1107 skb_queue_purge(&hdev->driver_init); 1108 1109 /* will free via device release */ 1110 put_device(&hdev->dev); 1111 } 1112 EXPORT_SYMBOL(hci_free_dev); 1113 1114 static void hci_power_on(struct work_struct *work) 1115 { 1116 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on); 1117 1118 BT_DBG("%s", hdev->name); 1119 1120 if (hci_dev_open(hdev->id) < 0) 1121 return; 1122 1123 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) 1124 schedule_delayed_work(&hdev->power_off, 1125 msecs_to_jiffies(AUTO_OFF_TIMEOUT)); 1126 1127 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) 1128 mgmt_index_added(hdev); 1129 } 1130 1131 static void hci_power_off(struct work_struct *work) 1132 { 1133 struct hci_dev *hdev = container_of(work, struct hci_dev, 1134 power_off.work); 1135 1136 BT_DBG("%s", hdev->name); 1137 1138 hci_dev_do_close(hdev); 1139 } 1140 1141 static void hci_discov_off(struct work_struct *work) 1142 { 1143 struct hci_dev *hdev; 1144 u8 scan = SCAN_PAGE; 1145 1146 hdev = container_of(work, struct hci_dev, discov_off.work); 1147 1148 BT_DBG("%s", hdev->name); 1149 1150 hci_dev_lock(hdev); 1151 1152 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan); 1153 1154 hdev->discov_timeout = 0; 1155 1156 hci_dev_unlock(hdev); 1157 } 1158 1159 int hci_uuids_clear(struct hci_dev *hdev) 1160 { 1161 struct list_head *p, *n; 1162 1163 list_for_each_safe(p, n, &hdev->uuids) { 1164 struct bt_uuid *uuid; 1165 1166 uuid = list_entry(p, struct bt_uuid, list); 1167 1168 list_del(p); 1169 kfree(uuid); 1170 } 1171 1172 return 0; 1173 } 1174 1175 int hci_link_keys_clear(struct hci_dev *hdev) 1176 { 1177 struct list_head *p, *n; 1178 1179 list_for_each_safe(p, n, &hdev->link_keys) { 1180 struct link_key *key; 1181 1182 key = list_entry(p, struct link_key, list); 1183 1184 list_del(p); 1185 kfree(key); 1186 } 1187 1188 return 0; 1189 } 1190 1191 int hci_smp_ltks_clear(struct hci_dev *hdev) 1192 { 1193 struct smp_ltk *k, *tmp; 1194 1195 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) { 1196 list_del(&k->list); 1197 kfree(k); 1198 } 1199 1200 return 0; 1201 } 1202 1203 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr) 1204 { 1205 struct link_key *k; 1206 1207 list_for_each_entry(k, &hdev->link_keys, list) 1208 if (bacmp(bdaddr, &k->bdaddr) == 0) 1209 return k; 1210 1211 return NULL; 1212 } 1213 1214 static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn, 1215 u8 key_type, u8 old_key_type) 1216 { 1217 /* Legacy key */ 1218 if (key_type < 0x03) 1219 return 1; 1220 1221 /* Debug keys are insecure so don't store them persistently */ 1222 if (key_type == HCI_LK_DEBUG_COMBINATION) 1223 return 0; 1224 1225 /* Changed combination key and there's no previous one */ 1226 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff) 1227 return 0; 1228 1229 /* Security mode 3 case */ 1230 if (!conn) 1231 return 1; 1232 1233 /* Neither local nor remote side had no-bonding as requirement */ 1234 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01) 1235 return 1; 1236 1237 /* Local side had dedicated bonding as requirement */ 1238 if (conn->auth_type == 0x02 || conn->auth_type == 0x03) 1239 return 1; 1240 1241 /* Remote side had dedicated bonding as requirement */ 1242 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) 1243 return 1; 1244 1245 /* If none of the above criteria match, then don't store the key 1246 * persistently */ 1247 return 0; 1248 } 1249 1250 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8]) 1251 { 1252 struct smp_ltk *k; 1253 1254 list_for_each_entry(k, &hdev->long_term_keys, list) { 1255 if (k->ediv != ediv || 1256 memcmp(rand, k->rand, sizeof(k->rand))) 1257 continue; 1258 1259 return k; 1260 } 1261 1262 return NULL; 1263 } 1264 EXPORT_SYMBOL(hci_find_ltk); 1265 1266 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr, 1267 u8 addr_type) 1268 { 1269 struct smp_ltk *k; 1270 1271 list_for_each_entry(k, &hdev->long_term_keys, list) 1272 if (addr_type == k->bdaddr_type && 1273 bacmp(bdaddr, &k->bdaddr) == 0) 1274 return k; 1275 1276 return NULL; 1277 } 1278 EXPORT_SYMBOL(hci_find_ltk_by_addr); 1279 1280 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key, 1281 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len) 1282 { 1283 struct link_key *key, *old_key; 1284 u8 old_key_type, persistent; 1285 1286 old_key = hci_find_link_key(hdev, bdaddr); 1287 if (old_key) { 1288 old_key_type = old_key->type; 1289 key = old_key; 1290 } else { 1291 old_key_type = conn ? conn->key_type : 0xff; 1292 key = kzalloc(sizeof(*key), GFP_ATOMIC); 1293 if (!key) 1294 return -ENOMEM; 1295 list_add(&key->list, &hdev->link_keys); 1296 } 1297 1298 BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type); 1299 1300 /* Some buggy controller combinations generate a changed 1301 * combination key for legacy pairing even when there's no 1302 * previous key */ 1303 if (type == HCI_LK_CHANGED_COMBINATION && 1304 (!conn || conn->remote_auth == 0xff) && 1305 old_key_type == 0xff) { 1306 type = HCI_LK_COMBINATION; 1307 if (conn) 1308 conn->key_type = type; 1309 } 1310 1311 bacpy(&key->bdaddr, bdaddr); 1312 memcpy(key->val, val, 16); 1313 key->pin_len = pin_len; 1314 1315 if (type == HCI_LK_CHANGED_COMBINATION) 1316 key->type = old_key_type; 1317 else 1318 key->type = type; 1319 1320 if (!new_key) 1321 return 0; 1322 1323 persistent = hci_persistent_key(hdev, conn, type, old_key_type); 1324 1325 mgmt_new_link_key(hdev, key, persistent); 1326 1327 if (!persistent) { 1328 list_del(&key->list); 1329 kfree(key); 1330 } 1331 1332 return 0; 1333 } 1334 1335 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type, 1336 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, u16 1337 ediv, u8 rand[8]) 1338 { 1339 struct smp_ltk *key, *old_key; 1340 1341 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK)) 1342 return 0; 1343 1344 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type); 1345 if (old_key) 1346 key = old_key; 1347 else { 1348 key = kzalloc(sizeof(*key), GFP_ATOMIC); 1349 if (!key) 1350 return -ENOMEM; 1351 list_add(&key->list, &hdev->long_term_keys); 1352 } 1353 1354 bacpy(&key->bdaddr, bdaddr); 1355 key->bdaddr_type = addr_type; 1356 memcpy(key->val, tk, sizeof(key->val)); 1357 key->authenticated = authenticated; 1358 key->ediv = ediv; 1359 key->enc_size = enc_size; 1360 key->type = type; 1361 memcpy(key->rand, rand, sizeof(key->rand)); 1362 1363 if (!new_key) 1364 return 0; 1365 1366 if (type & HCI_SMP_LTK) 1367 mgmt_new_ltk(hdev, key, 1); 1368 1369 return 0; 1370 } 1371 1372 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr) 1373 { 1374 struct link_key *key; 1375 1376 key = hci_find_link_key(hdev, bdaddr); 1377 if (!key) 1378 return -ENOENT; 1379 1380 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr)); 1381 1382 list_del(&key->list); 1383 kfree(key); 1384 1385 return 0; 1386 } 1387 1388 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr) 1389 { 1390 struct smp_ltk *k, *tmp; 1391 1392 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) { 1393 if (bacmp(bdaddr, &k->bdaddr)) 1394 continue; 1395 1396 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr)); 1397 1398 list_del(&k->list); 1399 kfree(k); 1400 } 1401 1402 return 0; 1403 } 1404 1405 /* HCI command timer function */ 1406 static void hci_cmd_timer(unsigned long arg) 1407 { 1408 struct hci_dev *hdev = (void *) arg; 1409 1410 BT_ERR("%s command tx timeout", hdev->name); 1411 atomic_set(&hdev->cmd_cnt, 1); 1412 queue_work(hdev->workqueue, &hdev->cmd_work); 1413 } 1414 1415 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev, 1416 bdaddr_t *bdaddr) 1417 { 1418 struct oob_data *data; 1419 1420 list_for_each_entry(data, &hdev->remote_oob_data, list) 1421 if (bacmp(bdaddr, &data->bdaddr) == 0) 1422 return data; 1423 1424 return NULL; 1425 } 1426 1427 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr) 1428 { 1429 struct oob_data *data; 1430 1431 data = hci_find_remote_oob_data(hdev, bdaddr); 1432 if (!data) 1433 return -ENOENT; 1434 1435 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr)); 1436 1437 list_del(&data->list); 1438 kfree(data); 1439 1440 return 0; 1441 } 1442 1443 int hci_remote_oob_data_clear(struct hci_dev *hdev) 1444 { 1445 struct oob_data *data, *n; 1446 1447 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) { 1448 list_del(&data->list); 1449 kfree(data); 1450 } 1451 1452 return 0; 1453 } 1454 1455 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash, 1456 u8 *randomizer) 1457 { 1458 struct oob_data *data; 1459 1460 data = hci_find_remote_oob_data(hdev, bdaddr); 1461 1462 if (!data) { 1463 data = kmalloc(sizeof(*data), GFP_ATOMIC); 1464 if (!data) 1465 return -ENOMEM; 1466 1467 bacpy(&data->bdaddr, bdaddr); 1468 list_add(&data->list, &hdev->remote_oob_data); 1469 } 1470 1471 memcpy(data->hash, hash, sizeof(data->hash)); 1472 memcpy(data->randomizer, randomizer, sizeof(data->randomizer)); 1473 1474 BT_DBG("%s for %s", hdev->name, batostr(bdaddr)); 1475 1476 return 0; 1477 } 1478 1479 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr) 1480 { 1481 struct bdaddr_list *b; 1482 1483 list_for_each_entry(b, &hdev->blacklist, list) 1484 if (bacmp(bdaddr, &b->bdaddr) == 0) 1485 return b; 1486 1487 return NULL; 1488 } 1489 1490 int hci_blacklist_clear(struct hci_dev *hdev) 1491 { 1492 struct list_head *p, *n; 1493 1494 list_for_each_safe(p, n, &hdev->blacklist) { 1495 struct bdaddr_list *b; 1496 1497 b = list_entry(p, struct bdaddr_list, list); 1498 1499 list_del(p); 1500 kfree(b); 1501 } 1502 1503 return 0; 1504 } 1505 1506 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type) 1507 { 1508 struct bdaddr_list *entry; 1509 1510 if (bacmp(bdaddr, BDADDR_ANY) == 0) 1511 return -EBADF; 1512 1513 if (hci_blacklist_lookup(hdev, bdaddr)) 1514 return -EEXIST; 1515 1516 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL); 1517 if (!entry) 1518 return -ENOMEM; 1519 1520 bacpy(&entry->bdaddr, bdaddr); 1521 1522 list_add(&entry->list, &hdev->blacklist); 1523 1524 return mgmt_device_blocked(hdev, bdaddr, type); 1525 } 1526 1527 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type) 1528 { 1529 struct bdaddr_list *entry; 1530 1531 if (bacmp(bdaddr, BDADDR_ANY) == 0) 1532 return hci_blacklist_clear(hdev); 1533 1534 entry = hci_blacklist_lookup(hdev, bdaddr); 1535 if (!entry) 1536 return -ENOENT; 1537 1538 list_del(&entry->list); 1539 kfree(entry); 1540 1541 return mgmt_device_unblocked(hdev, bdaddr, type); 1542 } 1543 1544 static void hci_clear_adv_cache(struct work_struct *work) 1545 { 1546 struct hci_dev *hdev = container_of(work, struct hci_dev, 1547 adv_work.work); 1548 1549 hci_dev_lock(hdev); 1550 1551 hci_adv_entries_clear(hdev); 1552 1553 hci_dev_unlock(hdev); 1554 } 1555 1556 int hci_adv_entries_clear(struct hci_dev *hdev) 1557 { 1558 struct adv_entry *entry, *tmp; 1559 1560 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) { 1561 list_del(&entry->list); 1562 kfree(entry); 1563 } 1564 1565 BT_DBG("%s adv cache cleared", hdev->name); 1566 1567 return 0; 1568 } 1569 1570 struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr) 1571 { 1572 struct adv_entry *entry; 1573 1574 list_for_each_entry(entry, &hdev->adv_entries, list) 1575 if (bacmp(bdaddr, &entry->bdaddr) == 0) 1576 return entry; 1577 1578 return NULL; 1579 } 1580 1581 static inline int is_connectable_adv(u8 evt_type) 1582 { 1583 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND) 1584 return 1; 1585 1586 return 0; 1587 } 1588 1589 int hci_add_adv_entry(struct hci_dev *hdev, 1590 struct hci_ev_le_advertising_info *ev) { struct adv_entry *entry; if (!is_connectable_adv(ev->evt_type)) 1591 return -EINVAL; 1592 1593 /* Only new entries should be added to adv_entries. So, if 1594 * bdaddr was found, don't add it. */ 1595 if (hci_find_adv_entry(hdev, &ev->bdaddr)) 1596 return 0; 1597 1598 entry = kzalloc(sizeof(*entry), GFP_KERNEL); 1599 if (!entry) 1600 return -ENOMEM; 1601 1602 bacpy(&entry->bdaddr, &ev->bdaddr); 1603 entry->bdaddr_type = ev->bdaddr_type; 1604 1605 list_add(&entry->list, &hdev->adv_entries); 1606 1607 BT_DBG("%s adv entry added: address %s type %u", hdev->name, 1608 batostr(&entry->bdaddr), entry->bdaddr_type); 1609 1610 return 0; 1611 } 1612 1613 static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt) 1614 { 1615 struct le_scan_params *param = (struct le_scan_params *) opt; 1616 struct hci_cp_le_set_scan_param cp; 1617 1618 memset(&cp, 0, sizeof(cp)); 1619 cp.type = param->type; 1620 cp.interval = cpu_to_le16(param->interval); 1621 cp.window = cpu_to_le16(param->window); 1622 1623 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp); 1624 } 1625 1626 static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt) 1627 { 1628 struct hci_cp_le_set_scan_enable cp; 1629 1630 memset(&cp, 0, sizeof(cp)); 1631 cp.enable = 1; 1632 1633 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp); 1634 } 1635 1636 static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval, 1637 u16 window, int timeout) 1638 { 1639 long timeo = msecs_to_jiffies(3000); 1640 struct le_scan_params param; 1641 int err; 1642 1643 BT_DBG("%s", hdev->name); 1644 1645 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) 1646 return -EINPROGRESS; 1647 1648 param.type = type; 1649 param.interval = interval; 1650 param.window = window; 1651 1652 hci_req_lock(hdev); 1653 1654 err = __hci_request(hdev, le_scan_param_req, (unsigned long) ¶m, 1655 timeo); 1656 if (!err) 1657 err = __hci_request(hdev, le_scan_enable_req, 0, timeo); 1658 1659 hci_req_unlock(hdev); 1660 1661 if (err < 0) 1662 return err; 1663 1664 schedule_delayed_work(&hdev->le_scan_disable, 1665 msecs_to_jiffies(timeout)); 1666 1667 return 0; 1668 } 1669 1670 static void le_scan_disable_work(struct work_struct *work) 1671 { 1672 struct hci_dev *hdev = container_of(work, struct hci_dev, 1673 le_scan_disable.work); 1674 struct hci_cp_le_set_scan_enable cp; 1675 1676 BT_DBG("%s", hdev->name); 1677 1678 memset(&cp, 0, sizeof(cp)); 1679 1680 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp); 1681 } 1682 1683 static void le_scan_work(struct work_struct *work) 1684 { 1685 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan); 1686 struct le_scan_params *param = &hdev->le_scan_params; 1687 1688 BT_DBG("%s", hdev->name); 1689 1690 hci_do_le_scan(hdev, param->type, param->interval, param->window, 1691 param->timeout); 1692 } 1693 1694 int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window, 1695 int timeout) 1696 { 1697 struct le_scan_params *param = &hdev->le_scan_params; 1698 1699 BT_DBG("%s", hdev->name); 1700 1701 if (work_busy(&hdev->le_scan)) 1702 return -EINPROGRESS; 1703 1704 param->type = type; 1705 param->interval = interval; 1706 param->window = window; 1707 param->timeout = timeout; 1708 1709 queue_work(system_long_wq, &hdev->le_scan); 1710 1711 return 0; 1712 } 1713 1714 /* Register HCI device */ 1715 int hci_register_dev(struct hci_dev *hdev) 1716 { 1717 struct list_head *head = &hci_dev_list, *p; 1718 int i, id, error; 1719 1720 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); 1721 1722 if (!hdev->open || !hdev->close) 1723 return -EINVAL; 1724 1725 /* Do not allow HCI_AMP devices to register at index 0, 1726 * so the index can be used as the AMP controller ID. 1727 */ 1728 id = (hdev->dev_type == HCI_BREDR) ? 0 : 1; 1729 1730 write_lock(&hci_dev_list_lock); 1731 1732 /* Find first available device id */ 1733 list_for_each(p, &hci_dev_list) { 1734 if (list_entry(p, struct hci_dev, list)->id != id) 1735 break; 1736 head = p; id++; 1737 } 1738 1739 sprintf(hdev->name, "hci%d", id); 1740 hdev->id = id; 1741 list_add_tail(&hdev->list, head); 1742 1743 mutex_init(&hdev->lock); 1744 1745 hdev->flags = 0; 1746 hdev->dev_flags = 0; 1747 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1); 1748 hdev->esco_type = (ESCO_HV1); 1749 hdev->link_mode = (HCI_LM_ACCEPT); 1750 hdev->io_capability = 0x03; /* No Input No Output */ 1751 1752 hdev->idle_timeout = 0; 1753 hdev->sniff_max_interval = 800; 1754 hdev->sniff_min_interval = 80; 1755 1756 INIT_WORK(&hdev->rx_work, hci_rx_work); 1757 INIT_WORK(&hdev->cmd_work, hci_cmd_work); 1758 INIT_WORK(&hdev->tx_work, hci_tx_work); 1759 1760 1761 skb_queue_head_init(&hdev->rx_q); 1762 skb_queue_head_init(&hdev->cmd_q); 1763 skb_queue_head_init(&hdev->raw_q); 1764 1765 setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev); 1766 1767 for (i = 0; i < NUM_REASSEMBLY; i++) 1768 hdev->reassembly[i] = NULL; 1769 1770 init_waitqueue_head(&hdev->req_wait_q); 1771 mutex_init(&hdev->req_lock); 1772 1773 discovery_init(hdev); 1774 1775 hci_conn_hash_init(hdev); 1776 1777 INIT_LIST_HEAD(&hdev->mgmt_pending); 1778 1779 INIT_LIST_HEAD(&hdev->blacklist); 1780 1781 INIT_LIST_HEAD(&hdev->uuids); 1782 1783 INIT_LIST_HEAD(&hdev->link_keys); 1784 INIT_LIST_HEAD(&hdev->long_term_keys); 1785 1786 INIT_LIST_HEAD(&hdev->remote_oob_data); 1787 1788 INIT_LIST_HEAD(&hdev->adv_entries); 1789 1790 INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache); 1791 INIT_WORK(&hdev->power_on, hci_power_on); 1792 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off); 1793 1794 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off); 1795 1796 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats)); 1797 1798 atomic_set(&hdev->promisc, 0); 1799 1800 INIT_WORK(&hdev->le_scan, le_scan_work); 1801 1802 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work); 1803 1804 write_unlock(&hci_dev_list_lock); 1805 1806 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND | 1807 WQ_MEM_RECLAIM, 1); 1808 if (!hdev->workqueue) { 1809 error = -ENOMEM; 1810 goto err; 1811 } 1812 1813 error = hci_add_sysfs(hdev); 1814 if (error < 0) 1815 goto err_wqueue; 1816 1817 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev, 1818 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev); 1819 if (hdev->rfkill) { 1820 if (rfkill_register(hdev->rfkill) < 0) { 1821 rfkill_destroy(hdev->rfkill); 1822 hdev->rfkill = NULL; 1823 } 1824 } 1825 1826 set_bit(HCI_AUTO_OFF, &hdev->dev_flags); 1827 set_bit(HCI_SETUP, &hdev->dev_flags); 1828 schedule_work(&hdev->power_on); 1829 1830 hci_notify(hdev, HCI_DEV_REG); 1831 hci_dev_hold(hdev); 1832 1833 return id; 1834 1835 err_wqueue: 1836 destroy_workqueue(hdev->workqueue); 1837 err: 1838 write_lock(&hci_dev_list_lock); 1839 list_del(&hdev->list); 1840 write_unlock(&hci_dev_list_lock); 1841 1842 return error; 1843 } 1844 EXPORT_SYMBOL(hci_register_dev); 1845 1846 /* Unregister HCI device */ 1847 void hci_unregister_dev(struct hci_dev *hdev) 1848 { 1849 int i; 1850 1851 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); 1852 1853 write_lock(&hci_dev_list_lock); 1854 list_del(&hdev->list); 1855 write_unlock(&hci_dev_list_lock); 1856 1857 hci_dev_do_close(hdev); 1858 1859 for (i = 0; i < NUM_REASSEMBLY; i++) 1860 kfree_skb(hdev->reassembly[i]); 1861 1862 if (!test_bit(HCI_INIT, &hdev->flags) && 1863 !test_bit(HCI_SETUP, &hdev->dev_flags)) { 1864 hci_dev_lock(hdev); 1865 mgmt_index_removed(hdev); 1866 hci_dev_unlock(hdev); 1867 } 1868 1869 /* mgmt_index_removed should take care of emptying the 1870 * pending list */ 1871 BUG_ON(!list_empty(&hdev->mgmt_pending)); 1872 1873 hci_notify(hdev, HCI_DEV_UNREG); 1874 1875 if (hdev->rfkill) { 1876 rfkill_unregister(hdev->rfkill); 1877 rfkill_destroy(hdev->rfkill); 1878 } 1879 1880 hci_del_sysfs(hdev); 1881 1882 cancel_delayed_work_sync(&hdev->adv_work); 1883 1884 destroy_workqueue(hdev->workqueue); 1885 1886 hci_dev_lock(hdev); 1887 hci_blacklist_clear(hdev); 1888 hci_uuids_clear(hdev); 1889 hci_link_keys_clear(hdev); 1890 hci_smp_ltks_clear(hdev); 1891 hci_remote_oob_data_clear(hdev); 1892 hci_adv_entries_clear(hdev); 1893 hci_dev_unlock(hdev); 1894 1895 hci_dev_put(hdev); 1896 } 1897 EXPORT_SYMBOL(hci_unregister_dev); 1898 1899 /* Suspend HCI device */ 1900 int hci_suspend_dev(struct hci_dev *hdev) 1901 { 1902 hci_notify(hdev, HCI_DEV_SUSPEND); 1903 return 0; 1904 } 1905 EXPORT_SYMBOL(hci_suspend_dev); 1906 1907 /* Resume HCI device */ 1908 int hci_resume_dev(struct hci_dev *hdev) 1909 { 1910 hci_notify(hdev, HCI_DEV_RESUME); 1911 return 0; 1912 } 1913 EXPORT_SYMBOL(hci_resume_dev); 1914 1915 /* Receive frame from HCI drivers */ 1916 int hci_recv_frame(struct sk_buff *skb) 1917 { 1918 struct hci_dev *hdev = (struct hci_dev *) skb->dev; 1919 if (!hdev || (!test_bit(HCI_UP, &hdev->flags) 1920 && !test_bit(HCI_INIT, &hdev->flags))) { 1921 kfree_skb(skb); 1922 return -ENXIO; 1923 } 1924 1925 /* Incomming skb */ 1926 bt_cb(skb)->incoming = 1; 1927 1928 /* Time stamp */ 1929 __net_timestamp(skb); 1930 1931 skb_queue_tail(&hdev->rx_q, skb); 1932 queue_work(hdev->workqueue, &hdev->rx_work); 1933 1934 return 0; 1935 } 1936 EXPORT_SYMBOL(hci_recv_frame); 1937 1938 static int hci_reassembly(struct hci_dev *hdev, int type, void *data, 1939 int count, __u8 index) 1940 { 1941 int len = 0; 1942 int hlen = 0; 1943 int remain = count; 1944 struct sk_buff *skb; 1945 struct bt_skb_cb *scb; 1946 1947 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) || 1948 index >= NUM_REASSEMBLY) 1949 return -EILSEQ; 1950 1951 skb = hdev->reassembly[index]; 1952 1953 if (!skb) { 1954 switch (type) { 1955 case HCI_ACLDATA_PKT: 1956 len = HCI_MAX_FRAME_SIZE; 1957 hlen = HCI_ACL_HDR_SIZE; 1958 break; 1959 case HCI_EVENT_PKT: 1960 len = HCI_MAX_EVENT_SIZE; 1961 hlen = HCI_EVENT_HDR_SIZE; 1962 break; 1963 case HCI_SCODATA_PKT: 1964 len = HCI_MAX_SCO_SIZE; 1965 hlen = HCI_SCO_HDR_SIZE; 1966 break; 1967 } 1968 1969 skb = bt_skb_alloc(len, GFP_ATOMIC); 1970 if (!skb) 1971 return -ENOMEM; 1972 1973 scb = (void *) skb->cb; 1974 scb->expect = hlen; 1975 scb->pkt_type = type; 1976 1977 skb->dev = (void *) hdev; 1978 hdev->reassembly[index] = skb; 1979 } 1980 1981 while (count) { 1982 scb = (void *) skb->cb; 1983 len = min_t(uint, scb->expect, count); 1984 1985 memcpy(skb_put(skb, len), data, len); 1986 1987 count -= len; 1988 data += len; 1989 scb->expect -= len; 1990 remain = count; 1991 1992 switch (type) { 1993 case HCI_EVENT_PKT: 1994 if (skb->len == HCI_EVENT_HDR_SIZE) { 1995 struct hci_event_hdr *h = hci_event_hdr(skb); 1996 scb->expect = h->plen; 1997 1998 if (skb_tailroom(skb) < scb->expect) { 1999 kfree_skb(skb); 2000 hdev->reassembly[index] = NULL; 2001 return -ENOMEM; 2002 } 2003 } 2004 break; 2005 2006 case HCI_ACLDATA_PKT: 2007 if (skb->len == HCI_ACL_HDR_SIZE) { 2008 struct hci_acl_hdr *h = hci_acl_hdr(skb); 2009 scb->expect = __le16_to_cpu(h->dlen); 2010 2011 if (skb_tailroom(skb) < scb->expect) { 2012 kfree_skb(skb); 2013 hdev->reassembly[index] = NULL; 2014 return -ENOMEM; 2015 } 2016 } 2017 break; 2018 2019 case HCI_SCODATA_PKT: 2020 if (skb->len == HCI_SCO_HDR_SIZE) { 2021 struct hci_sco_hdr *h = hci_sco_hdr(skb); 2022 scb->expect = h->dlen; 2023 2024 if (skb_tailroom(skb) < scb->expect) { 2025 kfree_skb(skb); 2026 hdev->reassembly[index] = NULL; 2027 return -ENOMEM; 2028 } 2029 } 2030 break; 2031 } 2032 2033 if (scb->expect == 0) { 2034 /* Complete frame */ 2035 2036 bt_cb(skb)->pkt_type = type; 2037 hci_recv_frame(skb); 2038 2039 hdev->reassembly[index] = NULL; 2040 return remain; 2041 } 2042 } 2043 2044 return remain; 2045 } 2046 2047 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count) 2048 { 2049 int rem = 0; 2050 2051 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) 2052 return -EILSEQ; 2053 2054 while (count) { 2055 rem = hci_reassembly(hdev, type, data, count, type - 1); 2056 if (rem < 0) 2057 return rem; 2058 2059 data += (count - rem); 2060 count = rem; 2061 } 2062 2063 return rem; 2064 } 2065 EXPORT_SYMBOL(hci_recv_fragment); 2066 2067 #define STREAM_REASSEMBLY 0 2068 2069 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count) 2070 { 2071 int type; 2072 int rem = 0; 2073 2074 while (count) { 2075 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY]; 2076 2077 if (!skb) { 2078 struct { char type; } *pkt; 2079 2080 /* Start of the frame */ 2081 pkt = data; 2082 type = pkt->type; 2083 2084 data++; 2085 count--; 2086 } else 2087 type = bt_cb(skb)->pkt_type; 2088 2089 rem = hci_reassembly(hdev, type, data, count, 2090 STREAM_REASSEMBLY); 2091 if (rem < 0) 2092 return rem; 2093 2094 data += (count - rem); 2095 count = rem; 2096 } 2097 2098 return rem; 2099 } 2100 EXPORT_SYMBOL(hci_recv_stream_fragment); 2101 2102 /* ---- Interface to upper protocols ---- */ 2103 2104 int hci_register_cb(struct hci_cb *cb) 2105 { 2106 BT_DBG("%p name %s", cb, cb->name); 2107 2108 write_lock(&hci_cb_list_lock); 2109 list_add(&cb->list, &hci_cb_list); 2110 write_unlock(&hci_cb_list_lock); 2111 2112 return 0; 2113 } 2114 EXPORT_SYMBOL(hci_register_cb); 2115 2116 int hci_unregister_cb(struct hci_cb *cb) 2117 { 2118 BT_DBG("%p name %s", cb, cb->name); 2119 2120 write_lock(&hci_cb_list_lock); 2121 list_del(&cb->list); 2122 write_unlock(&hci_cb_list_lock); 2123 2124 return 0; 2125 } 2126 EXPORT_SYMBOL(hci_unregister_cb); 2127 2128 static int hci_send_frame(struct sk_buff *skb) 2129 { 2130 struct hci_dev *hdev = (struct hci_dev *) skb->dev; 2131 2132 if (!hdev) { 2133 kfree_skb(skb); 2134 return -ENODEV; 2135 } 2136 2137 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len); 2138 2139 /* Time stamp */ 2140 __net_timestamp(skb); 2141 2142 /* Send copy to monitor */ 2143 hci_send_to_monitor(hdev, skb); 2144 2145 if (atomic_read(&hdev->promisc)) { 2146 /* Send copy to the sockets */ 2147 hci_send_to_sock(hdev, skb); 2148 } 2149 2150 /* Get rid of skb owner, prior to sending to the driver. */ 2151 skb_orphan(skb); 2152 2153 return hdev->send(skb); 2154 } 2155 2156 /* Send HCI command */ 2157 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param) 2158 { 2159 int len = HCI_COMMAND_HDR_SIZE + plen; 2160 struct hci_command_hdr *hdr; 2161 struct sk_buff *skb; 2162 2163 BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen); 2164 2165 skb = bt_skb_alloc(len, GFP_ATOMIC); 2166 if (!skb) { 2167 BT_ERR("%s no memory for command", hdev->name); 2168 return -ENOMEM; 2169 } 2170 2171 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE); 2172 hdr->opcode = cpu_to_le16(opcode); 2173 hdr->plen = plen; 2174 2175 if (plen) 2176 memcpy(skb_put(skb, plen), param, plen); 2177 2178 BT_DBG("skb len %d", skb->len); 2179 2180 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT; 2181 skb->dev = (void *) hdev; 2182 2183 if (test_bit(HCI_INIT, &hdev->flags)) 2184 hdev->init_last_cmd = opcode; 2185 2186 skb_queue_tail(&hdev->cmd_q, skb); 2187 queue_work(hdev->workqueue, &hdev->cmd_work); 2188 2189 return 0; 2190 } 2191 2192 /* Get data from the previously sent command */ 2193 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode) 2194 { 2195 struct hci_command_hdr *hdr; 2196 2197 if (!hdev->sent_cmd) 2198 return NULL; 2199 2200 hdr = (void *) hdev->sent_cmd->data; 2201 2202 if (hdr->opcode != cpu_to_le16(opcode)) 2203 return NULL; 2204 2205 BT_DBG("%s opcode 0x%x", hdev->name, opcode); 2206 2207 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE; 2208 } 2209 2210 /* Send ACL data */ 2211 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags) 2212 { 2213 struct hci_acl_hdr *hdr; 2214 int len = skb->len; 2215 2216 skb_push(skb, HCI_ACL_HDR_SIZE); 2217 skb_reset_transport_header(skb); 2218 hdr = (struct hci_acl_hdr *)skb_transport_header(skb); 2219 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags)); 2220 hdr->dlen = cpu_to_le16(len); 2221 } 2222 2223 static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue, 2224 struct sk_buff *skb, __u16 flags) 2225 { 2226 struct hci_dev *hdev = conn->hdev; 2227 struct sk_buff *list; 2228 2229 list = skb_shinfo(skb)->frag_list; 2230 if (!list) { 2231 /* Non fragmented */ 2232 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len); 2233 2234 skb_queue_tail(queue, skb); 2235 } else { 2236 /* Fragmented */ 2237 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len); 2238 2239 skb_shinfo(skb)->frag_list = NULL; 2240 2241 /* Queue all fragments atomically */ 2242 spin_lock(&queue->lock); 2243 2244 __skb_queue_tail(queue, skb); 2245 2246 flags &= ~ACL_START; 2247 flags |= ACL_CONT; 2248 do { 2249 skb = list; list = list->next; 2250 2251 skb->dev = (void *) hdev; 2252 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT; 2253 hci_add_acl_hdr(skb, conn->handle, flags); 2254 2255 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len); 2256 2257 __skb_queue_tail(queue, skb); 2258 } while (list); 2259 2260 spin_unlock(&queue->lock); 2261 } 2262 } 2263 2264 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags) 2265 { 2266 struct hci_conn *conn = chan->conn; 2267 struct hci_dev *hdev = conn->hdev; 2268 2269 BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags); 2270 2271 skb->dev = (void *) hdev; 2272 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT; 2273 hci_add_acl_hdr(skb, conn->handle, flags); 2274 2275 hci_queue_acl(conn, &chan->data_q, skb, flags); 2276 2277 queue_work(hdev->workqueue, &hdev->tx_work); 2278 } 2279 EXPORT_SYMBOL(hci_send_acl); 2280 2281 /* Send SCO data */ 2282 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb) 2283 { 2284 struct hci_dev *hdev = conn->hdev; 2285 struct hci_sco_hdr hdr; 2286 2287 BT_DBG("%s len %d", hdev->name, skb->len); 2288 2289 hdr.handle = cpu_to_le16(conn->handle); 2290 hdr.dlen = skb->len; 2291 2292 skb_push(skb, HCI_SCO_HDR_SIZE); 2293 skb_reset_transport_header(skb); 2294 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE); 2295 2296 skb->dev = (void *) hdev; 2297 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT; 2298 2299 skb_queue_tail(&conn->data_q, skb); 2300 queue_work(hdev->workqueue, &hdev->tx_work); 2301 } 2302 EXPORT_SYMBOL(hci_send_sco); 2303 2304 /* ---- HCI TX task (outgoing data) ---- */ 2305 2306 /* HCI Connection scheduler */ 2307 static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote) 2308 { 2309 struct hci_conn_hash *h = &hdev->conn_hash; 2310 struct hci_conn *conn = NULL, *c; 2311 int num = 0, min = ~0; 2312 2313 /* We don't have to lock device here. Connections are always 2314 * added and removed with TX task disabled. */ 2315 2316 rcu_read_lock(); 2317 2318 list_for_each_entry_rcu(c, &h->list, list) { 2319 if (c->type != type || skb_queue_empty(&c->data_q)) 2320 continue; 2321 2322 if (c->state != BT_CONNECTED && c->state != BT_CONFIG) 2323 continue; 2324 2325 num++; 2326 2327 if (c->sent < min) { 2328 min = c->sent; 2329 conn = c; 2330 } 2331 2332 if (hci_conn_num(hdev, type) == num) 2333 break; 2334 } 2335 2336 rcu_read_unlock(); 2337 2338 if (conn) { 2339 int cnt, q; 2340 2341 switch (conn->type) { 2342 case ACL_LINK: 2343 cnt = hdev->acl_cnt; 2344 break; 2345 case SCO_LINK: 2346 case ESCO_LINK: 2347 cnt = hdev->sco_cnt; 2348 break; 2349 case LE_LINK: 2350 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt; 2351 break; 2352 default: 2353 cnt = 0; 2354 BT_ERR("Unknown link type"); 2355 } 2356 2357 q = cnt / num; 2358 *quote = q ? q : 1; 2359 } else 2360 *quote = 0; 2361 2362 BT_DBG("conn %p quote %d", conn, *quote); 2363 return conn; 2364 } 2365 2366 static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type) 2367 { 2368 struct hci_conn_hash *h = &hdev->conn_hash; 2369 struct hci_conn *c; 2370 2371 BT_ERR("%s link tx timeout", hdev->name); 2372 2373 rcu_read_lock(); 2374 2375 /* Kill stalled connections */ 2376 list_for_each_entry_rcu(c, &h->list, list) { 2377 if (c->type == type && c->sent) { 2378 BT_ERR("%s killing stalled connection %s", 2379 hdev->name, batostr(&c->dst)); 2380 hci_acl_disconn(c, 0x13); 2381 } 2382 } 2383 2384 rcu_read_unlock(); 2385 } 2386 2387 static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type, 2388 int *quote) 2389 { 2390 struct hci_conn_hash *h = &hdev->conn_hash; 2391 struct hci_chan *chan = NULL; 2392 int num = 0, min = ~0, cur_prio = 0; 2393 struct hci_conn *conn; 2394 int cnt, q, conn_num = 0; 2395 2396 BT_DBG("%s", hdev->name); 2397 2398 rcu_read_lock(); 2399 2400 list_for_each_entry_rcu(conn, &h->list, list) { 2401 struct hci_chan *tmp; 2402 2403 if (conn->type != type) 2404 continue; 2405 2406 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG) 2407 continue; 2408 2409 conn_num++; 2410 2411 list_for_each_entry_rcu(tmp, &conn->chan_list, list) { 2412 struct sk_buff *skb; 2413 2414 if (skb_queue_empty(&tmp->data_q)) 2415 continue; 2416 2417 skb = skb_peek(&tmp->data_q); 2418 if (skb->priority < cur_prio) 2419 continue; 2420 2421 if (skb->priority > cur_prio) { 2422 num = 0; 2423 min = ~0; 2424 cur_prio = skb->priority; 2425 } 2426 2427 num++; 2428 2429 if (conn->sent < min) { 2430 min = conn->sent; 2431 chan = tmp; 2432 } 2433 } 2434 2435 if (hci_conn_num(hdev, type) == conn_num) 2436 break; 2437 } 2438 2439 rcu_read_unlock(); 2440 2441 if (!chan) 2442 return NULL; 2443 2444 switch (chan->conn->type) { 2445 case ACL_LINK: 2446 cnt = hdev->acl_cnt; 2447 break; 2448 case SCO_LINK: 2449 case ESCO_LINK: 2450 cnt = hdev->sco_cnt; 2451 break; 2452 case LE_LINK: 2453 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt; 2454 break; 2455 default: 2456 cnt = 0; 2457 BT_ERR("Unknown link type"); 2458 } 2459 2460 q = cnt / num; 2461 *quote = q ? q : 1; 2462 BT_DBG("chan %p quote %d", chan, *quote); 2463 return chan; 2464 } 2465 2466 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type) 2467 { 2468 struct hci_conn_hash *h = &hdev->conn_hash; 2469 struct hci_conn *conn; 2470 int num = 0; 2471 2472 BT_DBG("%s", hdev->name); 2473 2474 rcu_read_lock(); 2475 2476 list_for_each_entry_rcu(conn, &h->list, list) { 2477 struct hci_chan *chan; 2478 2479 if (conn->type != type) 2480 continue; 2481 2482 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG) 2483 continue; 2484 2485 num++; 2486 2487 list_for_each_entry_rcu(chan, &conn->chan_list, list) { 2488 struct sk_buff *skb; 2489 2490 if (chan->sent) { 2491 chan->sent = 0; 2492 continue; 2493 } 2494 2495 if (skb_queue_empty(&chan->data_q)) 2496 continue; 2497 2498 skb = skb_peek(&chan->data_q); 2499 if (skb->priority >= HCI_PRIO_MAX - 1) 2500 continue; 2501 2502 skb->priority = HCI_PRIO_MAX - 1; 2503 2504 BT_DBG("chan %p skb %p promoted to %d", chan, skb, 2505 skb->priority); 2506 } 2507 2508 if (hci_conn_num(hdev, type) == num) 2509 break; 2510 } 2511 2512 rcu_read_unlock(); 2513 2514 } 2515 2516 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb) 2517 { 2518 /* Calculate count of blocks used by this packet */ 2519 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len); 2520 } 2521 2522 static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt) 2523 { 2524 if (!test_bit(HCI_RAW, &hdev->flags)) { 2525 /* ACL tx timeout must be longer than maximum 2526 * link supervision timeout (40.9 seconds) */ 2527 if (!cnt && time_after(jiffies, hdev->acl_last_tx + 2528 msecs_to_jiffies(HCI_ACL_TX_TIMEOUT))) 2529 hci_link_tx_to(hdev, ACL_LINK); 2530 } 2531 } 2532 2533 static inline void hci_sched_acl_pkt(struct hci_dev *hdev) 2534 { 2535 unsigned int cnt = hdev->acl_cnt; 2536 struct hci_chan *chan; 2537 struct sk_buff *skb; 2538 int quote; 2539 2540 __check_timeout(hdev, cnt); 2541 2542 while (hdev->acl_cnt && 2543 (chan = hci_chan_sent(hdev, ACL_LINK, "e))) { 2544 u32 priority = (skb_peek(&chan->data_q))->priority; 2545 while (quote-- && (skb = skb_peek(&chan->data_q))) { 2546 BT_DBG("chan %p skb %p len %d priority %u", chan, skb, 2547 skb->len, skb->priority); 2548 2549 /* Stop if priority has changed */ 2550 if (skb->priority < priority) 2551 break; 2552 2553 skb = skb_dequeue(&chan->data_q); 2554 2555 hci_conn_enter_active_mode(chan->conn, 2556 bt_cb(skb)->force_active); 2557 2558 hci_send_frame(skb); 2559 hdev->acl_last_tx = jiffies; 2560 2561 hdev->acl_cnt--; 2562 chan->sent++; 2563 chan->conn->sent++; 2564 } 2565 } 2566 2567 if (cnt != hdev->acl_cnt) 2568 hci_prio_recalculate(hdev, ACL_LINK); 2569 } 2570 2571 static inline void hci_sched_acl_blk(struct hci_dev *hdev) 2572 { 2573 unsigned int cnt = hdev->block_cnt; 2574 struct hci_chan *chan; 2575 struct sk_buff *skb; 2576 int quote; 2577 2578 __check_timeout(hdev, cnt); 2579 2580 while (hdev->block_cnt > 0 && 2581 (chan = hci_chan_sent(hdev, ACL_LINK, "e))) { 2582 u32 priority = (skb_peek(&chan->data_q))->priority; 2583 while (quote > 0 && (skb = skb_peek(&chan->data_q))) { 2584 int blocks; 2585 2586 BT_DBG("chan %p skb %p len %d priority %u", chan, skb, 2587 skb->len, skb->priority); 2588 2589 /* Stop if priority has changed */ 2590 if (skb->priority < priority) 2591 break; 2592 2593 skb = skb_dequeue(&chan->data_q); 2594 2595 blocks = __get_blocks(hdev, skb); 2596 if (blocks > hdev->block_cnt) 2597 return; 2598 2599 hci_conn_enter_active_mode(chan->conn, 2600 bt_cb(skb)->force_active); 2601 2602 hci_send_frame(skb); 2603 hdev->acl_last_tx = jiffies; 2604 2605 hdev->block_cnt -= blocks; 2606 quote -= blocks; 2607 2608 chan->sent += blocks; 2609 chan->conn->sent += blocks; 2610 } 2611 } 2612 2613 if (cnt != hdev->block_cnt) 2614 hci_prio_recalculate(hdev, ACL_LINK); 2615 } 2616 2617 static inline void hci_sched_acl(struct hci_dev *hdev) 2618 { 2619 BT_DBG("%s", hdev->name); 2620 2621 if (!hci_conn_num(hdev, ACL_LINK)) 2622 return; 2623 2624 switch (hdev->flow_ctl_mode) { 2625 case HCI_FLOW_CTL_MODE_PACKET_BASED: 2626 hci_sched_acl_pkt(hdev); 2627 break; 2628 2629 case HCI_FLOW_CTL_MODE_BLOCK_BASED: 2630 hci_sched_acl_blk(hdev); 2631 break; 2632 } 2633 } 2634 2635 /* Schedule SCO */ 2636 static inline void hci_sched_sco(struct hci_dev *hdev) 2637 { 2638 struct hci_conn *conn; 2639 struct sk_buff *skb; 2640 int quote; 2641 2642 BT_DBG("%s", hdev->name); 2643 2644 if (!hci_conn_num(hdev, SCO_LINK)) 2645 return; 2646 2647 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, "e))) { 2648 while (quote-- && (skb = skb_dequeue(&conn->data_q))) { 2649 BT_DBG("skb %p len %d", skb, skb->len); 2650 hci_send_frame(skb); 2651 2652 conn->sent++; 2653 if (conn->sent == ~0) 2654 conn->sent = 0; 2655 } 2656 } 2657 } 2658 2659 static inline void hci_sched_esco(struct hci_dev *hdev) 2660 { 2661 struct hci_conn *conn; 2662 struct sk_buff *skb; 2663 int quote; 2664 2665 BT_DBG("%s", hdev->name); 2666 2667 if (!hci_conn_num(hdev, ESCO_LINK)) 2668 return; 2669 2670 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, "e))) { 2671 while (quote-- && (skb = skb_dequeue(&conn->data_q))) { 2672 BT_DBG("skb %p len %d", skb, skb->len); 2673 hci_send_frame(skb); 2674 2675 conn->sent++; 2676 if (conn->sent == ~0) 2677 conn->sent = 0; 2678 } 2679 } 2680 } 2681 2682 static inline void hci_sched_le(struct hci_dev *hdev) 2683 { 2684 struct hci_chan *chan; 2685 struct sk_buff *skb; 2686 int quote, cnt, tmp; 2687 2688 BT_DBG("%s", hdev->name); 2689 2690 if (!hci_conn_num(hdev, LE_LINK)) 2691 return; 2692 2693 if (!test_bit(HCI_RAW, &hdev->flags)) { 2694 /* LE tx timeout must be longer than maximum 2695 * link supervision timeout (40.9 seconds) */ 2696 if (!hdev->le_cnt && hdev->le_pkts && 2697 time_after(jiffies, hdev->le_last_tx + HZ * 45)) 2698 hci_link_tx_to(hdev, LE_LINK); 2699 } 2700 2701 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt; 2702 tmp = cnt; 2703 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, "e))) { 2704 u32 priority = (skb_peek(&chan->data_q))->priority; 2705 while (quote-- && (skb = skb_peek(&chan->data_q))) { 2706 BT_DBG("chan %p skb %p len %d priority %u", chan, skb, 2707 skb->len, skb->priority); 2708 2709 /* Stop if priority has changed */ 2710 if (skb->priority < priority) 2711 break; 2712 2713 skb = skb_dequeue(&chan->data_q); 2714 2715 hci_send_frame(skb); 2716 hdev->le_last_tx = jiffies; 2717 2718 cnt--; 2719 chan->sent++; 2720 chan->conn->sent++; 2721 } 2722 } 2723 2724 if (hdev->le_pkts) 2725 hdev->le_cnt = cnt; 2726 else 2727 hdev->acl_cnt = cnt; 2728 2729 if (cnt != tmp) 2730 hci_prio_recalculate(hdev, LE_LINK); 2731 } 2732 2733 static void hci_tx_work(struct work_struct *work) 2734 { 2735 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work); 2736 struct sk_buff *skb; 2737 2738 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt, 2739 hdev->sco_cnt, hdev->le_cnt); 2740 2741 /* Schedule queues and send stuff to HCI driver */ 2742 2743 hci_sched_acl(hdev); 2744 2745 hci_sched_sco(hdev); 2746 2747 hci_sched_esco(hdev); 2748 2749 hci_sched_le(hdev); 2750 2751 /* Send next queued raw (unknown type) packet */ 2752 while ((skb = skb_dequeue(&hdev->raw_q))) 2753 hci_send_frame(skb); 2754 } 2755 2756 /* ----- HCI RX task (incoming data processing) ----- */ 2757 2758 /* ACL data packet */ 2759 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb) 2760 { 2761 struct hci_acl_hdr *hdr = (void *) skb->data; 2762 struct hci_conn *conn; 2763 __u16 handle, flags; 2764 2765 skb_pull(skb, HCI_ACL_HDR_SIZE); 2766 2767 handle = __le16_to_cpu(hdr->handle); 2768 flags = hci_flags(handle); 2769 handle = hci_handle(handle); 2770 2771 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags); 2772 2773 hdev->stat.acl_rx++; 2774 2775 hci_dev_lock(hdev); 2776 conn = hci_conn_hash_lookup_handle(hdev, handle); 2777 hci_dev_unlock(hdev); 2778 2779 if (conn) { 2780 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF); 2781 2782 /* Send to upper protocol */ 2783 l2cap_recv_acldata(conn, skb, flags); 2784 return; 2785 } else { 2786 BT_ERR("%s ACL packet for unknown connection handle %d", 2787 hdev->name, handle); 2788 } 2789 2790 kfree_skb(skb); 2791 } 2792 2793 /* SCO data packet */ 2794 static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb) 2795 { 2796 struct hci_sco_hdr *hdr = (void *) skb->data; 2797 struct hci_conn *conn; 2798 __u16 handle; 2799 2800 skb_pull(skb, HCI_SCO_HDR_SIZE); 2801 2802 handle = __le16_to_cpu(hdr->handle); 2803 2804 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle); 2805 2806 hdev->stat.sco_rx++; 2807 2808 hci_dev_lock(hdev); 2809 conn = hci_conn_hash_lookup_handle(hdev, handle); 2810 hci_dev_unlock(hdev); 2811 2812 if (conn) { 2813 /* Send to upper protocol */ 2814 sco_recv_scodata(conn, skb); 2815 return; 2816 } else { 2817 BT_ERR("%s SCO packet for unknown connection handle %d", 2818 hdev->name, handle); 2819 } 2820 2821 kfree_skb(skb); 2822 } 2823 2824 static void hci_rx_work(struct work_struct *work) 2825 { 2826 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work); 2827 struct sk_buff *skb; 2828 2829 BT_DBG("%s", hdev->name); 2830 2831 while ((skb = skb_dequeue(&hdev->rx_q))) { 2832 /* Send copy to monitor */ 2833 hci_send_to_monitor(hdev, skb); 2834 2835 if (atomic_read(&hdev->promisc)) { 2836 /* Send copy to the sockets */ 2837 hci_send_to_sock(hdev, skb); 2838 } 2839 2840 if (test_bit(HCI_RAW, &hdev->flags)) { 2841 kfree_skb(skb); 2842 continue; 2843 } 2844 2845 if (test_bit(HCI_INIT, &hdev->flags)) { 2846 /* Don't process data packets in this states. */ 2847 switch (bt_cb(skb)->pkt_type) { 2848 case HCI_ACLDATA_PKT: 2849 case HCI_SCODATA_PKT: 2850 kfree_skb(skb); 2851 continue; 2852 } 2853 } 2854 2855 /* Process frame */ 2856 switch (bt_cb(skb)->pkt_type) { 2857 case HCI_EVENT_PKT: 2858 BT_DBG("%s Event packet", hdev->name); 2859 hci_event_packet(hdev, skb); 2860 break; 2861 2862 case HCI_ACLDATA_PKT: 2863 BT_DBG("%s ACL data packet", hdev->name); 2864 hci_acldata_packet(hdev, skb); 2865 break; 2866 2867 case HCI_SCODATA_PKT: 2868 BT_DBG("%s SCO data packet", hdev->name); 2869 hci_scodata_packet(hdev, skb); 2870 break; 2871 2872 default: 2873 kfree_skb(skb); 2874 break; 2875 } 2876 } 2877 } 2878 2879 static void hci_cmd_work(struct work_struct *work) 2880 { 2881 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work); 2882 struct sk_buff *skb; 2883 2884 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt)); 2885 2886 /* Send queued commands */ 2887 if (atomic_read(&hdev->cmd_cnt)) { 2888 skb = skb_dequeue(&hdev->cmd_q); 2889 if (!skb) 2890 return; 2891 2892 kfree_skb(hdev->sent_cmd); 2893 2894 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC); 2895 if (hdev->sent_cmd) { 2896 atomic_dec(&hdev->cmd_cnt); 2897 hci_send_frame(skb); 2898 if (test_bit(HCI_RESET, &hdev->flags)) 2899 del_timer(&hdev->cmd_timer); 2900 else 2901 mod_timer(&hdev->cmd_timer, 2902 jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT)); 2903 } else { 2904 skb_queue_head(&hdev->cmd_q, skb); 2905 queue_work(hdev->workqueue, &hdev->cmd_work); 2906 } 2907 } 2908 } 2909 2910 int hci_do_inquiry(struct hci_dev *hdev, u8 length) 2911 { 2912 /* General inquiry access code (GIAC) */ 2913 u8 lap[3] = { 0x33, 0x8b, 0x9e }; 2914 struct hci_cp_inquiry cp; 2915 2916 BT_DBG("%s", hdev->name); 2917 2918 if (test_bit(HCI_INQUIRY, &hdev->flags)) 2919 return -EINPROGRESS; 2920 2921 inquiry_cache_flush(hdev); 2922 2923 memset(&cp, 0, sizeof(cp)); 2924 memcpy(&cp.lap, lap, sizeof(cp.lap)); 2925 cp.length = length; 2926 2927 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp); 2928 } 2929 2930 int hci_cancel_inquiry(struct hci_dev *hdev) 2931 { 2932 BT_DBG("%s", hdev->name); 2933 2934 if (!test_bit(HCI_INQUIRY, &hdev->flags)) 2935 return -EPERM; 2936 2937 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL); 2938 } 2939