/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <net/sock.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#ifndef CONFIG_BT_HCI_CORE_DEBUG
#undef  BT_DBG
#define BT_DBG(D...)
#endif

static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);
static void hci_notify(struct hci_dev *hdev, int event);

static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO	2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);

/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}

/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, int result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_err(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	int ret;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
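
/*
 * Illustrative sketch (not built): a synchronous request is a callback
 * that fires one or more HCI commands; the event handler later calls
 * hci_req_complete() with the status, which wakes __hci_request().
 * The command, payload and timeout below are arbitrary example values.
 */
#if 0
static void example_scan_off_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = 0x00;	/* example payload: disable all scans */

	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_SCAN_ENABLE, 1, &scan);
}

/* Caller sleeps until hci_req_complete() fires or the timeout expires */
err = hci_request(hdev, example_scan_off_req, 0, msecs_to_jiffies(1000));
#endif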

static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_RESET, 0, NULL);
}

static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;
	__le16 param;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;
		skb_queue_tail(&hdev->cmd_q, skb);
		hci_sched_cmd(hdev);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Mandatory initialization */

	/* Reset */
	if (test_bit(HCI_QUIRK_RESET_ON_INIT, &hdev->quirks))
		hci_send_cmd(hdev, OGF_HOST_CTL, OCF_RESET, 0, NULL);

	/* Read Local Supported Features */
	hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_BUFFER_SIZE, 0, NULL);

#if 0
	/* Host buffer size */
	{
		struct hci_cp_host_buffer_size cp;
		cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
		cp.sco_mtu = HCI_MAX_SCO_SIZE;
		cp.acl_max_pkt = cpu_to_le16(0xffff);
		cp.sco_max_pkt = cpu_to_le16(0xffff);
		hci_send_cmd(hdev, OGF_HOST_CTL, OCF_HOST_BUFFER_SIZE, sizeof(cp), &cp);
	}
#endif

	/* Read BD Address */
	hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_BD_ADDR, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	{
		struct hci_cp_set_event_flt cp;
		cp.flt_type = HCI_FLT_CLEAR_ALL;
		hci_send_cmd(hdev, OGF_HOST_CTL, OCF_SET_EVENT_FLT, sizeof(cp), &cp);
	}

	/* Page timeout ~20 secs */
	param = cpu_to_le16(0x8000);
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_PG_TIMEOUT, 2, &param);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_CA_TIMEOUT, 2, &param);
}
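
/*
 * Illustrative sketch (not built): a transport driver may pre-queue raw
 * vendor setup commands on hdev->driver_init before the device is opened;
 * hci_init_req() above drains that queue ahead of the standard
 * initialization. The 3-byte packet here is a placeholder (opcode in
 * little-endian order plus a zero parameter length), not a real command.
 */
#if 0
struct sk_buff *skb = bt_skb_alloc(3, GFP_KERNEL);

if (skb) {
	memcpy(skb_put(skb, 3), "\x01\xfc\x00", 3);	/* hypothetical vendor opcode 0xfc01 */
	skb_queue_tail(&hdev->driver_init, skb);
}
#endif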

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL;
	struct list_head *p;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *d = list_entry(p, struct hci_dev, list);
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
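
/*
 * Usage sketch (not built): hci_dev_get() returns the device with a
 * reference held, so every successful lookup must be balanced with
 * hci_dev_put() once the caller is done.
 */
#if 0
struct hci_dev *hdev = hci_dev_get(0);		/* look up hci0 */

if (hdev) {
	BT_DBG("found %s", hdev->name);
	hci_dev_put(hdev);			/* drop the reference */
}
#endif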

/* ---- Inquiry support ---- */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *next = cache->list, *e;

	BT_DBG("cache %p", cache);

	cache->list = NULL;
	while ((e = next)) {
		next = e->next;
		kfree(e);
	}
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	for (e = cache->list; e; e = e->next)
		if (!bacmp(&e->data.bdaddr, bdaddr))
			break;
	return e;
}

void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	if (!(e = hci_inquiry_cache_lookup(hdev, &data->bdaddr))) {
		/* Entry not in the cache. Add new one. */
		if (!(e = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
			return;
		e->next = cache->list;
		cache->list = e;
	}

	memcpy(&e->data, data, sizeof(*data));
	e->timestamp = jiffies;
	cache->timestamp = jiffies;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	for (e = cache->list; e && copied < num; e = e->next, copied++) {
		struct inquiry_data *data = &e->data;
		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode    = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode        = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset      = data->clock_offset;
		info++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, OGF_LINK_CTL, OCF_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(ir.dev_id)))
		return -ENODEV;

	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
					inquiry_cache_empty(hdev) ||
					ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);
	if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long) &ir, timeo)) < 0)
		goto done;

	/* For an unlimited number of responses we use a buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temporary buffer
	 * and then copy it to user space. */
	if (!(buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL))) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
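
/*
 * Illustrative user-space sketch (not built): hci_inquiry() backs the
 * HCIINQUIRY ioctl on an HCI socket. The general inquiry LAP 0x9e8b33
 * and the length of 8 (about 10.24 seconds) are conventional example
 * values; "fd" is assumed to be an open HCI socket.
 */
#if 0
struct {
	struct hci_inquiry_req ir;
	struct inquiry_info info[255];
} req = {
	.ir = {
		.dev_id  = 0,				/* hci0 */
		.flags   = IREQ_CACHE_FLUSH,
		.lap     = { 0x33, 0x8b, 0x9e },	/* general inquiry access code */
		.length  = 8,				/* inquiry duration, x 1.28 s */
		.num_rsp = 0,				/* unlimited -> capped at 255 */
	},
};

ioctl(fd, HCIINQUIRY, &req);
#endif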

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);

		//__hci_request(hdev, hci_reset_req, 0, HZ);
		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
	} else {
		/* Init failed, cleanup */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Kill cmd task */
	tasklet_kill(&hdev->cmd_task);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;
	err = hci_dev_do_close(hdev);
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	hci_req_lock(hdev);
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(dr.dev_id)))
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETLINKPOL:
		hdev->link_policy = (__u16) dr.dev_opt;
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) & (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}
	hci_dev_put(hdev);
	return err;
}
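
/*
 * Illustrative user-space sketch (not built): HCISETSCAN passes dev_opt
 * straight into hci_scan_req(), i.e. into Write Scan Enable. SCAN_PAGE
 * and SCAN_INQUIRY are the scan-enable bits from hci.h; "fd" is assumed
 * to be an open HCI socket.
 */
#if 0
struct hci_dev_req dr = {
	.dev_id  = 0,				/* hci0 */
	.dev_opt = SCAN_PAGE | SCAN_INQUIRY,	/* connectable and discoverable */
};

ioctl(fd, HCISETSCAN, &dr);
#endif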

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	struct list_head *p;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	if (!(dl = kmalloc(size, GFP_KERNEL)))
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *hdev;
		hdev = list_entry(p, struct hci_dev, list);
		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;
		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(di.dev_id)))
		return -ENODEV;

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = hdev->type;
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int id = 0;

	BT_DBG("%p name %s type %d owner %p", hdev, hdev->name, hdev->type, hdev->owner);

	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->flags = 0;
	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);
	init_MUTEX(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hci_register_sysfs(hdev);

	hci_notify(hdev, HCI_DEV_REG);

	return id;
}
EXPORT_SYMBOL(hci_register_dev);
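
/*
 * Illustrative driver-side sketch (not built): the minimum a transport
 * driver must supply before hci_register_dev() accepts the device is
 * the open, close and destruct callbacks (plus send to be useful).
 * The my_* callbacks are hypothetical.
 */
#if 0
struct hci_dev *hdev = hci_alloc_dev();

if (!hdev)
	return -ENOMEM;

hdev->type     = HCI_VIRTUAL;		/* example transport type */
hdev->owner    = THIS_MODULE;
hdev->open     = my_open;
hdev->close    = my_close;
hdev->send     = my_send;
hdev->destruct = my_destruct;

if (hci_register_dev(hdev) < 0) {
	hci_free_dev(hdev);
	return -EBUSY;
}

/* ... and on teardown ... */
hci_unregister_dev(hdev);
hci_free_dev(hdev);
#endif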

/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
	BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type);

	hci_unregister_sysfs(hdev);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	hci_notify(hdev, HCI_DEV_UNREG);

	__hci_dev_put(hdev);
	return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (!hci_proto[hp->id])
		hci_proto[hp->id] = hp;
	else
		err = -EEXIST;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_register_proto);

int hci_unregister_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (hci_proto[hp->id])
		hci_proto[hp->id] = NULL;
	else
		err = -ENOENT;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_unregister_proto);

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 ogf, __u16 ocf, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s ogf 0x%x ocf 0x%x plen %d", hdev->name, ogf, ocf, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s Can't allocate memory for HCI command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(hci_opcode_pack(ogf, ocf));
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;
	skb_queue_tail(&hdev->cmd_q, skb);
	hci_sched_cmd(hdev);

	return 0;
}
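
/*
 * Illustrative sketch (not built): commands with parameters pack a
 * little-endian struct and pass it by pointer, exactly as hci_inq_req()
 * does above with struct hci_cp_inquiry. The struct below is a
 * hypothetical stand-in, not a definition from hci.h.
 */
#if 0
struct example_cp_pg_timeout {
	__le16 timeout;
} cp;

cp.timeout = cpu_to_le16(0x4000);	/* ~10.24 s in 0.625 ms slots */
hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_PG_TIMEOUT, sizeof(cp), &cp);
#endif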

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 ogf, __u16 ocf)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(hci_opcode_pack(ogf, ocf)))
		return NULL;

	BT_DBG("%s ogf 0x%x ocf 0x%x", hdev->name, ogf, ocf);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *) skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}

int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);

	if (!(list = skb_shinfo(skb)->frag_list)) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);
		} while (list);

		spin_unlock_bh(&conn->data_q.lock);
	}

	hci_sched_tx(hdev);
	return 0;
}
EXPORT_SYMBOL(hci_send_acl);
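
/*
 * Fragmentation sketch: callers such as L2CAP chain continuation
 * fragments on skb_shinfo(skb)->frag_list; hci_send_acl() then tags
 * the head with ACL_START and every chained fragment with ACL_CONT,
 * so a three-fragment payload is queued as:
 *
 *	[hdr|ACL_START][frag 0] [hdr|ACL_CONT][frag 1] [hdr|ACL_CONT][frag 2]
 *
 * with each fragment carrying its own ACL header and length.
 */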

/* Send SCO data */
int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	if (skb->len > hdev->sco_mtu) {
		kfree_skb(skb);
		return -EINVAL;
	}

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
	skb_queue_tail(&conn->data_q, skb);
	hci_sched_tx(hdev);
	return 0;
}
EXPORT_SYMBOL(hci_send_sco);

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL;
	int num = 0, min = ~0;
	struct list_head *p;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each(p, &h->list) {
		struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		if (c->type != type || c->state != BT_CONNECTED
				|| skb_queue_empty(&c->data_q))
			continue;
		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}
	}

	if (conn) {
		int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
		int q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
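
/*
 * Worked example: with three ACL connections that all have queued data
 * and acl_cnt = 9 free controller buffers, hci_low_sent() picks the
 * connection with the fewest packets in flight (lowest ->sent) and
 * grants it a quote of 9 / 3 = 3 packets; a computed quote of 0 is
 * rounded up to 1 so a busy device never starves a connection outright.
 */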

static inline void hci_acl_tx_to(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn *c;

	BT_ERR("%s ACL tx timeout", hdev->name);

	/* Kill stalled connections */
	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == ACL_LINK && c->sent) {
			BT_ERR("%s killing stalled ACL connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}
}

static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && (jiffies - hdev->acl_last_tx) > (HZ * 45))
			hci_acl_tx_to(hdev);
	}

	while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			hci_conn_enter_active_mode(conn);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			conn->sent++;
		}
	}
}

/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		hci_conn_enter_active_mode(conn);

		/* Send to upper protocol */
		if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) {
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		if ((hp = hci_proto[HCI_PROTO_SCO]) && hp->recv_scodata) {
			hp->recv_scodata(conn, skb);
			return;
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

static void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in these states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}

static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	if (!atomic_read(&hdev->cmd_cnt) && (jiffies - hdev->cmd_last_tx) > HZ) {
		BT_ERR("%s command tx timeout", hdev->name);
		atomic_set(&hdev->cmd_cnt, 1);
	}

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
		if (hdev->sent_cmd)
			kfree_skb(hdev->sent_cmd);

		if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			hdev->cmd_last_tx = jiffies;
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			hci_sched_cmd(hdev);
		}
	}
}
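
/*
 * Command flow-control note: cmd_cnt is the controller's command credit.
 * It starts at 1; each transmitted command decrements it here, and the
 * event handler (in hci_event.c, outside this file) restores it from
 * the Num_HCI_Command_Packets field of Command Complete/Status events.
 * If no credit arrives within a second, the timeout above resets it to 1.
 */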