/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE		0
#define HCI_REQ_PEND		1
#define HCI_REQ_CANCELED	2

#define hci_req_lock(d)		mutex_lock(&d->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ?
'Y': 'N'; 84 buf[1] = '\n'; 85 buf[2] = '\0'; 86 return simple_read_from_buffer(user_buf, count, ppos, buf, 2); 87 } 88 89 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf, 90 size_t count, loff_t *ppos) 91 { 92 struct hci_dev *hdev = file->private_data; 93 struct sk_buff *skb; 94 char buf[32]; 95 size_t buf_size = min(count, (sizeof(buf)-1)); 96 bool enable; 97 98 if (!test_bit(HCI_UP, &hdev->flags)) 99 return -ENETDOWN; 100 101 if (copy_from_user(buf, user_buf, buf_size)) 102 return -EFAULT; 103 104 buf[buf_size] = '\0'; 105 if (strtobool(buf, &enable)) 106 return -EINVAL; 107 108 if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE)) 109 return -EALREADY; 110 111 hci_req_lock(hdev); 112 if (enable) 113 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL, 114 HCI_CMD_TIMEOUT); 115 else 116 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, 117 HCI_CMD_TIMEOUT); 118 hci_req_unlock(hdev); 119 120 if (IS_ERR(skb)) 121 return PTR_ERR(skb); 122 123 kfree_skb(skb); 124 125 hci_dev_change_flag(hdev, HCI_DUT_MODE); 126 127 return count; 128 } 129 130 static const struct file_operations dut_mode_fops = { 131 .open = simple_open, 132 .read = dut_mode_read, 133 .write = dut_mode_write, 134 .llseek = default_llseek, 135 }; 136 137 /* ---- HCI requests ---- */ 138 139 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode, 140 struct sk_buff *skb) 141 { 142 BT_DBG("%s result 0x%2.2x", hdev->name, result); 143 144 if (hdev->req_status == HCI_REQ_PEND) { 145 hdev->req_result = result; 146 hdev->req_status = HCI_REQ_DONE; 147 if (skb) 148 hdev->req_skb = skb_get(skb); 149 wake_up_interruptible(&hdev->req_wait_q); 150 } 151 } 152 153 static void hci_req_cancel(struct hci_dev *hdev, int err) 154 { 155 BT_DBG("%s err 0x%2.2x", hdev->name, err); 156 157 if (hdev->req_status == HCI_REQ_PEND) { 158 hdev->req_result = err; 159 hdev->req_status = HCI_REQ_CANCELED; 160 wake_up_interruptible(&hdev->req_wait_q); 161 } 162 } 163 164 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen, 165 const void *param, u8 event, u32 timeout) 166 { 167 DECLARE_WAITQUEUE(wait, current); 168 struct hci_request req; 169 struct sk_buff *skb; 170 int err = 0; 171 172 BT_DBG("%s", hdev->name); 173 174 hci_req_init(&req, hdev); 175 176 hci_req_add_ev(&req, opcode, plen, param, event); 177 178 hdev->req_status = HCI_REQ_PEND; 179 180 add_wait_queue(&hdev->req_wait_q, &wait); 181 set_current_state(TASK_INTERRUPTIBLE); 182 183 err = hci_req_run_skb(&req, hci_req_sync_complete); 184 if (err < 0) { 185 remove_wait_queue(&hdev->req_wait_q, &wait); 186 set_current_state(TASK_RUNNING); 187 return ERR_PTR(err); 188 } 189 190 schedule_timeout(timeout); 191 192 remove_wait_queue(&hdev->req_wait_q, &wait); 193 194 if (signal_pending(current)) 195 return ERR_PTR(-EINTR); 196 197 switch (hdev->req_status) { 198 case HCI_REQ_DONE: 199 err = -bt_to_errno(hdev->req_result); 200 break; 201 202 case HCI_REQ_CANCELED: 203 err = -hdev->req_result; 204 break; 205 206 default: 207 err = -ETIMEDOUT; 208 break; 209 } 210 211 hdev->req_status = hdev->req_result = 0; 212 skb = hdev->req_skb; 213 hdev->req_skb = NULL; 214 215 BT_DBG("%s end: err %d", hdev->name, err); 216 217 if (err < 0) { 218 kfree_skb(skb); 219 return ERR_PTR(err); 220 } 221 222 if (!skb) 223 return ERR_PTR(-ENODATA); 224 225 return skb; 226 } 227 EXPORT_SYMBOL(__hci_cmd_sync_ev); 228 229 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen, 230 const void *param, u32 timeout) 231 { 232 return 
__hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout); 233 } 234 EXPORT_SYMBOL(__hci_cmd_sync); 235 236 /* Execute request and wait for completion. */ 237 static int __hci_req_sync(struct hci_dev *hdev, 238 void (*func)(struct hci_request *req, 239 unsigned long opt), 240 unsigned long opt, __u32 timeout) 241 { 242 struct hci_request req; 243 DECLARE_WAITQUEUE(wait, current); 244 int err = 0; 245 246 BT_DBG("%s start", hdev->name); 247 248 hci_req_init(&req, hdev); 249 250 hdev->req_status = HCI_REQ_PEND; 251 252 func(&req, opt); 253 254 add_wait_queue(&hdev->req_wait_q, &wait); 255 set_current_state(TASK_INTERRUPTIBLE); 256 257 err = hci_req_run_skb(&req, hci_req_sync_complete); 258 if (err < 0) { 259 hdev->req_status = 0; 260 261 remove_wait_queue(&hdev->req_wait_q, &wait); 262 set_current_state(TASK_RUNNING); 263 264 /* ENODATA means the HCI request command queue is empty. 265 * This can happen when a request with conditionals doesn't 266 * trigger any commands to be sent. This is normal behavior 267 * and should not trigger an error return. 268 */ 269 if (err == -ENODATA) 270 return 0; 271 272 return err; 273 } 274 275 schedule_timeout(timeout); 276 277 remove_wait_queue(&hdev->req_wait_q, &wait); 278 279 if (signal_pending(current)) 280 return -EINTR; 281 282 switch (hdev->req_status) { 283 case HCI_REQ_DONE: 284 err = -bt_to_errno(hdev->req_result); 285 break; 286 287 case HCI_REQ_CANCELED: 288 err = -hdev->req_result; 289 break; 290 291 default: 292 err = -ETIMEDOUT; 293 break; 294 } 295 296 hdev->req_status = hdev->req_result = 0; 297 298 BT_DBG("%s end: err %d", hdev->name, err); 299 300 return err; 301 } 302 303 static int hci_req_sync(struct hci_dev *hdev, 304 void (*req)(struct hci_request *req, 305 unsigned long opt), 306 unsigned long opt, __u32 timeout) 307 { 308 int ret; 309 310 if (!test_bit(HCI_UP, &hdev->flags)) 311 return -ENETDOWN; 312 313 /* Serialize all requests */ 314 hci_req_lock(hdev); 315 ret = __hci_req_sync(hdev, req, opt, timeout); 316 hci_req_unlock(hdev); 317 318 return ret; 319 } 320 321 static void hci_reset_req(struct hci_request *req, unsigned long opt) 322 { 323 BT_DBG("%s %ld", req->hdev->name, opt); 324 325 /* Reset device */ 326 set_bit(HCI_RESET, &req->hdev->flags); 327 hci_req_add(req, HCI_OP_RESET, 0, NULL); 328 } 329 330 static void bredr_init(struct hci_request *req) 331 { 332 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED; 333 334 /* Read Local Supported Features */ 335 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL); 336 337 /* Read Local Version */ 338 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL); 339 340 /* Read BD Address */ 341 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL); 342 } 343 344 static void amp_init1(struct hci_request *req) 345 { 346 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED; 347 348 /* Read Local Version */ 349 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL); 350 351 /* Read Local Supported Commands */ 352 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL); 353 354 /* Read Local AMP Info */ 355 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL); 356 357 /* Read Data Blk size */ 358 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL); 359 360 /* Read Flow Control Mode */ 361 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL); 362 363 /* Read Location Data */ 364 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL); 365 } 366 367 static void amp_init2(struct hci_request *req) 368 { 369 /* Read Local Supported Features. 
Not all AMP controllers
	 * support this so it's placed conditionally in the second
	 * stage init.
	 */
	if (req->hdev->commands[14] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init1(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices.
470 */ 471 if (hdev->hci_ver < BLUETOOTH_VER_1_2) 472 return; 473 474 if (lmp_bredr_capable(hdev)) { 475 events[4] |= 0x01; /* Flow Specification Complete */ 476 events[4] |= 0x02; /* Inquiry Result with RSSI */ 477 events[4] |= 0x04; /* Read Remote Extended Features Complete */ 478 events[5] |= 0x08; /* Synchronous Connection Complete */ 479 events[5] |= 0x10; /* Synchronous Connection Changed */ 480 } else { 481 /* Use a different default for LE-only devices */ 482 memset(events, 0, sizeof(events)); 483 events[0] |= 0x10; /* Disconnection Complete */ 484 events[1] |= 0x08; /* Read Remote Version Information Complete */ 485 events[1] |= 0x20; /* Command Complete */ 486 events[1] |= 0x40; /* Command Status */ 487 events[1] |= 0x80; /* Hardware Error */ 488 events[2] |= 0x04; /* Number of Completed Packets */ 489 events[3] |= 0x02; /* Data Buffer Overflow */ 490 491 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) { 492 events[0] |= 0x80; /* Encryption Change */ 493 events[5] |= 0x80; /* Encryption Key Refresh Complete */ 494 } 495 } 496 497 if (lmp_inq_rssi_capable(hdev)) 498 events[4] |= 0x02; /* Inquiry Result with RSSI */ 499 500 if (lmp_sniffsubr_capable(hdev)) 501 events[5] |= 0x20; /* Sniff Subrating */ 502 503 if (lmp_pause_enc_capable(hdev)) 504 events[5] |= 0x80; /* Encryption Key Refresh Complete */ 505 506 if (lmp_ext_inq_capable(hdev)) 507 events[5] |= 0x40; /* Extended Inquiry Result */ 508 509 if (lmp_no_flush_capable(hdev)) 510 events[7] |= 0x01; /* Enhanced Flush Complete */ 511 512 if (lmp_lsto_capable(hdev)) 513 events[6] |= 0x80; /* Link Supervision Timeout Changed */ 514 515 if (lmp_ssp_capable(hdev)) { 516 events[6] |= 0x01; /* IO Capability Request */ 517 events[6] |= 0x02; /* IO Capability Response */ 518 events[6] |= 0x04; /* User Confirmation Request */ 519 events[6] |= 0x08; /* User Passkey Request */ 520 events[6] |= 0x10; /* Remote OOB Data Request */ 521 events[6] |= 0x20; /* Simple Pairing Complete */ 522 events[7] |= 0x04; /* User Passkey Notification */ 523 events[7] |= 0x08; /* Keypress Notification */ 524 events[7] |= 0x10; /* Remote Host Supported 525 * Features Notification 526 */ 527 } 528 529 if (lmp_le_capable(hdev)) 530 events[7] |= 0x20; /* LE Meta-Event */ 531 532 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events); 533 } 534 535 static void hci_init2_req(struct hci_request *req, unsigned long opt) 536 { 537 struct hci_dev *hdev = req->hdev; 538 539 if (hdev->dev_type == HCI_AMP) 540 return amp_init2(req); 541 542 if (lmp_bredr_capable(hdev)) 543 bredr_setup(req); 544 else 545 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED); 546 547 if (lmp_le_capable(hdev)) 548 le_setup(req); 549 550 /* All Bluetooth 1.2 and later controllers should support the 551 * HCI command for reading the local supported commands. 552 * 553 * Unfortunately some controllers indicate Bluetooth 1.2 support, 554 * but do not have support for this command. If that is the case, 555 * the driver can quirk the behavior and skip reading the local 556 * supported commands. 557 */ 558 if (hdev->hci_ver > BLUETOOTH_VER_1_1 && 559 !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks)) 560 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL); 561 562 if (lmp_ssp_capable(hdev)) { 563 /* When SSP is available, then the host features page 564 * should also be available as well. However some 565 * controllers list the max_page as 0 as long as SSP 566 * has not been enabled. To achieve proper debugging 567 * output, force the minimum max_page to 1 at least. 
568 */ 569 hdev->max_page = 0x01; 570 571 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) { 572 u8 mode = 0x01; 573 574 hci_req_add(req, HCI_OP_WRITE_SSP_MODE, 575 sizeof(mode), &mode); 576 } else { 577 struct hci_cp_write_eir cp; 578 579 memset(hdev->eir, 0, sizeof(hdev->eir)); 580 memset(&cp, 0, sizeof(cp)); 581 582 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp); 583 } 584 } 585 586 if (lmp_inq_rssi_capable(hdev) || 587 test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) { 588 u8 mode; 589 590 /* If Extended Inquiry Result events are supported, then 591 * they are clearly preferred over Inquiry Result with RSSI 592 * events. 593 */ 594 mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01; 595 596 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode); 597 } 598 599 if (lmp_inq_tx_pwr_capable(hdev)) 600 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL); 601 602 if (lmp_ext_feat_capable(hdev)) { 603 struct hci_cp_read_local_ext_features cp; 604 605 cp.page = 0x01; 606 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES, 607 sizeof(cp), &cp); 608 } 609 610 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) { 611 u8 enable = 1; 612 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable), 613 &enable); 614 } 615 } 616 617 static void hci_setup_link_policy(struct hci_request *req) 618 { 619 struct hci_dev *hdev = req->hdev; 620 struct hci_cp_write_def_link_policy cp; 621 u16 link_policy = 0; 622 623 if (lmp_rswitch_capable(hdev)) 624 link_policy |= HCI_LP_RSWITCH; 625 if (lmp_hold_capable(hdev)) 626 link_policy |= HCI_LP_HOLD; 627 if (lmp_sniff_capable(hdev)) 628 link_policy |= HCI_LP_SNIFF; 629 if (lmp_park_capable(hdev)) 630 link_policy |= HCI_LP_PARK; 631 632 cp.policy = cpu_to_le16(link_policy); 633 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp); 634 } 635 636 static void hci_set_le_support(struct hci_request *req) 637 { 638 struct hci_dev *hdev = req->hdev; 639 struct hci_cp_write_le_host_supported cp; 640 641 /* LE-only devices do not support explicit enablement */ 642 if (!lmp_bredr_capable(hdev)) 643 return; 644 645 memset(&cp, 0, sizeof(cp)); 646 647 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) { 648 cp.le = 0x01; 649 cp.simul = 0x00; 650 } 651 652 if (cp.le != lmp_host_le_capable(hdev)) 653 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp), 654 &cp); 655 } 656 657 static void hci_set_event_mask_page_2(struct hci_request *req) 658 { 659 struct hci_dev *hdev = req->hdev; 660 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; 661 662 /* If Connectionless Slave Broadcast master role is supported 663 * enable all necessary events for it. 664 */ 665 if (lmp_csb_master_capable(hdev)) { 666 events[1] |= 0x40; /* Triggered Clock Capture */ 667 events[1] |= 0x80; /* Synchronization Train Complete */ 668 events[2] |= 0x10; /* Slave Page Response Timeout */ 669 events[2] |= 0x20; /* CSB Channel Map Change */ 670 } 671 672 /* If Connectionless Slave Broadcast slave role is supported 673 * enable all necessary events for it. 
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	if (hdev->commands[6] & 0x20) {
		struct hci_cp_read_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.read_all = 0x01;
		hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (hdev->commands[8] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

	/* Some older Broadcom based Bluetooth 1.2 controllers do not
	 * support the Read Page Scan Type command. Check support for
	 * this command in the bit mask of supported commands.
	 */
	if (hdev->commands[13] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));
		events[0] = 0x0f;

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10;	/* LE Long Term Key Request */

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20;	/* LE Remote Connection
						 * Parameter Request
						 */

		/* If the controller supports the Data Length Extension
		 * feature, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
			events[0] |= 0x40;	/* LE Data Length Change */

		/* If the controller supports Extended Scanner Filter
		 * Policies, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
			events[1] |= 0x04;	/* LE Direct Advertising
						 * Report
						 */

		/* If the controller supports the LE Read Local P-256
		 * Public Key command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x02)
			events[0] |= 0x80;	/* LE Read Local P-256
						 * Public Key Complete
						 */

		/* If the controller supports the LE Generate DHKey
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x04)
			events[1] |= 0x01;	/* LE Generate DHKey Complete */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		if (hdev->commands[25] & 0x40) {
			/* Read LE Advertising Channel TX Power */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
			/* Read LE Maximum Data Length */
			hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

			/* Read LE Suggested Default Data Length */
			hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    bredr_sc_enabled(hdev)) {
		u8 support = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}

static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (hci_dev_test_flag(hdev, HCI_SETUP)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers.
AMP controllers only need the 866 * first two stages of init. 867 */ 868 if (hdev->dev_type != HCI_BREDR) 869 return 0; 870 871 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT); 872 if (err < 0) 873 return err; 874 875 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT); 876 if (err < 0) 877 return err; 878 879 /* This function is only called when the controller is actually in 880 * configured state. When the controller is marked as unconfigured, 881 * this initialization procedure is not run. 882 * 883 * It means that it is possible that a controller runs through its 884 * setup phase and then discovers missing settings. If that is the 885 * case, then this function will not be called. It then will only 886 * be called during the config phase. 887 * 888 * So only when in setup phase or config phase, create the debugfs 889 * entries and register the SMP channels. 890 */ 891 if (!hci_dev_test_flag(hdev, HCI_SETUP) && 892 !hci_dev_test_flag(hdev, HCI_CONFIG)) 893 return 0; 894 895 hci_debugfs_create_common(hdev); 896 897 if (lmp_bredr_capable(hdev)) 898 hci_debugfs_create_bredr(hdev); 899 900 if (lmp_le_capable(hdev)) 901 hci_debugfs_create_le(hdev); 902 903 return 0; 904 } 905 906 static void hci_init0_req(struct hci_request *req, unsigned long opt) 907 { 908 struct hci_dev *hdev = req->hdev; 909 910 BT_DBG("%s %ld", hdev->name, opt); 911 912 /* Reset */ 913 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) 914 hci_reset_req(req, 0); 915 916 /* Read Local Version */ 917 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL); 918 919 /* Read BD Address */ 920 if (hdev->set_bdaddr) 921 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL); 922 } 923 924 static int __hci_unconf_init(struct hci_dev *hdev) 925 { 926 int err; 927 928 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) 929 return 0; 930 931 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT); 932 if (err < 0) 933 return err; 934 935 return 0; 936 } 937 938 static void hci_scan_req(struct hci_request *req, unsigned long opt) 939 { 940 __u8 scan = opt; 941 942 BT_DBG("%s %x", req->hdev->name, scan); 943 944 /* Inquiry and Page scans */ 945 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); 946 } 947 948 static void hci_auth_req(struct hci_request *req, unsigned long opt) 949 { 950 __u8 auth = opt; 951 952 BT_DBG("%s %x", req->hdev->name, auth); 953 954 /* Authentication */ 955 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth); 956 } 957 958 static void hci_encrypt_req(struct hci_request *req, unsigned long opt) 959 { 960 __u8 encrypt = opt; 961 962 BT_DBG("%s %x", req->hdev->name, encrypt); 963 964 /* Encryption */ 965 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt); 966 } 967 968 static void hci_linkpol_req(struct hci_request *req, unsigned long opt) 969 { 970 __le16 policy = cpu_to_le16(opt); 971 972 BT_DBG("%s %x", req->hdev->name, policy); 973 974 /* Default link policy */ 975 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy); 976 } 977 978 /* Get HCI device by index. 979 * Device is held on return. 
*/ 980 struct hci_dev *hci_dev_get(int index) 981 { 982 struct hci_dev *hdev = NULL, *d; 983 984 BT_DBG("%d", index); 985 986 if (index < 0) 987 return NULL; 988 989 read_lock(&hci_dev_list_lock); 990 list_for_each_entry(d, &hci_dev_list, list) { 991 if (d->id == index) { 992 hdev = hci_dev_hold(d); 993 break; 994 } 995 } 996 read_unlock(&hci_dev_list_lock); 997 return hdev; 998 } 999 1000 /* ---- Inquiry support ---- */ 1001 1002 bool hci_discovery_active(struct hci_dev *hdev) 1003 { 1004 struct discovery_state *discov = &hdev->discovery; 1005 1006 switch (discov->state) { 1007 case DISCOVERY_FINDING: 1008 case DISCOVERY_RESOLVING: 1009 return true; 1010 1011 default: 1012 return false; 1013 } 1014 } 1015 1016 void hci_discovery_set_state(struct hci_dev *hdev, int state) 1017 { 1018 int old_state = hdev->discovery.state; 1019 1020 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state); 1021 1022 if (old_state == state) 1023 return; 1024 1025 hdev->discovery.state = state; 1026 1027 switch (state) { 1028 case DISCOVERY_STOPPED: 1029 hci_update_background_scan(hdev); 1030 1031 if (old_state != DISCOVERY_STARTING) 1032 mgmt_discovering(hdev, 0); 1033 break; 1034 case DISCOVERY_STARTING: 1035 break; 1036 case DISCOVERY_FINDING: 1037 mgmt_discovering(hdev, 1); 1038 break; 1039 case DISCOVERY_RESOLVING: 1040 break; 1041 case DISCOVERY_STOPPING: 1042 break; 1043 } 1044 } 1045 1046 void hci_inquiry_cache_flush(struct hci_dev *hdev) 1047 { 1048 struct discovery_state *cache = &hdev->discovery; 1049 struct inquiry_entry *p, *n; 1050 1051 list_for_each_entry_safe(p, n, &cache->all, all) { 1052 list_del(&p->all); 1053 kfree(p); 1054 } 1055 1056 INIT_LIST_HEAD(&cache->unknown); 1057 INIT_LIST_HEAD(&cache->resolve); 1058 } 1059 1060 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, 1061 bdaddr_t *bdaddr) 1062 { 1063 struct discovery_state *cache = &hdev->discovery; 1064 struct inquiry_entry *e; 1065 1066 BT_DBG("cache %p, %pMR", cache, bdaddr); 1067 1068 list_for_each_entry(e, &cache->all, all) { 1069 if (!bacmp(&e->data.bdaddr, bdaddr)) 1070 return e; 1071 } 1072 1073 return NULL; 1074 } 1075 1076 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev, 1077 bdaddr_t *bdaddr) 1078 { 1079 struct discovery_state *cache = &hdev->discovery; 1080 struct inquiry_entry *e; 1081 1082 BT_DBG("cache %p, %pMR", cache, bdaddr); 1083 1084 list_for_each_entry(e, &cache->unknown, list) { 1085 if (!bacmp(&e->data.bdaddr, bdaddr)) 1086 return e; 1087 } 1088 1089 return NULL; 1090 } 1091 1092 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev, 1093 bdaddr_t *bdaddr, 1094 int state) 1095 { 1096 struct discovery_state *cache = &hdev->discovery; 1097 struct inquiry_entry *e; 1098 1099 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state); 1100 1101 list_for_each_entry(e, &cache->resolve, list) { 1102 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state) 1103 return e; 1104 if (!bacmp(&e->data.bdaddr, bdaddr)) 1105 return e; 1106 } 1107 1108 return NULL; 1109 } 1110 1111 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev, 1112 struct inquiry_entry *ie) 1113 { 1114 struct discovery_state *cache = &hdev->discovery; 1115 struct list_head *pos = &cache->resolve; 1116 struct inquiry_entry *p; 1117 1118 list_del(&ie->list); 1119 1120 list_for_each_entry(p, &cache->resolve, list) { 1121 if (p->name_state != NAME_PENDING && 1122 abs(p->data.rssi) >= abs(ie->data.rssi)) 1123 break; 1124 pos = &p->list; 1125 } 1126 1127 
list_add(&ie->list, pos); 1128 } 1129 1130 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data, 1131 bool name_known) 1132 { 1133 struct discovery_state *cache = &hdev->discovery; 1134 struct inquiry_entry *ie; 1135 u32 flags = 0; 1136 1137 BT_DBG("cache %p, %pMR", cache, &data->bdaddr); 1138 1139 hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR); 1140 1141 if (!data->ssp_mode) 1142 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING; 1143 1144 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr); 1145 if (ie) { 1146 if (!ie->data.ssp_mode) 1147 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING; 1148 1149 if (ie->name_state == NAME_NEEDED && 1150 data->rssi != ie->data.rssi) { 1151 ie->data.rssi = data->rssi; 1152 hci_inquiry_cache_update_resolve(hdev, ie); 1153 } 1154 1155 goto update; 1156 } 1157 1158 /* Entry not in the cache. Add new one. */ 1159 ie = kzalloc(sizeof(*ie), GFP_KERNEL); 1160 if (!ie) { 1161 flags |= MGMT_DEV_FOUND_CONFIRM_NAME; 1162 goto done; 1163 } 1164 1165 list_add(&ie->all, &cache->all); 1166 1167 if (name_known) { 1168 ie->name_state = NAME_KNOWN; 1169 } else { 1170 ie->name_state = NAME_NOT_KNOWN; 1171 list_add(&ie->list, &cache->unknown); 1172 } 1173 1174 update: 1175 if (name_known && ie->name_state != NAME_KNOWN && 1176 ie->name_state != NAME_PENDING) { 1177 ie->name_state = NAME_KNOWN; 1178 list_del(&ie->list); 1179 } 1180 1181 memcpy(&ie->data, data, sizeof(*data)); 1182 ie->timestamp = jiffies; 1183 cache->timestamp = jiffies; 1184 1185 if (ie->name_state == NAME_NOT_KNOWN) 1186 flags |= MGMT_DEV_FOUND_CONFIRM_NAME; 1187 1188 done: 1189 return flags; 1190 } 1191 1192 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf) 1193 { 1194 struct discovery_state *cache = &hdev->discovery; 1195 struct inquiry_info *info = (struct inquiry_info *) buf; 1196 struct inquiry_entry *e; 1197 int copied = 0; 1198 1199 list_for_each_entry(e, &cache->all, all) { 1200 struct inquiry_data *data = &e->data; 1201 1202 if (copied >= num) 1203 break; 1204 1205 bacpy(&info->bdaddr, &data->bdaddr); 1206 info->pscan_rep_mode = data->pscan_rep_mode; 1207 info->pscan_period_mode = data->pscan_period_mode; 1208 info->pscan_mode = data->pscan_mode; 1209 memcpy(info->dev_class, data->dev_class, 3); 1210 info->clock_offset = data->clock_offset; 1211 1212 info++; 1213 copied++; 1214 } 1215 1216 BT_DBG("cache %p, copied %d", cache, copied); 1217 return copied; 1218 } 1219 1220 static void hci_inq_req(struct hci_request *req, unsigned long opt) 1221 { 1222 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt; 1223 struct hci_dev *hdev = req->hdev; 1224 struct hci_cp_inquiry cp; 1225 1226 BT_DBG("%s", hdev->name); 1227 1228 if (test_bit(HCI_INQUIRY, &hdev->flags)) 1229 return; 1230 1231 /* Start Inquiry */ 1232 memcpy(&cp.lap, &ir->lap, 3); 1233 cp.length = ir->length; 1234 cp.num_rsp = ir->num_rsp; 1235 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp); 1236 } 1237 1238 int hci_inquiry(void __user *arg) 1239 { 1240 __u8 __user *ptr = arg; 1241 struct hci_inquiry_req ir; 1242 struct hci_dev *hdev; 1243 int err = 0, do_inquiry = 0, max_rsp; 1244 long timeo; 1245 __u8 *buf; 1246 1247 if (copy_from_user(&ir, ptr, sizeof(ir))) 1248 return -EFAULT; 1249 1250 hdev = hci_dev_get(ir.dev_id); 1251 if (!hdev) 1252 return -ENODEV; 1253 1254 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { 1255 err = -EBUSY; 1256 goto done; 1257 } 1258 1259 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { 1260 err = -EOPNOTSUPP; 1261 goto done; 1262 } 1263 1264 if (hdev->dev_type != 
HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		ret = -ENODEV;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hci_dev_test_flag(hdev, HCI_SETUP)) {
		if (hdev->setup)
			ret = hdev->setup(hdev);

		/* The transport driver can set these quirks before
		 * creating the HCI device or in its setup callback.
		 *
		 * In case any of them is set, the controller has to
		 * start up as unconfigured.
1399 */ 1400 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) || 1401 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks)) 1402 hci_dev_set_flag(hdev, HCI_UNCONFIGURED); 1403 1404 /* For an unconfigured controller it is required to 1405 * read at least the version information provided by 1406 * the Read Local Version Information command. 1407 * 1408 * If the set_bdaddr driver callback is provided, then 1409 * also the original Bluetooth public device address 1410 * will be read using the Read BD Address command. 1411 */ 1412 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) 1413 ret = __hci_unconf_init(hdev); 1414 } 1415 1416 if (hci_dev_test_flag(hdev, HCI_CONFIG)) { 1417 /* If public address change is configured, ensure that 1418 * the address gets programmed. If the driver does not 1419 * support changing the public address, fail the power 1420 * on procedure. 1421 */ 1422 if (bacmp(&hdev->public_addr, BDADDR_ANY) && 1423 hdev->set_bdaddr) 1424 ret = hdev->set_bdaddr(hdev, &hdev->public_addr); 1425 else 1426 ret = -EADDRNOTAVAIL; 1427 } 1428 1429 if (!ret) { 1430 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) && 1431 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) 1432 ret = __hci_init(hdev); 1433 } 1434 1435 clear_bit(HCI_INIT, &hdev->flags); 1436 1437 if (!ret) { 1438 hci_dev_hold(hdev); 1439 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED); 1440 set_bit(HCI_UP, &hdev->flags); 1441 hci_notify(hdev, HCI_DEV_UP); 1442 if (!hci_dev_test_flag(hdev, HCI_SETUP) && 1443 !hci_dev_test_flag(hdev, HCI_CONFIG) && 1444 !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) && 1445 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && 1446 hdev->dev_type == HCI_BREDR) { 1447 hci_dev_lock(hdev); 1448 mgmt_powered(hdev, 1); 1449 hci_dev_unlock(hdev); 1450 } 1451 } else { 1452 /* Init failed, cleanup */ 1453 flush_work(&hdev->tx_work); 1454 flush_work(&hdev->cmd_work); 1455 flush_work(&hdev->rx_work); 1456 1457 skb_queue_purge(&hdev->cmd_q); 1458 skb_queue_purge(&hdev->rx_q); 1459 1460 if (hdev->flush) 1461 hdev->flush(hdev); 1462 1463 if (hdev->sent_cmd) { 1464 kfree_skb(hdev->sent_cmd); 1465 hdev->sent_cmd = NULL; 1466 } 1467 1468 hdev->close(hdev); 1469 hdev->flags &= BIT(HCI_RAW); 1470 } 1471 1472 done: 1473 hci_req_unlock(hdev); 1474 return ret; 1475 } 1476 1477 /* ---- HCI ioctl helpers ---- */ 1478 1479 int hci_dev_open(__u16 dev) 1480 { 1481 struct hci_dev *hdev; 1482 int err; 1483 1484 hdev = hci_dev_get(dev); 1485 if (!hdev) 1486 return -ENODEV; 1487 1488 /* Devices that are marked as unconfigured can only be powered 1489 * up as user channel. Trying to bring them up as normal devices 1490 * will result into a failure. Only user channel operation is 1491 * possible. 1492 * 1493 * When this function is called for a user channel, the flag 1494 * HCI_USER_CHANNEL will be set first before attempting to 1495 * open the device. 1496 */ 1497 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) && 1498 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { 1499 err = -EOPNOTSUPP; 1500 goto done; 1501 } 1502 1503 /* We need to ensure that no other power on/off work is pending 1504 * before proceeding to call hci_dev_do_open. This is 1505 * particularly important if the setup procedure has not yet 1506 * completed. 1507 */ 1508 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) 1509 cancel_delayed_work(&hdev->power_off); 1510 1511 /* After this call it is guaranteed that the setup procedure 1512 * has finished. This means that error conditions like RFKILL 1513 * or no valid public or static random address apply. 
1514 */ 1515 flush_workqueue(hdev->req_workqueue); 1516 1517 /* For controllers not using the management interface and that 1518 * are brought up using legacy ioctl, set the HCI_BONDABLE bit 1519 * so that pairing works for them. Once the management interface 1520 * is in use this bit will be cleared again and userspace has 1521 * to explicitly enable it. 1522 */ 1523 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && 1524 !hci_dev_test_flag(hdev, HCI_MGMT)) 1525 hci_dev_set_flag(hdev, HCI_BONDABLE); 1526 1527 err = hci_dev_do_open(hdev); 1528 1529 done: 1530 hci_dev_put(hdev); 1531 return err; 1532 } 1533 1534 /* This function requires the caller holds hdev->lock */ 1535 static void hci_pend_le_actions_clear(struct hci_dev *hdev) 1536 { 1537 struct hci_conn_params *p; 1538 1539 list_for_each_entry(p, &hdev->le_conn_params, list) { 1540 if (p->conn) { 1541 hci_conn_drop(p->conn); 1542 hci_conn_put(p->conn); 1543 p->conn = NULL; 1544 } 1545 list_del_init(&p->action); 1546 } 1547 1548 BT_DBG("All LE pending actions cleared"); 1549 } 1550 1551 static int hci_dev_do_close(struct hci_dev *hdev) 1552 { 1553 BT_DBG("%s %p", hdev->name, hdev); 1554 1555 if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) && 1556 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && 1557 test_bit(HCI_UP, &hdev->flags)) { 1558 /* Execute vendor specific shutdown routine */ 1559 if (hdev->shutdown) 1560 hdev->shutdown(hdev); 1561 } 1562 1563 cancel_delayed_work(&hdev->power_off); 1564 1565 hci_req_cancel(hdev, ENODEV); 1566 hci_req_lock(hdev); 1567 1568 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) { 1569 cancel_delayed_work_sync(&hdev->cmd_timer); 1570 hci_req_unlock(hdev); 1571 return 0; 1572 } 1573 1574 /* Flush RX and TX works */ 1575 flush_work(&hdev->tx_work); 1576 flush_work(&hdev->rx_work); 1577 1578 if (hdev->discov_timeout > 0) { 1579 cancel_delayed_work(&hdev->discov_off); 1580 hdev->discov_timeout = 0; 1581 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE); 1582 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE); 1583 } 1584 1585 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) 1586 cancel_delayed_work(&hdev->service_cache); 1587 1588 cancel_delayed_work_sync(&hdev->le_scan_disable); 1589 cancel_delayed_work_sync(&hdev->le_scan_restart); 1590 1591 if (hci_dev_test_flag(hdev, HCI_MGMT)) 1592 cancel_delayed_work_sync(&hdev->rpa_expired); 1593 1594 if (hdev->adv_instance_timeout) { 1595 cancel_delayed_work_sync(&hdev->adv_instance_expire); 1596 hdev->adv_instance_timeout = 0; 1597 } 1598 1599 /* Avoid potential lockdep warnings from the *_flush() calls by 1600 * ensuring the workqueue is empty up front. 
1601 */ 1602 drain_workqueue(hdev->workqueue); 1603 1604 hci_dev_lock(hdev); 1605 1606 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 1607 1608 if (!hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) { 1609 if (hdev->dev_type == HCI_BREDR) 1610 mgmt_powered(hdev, 0); 1611 } 1612 1613 hci_inquiry_cache_flush(hdev); 1614 hci_pend_le_actions_clear(hdev); 1615 hci_conn_hash_flush(hdev); 1616 hci_dev_unlock(hdev); 1617 1618 smp_unregister(hdev); 1619 1620 hci_notify(hdev, HCI_DEV_DOWN); 1621 1622 if (hdev->flush) 1623 hdev->flush(hdev); 1624 1625 /* Reset device */ 1626 skb_queue_purge(&hdev->cmd_q); 1627 atomic_set(&hdev->cmd_cnt, 1); 1628 if (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) && 1629 !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) && 1630 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) { 1631 set_bit(HCI_INIT, &hdev->flags); 1632 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT); 1633 clear_bit(HCI_INIT, &hdev->flags); 1634 } 1635 1636 /* flush cmd work */ 1637 flush_work(&hdev->cmd_work); 1638 1639 /* Drop queues */ 1640 skb_queue_purge(&hdev->rx_q); 1641 skb_queue_purge(&hdev->cmd_q); 1642 skb_queue_purge(&hdev->raw_q); 1643 1644 /* Drop last sent command */ 1645 if (hdev->sent_cmd) { 1646 cancel_delayed_work_sync(&hdev->cmd_timer); 1647 kfree_skb(hdev->sent_cmd); 1648 hdev->sent_cmd = NULL; 1649 } 1650 1651 /* After this point our queues are empty 1652 * and no tasks are scheduled. */ 1653 hdev->close(hdev); 1654 1655 /* Clear flags */ 1656 hdev->flags &= BIT(HCI_RAW); 1657 hci_dev_clear_volatile_flags(hdev); 1658 1659 /* Controller radio is available but is currently powered down */ 1660 hdev->amp_status = AMP_STATUS_POWERED_DOWN; 1661 1662 memset(hdev->eir, 0, sizeof(hdev->eir)); 1663 memset(hdev->dev_class, 0, sizeof(hdev->dev_class)); 1664 bacpy(&hdev->random_addr, BDADDR_ANY); 1665 1666 hci_req_unlock(hdev); 1667 1668 hci_dev_put(hdev); 1669 return 0; 1670 } 1671 1672 int hci_dev_close(__u16 dev) 1673 { 1674 struct hci_dev *hdev; 1675 int err; 1676 1677 hdev = hci_dev_get(dev); 1678 if (!hdev) 1679 return -ENODEV; 1680 1681 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { 1682 err = -EBUSY; 1683 goto done; 1684 } 1685 1686 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) 1687 cancel_delayed_work(&hdev->power_off); 1688 1689 err = hci_dev_do_close(hdev); 1690 1691 done: 1692 hci_dev_put(hdev); 1693 return err; 1694 } 1695 1696 static int hci_dev_do_reset(struct hci_dev *hdev) 1697 { 1698 int ret; 1699 1700 BT_DBG("%s %p", hdev->name, hdev); 1701 1702 hci_req_lock(hdev); 1703 1704 /* Drop queues */ 1705 skb_queue_purge(&hdev->rx_q); 1706 skb_queue_purge(&hdev->cmd_q); 1707 1708 /* Avoid potential lockdep warnings from the *_flush() calls by 1709 * ensuring the workqueue is empty up front. 
1710 */ 1711 drain_workqueue(hdev->workqueue); 1712 1713 hci_dev_lock(hdev); 1714 hci_inquiry_cache_flush(hdev); 1715 hci_conn_hash_flush(hdev); 1716 hci_dev_unlock(hdev); 1717 1718 if (hdev->flush) 1719 hdev->flush(hdev); 1720 1721 atomic_set(&hdev->cmd_cnt, 1); 1722 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0; 1723 1724 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT); 1725 1726 hci_req_unlock(hdev); 1727 return ret; 1728 } 1729 1730 int hci_dev_reset(__u16 dev) 1731 { 1732 struct hci_dev *hdev; 1733 int err; 1734 1735 hdev = hci_dev_get(dev); 1736 if (!hdev) 1737 return -ENODEV; 1738 1739 if (!test_bit(HCI_UP, &hdev->flags)) { 1740 err = -ENETDOWN; 1741 goto done; 1742 } 1743 1744 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { 1745 err = -EBUSY; 1746 goto done; 1747 } 1748 1749 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { 1750 err = -EOPNOTSUPP; 1751 goto done; 1752 } 1753 1754 err = hci_dev_do_reset(hdev); 1755 1756 done: 1757 hci_dev_put(hdev); 1758 return err; 1759 } 1760 1761 int hci_dev_reset_stat(__u16 dev) 1762 { 1763 struct hci_dev *hdev; 1764 int ret = 0; 1765 1766 hdev = hci_dev_get(dev); 1767 if (!hdev) 1768 return -ENODEV; 1769 1770 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { 1771 ret = -EBUSY; 1772 goto done; 1773 } 1774 1775 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { 1776 ret = -EOPNOTSUPP; 1777 goto done; 1778 } 1779 1780 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats)); 1781 1782 done: 1783 hci_dev_put(hdev); 1784 return ret; 1785 } 1786 1787 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan) 1788 { 1789 bool conn_changed, discov_changed; 1790 1791 BT_DBG("%s scan 0x%02x", hdev->name, scan); 1792 1793 if ((scan & SCAN_PAGE)) 1794 conn_changed = !hci_dev_test_and_set_flag(hdev, 1795 HCI_CONNECTABLE); 1796 else 1797 conn_changed = hci_dev_test_and_clear_flag(hdev, 1798 HCI_CONNECTABLE); 1799 1800 if ((scan & SCAN_INQUIRY)) { 1801 discov_changed = !hci_dev_test_and_set_flag(hdev, 1802 HCI_DISCOVERABLE); 1803 } else { 1804 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE); 1805 discov_changed = hci_dev_test_and_clear_flag(hdev, 1806 HCI_DISCOVERABLE); 1807 } 1808 1809 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 1810 return; 1811 1812 if (conn_changed || discov_changed) { 1813 /* In case this was disabled through mgmt */ 1814 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED); 1815 1816 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) 1817 mgmt_update_adv_data(hdev); 1818 1819 mgmt_new_settings(hdev); 1820 } 1821 } 1822 1823 int hci_dev_cmd(unsigned int cmd, void __user *arg) 1824 { 1825 struct hci_dev *hdev; 1826 struct hci_dev_req dr; 1827 int err = 0; 1828 1829 if (copy_from_user(&dr, arg, sizeof(dr))) 1830 return -EFAULT; 1831 1832 hdev = hci_dev_get(dr.dev_id); 1833 if (!hdev) 1834 return -ENODEV; 1835 1836 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { 1837 err = -EBUSY; 1838 goto done; 1839 } 1840 1841 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { 1842 err = -EOPNOTSUPP; 1843 goto done; 1844 } 1845 1846 if (hdev->dev_type != HCI_BREDR) { 1847 err = -EOPNOTSUPP; 1848 goto done; 1849 } 1850 1851 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) { 1852 err = -EOPNOTSUPP; 1853 goto done; 1854 } 1855 1856 switch (cmd) { 1857 case HCISETAUTH: 1858 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt, 1859 HCI_INIT_TIMEOUT); 1860 break; 1861 1862 case HCISETENCRYPT: 1863 if (!lmp_encrypt_capable(hdev)) { 1864 err = -EOPNOTSUPP; 1865 break; 1866 } 1867 1868 if (!test_bit(HCI_AUTH, &hdev->flags)) { 1869 /* Auth must be enabled first 
*/ 1870 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt, 1871 HCI_INIT_TIMEOUT); 1872 if (err) 1873 break; 1874 } 1875 1876 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt, 1877 HCI_INIT_TIMEOUT); 1878 break; 1879 1880 case HCISETSCAN: 1881 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt, 1882 HCI_INIT_TIMEOUT); 1883 1884 /* Ensure that the connectable and discoverable states 1885 * get correctly modified as this was a non-mgmt change. 1886 */ 1887 if (!err) 1888 hci_update_scan_state(hdev, dr.dev_opt); 1889 break; 1890 1891 case HCISETLINKPOL: 1892 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt, 1893 HCI_INIT_TIMEOUT); 1894 break; 1895 1896 case HCISETLINKMODE: 1897 hdev->link_mode = ((__u16) dr.dev_opt) & 1898 (HCI_LM_MASTER | HCI_LM_ACCEPT); 1899 break; 1900 1901 case HCISETPTYPE: 1902 hdev->pkt_type = (__u16) dr.dev_opt; 1903 break; 1904 1905 case HCISETACLMTU: 1906 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1); 1907 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0); 1908 break; 1909 1910 case HCISETSCOMTU: 1911 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1); 1912 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0); 1913 break; 1914 1915 default: 1916 err = -EINVAL; 1917 break; 1918 } 1919 1920 done: 1921 hci_dev_put(hdev); 1922 return err; 1923 } 1924 1925 int hci_get_dev_list(void __user *arg) 1926 { 1927 struct hci_dev *hdev; 1928 struct hci_dev_list_req *dl; 1929 struct hci_dev_req *dr; 1930 int n = 0, size, err; 1931 __u16 dev_num; 1932 1933 if (get_user(dev_num, (__u16 __user *) arg)) 1934 return -EFAULT; 1935 1936 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr)) 1937 return -EINVAL; 1938 1939 size = sizeof(*dl) + dev_num * sizeof(*dr); 1940 1941 dl = kzalloc(size, GFP_KERNEL); 1942 if (!dl) 1943 return -ENOMEM; 1944 1945 dr = dl->dev_req; 1946 1947 read_lock(&hci_dev_list_lock); 1948 list_for_each_entry(hdev, &hci_dev_list, list) { 1949 unsigned long flags = hdev->flags; 1950 1951 /* When the auto-off is configured it means the transport 1952 * is running, but in that case still indicate that the 1953 * device is actually down. 1954 */ 1955 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) 1956 flags &= ~BIT(HCI_UP); 1957 1958 (dr + n)->dev_id = hdev->id; 1959 (dr + n)->dev_opt = flags; 1960 1961 if (++n >= dev_num) 1962 break; 1963 } 1964 read_unlock(&hci_dev_list_lock); 1965 1966 dl->dev_num = n; 1967 size = sizeof(*dl) + n * sizeof(*dr); 1968 1969 err = copy_to_user(arg, dl, size); 1970 kfree(dl); 1971 1972 return err ? -EFAULT : 0; 1973 } 1974 1975 int hci_get_dev_info(void __user *arg) 1976 { 1977 struct hci_dev *hdev; 1978 struct hci_dev_info di; 1979 unsigned long flags; 1980 int err = 0; 1981 1982 if (copy_from_user(&di, arg, sizeof(di))) 1983 return -EFAULT; 1984 1985 hdev = hci_dev_get(di.dev_id); 1986 if (!hdev) 1987 return -ENODEV; 1988 1989 /* When the auto-off is configured it means the transport 1990 * is running, but in that case still indicate that the 1991 * device is actually down. 
	 */
	if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
		flags = hdev->flags & ~BIT(HCI_UP);
	else
		flags = hdev->flags;

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (blocked) {
		hci_dev_set_flag(hdev, HCI_RFKILLED);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG))
			hci_dev_do_close(hdev);
	} else {
		hci_dev_clear_flag(hdev, HCI_RFKILLED);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		hci_dev_lock(hdev);
		mgmt_set_powered_failed(hdev, err);
		hci_dev_unlock(hdev);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
		hci_dev_do_close(hdev);
	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
2104 */ 2105 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) 2106 clear_bit(HCI_RAW, &hdev->flags); 2107 2108 /* Powering on the controller with HCI_CONFIG set only 2109 * happens with the transition from unconfigured to 2110 * configured. This will send the Index Added event. 2111 */ 2112 mgmt_index_added(hdev); 2113 } 2114 } 2115 2116 static void hci_power_off(struct work_struct *work) 2117 { 2118 struct hci_dev *hdev = container_of(work, struct hci_dev, 2119 power_off.work); 2120 2121 BT_DBG("%s", hdev->name); 2122 2123 hci_dev_do_close(hdev); 2124 } 2125 2126 static void hci_error_reset(struct work_struct *work) 2127 { 2128 struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset); 2129 2130 BT_DBG("%s", hdev->name); 2131 2132 if (hdev->hw_error) 2133 hdev->hw_error(hdev, hdev->hw_error_code); 2134 else 2135 BT_ERR("%s hardware error 0x%2.2x", hdev->name, 2136 hdev->hw_error_code); 2137 2138 if (hci_dev_do_close(hdev)) 2139 return; 2140 2141 hci_dev_do_open(hdev); 2142 } 2143 2144 static void hci_discov_off(struct work_struct *work) 2145 { 2146 struct hci_dev *hdev; 2147 2148 hdev = container_of(work, struct hci_dev, discov_off.work); 2149 2150 BT_DBG("%s", hdev->name); 2151 2152 mgmt_discoverable_timeout(hdev); 2153 } 2154 2155 static void hci_adv_timeout_expire(struct work_struct *work) 2156 { 2157 struct hci_dev *hdev; 2158 2159 hdev = container_of(work, struct hci_dev, adv_instance_expire.work); 2160 2161 BT_DBG("%s", hdev->name); 2162 2163 mgmt_adv_timeout_expired(hdev); 2164 } 2165 2166 void hci_uuids_clear(struct hci_dev *hdev) 2167 { 2168 struct bt_uuid *uuid, *tmp; 2169 2170 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) { 2171 list_del(&uuid->list); 2172 kfree(uuid); 2173 } 2174 } 2175 2176 void hci_link_keys_clear(struct hci_dev *hdev) 2177 { 2178 struct link_key *key; 2179 2180 list_for_each_entry_rcu(key, &hdev->link_keys, list) { 2181 list_del_rcu(&key->list); 2182 kfree_rcu(key, rcu); 2183 } 2184 } 2185 2186 void hci_smp_ltks_clear(struct hci_dev *hdev) 2187 { 2188 struct smp_ltk *k; 2189 2190 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) { 2191 list_del_rcu(&k->list); 2192 kfree_rcu(k, rcu); 2193 } 2194 } 2195 2196 void hci_smp_irks_clear(struct hci_dev *hdev) 2197 { 2198 struct smp_irk *k; 2199 2200 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) { 2201 list_del_rcu(&k->list); 2202 kfree_rcu(k, rcu); 2203 } 2204 } 2205 2206 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr) 2207 { 2208 struct link_key *k; 2209 2210 rcu_read_lock(); 2211 list_for_each_entry_rcu(k, &hdev->link_keys, list) { 2212 if (bacmp(bdaddr, &k->bdaddr) == 0) { 2213 rcu_read_unlock(); 2214 return k; 2215 } 2216 } 2217 rcu_read_unlock(); 2218 2219 return NULL; 2220 } 2221 2222 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn, 2223 u8 key_type, u8 old_key_type) 2224 { 2225 /* Legacy key */ 2226 if (key_type < 0x03) 2227 return true; 2228 2229 /* Debug keys are insecure so don't store them persistently */ 2230 if (key_type == HCI_LK_DEBUG_COMBINATION) 2231 return false; 2232 2233 /* Changed combination key and there's no previous one */ 2234 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff) 2235 return false; 2236 2237 /* Security mode 3 case */ 2238 if (!conn) 2239 return true; 2240 2241 /* BR/EDR key derived using SC from an LE link */ 2242 if (conn->type == LE_LINK) 2243 return true; 2244 2245 /* Neither local nor remote side had no-bonding as requirement */ 2246 if (conn->auth_type > 
0x01 && conn->remote_auth > 0x01) 2247 return true; 2248 2249 /* Local side had dedicated bonding as requirement */ 2250 if (conn->auth_type == 0x02 || conn->auth_type == 0x03) 2251 return true; 2252 2253 /* Remote side had dedicated bonding as requirement */ 2254 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) 2255 return true; 2256 2257 /* If none of the above criteria match, then don't store the key 2258 * persistently */ 2259 return false; 2260 } 2261 2262 static u8 ltk_role(u8 type) 2263 { 2264 if (type == SMP_LTK) 2265 return HCI_ROLE_MASTER; 2266 2267 return HCI_ROLE_SLAVE; 2268 } 2269 2270 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, 2271 u8 addr_type, u8 role) 2272 { 2273 struct smp_ltk *k; 2274 2275 rcu_read_lock(); 2276 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) { 2277 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr)) 2278 continue; 2279 2280 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) { 2281 rcu_read_unlock(); 2282 return k; 2283 } 2284 } 2285 rcu_read_unlock(); 2286 2287 return NULL; 2288 } 2289 2290 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa) 2291 { 2292 struct smp_irk *irk; 2293 2294 rcu_read_lock(); 2295 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) { 2296 if (!bacmp(&irk->rpa, rpa)) { 2297 rcu_read_unlock(); 2298 return irk; 2299 } 2300 } 2301 2302 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) { 2303 if (smp_irk_matches(hdev, irk->val, rpa)) { 2304 bacpy(&irk->rpa, rpa); 2305 rcu_read_unlock(); 2306 return irk; 2307 } 2308 } 2309 rcu_read_unlock(); 2310 2311 return NULL; 2312 } 2313 2314 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr, 2315 u8 addr_type) 2316 { 2317 struct smp_irk *irk; 2318 2319 /* Identity Address must be public or static random */ 2320 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0) 2321 return NULL; 2322 2323 rcu_read_lock(); 2324 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) { 2325 if (addr_type == irk->addr_type && 2326 bacmp(bdaddr, &irk->bdaddr) == 0) { 2327 rcu_read_unlock(); 2328 return irk; 2329 } 2330 } 2331 rcu_read_unlock(); 2332 2333 return NULL; 2334 } 2335 2336 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, 2337 bdaddr_t *bdaddr, u8 *val, u8 type, 2338 u8 pin_len, bool *persistent) 2339 { 2340 struct link_key *key, *old_key; 2341 u8 old_key_type; 2342 2343 old_key = hci_find_link_key(hdev, bdaddr); 2344 if (old_key) { 2345 old_key_type = old_key->type; 2346 key = old_key; 2347 } else { 2348 old_key_type = conn ? 
conn->key_type : 0xff; 2349 key = kzalloc(sizeof(*key), GFP_KERNEL); 2350 if (!key) 2351 return NULL; 2352 list_add_rcu(&key->list, &hdev->link_keys); 2353 } 2354 2355 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type); 2356 2357 /* Some buggy controller combinations generate a changed 2358 * combination key for legacy pairing even when there's no 2359 * previous key */ 2360 if (type == HCI_LK_CHANGED_COMBINATION && 2361 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) { 2362 type = HCI_LK_COMBINATION; 2363 if (conn) 2364 conn->key_type = type; 2365 } 2366 2367 bacpy(&key->bdaddr, bdaddr); 2368 memcpy(key->val, val, HCI_LINK_KEY_SIZE); 2369 key->pin_len = pin_len; 2370 2371 if (type == HCI_LK_CHANGED_COMBINATION) 2372 key->type = old_key_type; 2373 else 2374 key->type = type; 2375 2376 if (persistent) 2377 *persistent = hci_persistent_key(hdev, conn, type, 2378 old_key_type); 2379 2380 return key; 2381 } 2382 2383 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, 2384 u8 addr_type, u8 type, u8 authenticated, 2385 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand) 2386 { 2387 struct smp_ltk *key, *old_key; 2388 u8 role = ltk_role(type); 2389 2390 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role); 2391 if (old_key) 2392 key = old_key; 2393 else { 2394 key = kzalloc(sizeof(*key), GFP_KERNEL); 2395 if (!key) 2396 return NULL; 2397 list_add_rcu(&key->list, &hdev->long_term_keys); 2398 } 2399 2400 bacpy(&key->bdaddr, bdaddr); 2401 key->bdaddr_type = addr_type; 2402 memcpy(key->val, tk, sizeof(key->val)); 2403 key->authenticated = authenticated; 2404 key->ediv = ediv; 2405 key->rand = rand; 2406 key->enc_size = enc_size; 2407 key->type = type; 2408 2409 return key; 2410 } 2411 2412 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, 2413 u8 addr_type, u8 val[16], bdaddr_t *rpa) 2414 { 2415 struct smp_irk *irk; 2416 2417 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type); 2418 if (!irk) { 2419 irk = kzalloc(sizeof(*irk), GFP_KERNEL); 2420 if (!irk) 2421 return NULL; 2422 2423 bacpy(&irk->bdaddr, bdaddr); 2424 irk->addr_type = addr_type; 2425 2426 list_add_rcu(&irk->list, &hdev->identity_resolving_keys); 2427 } 2428 2429 memcpy(irk->val, val, 16); 2430 bacpy(&irk->rpa, rpa); 2431 2432 return irk; 2433 } 2434 2435 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr) 2436 { 2437 struct link_key *key; 2438 2439 key = hci_find_link_key(hdev, bdaddr); 2440 if (!key) 2441 return -ENOENT; 2442 2443 BT_DBG("%s removing %pMR", hdev->name, bdaddr); 2444 2445 list_del_rcu(&key->list); 2446 kfree_rcu(key, rcu); 2447 2448 return 0; 2449 } 2450 2451 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type) 2452 { 2453 struct smp_ltk *k; 2454 int removed = 0; 2455 2456 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) { 2457 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type) 2458 continue; 2459 2460 BT_DBG("%s removing %pMR", hdev->name, bdaddr); 2461 2462 list_del_rcu(&k->list); 2463 kfree_rcu(k, rcu); 2464 removed++; 2465 } 2466 2467 return removed ? 
0 : -ENOENT; 2468 } 2469 2470 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type) 2471 { 2472 struct smp_irk *k; 2473 2474 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) { 2475 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type) 2476 continue; 2477 2478 BT_DBG("%s removing %pMR", hdev->name, bdaddr); 2479 2480 list_del_rcu(&k->list); 2481 kfree_rcu(k, rcu); 2482 } 2483 } 2484 2485 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type) 2486 { 2487 struct smp_ltk *k; 2488 struct smp_irk *irk; 2489 u8 addr_type; 2490 2491 if (type == BDADDR_BREDR) { 2492 if (hci_find_link_key(hdev, bdaddr)) 2493 return true; 2494 return false; 2495 } 2496 2497 /* Convert to HCI addr type which struct smp_ltk uses */ 2498 if (type == BDADDR_LE_PUBLIC) 2499 addr_type = ADDR_LE_DEV_PUBLIC; 2500 else 2501 addr_type = ADDR_LE_DEV_RANDOM; 2502 2503 irk = hci_get_irk(hdev, bdaddr, addr_type); 2504 if (irk) { 2505 bdaddr = &irk->bdaddr; 2506 addr_type = irk->addr_type; 2507 } 2508 2509 rcu_read_lock(); 2510 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) { 2511 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) { 2512 rcu_read_unlock(); 2513 return true; 2514 } 2515 } 2516 rcu_read_unlock(); 2517 2518 return false; 2519 } 2520 2521 /* HCI command timer function */ 2522 static void hci_cmd_timeout(struct work_struct *work) 2523 { 2524 struct hci_dev *hdev = container_of(work, struct hci_dev, 2525 cmd_timer.work); 2526 2527 if (hdev->sent_cmd) { 2528 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data; 2529 u16 opcode = __le16_to_cpu(sent->opcode); 2530 2531 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode); 2532 } else { 2533 BT_ERR("%s command tx timeout", hdev->name); 2534 } 2535 2536 atomic_set(&hdev->cmd_cnt, 1); 2537 queue_work(hdev->workqueue, &hdev->cmd_work); 2538 } 2539 2540 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev, 2541 bdaddr_t *bdaddr, u8 bdaddr_type) 2542 { 2543 struct oob_data *data; 2544 2545 list_for_each_entry(data, &hdev->remote_oob_data, list) { 2546 if (bacmp(bdaddr, &data->bdaddr) != 0) 2547 continue; 2548 if (data->bdaddr_type != bdaddr_type) 2549 continue; 2550 return data; 2551 } 2552 2553 return NULL; 2554 } 2555 2556 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, 2557 u8 bdaddr_type) 2558 { 2559 struct oob_data *data; 2560 2561 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type); 2562 if (!data) 2563 return -ENOENT; 2564 2565 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type); 2566 2567 list_del(&data->list); 2568 kfree(data); 2569 2570 return 0; 2571 } 2572 2573 void hci_remote_oob_data_clear(struct hci_dev *hdev) 2574 { 2575 struct oob_data *data, *n; 2576 2577 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) { 2578 list_del(&data->list); 2579 kfree(data); 2580 } 2581 } 2582 2583 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, 2584 u8 bdaddr_type, u8 *hash192, u8 *rand192, 2585 u8 *hash256, u8 *rand256) 2586 { 2587 struct oob_data *data; 2588 2589 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type); 2590 if (!data) { 2591 data = kmalloc(sizeof(*data), GFP_KERNEL); 2592 if (!data) 2593 return -ENOMEM; 2594 2595 bacpy(&data->bdaddr, bdaddr); 2596 data->bdaddr_type = bdaddr_type; 2597 list_add(&data->list, &hdev->remote_oob_data); 2598 } 2599 2600 if (hash192 && rand192) { 2601 memcpy(data->hash192, hash192, sizeof(data->hash192)); 2602 memcpy(data->rand192, rand192, 
sizeof(data->rand192)); 2603 if (hash256 && rand256) 2604 data->present = 0x03; 2605 } else { 2606 memset(data->hash192, 0, sizeof(data->hash192)); 2607 memset(data->rand192, 0, sizeof(data->rand192)); 2608 if (hash256 && rand256) 2609 data->present = 0x02; 2610 else 2611 data->present = 0x00; 2612 } 2613 2614 if (hash256 && rand256) { 2615 memcpy(data->hash256, hash256, sizeof(data->hash256)); 2616 memcpy(data->rand256, rand256, sizeof(data->rand256)); 2617 } else { 2618 memset(data->hash256, 0, sizeof(data->hash256)); 2619 memset(data->rand256, 0, sizeof(data->rand256)); 2620 if (hash192 && rand192) 2621 data->present = 0x01; 2622 } 2623 2624 BT_DBG("%s for %pMR", hdev->name, bdaddr); 2625 2626 return 0; 2627 } 2628 2629 /* This function requires the caller holds hdev->lock */ 2630 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance) 2631 { 2632 struct adv_info *adv_instance; 2633 2634 list_for_each_entry(adv_instance, &hdev->adv_instances, list) { 2635 if (adv_instance->instance == instance) 2636 return adv_instance; 2637 } 2638 2639 return NULL; 2640 } 2641 2642 /* This function requires the caller holds hdev->lock */ 2643 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance) { 2644 struct adv_info *cur_instance; 2645 2646 cur_instance = hci_find_adv_instance(hdev, instance); 2647 if (!cur_instance) 2648 return NULL; 2649 2650 if (cur_instance == list_last_entry(&hdev->adv_instances, 2651 struct adv_info, list)) 2652 return list_first_entry(&hdev->adv_instances, 2653 struct adv_info, list); 2654 else 2655 return list_next_entry(cur_instance, list); 2656 } 2657 2658 /* This function requires the caller holds hdev->lock */ 2659 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance) 2660 { 2661 struct adv_info *adv_instance; 2662 2663 adv_instance = hci_find_adv_instance(hdev, instance); 2664 if (!adv_instance) 2665 return -ENOENT; 2666 2667 BT_DBG("%s removing %dMR", hdev->name, instance); 2668 2669 if (hdev->cur_adv_instance == instance && hdev->adv_instance_timeout) { 2670 cancel_delayed_work(&hdev->adv_instance_expire); 2671 hdev->adv_instance_timeout = 0; 2672 } 2673 2674 list_del(&adv_instance->list); 2675 kfree(adv_instance); 2676 2677 hdev->adv_instance_cnt--; 2678 2679 return 0; 2680 } 2681 2682 /* This function requires the caller holds hdev->lock */ 2683 void hci_adv_instances_clear(struct hci_dev *hdev) 2684 { 2685 struct adv_info *adv_instance, *n; 2686 2687 if (hdev->adv_instance_timeout) { 2688 cancel_delayed_work(&hdev->adv_instance_expire); 2689 hdev->adv_instance_timeout = 0; 2690 } 2691 2692 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) { 2693 list_del(&adv_instance->list); 2694 kfree(adv_instance); 2695 } 2696 2697 hdev->adv_instance_cnt = 0; 2698 } 2699 2700 /* This function requires the caller holds hdev->lock */ 2701 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags, 2702 u16 adv_data_len, u8 *adv_data, 2703 u16 scan_rsp_len, u8 *scan_rsp_data, 2704 u16 timeout, u16 duration) 2705 { 2706 struct adv_info *adv_instance; 2707 2708 adv_instance = hci_find_adv_instance(hdev, instance); 2709 if (adv_instance) { 2710 memset(adv_instance->adv_data, 0, 2711 sizeof(adv_instance->adv_data)); 2712 memset(adv_instance->scan_rsp_data, 0, 2713 sizeof(adv_instance->scan_rsp_data)); 2714 } else { 2715 if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES || 2716 instance < 1 || instance > HCI_MAX_ADV_INSTANCES) 2717 return -EOVERFLOW; 2718 2719 adv_instance = kzalloc(sizeof(*adv_instance), 
						       GFP_KERNEL);
		if (!adv_instance)
			return -ENOMEM;

		adv_instance->pending = true;
		adv_instance->instance = instance;
		list_add(&adv_instance->list, &hdev->adv_instances);
		hdev->adv_instance_cnt++;
	}

	adv_instance->flags = flags;
	adv_instance->adv_data_len = adv_data_len;
	adv_instance->scan_rsp_len = scan_rsp_len;

	if (adv_data_len)
		memcpy(adv_instance->adv_data, adv_data, adv_data_len);

	if (scan_rsp_len)
		memcpy(adv_instance->scan_rsp_data,
		       scan_rsp_data, scan_rsp_len);

	adv_instance->timeout = timeout;
	adv_instance->remaining_time = timeout;

	if (duration == 0)
		adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
	else
		adv_instance->duration = duration;

	BT_DBG("%s for %d", hdev->name, instance);

	return 0;
}

struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
					   bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, bdaddr_list, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, bdaddr_list) {
		struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}
}

int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_bdaddr_list_lookup(list, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, list);

	return 0;
}

int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_bdaddr_list_clear(list);
		return 0;
	}

	entry = hci_bdaddr_list_lookup(list, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return 0;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
					       bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	list_for_each_entry(params, &hdev->le_conn_params, list) {
		if (bacmp(&params->addr, addr) == 0 &&
		    params->addr_type == addr_type) {
			return params;
		}
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
						  bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *param;

	list_for_each_entry(param, list, action) {
		if (bacmp(&param->addr, addr) == 0 &&
		    param->addr_type == addr_type)
			return param;
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
					    bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params)
		return params;

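	/* No existing entry for this address: allocate a fresh one below and
	 * seed it with the controller-wide LE connection defaults.
	 */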
	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		BT_ERR("Out of memory");
		return NULL;
	}

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;

	list_add(&params->list, &hdev->le_conn_params);
	INIT_LIST_HEAD(&params->action);

	params->conn_min_interval = hdev->le_conn_min_interval;
	params->conn_max_interval = hdev->le_conn_max_interval;
	params->conn_latency = hdev->le_conn_latency;
	params->supervision_timeout = hdev->le_supv_timeout;
	params->auto_connect = HCI_AUTO_CONN_DISABLED;

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

	return params;
}

static void hci_conn_params_free(struct hci_conn_params *params)
{
	if (params->conn) {
		hci_conn_drop(params->conn);
		hci_conn_put(params->conn);
	}

	list_del(&params->action);
	list_del(&params->list);
	kfree(params);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (!params)
		return;

	hci_conn_params_free(params);

	hci_update_background_scan(hdev);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_clear_disabled(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
		if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
			continue;
		list_del(&params->list);
		kfree(params);
	}

	BT_DBG("All LE disabled connection parameters were removed");
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_clear_all(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
		hci_conn_params_free(params);

	hci_update_background_scan(hdev);

	BT_DBG("All LE connection parameters were removed");
}

static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	if (status) {
		BT_ERR("Failed to start inquiry: status %d", status);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		return;
	}
}

static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
					  u16 opcode)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	hdev->discovery.scan_start = 0;

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_dev_lock(hdev);

		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
			     &hdev->quirks)) {
			/* If we were running LE only scan, change discovery
			 * state. If we were running both LE and BR/EDR inquiry
			 * simultaneously, and BR/EDR inquiry is already
			 * finished, stop discovery, otherwise BR/EDR inquiry
			 * will stop discovery when finished.
If we will resolve 2983 * remote device name, do not change discovery state. 2984 */ 2985 if (!test_bit(HCI_INQUIRY, &hdev->flags) && 2986 hdev->discovery.state != DISCOVERY_RESOLVING) 2987 hci_discovery_set_state(hdev, 2988 DISCOVERY_STOPPED); 2989 } else { 2990 struct hci_request req; 2991 2992 hci_inquiry_cache_flush(hdev); 2993 2994 hci_req_init(&req, hdev); 2995 2996 memset(&cp, 0, sizeof(cp)); 2997 memcpy(&cp.lap, lap, sizeof(cp.lap)); 2998 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN; 2999 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp); 3000 3001 err = hci_req_run(&req, inquiry_complete); 3002 if (err) { 3003 BT_ERR("Inquiry request failed: err %d", err); 3004 hci_discovery_set_state(hdev, 3005 DISCOVERY_STOPPED); 3006 } 3007 } 3008 3009 hci_dev_unlock(hdev); 3010 break; 3011 } 3012 } 3013 3014 static void le_scan_disable_work(struct work_struct *work) 3015 { 3016 struct hci_dev *hdev = container_of(work, struct hci_dev, 3017 le_scan_disable.work); 3018 struct hci_request req; 3019 int err; 3020 3021 BT_DBG("%s", hdev->name); 3022 3023 cancel_delayed_work_sync(&hdev->le_scan_restart); 3024 3025 hci_req_init(&req, hdev); 3026 3027 hci_req_add_le_scan_disable(&req); 3028 3029 err = hci_req_run(&req, le_scan_disable_work_complete); 3030 if (err) 3031 BT_ERR("Disable LE scanning request failed: err %d", err); 3032 } 3033 3034 static void le_scan_restart_work_complete(struct hci_dev *hdev, u8 status, 3035 u16 opcode) 3036 { 3037 unsigned long timeout, duration, scan_start, now; 3038 3039 BT_DBG("%s", hdev->name); 3040 3041 if (status) { 3042 BT_ERR("Failed to restart LE scan: status %d", status); 3043 return; 3044 } 3045 3046 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) || 3047 !hdev->discovery.scan_start) 3048 return; 3049 3050 /* When the scan was started, hdev->le_scan_disable has been queued 3051 * after duration from scan_start. During scan restart this job 3052 * has been canceled, and we need to queue it again after proper 3053 * timeout, to make sure that scan does not run indefinitely. 3054 */ 3055 duration = hdev->discovery.scan_duration; 3056 scan_start = hdev->discovery.scan_start; 3057 now = jiffies; 3058 if (now - scan_start <= duration) { 3059 int elapsed; 3060 3061 if (now >= scan_start) 3062 elapsed = now - scan_start; 3063 else 3064 elapsed = ULONG_MAX - scan_start + now; 3065 3066 timeout = duration - elapsed; 3067 } else { 3068 timeout = 0; 3069 } 3070 queue_delayed_work(hdev->workqueue, 3071 &hdev->le_scan_disable, timeout); 3072 } 3073 3074 static void le_scan_restart_work(struct work_struct *work) 3075 { 3076 struct hci_dev *hdev = container_of(work, struct hci_dev, 3077 le_scan_restart.work); 3078 struct hci_request req; 3079 struct hci_cp_le_set_scan_enable cp; 3080 int err; 3081 3082 BT_DBG("%s", hdev->name); 3083 3084 /* If controller is not scanning we are done. */ 3085 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN)) 3086 return; 3087 3088 hci_req_init(&req, hdev); 3089 3090 hci_req_add_le_scan_disable(&req); 3091 3092 memset(&cp, 0, sizeof(cp)); 3093 cp.enable = LE_SCAN_ENABLE; 3094 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE; 3095 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp); 3096 3097 err = hci_req_run(&req, le_scan_restart_work_complete); 3098 if (err) 3099 BT_ERR("Restart LE scan request failed: err %d", err); 3100 } 3101 3102 /* Copy the Identity Address of the controller. 3103 * 3104 * If the controller has a public BD_ADDR, then by default use that one. 
3105 * If this is a LE only controller without a public address, default to 3106 * the static random address. 3107 * 3108 * For debugging purposes it is possible to force controllers with a 3109 * public address to use the static random address instead. 3110 * 3111 * In case BR/EDR has been disabled on a dual-mode controller and 3112 * userspace has configured a static address, then that address 3113 * becomes the identity address instead of the public BR/EDR address. 3114 */ 3115 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr, 3116 u8 *bdaddr_type) 3117 { 3118 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) || 3119 !bacmp(&hdev->bdaddr, BDADDR_ANY) || 3120 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) && 3121 bacmp(&hdev->static_addr, BDADDR_ANY))) { 3122 bacpy(bdaddr, &hdev->static_addr); 3123 *bdaddr_type = ADDR_LE_DEV_RANDOM; 3124 } else { 3125 bacpy(bdaddr, &hdev->bdaddr); 3126 *bdaddr_type = ADDR_LE_DEV_PUBLIC; 3127 } 3128 } 3129 3130 /* Alloc HCI device */ 3131 struct hci_dev *hci_alloc_dev(void) 3132 { 3133 struct hci_dev *hdev; 3134 3135 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL); 3136 if (!hdev) 3137 return NULL; 3138 3139 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1); 3140 hdev->esco_type = (ESCO_HV1); 3141 hdev->link_mode = (HCI_LM_ACCEPT); 3142 hdev->num_iac = 0x01; /* One IAC support is mandatory */ 3143 hdev->io_capability = 0x03; /* No Input No Output */ 3144 hdev->manufacturer = 0xffff; /* Default to internal use */ 3145 hdev->inq_tx_power = HCI_TX_POWER_INVALID; 3146 hdev->adv_tx_power = HCI_TX_POWER_INVALID; 3147 hdev->adv_instance_cnt = 0; 3148 hdev->cur_adv_instance = 0x00; 3149 hdev->adv_instance_timeout = 0; 3150 3151 hdev->sniff_max_interval = 800; 3152 hdev->sniff_min_interval = 80; 3153 3154 hdev->le_adv_channel_map = 0x07; 3155 hdev->le_adv_min_interval = 0x0800; 3156 hdev->le_adv_max_interval = 0x0800; 3157 hdev->le_scan_interval = 0x0060; 3158 hdev->le_scan_window = 0x0030; 3159 hdev->le_conn_min_interval = 0x0028; 3160 hdev->le_conn_max_interval = 0x0038; 3161 hdev->le_conn_latency = 0x0000; 3162 hdev->le_supv_timeout = 0x002a; 3163 hdev->le_def_tx_len = 0x001b; 3164 hdev->le_def_tx_time = 0x0148; 3165 hdev->le_max_tx_len = 0x001b; 3166 hdev->le_max_tx_time = 0x0148; 3167 hdev->le_max_rx_len = 0x001b; 3168 hdev->le_max_rx_time = 0x0148; 3169 3170 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT; 3171 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT; 3172 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE; 3173 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE; 3174 3175 mutex_init(&hdev->lock); 3176 mutex_init(&hdev->req_lock); 3177 3178 INIT_LIST_HEAD(&hdev->mgmt_pending); 3179 INIT_LIST_HEAD(&hdev->blacklist); 3180 INIT_LIST_HEAD(&hdev->whitelist); 3181 INIT_LIST_HEAD(&hdev->uuids); 3182 INIT_LIST_HEAD(&hdev->link_keys); 3183 INIT_LIST_HEAD(&hdev->long_term_keys); 3184 INIT_LIST_HEAD(&hdev->identity_resolving_keys); 3185 INIT_LIST_HEAD(&hdev->remote_oob_data); 3186 INIT_LIST_HEAD(&hdev->le_white_list); 3187 INIT_LIST_HEAD(&hdev->le_conn_params); 3188 INIT_LIST_HEAD(&hdev->pend_le_conns); 3189 INIT_LIST_HEAD(&hdev->pend_le_reports); 3190 INIT_LIST_HEAD(&hdev->conn_hash.list); 3191 INIT_LIST_HEAD(&hdev->adv_instances); 3192 3193 INIT_WORK(&hdev->rx_work, hci_rx_work); 3194 INIT_WORK(&hdev->cmd_work, hci_cmd_work); 3195 INIT_WORK(&hdev->tx_work, hci_tx_work); 3196 INIT_WORK(&hdev->power_on, hci_power_on); 3197 INIT_WORK(&hdev->error_reset, hci_error_reset); 3198 3199 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off); 3200 
INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off); 3201 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work); 3202 INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work); 3203 INIT_DELAYED_WORK(&hdev->adv_instance_expire, hci_adv_timeout_expire); 3204 3205 skb_queue_head_init(&hdev->rx_q); 3206 skb_queue_head_init(&hdev->cmd_q); 3207 skb_queue_head_init(&hdev->raw_q); 3208 3209 init_waitqueue_head(&hdev->req_wait_q); 3210 3211 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout); 3212 3213 hci_init_sysfs(hdev); 3214 discovery_init(hdev); 3215 3216 return hdev; 3217 } 3218 EXPORT_SYMBOL(hci_alloc_dev); 3219 3220 /* Free HCI device */ 3221 void hci_free_dev(struct hci_dev *hdev) 3222 { 3223 /* will free via device release */ 3224 put_device(&hdev->dev); 3225 } 3226 EXPORT_SYMBOL(hci_free_dev); 3227 3228 /* Register HCI device */ 3229 int hci_register_dev(struct hci_dev *hdev) 3230 { 3231 int id, error; 3232 3233 if (!hdev->open || !hdev->close || !hdev->send) 3234 return -EINVAL; 3235 3236 /* Do not allow HCI_AMP devices to register at index 0, 3237 * so the index can be used as the AMP controller ID. 3238 */ 3239 switch (hdev->dev_type) { 3240 case HCI_BREDR: 3241 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL); 3242 break; 3243 case HCI_AMP: 3244 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL); 3245 break; 3246 default: 3247 return -EINVAL; 3248 } 3249 3250 if (id < 0) 3251 return id; 3252 3253 sprintf(hdev->name, "hci%d", id); 3254 hdev->id = id; 3255 3256 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); 3257 3258 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND | 3259 WQ_MEM_RECLAIM, 1, hdev->name); 3260 if (!hdev->workqueue) { 3261 error = -ENOMEM; 3262 goto err; 3263 } 3264 3265 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND | 3266 WQ_MEM_RECLAIM, 1, hdev->name); 3267 if (!hdev->req_workqueue) { 3268 destroy_workqueue(hdev->workqueue); 3269 error = -ENOMEM; 3270 goto err; 3271 } 3272 3273 if (!IS_ERR_OR_NULL(bt_debugfs)) 3274 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs); 3275 3276 dev_set_name(&hdev->dev, "%s", hdev->name); 3277 3278 error = device_add(&hdev->dev); 3279 if (error < 0) 3280 goto err_wqueue; 3281 3282 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev, 3283 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, 3284 hdev); 3285 if (hdev->rfkill) { 3286 if (rfkill_register(hdev->rfkill) < 0) { 3287 rfkill_destroy(hdev->rfkill); 3288 hdev->rfkill = NULL; 3289 } 3290 } 3291 3292 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) 3293 hci_dev_set_flag(hdev, HCI_RFKILLED); 3294 3295 hci_dev_set_flag(hdev, HCI_SETUP); 3296 hci_dev_set_flag(hdev, HCI_AUTO_OFF); 3297 3298 if (hdev->dev_type == HCI_BREDR) { 3299 /* Assume BR/EDR support until proven otherwise (such as 3300 * through reading supported features during init. 3301 */ 3302 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED); 3303 } 3304 3305 write_lock(&hci_dev_list_lock); 3306 list_add(&hdev->list, &hci_dev_list); 3307 write_unlock(&hci_dev_list_lock); 3308 3309 /* Devices that are marked for raw-only usage are unconfigured 3310 * and should not be included in normal operation. 
3311 */ 3312 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) 3313 hci_dev_set_flag(hdev, HCI_UNCONFIGURED); 3314 3315 hci_notify(hdev, HCI_DEV_REG); 3316 hci_dev_hold(hdev); 3317 3318 queue_work(hdev->req_workqueue, &hdev->power_on); 3319 3320 return id; 3321 3322 err_wqueue: 3323 destroy_workqueue(hdev->workqueue); 3324 destroy_workqueue(hdev->req_workqueue); 3325 err: 3326 ida_simple_remove(&hci_index_ida, hdev->id); 3327 3328 return error; 3329 } 3330 EXPORT_SYMBOL(hci_register_dev); 3331 3332 /* Unregister HCI device */ 3333 void hci_unregister_dev(struct hci_dev *hdev) 3334 { 3335 int id; 3336 3337 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); 3338 3339 hci_dev_set_flag(hdev, HCI_UNREGISTER); 3340 3341 id = hdev->id; 3342 3343 write_lock(&hci_dev_list_lock); 3344 list_del(&hdev->list); 3345 write_unlock(&hci_dev_list_lock); 3346 3347 hci_dev_do_close(hdev); 3348 3349 cancel_work_sync(&hdev->power_on); 3350 3351 if (!test_bit(HCI_INIT, &hdev->flags) && 3352 !hci_dev_test_flag(hdev, HCI_SETUP) && 3353 !hci_dev_test_flag(hdev, HCI_CONFIG)) { 3354 hci_dev_lock(hdev); 3355 mgmt_index_removed(hdev); 3356 hci_dev_unlock(hdev); 3357 } 3358 3359 /* mgmt_index_removed should take care of emptying the 3360 * pending list */ 3361 BUG_ON(!list_empty(&hdev->mgmt_pending)); 3362 3363 hci_notify(hdev, HCI_DEV_UNREG); 3364 3365 if (hdev->rfkill) { 3366 rfkill_unregister(hdev->rfkill); 3367 rfkill_destroy(hdev->rfkill); 3368 } 3369 3370 device_del(&hdev->dev); 3371 3372 debugfs_remove_recursive(hdev->debugfs); 3373 3374 destroy_workqueue(hdev->workqueue); 3375 destroy_workqueue(hdev->req_workqueue); 3376 3377 hci_dev_lock(hdev); 3378 hci_bdaddr_list_clear(&hdev->blacklist); 3379 hci_bdaddr_list_clear(&hdev->whitelist); 3380 hci_uuids_clear(hdev); 3381 hci_link_keys_clear(hdev); 3382 hci_smp_ltks_clear(hdev); 3383 hci_smp_irks_clear(hdev); 3384 hci_remote_oob_data_clear(hdev); 3385 hci_adv_instances_clear(hdev); 3386 hci_bdaddr_list_clear(&hdev->le_white_list); 3387 hci_conn_params_clear_all(hdev); 3388 hci_discovery_filter_clear(hdev); 3389 hci_dev_unlock(hdev); 3390 3391 hci_dev_put(hdev); 3392 3393 ida_simple_remove(&hci_index_ida, id); 3394 } 3395 EXPORT_SYMBOL(hci_unregister_dev); 3396 3397 /* Suspend HCI device */ 3398 int hci_suspend_dev(struct hci_dev *hdev) 3399 { 3400 hci_notify(hdev, HCI_DEV_SUSPEND); 3401 return 0; 3402 } 3403 EXPORT_SYMBOL(hci_suspend_dev); 3404 3405 /* Resume HCI device */ 3406 int hci_resume_dev(struct hci_dev *hdev) 3407 { 3408 hci_notify(hdev, HCI_DEV_RESUME); 3409 return 0; 3410 } 3411 EXPORT_SYMBOL(hci_resume_dev); 3412 3413 /* Reset HCI device */ 3414 int hci_reset_dev(struct hci_dev *hdev) 3415 { 3416 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 }; 3417 struct sk_buff *skb; 3418 3419 skb = bt_skb_alloc(3, GFP_ATOMIC); 3420 if (!skb) 3421 return -ENOMEM; 3422 3423 bt_cb(skb)->pkt_type = HCI_EVENT_PKT; 3424 memcpy(skb_put(skb, 3), hw_err, 3); 3425 3426 /* Send Hardware Error to upper stack */ 3427 return hci_recv_frame(hdev, skb); 3428 } 3429 EXPORT_SYMBOL(hci_reset_dev); 3430 3431 /* Receive frame from HCI drivers */ 3432 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb) 3433 { 3434 if (!hdev || (!test_bit(HCI_UP, &hdev->flags) 3435 && !test_bit(HCI_INIT, &hdev->flags))) { 3436 kfree_skb(skb); 3437 return -ENXIO; 3438 } 3439 3440 /* Incoming skb */ 3441 bt_cb(skb)->incoming = 1; 3442 3443 /* Time stamp */ 3444 __net_timestamp(skb); 3445 3446 skb_queue_tail(&hdev->rx_q, skb); 3447 queue_work(hdev->workqueue, &hdev->rx_work); 3448 
3449 return 0; 3450 } 3451 EXPORT_SYMBOL(hci_recv_frame); 3452 3453 /* ---- Interface to upper protocols ---- */ 3454 3455 int hci_register_cb(struct hci_cb *cb) 3456 { 3457 BT_DBG("%p name %s", cb, cb->name); 3458 3459 mutex_lock(&hci_cb_list_lock); 3460 list_add_tail(&cb->list, &hci_cb_list); 3461 mutex_unlock(&hci_cb_list_lock); 3462 3463 return 0; 3464 } 3465 EXPORT_SYMBOL(hci_register_cb); 3466 3467 int hci_unregister_cb(struct hci_cb *cb) 3468 { 3469 BT_DBG("%p name %s", cb, cb->name); 3470 3471 mutex_lock(&hci_cb_list_lock); 3472 list_del(&cb->list); 3473 mutex_unlock(&hci_cb_list_lock); 3474 3475 return 0; 3476 } 3477 EXPORT_SYMBOL(hci_unregister_cb); 3478 3479 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) 3480 { 3481 int err; 3482 3483 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len); 3484 3485 /* Time stamp */ 3486 __net_timestamp(skb); 3487 3488 /* Send copy to monitor */ 3489 hci_send_to_monitor(hdev, skb); 3490 3491 if (atomic_read(&hdev->promisc)) { 3492 /* Send copy to the sockets */ 3493 hci_send_to_sock(hdev, skb); 3494 } 3495 3496 /* Get rid of skb owner, prior to sending to the driver. */ 3497 skb_orphan(skb); 3498 3499 err = hdev->send(hdev, skb); 3500 if (err < 0) { 3501 BT_ERR("%s sending frame failed (%d)", hdev->name, err); 3502 kfree_skb(skb); 3503 } 3504 } 3505 3506 /* Send HCI command */ 3507 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, 3508 const void *param) 3509 { 3510 struct sk_buff *skb; 3511 3512 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen); 3513 3514 skb = hci_prepare_cmd(hdev, opcode, plen, param); 3515 if (!skb) { 3516 BT_ERR("%s no memory for command", hdev->name); 3517 return -ENOMEM; 3518 } 3519 3520 /* Stand-alone HCI commands must be flagged as 3521 * single-command requests. 
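	 * The start marker is what hci_req_is_complete() checks when walking
	 * hdev->cmd_q to find the boundary between one request and the next.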
3522 */ 3523 bt_cb(skb)->req.start = true; 3524 3525 skb_queue_tail(&hdev->cmd_q, skb); 3526 queue_work(hdev->workqueue, &hdev->cmd_work); 3527 3528 return 0; 3529 } 3530 3531 /* Get data from the previously sent command */ 3532 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode) 3533 { 3534 struct hci_command_hdr *hdr; 3535 3536 if (!hdev->sent_cmd) 3537 return NULL; 3538 3539 hdr = (void *) hdev->sent_cmd->data; 3540 3541 if (hdr->opcode != cpu_to_le16(opcode)) 3542 return NULL; 3543 3544 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode); 3545 3546 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE; 3547 } 3548 3549 /* Send ACL data */ 3550 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags) 3551 { 3552 struct hci_acl_hdr *hdr; 3553 int len = skb->len; 3554 3555 skb_push(skb, HCI_ACL_HDR_SIZE); 3556 skb_reset_transport_header(skb); 3557 hdr = (struct hci_acl_hdr *)skb_transport_header(skb); 3558 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags)); 3559 hdr->dlen = cpu_to_le16(len); 3560 } 3561 3562 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue, 3563 struct sk_buff *skb, __u16 flags) 3564 { 3565 struct hci_conn *conn = chan->conn; 3566 struct hci_dev *hdev = conn->hdev; 3567 struct sk_buff *list; 3568 3569 skb->len = skb_headlen(skb); 3570 skb->data_len = 0; 3571 3572 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT; 3573 3574 switch (hdev->dev_type) { 3575 case HCI_BREDR: 3576 hci_add_acl_hdr(skb, conn->handle, flags); 3577 break; 3578 case HCI_AMP: 3579 hci_add_acl_hdr(skb, chan->handle, flags); 3580 break; 3581 default: 3582 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type); 3583 return; 3584 } 3585 3586 list = skb_shinfo(skb)->frag_list; 3587 if (!list) { 3588 /* Non fragmented */ 3589 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len); 3590 3591 skb_queue_tail(queue, skb); 3592 } else { 3593 /* Fragmented */ 3594 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len); 3595 3596 skb_shinfo(skb)->frag_list = NULL; 3597 3598 /* Queue all fragments atomically. We need to use spin_lock_bh 3599 * here because of 6LoWPAN links, as there this function is 3600 * called from softirq and using normal spin lock could cause 3601 * deadlocks. 
3602 */ 3603 spin_lock_bh(&queue->lock); 3604 3605 __skb_queue_tail(queue, skb); 3606 3607 flags &= ~ACL_START; 3608 flags |= ACL_CONT; 3609 do { 3610 skb = list; list = list->next; 3611 3612 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT; 3613 hci_add_acl_hdr(skb, conn->handle, flags); 3614 3615 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len); 3616 3617 __skb_queue_tail(queue, skb); 3618 } while (list); 3619 3620 spin_unlock_bh(&queue->lock); 3621 } 3622 } 3623 3624 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags) 3625 { 3626 struct hci_dev *hdev = chan->conn->hdev; 3627 3628 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags); 3629 3630 hci_queue_acl(chan, &chan->data_q, skb, flags); 3631 3632 queue_work(hdev->workqueue, &hdev->tx_work); 3633 } 3634 3635 /* Send SCO data */ 3636 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb) 3637 { 3638 struct hci_dev *hdev = conn->hdev; 3639 struct hci_sco_hdr hdr; 3640 3641 BT_DBG("%s len %d", hdev->name, skb->len); 3642 3643 hdr.handle = cpu_to_le16(conn->handle); 3644 hdr.dlen = skb->len; 3645 3646 skb_push(skb, HCI_SCO_HDR_SIZE); 3647 skb_reset_transport_header(skb); 3648 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE); 3649 3650 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT; 3651 3652 skb_queue_tail(&conn->data_q, skb); 3653 queue_work(hdev->workqueue, &hdev->tx_work); 3654 } 3655 3656 /* ---- HCI TX task (outgoing data) ---- */ 3657 3658 /* HCI Connection scheduler */ 3659 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, 3660 int *quote) 3661 { 3662 struct hci_conn_hash *h = &hdev->conn_hash; 3663 struct hci_conn *conn = NULL, *c; 3664 unsigned int num = 0, min = ~0; 3665 3666 /* We don't have to lock device here. Connections are always 3667 * added and removed with TX task disabled. */ 3668 3669 rcu_read_lock(); 3670 3671 list_for_each_entry_rcu(c, &h->list, list) { 3672 if (c->type != type || skb_queue_empty(&c->data_q)) 3673 continue; 3674 3675 if (c->state != BT_CONNECTED && c->state != BT_CONFIG) 3676 continue; 3677 3678 num++; 3679 3680 if (c->sent < min) { 3681 min = c->sent; 3682 conn = c; 3683 } 3684 3685 if (hci_conn_num(hdev, type) == num) 3686 break; 3687 } 3688 3689 rcu_read_unlock(); 3690 3691 if (conn) { 3692 int cnt, q; 3693 3694 switch (conn->type) { 3695 case ACL_LINK: 3696 cnt = hdev->acl_cnt; 3697 break; 3698 case SCO_LINK: 3699 case ESCO_LINK: 3700 cnt = hdev->sco_cnt; 3701 break; 3702 case LE_LINK: 3703 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt; 3704 break; 3705 default: 3706 cnt = 0; 3707 BT_ERR("Unknown link type"); 3708 } 3709 3710 q = cnt / num; 3711 *quote = q ? 
q : 1; 3712 } else 3713 *quote = 0; 3714 3715 BT_DBG("conn %p quote %d", conn, *quote); 3716 return conn; 3717 } 3718 3719 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type) 3720 { 3721 struct hci_conn_hash *h = &hdev->conn_hash; 3722 struct hci_conn *c; 3723 3724 BT_ERR("%s link tx timeout", hdev->name); 3725 3726 rcu_read_lock(); 3727 3728 /* Kill stalled connections */ 3729 list_for_each_entry_rcu(c, &h->list, list) { 3730 if (c->type == type && c->sent) { 3731 BT_ERR("%s killing stalled connection %pMR", 3732 hdev->name, &c->dst); 3733 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM); 3734 } 3735 } 3736 3737 rcu_read_unlock(); 3738 } 3739 3740 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type, 3741 int *quote) 3742 { 3743 struct hci_conn_hash *h = &hdev->conn_hash; 3744 struct hci_chan *chan = NULL; 3745 unsigned int num = 0, min = ~0, cur_prio = 0; 3746 struct hci_conn *conn; 3747 int cnt, q, conn_num = 0; 3748 3749 BT_DBG("%s", hdev->name); 3750 3751 rcu_read_lock(); 3752 3753 list_for_each_entry_rcu(conn, &h->list, list) { 3754 struct hci_chan *tmp; 3755 3756 if (conn->type != type) 3757 continue; 3758 3759 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG) 3760 continue; 3761 3762 conn_num++; 3763 3764 list_for_each_entry_rcu(tmp, &conn->chan_list, list) { 3765 struct sk_buff *skb; 3766 3767 if (skb_queue_empty(&tmp->data_q)) 3768 continue; 3769 3770 skb = skb_peek(&tmp->data_q); 3771 if (skb->priority < cur_prio) 3772 continue; 3773 3774 if (skb->priority > cur_prio) { 3775 num = 0; 3776 min = ~0; 3777 cur_prio = skb->priority; 3778 } 3779 3780 num++; 3781 3782 if (conn->sent < min) { 3783 min = conn->sent; 3784 chan = tmp; 3785 } 3786 } 3787 3788 if (hci_conn_num(hdev, type) == conn_num) 3789 break; 3790 } 3791 3792 rcu_read_unlock(); 3793 3794 if (!chan) 3795 return NULL; 3796 3797 switch (chan->conn->type) { 3798 case ACL_LINK: 3799 cnt = hdev->acl_cnt; 3800 break; 3801 case AMP_LINK: 3802 cnt = hdev->block_cnt; 3803 break; 3804 case SCO_LINK: 3805 case ESCO_LINK: 3806 cnt = hdev->sco_cnt; 3807 break; 3808 case LE_LINK: 3809 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt; 3810 break; 3811 default: 3812 cnt = 0; 3813 BT_ERR("Unknown link type"); 3814 } 3815 3816 q = cnt / num; 3817 *quote = q ? 
		   q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}

static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}

static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}

static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}

static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
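			/* Not enough controller buffer blocks left for this
			 * packet; leave it queued and stop scheduling for now.
			 */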
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}

static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}

/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

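	/* Controllers without a dedicated LE buffer pool (le_pkts == 0)
	 * share the ACL buffers, so account against acl_cnt in that case.
	 */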
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}

static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;
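	/* Re-queues a clone of hdev->sent_cmd (unless it was a reset); used
	 * when a controller emits a spontaneous reset-complete during init
	 * and the pending command would otherwise never complete.
	 */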
4209 4210 if (!hdev->sent_cmd) 4211 return; 4212 4213 sent = (void *) hdev->sent_cmd->data; 4214 opcode = __le16_to_cpu(sent->opcode); 4215 if (opcode == HCI_OP_RESET) 4216 return; 4217 4218 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL); 4219 if (!skb) 4220 return; 4221 4222 skb_queue_head(&hdev->cmd_q, skb); 4223 queue_work(hdev->workqueue, &hdev->cmd_work); 4224 } 4225 4226 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status, 4227 hci_req_complete_t *req_complete, 4228 hci_req_complete_skb_t *req_complete_skb) 4229 { 4230 struct sk_buff *skb; 4231 unsigned long flags; 4232 4233 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status); 4234 4235 /* If the completed command doesn't match the last one that was 4236 * sent we need to do special handling of it. 4237 */ 4238 if (!hci_sent_cmd_data(hdev, opcode)) { 4239 /* Some CSR based controllers generate a spontaneous 4240 * reset complete event during init and any pending 4241 * command will never be completed. In such a case we 4242 * need to resend whatever was the last sent 4243 * command. 4244 */ 4245 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET) 4246 hci_resend_last(hdev); 4247 4248 return; 4249 } 4250 4251 /* If the command succeeded and there's still more commands in 4252 * this request the request is not yet complete. 4253 */ 4254 if (!status && !hci_req_is_complete(hdev)) 4255 return; 4256 4257 /* If this was the last command in a request the complete 4258 * callback would be found in hdev->sent_cmd instead of the 4259 * command queue (hdev->cmd_q). 4260 */ 4261 if (bt_cb(hdev->sent_cmd)->req.complete) { 4262 *req_complete = bt_cb(hdev->sent_cmd)->req.complete; 4263 return; 4264 } 4265 4266 if (bt_cb(hdev->sent_cmd)->req.complete_skb) { 4267 *req_complete_skb = bt_cb(hdev->sent_cmd)->req.complete_skb; 4268 return; 4269 } 4270 4271 /* Remove all pending commands belonging to this request */ 4272 spin_lock_irqsave(&hdev->cmd_q.lock, flags); 4273 while ((skb = __skb_dequeue(&hdev->cmd_q))) { 4274 if (bt_cb(skb)->req.start) { 4275 __skb_queue_head(&hdev->cmd_q, skb); 4276 break; 4277 } 4278 4279 *req_complete = bt_cb(skb)->req.complete; 4280 *req_complete_skb = bt_cb(skb)->req.complete_skb; 4281 kfree_skb(skb); 4282 } 4283 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags); 4284 } 4285 4286 static void hci_rx_work(struct work_struct *work) 4287 { 4288 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work); 4289 struct sk_buff *skb; 4290 4291 BT_DBG("%s", hdev->name); 4292 4293 while ((skb = skb_dequeue(&hdev->rx_q))) { 4294 /* Send copy to monitor */ 4295 hci_send_to_monitor(hdev, skb); 4296 4297 if (atomic_read(&hdev->promisc)) { 4298 /* Send copy to the sockets */ 4299 hci_send_to_sock(hdev, skb); 4300 } 4301 4302 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { 4303 kfree_skb(skb); 4304 continue; 4305 } 4306 4307 if (test_bit(HCI_INIT, &hdev->flags)) { 4308 /* Don't process data packets in this states. 
*/ 4309 switch (bt_cb(skb)->pkt_type) { 4310 case HCI_ACLDATA_PKT: 4311 case HCI_SCODATA_PKT: 4312 kfree_skb(skb); 4313 continue; 4314 } 4315 } 4316 4317 /* Process frame */ 4318 switch (bt_cb(skb)->pkt_type) { 4319 case HCI_EVENT_PKT: 4320 BT_DBG("%s Event packet", hdev->name); 4321 hci_event_packet(hdev, skb); 4322 break; 4323 4324 case HCI_ACLDATA_PKT: 4325 BT_DBG("%s ACL data packet", hdev->name); 4326 hci_acldata_packet(hdev, skb); 4327 break; 4328 4329 case HCI_SCODATA_PKT: 4330 BT_DBG("%s SCO data packet", hdev->name); 4331 hci_scodata_packet(hdev, skb); 4332 break; 4333 4334 default: 4335 kfree_skb(skb); 4336 break; 4337 } 4338 } 4339 } 4340 4341 static void hci_cmd_work(struct work_struct *work) 4342 { 4343 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work); 4344 struct sk_buff *skb; 4345 4346 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name, 4347 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q)); 4348 4349 /* Send queued commands */ 4350 if (atomic_read(&hdev->cmd_cnt)) { 4351 skb = skb_dequeue(&hdev->cmd_q); 4352 if (!skb) 4353 return; 4354 4355 kfree_skb(hdev->sent_cmd); 4356 4357 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL); 4358 if (hdev->sent_cmd) { 4359 atomic_dec(&hdev->cmd_cnt); 4360 hci_send_frame(hdev, skb); 4361 if (test_bit(HCI_RESET, &hdev->flags)) 4362 cancel_delayed_work(&hdev->cmd_timer); 4363 else 4364 schedule_delayed_work(&hdev->cmd_timer, 4365 HCI_CMD_TIMEOUT); 4366 } else { 4367 skb_queue_head(&hdev->cmd_q, skb); 4368 queue_work(hdev->workqueue, &hdev->cmd_work); 4369 } 4370 } 4371 } 4372