/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI event handling. */

#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "a2mp.h"
#include "amp.h"
#include "smp.h"
#include "msft.h"
#include "eir.h"

#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"

#define secs_to_jiffies(_secs) msecs_to_jiffies((_secs) * 1000)

/* Handle HCI Event packets */

static void *hci_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
			     u8 ev, size_t len)
{
	void *data;

	data = skb_pull_data(skb, len);
	if (!data)
		bt_dev_err(hdev, "Malformed Event: 0x%2.2x", ev);

	return data;
}

static void *hci_cc_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
			     u16 op, size_t len)
{
	void *data;

	data = skb_pull_data(skb, len);
	if (!data)
		bt_dev_err(hdev, "Malformed Command Complete: 0x%4.4x", op);

	return data;
}

static void *hci_le_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
				u8 ev, size_t len)
{
	void *data;

	data = skb_pull_data(skb, len);
	if (!data)
		bt_dev_err(hdev, "Malformed LE Event: 0x%2.2x", ev);

	return data;
}
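
/* Each pull helper above checks that the skb actually carries the number
 * of bytes the event structure requires before any field is read.  Event
 * packets come straight from the controller and cannot be trusted, so a
 * short packet is logged with the offending event code or opcode and NULL
 * is returned, which the callers treat as a malformed packet.
 */
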
static u8 hci_cc_inquiry_cancel(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	/* It is possible that we receive Inquiry Complete event right
	 * before we receive Inquiry Cancel Command Complete event, in
	 * which case the latter event should have status of Command
	 * Disallowed (0x0c). This should not be treated as error, since
	 * we actually achieve what Inquiry Cancel wants to achieve,
	 * which is to end the last Inquiry session.
	 */
	if (rp->status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
		bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
		rp->status = 0x00;
	}

	if (rp->status)
		return rp->status;

	clear_bit(HCI_INQUIRY, &hdev->flags);
	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	hci_dev_lock(hdev);
	/* Set discovery state to stopped if we're not doing LE active
	 * scanning.
	 */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
	    hdev->le_scan_type != LE_SCAN_ACTIVE)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);

	return rp->status;
}

static u8 hci_cc_periodic_inq(struct hci_dev *hdev, void *data,
			      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);

	return rp->status;
}

static u8 hci_cc_exit_periodic_inq(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);

	hci_conn_check_pending(hdev);

	return rp->status;
}

static u8 hci_cc_remote_name_req_cancel(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	return rp->status;
}

static u8 hci_cc_role_discovery(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_rp_role_discovery *rp = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->role = rp->role;

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_link_policy(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_rp_read_link_policy *rp = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = __le16_to_cpu(rp->policy);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_write_link_policy(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = data;
	struct hci_conn *conn;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_def_link_policy(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_read_def_link_policy *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->link_policy = __le16_to_cpu(rp->policy);

	return rp->status;
}

static u8 hci_cc_write_def_link_policy(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
	if (!sent)
		return rp->status;

	hdev->link_policy = get_unaligned_le16(sent);

	return rp->status;
}
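
/* Note the pattern used by the "write" handlers above: the Command
 * Complete event for these commands carries only a status, so the value
 * that was actually written is recovered from the parameters of the
 * issued command via hci_sent_cmd_data() before the cached copy in hdev
 * is updated.
 */
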
static u8 hci_cc_reset(struct hci_dev *hdev, void *data, struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	clear_bit(HCI_RESET, &hdev->flags);

	if (rp->status)
		return rp->status;

	/* Reset all non-persistent flags */
	hci_dev_clear_volatile_flags(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;

	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
	hdev->scan_rsp_data_len = 0;

	hdev->le_scan_type = LE_SCAN_PASSIVE;

	hdev->ssp_debug_mode = 0;

	hci_bdaddr_list_clear(&hdev->le_accept_list);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);

	return rp->status;
}

static u8 hci_cc_read_stored_link_key(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_read_stored_link_key *rp = data;
	struct hci_cp_read_stored_link_key *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
	if (!sent)
		return rp->status;

	if (!rp->status && sent->read_all == 0x01) {
		hdev->stored_max_keys = le16_to_cpu(rp->max_keys);
		hdev->stored_num_keys = le16_to_cpu(rp->num_keys);
	}

	return rp->status;
}

static u8 hci_cc_delete_stored_link_key(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_delete_stored_link_key *rp = data;
	u16 num_keys;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	num_keys = le16_to_cpu(rp->num_keys);

	if (num_keys <= hdev->stored_num_keys)
		hdev->stored_num_keys -= num_keys;
	else
		hdev->stored_num_keys = 0;

	return rp->status;
}

static u8 hci_cc_write_local_name(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_local_name_complete(hdev, sent, rp->status);
	else if (!rp->status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_local_name(struct hci_dev *hdev, void *data,
				 struct sk_buff *skb)
{
	struct hci_rp_read_local_name *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG))
		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);

	return rp->status;
}

static u8 hci_cc_write_auth_enable(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (!rp->status) {
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_auth_enable_complete(hdev, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_write_encrypt_mode(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u8 param;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
	if (!sent)
		return rp->status;

	param = *((__u8 *) sent);

	if (param)
		set_bit(HCI_ENCRYPT, &hdev->flags);
	else
		clear_bit(HCI_ENCRYPT, &hdev->flags);

	return rp->status;
}

static u8 hci_cc_write_scan_enable(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u8 param;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return rp->status;

	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	if (rp->status) {
		hdev->discov_timeout = 0;
		goto done;
	}

	if (param & SCAN_INQUIRY)
		set_bit(HCI_ISCAN, &hdev->flags);
	else
		clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_PAGE)
		set_bit(HCI_PSCAN, &hdev->flags);
	else
		clear_bit(HCI_PSCAN, &hdev->flags);

done:
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_set_event_filter(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_set_event_filter *cp;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_SET_EVENT_FLT);
	if (!sent)
		return rp->status;

	cp = (struct hci_cp_set_event_filter *)sent;

	if (cp->flt_type == HCI_FLT_CLEAR_ALL)
		hci_dev_clear_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
	else
		hci_dev_set_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);

	return rp->status;
}

static u8 hci_cc_read_class_of_dev(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_read_class_of_dev *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	memcpy(hdev->dev_class, rp->dev_class, 3);

	bt_dev_dbg(hdev, "class 0x%.2x%.2x%.2x", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);

	return rp->status;
}

static u8 hci_cc_write_class_of_dev(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (!rp->status)
		memcpy(hdev->dev_class, sent, 3);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_class_of_dev_complete(hdev, sent, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}
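
/* The voice setting below selects the input coding and air mode used for
 * synchronous (SCO) links.  Whenever it changes, drivers are notified via
 * hdev->notify(HCI_NOTIFY_VOICE_SETTING) so they can reconfigure their
 * SCO transport to match.
 */
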
static u8 hci_cc_read_voice_setting(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_read_voice_setting *rp = data;
	__u16 setting;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	setting = __le16_to_cpu(rp->voice_setting);

	if (hdev->voice_setting == setting)
		return rp->status;

	hdev->voice_setting = setting;

	bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);

	return rp->status;
}

static u8 hci_cc_write_voice_setting(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u16 setting;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
	if (!sent)
		return rp->status;

	setting = get_unaligned_le16(sent);

	if (hdev->voice_setting == setting)
		return rp->status;

	hdev->voice_setting = setting;

	bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);

	return rp->status;
}

static u8 hci_cc_read_num_supported_iac(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_read_num_supported_iac *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->num_iac = rp->num_iac;

	bt_dev_dbg(hdev, "num iac %d", hdev->num_iac);

	return rp->status;
}

static u8 hci_cc_write_ssp_mode(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_write_ssp_mode *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (!rp->status) {
		if (sent->mode)
			hdev->features[1][0] |= LMP_HOST_SSP;
		else
			hdev->features[1][0] &= ~LMP_HOST_SSP;
	}

	if (!rp->status) {
		if (sent->mode)
			hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
	}

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_write_sc_support(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_write_sc_support *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (!rp->status) {
		if (sent->support)
			hdev->features[1][0] |= LMP_HOST_SC;
		else
			hdev->features[1][0] &= ~LMP_HOST_SC;
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT) && !rp->status) {
		if (sent->support)
			hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
	}

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_local_version(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_read_local_version *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hdev->hci_ver = rp->hci_ver;
		hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
		hdev->lmp_ver = rp->lmp_ver;
		hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
	}

	return rp->status;
}

static u8 hci_cc_read_local_commands(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_local_commands *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG))
		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));

	return rp->status;
}

static u8 hci_cc_read_auth_payload_timeout(struct hci_dev *hdev, void *data,
					   struct sk_buff *skb)
{
	struct hci_rp_read_auth_payload_to *rp = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_write_auth_payload_timeout(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_rp_write_auth_payload_to *rp = data;
	struct hci_conn *conn;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->auth_payload_timeout = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);

	return rp->status;
}
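
/* hci_cc_read_local_features() below translates the controller's LMP
 * feature bits into the ACL packet types (hdev->pkt_type) and (e)SCO
 * packet types (hdev->esco_type) the host may use, so that connection
 * setup only offers packet types the controller actually supports.
 */
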
static u8 hci_cc_read_local_features(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device.
	 */

	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);

	return rp->status;
}

static u8 hci_cc_read_local_ext_features(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_rp_read_local_ext_features *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (hdev->max_page < rp->max_page)
		hdev->max_page = rp->max_page;

	if (rp->page < HCI_MAX_PAGES)
		memcpy(hdev->features[rp->page], rp->features, 8);

	return rp->status;
}

static u8 hci_cc_read_flow_control_mode(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_read_flow_control_mode *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->flow_ctl_mode = rp->mode;

	return rp->status;
}

static u8 hci_cc_read_buffer_size(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
		hdev->sco_mtu = 64;
		hdev->sco_pkts = 8;
	}

	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);

	return rp->status;
}

static u8 hci_cc_read_bd_addr(struct hci_dev *hdev, void *data,
			      struct sk_buff *skb)
{
	struct hci_rp_read_bd_addr *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (test_bit(HCI_INIT, &hdev->flags))
		bacpy(&hdev->bdaddr, &rp->bdaddr);

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		bacpy(&hdev->setup_addr, &rp->bdaddr);

	return rp->status;
}

static u8 hci_cc_read_local_pairing_opts(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_rp_read_local_pairing_opts *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hdev->pairing_opts = rp->pairing_opts;
		hdev->max_enc_key_size = rp->max_key_size;
	}

	return rp->status;
}
static u8 hci_cc_read_page_scan_activity(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_activity *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (test_bit(HCI_INIT, &hdev->flags)) {
		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
		hdev->page_scan_window = __le16_to_cpu(rp->window);
	}

	return rp->status;
}

static u8 hci_cc_write_page_scan_activity(struct hci_dev *hdev, void *data,
					  struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_write_page_scan_activity *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
	if (!sent)
		return rp->status;

	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
	hdev->page_scan_window = __le16_to_cpu(sent->window);

	return rp->status;
}

static u8 hci_cc_read_page_scan_type(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_type *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->page_scan_type = rp->type;

	return rp->status;
}

static u8 hci_cc_write_page_scan_type(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	u8 *type;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
	if (type)
		hdev->page_scan_type = *type;

	return rp->status;
}

static u8 hci_cc_read_data_block_size(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_read_data_block_size *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
	hdev->block_len = __le16_to_cpu(rp->block_len);
	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);

	hdev->block_cnt = hdev->num_blocks;

	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
	       hdev->block_cnt, hdev->block_len);

	return rp->status;
}

static u8 hci_cc_read_clock(struct hci_dev *hdev, void *data,
			    struct sk_buff *skb)
{
	struct hci_rp_read_clock *rp = data;
	struct hci_cp_read_clock *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!cp)
		goto unlock;

	if (cp->which == 0x00) {
		hdev->clock = le32_to_cpu(rp->clock);
		goto unlock;
	}

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		conn->clock = le32_to_cpu(rp->clock);
		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
	}

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}

static u8 hci_cc_read_local_amp_info(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_info *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->amp_status = rp->amp_status;
	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
	hdev->amp_type = rp->amp_type;
	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);

	return rp->status;
}

static u8 hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_rp_read_inq_rsp_tx_power *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->inq_tx_power = rp->tx_power;

	return rp->status;
}

static u8 hci_cc_read_def_err_data_reporting(struct hci_dev *hdev, void *data,
					     struct sk_buff *skb)
{
	struct hci_rp_read_def_err_data_reporting *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->err_data_reporting = rp->err_data_reporting;

	return rp->status;
}

static u8 hci_cc_write_def_err_data_reporting(struct hci_dev *hdev, void *data,
					      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_write_def_err_data_reporting *cp;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
	if (!cp)
		return rp->status;

	hdev->err_data_reporting = cp->err_data_reporting;

	return rp->status;
}

static u8 hci_cc_pin_code_reply(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}

static u8 hci_cc_pin_code_neg_reply(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_pin_code_neg_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
						 rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_read_buffer_size(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_le_read_buffer_size *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
	hdev->le_pkts = rp->le_max_pkt;

	hdev->le_cnt = hdev->le_pkts;

	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);

	return rp->status;
}
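
/* A zero LE buffer size reported by LE Read Buffer Size means the
 * controller has no dedicated LE buffers and shares its ACL buffers with
 * LE traffic; the TX scheduler elsewhere in the stack falls back to the
 * ACL counters in that case.
 */
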
static u8 hci_cc_le_read_local_features(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_local_features *rp = data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return rp->status;

	memcpy(hdev->le_features, rp->features, 8);

	return rp->status;
}

static u8 hci_cc_le_read_adv_tx_power(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_le_read_adv_tx_power *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->adv_tx_power = rp->tx_power;

	return rp->status;
}

static u8 hci_cc_user_confirm_reply(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
						 rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_user_confirm_neg_reply(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_user_passkey_reply(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
						 0, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_user_passkey_neg_reply(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_local_oob_data(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_data *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	return rp->status;
}

static u8 hci_cc_read_local_oob_ext_data(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_ext_data *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	return rp->status;
}

static u8 hci_cc_le_set_random_addr(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	bdaddr_t *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	bacpy(&hdev->random_addr, sent);

	if (!bacmp(&hdev->rpa, sent)) {
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired,
				   secs_to_jiffies(hdev->rpa_timeout));
	}

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_set_default_phy(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_default_phy *cp;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	hdev->le_tx_def_phys = cp->tx_phys;
	hdev->le_rx_def_phys = cp->rx_phys;

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_adv_set_rand_addr *cp;
	struct adv_info *adv;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
	/* Update only for an adv instance, since handle 0x00 shall use
	 * HCI_OP_LE_SET_RANDOM_ADDR, which allows both extended and
	 * non-extended advertising.
	 */
	if (!cp || !cp->handle)
		return rp->status;

	hci_dev_lock(hdev);

	adv = hci_find_adv_instance(hdev, cp->handle);
	if (adv) {
		bacpy(&adv->random_addr, &cp->bdaddr);
		if (!bacmp(&hdev->rpa, &cp->bdaddr)) {
			adv->rpa_expired = false;
			queue_delayed_work(hdev->workqueue,
					   &adv->rpa_expired_cb,
					   secs_to_jiffies(hdev->rpa_timeout));
		}
	}

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_remove_adv_set(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	u8 *instance;
	int err;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	instance = hci_sent_cmd_data(hdev, HCI_OP_LE_REMOVE_ADV_SET);
	if (!instance)
		return rp->status;

	hci_dev_lock(hdev);

	err = hci_remove_adv_instance(hdev, *instance);
	if (!err)
		mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd), hdev,
					 *instance);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_clear_adv_sets(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct adv_info *adv, *n;
	int err;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (!hci_sent_cmd_data(hdev, HCI_OP_LE_CLEAR_ADV_SETS))
		return rp->status;

	hci_dev_lock(hdev);

	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
		u8 instance = adv->instance;

		err = hci_remove_adv_instance(hdev, instance);
		if (!err)
			mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd),
						 hdev, instance);
	}

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_read_transmit_power(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_transmit_power *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->min_le_tx_power = rp->min_le_tx_power;
	hdev->max_le_tx_power = rp->max_le_tx_power;

	return rp->status;
}

static u8 hci_cc_le_set_privacy_mode(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_privacy_mode *cp;
	struct hci_conn_params *params;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PRIVACY_MODE);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	params = hci_conn_params_lookup(hdev, &cp->bdaddr, cp->bdaddr_type);
	if (params)
		params->privacy_mode = cp->mode;

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_set_adv_enable(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u8 *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	/* If we're doing connection initiation as peripheral, set a
	 * timeout in case something goes wrong.
	 */
	if (*sent) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_adv_enable *cp;
	struct hci_cp_ext_adv_set *set;
	struct adv_info *adv = NULL, *n;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
	if (!cp)
		return rp->status;

	set = (void *)cp->data;

	hci_dev_lock(hdev);

	if (cp->num_of_sets)
		adv = hci_find_adv_instance(hdev, set->handle);

	if (cp->enable) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		if (adv)
			adv->enabled = true;

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		if (cp->num_of_sets) {
			if (adv)
				adv->enabled = false;

			/* If just one instance was disabled, check if there
			 * is any other instance enabled before clearing
			 * HCI_LE_ADV.
			 */
			list_for_each_entry_safe(adv, n, &hdev->adv_instances,
						 list) {
				if (adv->enabled)
					goto unlock;
			}
		} else {
			/* All instances shall be considered disabled */
			list_for_each_entry_safe(adv, n, &hdev->adv_instances,
						 list)
				adv->enabled = false;
		}

		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}
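
/* The scan parameter handlers below cache the requested scan type in
 * hdev->le_scan_type.  Other handlers depend on this cached value; for
 * example, hci_cc_inquiry_cancel() uses it to tell whether an LE active
 * scan is still driving the discovery state.
 */
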
static u8 hci_cc_le_set_scan_param(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_param *cp;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	hdev->le_scan_type = cp->type;

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_set_ext_scan_param(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_scan_params *cp;
	struct hci_ev_status *rp = data;
	struct hci_cp_le_scan_phy_params *phy_param;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
	if (!cp)
		return rp->status;

	phy_param = (void *)cp->data;

	hci_dev_lock(hdev);

	hdev->le_scan_type = phy_param->type;

	hci_dev_unlock(hdev);

	return rp->status;
}

static bool has_pending_adv_report(struct hci_dev *hdev)
{
	struct discovery_state *d = &hdev->discovery;

	return bacmp(&d->last_adv_addr, BDADDR_ANY);
}

static void clear_pending_adv_report(struct hci_dev *hdev)
{
	struct discovery_state *d = &hdev->discovery;

	bacpy(&d->last_adv_addr, BDADDR_ANY);
	d->last_adv_data_len = 0;
}

static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 bdaddr_type, s8 rssi, u32 flags,
				     u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;

	if (len > HCI_MAX_AD_LENGTH)
		return;

	bacpy(&d->last_adv_addr, bdaddr);
	d->last_adv_addr_type = bdaddr_type;
	d->last_adv_rssi = rssi;
	d->last_adv_flags = flags;
	memcpy(d->last_adv_data, data, len);
	d->last_adv_data_len = len;
}

static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
{
	hci_dev_lock(hdev);

	switch (enable) {
	case LE_SCAN_ENABLE:
		hci_dev_set_flag(hdev, HCI_LE_SCAN);
		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
			clear_pending_adv_report(hdev);
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		hci_dev_clear_flag(hdev, HCI_LE_SCAN);

		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request, so mark
		 * discovery as stopped.
		 */
		if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

		break;

	default:
		bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
			   enable);
		break;
	}

	hci_dev_unlock(hdev);
}

static u8 hci_cc_le_set_scan_enable(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return rp->status;

	le_set_scan_enable_complete(hdev, cp->enable);

	return rp->status;
}

static u8 hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_scan_enable *cp;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
	if (!cp)
		return rp->status;

	le_set_scan_enable_complete(hdev, cp->enable);

	return rp->status;
}

static u8 hci_cc_le_read_num_adv_sets(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_le_read_num_supported_adv_sets *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x No of Adv sets %u", rp->status,
		   rp->num_of_sets);

	if (rp->status)
		return rp->status;

	hdev->le_num_of_adv_sets = rp->num_of_sets;

	return rp->status;
}

static u8 hci_cc_le_read_accept_list_size(struct hci_dev *hdev, void *data,
					  struct sk_buff *skb)
{
	struct hci_rp_le_read_accept_list_size *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);

	if (rp->status)
		return rp->status;

	hdev->le_accept_list_size = rp->size;

	return rp->status;
}

static u8 hci_cc_le_clear_accept_list(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->le_accept_list);
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_add_to_accept_list(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_add_to_accept_list *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_add(&hdev->le_accept_list, &sent->bdaddr,
			    sent->bdaddr_type);
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_del_from_accept_list(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_cp_le_del_from_accept_list *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_del(&hdev->le_accept_list, &sent->bdaddr,
			    sent->bdaddr_type);
	hci_dev_unlock(hdev);

	return rp->status;
}
static u8 hci_cc_le_read_supported_states(struct hci_dev *hdev, void *data,
					  struct sk_buff *skb)
{
	struct hci_rp_le_read_supported_states *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	memcpy(hdev->le_states, rp->le_states, 8);

	return rp->status;
}

static u8 hci_cc_le_read_def_data_len(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_le_read_def_data_len *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
	hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);

	return rp->status;
}

static u8 hci_cc_le_write_def_data_len(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_write_def_data_len *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
	if (!sent)
		return rp->status;

	hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
	hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);

	return rp->status;
}

static u8 hci_cc_le_add_to_resolv_list(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_add_to_resolv_list *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
				     sent->bdaddr_type, sent->peer_irk,
				     sent->local_irk);
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_del_from_resolv_list(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_cp_le_del_from_resolv_list *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
				     sent->bdaddr_type);
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_clear_resolv_list(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_read_resolv_list_size(struct hci_dev *hdev, void *data,
					  struct sk_buff *skb)
{
	struct hci_rp_le_read_resolv_list_size *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);

	if (rp->status)
		return rp->status;

	hdev->le_resolv_list_size = rp->size;

	return rp->status;
}

static u8 hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev, void *data,
					       struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u8 *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (*sent)
		hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
	else
		hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_read_max_data_len(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_le_read_max_data_len *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
	hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
	hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
	hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);

	return rp->status;
}

static u8 hci_cc_write_le_host_supported(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_cp_write_le_host_supported *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (sent->le) {
		hdev->features[1][0] |= LMP_HOST_LE;
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	} else {
		hdev->features[1][0] &= ~LMP_HOST_LE;
		hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);
	}

	if (sent->simul)
		hdev->features[1][0] |= LMP_HOST_LE_BREDR;
	else
		hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_set_adv_param(struct hci_dev *hdev, void *data,
			       struct sk_buff *skb)
{
	struct hci_cp_le_set_adv_param *cp;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);
	hdev->adv_addr_type = cp->own_address_type;
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_set_ext_adv_param(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_le_set_ext_adv_params *rp = data;
	struct hci_cp_le_set_ext_adv_params *cp;
	struct adv_info *adv_instance;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);
	hdev->adv_addr_type = cp->own_addr_type;
	if (!cp->handle) {
		/* Store in hdev for instance 0 */
		hdev->adv_tx_power = rp->tx_power;
	} else {
		adv_instance = hci_find_adv_instance(hdev, cp->handle);
		if (adv_instance)
			adv_instance->tx_power = rp->tx_power;
	}
	/* Update adv data as tx power is known now */
	hci_req_update_adv_data(hdev, cp->handle);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_rssi(struct hci_dev *hdev, void *data,
			   struct sk_buff *skb)
{
	struct hci_rp_read_rssi *rp = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->rssi = rp->rssi;

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_tx_power(struct hci_dev *hdev, void *data,
			       struct sk_buff *skb)
{
	struct hci_cp_read_tx_power *sent;
	struct hci_rp_read_tx_power *rp = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (!conn)
		goto unlock;

	switch (sent->type) {
	case 0x00:
		conn->tx_power = rp->tx_power;
		break;
	case 0x01:
		conn->max_tx_power = rp->tx_power;
		break;
	}

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}

static u8 hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	u8 *mode;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
	if (mode)
		hdev->ssp_debug_mode = *mode;

	return rp->status;
}
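
/* The hci_cs_* handlers below run on HCI Command Status events rather
 * than Command Complete events.  A non-zero status means the controller
 * rejected the command and its normal completion event will never
 * arrive, so any connection state that was set up optimistically when
 * the command was sent has to be unwound here.
 */
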
static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
{
	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (status) {
		hci_conn_check_pending(hdev);
		return;
	}

	set_bit(HCI_INQUIRY, &hdev->flags);
}

static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	bt_dev_dbg(hdev, "bdaddr %pMR hcon %p", &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
					    HCI_ROLE_MASTER);
			if (!conn)
				bt_dev_err(hdev, "no memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_add_sco *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

	bt_dev_dbg(hdev, "handle 0x%4.4x", handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}
	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_auth_requested *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_set_conn_encrypt *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}

static int hci_outgoing_auth_needed(struct hci_dev *hdev,
				    struct hci_conn *conn)
{
	if (conn->state != BT_CONFIG || !conn->out)
		return 0;

	if (conn->pending_sec_level == BT_SECURITY_SDP)
		return 0;

	/* Only request authentication for SSP connections or non-SSP
	 * devices with sec_level MEDIUM or HIGH or if MITM protection
	 * is requested.
	 */
	if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
	    conn->pending_sec_level != BT_SECURITY_FIPS &&
	    conn->pending_sec_level != BT_SECURITY_HIGH &&
	    conn->pending_sec_level != BT_SECURITY_MEDIUM)
		return 0;

	return 1;
}

static int hci_resolve_name(struct hci_dev *hdev,
			    struct inquiry_entry *e)
{
	struct hci_cp_remote_name_req cp;

	memset(&cp, 0, sizeof(cp));

	bacpy(&cp.bdaddr, &e->data.bdaddr);
	cp.pscan_rep_mode = e->data.pscan_rep_mode;
	cp.pscan_mode = e->data.pscan_mode;
	cp.clock_offset = e->data.clock_offset;

	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
}
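
/* Pick the next inquiry cache entry that still needs its name resolved
 * and send a Remote Name Request for it. Returns true if a request was
 * sent, false if the resolve list is empty, no entry needs a name, or
 * resolving has already exceeded its time budget.
 */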
static bool hci_resolve_next_name(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	if (list_empty(&discov->resolve))
		return false;

	/* We should stop if we already spent too much time resolving names.
	 */
	if (time_after(jiffies, discov->name_resolve_timeout)) {
		bt_dev_warn_ratelimited(hdev, "Name resolve takes too long.");
		return false;
	}

	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (!e)
		return false;

	if (hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		return true;
	}

	return false;
}

static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	/* Update the mgmt connected state if necessary. Be careful with
	 * conn objects that exist but are not (yet) connected however.
	 * Only those in BT_CONFIG or BT_CONNECTED states can be
	 * considered connected.
	 */
	if (conn &&
	    (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, name, name_len);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in the list of devices whose names
	 * are pending resolution, there is no need to continue with the
	 * next name; that will happen when another Remote Name Request
	 * Complete event arrives.
	 */
	if (!e)
		return;

	list_del(&e->list);

	e->name_state = name ? NAME_KNOWN : NAME_NOT_KNOWN;
	mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00, e->data.rssi,
			 name, name_len);

	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}

static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	/* If successful, wait for the name req complete event before
	 * checking for the need to do authentication.
	 */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested auth_cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		auth_cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(auth_cp), &auth_cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
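
/* A failed command status means the remote feature read never started;
 * a connection waiting in BT_CONFIG would stall, so report the failure
 * upward and drop the reference held for the request.
 */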
static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_read_remote_features *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_read_remote_ext_features *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_setup_sync_conn *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

	bt_dev_dbg(hdev, "handle 0x%4.4x", handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_enhanced_setup_sync_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_enhanced_setup_sync_conn *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

	bt_dev_dbg(hdev, "handle 0x%4.4x", handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_sniff_mode *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, status);
	}

	hci_dev_unlock(hdev);
}
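
/* Mirrors hci_cs_sniff_mode(): on failure, clear the pending
 * mode-change flag and complete any SCO setup that was deferred until
 * the mode change finished.
 */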
static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_exit_sniff_mode *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, status);
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_disconnect *cp;
	struct hci_conn_params *params;
	struct hci_conn *conn;
	bool mgmt_conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	/* Wait for HCI_EV_DISCONN_COMPLETE if status 0x00 and not suspended,
	 * otherwise cleanup the connection immediately.
	 */
	if (!status && !hdev->suspended)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (!conn)
		goto unlock;

	if (status) {
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, status);

		if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
			hdev->cur_adv_instance = conn->adv_instance;
			hci_enable_advertising(hdev);
		}

		goto done;
	}

	mgmt_conn = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);

	if (conn->type == ACL_LINK) {
		if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
			hci_remove_link_key(hdev, &conn->dst);
	}

	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_LINK_LOSS:
			if (cp->reason != HCI_ERROR_CONNECTION_TIMEOUT)
				break;
			fallthrough;

		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			list_del_init(&params->action);
			list_add(&params->action, &hdev->pend_le_conns);
			break;

		default:
			break;
		}
	}

	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
				 cp->reason, mgmt_conn);

	hci_disconn_cfm(conn, cp->reason);

done:
	/* If the disconnection failed for any reason, the upper layer
	 * does not retry to disconnect in current implementation.
	 * Hence, we need to do some basic cleanup here and re-enable
	 * advertising if necessary.
	 */
	hci_conn_del(conn);
unlock:
	hci_dev_unlock(hdev);
}

static u8 ev_bdaddr_type(struct hci_dev *hdev, u8 type, bool *resolved)
{
	/* When using controller based address resolution, then the new
	 * address types 0x02 and 0x03 are used. These types need to be
	 * converted back into either public address or random address type.
	 */
	switch (type) {
	case ADDR_LE_DEV_PUBLIC_RESOLVED:
		if (resolved)
			*resolved = true;
		return ADDR_LE_DEV_PUBLIC;
	case ADDR_LE_DEV_RANDOM_RESOLVED:
		if (resolved)
			*resolved = true;
		return ADDR_LE_DEV_RANDOM;
	}

	if (resolved)
		*resolved = false;
	return type;
}
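
/* Shared command-status bookkeeping for the legacy and extended LE
 * Create Connection variants: record the initiator/responder address
 * pair for SMP and arm a connect timeout for attempts that target a
 * specific peer rather than the accept list.
 */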
static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
			      u8 peer_addr_type, u8 own_address_type,
			      u8 filter_policy)
{
	struct hci_conn *conn;

	conn = hci_conn_hash_lookup_le(hdev, peer_addr,
				       peer_addr_type);
	if (!conn)
		return;

	own_address_type = ev_bdaddr_type(hdev, own_address_type, NULL);

	/* Store the initiator and responder address information which
	 * is needed for SMP. These values will not change during the
	 * lifetime of the connection.
	 */
	conn->init_addr_type = own_address_type;
	if (own_address_type == ADDR_LE_DEV_RANDOM)
		bacpy(&conn->init_addr, &hdev->random_addr);
	else
		bacpy(&conn->init_addr, &hdev->bdaddr);

	conn->resp_addr_type = peer_addr_type;
	bacpy(&conn->resp_addr, peer_addr);

	/* We don't want the connection attempt to stick around
	 * indefinitely since LE doesn't have a page timeout concept
	 * like BR/EDR. Set a timer for any connection that doesn't use
	 * the accept list for connecting.
	 */
	if (filter_policy == HCI_LE_USE_PEER_ADDR)
		queue_delayed_work(conn->hdev->workqueue,
				   &conn->le_conn_timeout,
				   conn->conn_timeout);
}

static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_create_conn *cp;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	/* All connection failure handling is taken care of by the
	 * hci_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */
	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
			  cp->own_address_type, cp->filter_policy);

	hci_dev_unlock(hdev);
}

static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_ext_create_conn *cp;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	/* All connection failure handling is taken care of by the
	 * hci_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */
	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
			  cp->own_addr_type, cp->filter_policy);

	hci_dev_unlock(hdev);
}

static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_read_remote_features *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_start_enc *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (!status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (!conn)
		goto unlock;

	if (conn->state != BT_CONNECTED)
		goto unlock;

	hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
	hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}
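
/* A failed Switch Role request produces no Role Change event, so clear
 * the pending role-switch flag here to allow later switch attempts.
 */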
static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_switch_role *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);

	hci_dev_unlock(hdev);
}

static void hci_inquiry_complete_evt(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_status *ev = data;
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_conn_check_pending(hdev);

	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		return;

	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	hci_dev_lock(hdev);

	if (discov->state != DISCOVERY_FINDING)
		goto unlock;

	if (list_empty(&discov->resolve)) {
		/* When BR/EDR inquiry is active and no LE scanning is in
		 * progress, then change discovery state to indicate completion.
		 *
		 * When running LE scanning and BR/EDR inquiry simultaneously
		 * and the LE scan already finished, then change the discovery
		 * state to indicate completion.
		 */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}
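
	/* Some discovered devices still need their names resolved; start
	 * with the first pending entry and give the resolve phase a
	 * bounded time budget.
	 */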
	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (e && hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
		discov->name_resolve_timeout = jiffies + NAME_RESOLVE_DURATION;
	} else {
		/* When BR/EDR inquiry is active and no LE scanning is in
		 * progress, then change discovery state to indicate completion.
		 *
		 * When running LE scanning and BR/EDR inquiry simultaneously
		 * and the LE scan already finished, then change the discovery
		 * state to indicate completion.
		 */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_inquiry_result_evt(struct hci_dev *hdev, void *edata,
				   struct sk_buff *skb)
{
	struct hci_ev_inquiry_result *ev = edata;
	struct inquiry_data data;
	int i;

	if (!hci_ev_skb_pull(hdev, skb, HCI_EV_INQUIRY_RESULT,
			     flex_array_size(ev, info, ev->num)))
		return;

	bt_dev_dbg(hdev, "num %d", ev->num);

	if (!ev->num)
		return;

	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
		return;

	hci_dev_lock(hdev);

	for (i = 0; i < ev->num; i++) {
		struct inquiry_info *info = &ev->info[i];
		u32 flags;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode = info->pscan_rep_mode;
		data.pscan_period_mode = info->pscan_period_mode;
		data.pscan_mode = info->pscan_mode;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset = info->clock_offset;
		data.rssi = HCI_RSSI_INVALID;
		data.ssp_mode = 0x00;

		flags = hci_inquiry_cache_update(hdev, &data, false);

		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, HCI_RSSI_INVALID,
				  flags, NULL, 0, NULL, 0);
	}

	hci_dev_unlock(hdev);
}
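
/* HCI_Connection_Complete: finish an outgoing attempt or a deferred
 * incoming one. On success the handle is validated and recorded and,
 * for ACL links, remote feature discovery starts; on failure the
 * connection object is failed and any pending SCO setup is aborted
 * with the same status.
 */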
static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = data;
	struct hci_conn *conn;
	u8 status = ev->status;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* In case of error status and there is no connection pending
		 * just unlock as there is nothing to cleanup.
		 */
		if (ev->status)
			goto unlock;

		/* Connection may not exist if auto-connected. Check the bredr
		 * allowlist to see if this device is allowed to auto connect.
		 * If link is an ACL type, create a connection class
		 * automatically.
		 *
		 * Auto-connect will only occur if the event filter is
		 * programmed with a given address. Right now, event filter is
		 * only used during suspend.
		 */
		if (ev->link_type == ACL_LINK &&
		    hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
						      &ev->bdaddr,
						      BDADDR_BREDR)) {
			conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
					    HCI_ROLE_SLAVE);
			if (!conn) {
				bt_dev_err(hdev, "no memory for new conn");
				goto unlock;
			}
		} else {
			if (ev->link_type != SCO_LINK)
				goto unlock;

			conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK,
						       &ev->bdaddr);
			if (!conn)
				goto unlock;

			conn->type = SCO_LINK;
		}
	}

	/* The HCI_Connection_Complete event is only sent once per connection.
	 * Processing it more than once per connection can corrupt kernel memory.
	 *
	 * As the connection handle is set here for the first time, it indicates
	 * whether the connection is already set up.
	 */
	if (conn->handle != HCI_CONN_HANDLE_UNSET) {
		bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
		goto unlock;
	}

	if (!status) {
		conn->handle = __le16_to_cpu(ev->handle);
		if (conn->handle > HCI_CONN_HANDLE_MAX) {
			bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x",
				   conn->handle, HCI_CONN_HANDLE_MAX);
			status = HCI_ERROR_INVALID_PARAMETERS;
			goto done;
		}

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);

		if (test_bit(HCI_AUTH, &hdev->flags))
			set_bit(HCI_CONN_AUTH, &conn->flags);

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);

			hci_update_scan(hdev);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}
	}

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

done:
	if (status) {
		hci_conn_failed(conn, status);
	} else if (ev->link_type == SCO_LINK) {
		switch (conn->setting & SCO_AIRMODE_MASK) {
		case SCO_AIRMODE_CVSD:
			if (hdev->notify)
				hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
			break;
		}

		hci_connect_cfm(conn, status);
	}

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}

static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct hci_cp_reject_conn_req cp;

	bacpy(&cp.bdaddr, bdaddr);
	cp.reason = HCI_ERROR_REJ_BAD_ADDR;
	hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
}
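
/* HCI_Connection_Request: accept, defer to the upper layer, or reject
 * an incoming BR/EDR connection based on the link mode, the reject
 * list and the mgmt connectable policy.
 */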
static void hci_conn_request_evt(struct hci_dev *hdev, void *data,
				 struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = data;
	int mask = hdev->link_mode;
	struct inquiry_entry *ie;
	struct hci_conn *conn;
	__u8 flags = 0;

	bt_dev_dbg(hdev, "bdaddr %pMR type 0x%x", &ev->bdaddr, ev->link_type);

	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
				      &flags);

	if (!(mask & HCI_LM_ACCEPT)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	hci_dev_lock(hdev);

	if (hci_bdaddr_list_lookup(&hdev->reject_list, &ev->bdaddr,
				   BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		goto unlock;
	}

	/* Require HCI_CONNECTABLE or an accept list entry to accept the
	 * connection. These features are only touched through mgmt so
	 * only do the checks if HCI_MGMT is set.
	 */
	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
	    !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
	    !hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr,
					       BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		goto unlock;
	}

	/* Connection accepted */

	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		memcpy(ie->data.dev_class, ev->dev_class, 3);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
				       &ev->bdaddr);
	if (!conn) {
		conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
				    HCI_ROLE_SLAVE);
		if (!conn) {
			bt_dev_err(hdev, "no memory for new connection");
			goto unlock;
		}
	}

	memcpy(conn->dev_class, ev->dev_class, 3);

	hci_dev_unlock(hdev);

	if (ev->link_type == ACL_LINK ||
	    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
		struct hci_cp_accept_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);

		if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
			cp.role = 0x00; /* Become central */
		else
			cp.role = 0x01; /* Remain peripheral */

		hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
	} else if (!(flags & HCI_PROTO_DEFER)) {
		struct hci_cp_accept_sync_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.pkt_type = cpu_to_le16(conn->pkt_type);

		cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.max_latency    = cpu_to_le16(0xffff);
		cp.content_format = cpu_to_le16(hdev->voice_setting);
		cp.retrans_effort = 0xff;

		hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
			     &cp);
	} else {
		conn->state = BT_CONNECT2;
		hci_connect_cfm(conn, 0);
	}

	return;
unlock:
	hci_dev_unlock(hdev);
}

static u8 hci_to_mgmt_reason(u8 err)
{
	switch (err) {
	case HCI_ERROR_CONNECTION_TIMEOUT:
		return MGMT_DEV_DISCONN_TIMEOUT;
	case HCI_ERROR_REMOTE_USER_TERM:
	case HCI_ERROR_REMOTE_LOW_RESOURCES:
	case HCI_ERROR_REMOTE_POWER_OFF:
		return MGMT_DEV_DISCONN_REMOTE;
	case HCI_ERROR_LOCAL_HOST_TERM:
		return MGMT_DEV_DISCONN_LOCAL_HOST;
	default:
		return MGMT_DEV_DISCONN_UNKNOWN;
	}
}
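
/* HCI_Disconnection_Complete: map the HCI reason onto an mgmt
 * disconnect reason, requeue auto-connectable LE devices for
 * reconnection, re-enable advertising where needed and delete the
 * connection object.
 */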
static void hci_disconn_complete_evt(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = data;
	u8 reason;
	struct hci_conn_params *params;
	struct hci_conn *conn;
	bool mgmt_connected;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->status) {
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, ev->status);
		goto unlock;
	}

	conn->state = BT_CLOSED;

	mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);

	if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
		reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
	else
		reason = hci_to_mgmt_reason(ev->reason);

	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
				 reason, mgmt_connected);

	if (conn->type == ACL_LINK) {
		if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
			hci_remove_link_key(hdev, &conn->dst);

		hci_update_scan(hdev);
	}

	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_LINK_LOSS:
			if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
				break;
			fallthrough;

		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			list_del_init(&params->action);
			list_add(&params->action, &hdev->pend_le_conns);
			hci_update_passive_scan(hdev);
			break;

		default:
			break;
		}
	}

	hci_disconn_cfm(conn, ev->reason);

	/* Re-enable advertising if necessary, since it might
	 * have been disabled by the connection. From the
	 * HCI_LE_Set_Advertise_Enable command description in
	 * the core specification (v4.0):
	 * "The Controller shall continue advertising until the Host
	 * issues an LE_Set_Advertise_Enable command with
	 * Advertising_Enable set to 0x00 (Advertising is disabled)
	 * or until a connection is created or until the Advertising
	 * is timed out due to Directed Advertising."
	 */
	if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
		hdev->cur_adv_instance = conn->adv_instance;
		hci_enable_advertising(hdev);
	}

	hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}

static void hci_auth_complete_evt(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		if (!hci_conn_ssp_enabled(conn) &&
		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
			bt_dev_info(hdev, "re-auth of legacy device is not possible.");
		} else {
			set_bit(HCI_CONN_AUTH, &conn->flags);
			conn->sec_level = conn->pending_sec_level;
		}
	} else {
		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		mgmt_auth_failed(conn, ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
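
/* HCI_Remote_Name_Request_Complete: feed the resolved name (or the
 * failure) into the discovery logic, then start authentication on
 * outgoing connections that still require it.
 */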
static void hci_remote_name_evt(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto check_auth;

	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}

static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status,
				       u16 opcode, struct sk_buff *skb)
{
	const struct hci_rp_read_enc_key_size *rp;
	struct hci_conn *conn;
	u16 handle;

	BT_DBG("%s status 0x%02x", hdev->name, status);

	if (!skb || skb->len < sizeof(*rp)) {
		bt_dev_err(hdev, "invalid read key size response");
		return;
	}

	rp = (void *)skb->data;
	handle = le16_to_cpu(rp->handle);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn)
		goto unlock;

	/* While unexpected, the read_enc_key_size command may fail. The most
	 * secure approach is to then assume the key size is 0 to force a
	 * disconnection.
	 */
	if (rp->status) {
		bt_dev_err(hdev, "failed to read key size for handle %u",
			   handle);
		conn->enc_key_size = 0;
	} else {
		conn->enc_key_size = rp->key_size;
	}

	hci_encrypt_cfm(conn, 0);

unlock:
	hci_dev_unlock(hdev);
}

static void hci_encrypt_change_evt(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (ev->encrypt) {
			/* Encryption implies authentication */
			set_bit(HCI_CONN_AUTH, &conn->flags);
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
			conn->sec_level = conn->pending_sec_level;

			/* P-256 authentication key implies FIPS */
			if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
				set_bit(HCI_CONN_FIPS, &conn->flags);

			if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
			    conn->type == LE_LINK)
				set_bit(HCI_CONN_AES_CCM, &conn->flags);
		} else {
			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}
	}

	/* We should disregard the current RPA and generate a new one
	 * whenever the encryption procedure fails.
	 */
	if (ev->status && conn->type == LE_LINK) {
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
	}

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* Check link security requirements are met */
	if (!hci_conn_check_link_mode(conn))
		ev->status = HCI_ERROR_AUTH_FAILURE;

	if (ev->status && conn->state == BT_CONNECTED) {
		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		/* Notify upper layers so they can cleanup before
		 * disconnecting.
		 */
		hci_encrypt_cfm(conn, ev->status);
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	/* Try reading the encryption key size for encrypted ACL links */
	if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
		struct hci_cp_read_enc_key_size cp;
		struct hci_request req;

		/* Only send HCI_Read_Encryption_Key_Size if the
		 * controller really supports it. If it doesn't, assume
		 * the default size (16).
		 */
		if (!(hdev->commands[20] & 0x10)) {
			conn->enc_key_size = HCI_LINK_KEY_SIZE;
			goto notify;
		}

		hci_req_init(&req, hdev);

		cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp);

		if (hci_req_run_skb(&req, read_enc_key_size_complete)) {
			bt_dev_err(hdev, "sending read key size failed");
			conn->enc_key_size = HCI_LINK_KEY_SIZE;
			goto notify;
		}

		goto unlock;
	}

	/* Set the default Authenticated Payload Timeout after
	 * an LE Link is established. As per Core Spec v5.0, Vol 2, Part B
	 * Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be
	 * sent when the link is active and Encryption is enabled, the conn
	 * type can be either LE or ACL and controller must support LMP Ping.
	 * Ensure for AES-CCM encryption as well.
	 */
	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
	    test_bit(HCI_CONN_AES_CCM, &conn->flags) &&
	    ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) ||
	     (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) {
		struct hci_cp_write_auth_payload_to cp;

		cp.handle = cpu_to_le16(conn->handle);
		cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
		hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,
			     sizeof(cp), &cp);
	}

notify:
	hci_encrypt_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);
}

static void hci_change_link_key_complete_evt(struct hci_dev *hdev, void *data,
					     struct sk_buff *skb)
{
	struct hci_ev_change_link_key_complete *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		if (!ev->status)
			set_bit(HCI_CONN_SECURE, &conn->flags);

		clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);

		hci_key_change_cfm(conn, ev->status);
	}

	hci_dev_unlock(hdev);
}

static void hci_remote_features_evt(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		memcpy(conn->features[0], ev->features, 8);

	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status && lmp_ext_feat_capable(hdev) &&
	    lmp_ext_feat_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
			     sizeof(cp), &cp);
		goto unlock;
	}

	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, NULL, 0);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}

static inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev, u8 ncmd)
{
	cancel_delayed_work(&hdev->cmd_timer);

	if (!test_bit(HCI_RESET, &hdev->flags)) {
		if (ncmd) {
			cancel_delayed_work(&hdev->ncmd_timer);
			atomic_set(&hdev->cmd_cnt, 1);
		} else {
			if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
				schedule_delayed_work(&hdev->ncmd_timer,
						      HCI_NCMD_TIMEOUT);
		}
	}
}
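
/* LE Read Buffer Size v2 reports the ISO buffer geometry in addition
 * to the LE ACL one; the available-credit counters start out at the
 * reported maximums.
 */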
static u8 hci_cc_le_read_buffer_size_v2(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_buffer_size_v2 *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->le_mtu = __le16_to_cpu(rp->acl_mtu);
	hdev->le_pkts = rp->acl_max_pkt;
	hdev->iso_mtu = __le16_to_cpu(rp->iso_mtu);
	hdev->iso_pkts = rp->iso_max_pkt;

	hdev->le_cnt = hdev->le_pkts;
	hdev->iso_cnt = hdev->iso_pkts;

	BT_DBG("%s acl mtu %d:%d iso mtu %d:%d", hdev->name, hdev->le_mtu,
	       hdev->le_pkts, hdev->iso_mtu, hdev->iso_pkts);

	return rp->status;
}

static u8 hci_cc_le_set_cig_params(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_le_set_cig_params *rp = data;
	struct hci_conn *conn;
	int i = 0;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (rp->status) {
		while ((conn = hci_conn_hash_lookup_cig(hdev, rp->cig_id))) {
			conn->state = BT_CLOSED;
			hci_connect_cfm(conn, rp->status);
			hci_conn_del(conn);
		}
		goto unlock;
	}

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
		if (conn->type != ISO_LINK || conn->iso_qos.cig != rp->cig_id ||
		    conn->state == BT_CONNECTED)
			continue;

		conn->handle = __le16_to_cpu(rp->handle[i++]);

		bt_dev_dbg(hdev, "%p handle 0x%4.4x link %p", conn,
			   conn->handle, conn->link);

		/* Create CIS if LE is already connected */
		if (conn->link && conn->link->state == BT_CONNECTED)
			hci_le_create_cis(conn->link);

		if (i == rp->num_handles)
			break;
	}

	rcu_read_unlock();

unlock:
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_setup_iso_path(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_le_setup_iso_path *rp = data;
	struct hci_cp_le_setup_iso_path *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SETUP_ISO_PATH);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (!conn)
		goto unlock;

	if (rp->status) {
		hci_connect_cfm(conn, rp->status);
		hci_conn_del(conn);
		goto unlock;
	}

	switch (cp->direction) {
	/* Input (Host to Controller) */
	case 0x00:
		/* Only confirm connection if output only */
		if (conn->iso_qos.out.sdu && !conn->iso_qos.in.sdu)
			hci_connect_cfm(conn, rp->status);
		break;
	/* Output (Controller to Host) */
	case 0x01:
		/* Confirm connection since conn->iso_qos is always configured
		 * last.
		 */
		hci_connect_cfm(conn, rp->status);
		break;
	}

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}

static void hci_cs_le_create_big(struct hci_dev *hdev, u8 status)
{
	bt_dev_dbg(hdev, "status 0x%2.2x", status);
}

static u8 hci_cc_set_per_adv_param(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_per_adv_params *cp;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_PARAMS);
	if (!cp)
		return rp->status;

	/* TODO: set the conn state */
	return rp->status;
}

static u8 hci_cc_le_set_per_adv_enable(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u8 *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (*sent)
		hci_dev_set_flag(hdev, HCI_LE_PER_ADV);
	else
		hci_dev_clear_flag(hdev, HCI_LE_PER_ADV);

	hci_dev_unlock(hdev);

	return rp->status;
}

#define HCI_CC_VL(_op, _func, _min, _max) \
{ \
	.op = _op, \
	.func = _func, \
	.min_len = _min, \
	.max_len = _max, \
}

#define HCI_CC(_op, _func, _len) \
	HCI_CC_VL(_op, _func, _len, _len)

#define HCI_CC_STATUS(_op, _func) \
	HCI_CC(_op, _func, sizeof(struct hci_ev_status))
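
/* Dispatch table for Command Complete events: one entry per opcode,
 * with the reply length bounds that hci_cc_func() checks before the
 * handler runs.
 */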
static const struct hci_cc {
	u16 op;
	u8 (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
	u16 min_len;
	u16 max_len;
} hci_cc_table[] = {
	HCI_CC_STATUS(HCI_OP_INQUIRY_CANCEL, hci_cc_inquiry_cancel),
	HCI_CC_STATUS(HCI_OP_PERIODIC_INQ, hci_cc_periodic_inq),
	HCI_CC_STATUS(HCI_OP_EXIT_PERIODIC_INQ, hci_cc_exit_periodic_inq),
	HCI_CC_STATUS(HCI_OP_REMOTE_NAME_REQ_CANCEL,
		      hci_cc_remote_name_req_cancel),
	HCI_CC(HCI_OP_ROLE_DISCOVERY, hci_cc_role_discovery,
	       sizeof(struct hci_rp_role_discovery)),
	HCI_CC(HCI_OP_READ_LINK_POLICY, hci_cc_read_link_policy,
	       sizeof(struct hci_rp_read_link_policy)),
	HCI_CC(HCI_OP_WRITE_LINK_POLICY, hci_cc_write_link_policy,
	       sizeof(struct hci_rp_write_link_policy)),
	HCI_CC(HCI_OP_READ_DEF_LINK_POLICY, hci_cc_read_def_link_policy,
	       sizeof(struct hci_rp_read_def_link_policy)),
	HCI_CC_STATUS(HCI_OP_WRITE_DEF_LINK_POLICY,
		      hci_cc_write_def_link_policy),
	HCI_CC_STATUS(HCI_OP_RESET, hci_cc_reset),
	HCI_CC(HCI_OP_READ_STORED_LINK_KEY, hci_cc_read_stored_link_key,
	       sizeof(struct hci_rp_read_stored_link_key)),
	HCI_CC(HCI_OP_DELETE_STORED_LINK_KEY, hci_cc_delete_stored_link_key,
	       sizeof(struct hci_rp_delete_stored_link_key)),
	HCI_CC_STATUS(HCI_OP_WRITE_LOCAL_NAME, hci_cc_write_local_name),
	HCI_CC(HCI_OP_READ_LOCAL_NAME, hci_cc_read_local_name,
	       sizeof(struct hci_rp_read_local_name)),
	HCI_CC_STATUS(HCI_OP_WRITE_AUTH_ENABLE, hci_cc_write_auth_enable),
	HCI_CC_STATUS(HCI_OP_WRITE_ENCRYPT_MODE, hci_cc_write_encrypt_mode),
	HCI_CC_STATUS(HCI_OP_WRITE_SCAN_ENABLE, hci_cc_write_scan_enable),
	HCI_CC_STATUS(HCI_OP_SET_EVENT_FLT, hci_cc_set_event_filter),
	HCI_CC(HCI_OP_READ_CLASS_OF_DEV, hci_cc_read_class_of_dev,
	       sizeof(struct hci_rp_read_class_of_dev)),
	HCI_CC_STATUS(HCI_OP_WRITE_CLASS_OF_DEV, hci_cc_write_class_of_dev),
	HCI_CC(HCI_OP_READ_VOICE_SETTING, hci_cc_read_voice_setting,
	       sizeof(struct hci_rp_read_voice_setting)),
	HCI_CC_STATUS(HCI_OP_WRITE_VOICE_SETTING, hci_cc_write_voice_setting),
	HCI_CC(HCI_OP_READ_NUM_SUPPORTED_IAC, hci_cc_read_num_supported_iac,
	       sizeof(struct hci_rp_read_num_supported_iac)),
	HCI_CC_STATUS(HCI_OP_WRITE_SSP_MODE, hci_cc_write_ssp_mode),
	HCI_CC_STATUS(HCI_OP_WRITE_SC_SUPPORT, hci_cc_write_sc_support),
	HCI_CC(HCI_OP_READ_AUTH_PAYLOAD_TO, hci_cc_read_auth_payload_timeout,
	       sizeof(struct hci_rp_read_auth_payload_to)),
	HCI_CC(HCI_OP_WRITE_AUTH_PAYLOAD_TO, hci_cc_write_auth_payload_timeout,
	       sizeof(struct hci_rp_write_auth_payload_to)),
	HCI_CC(HCI_OP_READ_LOCAL_VERSION, hci_cc_read_local_version,
	       sizeof(struct hci_rp_read_local_version)),
	HCI_CC(HCI_OP_READ_LOCAL_COMMANDS, hci_cc_read_local_commands,
	       sizeof(struct hci_rp_read_local_commands)),
	HCI_CC(HCI_OP_READ_LOCAL_FEATURES, hci_cc_read_local_features,
	       sizeof(struct hci_rp_read_local_features)),
	HCI_CC(HCI_OP_READ_LOCAL_EXT_FEATURES, hci_cc_read_local_ext_features,
	       sizeof(struct hci_rp_read_local_ext_features)),
	HCI_CC(HCI_OP_READ_BUFFER_SIZE, hci_cc_read_buffer_size,
	       sizeof(struct hci_rp_read_buffer_size)),
	HCI_CC(HCI_OP_READ_BD_ADDR, hci_cc_read_bd_addr,
	       sizeof(struct hci_rp_read_bd_addr)),
	HCI_CC(HCI_OP_READ_LOCAL_PAIRING_OPTS, hci_cc_read_local_pairing_opts,
	       sizeof(struct hci_rp_read_local_pairing_opts)),
	HCI_CC(HCI_OP_READ_PAGE_SCAN_ACTIVITY, hci_cc_read_page_scan_activity,
	       sizeof(struct hci_rp_read_page_scan_activity)),
	HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
		      hci_cc_write_page_scan_activity),
	HCI_CC(HCI_OP_READ_PAGE_SCAN_TYPE, hci_cc_read_page_scan_type,
	       sizeof(struct hci_rp_read_page_scan_type)),
	HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_TYPE, hci_cc_write_page_scan_type),
	HCI_CC(HCI_OP_READ_DATA_BLOCK_SIZE, hci_cc_read_data_block_size,
	       sizeof(struct hci_rp_read_data_block_size)),
	HCI_CC(HCI_OP_READ_FLOW_CONTROL_MODE, hci_cc_read_flow_control_mode,
	       sizeof(struct hci_rp_read_flow_control_mode)),
	HCI_CC(HCI_OP_READ_LOCAL_AMP_INFO, hci_cc_read_local_amp_info,
	       sizeof(struct hci_rp_read_local_amp_info)),
	HCI_CC(HCI_OP_READ_CLOCK, hci_cc_read_clock,
	       sizeof(struct hci_rp_read_clock)),
	HCI_CC(HCI_OP_READ_INQ_RSP_TX_POWER, hci_cc_read_inq_rsp_tx_power,
	       sizeof(struct hci_rp_read_inq_rsp_tx_power)),
	HCI_CC(HCI_OP_READ_DEF_ERR_DATA_REPORTING,
	       hci_cc_read_def_err_data_reporting,
	       sizeof(struct hci_rp_read_def_err_data_reporting)),
	HCI_CC_STATUS(HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
		      hci_cc_write_def_err_data_reporting),
	HCI_CC(HCI_OP_PIN_CODE_REPLY, hci_cc_pin_code_reply,
	       sizeof(struct hci_rp_pin_code_reply)),
	HCI_CC(HCI_OP_PIN_CODE_NEG_REPLY, hci_cc_pin_code_neg_reply,
	       sizeof(struct hci_rp_pin_code_neg_reply)),
	HCI_CC(HCI_OP_READ_LOCAL_OOB_DATA, hci_cc_read_local_oob_data,
	       sizeof(struct hci_rp_read_local_oob_data)),
	HCI_CC(HCI_OP_READ_LOCAL_OOB_EXT_DATA, hci_cc_read_local_oob_ext_data,
	       sizeof(struct hci_rp_read_local_oob_ext_data)),
	HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE, hci_cc_le_read_buffer_size,
	       sizeof(struct hci_rp_le_read_buffer_size)),
	HCI_CC(HCI_OP_LE_READ_LOCAL_FEATURES, hci_cc_le_read_local_features,
	       sizeof(struct hci_rp_le_read_local_features)),
	HCI_CC(HCI_OP_LE_READ_ADV_TX_POWER, hci_cc_le_read_adv_tx_power,
	       sizeof(struct hci_rp_le_read_adv_tx_power)),
	HCI_CC(HCI_OP_USER_CONFIRM_REPLY, hci_cc_user_confirm_reply,
	       sizeof(struct hci_rp_user_confirm_reply)),
	HCI_CC(HCI_OP_USER_CONFIRM_NEG_REPLY, hci_cc_user_confirm_neg_reply,
	       sizeof(struct hci_rp_user_confirm_reply)),
	HCI_CC(HCI_OP_USER_PASSKEY_REPLY, hci_cc_user_passkey_reply,
	       sizeof(struct hci_rp_user_confirm_reply)),
	HCI_CC(HCI_OP_USER_PASSKEY_NEG_REPLY, hci_cc_user_passkey_neg_reply,
	       sizeof(struct hci_rp_user_confirm_reply)),
	HCI_CC_STATUS(HCI_OP_LE_SET_RANDOM_ADDR, hci_cc_le_set_random_addr),
	HCI_CC_STATUS(HCI_OP_LE_SET_ADV_ENABLE, hci_cc_le_set_adv_enable),
	HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_PARAM, hci_cc_le_set_scan_param),
	HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_ENABLE, hci_cc_le_set_scan_enable),
	HCI_CC(HCI_OP_LE_READ_ACCEPT_LIST_SIZE,
	       hci_cc_le_read_accept_list_size,
	       sizeof(struct hci_rp_le_read_accept_list_size)),
	HCI_CC_STATUS(HCI_OP_LE_CLEAR_ACCEPT_LIST, hci_cc_le_clear_accept_list),
	HCI_CC_STATUS(HCI_OP_LE_ADD_TO_ACCEPT_LIST,
		      hci_cc_le_add_to_accept_list),
	HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_ACCEPT_LIST,
		      hci_cc_le_del_from_accept_list),
	HCI_CC(HCI_OP_LE_READ_SUPPORTED_STATES, hci_cc_le_read_supported_states,
	       sizeof(struct hci_rp_le_read_supported_states)),
	HCI_CC(HCI_OP_LE_READ_DEF_DATA_LEN, hci_cc_le_read_def_data_len,
	       sizeof(struct hci_rp_le_read_def_data_len)),
	HCI_CC_STATUS(HCI_OP_LE_WRITE_DEF_DATA_LEN,
		      hci_cc_le_write_def_data_len),
	HCI_CC_STATUS(HCI_OP_LE_ADD_TO_RESOLV_LIST,
		      hci_cc_le_add_to_resolv_list),
	HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_RESOLV_LIST,
		      hci_cc_le_del_from_resolv_list),
	HCI_CC_STATUS(HCI_OP_LE_CLEAR_RESOLV_LIST,
		      hci_cc_le_clear_resolv_list),
	HCI_CC(HCI_OP_LE_READ_RESOLV_LIST_SIZE, hci_cc_le_read_resolv_list_size,
	       sizeof(struct hci_rp_le_read_resolv_list_size)),
	HCI_CC_STATUS(HCI_OP_LE_SET_ADDR_RESOLV_ENABLE,
		      hci_cc_le_set_addr_resolution_enable),
	HCI_CC(HCI_OP_LE_READ_MAX_DATA_LEN, hci_cc_le_read_max_data_len,
	       sizeof(struct hci_rp_le_read_max_data_len)),
	HCI_CC_STATUS(HCI_OP_WRITE_LE_HOST_SUPPORTED,
		      hci_cc_write_le_host_supported),
	HCI_CC_STATUS(HCI_OP_LE_SET_ADV_PARAM, hci_cc_set_adv_param),
	HCI_CC(HCI_OP_READ_RSSI, hci_cc_read_rssi,
	       sizeof(struct hci_rp_read_rssi)),
	HCI_CC(HCI_OP_READ_TX_POWER, hci_cc_read_tx_power,
	       sizeof(struct hci_rp_read_tx_power)),
	HCI_CC_STATUS(HCI_OP_WRITE_SSP_DEBUG_MODE, hci_cc_write_ssp_debug_mode),
	HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_PARAMS,
		      hci_cc_le_set_ext_scan_param),
	HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_ENABLE,
		      hci_cc_le_set_ext_scan_enable),
	HCI_CC_STATUS(HCI_OP_LE_SET_DEFAULT_PHY, hci_cc_le_set_default_phy),
	HCI_CC(HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
	       hci_cc_le_read_num_adv_sets,
	       sizeof(struct hci_rp_le_read_num_supported_adv_sets)),
	HCI_CC(HCI_OP_LE_SET_EXT_ADV_PARAMS, hci_cc_set_ext_adv_param,
	       sizeof(struct hci_rp_le_set_ext_adv_params)),
	HCI_CC_STATUS(HCI_OP_LE_SET_EXT_ADV_ENABLE,
		      hci_cc_le_set_ext_adv_enable),
	HCI_CC_STATUS(HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
		      hci_cc_le_set_adv_set_random_addr),
	HCI_CC_STATUS(HCI_OP_LE_REMOVE_ADV_SET, hci_cc_le_remove_adv_set),
	HCI_CC_STATUS(HCI_OP_LE_CLEAR_ADV_SETS, hci_cc_le_clear_adv_sets),
	HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_PARAMS, hci_cc_set_per_adv_param),
	HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_ENABLE,
		      hci_cc_le_set_per_adv_enable),
	HCI_CC(HCI_OP_LE_READ_TRANSMIT_POWER, hci_cc_le_read_transmit_power,
	       sizeof(struct hci_rp_le_read_transmit_power)),
	HCI_CC_STATUS(HCI_OP_LE_SET_PRIVACY_MODE, hci_cc_le_set_privacy_mode),
	HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE_V2, hci_cc_le_read_buffer_size_v2,
	       sizeof(struct hci_rp_le_read_buffer_size_v2)),
	HCI_CC_VL(HCI_OP_LE_SET_CIG_PARAMS, hci_cc_le_set_cig_params,
		  sizeof(struct hci_rp_le_set_cig_params), HCI_MAX_EVENT_SIZE),
	HCI_CC(HCI_OP_LE_SETUP_ISO_PATH, hci_cc_le_setup_iso_path,
	       sizeof(struct hci_rp_le_setup_iso_path)),
};

static u8 hci_cc_func(struct hci_dev *hdev, const struct hci_cc *cc,
		      struct sk_buff *skb)
{
	void *data;

	if (skb->len < cc->min_len) {
		bt_dev_err(hdev, "unexpected cc 0x%4.4x length: %u < %u",
			   cc->op, skb->len, cc->min_len);
		return HCI_ERROR_UNSPECIFIED;
	}

	/* Just warn if the length is over max_len; it may still be possible
	 * to partially parse the cc, so leave it to the callback to decide
	 * whether that is acceptable.
	 */
	if (skb->len > cc->max_len)
		bt_dev_warn(hdev, "unexpected cc 0x%4.4x length: %u > %u",
			    cc->op, skb->len, cc->max_len);

	data = hci_cc_skb_pull(hdev, skb, cc->op, cc->min_len);
	if (!data)
		return HCI_ERROR_UNSPECIFIED;

	return cc->func(hdev, data, skb);
}

static void hci_cmd_complete_evt(struct hci_dev *hdev, void *data,
				 struct sk_buff *skb, u16 *opcode, u8 *status,
				 hci_req_complete_t *req_complete,
				 hci_req_complete_skb_t *req_complete_skb)
{
	struct hci_ev_cmd_complete *ev = data;
	int i;

	*opcode = __le16_to_cpu(ev->opcode);

	bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);

	for (i = 0; i < ARRAY_SIZE(hci_cc_table); i++) {
		if (hci_cc_table[i].op == *opcode) {
			*status = hci_cc_func(hdev, &hci_cc_table[i], skb);
			break;
		}
	}

	handle_cmd_cnt_and_timer(hdev, ev->ncmd);

	hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
			     req_complete_skb);

	if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
		bt_dev_err(hdev,
			   "unexpected event for opcode 0x%4.4x", *opcode);
		return;
	}

	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
		queue_work(hdev->workqueue, &hdev->cmd_work);
}

static void hci_cs_le_create_cis(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_create_cis *cp;
	int i;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CIS);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	/* Remove connection if command failed */
	for (i = 0; cp->num_cis; cp->num_cis--, i++) {
		struct hci_conn *conn;
		u16 handle;

		handle = __le16_to_cpu(cp->cis[i].cis_handle);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (conn) {
			conn->state = BT_CLOSED;
			hci_connect_cfm(conn, status);
			hci_conn_del(conn);
		}
	}

	hci_dev_unlock(hdev);
}

#define HCI_CS(_op, _func) \
{ \
	.op = _op, \
	.func = _func, \
}
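
/* Dispatch table for Command Status events: these carry only a status
 * byte, so each handler recovers the parameters of the original
 * command via hci_sent_cmd_data().
 */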
HCI_CS(HCI_OP_CREATE_CONN, hci_cs_create_conn), 4243 HCI_CS(HCI_OP_DISCONNECT, hci_cs_disconnect), 4244 HCI_CS(HCI_OP_ADD_SCO, hci_cs_add_sco), 4245 HCI_CS(HCI_OP_AUTH_REQUESTED, hci_cs_auth_requested), 4246 HCI_CS(HCI_OP_SET_CONN_ENCRYPT, hci_cs_set_conn_encrypt), 4247 HCI_CS(HCI_OP_REMOTE_NAME_REQ, hci_cs_remote_name_req), 4248 HCI_CS(HCI_OP_READ_REMOTE_FEATURES, hci_cs_read_remote_features), 4249 HCI_CS(HCI_OP_READ_REMOTE_EXT_FEATURES, 4250 hci_cs_read_remote_ext_features), 4251 HCI_CS(HCI_OP_SETUP_SYNC_CONN, hci_cs_setup_sync_conn), 4252 HCI_CS(HCI_OP_ENHANCED_SETUP_SYNC_CONN, 4253 hci_cs_enhanced_setup_sync_conn), 4254 HCI_CS(HCI_OP_SNIFF_MODE, hci_cs_sniff_mode), 4255 HCI_CS(HCI_OP_EXIT_SNIFF_MODE, hci_cs_exit_sniff_mode), 4256 HCI_CS(HCI_OP_SWITCH_ROLE, hci_cs_switch_role), 4257 HCI_CS(HCI_OP_LE_CREATE_CONN, hci_cs_le_create_conn), 4258 HCI_CS(HCI_OP_LE_READ_REMOTE_FEATURES, hci_cs_le_read_remote_features), 4259 HCI_CS(HCI_OP_LE_START_ENC, hci_cs_le_start_enc), 4260 HCI_CS(HCI_OP_LE_EXT_CREATE_CONN, hci_cs_le_ext_create_conn), 4261 HCI_CS(HCI_OP_LE_CREATE_CIS, hci_cs_le_create_cis), 4262 HCI_CS(HCI_OP_LE_CREATE_BIG, hci_cs_le_create_big), 4263 }; 4264 4265 static void hci_cmd_status_evt(struct hci_dev *hdev, void *data, 4266 struct sk_buff *skb, u16 *opcode, u8 *status, 4267 hci_req_complete_t *req_complete, 4268 hci_req_complete_skb_t *req_complete_skb) 4269 { 4270 struct hci_ev_cmd_status *ev = data; 4271 int i; 4272 4273 *opcode = __le16_to_cpu(ev->opcode); 4274 *status = ev->status; 4275 4276 bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode); 4277 4278 for (i = 0; i < ARRAY_SIZE(hci_cs_table); i++) { 4279 if (hci_cs_table[i].op == *opcode) { 4280 hci_cs_table[i].func(hdev, ev->status); 4281 break; 4282 } 4283 } 4284 4285 handle_cmd_cnt_and_timer(hdev, ev->ncmd); 4286 4287 /* Indicate request completion if the command failed. Also, if 4288 * we're not waiting for a special event and we get a success 4289 * command status, we should try to flag the request as completed 4290 * (since for commands of this kind there will not be a command 4291 * complete event).
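* (Commands like HCI_OP_INQUIRY or HCI_OP_CREATE_CONN fall into this
* category: the controller acknowledges them with a Command Status and
* reports the actual result in a later, dedicated event.)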
4292 */ 4293 if (ev->status || (hdev->sent_cmd && !hci_skb_event(hdev->sent_cmd))) { 4294 hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete, 4295 req_complete_skb); 4296 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) { 4297 bt_dev_err(hdev, "unexpected event for opcode 0x%4.4x", 4298 *opcode); 4299 return; 4300 } 4301 } 4302 4303 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q)) 4304 queue_work(hdev->workqueue, &hdev->cmd_work); 4305 } 4306 4307 static void hci_hardware_error_evt(struct hci_dev *hdev, void *data, 4308 struct sk_buff *skb) 4309 { 4310 struct hci_ev_hardware_error *ev = data; 4311 4312 bt_dev_dbg(hdev, "code 0x%2.2x", ev->code); 4313 4314 hdev->hw_error_code = ev->code; 4315 4316 queue_work(hdev->req_workqueue, &hdev->error_reset); 4317 } 4318 4319 static void hci_role_change_evt(struct hci_dev *hdev, void *data, 4320 struct sk_buff *skb) 4321 { 4322 struct hci_ev_role_change *ev = data; 4323 struct hci_conn *conn; 4324 4325 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 4326 4327 hci_dev_lock(hdev); 4328 4329 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 4330 if (conn) { 4331 if (!ev->status) 4332 conn->role = ev->role; 4333 4334 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags); 4335 4336 hci_role_switch_cfm(conn, ev->status, ev->role); 4337 } 4338 4339 hci_dev_unlock(hdev); 4340 } 4341 4342 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data, 4343 struct sk_buff *skb) 4344 { 4345 struct hci_ev_num_comp_pkts *ev = data; 4346 int i; 4347 4348 if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_PKTS, 4349 flex_array_size(ev, handles, ev->num))) 4350 return; 4351 4352 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) { 4353 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode); 4354 return; 4355 } 4356 4357 bt_dev_dbg(hdev, "num %d", ev->num); 4358 4359 for (i = 0; i < ev->num; i++) { 4360 struct hci_comp_pkts_info *info = &ev->handles[i]; 4361 struct hci_conn *conn; 4362 __u16 handle, count; 4363 4364 handle = __le16_to_cpu(info->handle); 4365 count = __le16_to_cpu(info->count); 4366 4367 conn = hci_conn_hash_lookup_handle(hdev, handle); 4368 if (!conn) 4369 continue; 4370 4371 conn->sent -= count; 4372 4373 switch (conn->type) { 4374 case ACL_LINK: 4375 hdev->acl_cnt += count; 4376 if (hdev->acl_cnt > hdev->acl_pkts) 4377 hdev->acl_cnt = hdev->acl_pkts; 4378 break; 4379 4380 case LE_LINK: 4381 if (hdev->le_pkts) { 4382 hdev->le_cnt += count; 4383 if (hdev->le_cnt > hdev->le_pkts) 4384 hdev->le_cnt = hdev->le_pkts; 4385 } else { 4386 hdev->acl_cnt += count; 4387 if (hdev->acl_cnt > hdev->acl_pkts) 4388 hdev->acl_cnt = hdev->acl_pkts; 4389 } 4390 break; 4391 4392 case SCO_LINK: 4393 hdev->sco_cnt += count; 4394 if (hdev->sco_cnt > hdev->sco_pkts) 4395 hdev->sco_cnt = hdev->sco_pkts; 4396 break; 4397 4398 case ISO_LINK: 4399 if (hdev->iso_pkts) { 4400 hdev->iso_cnt += count; 4401 if (hdev->iso_cnt > hdev->iso_pkts) 4402 hdev->iso_cnt = hdev->iso_pkts; 4403 } else if (hdev->le_pkts) { 4404 hdev->le_cnt += count; 4405 if (hdev->le_cnt > hdev->le_pkts) 4406 hdev->le_cnt = hdev->le_pkts; 4407 } else { 4408 hdev->acl_cnt += count; 4409 if (hdev->acl_cnt > hdev->acl_pkts) 4410 hdev->acl_cnt = hdev->acl_pkts; 4411 } 4412 break; 4413 4414 default: 4415 bt_dev_err(hdev, "unknown type %d conn %p", 4416 conn->type, conn); 4417 break; 4418 } 4419 } 4420 4421 queue_work(hdev->workqueue, &hdev->tx_work); 4422 } 4423 4424 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev, 4425 __u16 handle) 4426 { 4427 
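/* Note: on a primary controller the handle identifies the connection
 * directly, while on an AMP controller block-based flow control is
 * accounted per logical channel, so the handle is resolved to an
 * hci_chan and its owning connection is returned.
 */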
struct hci_chan *chan; 4428 4429 switch (hdev->dev_type) { 4430 case HCI_PRIMARY: 4431 return hci_conn_hash_lookup_handle(hdev, handle); 4432 case HCI_AMP: 4433 chan = hci_chan_lookup_handle(hdev, handle); 4434 if (chan) 4435 return chan->conn; 4436 break; 4437 default: 4438 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type); 4439 break; 4440 } 4441 4442 return NULL; 4443 } 4444 4445 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, void *data, 4446 struct sk_buff *skb) 4447 { 4448 struct hci_ev_num_comp_blocks *ev = data; 4449 int i; 4450 4451 if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_BLOCKS, 4452 flex_array_size(ev, handles, ev->num_hndl))) 4453 return; 4454 4455 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) { 4456 bt_dev_err(hdev, "wrong event for mode %d", 4457 hdev->flow_ctl_mode); 4458 return; 4459 } 4460 4461 bt_dev_dbg(hdev, "num_blocks %d num_hndl %d", ev->num_blocks, 4462 ev->num_hndl); 4463 4464 for (i = 0; i < ev->num_hndl; i++) { 4465 struct hci_comp_blocks_info *info = &ev->handles[i]; 4466 struct hci_conn *conn = NULL; 4467 __u16 handle, block_count; 4468 4469 handle = __le16_to_cpu(info->handle); 4470 block_count = __le16_to_cpu(info->blocks); 4471 4472 conn = __hci_conn_lookup_handle(hdev, handle); 4473 if (!conn) 4474 continue; 4475 4476 conn->sent -= block_count; 4477 4478 switch (conn->type) { 4479 case ACL_LINK: 4480 case AMP_LINK: 4481 hdev->block_cnt += block_count; 4482 if (hdev->block_cnt > hdev->num_blocks) 4483 hdev->block_cnt = hdev->num_blocks; 4484 break; 4485 4486 default: 4487 bt_dev_err(hdev, "unknown type %d conn %p", 4488 conn->type, conn); 4489 break; 4490 } 4491 } 4492 4493 queue_work(hdev->workqueue, &hdev->tx_work); 4494 } 4495 4496 static void hci_mode_change_evt(struct hci_dev *hdev, void *data, 4497 struct sk_buff *skb) 4498 { 4499 struct hci_ev_mode_change *ev = data; 4500 struct hci_conn *conn; 4501 4502 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 4503 4504 hci_dev_lock(hdev); 4505 4506 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 4507 if (conn) { 4508 conn->mode = ev->mode; 4509 4510 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, 4511 &conn->flags)) { 4512 if (conn->mode == HCI_CM_ACTIVE) 4513 set_bit(HCI_CONN_POWER_SAVE, &conn->flags); 4514 else 4515 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags); 4516 } 4517 4518 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags)) 4519 hci_sco_setup(conn, ev->status); 4520 } 4521 4522 hci_dev_unlock(hdev); 4523 } 4524 4525 static void hci_pin_code_request_evt(struct hci_dev *hdev, void *data, 4526 struct sk_buff *skb) 4527 { 4528 struct hci_ev_pin_code_req *ev = data; 4529 struct hci_conn *conn; 4530 4531 bt_dev_dbg(hdev, ""); 4532 4533 hci_dev_lock(hdev); 4534 4535 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 4536 if (!conn) 4537 goto unlock; 4538 4539 if (conn->state == BT_CONNECTED) { 4540 hci_conn_hold(conn); 4541 conn->disc_timeout = HCI_PAIRING_TIMEOUT; 4542 hci_conn_drop(conn); 4543 } 4544 4545 if (!hci_dev_test_flag(hdev, HCI_BONDABLE) && 4546 !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) { 4547 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY, 4548 sizeof(ev->bdaddr), &ev->bdaddr); 4549 } else if (hci_dev_test_flag(hdev, HCI_MGMT)) { 4550 u8 secure; 4551 4552 if (conn->pending_sec_level == BT_SECURITY_HIGH) 4553 secure = 1; 4554 else 4555 secure = 0; 4556 4557 mgmt_pin_code_request(hdev, &ev->bdaddr, secure); 4558 } 4559 4560 unlock: 4561 hci_dev_unlock(hdev); 4562 } 4563 4564 static void conn_set_key(struct 
hci_conn *conn, u8 key_type, u8 pin_len) 4565 { 4566 if (key_type == HCI_LK_CHANGED_COMBINATION) 4567 return; 4568 4569 conn->pin_length = pin_len; 4570 conn->key_type = key_type; 4571 4572 switch (key_type) { 4573 case HCI_LK_LOCAL_UNIT: 4574 case HCI_LK_REMOTE_UNIT: 4575 case HCI_LK_DEBUG_COMBINATION: 4576 return; 4577 case HCI_LK_COMBINATION: 4578 if (pin_len == 16) 4579 conn->pending_sec_level = BT_SECURITY_HIGH; 4580 else 4581 conn->pending_sec_level = BT_SECURITY_MEDIUM; 4582 break; 4583 case HCI_LK_UNAUTH_COMBINATION_P192: 4584 case HCI_LK_UNAUTH_COMBINATION_P256: 4585 conn->pending_sec_level = BT_SECURITY_MEDIUM; 4586 break; 4587 case HCI_LK_AUTH_COMBINATION_P192: 4588 conn->pending_sec_level = BT_SECURITY_HIGH; 4589 break; 4590 case HCI_LK_AUTH_COMBINATION_P256: 4591 conn->pending_sec_level = BT_SECURITY_FIPS; 4592 break; 4593 } 4594 } 4595 4596 static void hci_link_key_request_evt(struct hci_dev *hdev, void *data, 4597 struct sk_buff *skb) 4598 { 4599 struct hci_ev_link_key_req *ev = data; 4600 struct hci_cp_link_key_reply cp; 4601 struct hci_conn *conn; 4602 struct link_key *key; 4603 4604 bt_dev_dbg(hdev, ""); 4605 4606 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 4607 return; 4608 4609 hci_dev_lock(hdev); 4610 4611 key = hci_find_link_key(hdev, &ev->bdaddr); 4612 if (!key) { 4613 bt_dev_dbg(hdev, "link key not found for %pMR", &ev->bdaddr); 4614 goto not_found; 4615 } 4616 4617 bt_dev_dbg(hdev, "found key type %u for %pMR", key->type, &ev->bdaddr); 4618 4619 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 4620 if (conn) { 4621 clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags); 4622 4623 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 || 4624 key->type == HCI_LK_UNAUTH_COMBINATION_P256) && 4625 conn->auth_type != 0xff && (conn->auth_type & 0x01)) { 4626 bt_dev_dbg(hdev, "ignoring unauthenticated key"); 4627 goto not_found; 4628 } 4629 4630 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 && 4631 (conn->pending_sec_level == BT_SECURITY_HIGH || 4632 conn->pending_sec_level == BT_SECURITY_FIPS)) { 4633 bt_dev_dbg(hdev, "ignoring key unauthenticated for high security"); 4634 goto not_found; 4635 } 4636 4637 conn_set_key(conn, key->type, key->pin_len); 4638 } 4639 4640 bacpy(&cp.bdaddr, &ev->bdaddr); 4641 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE); 4642 4643 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp); 4644 4645 hci_dev_unlock(hdev); 4646 4647 return; 4648 4649 not_found: 4650 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr); 4651 hci_dev_unlock(hdev); 4652 } 4653 4654 static void hci_link_key_notify_evt(struct hci_dev *hdev, void *data, 4655 struct sk_buff *skb) 4656 { 4657 struct hci_ev_link_key_notify *ev = data; 4658 struct hci_conn *conn; 4659 struct link_key *key; 4660 bool persistent; 4661 u8 pin_len = 0; 4662 4663 bt_dev_dbg(hdev, ""); 4664 4665 hci_dev_lock(hdev); 4666 4667 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 4668 if (!conn) 4669 goto unlock; 4670 4671 hci_conn_hold(conn); 4672 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 4673 hci_conn_drop(conn); 4674 4675 set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags); 4676 conn_set_key(conn, ev->key_type, conn->pin_length); 4677 4678 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 4679 goto unlock; 4680 4681 key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key, 4682 ev->key_type, pin_len, &persistent); 4683 if (!key) 4684 goto unlock; 4685 4686 /* Update connection information since adding the key will have 4687 * fixed up the type in the case of changed combination 
keys. 4688 */ 4689 if (ev->key_type == HCI_LK_CHANGED_COMBINATION) 4690 conn_set_key(conn, key->type, key->pin_len); 4691 4692 mgmt_new_link_key(hdev, key, persistent); 4693 4694 /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag 4695 * is set. If it's not set simply remove the key from the kernel 4696 * list (we've still notified user space about it but with 4697 * store_hint being 0). 4698 */ 4699 if (key->type == HCI_LK_DEBUG_COMBINATION && 4700 !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) { 4701 list_del_rcu(&key->list); 4702 kfree_rcu(key, rcu); 4703 goto unlock; 4704 } 4705 4706 if (persistent) 4707 clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags); 4708 else 4709 set_bit(HCI_CONN_FLUSH_KEY, &conn->flags); 4710 4711 unlock: 4712 hci_dev_unlock(hdev); 4713 } 4714 4715 static void hci_clock_offset_evt(struct hci_dev *hdev, void *data, 4716 struct sk_buff *skb) 4717 { 4718 struct hci_ev_clock_offset *ev = data; 4719 struct hci_conn *conn; 4720 4721 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 4722 4723 hci_dev_lock(hdev); 4724 4725 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 4726 if (conn && !ev->status) { 4727 struct inquiry_entry *ie; 4728 4729 ie = hci_inquiry_cache_lookup(hdev, &conn->dst); 4730 if (ie) { 4731 ie->data.clock_offset = ev->clock_offset; 4732 ie->timestamp = jiffies; 4733 } 4734 } 4735 4736 hci_dev_unlock(hdev); 4737 } 4738 4739 static void hci_pkt_type_change_evt(struct hci_dev *hdev, void *data, 4740 struct sk_buff *skb) 4741 { 4742 struct hci_ev_pkt_type_change *ev = data; 4743 struct hci_conn *conn; 4744 4745 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 4746 4747 hci_dev_lock(hdev); 4748 4749 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 4750 if (conn && !ev->status) 4751 conn->pkt_type = __le16_to_cpu(ev->pkt_type); 4752 4753 hci_dev_unlock(hdev); 4754 } 4755 4756 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, void *data, 4757 struct sk_buff *skb) 4758 { 4759 struct hci_ev_pscan_rep_mode *ev = data; 4760 struct inquiry_entry *ie; 4761 4762 bt_dev_dbg(hdev, ""); 4763 4764 hci_dev_lock(hdev); 4765 4766 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr); 4767 if (ie) { 4768 ie->data.pscan_rep_mode = ev->pscan_rep_mode; 4769 ie->timestamp = jiffies; 4770 } 4771 4772 hci_dev_unlock(hdev); 4773 } 4774 4775 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, void *edata, 4776 struct sk_buff *skb) 4777 { 4778 struct hci_ev_inquiry_result_rssi *ev = edata; 4779 struct inquiry_data data; 4780 int i; 4781 4782 bt_dev_dbg(hdev, "num_rsp %d", ev->num); 4783 4784 if (!ev->num) 4785 return; 4786 4787 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) 4788 return; 4789 4790 hci_dev_lock(hdev); 4791 4792 if (skb->len == array_size(ev->num, 4793 sizeof(struct inquiry_info_rssi_pscan))) { 4794 struct inquiry_info_rssi_pscan *info; 4795 4796 for (i = 0; i < ev->num; i++) { 4797 u32 flags; 4798 4799 info = hci_ev_skb_pull(hdev, skb, 4800 HCI_EV_INQUIRY_RESULT_WITH_RSSI, 4801 sizeof(*info)); 4802 if (!info) { 4803 bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x", 4804 HCI_EV_INQUIRY_RESULT_WITH_RSSI); 4805 goto unlock; 4806 } 4807 4808 bacpy(&data.bdaddr, &info->bdaddr); 4809 data.pscan_rep_mode = info->pscan_rep_mode; 4810 data.pscan_period_mode = info->pscan_period_mode; 4811 data.pscan_mode = info->pscan_mode; 4812 memcpy(data.dev_class, info->dev_class, 3); 4813 data.clock_offset = info->clock_offset; 4814 data.rssi = info->rssi; 4815 data.ssp_mode = 0x00; 4816 4817 flags = 
hci_inquiry_cache_update(hdev, &data, false); 4818 4819 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 4820 info->dev_class, info->rssi, 4821 flags, NULL, 0, NULL, 0); 4822 } 4823 } else if (skb->len == array_size(ev->num, 4824 sizeof(struct inquiry_info_rssi))) { 4825 struct inquiry_info_rssi *info; 4826 4827 for (i = 0; i < ev->num; i++) { 4828 u32 flags; 4829 4830 info = hci_ev_skb_pull(hdev, skb, 4831 HCI_EV_INQUIRY_RESULT_WITH_RSSI, 4832 sizeof(*info)); 4833 if (!info) { 4834 bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x", 4835 HCI_EV_INQUIRY_RESULT_WITH_RSSI); 4836 goto unlock; 4837 } 4838 4839 bacpy(&data.bdaddr, &info->bdaddr); 4840 data.pscan_rep_mode = info->pscan_rep_mode; 4841 data.pscan_period_mode = info->pscan_period_mode; 4842 data.pscan_mode = 0x00; 4843 memcpy(data.dev_class, info->dev_class, 3); 4844 data.clock_offset = info->clock_offset; 4845 data.rssi = info->rssi; 4846 data.ssp_mode = 0x00; 4847 4848 flags = hci_inquiry_cache_update(hdev, &data, false); 4849 4850 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 4851 info->dev_class, info->rssi, 4852 flags, NULL, 0, NULL, 0); 4853 } 4854 } else { 4855 bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x", 4856 HCI_EV_INQUIRY_RESULT_WITH_RSSI); 4857 } 4858 unlock: 4859 hci_dev_unlock(hdev); 4860 } 4861 4862 static void hci_remote_ext_features_evt(struct hci_dev *hdev, void *data, 4863 struct sk_buff *skb) 4864 { 4865 struct hci_ev_remote_ext_features *ev = data; 4866 struct hci_conn *conn; 4867 4868 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 4869 4870 hci_dev_lock(hdev); 4871 4872 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 4873 if (!conn) 4874 goto unlock; 4875 4876 if (ev->page < HCI_MAX_PAGES) 4877 memcpy(conn->features[ev->page], ev->features, 8); 4878 4879 if (!ev->status && ev->page == 0x01) { 4880 struct inquiry_entry *ie; 4881 4882 ie = hci_inquiry_cache_lookup(hdev, &conn->dst); 4883 if (ie) 4884 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP); 4885 4886 if (ev->features[0] & LMP_HOST_SSP) { 4887 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags); 4888 } else { 4889 /* It is mandatory by the Bluetooth specification that 4890 * Extended Inquiry Results are only used when Secure 4891 * Simple Pairing is enabled, but some devices violate 4892 * this. 
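* (Extended Inquiry Response and Secure Simple Pairing were introduced
* together in Bluetooth 2.1, which is why the spec ties the use of EIR
* to SSP being enabled.)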
4893 * 4894 * To make these devices work, the internal SSP 4895 * enabled flag needs to be cleared if the remote host 4896 * features do not indicate SSP support */ 4897 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags); 4898 } 4899 4900 if (ev->features[0] & LMP_HOST_SC) 4901 set_bit(HCI_CONN_SC_ENABLED, &conn->flags); 4902 } 4903 4904 if (conn->state != BT_CONFIG) 4905 goto unlock; 4906 4907 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) { 4908 struct hci_cp_remote_name_req cp; 4909 memset(&cp, 0, sizeof(cp)); 4910 bacpy(&cp.bdaddr, &conn->dst); 4911 cp.pscan_rep_mode = 0x02; 4912 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp); 4913 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) 4914 mgmt_device_connected(hdev, conn, NULL, 0); 4915 4916 if (!hci_outgoing_auth_needed(hdev, conn)) { 4917 conn->state = BT_CONNECTED; 4918 hci_connect_cfm(conn, ev->status); 4919 hci_conn_drop(conn); 4920 } 4921 4922 unlock: 4923 hci_dev_unlock(hdev); 4924 } 4925 4926 static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data, 4927 struct sk_buff *skb) 4928 { 4929 struct hci_ev_sync_conn_complete *ev = data; 4930 struct hci_conn *conn; 4931 u8 status = ev->status; 4932 4933 switch (ev->link_type) { 4934 case SCO_LINK: 4935 case ESCO_LINK: 4936 break; 4937 default: 4938 /* As per Core 5.3 Vol 4 Part E 7.7.35 (p.2219), Link_Type 4939 * for HCI_Synchronous_Connection_Complete is limited to 4940 * either SCO or eSCO 4941 */ 4942 bt_dev_err(hdev, "Ignoring connect complete event for invalid link type"); 4943 return; 4944 } 4945 4946 bt_dev_dbg(hdev, "status 0x%2.2x", status); 4947 4948 hci_dev_lock(hdev); 4949 4950 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); 4951 if (!conn) { 4952 if (ev->link_type == ESCO_LINK) 4953 goto unlock; 4954 4955 /* When the link type in the event indicates SCO connection 4956 * and lookup of the connection object fails, then check 4957 * if an eSCO connection object exists. 4958 * 4959 * The core limits the synchronous connections to either 4960 * SCO or eSCO. The eSCO connection is preferred and tried 4961 * to be setup first and until successfully established, 4962 * the link type will be hinted as eSCO. 4963 */ 4964 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr); 4965 if (!conn) 4966 goto unlock; 4967 } 4968 4969 /* The HCI_Synchronous_Connection_Complete event is only sent once per connection. 4970 * Processing it more than once per connection can corrupt kernel memory. 4971 * 4972 * As the connection handle is set here for the first time, it indicates 4973 * whether the connection is already set up. 
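* A freshly created connection still has conn->handle set to
* HCI_CONN_HANDLE_UNSET, so any other value means this event was
* already processed once for this connection.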
4974 */ 4975 if (conn->handle != HCI_CONN_HANDLE_UNSET) { 4976 bt_dev_err(hdev, "Ignoring HCI_Sync_Conn_Complete event for existing connection"); 4977 goto unlock; 4978 } 4979 4980 switch (status) { 4981 case 0x00: 4982 conn->handle = __le16_to_cpu(ev->handle); 4983 if (conn->handle > HCI_CONN_HANDLE_MAX) { 4984 bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x", 4985 conn->handle, HCI_CONN_HANDLE_MAX); 4986 status = HCI_ERROR_INVALID_PARAMETERS; 4987 conn->state = BT_CLOSED; 4988 break; 4989 } 4990 4991 conn->state = BT_CONNECTED; 4992 conn->type = ev->link_type; 4993 4994 hci_debugfs_create_conn(conn); 4995 hci_conn_add_sysfs(conn); 4996 break; 4997 4998 case 0x10: /* Connection Accept Timeout */ 4999 case 0x0d: /* Connection Rejected due to Limited Resources */ 5000 case 0x11: /* Unsupported Feature or Parameter Value */ 5001 case 0x1c: /* SCO interval rejected */ 5002 case 0x1a: /* Unsupported Remote Feature */ 5003 case 0x1e: /* Invalid LMP Parameters */ 5004 case 0x1f: /* Unspecified error */ 5005 case 0x20: /* Unsupported LMP Parameter value */ 5006 if (conn->out) { 5007 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) | 5008 (hdev->esco_type & EDR_ESCO_MASK); 5009 if (hci_setup_sync(conn, conn->link->handle)) 5010 goto unlock; 5011 } 5012 fallthrough; 5013 5014 default: 5015 conn->state = BT_CLOSED; 5016 break; 5017 } 5018 5019 bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode); 5020 /* Notify only for SCO over the HCI transport data path, which 5021 * is zero; a non-zero value indicates a non-HCI transport data path. 5022 */ 5023 if (conn->codec.data_path == 0 && hdev->notify) { 5024 switch (ev->air_mode) { 5025 case 0x02: 5026 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD); 5027 break; 5028 case 0x03: 5029 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP); 5030 break; 5031 } 5032 } 5033 5034 hci_connect_cfm(conn, status); 5035 if (status) 5036 hci_conn_del(conn); 5037 5038 unlock: 5039 hci_dev_unlock(hdev); 5040 } 5041 5042 static inline size_t eir_get_length(u8 *eir, size_t eir_len) 5043 { 5044 size_t parsed = 0; 5045 5046 while (parsed < eir_len) { 5047 u8 field_len = eir[0]; 5048 5049 if (field_len == 0) 5050 return parsed; 5051 5052 parsed += field_len + 1; 5053 eir += field_len + 1; 5054 } 5055 5056 return eir_len; 5057 } 5058 5059 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, void *edata, 5060 struct sk_buff *skb) 5061 { 5062 struct hci_ev_ext_inquiry_result *ev = edata; 5063 struct inquiry_data data; 5064 size_t eir_len; 5065 int i; 5066 5067 if (!hci_ev_skb_pull(hdev, skb, HCI_EV_EXTENDED_INQUIRY_RESULT, 5068 flex_array_size(ev, info, ev->num))) 5069 return; 5070 5071 bt_dev_dbg(hdev, "num %d", ev->num); 5072 5073 if (!ev->num) 5074 return; 5075 5076 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) 5077 return; 5078 5079 hci_dev_lock(hdev); 5080 5081 for (i = 0; i < ev->num; i++) { 5082 struct extended_inquiry_info *info = &ev->info[i]; 5083 u32 flags; 5084 bool name_known; 5085 5086 bacpy(&data.bdaddr, &info->bdaddr); 5087 data.pscan_rep_mode = info->pscan_rep_mode; 5088 data.pscan_period_mode = info->pscan_period_mode; 5089 data.pscan_mode = 0x00; 5090 memcpy(data.dev_class, info->dev_class, 3); 5091 data.clock_offset = info->clock_offset; 5092 data.rssi = info->rssi; 5093 data.ssp_mode = 0x01; 5094 5095 if (hci_dev_test_flag(hdev, HCI_MGMT)) 5096 name_known = eir_get_data(info->data, 5097 sizeof(info->data), 5098 EIR_NAME_COMPLETE, NULL); 5099 else 5100 name_known = true; 5101 5102 flags = hci_inquiry_cache_update(hdev, &data,
name_known); 5103 5104 eir_len = eir_get_length(info->data, sizeof(info->data)); 5105 5106 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 5107 info->dev_class, info->rssi, 5108 flags, info->data, eir_len, NULL, 0); 5109 } 5110 5111 hci_dev_unlock(hdev); 5112 } 5113 5114 static void hci_key_refresh_complete_evt(struct hci_dev *hdev, void *data, 5115 struct sk_buff *skb) 5116 { 5117 struct hci_ev_key_refresh_complete *ev = data; 5118 struct hci_conn *conn; 5119 5120 bt_dev_dbg(hdev, "status 0x%2.2x handle 0x%4.4x", ev->status, 5121 __le16_to_cpu(ev->handle)); 5122 5123 hci_dev_lock(hdev); 5124 5125 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 5126 if (!conn) 5127 goto unlock; 5128 5129 /* For BR/EDR the necessary steps are taken through the 5130 * auth_complete event. 5131 */ 5132 if (conn->type != LE_LINK) 5133 goto unlock; 5134 5135 if (!ev->status) 5136 conn->sec_level = conn->pending_sec_level; 5137 5138 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); 5139 5140 if (ev->status && conn->state == BT_CONNECTED) { 5141 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE); 5142 hci_conn_drop(conn); 5143 goto unlock; 5144 } 5145 5146 if (conn->state == BT_CONFIG) { 5147 if (!ev->status) 5148 conn->state = BT_CONNECTED; 5149 5150 hci_connect_cfm(conn, ev->status); 5151 hci_conn_drop(conn); 5152 } else { 5153 hci_auth_cfm(conn, ev->status); 5154 5155 hci_conn_hold(conn); 5156 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 5157 hci_conn_drop(conn); 5158 } 5159 5160 unlock: 5161 hci_dev_unlock(hdev); 5162 } 5163 5164 static u8 hci_get_auth_req(struct hci_conn *conn) 5165 { 5166 /* If the remote requests no-bonding, follow that lead */ 5167 if (conn->remote_auth == HCI_AT_NO_BONDING || 5168 conn->remote_auth == HCI_AT_NO_BONDING_MITM) 5169 return conn->remote_auth | (conn->auth_type & 0x01); 5170 5171 /* If both remote and local have enough IO capabilities, require 5172 * MITM protection 5173 */ 5174 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT && 5175 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT) 5176 return conn->remote_auth | 0x01; 5177 5178 /* No MITM protection possible, so ignore the remote requirement */ 5179 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01); 5180 } 5181 5182 static u8 bredr_oob_data_present(struct hci_conn *conn) 5183 { 5184 struct hci_dev *hdev = conn->hdev; 5185 struct oob_data *data; 5186 5187 data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR); 5188 if (!data) 5189 return 0x00; 5190 5191 if (bredr_sc_enabled(hdev)) { 5192 /* When Secure Connections is enabled, then just 5193 * return the present value stored with the OOB 5194 * data. The stored value contains the right present 5195 * information. However, it can only be trusted when 5196 * not in Secure Connections Only mode. 5197 */ 5198 if (!hci_dev_test_flag(hdev, HCI_SC_ONLY)) 5199 return data->present; 5200 5201 /* When Secure Connections Only mode is enabled, then 5202 * the P-256 values are required. If they are not 5203 * available, then do not declare that OOB data is 5204 * present. 5205 */ 5206 if (!memcmp(data->rand256, ZERO_KEY, 16) || 5207 !memcmp(data->hash256, ZERO_KEY, 16)) 5208 return 0x00; 5209 5210 return 0x02; 5211 } 5212 5213 /* When Secure Connections is not enabled or actually 5214 * not supported by the hardware, then check if the 5215 * P-192 data values are present.
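* The return values here are assumed to mirror the OOB data present
* field of the IO Capability Reply: 0x00 no OOB data, 0x01 P-192
* data only, 0x02 P-256 data.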
5216 */ 5217 if (!memcmp(data->rand192, ZERO_KEY, 16) || 5218 !memcmp(data->hash192, ZERO_KEY, 16)) 5219 return 0x00; 5220 5221 return 0x01; 5222 } 5223 5224 static void hci_io_capa_request_evt(struct hci_dev *hdev, void *data, 5225 struct sk_buff *skb) 5226 { 5227 struct hci_ev_io_capa_request *ev = data; 5228 struct hci_conn *conn; 5229 5230 bt_dev_dbg(hdev, ""); 5231 5232 hci_dev_lock(hdev); 5233 5234 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 5235 if (!conn) 5236 goto unlock; 5237 5238 hci_conn_hold(conn); 5239 5240 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 5241 goto unlock; 5242 5243 /* Allow pairing if we're pairable, if we are the initiators 5244 * of the pairing, or if the remote is not requesting bonding. 5245 */ 5246 if (hci_dev_test_flag(hdev, HCI_BONDABLE) || 5247 test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) || 5248 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) { 5249 struct hci_cp_io_capability_reply cp; 5250 5251 bacpy(&cp.bdaddr, &ev->bdaddr); 5252 /* Change the IO capability from KeyboardDisplay to DisplayYesNo, 5253 * as KeyboardDisplay is not supported by the BR/EDR BT spec. */ 5254 cp.capability = (conn->io_capability == 0x04) ? 5255 HCI_IO_DISPLAY_YESNO : conn->io_capability; 5256 5257 /* If we are the initiators, there is no remote information yet */ 5258 if (conn->remote_auth == 0xff) { 5259 /* Request MITM protection if our IO caps allow it 5260 * except for the no-bonding case. 5261 */ 5262 if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT && 5263 conn->auth_type != HCI_AT_NO_BONDING) 5264 conn->auth_type |= 0x01; 5265 } else { 5266 conn->auth_type = hci_get_auth_req(conn); 5267 } 5268 5269 /* If we're not bondable, force one of the non-bondable 5270 * authentication requirement values. 5271 */ 5272 if (!hci_dev_test_flag(hdev, HCI_BONDABLE)) 5273 conn->auth_type &= HCI_AT_NO_BONDING_MITM; 5274 5275 cp.authentication = conn->auth_type; 5276 cp.oob_data = bredr_oob_data_present(conn); 5277 5278 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY, 5279 sizeof(cp), &cp); 5280 } else { 5281 struct hci_cp_io_capability_neg_reply cp; 5282 5283 bacpy(&cp.bdaddr, &ev->bdaddr); 5284 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED; 5285 5286 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY, 5287 sizeof(cp), &cp); 5288 } 5289 5290 unlock: 5291 hci_dev_unlock(hdev); 5292 } 5293 5294 static void hci_io_capa_reply_evt(struct hci_dev *hdev, void *data, 5295 struct sk_buff *skb) 5296 { 5297 struct hci_ev_io_capa_reply *ev = data; 5298 struct hci_conn *conn; 5299 5300 bt_dev_dbg(hdev, ""); 5301 5302 hci_dev_lock(hdev); 5303 5304 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 5305 if (!conn) 5306 goto unlock; 5307 5308 conn->remote_cap = ev->capability; 5309 conn->remote_auth = ev->authentication; 5310 5311 unlock: 5312 hci_dev_unlock(hdev); 5313 } 5314 5315 static void hci_user_confirm_request_evt(struct hci_dev *hdev, void *data, 5316 struct sk_buff *skb) 5317 { 5318 struct hci_ev_user_confirm_req *ev = data; 5319 int loc_mitm, rem_mitm, confirm_hint = 0; 5320 struct hci_conn *conn; 5321 5322 bt_dev_dbg(hdev, ""); 5323 5324 hci_dev_lock(hdev); 5325 5326 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 5327 goto unlock; 5328 5329 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 5330 if (!conn) 5331 goto unlock; 5332 5333 loc_mitm = (conn->auth_type & 0x01); 5334 rem_mitm = (conn->remote_auth & 0x01); 5335 5336 /* If we require MITM but the remote device can't provide that 5337 * (it has NoInputNoOutput) then reject the confirmation 5338 * request.
We check the security level here since it doesn't 5339 * necessarily match conn->auth_type. 5340 */ 5341 if (conn->pending_sec_level > BT_SECURITY_MEDIUM && 5342 conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) { 5343 bt_dev_dbg(hdev, "Rejecting request: remote device can't provide MITM"); 5344 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY, 5345 sizeof(ev->bdaddr), &ev->bdaddr); 5346 goto unlock; 5347 } 5348 5349 /* If no side requires MITM protection, auto-accept */ 5350 if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) && 5351 (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) { 5352 5353 /* If we're not the initiators, request authorization to 5354 * proceed from user space (mgmt_user_confirm with 5355 * confirm_hint set to 1). The exception is if neither 5356 * side had MITM or if the local IO capability is 5357 * NoInputNoOutput, in which case we do auto-accept. 5358 */ 5359 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && 5360 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT && 5361 (loc_mitm || rem_mitm)) { 5362 bt_dev_dbg(hdev, "Confirming auto-accept as acceptor"); 5363 confirm_hint = 1; 5364 goto confirm; 5365 } 5366 5367 /* If a link key already exists in the local host, leave the 5368 * decision to user space, since the remote device could be 5369 * legitimate or malicious. 5370 */ 5371 if (hci_find_link_key(hdev, &ev->bdaddr)) { 5372 bt_dev_dbg(hdev, "Local host already has link key"); 5373 confirm_hint = 1; 5374 goto confirm; 5375 } 5376 5377 BT_DBG("Auto-accept of user confirmation with %ums delay", 5378 hdev->auto_accept_delay); 5379 5380 if (hdev->auto_accept_delay > 0) { 5381 int delay = msecs_to_jiffies(hdev->auto_accept_delay); 5382 queue_delayed_work(conn->hdev->workqueue, 5383 &conn->auto_accept_work, delay); 5384 goto unlock; 5385 } 5386 5387 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, 5388 sizeof(ev->bdaddr), &ev->bdaddr); 5389 goto unlock; 5390 } 5391 5392 confirm: 5393 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, 5394 le32_to_cpu(ev->passkey), confirm_hint); 5395 5396 unlock: 5397 hci_dev_unlock(hdev); 5398 } 5399 5400 static void hci_user_passkey_request_evt(struct hci_dev *hdev, void *data, 5401 struct sk_buff *skb) 5402 { 5403 struct hci_ev_user_passkey_req *ev = data; 5404 5405 bt_dev_dbg(hdev, ""); 5406 5407 if (hci_dev_test_flag(hdev, HCI_MGMT)) 5408 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0); 5409 } 5410 5411 static void hci_user_passkey_notify_evt(struct hci_dev *hdev, void *data, 5412 struct sk_buff *skb) 5413 { 5414 struct hci_ev_user_passkey_notify *ev = data; 5415 struct hci_conn *conn; 5416 5417 bt_dev_dbg(hdev, ""); 5418 5419 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 5420 if (!conn) 5421 return; 5422 5423 conn->passkey_notify = __le32_to_cpu(ev->passkey); 5424 conn->passkey_entered = 0; 5425 5426 if (hci_dev_test_flag(hdev, HCI_MGMT)) 5427 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type, 5428 conn->dst_type, conn->passkey_notify, 5429 conn->passkey_entered); 5430 } 5431 5432 static void hci_keypress_notify_evt(struct hci_dev *hdev, void *data, 5433 struct sk_buff *skb) 5434 { 5435 struct hci_ev_keypress_notify *ev = data; 5436 struct hci_conn *conn; 5437 5438 bt_dev_dbg(hdev, ""); 5439 5440 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 5441 if (!conn) 5442 return; 5443 5444 switch (ev->type) { 5445 case HCI_KEYPRESS_STARTED: 5446 conn->passkey_entered = 0; 5447 return; 5448 5449 case HCI_KEYPRESS_ENTERED: 5450 conn->passkey_entered++; 5451 break; 5452 5453 case
HCI_KEYPRESS_ERASED: 5454 conn->passkey_entered--; 5455 break; 5456 5457 case HCI_KEYPRESS_CLEARED: 5458 conn->passkey_entered = 0; 5459 break; 5460 5461 case HCI_KEYPRESS_COMPLETED: 5462 return; 5463 } 5464 5465 if (hci_dev_test_flag(hdev, HCI_MGMT)) 5466 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type, 5467 conn->dst_type, conn->passkey_notify, 5468 conn->passkey_entered); 5469 } 5470 5471 static void hci_simple_pair_complete_evt(struct hci_dev *hdev, void *data, 5472 struct sk_buff *skb) 5473 { 5474 struct hci_ev_simple_pair_complete *ev = data; 5475 struct hci_conn *conn; 5476 5477 bt_dev_dbg(hdev, ""); 5478 5479 hci_dev_lock(hdev); 5480 5481 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 5482 if (!conn) 5483 goto unlock; 5484 5485 /* Reset the authentication requirement to unknown */ 5486 conn->remote_auth = 0xff; 5487 5488 /* To avoid duplicate auth_failed events to user space we check 5489 * the HCI_CONN_AUTH_PEND flag which will be set if we 5490 * initiated the authentication. A traditional auth_complete 5491 * event is always produced as the initiator and is also mapped 5492 * to the mgmt_auth_failed event */ 5493 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status) 5494 mgmt_auth_failed(conn, ev->status); 5495 5496 hci_conn_drop(conn); 5497 5498 unlock: 5499 hci_dev_unlock(hdev); 5500 } 5501 5502 static void hci_remote_host_features_evt(struct hci_dev *hdev, void *data, 5503 struct sk_buff *skb) 5504 { 5505 struct hci_ev_remote_host_features *ev = data; 5506 struct inquiry_entry *ie; 5507 struct hci_conn *conn; 5508 5509 bt_dev_dbg(hdev, ""); 5510 5511 hci_dev_lock(hdev); 5512 5513 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 5514 if (conn) 5515 memcpy(conn->features[1], ev->features, 8); 5516 5517 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr); 5518 if (ie) 5519 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP); 5520 5521 hci_dev_unlock(hdev); 5522 } 5523 5524 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev, void *edata, 5525 struct sk_buff *skb) 5526 { 5527 struct hci_ev_remote_oob_data_request *ev = edata; 5528 struct oob_data *data; 5529 5530 bt_dev_dbg(hdev, ""); 5531 5532 hci_dev_lock(hdev); 5533 5534 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 5535 goto unlock; 5536 5537 data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR); 5538 if (!data) { 5539 struct hci_cp_remote_oob_data_neg_reply cp; 5540 5541 bacpy(&cp.bdaddr, &ev->bdaddr); 5542 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, 5543 sizeof(cp), &cp); 5544 goto unlock; 5545 } 5546 5547 if (bredr_sc_enabled(hdev)) { 5548 struct hci_cp_remote_oob_ext_data_reply cp; 5549 5550 bacpy(&cp.bdaddr, &ev->bdaddr); 5551 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) { 5552 memset(cp.hash192, 0, sizeof(cp.hash192)); 5553 memset(cp.rand192, 0, sizeof(cp.rand192)); 5554 } else { 5555 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192)); 5556 memcpy(cp.rand192, data->rand192, sizeof(cp.rand192)); 5557 } 5558 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256)); 5559 memcpy(cp.rand256, data->rand256, sizeof(cp.rand256)); 5560 5561 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY, 5562 sizeof(cp), &cp); 5563 } else { 5564 struct hci_cp_remote_oob_data_reply cp; 5565 5566 bacpy(&cp.bdaddr, &ev->bdaddr); 5567 memcpy(cp.hash, data->hash192, sizeof(cp.hash)); 5568 memcpy(cp.rand, data->rand192, sizeof(cp.rand)); 5569 5570 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, 5571 sizeof(cp), &cp); 5572 } 5573 5574 unlock: 5575 hci_dev_unlock(hdev); 5576 } 5577 5578
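/* The handlers below cover AMP (Alternate MAC/PHY) physical and logical
 * links used by the Bluetooth High Speed feature. They are only built
 * when CONFIG_BT_HS is enabled.
 */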
#if IS_ENABLED(CONFIG_BT_HS) 5579 static void hci_chan_selected_evt(struct hci_dev *hdev, void *data, 5580 struct sk_buff *skb) 5581 { 5582 struct hci_ev_channel_selected *ev = data; 5583 struct hci_conn *hcon; 5584 5585 bt_dev_dbg(hdev, "handle 0x%2.2x", ev->phy_handle); 5586 5587 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle); 5588 if (!hcon) 5589 return; 5590 5591 amp_read_loc_assoc_final_data(hdev, hcon); 5592 } 5593 5594 static void hci_phy_link_complete_evt(struct hci_dev *hdev, void *data, 5595 struct sk_buff *skb) 5596 { 5597 struct hci_ev_phy_link_complete *ev = data; 5598 struct hci_conn *hcon, *bredr_hcon; 5599 5600 bt_dev_dbg(hdev, "handle 0x%2.2x status 0x%2.2x", ev->phy_handle, 5601 ev->status); 5602 5603 hci_dev_lock(hdev); 5604 5605 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle); 5606 if (!hcon) 5607 goto unlock; 5608 5609 if (!hcon->amp_mgr) 5610 goto unlock; 5611 5612 if (ev->status) { 5613 hci_conn_del(hcon); 5614 goto unlock; 5615 } 5616 5617 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon; 5618 5619 hcon->state = BT_CONNECTED; 5620 bacpy(&hcon->dst, &bredr_hcon->dst); 5621 5622 hci_conn_hold(hcon); 5623 hcon->disc_timeout = HCI_DISCONN_TIMEOUT; 5624 hci_conn_drop(hcon); 5625 5626 hci_debugfs_create_conn(hcon); 5627 hci_conn_add_sysfs(hcon); 5628 5629 amp_physical_cfm(bredr_hcon, hcon); 5630 5631 unlock: 5632 hci_dev_unlock(hdev); 5633 } 5634 5635 static void hci_loglink_complete_evt(struct hci_dev *hdev, void *data, 5636 struct sk_buff *skb) 5637 { 5638 struct hci_ev_logical_link_complete *ev = data; 5639 struct hci_conn *hcon; 5640 struct hci_chan *hchan; 5641 struct amp_mgr *mgr; 5642 5643 bt_dev_dbg(hdev, "log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x", 5644 le16_to_cpu(ev->handle), ev->phy_handle, ev->status); 5645 5646 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle); 5647 if (!hcon) 5648 return; 5649 5650 /* Create AMP hchan */ 5651 hchan = hci_chan_create(hcon); 5652 if (!hchan) 5653 return; 5654 5655 hchan->handle = le16_to_cpu(ev->handle); 5656 hchan->amp = true; 5657 5658 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan); 5659 5660 mgr = hcon->amp_mgr; 5661 if (mgr && mgr->bredr_chan) { 5662 struct l2cap_chan *bredr_chan = mgr->bredr_chan; 5663 5664 l2cap_chan_lock(bredr_chan); 5665 5666 bredr_chan->conn->mtu = hdev->block_mtu; 5667 l2cap_logical_cfm(bredr_chan, hchan, 0); 5668 hci_conn_hold(hcon); 5669 5670 l2cap_chan_unlock(bredr_chan); 5671 } 5672 } 5673 5674 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev, void *data, 5675 struct sk_buff *skb) 5676 { 5677 struct hci_ev_disconn_logical_link_complete *ev = data; 5678 struct hci_chan *hchan; 5679 5680 bt_dev_dbg(hdev, "handle 0x%4.4x status 0x%2.2x", 5681 le16_to_cpu(ev->handle), ev->status); 5682 5683 if (ev->status) 5684 return; 5685 5686 hci_dev_lock(hdev); 5687 5688 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle)); 5689 if (!hchan || !hchan->amp) 5690 goto unlock; 5691 5692 amp_destroy_logical_link(hchan, ev->reason); 5693 5694 unlock: 5695 hci_dev_unlock(hdev); 5696 } 5697 5698 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev, void *data, 5699 struct sk_buff *skb) 5700 { 5701 struct hci_ev_disconn_phy_link_complete *ev = data; 5702 struct hci_conn *hcon; 5703 5704 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 5705 5706 if (ev->status) 5707 return; 5708 5709 hci_dev_lock(hdev); 5710 5711 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle); 5712 if (hcon && hcon->type == AMP_LINK) { 5713 hcon->state = 
BT_CLOSED; 5714 hci_disconn_cfm(hcon, ev->reason); 5715 hci_conn_del(hcon); 5716 } 5717 5718 hci_dev_unlock(hdev); 5719 } 5720 #endif 5721 5722 static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr, 5723 u8 bdaddr_type, bdaddr_t *local_rpa) 5724 { 5725 if (conn->out) { 5726 conn->dst_type = bdaddr_type; 5727 conn->resp_addr_type = bdaddr_type; 5728 bacpy(&conn->resp_addr, bdaddr); 5729 5730 /* If the controller has set a Local RPA, then it must be 5731 * used instead of hdev->rpa. 5732 */ 5733 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) { 5734 conn->init_addr_type = ADDR_LE_DEV_RANDOM; 5735 bacpy(&conn->init_addr, local_rpa); 5736 } else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) { 5737 conn->init_addr_type = ADDR_LE_DEV_RANDOM; 5738 bacpy(&conn->init_addr, &conn->hdev->rpa); 5739 } else { 5740 hci_copy_identity_address(conn->hdev, &conn->init_addr, 5741 &conn->init_addr_type); 5742 } 5743 } else { 5744 conn->resp_addr_type = conn->hdev->adv_addr_type; 5745 /* If the controller has set a Local RPA, then it must be 5746 * used instead of hdev->rpa. 5747 */ 5748 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) { 5749 conn->resp_addr_type = ADDR_LE_DEV_RANDOM; 5750 bacpy(&conn->resp_addr, local_rpa); 5751 } else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) { 5752 /* In case of ext adv, resp_addr will be updated in 5753 * the Adv Terminated event. 5754 */ 5755 if (!ext_adv_capable(conn->hdev)) 5756 bacpy(&conn->resp_addr, 5757 &conn->hdev->random_addr); 5758 } else { 5759 bacpy(&conn->resp_addr, &conn->hdev->bdaddr); 5760 } 5761 5762 conn->init_addr_type = bdaddr_type; 5763 bacpy(&conn->init_addr, bdaddr); 5764 5765 /* For incoming connections, set the default minimum 5766 * and maximum connection interval. They will be used 5767 * to check if the parameters are in range and, if not, 5768 * to trigger the connection update procedure. 5769 */ 5770 conn->le_conn_min_interval = conn->hdev->le_conn_min_interval; 5771 conn->le_conn_max_interval = conn->hdev->le_conn_max_interval; 5772 } 5773 } 5774 5775 static void le_conn_complete_evt(struct hci_dev *hdev, u8 status, 5776 bdaddr_t *bdaddr, u8 bdaddr_type, 5777 bdaddr_t *local_rpa, u8 role, u16 handle, 5778 u16 interval, u16 latency, 5779 u16 supervision_timeout) 5780 { 5781 struct hci_conn_params *params; 5782 struct hci_conn *conn; 5783 struct smp_irk *irk; 5784 u8 addr_type; 5785 5786 hci_dev_lock(hdev); 5787 5788 /* All controllers implicitly stop advertising in the event of a 5789 * connection, so ensure that the state bit is cleared. 5790 */ 5791 hci_dev_clear_flag(hdev, HCI_LE_ADV); 5792 5793 conn = hci_lookup_le_connect(hdev); 5794 if (!conn) { 5795 /* In case of an error status and no pending connection, 5796 * just unlock as there is nothing to clean up. 5797 */ 5798 if (status) 5799 goto unlock; 5800 5801 conn = hci_conn_add(hdev, LE_LINK, bdaddr, role); 5802 if (!conn) { 5803 bt_dev_err(hdev, "no memory for new connection"); 5804 goto unlock; 5805 } 5806 5807 conn->dst_type = bdaddr_type; 5808 5809 /* If we didn't have a hci_conn object previously 5810 * but we're in central role, this must be something 5811 * initiated using an accept list. Since accept list based 5812 * connections are not "first class citizens" we don't 5813 * have full tracking of them. Therefore, we go ahead 5814 * with a "best effort" approach of determining the 5815 * initiator address based on the HCI_PRIVACY flag.
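* With HCI_PRIVACY set we assume our own RPA was used as the
* initiator address; otherwise the public/static identity address
* is assumed.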
5816 */ 5817 if (conn->out) { 5818 conn->resp_addr_type = bdaddr_type; 5819 bacpy(&conn->resp_addr, bdaddr); 5820 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) { 5821 conn->init_addr_type = ADDR_LE_DEV_RANDOM; 5822 bacpy(&conn->init_addr, &hdev->rpa); 5823 } else { 5824 hci_copy_identity_address(hdev, 5825 &conn->init_addr, 5826 &conn->init_addr_type); 5827 } 5828 } 5829 } else { 5830 cancel_delayed_work(&conn->le_conn_timeout); 5831 } 5832 5833 /* The HCI_LE_Connection_Complete event is only sent once per connection. 5834 * Processing it more than once per connection can corrupt kernel memory. 5835 * 5836 * As the connection handle is set here for the first time, it indicates 5837 * whether the connection is already set up. 5838 */ 5839 if (conn->handle != HCI_CONN_HANDLE_UNSET) { 5840 bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection"); 5841 goto unlock; 5842 } 5843 5844 le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa); 5845 5846 /* Lookup the identity address from the stored connection 5847 * address and address type. 5848 * 5849 * When establishing connections to an identity address, the 5850 * connection procedure will store the resolvable random 5851 * address first. Now if it can be converted back into the 5852 * identity address, start using the identity address from 5853 * now on. 5854 */ 5855 irk = hci_get_irk(hdev, &conn->dst, conn->dst_type); 5856 if (irk) { 5857 bacpy(&conn->dst, &irk->bdaddr); 5858 conn->dst_type = irk->addr_type; 5859 } 5860 5861 conn->dst_type = ev_bdaddr_type(hdev, conn->dst_type, NULL); 5862 5863 if (handle > HCI_CONN_HANDLE_MAX) { 5864 bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x", handle, 5865 HCI_CONN_HANDLE_MAX); 5866 status = HCI_ERROR_INVALID_PARAMETERS; 5867 } 5868 5869 /* All connection failure handling is taken care of by the 5870 * hci_conn_failed function which is triggered by the HCI 5871 * request completion callbacks used for connecting. 5872 */ 5873 if (status) 5874 goto unlock; 5875 5876 if (conn->dst_type == ADDR_LE_DEV_PUBLIC) 5877 addr_type = BDADDR_LE_PUBLIC; 5878 else 5879 addr_type = BDADDR_LE_RANDOM; 5880 5881 /* Drop the connection if the device is blocked */ 5882 if (hci_bdaddr_list_lookup(&hdev->reject_list, &conn->dst, addr_type)) { 5883 hci_conn_drop(conn); 5884 goto unlock; 5885 } 5886 5887 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) 5888 mgmt_device_connected(hdev, conn, NULL, 0); 5889 5890 conn->sec_level = BT_SECURITY_LOW; 5891 conn->handle = handle; 5892 conn->state = BT_CONFIG; 5893 5894 /* Store the current advertising instance as the connection's 5895 * advertising instance when software rotation is in use so it 5896 * can be re-enabled when disconnected. 5897 */ 5898 if (!ext_adv_capable(hdev)) 5899 conn->adv_instance = hdev->cur_adv_instance; 5900 5901 conn->le_conn_interval = interval; 5902 conn->le_conn_latency = latency; 5903 conn->le_supv_timeout = supervision_timeout; 5904 5905 hci_debugfs_create_conn(conn); 5906 hci_conn_add_sysfs(conn); 5907 5908 /* The remote features procedure is defined for the central 5909 * role only. So only for an initiated connection do we 5910 * request the remote features. 5911 * 5912 * If the local controller supports peripheral-initiated features 5913 * exchange, then requesting the remote features in peripheral 5914 * role is possible. Otherwise just transition into the 5915 * connected state without requesting the remote features.
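* Support for the peripheral-initiated exchange is advertised by the
* HCI_LE_PERIPHERAL_FEATURES bit of le_features[0], checked below.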
5916 */ 5917 if (conn->out || 5918 (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) { 5919 struct hci_cp_le_read_remote_features cp; 5920 5921 cp.handle = __cpu_to_le16(conn->handle); 5922 5923 hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES, 5924 sizeof(cp), &cp); 5925 5926 hci_conn_hold(conn); 5927 } else { 5928 conn->state = BT_CONNECTED; 5929 hci_connect_cfm(conn, status); 5930 } 5931 5932 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst, 5933 conn->dst_type); 5934 if (params) { 5935 list_del_init(&params->action); 5936 if (params->conn) { 5937 hci_conn_drop(params->conn); 5938 hci_conn_put(params->conn); 5939 params->conn = NULL; 5940 } 5941 } 5942 5943 unlock: 5944 hci_update_passive_scan(hdev); 5945 hci_dev_unlock(hdev); 5946 } 5947 5948 static void hci_le_conn_complete_evt(struct hci_dev *hdev, void *data, 5949 struct sk_buff *skb) 5950 { 5951 struct hci_ev_le_conn_complete *ev = data; 5952 5953 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 5954 5955 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type, 5956 NULL, ev->role, le16_to_cpu(ev->handle), 5957 le16_to_cpu(ev->interval), 5958 le16_to_cpu(ev->latency), 5959 le16_to_cpu(ev->supervision_timeout)); 5960 } 5961 5962 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev, void *data, 5963 struct sk_buff *skb) 5964 { 5965 struct hci_ev_le_enh_conn_complete *ev = data; 5966 5967 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 5968 5969 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type, 5970 &ev->local_rpa, ev->role, le16_to_cpu(ev->handle), 5971 le16_to_cpu(ev->interval), 5972 le16_to_cpu(ev->latency), 5973 le16_to_cpu(ev->supervision_timeout)); 5974 } 5975 5976 static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, void *data, 5977 struct sk_buff *skb) 5978 { 5979 struct hci_evt_le_ext_adv_set_term *ev = data; 5980 struct hci_conn *conn; 5981 struct adv_info *adv, *n; 5982 5983 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 5984 5985 /* The Bluetooth Core 5.3 specification clearly states that this event 5986 * shall not be sent when the Host disables the advertising set. So in 5987 * case of HCI_ERROR_CANCELLED_BY_HOST, just ignore the event. 5988 * 5989 * When the Host disables an advertising set, all cleanup is done via 5990 * its command callback and does not need to be duplicated here. 5991 */ 5992 if (ev->status == HCI_ERROR_CANCELLED_BY_HOST) { 5993 bt_dev_warn_ratelimited(hdev, "Unexpected advertising set terminated event"); 5994 return; 5995 } 5996 5997 hci_dev_lock(hdev); 5998 5999 adv = hci_find_adv_instance(hdev, ev->handle); 6000 6001 if (ev->status) { 6002 if (!adv) 6003 goto unlock; 6004 6005 /* Remove advertising as it has been terminated */ 6006 hci_remove_adv_instance(hdev, ev->handle); 6007 mgmt_advertising_removed(NULL, hdev, ev->handle); 6008 6009 list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) { 6010 if (adv->enabled) 6011 goto unlock; 6012 } 6013 6014 /* We are no longer advertising, clear HCI_LE_ADV */ 6015 hci_dev_clear_flag(hdev, HCI_LE_ADV); 6016 goto unlock; 6017 } 6018 6019 if (adv) 6020 adv->enabled = false; 6021 6022 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle)); 6023 if (conn) { 6024 /* Store the handle in the connection so the correct advertising 6025 * instance can be re-enabled when disconnected.
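* Handle 0x00 is the legacy/default advertising instance, which uses
* hdev->random_addr rather than a per-instance random address (see
* the !ev->handle case below).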
6026 */ 6027 conn->adv_instance = ev->handle; 6028 6029 if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM || 6030 bacmp(&conn->resp_addr, BDADDR_ANY)) 6031 goto unlock; 6032 6033 if (!ev->handle) { 6034 bacpy(&conn->resp_addr, &hdev->random_addr); 6035 goto unlock; 6036 } 6037 6038 if (adv) 6039 bacpy(&conn->resp_addr, &adv->random_addr); 6040 } 6041 6042 unlock: 6043 hci_dev_unlock(hdev); 6044 } 6045 6046 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev, void *data, 6047 struct sk_buff *skb) 6048 { 6049 struct hci_ev_le_conn_update_complete *ev = data; 6050 struct hci_conn *conn; 6051 6052 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 6053 6054 if (ev->status) 6055 return; 6056 6057 hci_dev_lock(hdev); 6058 6059 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 6060 if (conn) { 6061 conn->le_conn_interval = le16_to_cpu(ev->interval); 6062 conn->le_conn_latency = le16_to_cpu(ev->latency); 6063 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout); 6064 } 6065 6066 hci_dev_unlock(hdev); 6067 } 6068 6069 /* This function requires the caller holds hdev->lock */ 6070 static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev, 6071 bdaddr_t *addr, 6072 u8 addr_type, bool addr_resolved, 6073 u8 adv_type) 6074 { 6075 struct hci_conn *conn; 6076 struct hci_conn_params *params; 6077 6078 /* If the event is not connectable, don't proceed further */ 6079 if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND) 6080 return NULL; 6081 6082 /* Ignore if the device is blocked or hdev is suspended */ 6083 if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type) || 6084 hdev->suspended) 6085 return NULL; 6086 6087 /* Most controllers will fail if we try to create new connections 6088 * while we have an existing one in peripheral role. 6089 */ 6090 if (hdev->conn_hash.le_num_peripheral > 0 && 6091 (!test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) || 6092 !(hdev->le_states[3] & 0x10))) 6093 return NULL; 6094 6095 /* If we're not connectable, only connect to devices that we have in 6096 * our pend_le_conns list. 6097 */ 6098 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr, 6099 addr_type); 6100 if (!params) 6101 return NULL; 6102 6103 if (!params->explicit_connect) { 6104 switch (params->auto_connect) { 6105 case HCI_AUTO_CONN_DIRECT: 6106 /* Only devices advertising with ADV_DIRECT_IND trigger 6107 * a connection attempt. This allows incoming 6108 * connections from peripheral devices. 6109 */ 6110 if (adv_type != LE_ADV_DIRECT_IND) 6111 return NULL; 6112 break; 6113 case HCI_AUTO_CONN_ALWAYS: 6114 /* Devices advertising with ADV_IND or ADV_DIRECT_IND 6115 * trigger a connection attempt. This means that 6116 * incoming connections from peripheral devices are 6117 * accepted and also outgoing connections to peripheral 6118 * devices are established when found. 6119 */ 6120 break; 6121 default: 6122 return NULL; 6123 } 6124 } 6125 6126 conn = hci_connect_le(hdev, addr, addr_type, addr_resolved, 6127 BT_SECURITY_LOW, hdev->def_le_autoconnect_timeout, 6128 HCI_ROLE_MASTER); 6129 if (!IS_ERR(conn)) { 6130 /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned 6131 * by the higher layer that tried to connect; if not, 6132 * store the pointer since we don't really have any 6133 * other owner of the object besides the params that 6134 * triggered it. This way we can abort the connection if 6135 * the parameters get removed and keep the reference 6136 * count consistent once the connection is established.
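* The reference taken here is put again in le_conn_complete_evt()
* once the connection attempt completes.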
6137 */ 6138 6139 if (!params->explicit_connect) 6140 params->conn = hci_conn_get(conn); 6141 6142 return conn; 6143 } 6144 6145 switch (PTR_ERR(conn)) { 6146 case -EBUSY: 6147 /* If hci_connect() returns -EBUSY it means there is already 6148 * an LE connection attempt going on. Since controllers don't 6149 * support more than one connection attempt at a time, we 6150 * don't consider this an error case. 6151 */ 6152 break; 6153 default: 6154 BT_DBG("Failed to connect: err %ld", PTR_ERR(conn)); 6155 return NULL; 6156 } 6157 6158 return NULL; 6159 } 6160 6161 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, 6162 u8 bdaddr_type, bdaddr_t *direct_addr, 6163 u8 direct_addr_type, s8 rssi, u8 *data, u8 len, 6164 bool ext_adv) 6165 { 6166 struct discovery_state *d = &hdev->discovery; 6167 struct smp_irk *irk; 6168 struct hci_conn *conn; 6169 bool match, bdaddr_resolved; 6170 u32 flags; 6171 u8 *ptr; 6172 6173 switch (type) { 6174 case LE_ADV_IND: 6175 case LE_ADV_DIRECT_IND: 6176 case LE_ADV_SCAN_IND: 6177 case LE_ADV_NONCONN_IND: 6178 case LE_ADV_SCAN_RSP: 6179 break; 6180 default: 6181 bt_dev_err_ratelimited(hdev, "unknown advertising packet " 6182 "type: 0x%02x", type); 6183 return; 6184 } 6185 6186 if (!ext_adv && len > HCI_MAX_AD_LENGTH) { 6187 bt_dev_err_ratelimited(hdev, "legacy adv larger than 31 bytes"); 6188 return; 6189 } 6190 6191 /* Find the end of the data in case the report contains padded zero 6192 * bytes at the end causing an invalid length value. 6193 * 6194 * When data is NULL, len is 0 so there is no need for an extra ptr 6195 * check, as 'ptr < data + 0' is already false in that case. 6196 */ 6197 for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) { 6198 if (ptr + 1 + *ptr > data + len) 6199 break; 6200 } 6201 6202 /* Adjust for the actual length. This handles the case where the 6203 * remote device is advertising with an incorrect data length. 6204 */ 6205 len = ptr - data; 6206 6207 /* If the direct address is present, then this report is from 6208 * an LE Direct Advertising Report event. In that case it is 6209 * important to see if the address is matching the local 6210 * controller address. 6211 */ 6212 if (direct_addr) { 6213 direct_addr_type = ev_bdaddr_type(hdev, direct_addr_type, 6214 &bdaddr_resolved); 6215 6216 /* Only resolvable random addresses are valid for this 6217 * kind of report; others can be ignored. 6218 */ 6219 if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type)) 6220 return; 6221 6222 /* If the controller is not using resolvable random 6223 * addresses, then this report can be ignored. 6224 */ 6225 if (!hci_dev_test_flag(hdev, HCI_PRIVACY)) 6226 return; 6227 6228 /* If the local IRK of the controller does not match 6229 * with the resolvable random address provided, then 6230 * this report can be ignored. 6231 */ 6232 if (!smp_irk_matches(hdev, hdev->irk, direct_addr)) 6233 return; 6234 } 6235 6236 /* Check if we need to convert to identity address */ 6237 irk = hci_get_irk(hdev, bdaddr, bdaddr_type); 6238 if (irk) { 6239 bdaddr = &irk->bdaddr; 6240 bdaddr_type = irk->addr_type; 6241 } 6242 6243 bdaddr_type = ev_bdaddr_type(hdev, bdaddr_type, &bdaddr_resolved); 6244 6245 /* Check if we have been requested to connect to this device. 6246 * 6247 * direct_addr is set only for directed advertising reports (it is NULL 6248 * for regular advertising reports) and was already verified to be an RPA above.
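* check_pending_le_conn() ignores anything other than connectable
* ADV_IND and ADV_DIRECT_IND PDUs, so other report types simply fall
* through to the scanning logic below.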
static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
			       u8 bdaddr_type, bdaddr_t *direct_addr,
			       u8 direct_addr_type, s8 rssi, u8 *data, u8 len,
			       bool ext_adv)
{
	struct discovery_state *d = &hdev->discovery;
	struct smp_irk *irk;
	struct hci_conn *conn;
	bool match, bdaddr_resolved;
	u32 flags;
	u8 *ptr;

	switch (type) {
	case LE_ADV_IND:
	case LE_ADV_DIRECT_IND:
	case LE_ADV_SCAN_IND:
	case LE_ADV_NONCONN_IND:
	case LE_ADV_SCAN_RSP:
		break;
	default:
		bt_dev_err_ratelimited(hdev, "unknown advertising packet "
				       "type: 0x%02x", type);
		return;
	}

	if (!ext_adv && len > HCI_MAX_AD_LENGTH) {
		bt_dev_err_ratelimited(hdev, "legacy adv larger than 31 bytes");
		return;
	}

	/* Find the end of the data in case the report contains padded zero
	 * bytes at the end causing an invalid length value.
	 *
	 * When data is NULL, len is 0 so there is no need for an extra ptr
	 * check as 'ptr < data + 0' is already false in such a case.
	 */
	for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
		if (ptr + 1 + *ptr > data + len)
			break;
	}

	/* Adjust for actual length. This handles the case when the remote
	 * device is advertising with an incorrect data length.
	 */
	len = ptr - data;

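	/* The loop above walks standard length-prefixed AD structures: each
	 * structure is one length octet followed by that many octets of
	 * payload (AD type plus data). For example, a typical Flags element
	 * looks like:
	 *
	 *	0x02 0x01 0x06
	 *	 |    |    `-- flags value (LE General Discoverable,
	 *	 |    |        BR/EDR Not Supported)
	 *	 |    `------- AD type 0x01 (Flags)
	 *	 `------------ length covering the remaining two octets
	 *
	 * A zero length octet therefore terminates the walk, which is what
	 * trims zero padding appended by sloppy remotes.
	 */
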
	/* If the direct address is present, then this report is from
	 * an LE Direct Advertising Report event. In that case it is
	 * important to see if the address matches the local
	 * controller address.
	 */
	if (direct_addr) {
		direct_addr_type = ev_bdaddr_type(hdev, direct_addr_type,
						  &bdaddr_resolved);

		/* Only resolvable random addresses are valid for this
		 * kind of report and others can be ignored.
		 */
		if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
			return;

		/* If the controller is not using resolvable random
		 * addresses, then this report can be ignored.
		 */
		if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
			return;

		/* If the local IRK of the controller does not match
		 * the resolvable random address provided, then this
		 * report can be ignored.
		 */
		if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
			return;
	}

	/* Check if we need to convert to identity address */
	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		bdaddr_type = irk->addr_type;
	}

	bdaddr_type = ev_bdaddr_type(hdev, bdaddr_type, &bdaddr_resolved);

	/* Check if we have been requested to connect to this device.
	 *
	 * direct_addr is set only for directed advertising reports (it is
	 * NULL for advertising reports) and is already verified to be RPA
	 * above.
	 */
	conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, bdaddr_resolved,
				     type);
	if (!ext_adv && conn && type == LE_ADV_IND &&
	    len <= HCI_MAX_AD_LENGTH) {
		/* Store report for later inclusion by
		 * mgmt_device_connected
		 */
		memcpy(conn->le_adv_data, data, len);
		conn->le_adv_data_len = len;
	}

	/* Passive scanning shouldn't trigger any device found events,
	 * except for devices marked as CONN_REPORT for which we do send
	 * device found events, or when advertisement monitoring was
	 * requested.
	 */
	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
		if (type == LE_ADV_DIRECT_IND)
			return;

		if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
					       bdaddr, bdaddr_type) &&
		    idr_is_empty(&hdev->adv_monitors_idr))
			return;

		if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
			flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
		else
			flags = 0;

		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* When receiving non-connectable or scannable undirected
	 * advertising reports, this means that the remote device is
	 * not connectable, so clearly indicate this in the
	 * device found event.
	 *
	 * When receiving a scan response, there is no way to
	 * know if the remote device is connectable or not. However,
	 * since scan responses are merged with a previously seen
	 * advertising report, the flags field from that report
	 * will be used.
	 *
	 * In the really unlikely case that a controller gets confused
	 * and just sends a scan response event, then it is marked as
	 * not connectable as well.
	 */
	if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
	    type == LE_ADV_SCAN_RSP)
		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
	else
		flags = 0;

	/* If there's nothing pending, either store the data from this
	 * event or send an immediate device found event if the data
	 * should not be stored for later.
	 */
	if (!ext_adv && !has_pending_adv_report(hdev)) {
		/* If the report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* Check if the pending report is for the same device as the new one */
	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
		 bdaddr_type == d->last_adv_addr_type);

	/* If the pending data doesn't match this report or this isn't a
	 * scan response (e.g. we got a duplicate ADV_IND) then force
	 * sending of the pending data.
	 */
	if (type != LE_ADV_SCAN_RSP || !match) {
		/* Send out whatever is in the cache, but skip duplicates */
		if (!match)
			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);

		/* If the new report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (!ext_adv && (type == LE_ADV_IND ||
				 type == LE_ADV_SCAN_IND)) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		/* The advertising reports cannot be merged, so clear
		 * the pending report and send out a device found event.
		 */
		clear_pending_adv_report(hdev);
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
	 * the new event is a SCAN_RSP. We can therefore proceed with
	 * sending a merged device found event.
	 */
	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
			  d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
			  d->last_adv_data, d->last_adv_data_len, data, len);
	clear_pending_adv_report(hdev);
}

static void hci_le_adv_report_evt(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_le_advertising_report *ev = data;

	if (!ev->num)
		return;

	hci_dev_lock(hdev);

	while (ev->num--) {
		struct hci_ev_le_advertising_info *info;
		s8 rssi;

		info = hci_le_ev_skb_pull(hdev, skb,
					  HCI_EV_LE_ADVERTISING_REPORT,
					  sizeof(*info));
		if (!info)
			break;

		if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_ADVERTISING_REPORT,
					info->length + 1))
			break;

		if (info->length <= HCI_MAX_AD_LENGTH) {
			rssi = info->data[info->length];
			process_adv_report(hdev, info->type, &info->bdaddr,
					   info->bdaddr_type, NULL, 0, rssi,
					   info->data, info->length, false);
		} else {
			bt_dev_err(hdev, "Dropping invalid advertising data");
		}
	}

	hci_dev_unlock(hdev);
}

static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
{
	if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
		switch (evt_type) {
		case LE_LEGACY_ADV_IND:
			return LE_ADV_IND;
		case LE_LEGACY_ADV_DIRECT_IND:
			return LE_ADV_DIRECT_IND;
		case LE_LEGACY_ADV_SCAN_IND:
			return LE_ADV_SCAN_IND;
		case LE_LEGACY_NONCONN_IND:
			return LE_ADV_NONCONN_IND;
		case LE_LEGACY_SCAN_RSP_ADV:
		case LE_LEGACY_SCAN_RSP_ADV_SCAN:
			return LE_ADV_SCAN_RSP;
		}

		goto invalid;
	}

	if (evt_type & LE_EXT_ADV_CONN_IND) {
		if (evt_type & LE_EXT_ADV_DIRECT_IND)
			return LE_ADV_DIRECT_IND;

		return LE_ADV_IND;
	}

	if (evt_type & LE_EXT_ADV_SCAN_RSP)
		return LE_ADV_SCAN_RSP;

	if (evt_type & LE_EXT_ADV_SCAN_IND)
		return LE_ADV_SCAN_IND;

	if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
	    evt_type & LE_EXT_ADV_DIRECT_IND)
		return LE_ADV_NONCONN_IND;

invalid:
	bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x",
			       evt_type);

	return LE_ADV_INVALID;
}

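/* For reference, the extended advertising event type decoded above is a
 * bit field (matching the LE_EXT_ADV_* constants): bit 0 connectable,
 * bit 1 scannable, bit 2 directed, bit 3 scan response and bit 4 legacy
 * PDU. As a worked example, an event type of connectable | directed
 * without the legacy bit maps to LE_ADV_DIRECT_IND, while a lone scan
 * response bit maps to LE_ADV_SCAN_RSP.
 */
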
static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_le_ext_adv_report *ev = data;

	if (!ev->num)
		return;

	hci_dev_lock(hdev);

	while (ev->num--) {
		struct hci_ev_le_ext_adv_info *info;
		u8 legacy_evt_type;
		u16 evt_type;

		info = hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
					  sizeof(*info));
		if (!info)
			break;

		if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
					info->length))
			break;

		evt_type = __le16_to_cpu(info->type);
		legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);
		if (legacy_evt_type != LE_ADV_INVALID) {
			process_adv_report(hdev, legacy_evt_type, &info->bdaddr,
					   info->bdaddr_type, NULL, 0,
					   info->rssi, info->data, info->length,
					   !(evt_type & LE_EXT_ADV_LEGACY_PDU));
		}
	}

	hci_dev_unlock(hdev);
}

static int hci_le_pa_term_sync(struct hci_dev *hdev, __le16 handle)
{
	struct hci_cp_le_pa_term_sync cp;

	memset(&cp, 0, sizeof(cp));
	cp.handle = handle;

	return hci_send_cmd(hdev, HCI_OP_LE_PA_TERM_SYNC, sizeof(cp), &cp);
}

static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_ev_le_pa_sync_established *ev = data;
	int mask = hdev->link_mode;
	__u8 flags = 0;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	if (ev->status)
		return;

	hci_dev_lock(hdev);

	hci_dev_clear_flag(hdev, HCI_PA_SYNC);

	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ISO_LINK, &flags);
	if (!(mask & HCI_LM_ACCEPT))
		hci_le_pa_term_sync(hdev, ev->handle);

	hci_dev_unlock(hdev);
}

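/* Like the CIS request and BIG info handlers further below, the handler
 * above asks the registered protocol layers via hci_proto_connect_ind()
 * whether anything is listening for this ISO link; only if some layer
 * ORs HCI_LM_ACCEPT into the mask is the newly established sync kept,
 * otherwise it is terminated again right away so controller sync
 * resources are not held with nobody listening.
 */
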
static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_ev_le_remote_feat_complete *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		if (!ev->status)
			memcpy(conn->features[0], ev->features, 8);

		if (conn->state == BT_CONFIG) {
			__u8 status;

			/* If the local controller supports peripheral-initiated
			 * features exchange, but the remote controller does
			 * not, then it is possible that the error code 0x1a
			 * for unsupported remote feature gets returned.
			 *
			 * In this specific case, allow the connection to
			 * transition into connected state and mark it as
			 * successful.
			 */
			if (!conn->out && ev->status == 0x1a &&
			    (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES))
				status = 0x00;
			else
				status = ev->status;

			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}

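/* The LTK request handler below answers the controller's HCI_EV_LE_LTK_REQ
 * with either the stored key (LTK reply) or a negative reply. Note the
 * key-matching rule it applies: LE Secure Connections keys are defined to
 * have both EDiv and Rand equal to zero, so any non-zero value from the
 * controller rules such a key out, while legacy keys must match the EDiv
 * and Rand values stored during pairing.
 */
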
static void hci_le_ltk_request_evt(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto not_found;

	ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
	if (!ltk)
		goto not_found;

	if (smp_ltk_is_sc(ltk)) {
		/* With SC both EDiv and Rand are set to zero */
		if (ev->ediv || ev->rand)
			goto not_found;
	} else {
		/* For non-SC keys check that EDiv and Rand match */
		if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
			goto not_found;
	}

	memcpy(cp.ltk, ltk->val, ltk->enc_size);
	memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
	cp.handle = cpu_to_le16(conn->handle);

	conn->pending_sec_level = smp_ltk_sec_level(ltk);

	conn->enc_key_size = ltk->enc_size;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
	 * temporary key used to encrypt a connection following
	 * pairing. It is used during the Encrypted Session Setup to
	 * distribute the keys. Later, security can be re-established
	 * using a distributed LTK.
	 */
	if (ltk->type == SMP_STK) {
		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
		list_del_rcu(&ltk->list);
		kfree_rcu(ltk, rcu);
	} else {
		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}

static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
				      u8 reason)
{
	struct hci_cp_le_conn_param_req_neg_reply cp;

	cp.handle = cpu_to_le16(handle);
	cp.reason = reason;

	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
		     &cp);
}

static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev, void *data,
					     struct sk_buff *skb)
{
	struct hci_ev_le_remote_conn_param_req *ev = data;
	struct hci_cp_le_conn_param_req_reply cp;
	struct hci_conn *hcon;
	u16 handle, min, max, latency, timeout;

	bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));

	handle = le16_to_cpu(ev->handle);
	min = le16_to_cpu(ev->interval_min);
	max = le16_to_cpu(ev->interval_max);
	latency = le16_to_cpu(ev->latency);
	timeout = le16_to_cpu(ev->timeout);

	hcon = hci_conn_hash_lookup_handle(hdev, handle);
	if (!hcon || hcon->state != BT_CONNECTED)
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_UNKNOWN_CONN_ID);

	if (hci_check_conn_params(min, max, latency, timeout))
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_INVALID_LL_PARAMS);

	if (hcon->role == HCI_ROLE_MASTER) {
		struct hci_conn_params *params;
		u8 store_hint;

		hci_dev_lock(hdev);

		params = hci_conn_params_lookup(hdev, &hcon->dst,
						hcon->dst_type);
		if (params) {
			params->conn_min_interval = min;
			params->conn_max_interval = max;
			params->conn_latency = latency;
			params->supervision_timeout = timeout;
			store_hint = 0x01;
		} else {
			store_hint = 0x00;
		}

		hci_dev_unlock(hdev);

		mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency, timeout);
	}

	cp.handle = ev->handle;
	cp.interval_min = ev->interval_min;
	cp.interval_max = ev->interval_max;
	cp.latency = ev->latency;
	cp.timeout = ev->timeout;
	cp.min_ce_len = 0;
	cp.max_ce_len = 0;

	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
}

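/* The hci_check_conn_params() call above enforces the spec-side sanity
 * rule that the supervision timeout must exceed
 * (1 + latency) * max_interval * 2, with the timeout in 10 ms units and
 * the intervals in 1.25 ms units. Worked example (values assumed for
 * illustration): max = 0x0028 (50 ms), latency = 4 and timeout = 0x0064
 * (1000 ms) is accepted, since (1 + 4) * 50 ms * 2 = 500 ms < 1000 ms.
 */
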
static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_ev_le_direct_adv_report *ev = data;
	int i;

	if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_DIRECT_ADV_REPORT,
				flex_array_size(ev, info, ev->num)))
		return;

	if (!ev->num)
		return;

	hci_dev_lock(hdev);

	for (i = 0; i < ev->num; i++) {
		struct hci_ev_le_direct_adv_info *info = &ev->info[i];

		process_adv_report(hdev, info->type, &info->bdaddr,
				   info->bdaddr_type, &info->direct_addr,
				   info->direct_addr_type, info->rssi, NULL, 0,
				   false);
	}

	hci_dev_unlock(hdev);
}

static void hci_le_phy_update_evt(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_le_phy_update_complete *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	if (ev->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	conn->le_tx_phy = ev->tx_phy;
	conn->le_rx_phy = ev->rx_phy;

unlock:
	hci_dev_unlock(hdev);
}

static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_evt_le_cis_established *ev = data;
	struct hci_conn *conn;
	u16 handle = __le16_to_cpu(ev->handle);

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		bt_dev_err(hdev,
			   "Unable to find connection with handle 0x%4.4x",
			   handle);
		goto unlock;
	}

	if (conn->role == HCI_ROLE_SLAVE) {
		__le32 interval;

		memset(&interval, 0, sizeof(interval));

		memcpy(&interval, ev->c_latency, sizeof(ev->c_latency));
		conn->iso_qos.in.interval = le32_to_cpu(interval);
		memcpy(&interval, ev->p_latency, sizeof(ev->p_latency));
		conn->iso_qos.out.interval = le32_to_cpu(interval);
		conn->iso_qos.in.latency = le16_to_cpu(ev->interval);
		conn->iso_qos.out.latency = le16_to_cpu(ev->interval);
		conn->iso_qos.in.sdu = le16_to_cpu(ev->c_mtu);
		conn->iso_qos.out.sdu = le16_to_cpu(ev->p_mtu);
		conn->iso_qos.in.phy = ev->c_phy;
		conn->iso_qos.out.phy = ev->p_phy;
	}

	if (!ev->status) {
		conn->state = BT_CONNECTED;
		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);
		hci_iso_setup_path(conn);
		goto unlock;
	}

	hci_connect_cfm(conn, ev->status);
	hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}

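/* ev->c_latency and ev->p_latency above are 3-octet little-endian
 * microsecond values, which is why they are widened into a zeroed
 * __le32 before conversion. A hedged equivalent using the 24-bit
 * unaligned helpers, where available, would be:
 *
 *	conn->iso_qos.in.interval = get_unaligned_le24(ev->c_latency);
 */
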
static void hci_le_reject_cis(struct hci_dev *hdev, __le16 handle)
{
	struct hci_cp_le_reject_cis cp;

	memset(&cp, 0, sizeof(cp));
	cp.handle = handle;
	cp.reason = HCI_ERROR_REJ_BAD_ADDR;
	hci_send_cmd(hdev, HCI_OP_LE_REJECT_CIS, sizeof(cp), &cp);
}

static void hci_le_accept_cis(struct hci_dev *hdev, __le16 handle)
{
	struct hci_cp_le_accept_cis cp;

	memset(&cp, 0, sizeof(cp));
	cp.handle = handle;
	hci_send_cmd(hdev, HCI_OP_LE_ACCEPT_CIS, sizeof(cp), &cp);
}

static void hci_le_cis_req_evt(struct hci_dev *hdev, void *data,
			       struct sk_buff *skb)
{
	struct hci_evt_le_cis_req *ev = data;
	u16 acl_handle, cis_handle;
	struct hci_conn *acl, *cis;
	int mask;
	__u8 flags = 0;

	acl_handle = __le16_to_cpu(ev->acl_handle);
	cis_handle = __le16_to_cpu(ev->cis_handle);

	bt_dev_dbg(hdev, "acl 0x%4.4x handle 0x%4.4x cig 0x%2.2x cis 0x%2.2x",
		   acl_handle, cis_handle, ev->cig_id, ev->cis_id);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, acl_handle);
	if (!acl)
		goto unlock;

	mask = hci_proto_connect_ind(hdev, &acl->dst, ISO_LINK, &flags);
	if (!(mask & HCI_LM_ACCEPT)) {
		hci_le_reject_cis(hdev, ev->cis_handle);
		goto unlock;
	}

	cis = hci_conn_hash_lookup_handle(hdev, cis_handle);
	if (!cis) {
		cis = hci_conn_add(hdev, ISO_LINK, &acl->dst, HCI_ROLE_SLAVE);
		if (!cis) {
			hci_le_reject_cis(hdev, ev->cis_handle);
			goto unlock;
		}
		cis->handle = cis_handle;
	}

	cis->iso_qos.cig = ev->cig_id;
	cis->iso_qos.cis = ev->cis_id;

	if (!(flags & HCI_PROTO_DEFER)) {
		hci_le_accept_cis(hdev, ev->cis_handle);
	} else {
		cis->state = BT_CONNECT2;
		hci_connect_cfm(cis, 0);
	}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_le_create_big_complete_evt(struct hci_dev *hdev, void *data,
					   struct sk_buff *skb)
{
	struct hci_evt_le_create_big_complete *ev = data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_CREATE_BIG_COMPLETE,
				flex_array_size(ev, bis_handle, ev->num_bis)))
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_big(hdev, ev->handle);
	if (!conn)
		goto unlock;

	if (ev->num_bis)
		conn->handle = __le16_to_cpu(ev->bis_handle[0]);

	if (!ev->status) {
		conn->state = BT_CONNECTED;
		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);
		hci_iso_setup_path(conn);
		goto unlock;
	}

	hci_connect_cfm(conn, ev->status);
	hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}

static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_evt_le_big_sync_estabilished *ev = data;
	struct hci_conn *bis;
	int i;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_BIG_SYNC_ESTABILISHED,
				flex_array_size(ev, bis, ev->num_bis)))
		return;

	if (ev->status)
		return;

	hci_dev_lock(hdev);

	for (i = 0; i < ev->num_bis; i++) {
		u16 handle = le16_to_cpu(ev->bis[i]);
		__le32 interval;

		bis = hci_conn_hash_lookup_handle(hdev, handle);
		if (!bis) {
			bis = hci_conn_add(hdev, ISO_LINK, BDADDR_ANY,
					   HCI_ROLE_SLAVE);
			if (!bis)
				continue;
			bis->handle = handle;
		}

		bis->iso_qos.big = ev->handle;
		memset(&interval, 0, sizeof(interval));
		memcpy(&interval, ev->latency, sizeof(ev->latency));
		bis->iso_qos.in.interval = le32_to_cpu(interval);
		/* Convert ISO Interval (1.25 ms slots) to latency (ms) */
		bis->iso_qos.in.latency = le16_to_cpu(ev->interval) * 125 / 100;
		bis->iso_qos.in.sdu = le16_to_cpu(ev->max_pdu);

		hci_connect_cfm(bis, ev->status);
	}

	hci_dev_unlock(hdev);
}

static void hci_le_big_info_adv_report_evt(struct hci_dev *hdev, void *data,
					   struct sk_buff *skb)
{
	struct hci_evt_le_big_info_adv_report *ev = data;
	int mask = hdev->link_mode;
	__u8 flags = 0;

	bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle));

	hci_dev_lock(hdev);

	mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, ISO_LINK, &flags);
	if (!(mask & HCI_LM_ACCEPT))
		hci_le_pa_term_sync(hdev, ev->sync_handle);

	hci_dev_unlock(hdev);
}

#define HCI_LE_EV_VL(_op, _func, _min_len, _max_len) \
[_op] = { \
	.func = _func, \
	.min_len = _min_len, \
	.max_len = _max_len, \
}

#define HCI_LE_EV(_op, _func, _len) \
	HCI_LE_EV_VL(_op, _func, _len, _len)

#define HCI_LE_EV_STATUS(_op, _func) \
	HCI_LE_EV(_op, _func, sizeof(struct hci_ev_status))

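/* Hedged expansion sketch for the macros above: an entry such as
 *
 *	HCI_LE_EV(HCI_EV_LE_CONN_UPDATE_COMPLETE,
 *		  hci_le_conn_update_complete_evt,
 *		  sizeof(struct hci_ev_le_conn_update_complete)),
 *
 * becomes a designated initializer at index 0x03 of the table, so a
 * lookup by subevent opcode is a direct array index and unhandled
 * subevents simply stay zeroed.
 */
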
/* Entries in this table shall have their position according to the subevent
 * opcode they handle, so use of the macros above is recommended since they
 * attempt to initialize each entry at its proper index using designated
 * initializers. That way subevents without a callback function can simply
 * be omitted.
 */
static const struct hci_le_ev {
	void (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
	u16 min_len;
	u16 max_len;
} hci_le_ev_table[U8_MAX + 1] = {
	/* [0x01 = HCI_EV_LE_CONN_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_CONN_COMPLETE, hci_le_conn_complete_evt,
		  sizeof(struct hci_ev_le_conn_complete)),
	/* [0x02 = HCI_EV_LE_ADVERTISING_REPORT] */
	HCI_LE_EV_VL(HCI_EV_LE_ADVERTISING_REPORT, hci_le_adv_report_evt,
		     sizeof(struct hci_ev_le_advertising_report),
		     HCI_MAX_EVENT_SIZE),
	/* [0x03 = HCI_EV_LE_CONN_UPDATE_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_CONN_UPDATE_COMPLETE,
		  hci_le_conn_update_complete_evt,
		  sizeof(struct hci_ev_le_conn_update_complete)),
	/* [0x04 = HCI_EV_LE_REMOTE_FEAT_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_REMOTE_FEAT_COMPLETE,
		  hci_le_remote_feat_complete_evt,
		  sizeof(struct hci_ev_le_remote_feat_complete)),
	/* [0x05 = HCI_EV_LE_LTK_REQ] */
	HCI_LE_EV(HCI_EV_LE_LTK_REQ, hci_le_ltk_request_evt,
		  sizeof(struct hci_ev_le_ltk_req)),
	/* [0x06 = HCI_EV_LE_REMOTE_CONN_PARAM_REQ] */
	HCI_LE_EV(HCI_EV_LE_REMOTE_CONN_PARAM_REQ,
		  hci_le_remote_conn_param_req_evt,
		  sizeof(struct hci_ev_le_remote_conn_param_req)),
	/* [0x0a = HCI_EV_LE_ENHANCED_CONN_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_ENHANCED_CONN_COMPLETE,
		  hci_le_enh_conn_complete_evt,
		  sizeof(struct hci_ev_le_enh_conn_complete)),
	/* [0x0b = HCI_EV_LE_DIRECT_ADV_REPORT] */
	HCI_LE_EV_VL(HCI_EV_LE_DIRECT_ADV_REPORT, hci_le_direct_adv_report_evt,
		     sizeof(struct hci_ev_le_direct_adv_report),
		     HCI_MAX_EVENT_SIZE),
	/* [0x0c = HCI_EV_LE_PHY_UPDATE_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_PHY_UPDATE_COMPLETE, hci_le_phy_update_evt,
		  sizeof(struct hci_ev_le_phy_update_complete)),
	/* [0x0d = HCI_EV_LE_EXT_ADV_REPORT] */
	HCI_LE_EV_VL(HCI_EV_LE_EXT_ADV_REPORT, hci_le_ext_adv_report_evt,
		     sizeof(struct hci_ev_le_ext_adv_report),
		     HCI_MAX_EVENT_SIZE),
	/* [0x0e = HCI_EV_LE_PA_SYNC_ESTABLISHED] */
	HCI_LE_EV(HCI_EV_LE_PA_SYNC_ESTABLISHED,
		  hci_le_pa_sync_estabilished_evt,
		  sizeof(struct hci_ev_le_pa_sync_established)),
	/* [0x12 = HCI_EV_LE_EXT_ADV_SET_TERM] */
	HCI_LE_EV(HCI_EV_LE_EXT_ADV_SET_TERM, hci_le_ext_adv_term_evt,
		  sizeof(struct hci_evt_le_ext_adv_set_term)),
	/* [0x19 = HCI_EVT_LE_CIS_ESTABLISHED] */
	HCI_LE_EV(HCI_EVT_LE_CIS_ESTABLISHED, hci_le_cis_estabilished_evt,
		  sizeof(struct hci_evt_le_cis_established)),
	/* [0x1a = HCI_EVT_LE_CIS_REQ] */
	HCI_LE_EV(HCI_EVT_LE_CIS_REQ, hci_le_cis_req_evt,
		  sizeof(struct hci_evt_le_cis_req)),
	/* [0x1b = HCI_EVT_LE_CREATE_BIG_COMPLETE] */
	HCI_LE_EV_VL(HCI_EVT_LE_CREATE_BIG_COMPLETE,
		     hci_le_create_big_complete_evt,
		     sizeof(struct hci_evt_le_create_big_complete),
		     HCI_MAX_EVENT_SIZE),
	/* [0x1d = HCI_EVT_LE_BIG_SYNC_ESTABILISHED] */
	HCI_LE_EV_VL(HCI_EVT_LE_BIG_SYNC_ESTABILISHED,
		     hci_le_big_sync_established_evt,
		     sizeof(struct hci_evt_le_big_sync_estabilished),
		     HCI_MAX_EVENT_SIZE),
	/* [0x22 = HCI_EVT_LE_BIG_INFO_ADV_REPORT] */
	HCI_LE_EV_VL(HCI_EVT_LE_BIG_INFO_ADV_REPORT,
		     hci_le_big_info_adv_report_evt,
		     sizeof(struct hci_evt_le_big_info_adv_report),
		     HCI_MAX_EVENT_SIZE),
};

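/* Sizing the table U8_MAX + 1 guarantees that indexing it with any
 * possible one-byte subevent code stays in bounds, so hci_le_meta_evt()
 * below needs no range check before the lookup.
 */
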
static void hci_le_meta_evt(struct hci_dev *hdev, void *data,
			    struct sk_buff *skb, u16 *opcode, u8 *status,
			    hci_req_complete_t *req_complete,
			    hci_req_complete_skb_t *req_complete_skb)
{
	struct hci_ev_le_meta *ev = data;
	const struct hci_le_ev *subev;

	bt_dev_dbg(hdev, "subevent 0x%2.2x", ev->subevent);

	/* Only match event if command OGF is for LE */
	if (hdev->sent_cmd &&
	    hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) == 0x08 &&
	    hci_skb_event(hdev->sent_cmd) == ev->subevent) {
		*opcode = hci_skb_opcode(hdev->sent_cmd);
		hci_req_cmd_complete(hdev, *opcode, 0x00, req_complete,
				     req_complete_skb);
	}

	subev = &hci_le_ev_table[ev->subevent];
	if (!subev->func)
		return;

	if (skb->len < subev->min_len) {
		bt_dev_err(hdev, "unexpected subevent 0x%2.2x length: %u < %u",
			   ev->subevent, skb->len, subev->min_len);
		return;
	}

	/* Just warn if the length is over max_len, since it may still be
	 * possible to partially parse the event, so leave it to the callback
	 * to decide whether that is acceptable.
	 */
	if (skb->len > subev->max_len)
		bt_dev_warn(hdev, "unexpected subevent 0x%2.2x length: %u > %u",
			    ev->subevent, skb->len, subev->max_len);

	data = hci_le_ev_skb_pull(hdev, skb, ev->subevent, subev->min_len);
	if (!data)
		return;

	subev->func(hdev, data, skb);
}

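/* The OGF match in hci_le_meta_evt() above works because an HCI opcode
 * packs the OGF into its top six bits: hci_opcode_ogf(opcode) is
 * opcode >> 10, and 0x08 is the LE Controller command group. Worked
 * example: HCI_OP_LE_SET_SCAN_ENABLE (0x200c) >> 10 == 0x08, so its
 * completion may be matched against an LE subevent.
 */
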
static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
				 u8 event, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;

	if (!skb)
		return false;

	hdr = hci_ev_skb_pull(hdev, skb, event, sizeof(*hdr));
	if (!hdr)
		return false;

	if (event) {
		if (hdr->evt != event)
			return false;
		return true;
	}

	/* Check if request ended in Command Status - no way to retrieve
	 * any extra parameters in this case.
	 */
	if (hdr->evt == HCI_EV_CMD_STATUS)
		return false;

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
			   hdr->evt);
		return false;
	}

	ev = hci_cc_skb_pull(hdev, skb, opcode, sizeof(*ev));
	if (!ev)
		return false;

	if (opcode != __le16_to_cpu(ev->opcode)) {
		BT_DBG("opcode doesn't match (0x%4.4x != 0x%4.4x)", opcode,
		       __le16_to_cpu(ev->opcode));
		return false;
	}

	return true;
}

static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
				  struct sk_buff *skb)
{
	struct hci_ev_le_advertising_info *adv;
	struct hci_ev_le_direct_adv_info *direct_adv;
	struct hci_ev_le_ext_adv_info *ext_adv;
	const struct hci_ev_conn_complete *conn_complete = (void *)skb->data;
	const struct hci_ev_conn_request *conn_request = (void *)skb->data;

	hci_dev_lock(hdev);

	/* If we are currently suspended and this is the first BT event seen,
	 * save the wake reason associated with the event.
	 */
	if (!hdev->suspended || hdev->wake_reason)
		goto unlock;

	/* Default to remote wake. Values for wake_reason are documented in the
	 * BlueZ mgmt api docs.
	 */
	hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE;

	/* Once configured for remote wakeup, we should only wake up for
	 * reconnections. It's useful to see which device is waking us up so
	 * keep track of the bdaddr of the connection event that woke us up.
	 */
	if (event == HCI_EV_CONN_REQUEST) {
		bacpy(&hdev->wake_addr, &conn_request->bdaddr);
		hdev->wake_addr_type = BDADDR_BREDR;
	} else if (event == HCI_EV_CONN_COMPLETE) {
		bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
		hdev->wake_addr_type = BDADDR_BREDR;
	} else if (event == HCI_EV_LE_META) {
		struct hci_ev_le_meta *le_ev = (void *)skb->data;
		u8 subevent = le_ev->subevent;
		u8 *ptr = &skb->data[sizeof(*le_ev)];
		u8 num_reports = *ptr;

		if ((subevent == HCI_EV_LE_ADVERTISING_REPORT ||
		     subevent == HCI_EV_LE_DIRECT_ADV_REPORT ||
		     subevent == HCI_EV_LE_EXT_ADV_REPORT) &&
		    num_reports) {
			adv = (void *)(ptr + 1);
			direct_adv = (void *)(ptr + 1);
			ext_adv = (void *)(ptr + 1);

			switch (subevent) {
			case HCI_EV_LE_ADVERTISING_REPORT:
				bacpy(&hdev->wake_addr, &adv->bdaddr);
				hdev->wake_addr_type = adv->bdaddr_type;
				break;
			case HCI_EV_LE_DIRECT_ADV_REPORT:
				bacpy(&hdev->wake_addr, &direct_adv->bdaddr);
				hdev->wake_addr_type = direct_adv->bdaddr_type;
				break;
			case HCI_EV_LE_EXT_ADV_REPORT:
				bacpy(&hdev->wake_addr, &ext_adv->bdaddr);
				hdev->wake_addr_type = ext_adv->bdaddr_type;
				break;
			}
		}
	} else {
		hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED;
	}

unlock:
	hci_dev_unlock(hdev);
}

#define HCI_EV_VL(_op, _func, _min_len, _max_len) \
[_op] = { \
	.req = false, \
	.func = _func, \
	.min_len = _min_len, \
	.max_len = _max_len, \
}

#define HCI_EV(_op, _func, _len) \
	HCI_EV_VL(_op, _func, _len, _len)

#define HCI_EV_STATUS(_op, _func) \
	HCI_EV(_op, _func, sizeof(struct hci_ev_status))

#define HCI_EV_REQ_VL(_op, _func, _min_len, _max_len) \
[_op] = { \
	.req = true, \
	.func_req = _func, \
	.min_len = _min_len, \
	.max_len = _max_len, \
}

#define HCI_EV_REQ(_op, _func, _len) \
	HCI_EV_REQ_VL(_op, _func, _len, _len)

/* Entries in this table shall have their position according to the event
 * opcode they handle, so use of the macros above is recommended since they
 * attempt to initialize each entry at its proper index using designated
 * initializers. That way events without a callback function don't have to
 * be entered at all.
 */
static const struct hci_ev {
	bool req;
	union {
		void (*func)(struct hci_dev *hdev, void *data,
			     struct sk_buff *skb);
		void (*func_req)(struct hci_dev *hdev, void *data,
				 struct sk_buff *skb, u16 *opcode, u8 *status,
				 hci_req_complete_t *req_complete,
				 hci_req_complete_skb_t *req_complete_skb);
	};
	u16 min_len;
	u16 max_len;
} hci_ev_table[U8_MAX + 1] = {
	/* [0x01 = HCI_EV_INQUIRY_COMPLETE] */
	HCI_EV_STATUS(HCI_EV_INQUIRY_COMPLETE, hci_inquiry_complete_evt),
	/* [0x02 = HCI_EV_INQUIRY_RESULT] */
	HCI_EV_VL(HCI_EV_INQUIRY_RESULT, hci_inquiry_result_evt,
		  sizeof(struct hci_ev_inquiry_result), HCI_MAX_EVENT_SIZE),
	/* [0x03 = HCI_EV_CONN_COMPLETE] */
	HCI_EV(HCI_EV_CONN_COMPLETE, hci_conn_complete_evt,
	       sizeof(struct hci_ev_conn_complete)),
	/* [0x04 = HCI_EV_CONN_REQUEST] */
	HCI_EV(HCI_EV_CONN_REQUEST, hci_conn_request_evt,
	       sizeof(struct hci_ev_conn_request)),
	/* [0x05 = HCI_EV_DISCONN_COMPLETE] */
	HCI_EV(HCI_EV_DISCONN_COMPLETE, hci_disconn_complete_evt,
	       sizeof(struct hci_ev_disconn_complete)),
	/* [0x06 = HCI_EV_AUTH_COMPLETE] */
	HCI_EV(HCI_EV_AUTH_COMPLETE, hci_auth_complete_evt,
	       sizeof(struct hci_ev_auth_complete)),
	/* [0x07 = HCI_EV_REMOTE_NAME] */
	HCI_EV(HCI_EV_REMOTE_NAME, hci_remote_name_evt,
	       sizeof(struct hci_ev_remote_name)),
	/* [0x08 = HCI_EV_ENCRYPT_CHANGE] */
	HCI_EV(HCI_EV_ENCRYPT_CHANGE, hci_encrypt_change_evt,
	       sizeof(struct hci_ev_encrypt_change)),
	/* [0x09 = HCI_EV_CHANGE_LINK_KEY_COMPLETE] */
	HCI_EV(HCI_EV_CHANGE_LINK_KEY_COMPLETE,
	       hci_change_link_key_complete_evt,
	       sizeof(struct hci_ev_change_link_key_complete)),
	/* [0x0b = HCI_EV_REMOTE_FEATURES] */
	HCI_EV(HCI_EV_REMOTE_FEATURES, hci_remote_features_evt,
	       sizeof(struct hci_ev_remote_features)),
	/* [0x0e = HCI_EV_CMD_COMPLETE] */
	HCI_EV_REQ_VL(HCI_EV_CMD_COMPLETE, hci_cmd_complete_evt,
		      sizeof(struct hci_ev_cmd_complete), HCI_MAX_EVENT_SIZE),
	/* [0x0f = HCI_EV_CMD_STATUS] */
	HCI_EV_REQ(HCI_EV_CMD_STATUS, hci_cmd_status_evt,
		   sizeof(struct hci_ev_cmd_status)),
	/* [0x10 = HCI_EV_HARDWARE_ERROR] */
	HCI_EV(HCI_EV_HARDWARE_ERROR, hci_hardware_error_evt,
	       sizeof(struct hci_ev_hardware_error)),
	/* [0x12 = HCI_EV_ROLE_CHANGE] */
	HCI_EV(HCI_EV_ROLE_CHANGE, hci_role_change_evt,
	       sizeof(struct hci_ev_role_change)),
	/* [0x13 = HCI_EV_NUM_COMP_PKTS] */
	HCI_EV_VL(HCI_EV_NUM_COMP_PKTS, hci_num_comp_pkts_evt,
		  sizeof(struct hci_ev_num_comp_pkts), HCI_MAX_EVENT_SIZE),
	/* [0x14 = HCI_EV_MODE_CHANGE] */
	HCI_EV(HCI_EV_MODE_CHANGE, hci_mode_change_evt,
	       sizeof(struct hci_ev_mode_change)),
	/* [0x16 = HCI_EV_PIN_CODE_REQ] */
	HCI_EV(HCI_EV_PIN_CODE_REQ, hci_pin_code_request_evt,
	       sizeof(struct hci_ev_pin_code_req)),
	/* [0x17 = HCI_EV_LINK_KEY_REQ] */
	HCI_EV(HCI_EV_LINK_KEY_REQ, hci_link_key_request_evt,
	       sizeof(struct hci_ev_link_key_req)),
	/* [0x18 = HCI_EV_LINK_KEY_NOTIFY] */
	HCI_EV(HCI_EV_LINK_KEY_NOTIFY, hci_link_key_notify_evt,
	       sizeof(struct hci_ev_link_key_notify)),
	/* [0x1c = HCI_EV_CLOCK_OFFSET] */
	HCI_EV(HCI_EV_CLOCK_OFFSET, hci_clock_offset_evt,
	       sizeof(struct hci_ev_clock_offset)),
	/* [0x1d = HCI_EV_PKT_TYPE_CHANGE] */
	HCI_EV(HCI_EV_PKT_TYPE_CHANGE, hci_pkt_type_change_evt,
	       sizeof(struct hci_ev_pkt_type_change)),
	/* [0x20 = HCI_EV_PSCAN_REP_MODE] */
	HCI_EV(HCI_EV_PSCAN_REP_MODE, hci_pscan_rep_mode_evt,
	       sizeof(struct hci_ev_pscan_rep_mode)),
	/* [0x22 = HCI_EV_INQUIRY_RESULT_WITH_RSSI] */
	HCI_EV_VL(HCI_EV_INQUIRY_RESULT_WITH_RSSI,
		  hci_inquiry_result_with_rssi_evt,
		  sizeof(struct hci_ev_inquiry_result_rssi),
		  HCI_MAX_EVENT_SIZE),
	/* [0x23 = HCI_EV_REMOTE_EXT_FEATURES] */
	HCI_EV(HCI_EV_REMOTE_EXT_FEATURES, hci_remote_ext_features_evt,
	       sizeof(struct hci_ev_remote_ext_features)),
	/* [0x2c = HCI_EV_SYNC_CONN_COMPLETE] */
	HCI_EV(HCI_EV_SYNC_CONN_COMPLETE, hci_sync_conn_complete_evt,
	       sizeof(struct hci_ev_sync_conn_complete)),
	/* [0x2d = HCI_EV_EXTENDED_INQUIRY_RESULT] */
	HCI_EV_VL(HCI_EV_EXTENDED_INQUIRY_RESULT,
		  hci_extended_inquiry_result_evt,
		  sizeof(struct hci_ev_ext_inquiry_result), HCI_MAX_EVENT_SIZE),
	/* [0x30 = HCI_EV_KEY_REFRESH_COMPLETE] */
	HCI_EV(HCI_EV_KEY_REFRESH_COMPLETE, hci_key_refresh_complete_evt,
	       sizeof(struct hci_ev_key_refresh_complete)),
	/* [0x31 = HCI_EV_IO_CAPA_REQUEST] */
	HCI_EV(HCI_EV_IO_CAPA_REQUEST, hci_io_capa_request_evt,
	       sizeof(struct hci_ev_io_capa_request)),
	/* [0x32 = HCI_EV_IO_CAPA_REPLY] */
	HCI_EV(HCI_EV_IO_CAPA_REPLY, hci_io_capa_reply_evt,
	       sizeof(struct hci_ev_io_capa_reply)),
	/* [0x33 = HCI_EV_USER_CONFIRM_REQUEST] */
	HCI_EV(HCI_EV_USER_CONFIRM_REQUEST, hci_user_confirm_request_evt,
	       sizeof(struct hci_ev_user_confirm_req)),
	/* [0x34 = HCI_EV_USER_PASSKEY_REQUEST] */
	HCI_EV(HCI_EV_USER_PASSKEY_REQUEST, hci_user_passkey_request_evt,
	       sizeof(struct hci_ev_user_passkey_req)),
	/* [0x35 = HCI_EV_REMOTE_OOB_DATA_REQUEST] */
	HCI_EV(HCI_EV_REMOTE_OOB_DATA_REQUEST, hci_remote_oob_data_request_evt,
	       sizeof(struct hci_ev_remote_oob_data_request)),
	/* [0x36 = HCI_EV_SIMPLE_PAIR_COMPLETE] */
	HCI_EV(HCI_EV_SIMPLE_PAIR_COMPLETE, hci_simple_pair_complete_evt,
	       sizeof(struct hci_ev_simple_pair_complete)),
	/* [0x3b = HCI_EV_USER_PASSKEY_NOTIFY] */
	HCI_EV(HCI_EV_USER_PASSKEY_NOTIFY, hci_user_passkey_notify_evt,
	       sizeof(struct hci_ev_user_passkey_notify)),
	/* [0x3c = HCI_EV_KEYPRESS_NOTIFY] */
	HCI_EV(HCI_EV_KEYPRESS_NOTIFY, hci_keypress_notify_evt,
	       sizeof(struct hci_ev_keypress_notify)),
	/* [0x3d = HCI_EV_REMOTE_HOST_FEATURES] */
	HCI_EV(HCI_EV_REMOTE_HOST_FEATURES, hci_remote_host_features_evt,
	       sizeof(struct hci_ev_remote_host_features)),
	/* [0x3e = HCI_EV_LE_META] */
	HCI_EV_REQ_VL(HCI_EV_LE_META, hci_le_meta_evt,
		      sizeof(struct hci_ev_le_meta), HCI_MAX_EVENT_SIZE),
#if IS_ENABLED(CONFIG_BT_HS)
	/* [0x40 = HCI_EV_PHY_LINK_COMPLETE] */
	HCI_EV(HCI_EV_PHY_LINK_COMPLETE, hci_phy_link_complete_evt,
	       sizeof(struct hci_ev_phy_link_complete)),
	/* [0x41 = HCI_EV_CHANNEL_SELECTED] */
	HCI_EV(HCI_EV_CHANNEL_SELECTED, hci_chan_selected_evt,
	       sizeof(struct hci_ev_channel_selected)),
	/* [0x42 = HCI_EV_DISCONN_PHY_LINK_COMPLETE] */
	HCI_EV(HCI_EV_DISCONN_PHY_LINK_COMPLETE,
	       hci_disconn_phylink_complete_evt,
	       sizeof(struct hci_ev_disconn_phy_link_complete)),
	/* [0x45 = HCI_EV_LOGICAL_LINK_COMPLETE] */
	HCI_EV(HCI_EV_LOGICAL_LINK_COMPLETE, hci_loglink_complete_evt,
	       sizeof(struct hci_ev_logical_link_complete)),
	/* [0x46 = HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE] */
	HCI_EV(HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE,
	       hci_disconn_loglink_complete_evt,
	       sizeof(struct hci_ev_disconn_logical_link_complete)),
#endif
	/* [0x48 = HCI_EV_NUM_COMP_BLOCKS] */
	HCI_EV(HCI_EV_NUM_COMP_BLOCKS, hci_num_comp_blocks_evt,
	       sizeof(struct hci_ev_num_comp_blocks)),
	/* [0xff = HCI_EV_VENDOR] */
	HCI_EV_VL(HCI_EV_VENDOR, msft_vendor_evt, 0, HCI_MAX_EVENT_SIZE),
};

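/* The 0xff entry above deliberately allows a 0..HCI_MAX_EVENT_SIZE
 * length range: vendor events carry vendor-defined payloads with no
 * fixed format, so msft_vendor_evt() is expected to validate the
 * Microsoft extension payload itself.
 */
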
static void hci_event_func(struct hci_dev *hdev, u8 event, struct sk_buff *skb,
			   u16 *opcode, u8 *status,
			   hci_req_complete_t *req_complete,
			   hci_req_complete_skb_t *req_complete_skb)
{
	const struct hci_ev *ev = &hci_ev_table[event];
	void *data;

	if (!ev->func)
		return;

	if (skb->len < ev->min_len) {
		bt_dev_err(hdev, "unexpected event 0x%2.2x length: %u < %u",
			   event, skb->len, ev->min_len);
		return;
	}

	/* Just warn if the length is over max_len, since it may still be
	 * possible to partially parse the event, so leave it to the callback
	 * to decide whether that is acceptable.
	 */
	if (skb->len > ev->max_len)
		bt_dev_warn_ratelimited(hdev,
					"unexpected event 0x%2.2x length: %u > %u",
					event, skb->len, ev->max_len);

	data = hci_ev_skb_pull(hdev, skb, event, ev->min_len);
	if (!data)
		return;

	if (ev->req)
		ev->func_req(hdev, data, skb, opcode, status, req_complete,
			     req_complete_skb);
	else
		ev->func(hdev, data, skb);
}

void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	hci_req_complete_t req_complete = NULL;
	hci_req_complete_skb_t req_complete_skb = NULL;
	struct sk_buff *orig_skb = NULL;
	u8 status = 0, event, req_evt = 0;
	u16 opcode = HCI_OP_NOP;

	if (skb->len < sizeof(*hdr)) {
		bt_dev_err(hdev, "Malformed HCI Event");
		goto done;
	}

	kfree_skb(hdev->recv_event);
	hdev->recv_event = skb_clone(skb, GFP_KERNEL);

	event = hdr->evt;
	if (!event) {
		bt_dev_warn(hdev, "Received unexpected HCI Event 0x%2.2x",
			    event);
		goto done;
	}

	/* Only match event if command OGF is not for LE */
	if (hdev->sent_cmd &&
	    hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) != 0x08 &&
	    hci_skb_event(hdev->sent_cmd) == event) {
		hci_req_cmd_complete(hdev, hci_skb_opcode(hdev->sent_cmd),
				     status, &req_complete, &req_complete_skb);
		req_evt = event;
	}

	/* If it looks like we might end up having to call
	 * req_complete_skb, store a pristine copy of the skb since the
	 * various handlers may modify the original one through
	 * skb_pull() calls, etc.
	 */
	if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
	    event == HCI_EV_CMD_COMPLETE)
		orig_skb = skb_clone(skb, GFP_KERNEL);

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* Store wake reason if we're suspended */
	hci_store_wake_reason(hdev, event, skb);

	bt_dev_dbg(hdev, "event 0x%2.2x", event);

	hci_event_func(hdev, event, skb, &opcode, &status, &req_complete,
		       &req_complete_skb);

	if (req_complete) {
		req_complete(hdev, status, opcode);
	} else if (req_complete_skb) {
		if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
			kfree_skb(orig_skb);
			orig_skb = NULL;
		}
		req_complete_skb(hdev, status, opcode, orig_skb);
	}

done:
	kfree_skb(orig_skb);
	kfree_skb(skb);
	hdev->stat.evt_rx++;
}