/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI event handling. */

#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "a2mp.h"
#include "amp.h"
#include "smp.h"
#include "msft.h"
#include "eir.h"

#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"

#define secs_to_jiffies(_secs) msecs_to_jiffies((_secs) * 1000)

/* Handle HCI Event packets */

static void *hci_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
			     u8 ev, size_t len)
{
	void *data;

	data = skb_pull_data(skb, len);
	if (!data)
		bt_dev_err(hdev, "Malformed Event: 0x%2.2x", ev);

	return data;
}

static void *hci_cc_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
			     u16 op, size_t len)
{
	void *data;

	data = skb_pull_data(skb, len);
	if (!data)
		bt_dev_err(hdev, "Malformed Command Complete: 0x%4.4x", op);

	return data;
}

static void *hci_le_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
				u8 ev, size_t len)
{
	void *data;

	data = skb_pull_data(skb, len);
	if (!data)
		bt_dev_err(hdev, "Malformed LE Event: 0x%2.2x", ev);

	return data;
}
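/* Each hci_cc_* handler below parses one Command Complete response and
 * returns its HCI status byte, which the event dispatcher then
 * propagates to whatever request is waiting on the command.
 */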
static u8 hci_cc_inquiry_cancel(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	/* It is possible that we receive an Inquiry Complete event right
	 * before we receive the Inquiry Cancel Command Complete event, in
	 * which case the latter event should have a status of Command
	 * Disallowed (0x0c). This should not be treated as an error, since
	 * we actually achieve what Inquiry Cancel wants to achieve,
	 * which is to end the last Inquiry session.
	 */
	if (rp->status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
		bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
		rp->status = 0x00;
	}

	if (rp->status)
		return rp->status;

	clear_bit(HCI_INQUIRY, &hdev->flags);
	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	hci_dev_lock(hdev);
	/* Set discovery state to stopped if we're not doing LE active
	 * scanning.
	 */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
	    hdev->le_scan_type != LE_SCAN_ACTIVE)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);

	return rp->status;
}

static u8 hci_cc_periodic_inq(struct hci_dev *hdev, void *data,
			      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);

	return rp->status;
}

static u8 hci_cc_exit_periodic_inq(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);

	hci_conn_check_pending(hdev);

	return rp->status;
}

static u8 hci_cc_remote_name_req_cancel(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	return rp->status;
}

static u8 hci_cc_role_discovery(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_rp_role_discovery *rp = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->role = rp->role;

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_link_policy(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_rp_read_link_policy *rp = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = __le16_to_cpu(rp->policy);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_write_link_policy(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = data;
	struct hci_conn *conn;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);

	return rp->status;
}
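/* A Command Complete event does not echo the parameters of the command
 * it completes, so handlers that need them (e.g. the value that was
 * just written) read back the sent command via hci_sent_cmd_data().
 */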
static u8 hci_cc_read_def_link_policy(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_read_def_link_policy *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->link_policy = __le16_to_cpu(rp->policy);

	return rp->status;
}

static u8 hci_cc_write_def_link_policy(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
	if (!sent)
		return rp->status;

	hdev->link_policy = get_unaligned_le16(sent);

	return rp->status;
}

static u8 hci_cc_reset(struct hci_dev *hdev, void *data, struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	clear_bit(HCI_RESET, &hdev->flags);

	if (rp->status)
		return rp->status;

	/* Reset all non-persistent flags */
	hci_dev_clear_volatile_flags(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;

	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
	hdev->scan_rsp_data_len = 0;

	hdev->le_scan_type = LE_SCAN_PASSIVE;

	hdev->ssp_debug_mode = 0;

	hci_bdaddr_list_clear(&hdev->le_accept_list);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);

	return rp->status;
}

static u8 hci_cc_read_stored_link_key(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_read_stored_link_key *rp = data;
	struct hci_cp_read_stored_link_key *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
	if (!sent)
		return rp->status;

	if (!rp->status && sent->read_all == 0x01) {
		hdev->stored_max_keys = le16_to_cpu(rp->max_keys);
		hdev->stored_num_keys = le16_to_cpu(rp->num_keys);
	}

	return rp->status;
}

static u8 hci_cc_delete_stored_link_key(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_delete_stored_link_key *rp = data;
	u16 num_keys;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	num_keys = le16_to_cpu(rp->num_keys);

	if (num_keys <= hdev->stored_num_keys)
		hdev->stored_num_keys -= num_keys;
	else
		hdev->stored_num_keys = 0;

	return rp->status;
}

static u8 hci_cc_write_local_name(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_local_name_complete(hdev, sent, rp->status);
	else if (!rp->status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_local_name(struct hci_dev *hdev, void *data,
				 struct sk_buff *skb)
{
	struct hci_rp_read_local_name *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG))
		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);

	return rp->status;
}
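/* The write handlers below mirror the controller's authentication and
 * encryption settings into the HCI_AUTH and HCI_ENCRYPT device flags.
 */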
static u8 hci_cc_write_auth_enable(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (!rp->status) {
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_auth_enable_complete(hdev, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_write_encrypt_mode(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u8 param;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
	if (!sent)
		return rp->status;

	param = *((__u8 *) sent);

	if (param)
		set_bit(HCI_ENCRYPT, &hdev->flags);
	else
		clear_bit(HCI_ENCRYPT, &hdev->flags);

	return rp->status;
}

static u8 hci_cc_write_scan_enable(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u8 param;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return rp->status;

	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	if (rp->status) {
		hdev->discov_timeout = 0;
		goto done;
	}

	if (param & SCAN_INQUIRY)
		set_bit(HCI_ISCAN, &hdev->flags);
	else
		clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_PAGE)
		set_bit(HCI_PSCAN, &hdev->flags);
	else
		clear_bit(HCI_PSCAN, &hdev->flags);

done:
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_set_event_filter(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_set_event_filter *cp;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_SET_EVENT_FLT);
	if (!sent)
		return rp->status;

	cp = (struct hci_cp_set_event_filter *)sent;

	if (cp->flt_type == HCI_FLT_CLEAR_ALL)
		hci_dev_clear_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
	else
		hci_dev_set_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);

	return rp->status;
}

static u8 hci_cc_read_class_of_dev(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_read_class_of_dev *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	memcpy(hdev->dev_class, rp->dev_class, 3);

	bt_dev_dbg(hdev, "class 0x%.2x%.2x%.2x", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);

	return rp->status;
}

static u8 hci_cc_write_class_of_dev(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (!rp->status)
		memcpy(hdev->dev_class, sent, 3);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_class_of_dev_complete(hdev, sent, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}
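/* Voice setting changes are forwarded to the driver through the notify
 * callback so that SCO audio routing can be reconfigured.
 */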
static u8 hci_cc_read_voice_setting(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_read_voice_setting *rp = data;
	__u16 setting;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	setting = __le16_to_cpu(rp->voice_setting);

	if (hdev->voice_setting == setting)
		return rp->status;

	hdev->voice_setting = setting;

	bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);

	return rp->status;
}

static u8 hci_cc_write_voice_setting(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u16 setting;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
	if (!sent)
		return rp->status;

	setting = get_unaligned_le16(sent);

	if (hdev->voice_setting == setting)
		return rp->status;

	hdev->voice_setting = setting;

	bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);

	return rp->status;
}

static u8 hci_cc_read_num_supported_iac(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_read_num_supported_iac *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->num_iac = rp->num_iac;

	bt_dev_dbg(hdev, "num iac %d", hdev->num_iac);

	return rp->status;
}

static u8 hci_cc_write_ssp_mode(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_write_ssp_mode *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (!rp->status) {
		if (sent->mode)
			hdev->features[1][0] |= LMP_HOST_SSP;
		else
			hdev->features[1][0] &= ~LMP_HOST_SSP;
	}

	if (!rp->status) {
		if (sent->mode)
			hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
	}

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_write_sc_support(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_write_sc_support *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (!rp->status) {
		if (sent->support)
			hdev->features[1][0] |= LMP_HOST_SC;
		else
			hdev->features[1][0] &= ~LMP_HOST_SC;
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT) && !rp->status) {
		if (sent->support)
			hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
	}

	hci_dev_unlock(hdev);

	return rp->status;
}
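/* Local version and supported-commands information is only cached
 * while the controller is in HCI_SETUP or HCI_CONFIG; afterwards it is
 * treated as fixed.
 */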
static u8 hci_cc_read_local_version(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_read_local_version *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hdev->hci_ver = rp->hci_ver;
		hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
		hdev->lmp_ver = rp->lmp_ver;
		hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
	}

	return rp->status;
}

static u8 hci_cc_read_enc_key_size(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_read_enc_key_size *rp = data;
	struct hci_conn *conn;
	u16 handle;
	u8 status = rp->status;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	handle = le16_to_cpu(rp->handle);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		status = 0xFF;
		goto done;
	}

	/* While unexpected, the read_enc_key_size command may fail. The most
	 * secure approach is to then assume the key size is 0 to force a
	 * disconnection.
	 */
	if (status) {
		bt_dev_err(hdev, "failed to read key size for handle %u",
			   handle);
		conn->enc_key_size = 0;
	} else {
		conn->enc_key_size = rp->key_size;
		status = 0;
	}

	hci_encrypt_cfm(conn, 0);

done:
	hci_dev_unlock(hdev);

	return status;
}

static u8 hci_cc_read_local_commands(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_local_commands *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG))
		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));

	return rp->status;
}

static u8 hci_cc_read_auth_payload_timeout(struct hci_dev *hdev, void *data,
					   struct sk_buff *skb)
{
	struct hci_rp_read_auth_payload_to *rp = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_write_auth_payload_timeout(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_rp_write_auth_payload_to *rp = data;
	struct hci_conn *conn;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (!conn) {
		rp->status = 0xff;
		goto unlock;
	}

	if (!rp->status)
		conn->auth_payload_timeout = get_unaligned_le16(sent + 2);

	hci_encrypt_cfm(conn, 0);

unlock:
	hci_dev_unlock(hdev);

	return rp->status;
}
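/* The LMP feature bits determine which ACL packet types and (e)SCO air
 * settings the link may use, so the defaults below are derived
 * directly from them.
 */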
static u8 hci_cc_read_local_features(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device.
	 */

	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);

	return rp->status;
}

static u8 hci_cc_read_local_ext_features(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_rp_read_local_ext_features *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (hdev->max_page < rp->max_page)
		hdev->max_page = rp->max_page;

	if (rp->page < HCI_MAX_PAGES)
		memcpy(hdev->features[rp->page], rp->features, 8);

	return rp->status;
}

static u8 hci_cc_read_flow_control_mode(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_read_flow_control_mode *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->flow_ctl_mode = rp->mode;

	return rp->status;
}

static u8 hci_cc_read_buffer_size(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
		hdev->sco_mtu = 64;
		hdev->sco_pkts = 8;
	}

	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);

	return rp->status;
}

static u8 hci_cc_read_bd_addr(struct hci_dev *hdev, void *data,
			      struct sk_buff *skb)
{
	struct hci_rp_read_bd_addr *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (test_bit(HCI_INIT, &hdev->flags))
		bacpy(&hdev->bdaddr, &rp->bdaddr);

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		bacpy(&hdev->setup_addr, &rp->bdaddr);

	return rp->status;
}

static u8 hci_cc_read_local_pairing_opts(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_rp_read_local_pairing_opts *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hdev->pairing_opts = rp->pairing_opts;
		hdev->max_enc_key_size = rp->max_key_size;
	}

	return rp->status;
}
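/* Page scan parameters read back during HCI_INIT seed the cached
 * values; later host-initiated writes keep them up to date via the
 * write handlers below.
 */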
static u8 hci_cc_read_page_scan_activity(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_activity *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (test_bit(HCI_INIT, &hdev->flags)) {
		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
		hdev->page_scan_window = __le16_to_cpu(rp->window);
	}

	return rp->status;
}

static u8 hci_cc_write_page_scan_activity(struct hci_dev *hdev, void *data,
					  struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_write_page_scan_activity *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
	if (!sent)
		return rp->status;

	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
	hdev->page_scan_window = __le16_to_cpu(sent->window);

	return rp->status;
}

static u8 hci_cc_read_page_scan_type(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_type *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->page_scan_type = rp->type;

	return rp->status;
}

static u8 hci_cc_write_page_scan_type(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	u8 *type;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
	if (type)
		hdev->page_scan_type = *type;

	return rp->status;
}

static u8 hci_cc_read_data_block_size(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_read_data_block_size *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
	hdev->block_len = __le16_to_cpu(rp->block_len);
	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);

	hdev->block_cnt = hdev->num_blocks;

	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
	       hdev->block_cnt, hdev->block_len);

	return rp->status;
}

static u8 hci_cc_read_clock(struct hci_dev *hdev, void *data,
			    struct sk_buff *skb)
{
	struct hci_rp_read_clock *rp = data;
	struct hci_cp_read_clock *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!cp)
		goto unlock;

	if (cp->which == 0x00) {
		hdev->clock = le32_to_cpu(rp->clock);
		goto unlock;
	}

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		conn->clock = le32_to_cpu(rp->clock);
		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
	}

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}
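/* These AMP capability values describe this controller's bandwidth,
 * latency and PAL limits; they are consumed by the AMP/A2MP code.
 */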
static u8 hci_cc_read_local_amp_info(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_info *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->amp_status = rp->amp_status;
	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
	hdev->amp_type = rp->amp_type;
	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);

	return rp->status;
}

static u8 hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_rp_read_inq_rsp_tx_power *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->inq_tx_power = rp->tx_power;

	return rp->status;
}

static u8 hci_cc_read_def_err_data_reporting(struct hci_dev *hdev, void *data,
					     struct sk_buff *skb)
{
	struct hci_rp_read_def_err_data_reporting *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->err_data_reporting = rp->err_data_reporting;

	return rp->status;
}

static u8 hci_cc_write_def_err_data_reporting(struct hci_dev *hdev, void *data,
					      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_write_def_err_data_reporting *cp;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
	if (!cp)
		return rp->status;

	hdev->err_data_reporting = cp->err_data_reporting;

	return rp->status;
}

static u8 hci_cc_pin_code_reply(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}

static u8 hci_cc_pin_code_neg_reply(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_pin_code_neg_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
						 rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}
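/* Per the core specification, an LE buffer size of zero would mean the
 * controller has no dedicated LE buffers and shares the BR/EDR ACL
 * ones instead.
 */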
static u8 hci_cc_le_read_buffer_size(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_le_read_buffer_size *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
	hdev->le_pkts = rp->le_max_pkt;

	hdev->le_cnt = hdev->le_pkts;

	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);

	return rp->status;
}

static u8 hci_cc_le_read_local_features(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_local_features *rp = data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return rp->status;

	memcpy(hdev->le_features, rp->features, 8);

	return rp->status;
}

static u8 hci_cc_le_read_adv_tx_power(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_le_read_adv_tx_power *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->adv_tx_power = rp->tx_power;

	return rp->status;
}

static u8 hci_cc_user_confirm_reply(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
						 rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_user_confirm_neg_reply(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_user_passkey_reply(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
						 0, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_user_passkey_neg_reply(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_local_oob_data(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_data *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	return rp->status;
}

static u8 hci_cc_read_local_oob_ext_data(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_ext_data *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	return rp->status;
}
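/* When the new random address is our own RPA, re-arm the expiry work
 * so that a fresh RPA is generated before the current one goes stale.
 */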
static u8 hci_cc_le_set_random_addr(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	bdaddr_t *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	bacpy(&hdev->random_addr, sent);

	if (!bacmp(&hdev->rpa, sent)) {
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired,
				   secs_to_jiffies(hdev->rpa_timeout));
	}

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_set_default_phy(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_default_phy *cp;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	hdev->le_tx_def_phys = cp->tx_phys;
	hdev->le_rx_def_phys = cp->rx_phys;

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_adv_set_rand_addr *cp;
	struct adv_info *adv;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
	/* Update only in case of an adv instance, since handle 0x00 shall
	 * use HCI_OP_LE_SET_RANDOM_ADDR, which covers both extended and
	 * non-extended advertising.
	 */
	if (!cp || !cp->handle)
		return rp->status;

	hci_dev_lock(hdev);

	adv = hci_find_adv_instance(hdev, cp->handle);
	if (adv) {
		bacpy(&adv->random_addr, &cp->bdaddr);
		if (!bacmp(&hdev->rpa, &cp->bdaddr)) {
			adv->rpa_expired = false;
			queue_delayed_work(hdev->workqueue,
					   &adv->rpa_expired_cb,
					   secs_to_jiffies(hdev->rpa_timeout));
		}
	}

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_remove_adv_set(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	u8 *instance;
	int err;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	instance = hci_sent_cmd_data(hdev, HCI_OP_LE_REMOVE_ADV_SET);
	if (!instance)
		return rp->status;

	hci_dev_lock(hdev);

	err = hci_remove_adv_instance(hdev, *instance);
	if (!err)
		mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd), hdev,
					 *instance);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_clear_adv_sets(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct adv_info *adv, *n;
	int err;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (!hci_sent_cmd_data(hdev, HCI_OP_LE_CLEAR_ADV_SETS))
		return rp->status;

	hci_dev_lock(hdev);

	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
		u8 instance = adv->instance;

		err = hci_remove_adv_instance(hdev, instance);
		if (!err)
			mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd),
						 hdev, instance);
	}

	hci_dev_unlock(hdev);

	return rp->status;
}
static u8 hci_cc_le_read_transmit_power(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_transmit_power *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->min_le_tx_power = rp->min_le_tx_power;
	hdev->max_le_tx_power = rp->max_le_tx_power;

	return rp->status;
}

static u8 hci_cc_le_set_privacy_mode(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_privacy_mode *cp;
	struct hci_conn_params *params;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PRIVACY_MODE);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	params = hci_conn_params_lookup(hdev, &cp->bdaddr, cp->bdaddr_type);
	if (params)
		params->privacy_mode = cp->mode;

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_set_adv_enable(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u8 *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	/* If we're doing connection initiation as peripheral, set a
	 * timeout in case something goes wrong.
	 */
	if (*sent) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_adv_enable *cp;
	struct hci_cp_ext_adv_set *set;
	struct adv_info *adv = NULL, *n;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
	if (!cp)
		return rp->status;

	set = (void *)cp->data;

	hci_dev_lock(hdev);

	if (cp->num_of_sets)
		adv = hci_find_adv_instance(hdev, set->handle);

	if (cp->enable) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		if (adv)
			adv->enabled = true;

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		if (cp->num_of_sets) {
			if (adv)
				adv->enabled = false;

			/* If just one instance was disabled, check if there
			 * are any other instances enabled before clearing
			 * HCI_LE_ADV.
			 */
			list_for_each_entry_safe(adv, n, &hdev->adv_instances,
						 list) {
				if (adv->enabled)
					goto unlock;
			}
		} else {
			/* All instances shall be considered disabled */
			list_for_each_entry_safe(adv, n, &hdev->adv_instances,
						 list)
				adv->enabled = false;
		}

		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}
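/* The cached scan type records whether we scan actively; active scans
 * solicit scan responses, which changes how advertising reports are
 * merged and reported later on.
 */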
static u8 hci_cc_le_set_scan_param(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_param *cp;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	hdev->le_scan_type = cp->type;

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_set_ext_scan_param(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_scan_params *cp;
	struct hci_ev_status *rp = data;
	struct hci_cp_le_scan_phy_params *phy_param;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
	if (!cp)
		return rp->status;

	phy_param = (void *)cp->data;

	hci_dev_lock(hdev);

	hdev->le_scan_type = phy_param->type;

	hci_dev_unlock(hdev);

	return rp->status;
}

static bool has_pending_adv_report(struct hci_dev *hdev)
{
	struct discovery_state *d = &hdev->discovery;

	return bacmp(&d->last_adv_addr, BDADDR_ANY);
}

static void clear_pending_adv_report(struct hci_dev *hdev)
{
	struct discovery_state *d = &hdev->discovery;

	bacpy(&d->last_adv_addr, BDADDR_ANY);
	d->last_adv_data_len = 0;
}

static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 bdaddr_type, s8 rssi, u32 flags,
				     u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;

	if (len > HCI_MAX_AD_LENGTH)
		return;

	bacpy(&d->last_adv_addr, bdaddr);
	d->last_adv_addr_type = bdaddr_type;
	d->last_adv_rssi = rssi;
	d->last_adv_flags = flags;
	memcpy(d->last_adv_data, data, len);
	d->last_adv_data_len = len;
}
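/* During active scanning the most recent advertising report is kept
 * pending until its scan response arrives; disabling the scan flushes
 * any still-pending report to mgmt.
 */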
static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
{
	hci_dev_lock(hdev);

	switch (enable) {
	case LE_SCAN_ENABLE:
		hci_dev_set_flag(hdev, HCI_LE_SCAN);
		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
			clear_pending_adv_report(hdev);
		if (hci_dev_test_flag(hdev, HCI_MESH))
			hci_discovery_set_state(hdev, DISCOVERY_FINDING);
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		hci_dev_clear_flag(hdev, HCI_LE_SCAN);

		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request. Therefore,
		 * mark discovery as stopped.
		 */
		if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
			 hdev->discovery.state == DISCOVERY_FINDING)
			queue_work(hdev->workqueue, &hdev->reenable_adv_work);

		break;

	default:
		bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
			   enable);
		break;
	}

	hci_dev_unlock(hdev);
}

static u8 hci_cc_le_set_scan_enable(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return rp->status;

	le_set_scan_enable_complete(hdev, cp->enable);

	return rp->status;
}

static u8 hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_scan_enable *cp;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
	if (!cp)
		return rp->status;

	le_set_scan_enable_complete(hdev, cp->enable);

	return rp->status;
}

static u8 hci_cc_le_read_num_adv_sets(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_le_read_num_supported_adv_sets *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x No of Adv sets %u", rp->status,
		   rp->num_of_sets);

	if (rp->status)
		return rp->status;

	hdev->le_num_of_adv_sets = rp->num_of_sets;

	return rp->status;
}

static u8 hci_cc_le_read_accept_list_size(struct hci_dev *hdev, void *data,
					  struct sk_buff *skb)
{
	struct hci_rp_le_read_accept_list_size *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);

	if (rp->status)
		return rp->status;

	hdev->le_accept_list_size = rp->size;

	return rp->status;
}

static u8 hci_cc_le_clear_accept_list(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->le_accept_list);
	hci_dev_unlock(hdev);

	return rp->status;
}
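/* Accept list additions and removals are mirrored into hdev's
 * le_accept_list so the host can tell how much of the controller's
 * limited list space is in use.
 */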
static u8 hci_cc_le_add_to_accept_list(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_add_to_accept_list *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_add(&hdev->le_accept_list, &sent->bdaddr,
			    sent->bdaddr_type);
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_del_from_accept_list(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_cp_le_del_from_accept_list *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_del(&hdev->le_accept_list, &sent->bdaddr,
			    sent->bdaddr_type);
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_read_supported_states(struct hci_dev *hdev, void *data,
					  struct sk_buff *skb)
{
	struct hci_rp_le_read_supported_states *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	memcpy(hdev->le_states, rp->le_states, 8);

	return rp->status;
}

static u8 hci_cc_le_read_def_data_len(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_le_read_def_data_len *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
	hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);

	return rp->status;
}

static u8 hci_cc_le_write_def_data_len(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_write_def_data_len *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
	if (!sent)
		return rp->status;

	hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
	hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);

	return rp->status;
}

static u8 hci_cc_le_add_to_resolv_list(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_add_to_resolv_list *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
				     sent->bdaddr_type, sent->peer_irk,
				     sent->local_irk);
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_del_from_resolv_list(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_cp_le_del_from_resolv_list *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
				     sent->bdaddr_type);
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_clear_resolv_list(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);
	hci_dev_unlock(hdev);

	return rp->status;
}
static u8 hci_cc_le_read_resolv_list_size(struct hci_dev *hdev, void *data,
					  struct sk_buff *skb)
{
	struct hci_rp_le_read_resolv_list_size *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);

	if (rp->status)
		return rp->status;

	hdev->le_resolv_list_size = rp->size;

	return rp->status;
}

static u8 hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev, void *data,
					       struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u8 *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (*sent)
		hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
	else
		hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_read_max_data_len(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_le_read_max_data_len *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
	hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
	hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
	hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);

	return rp->status;
}

static u8 hci_cc_write_le_host_supported(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_cp_write_le_host_supported *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (sent->le) {
		hdev->features[1][0] |= LMP_HOST_LE;
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	} else {
		hdev->features[1][0] &= ~LMP_HOST_LE;
		hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);
	}

	if (sent->simul)
		hdev->features[1][0] |= LMP_HOST_LE_BREDR;
	else
		hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_set_adv_param(struct hci_dev *hdev, void *data,
			       struct sk_buff *skb)
{
	struct hci_cp_le_set_adv_param *cp;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);
	hdev->adv_addr_type = cp->own_address_type;
	hci_dev_unlock(hdev);

	return rp->status;
}
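/* Extended advertising reports the selected TX power in the command
 * response: instance 0 is tracked in hdev itself, any other instance
 * in its adv_info entry.
 */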
static u8 hci_cc_set_ext_adv_param(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_le_set_ext_adv_params *rp = data;
	struct hci_cp_le_set_ext_adv_params *cp;
	struct adv_info *adv_instance;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);
	hdev->adv_addr_type = cp->own_addr_type;
	if (!cp->handle) {
		/* Store in hdev for instance 0 */
		hdev->adv_tx_power = rp->tx_power;
	} else {
		adv_instance = hci_find_adv_instance(hdev, cp->handle);
		if (adv_instance)
			adv_instance->tx_power = rp->tx_power;
	}
	/* Update adv data as tx power is known now */
	hci_update_adv_data(hdev, cp->handle);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_rssi(struct hci_dev *hdev, void *data,
			   struct sk_buff *skb)
{
	struct hci_rp_read_rssi *rp = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->rssi = rp->rssi;

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_tx_power(struct hci_dev *hdev, void *data,
			       struct sk_buff *skb)
{
	struct hci_cp_read_tx_power *sent;
	struct hci_rp_read_tx_power *rp = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (!conn)
		goto unlock;

	switch (sent->type) {
	case 0x00:
		conn->tx_power = rp->tx_power;
		break;
	case 0x01:
		conn->max_tx_power = rp->tx_power;
		break;
	}

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}

static u8 hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	u8 *mode;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
	if (mode)
		hdev->ssp_debug_mode = *mode;

	return rp->status;
}
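/* Command Status handlers: these commands complete asynchronously via
 * a later event, so on an early failure the connection or inquiry
 * state set up when the command was sent must be unwound here.
 */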
HCI_OP_ADD_SCO); 2351 if (!cp) 2352 return; 2353 2354 handle = __le16_to_cpu(cp->handle); 2355 2356 bt_dev_dbg(hdev, "handle 0x%4.4x", handle); 2357 2358 hci_dev_lock(hdev); 2359 2360 acl = hci_conn_hash_lookup_handle(hdev, handle); 2361 if (acl) { 2362 sco = acl->link; 2363 if (sco) { 2364 sco->state = BT_CLOSED; 2365 2366 hci_connect_cfm(sco, status); 2367 hci_conn_del(sco); 2368 } 2369 } 2370 2371 hci_dev_unlock(hdev); 2372 } 2373 2374 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status) 2375 { 2376 struct hci_cp_auth_requested *cp; 2377 struct hci_conn *conn; 2378 2379 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2380 2381 if (!status) 2382 return; 2383 2384 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED); 2385 if (!cp) 2386 return; 2387 2388 hci_dev_lock(hdev); 2389 2390 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2391 if (conn) { 2392 if (conn->state == BT_CONFIG) { 2393 hci_connect_cfm(conn, status); 2394 hci_conn_drop(conn); 2395 } 2396 } 2397 2398 hci_dev_unlock(hdev); 2399 } 2400 2401 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status) 2402 { 2403 struct hci_cp_set_conn_encrypt *cp; 2404 struct hci_conn *conn; 2405 2406 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2407 2408 if (!status) 2409 return; 2410 2411 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT); 2412 if (!cp) 2413 return; 2414 2415 hci_dev_lock(hdev); 2416 2417 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2418 if (conn) { 2419 if (conn->state == BT_CONFIG) { 2420 hci_connect_cfm(conn, status); 2421 hci_conn_drop(conn); 2422 } 2423 } 2424 2425 hci_dev_unlock(hdev); 2426 } 2427 2428 static int hci_outgoing_auth_needed(struct hci_dev *hdev, 2429 struct hci_conn *conn) 2430 { 2431 if (conn->state != BT_CONFIG || !conn->out) 2432 return 0; 2433 2434 if (conn->pending_sec_level == BT_SECURITY_SDP) 2435 return 0; 2436 2437 /* Only request authentication for SSP connections or non-SSP 2438 * devices with sec_level MEDIUM or HIGH or if MITM protection 2439 * is requested. 2440 */ 2441 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) && 2442 conn->pending_sec_level != BT_SECURITY_FIPS && 2443 conn->pending_sec_level != BT_SECURITY_HIGH && 2444 conn->pending_sec_level != BT_SECURITY_MEDIUM) 2445 return 0; 2446 2447 return 1; 2448 } 2449 2450 static int hci_resolve_name(struct hci_dev *hdev, 2451 struct inquiry_entry *e) 2452 { 2453 struct hci_cp_remote_name_req cp; 2454 2455 memset(&cp, 0, sizeof(cp)); 2456 2457 bacpy(&cp.bdaddr, &e->data.bdaddr); 2458 cp.pscan_rep_mode = e->data.pscan_rep_mode; 2459 cp.pscan_mode = e->data.pscan_mode; 2460 cp.clock_offset = e->data.clock_offset; 2461 2462 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp); 2463 } 2464 2465 static bool hci_resolve_next_name(struct hci_dev *hdev) 2466 { 2467 struct discovery_state *discov = &hdev->discovery; 2468 struct inquiry_entry *e; 2469 2470 if (list_empty(&discov->resolve)) 2471 return false; 2472 2473 /* We should stop if we already spent too much time resolving names. 
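* Each resolution sends a separate Remote Name Request, so a long resolve list could otherwise keep discovery busy long past the timeout.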
*/ 2474 if (time_after(jiffies, discov->name_resolve_timeout)) { 2475 bt_dev_warn_ratelimited(hdev, "Name resolving takes too long."); 2476 return false; 2477 } 2478 2479 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED); 2480 if (!e) 2481 return false; 2482 2483 if (hci_resolve_name(hdev, e) == 0) { 2484 e->name_state = NAME_PENDING; 2485 return true; 2486 } 2487 2488 return false; 2489 } 2490 2491 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn, 2492 bdaddr_t *bdaddr, u8 *name, u8 name_len) 2493 { 2494 struct discovery_state *discov = &hdev->discovery; 2495 struct inquiry_entry *e; 2496 2497 /* Update the mgmt connected state if necessary. Be careful with 2498 * conn objects that exist but are not (yet) connected however. 2499 * Only those in BT_CONFIG or BT_CONNECTED states can be 2500 * considered connected. 2501 */ 2502 if (conn && 2503 (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) && 2504 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) 2505 mgmt_device_connected(hdev, conn, name, name_len); 2506 2507 if (discov->state == DISCOVERY_STOPPED) 2508 return; 2509 2510 if (discov->state == DISCOVERY_STOPPING) 2511 goto discov_complete; 2512 2513 if (discov->state != DISCOVERY_RESOLVING) 2514 return; 2515 2516 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING); 2517 /* If the device was not found in the list of devices whose names are 2518 * pending, there is no need to continue resolving the next name, as it 2519 * will be done upon receiving another Remote Name Request Complete 2520 * event. */ 2521 if (!e) 2522 return; 2523 2524 list_del(&e->list); 2525 2526 e->name_state = name ? NAME_KNOWN : NAME_NOT_KNOWN; 2527 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00, e->data.rssi, 2528 name, name_len); 2529 2530 if (hci_resolve_next_name(hdev)) 2531 return; 2532 2533 discov_complete: 2534 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 2535 } 2536 2537 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status) 2538 { 2539 struct hci_cp_remote_name_req *cp; 2540 struct hci_conn *conn; 2541 2542 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2543 2544 /* If successful, wait for the name req complete event before 2545 * checking for the need to do authentication. */ 2546 if (!status) 2547 return; 2548 2549 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ); 2550 if (!cp) 2551 return; 2552 2553 hci_dev_lock(hdev); 2554 2555 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); 2556 2557 if (hci_dev_test_flag(hdev, HCI_MGMT)) 2558 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0); 2559 2560 if (!conn) 2561 goto unlock; 2562 2563 if (!hci_outgoing_auth_needed(hdev, conn)) 2564 goto unlock; 2565 2566 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) { 2567 struct hci_cp_auth_requested auth_cp; 2568 2569 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags); 2570 2571 auth_cp.handle = __cpu_to_le16(conn->handle); 2572 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, 2573 sizeof(auth_cp), &auth_cp); 2574 } 2575 2576 unlock: 2577 hci_dev_unlock(hdev); 2578 } 2579 2580 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status) 2581 { 2582 struct hci_cp_read_remote_features *cp; 2583 struct hci_conn *conn; 2584 2585 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2586 2587 if (!status) 2588 return; 2589 2590 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES); 2591 if (!cp) 2592 return; 2593 2594 hci_dev_lock(hdev); 2595 2596 conn = hci_conn_hash_lookup_handle(hdev,
__le16_to_cpu(cp->handle)); 2597 if (conn) { 2598 if (conn->state == BT_CONFIG) { 2599 hci_connect_cfm(conn, status); 2600 hci_conn_drop(conn); 2601 } 2602 } 2603 2604 hci_dev_unlock(hdev); 2605 } 2606 2607 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status) 2608 { 2609 struct hci_cp_read_remote_ext_features *cp; 2610 struct hci_conn *conn; 2611 2612 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2613 2614 if (!status) 2615 return; 2616 2617 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES); 2618 if (!cp) 2619 return; 2620 2621 hci_dev_lock(hdev); 2622 2623 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2624 if (conn) { 2625 if (conn->state == BT_CONFIG) { 2626 hci_connect_cfm(conn, status); 2627 hci_conn_drop(conn); 2628 } 2629 } 2630 2631 hci_dev_unlock(hdev); 2632 } 2633 2634 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status) 2635 { 2636 struct hci_cp_setup_sync_conn *cp; 2637 struct hci_conn *acl, *sco; 2638 __u16 handle; 2639 2640 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2641 2642 if (!status) 2643 return; 2644 2645 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN); 2646 if (!cp) 2647 return; 2648 2649 handle = __le16_to_cpu(cp->handle); 2650 2651 bt_dev_dbg(hdev, "handle 0x%4.4x", handle); 2652 2653 hci_dev_lock(hdev); 2654 2655 acl = hci_conn_hash_lookup_handle(hdev, handle); 2656 if (acl) { 2657 sco = acl->link; 2658 if (sco) { 2659 sco->state = BT_CLOSED; 2660 2661 hci_connect_cfm(sco, status); 2662 hci_conn_del(sco); 2663 } 2664 } 2665 2666 hci_dev_unlock(hdev); 2667 } 2668 2669 static void hci_cs_enhanced_setup_sync_conn(struct hci_dev *hdev, __u8 status) 2670 { 2671 struct hci_cp_enhanced_setup_sync_conn *cp; 2672 struct hci_conn *acl, *sco; 2673 __u16 handle; 2674 2675 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2676 2677 if (!status) 2678 return; 2679 2680 cp = hci_sent_cmd_data(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN); 2681 if (!cp) 2682 return; 2683 2684 handle = __le16_to_cpu(cp->handle); 2685 2686 bt_dev_dbg(hdev, "handle 0x%4.4x", handle); 2687 2688 hci_dev_lock(hdev); 2689 2690 acl = hci_conn_hash_lookup_handle(hdev, handle); 2691 if (acl) { 2692 sco = acl->link; 2693 if (sco) { 2694 sco->state = BT_CLOSED; 2695 2696 hci_connect_cfm(sco, status); 2697 hci_conn_del(sco); 2698 } 2699 } 2700 2701 hci_dev_unlock(hdev); 2702 } 2703 2704 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status) 2705 { 2706 struct hci_cp_sniff_mode *cp; 2707 struct hci_conn *conn; 2708 2709 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2710 2711 if (!status) 2712 return; 2713 2714 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE); 2715 if (!cp) 2716 return; 2717 2718 hci_dev_lock(hdev); 2719 2720 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2721 if (conn) { 2722 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags); 2723 2724 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags)) 2725 hci_sco_setup(conn, status); 2726 } 2727 2728 hci_dev_unlock(hdev); 2729 } 2730 2731 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status) 2732 { 2733 struct hci_cp_exit_sniff_mode *cp; 2734 struct hci_conn *conn; 2735 2736 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2737 2738 if (!status) 2739 return; 2740 2741 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE); 2742 if (!cp) 2743 return; 2744 2745 hci_dev_lock(hdev); 2746 2747 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2748 if (conn) { 2749 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags); 2750 2751 if 
(test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags)) 2752 hci_sco_setup(conn, status); 2753 } 2754 2755 hci_dev_unlock(hdev); 2756 } 2757 2758 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status) 2759 { 2760 struct hci_cp_disconnect *cp; 2761 struct hci_conn_params *params; 2762 struct hci_conn *conn; 2763 bool mgmt_conn; 2764 2765 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2766 2767 /* Wait for HCI_EV_DISCONN_COMPLETE if status 0x00 and not suspended, 2768 * otherwise clean up the connection immediately. 2769 */ 2770 if (!status && !hdev->suspended) 2771 return; 2772 2773 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT); 2774 if (!cp) 2775 return; 2776 2777 hci_dev_lock(hdev); 2778 2779 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2780 if (!conn) 2781 goto unlock; 2782 2783 if (status) { 2784 mgmt_disconnect_failed(hdev, &conn->dst, conn->type, 2785 conn->dst_type, status); 2786 2787 if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) { 2788 hdev->cur_adv_instance = conn->adv_instance; 2789 hci_enable_advertising(hdev); 2790 } 2791 2792 goto done; 2793 } 2794 2795 mgmt_conn = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags); 2796 2797 if (conn->type == ACL_LINK) { 2798 if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags)) 2799 hci_remove_link_key(hdev, &conn->dst); 2800 } 2801 2802 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type); 2803 if (params) { 2804 switch (params->auto_connect) { 2805 case HCI_AUTO_CONN_LINK_LOSS: 2806 if (cp->reason != HCI_ERROR_CONNECTION_TIMEOUT) 2807 break; 2808 fallthrough; 2809 2810 case HCI_AUTO_CONN_DIRECT: 2811 case HCI_AUTO_CONN_ALWAYS: 2812 list_del_init(&params->action); 2813 list_add(&params->action, &hdev->pend_le_conns); 2814 break; 2815 2816 default: 2817 break; 2818 } 2819 } 2820 2821 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type, 2822 cp->reason, mgmt_conn); 2823 2824 hci_disconn_cfm(conn, cp->reason); 2825 2826 done: 2827 /* If the disconnection failed for any reason, the upper layer 2828 * does not retry the disconnection in the current implementation. 2829 * Hence, we need to do some basic cleanup here and re-enable 2830 * advertising if necessary. 2831 */ 2832 hci_conn_del(conn); 2833 unlock: 2834 hci_dev_unlock(hdev); 2835 } 2836 2837 static u8 ev_bdaddr_type(struct hci_dev *hdev, u8 type, bool *resolved) 2838 { 2839 /* When using controller-based address resolution, the new 2840 * address types 0x02 and 0x03 are used. These types need to be 2841 * converted back into either public or random address type. 2842 */ 2843 switch (type) { 2844 case ADDR_LE_DEV_PUBLIC_RESOLVED: 2845 if (resolved) 2846 *resolved = true; 2847 return ADDR_LE_DEV_PUBLIC; 2848 case ADDR_LE_DEV_RANDOM_RESOLVED: 2849 if (resolved) 2850 *resolved = true; 2851 return ADDR_LE_DEV_RANDOM; 2852 } 2853 2854 if (resolved) 2855 *resolved = false; 2856 return type; 2857 } 2858 2859 static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr, 2860 u8 peer_addr_type, u8 own_address_type, 2861 u8 filter_policy) 2862 { 2863 struct hci_conn *conn; 2864 2865 conn = hci_conn_hash_lookup_le(hdev, peer_addr, 2866 peer_addr_type); 2867 if (!conn) 2868 return; 2869 2870 own_address_type = ev_bdaddr_type(hdev, own_address_type, NULL); 2871 2872 /* Store the initiator and responder address information which 2873 * is needed for SMP. These values will not change during the 2874 * lifetime of the connection. 
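* For the outgoing case handled here, the initiator address is our own (public or random, depending on own_address_type) and the responder address is the peer address from the Create Connection command.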
2875 */ 2876 conn->init_addr_type = own_address_type; 2877 if (own_address_type == ADDR_LE_DEV_RANDOM) 2878 bacpy(&conn->init_addr, &hdev->random_addr); 2879 else 2880 bacpy(&conn->init_addr, &hdev->bdaddr); 2881 2882 conn->resp_addr_type = peer_addr_type; 2883 bacpy(&conn->resp_addr, peer_addr); 2884 } 2885 2886 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status) 2887 { 2888 struct hci_cp_le_create_conn *cp; 2889 2890 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2891 2892 /* All connection failure handling is taken care of by the 2893 * hci_conn_failed function which is triggered by the HCI 2894 * request completion callbacks used for connecting. 2895 */ 2896 if (status) 2897 return; 2898 2899 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN); 2900 if (!cp) 2901 return; 2902 2903 hci_dev_lock(hdev); 2904 2905 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type, 2906 cp->own_address_type, cp->filter_policy); 2907 2908 hci_dev_unlock(hdev); 2909 } 2910 2911 static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status) 2912 { 2913 struct hci_cp_le_ext_create_conn *cp; 2914 2915 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2916 2917 /* All connection failure handling is taken care of by the 2918 * hci_conn_failed function which is triggered by the HCI 2919 * request completion callbacks used for connecting. 2920 */ 2921 if (status) 2922 return; 2923 2924 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN); 2925 if (!cp) 2926 return; 2927 2928 hci_dev_lock(hdev); 2929 2930 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type, 2931 cp->own_addr_type, cp->filter_policy); 2932 2933 hci_dev_unlock(hdev); 2934 } 2935 2936 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status) 2937 { 2938 struct hci_cp_le_read_remote_features *cp; 2939 struct hci_conn *conn; 2940 2941 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2942 2943 if (!status) 2944 return; 2945 2946 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES); 2947 if (!cp) 2948 return; 2949 2950 hci_dev_lock(hdev); 2951 2952 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2953 if (conn) { 2954 if (conn->state == BT_CONFIG) { 2955 hci_connect_cfm(conn, status); 2956 hci_conn_drop(conn); 2957 } 2958 } 2959 2960 hci_dev_unlock(hdev); 2961 } 2962 2963 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status) 2964 { 2965 struct hci_cp_le_start_enc *cp; 2966 struct hci_conn *conn; 2967 2968 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2969 2970 if (!status) 2971 return; 2972 2973 hci_dev_lock(hdev); 2974 2975 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC); 2976 if (!cp) 2977 goto unlock; 2978 2979 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2980 if (!conn) 2981 goto unlock; 2982 2983 if (conn->state != BT_CONNECTED) 2984 goto unlock; 2985 2986 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE); 2987 hci_conn_drop(conn); 2988 2989 unlock: 2990 hci_dev_unlock(hdev); 2991 } 2992 2993 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status) 2994 { 2995 struct hci_cp_switch_role *cp; 2996 struct hci_conn *conn; 2997 2998 BT_DBG("%s status 0x%2.2x", hdev->name, status); 2999 3000 if (!status) 3001 return; 3002 3003 cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE); 3004 if (!cp) 3005 return; 3006 3007 hci_dev_lock(hdev); 3008 3009 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); 3010 if (conn) 3011 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags); 3012 3013 hci_dev_unlock(hdev); 3014 } 3015 3016 static void 
hci_inquiry_complete_evt(struct hci_dev *hdev, void *data, 3017 struct sk_buff *skb) 3018 { 3019 struct hci_ev_status *ev = data; 3020 struct discovery_state *discov = &hdev->discovery; 3021 struct inquiry_entry *e; 3022 3023 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 3024 3025 hci_conn_check_pending(hdev); 3026 3027 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags)) 3028 return; 3029 3030 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */ 3031 wake_up_bit(&hdev->flags, HCI_INQUIRY); 3032 3033 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 3034 return; 3035 3036 hci_dev_lock(hdev); 3037 3038 if (discov->state != DISCOVERY_FINDING) 3039 goto unlock; 3040 3041 if (list_empty(&discov->resolve)) { 3042 /* When BR/EDR inquiry is active and no LE scanning is in 3043 * progress, then change discovery state to indicate completion. 3044 * 3045 * When running LE scanning and BR/EDR inquiry simultaneously 3046 * and the LE scan already finished, then change the discovery 3047 * state to indicate completion. 3048 */ 3049 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) || 3050 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) 3051 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 3052 goto unlock; 3053 } 3054 3055 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED); 3056 if (e && hci_resolve_name(hdev, e) == 0) { 3057 e->name_state = NAME_PENDING; 3058 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING); 3059 discov->name_resolve_timeout = jiffies + NAME_RESOLVE_DURATION; 3060 } else { 3061 /* When BR/EDR inquiry is active and no LE scanning is in 3062 * progress, then change discovery state to indicate completion. 3063 * 3064 * When running LE scanning and BR/EDR inquiry simultaneously 3065 * and the LE scan already finished, then change the discovery 3066 * state to indicate completion. 
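* (This is the same completion check as in the list_empty() branch above.)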
3067 */ 3068 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) || 3069 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) 3070 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 3071 } 3072 3073 unlock: 3074 hci_dev_unlock(hdev); 3075 } 3076 3077 static void hci_inquiry_result_evt(struct hci_dev *hdev, void *edata, 3078 struct sk_buff *skb) 3079 { 3080 struct hci_ev_inquiry_result *ev = edata; 3081 struct inquiry_data data; 3082 int i; 3083 3084 if (!hci_ev_skb_pull(hdev, skb, HCI_EV_INQUIRY_RESULT, 3085 flex_array_size(ev, info, ev->num))) 3086 return; 3087 3088 bt_dev_dbg(hdev, "num %d", ev->num); 3089 3090 if (!ev->num) 3091 return; 3092 3093 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) 3094 return; 3095 3096 hci_dev_lock(hdev); 3097 3098 for (i = 0; i < ev->num; i++) { 3099 struct inquiry_info *info = &ev->info[i]; 3100 u32 flags; 3101 3102 bacpy(&data.bdaddr, &info->bdaddr); 3103 data.pscan_rep_mode = info->pscan_rep_mode; 3104 data.pscan_period_mode = info->pscan_period_mode; 3105 data.pscan_mode = info->pscan_mode; 3106 memcpy(data.dev_class, info->dev_class, 3); 3107 data.clock_offset = info->clock_offset; 3108 data.rssi = HCI_RSSI_INVALID; 3109 data.ssp_mode = 0x00; 3110 3111 flags = hci_inquiry_cache_update(hdev, &data, false); 3112 3113 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 3114 info->dev_class, HCI_RSSI_INVALID, 3115 flags, NULL, 0, NULL, 0, 0); 3116 } 3117 3118 hci_dev_unlock(hdev); 3119 } 3120 3121 static void hci_conn_complete_evt(struct hci_dev *hdev, void *data, 3122 struct sk_buff *skb) 3123 { 3124 struct hci_ev_conn_complete *ev = data; 3125 struct hci_conn *conn; 3126 u8 status = ev->status; 3127 3128 bt_dev_dbg(hdev, "status 0x%2.2x", status); 3129 3130 hci_dev_lock(hdev); 3131 3132 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); 3133 if (!conn) { 3134 /* In case of an error status, if there is no connection pending, 3135 * just unlock as there is nothing to clean up. 3136 */ 3137 if (ev->status) 3138 goto unlock; 3139 3140 /* Connection may not exist if auto-connected. Check the BR/EDR 3141 * allowlist to see if this device is allowed to auto-connect; 3142 * if the link is an ACL type, create the connection 3143 * automatically. 3144 * 3145 * Auto-connect will only occur if the event filter is 3146 * programmed with a given address. Right now, the event filter is 3147 * only used during suspend. 3148 */ 3149 if (ev->link_type == ACL_LINK && 3150 hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, 3151 &ev->bdaddr, 3152 BDADDR_BREDR)) { 3153 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr, 3154 HCI_ROLE_SLAVE); 3155 if (!conn) { 3156 bt_dev_err(hdev, "no memory for new conn"); 3157 goto unlock; 3158 } 3159 } else { 3160 if (ev->link_type != SCO_LINK) 3161 goto unlock; 3162 3163 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, 3164 &ev->bdaddr); 3165 if (!conn) 3166 goto unlock; 3167 3168 conn->type = SCO_LINK; 3169 } 3170 } 3171 3172 /* The HCI_Connection_Complete event is only sent once per connection. 3173 * Processing it more than once per connection can corrupt kernel memory. 3174 * 3175 * As the connection handle is set here for the first time, it indicates 3176 * whether the connection is already set up. 
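* (hci_conn_add() initializes conn->handle to HCI_CONN_HANDLE_UNSET, so any other value means a Connection Complete event was already processed for this conn.)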
3177 */ 3178 if (conn->handle != HCI_CONN_HANDLE_UNSET) { 3179 bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection"); 3180 goto unlock; 3181 } 3182 3183 if (!status) { 3184 conn->handle = __le16_to_cpu(ev->handle); 3185 if (conn->handle > HCI_CONN_HANDLE_MAX) { 3186 bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x", 3187 conn->handle, HCI_CONN_HANDLE_MAX); 3188 status = HCI_ERROR_INVALID_PARAMETERS; 3189 goto done; 3190 } 3191 3192 if (conn->type == ACL_LINK) { 3193 conn->state = BT_CONFIG; 3194 hci_conn_hold(conn); 3195 3196 if (!conn->out && !hci_conn_ssp_enabled(conn) && 3197 !hci_find_link_key(hdev, &ev->bdaddr)) 3198 conn->disc_timeout = HCI_PAIRING_TIMEOUT; 3199 else 3200 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 3201 } else 3202 conn->state = BT_CONNECTED; 3203 3204 hci_debugfs_create_conn(conn); 3205 hci_conn_add_sysfs(conn); 3206 3207 if (test_bit(HCI_AUTH, &hdev->flags)) 3208 set_bit(HCI_CONN_AUTH, &conn->flags); 3209 3210 if (test_bit(HCI_ENCRYPT, &hdev->flags)) 3211 set_bit(HCI_CONN_ENCRYPT, &conn->flags); 3212 3213 /* Get remote features */ 3214 if (conn->type == ACL_LINK) { 3215 struct hci_cp_read_remote_features cp; 3216 cp.handle = ev->handle; 3217 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES, 3218 sizeof(cp), &cp); 3219 3220 hci_update_scan(hdev); 3221 } 3222 3223 /* Set packet type for incoming connection */ 3224 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) { 3225 struct hci_cp_change_conn_ptype cp; 3226 cp.handle = ev->handle; 3227 cp.pkt_type = cpu_to_le16(conn->pkt_type); 3228 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp), 3229 &cp); 3230 } 3231 } 3232 3233 if (conn->type == ACL_LINK) 3234 hci_sco_setup(conn, ev->status); 3235 3236 done: 3237 if (status) { 3238 hci_conn_failed(conn, status); 3239 } else if (ev->link_type == SCO_LINK) { 3240 switch (conn->setting & SCO_AIRMODE_MASK) { 3241 case SCO_AIRMODE_CVSD: 3242 if (hdev->notify) 3243 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD); 3244 break; 3245 } 3246 3247 hci_connect_cfm(conn, status); 3248 } 3249 3250 unlock: 3251 hci_dev_unlock(hdev); 3252 3253 hci_conn_check_pending(hdev); 3254 } 3255 3256 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr) 3257 { 3258 struct hci_cp_reject_conn_req cp; 3259 3260 bacpy(&cp.bdaddr, bdaddr); 3261 cp.reason = HCI_ERROR_REJ_BAD_ADDR; 3262 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp); 3263 } 3264 3265 static void hci_conn_request_evt(struct hci_dev *hdev, void *data, 3266 struct sk_buff *skb) 3267 { 3268 struct hci_ev_conn_request *ev = data; 3269 int mask = hdev->link_mode; 3270 struct inquiry_entry *ie; 3271 struct hci_conn *conn; 3272 __u8 flags = 0; 3273 3274 bt_dev_dbg(hdev, "bdaddr %pMR type 0x%x", &ev->bdaddr, ev->link_type); 3275 3276 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type, 3277 &flags); 3278 3279 if (!(mask & HCI_LM_ACCEPT)) { 3280 hci_reject_conn(hdev, &ev->bdaddr); 3281 return; 3282 } 3283 3284 hci_dev_lock(hdev); 3285 3286 if (hci_bdaddr_list_lookup(&hdev->reject_list, &ev->bdaddr, 3287 BDADDR_BREDR)) { 3288 hci_reject_conn(hdev, &ev->bdaddr); 3289 goto unlock; 3290 } 3291 3292 /* Require HCI_CONNECTABLE or an accept list entry to accept the 3293 * connection. These features are only touched through mgmt so 3294 * only do the checks if HCI_MGMT is set. 
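* Without HCI_MGMT the controller is driven directly by user space, so no connectable policy is enforced here.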
3295 */ 3296 if (hci_dev_test_flag(hdev, HCI_MGMT) && 3297 !hci_dev_test_flag(hdev, HCI_CONNECTABLE) && 3298 !hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr, 3299 BDADDR_BREDR)) { 3300 hci_reject_conn(hdev, &ev->bdaddr); 3301 goto unlock; 3302 } 3303 3304 /* Connection accepted */ 3305 3306 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr); 3307 if (ie) 3308 memcpy(ie->data.dev_class, ev->dev_class, 3); 3309 3310 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, 3311 &ev->bdaddr); 3312 if (!conn) { 3313 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr, 3314 HCI_ROLE_SLAVE); 3315 if (!conn) { 3316 bt_dev_err(hdev, "no memory for new connection"); 3317 goto unlock; 3318 } 3319 } 3320 3321 memcpy(conn->dev_class, ev->dev_class, 3); 3322 3323 hci_dev_unlock(hdev); 3324 3325 if (ev->link_type == ACL_LINK || 3326 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) { 3327 struct hci_cp_accept_conn_req cp; 3328 conn->state = BT_CONNECT; 3329 3330 bacpy(&cp.bdaddr, &ev->bdaddr); 3331 3332 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER)) 3333 cp.role = 0x00; /* Become central */ 3334 else 3335 cp.role = 0x01; /* Remain peripheral */ 3336 3337 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp); 3338 } else if (!(flags & HCI_PROTO_DEFER)) { 3339 struct hci_cp_accept_sync_conn_req cp; 3340 conn->state = BT_CONNECT; 3341 3342 bacpy(&cp.bdaddr, &ev->bdaddr); 3343 cp.pkt_type = cpu_to_le16(conn->pkt_type); 3344 3345 cp.tx_bandwidth = cpu_to_le32(0x00001f40); 3346 cp.rx_bandwidth = cpu_to_le32(0x00001f40); 3347 cp.max_latency = cpu_to_le16(0xffff); 3348 cp.content_format = cpu_to_le16(hdev->voice_setting); 3349 cp.retrans_effort = 0xff; 3350 3351 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp), 3352 &cp); 3353 } else { 3354 conn->state = BT_CONNECT2; 3355 hci_connect_cfm(conn, 0); 3356 } 3357 3358 return; 3359 unlock: 3360 hci_dev_unlock(hdev); 3361 } 3362 3363 static u8 hci_to_mgmt_reason(u8 err) 3364 { 3365 switch (err) { 3366 case HCI_ERROR_CONNECTION_TIMEOUT: 3367 return MGMT_DEV_DISCONN_TIMEOUT; 3368 case HCI_ERROR_REMOTE_USER_TERM: 3369 case HCI_ERROR_REMOTE_LOW_RESOURCES: 3370 case HCI_ERROR_REMOTE_POWER_OFF: 3371 return MGMT_DEV_DISCONN_REMOTE; 3372 case HCI_ERROR_LOCAL_HOST_TERM: 3373 return MGMT_DEV_DISCONN_LOCAL_HOST; 3374 default: 3375 return MGMT_DEV_DISCONN_UNKNOWN; 3376 } 3377 } 3378 3379 static void hci_disconn_complete_evt(struct hci_dev *hdev, void *data, 3380 struct sk_buff *skb) 3381 { 3382 struct hci_ev_disconn_complete *ev = data; 3383 u8 reason; 3384 struct hci_conn_params *params; 3385 struct hci_conn *conn; 3386 bool mgmt_connected; 3387 3388 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 3389 3390 hci_dev_lock(hdev); 3391 3392 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 3393 if (!conn) 3394 goto unlock; 3395 3396 if (ev->status) { 3397 mgmt_disconnect_failed(hdev, &conn->dst, conn->type, 3398 conn->dst_type, ev->status); 3399 goto unlock; 3400 } 3401 3402 conn->state = BT_CLOSED; 3403 3404 mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags); 3405 3406 if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags)) 3407 reason = MGMT_DEV_DISCONN_AUTH_FAILURE; 3408 else 3409 reason = hci_to_mgmt_reason(ev->reason); 3410 3411 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type, 3412 reason, mgmt_connected); 3413 3414 if (conn->type == ACL_LINK) { 3415 if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags)) 3416 hci_remove_link_key(hdev, &conn->dst); 3417 3418 
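/* An ACL link has gone away; re-evaluate whether page scanning needs to be adjusted. */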
hci_update_scan(hdev); 3419 } 3420 3421 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type); 3422 if (params) { 3423 switch (params->auto_connect) { 3424 case HCI_AUTO_CONN_LINK_LOSS: 3425 if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT) 3426 break; 3427 fallthrough; 3428 3429 case HCI_AUTO_CONN_DIRECT: 3430 case HCI_AUTO_CONN_ALWAYS: 3431 list_del_init(&params->action); 3432 list_add(&params->action, &hdev->pend_le_conns); 3433 hci_update_passive_scan(hdev); 3434 break; 3435 3436 default: 3437 break; 3438 } 3439 } 3440 3441 hci_disconn_cfm(conn, ev->reason); 3442 3443 /* Re-enable advertising if necessary, since it might 3444 * have been disabled by the connection. From the 3445 * HCI_LE_Set_Advertise_Enable command description in 3446 * the core specification (v4.0): 3447 * "The Controller shall continue advertising until the Host 3448 * issues an LE_Set_Advertise_Enable command with 3449 * Advertising_Enable set to 0x00 (Advertising is disabled) 3450 * or until a connection is created or until the Advertising 3451 * is timed out due to Directed Advertising." 3452 */ 3453 if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) { 3454 hdev->cur_adv_instance = conn->adv_instance; 3455 hci_enable_advertising(hdev); 3456 } 3457 3458 hci_conn_del(conn); 3459 3460 unlock: 3461 hci_dev_unlock(hdev); 3462 } 3463 3464 static void hci_auth_complete_evt(struct hci_dev *hdev, void *data, 3465 struct sk_buff *skb) 3466 { 3467 struct hci_ev_auth_complete *ev = data; 3468 struct hci_conn *conn; 3469 3470 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 3471 3472 hci_dev_lock(hdev); 3473 3474 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 3475 if (!conn) 3476 goto unlock; 3477 3478 if (!ev->status) { 3479 clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags); 3480 3481 if (!hci_conn_ssp_enabled(conn) && 3482 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) { 3483 bt_dev_info(hdev, "re-auth of legacy device is not possible."); 3484 } else { 3485 set_bit(HCI_CONN_AUTH, &conn->flags); 3486 conn->sec_level = conn->pending_sec_level; 3487 } 3488 } else { 3489 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING) 3490 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags); 3491 3492 mgmt_auth_failed(conn, ev->status); 3493 } 3494 3495 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags); 3496 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags); 3497 3498 if (conn->state == BT_CONFIG) { 3499 if (!ev->status && hci_conn_ssp_enabled(conn)) { 3500 struct hci_cp_set_conn_encrypt cp; 3501 cp.handle = ev->handle; 3502 cp.encrypt = 0x01; 3503 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), 3504 &cp); 3505 } else { 3506 conn->state = BT_CONNECTED; 3507 hci_connect_cfm(conn, ev->status); 3508 hci_conn_drop(conn); 3509 } 3510 } else { 3511 hci_auth_cfm(conn, ev->status); 3512 3513 hci_conn_hold(conn); 3514 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 3515 hci_conn_drop(conn); 3516 } 3517 3518 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) { 3519 if (!ev->status) { 3520 struct hci_cp_set_conn_encrypt cp; 3521 cp.handle = ev->handle; 3522 cp.encrypt = 0x01; 3523 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), 3524 &cp); 3525 } else { 3526 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); 3527 hci_encrypt_cfm(conn, ev->status); 3528 } 3529 } 3530 3531 unlock: 3532 hci_dev_unlock(hdev); 3533 } 3534 3535 static void hci_remote_name_evt(struct hci_dev *hdev, void *data, 3536 struct sk_buff *skb) 3537 { 3538 struct hci_ev_remote_name *ev = data; 3539 struct hci_conn *conn; 3540 3541 bt_dev_dbg(hdev, "status 0x%2.2x", 
ev->status); 3542 3543 hci_conn_check_pending(hdev); 3544 3545 hci_dev_lock(hdev); 3546 3547 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 3548 3549 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 3550 goto check_auth; 3551 3552 if (ev->status == 0) 3553 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name, 3554 strnlen(ev->name, HCI_MAX_NAME_LENGTH)); 3555 else 3556 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0); 3557 3558 check_auth: 3559 if (!conn) 3560 goto unlock; 3561 3562 if (!hci_outgoing_auth_needed(hdev, conn)) 3563 goto unlock; 3564 3565 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) { 3566 struct hci_cp_auth_requested cp; 3567 3568 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags); 3569 3570 cp.handle = __cpu_to_le16(conn->handle); 3571 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp); 3572 } 3573 3574 unlock: 3575 hci_dev_unlock(hdev); 3576 } 3577 3578 static void hci_encrypt_change_evt(struct hci_dev *hdev, void *data, 3579 struct sk_buff *skb) 3580 { 3581 struct hci_ev_encrypt_change *ev = data; 3582 struct hci_conn *conn; 3583 3584 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 3585 3586 hci_dev_lock(hdev); 3587 3588 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 3589 if (!conn) 3590 goto unlock; 3591 3592 if (!ev->status) { 3593 if (ev->encrypt) { 3594 /* Encryption implies authentication */ 3595 set_bit(HCI_CONN_AUTH, &conn->flags); 3596 set_bit(HCI_CONN_ENCRYPT, &conn->flags); 3597 conn->sec_level = conn->pending_sec_level; 3598 3599 /* P-256 authentication key implies FIPS */ 3600 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256) 3601 set_bit(HCI_CONN_FIPS, &conn->flags); 3602 3603 if ((conn->type == ACL_LINK && ev->encrypt == 0x02) || 3604 conn->type == LE_LINK) 3605 set_bit(HCI_CONN_AES_CCM, &conn->flags); 3606 } else { 3607 clear_bit(HCI_CONN_ENCRYPT, &conn->flags); 3608 clear_bit(HCI_CONN_AES_CCM, &conn->flags); 3609 } 3610 } 3611 3612 /* We should disregard the current RPA and generate a new one 3613 * whenever the encryption procedure fails. 3614 */ 3615 if (ev->status && conn->type == LE_LINK) { 3616 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED); 3617 hci_adv_instances_set_rpa_expired(hdev, true); 3618 } 3619 3620 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); 3621 3622 /* Check link security requirements are met */ 3623 if (!hci_conn_check_link_mode(conn)) 3624 ev->status = HCI_ERROR_AUTH_FAILURE; 3625 3626 if (ev->status && conn->state == BT_CONNECTED) { 3627 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING) 3628 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags); 3629 3630 /* Notify upper layers so they can cleanup before 3631 * disconnecting. 3632 */ 3633 hci_encrypt_cfm(conn, ev->status); 3634 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE); 3635 hci_conn_drop(conn); 3636 goto unlock; 3637 } 3638 3639 /* Try reading the encryption key size for encrypted ACL links */ 3640 if (!ev->status && ev->encrypt && conn->type == ACL_LINK) { 3641 struct hci_cp_read_enc_key_size cp; 3642 3643 /* Only send HCI_Read_Encryption_Key_Size if the 3644 * controller really supports it. If it doesn't, assume 3645 * the default size (16). 
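* Support is advertised in the Supported_Commands bitmask, octet 20 bit 4, hence the hdev->commands[20] & 0x10 check below.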
3646 */ 3647 if (!(hdev->commands[20] & 0x10)) { 3648 conn->enc_key_size = HCI_LINK_KEY_SIZE; 3649 goto notify; 3650 } 3651 3652 cp.handle = cpu_to_le16(conn->handle); 3653 if (hci_send_cmd(hdev, HCI_OP_READ_ENC_KEY_SIZE, 3654 sizeof(cp), &cp)) { 3655 bt_dev_err(hdev, "sending read key size failed"); 3656 conn->enc_key_size = HCI_LINK_KEY_SIZE; 3657 goto notify; 3658 } 3659 3660 goto unlock; 3661 } 3662 3663 /* Set the default Authenticated Payload Timeout after 3664 * an LE Link is established. As per Core Spec v5.0, Vol 2, Part B 3665 * Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be 3666 * sent when the link is active and encryption is enabled. The conn 3667 * type can be either LE or ACL, the controller must support LMP Ping 3668 * (or LE Ping), and AES-CCM encryption must be in use. 3669 */ 3670 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) && 3671 test_bit(HCI_CONN_AES_CCM, &conn->flags) && 3672 ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) || 3673 (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) { 3674 struct hci_cp_write_auth_payload_to cp; 3675 3676 cp.handle = cpu_to_le16(conn->handle); 3677 cp.timeout = cpu_to_le16(hdev->auth_payload_timeout); 3678 if (hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO, 3679 sizeof(cp), &cp)) { 3680 bt_dev_err(hdev, "write auth payload timeout failed"); 3681 goto notify; 3682 } 3683 3684 goto unlock; 3685 } 3686 3687 notify: 3688 hci_encrypt_cfm(conn, ev->status); 3689 3690 unlock: 3691 hci_dev_unlock(hdev); 3692 } 3693 3694 static void hci_change_link_key_complete_evt(struct hci_dev *hdev, void *data, 3695 struct sk_buff *skb) 3696 { 3697 struct hci_ev_change_link_key_complete *ev = data; 3698 struct hci_conn *conn; 3699 3700 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 3701 3702 hci_dev_lock(hdev); 3703 3704 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 3705 if (conn) { 3706 if (!ev->status) 3707 set_bit(HCI_CONN_SECURE, &conn->flags); 3708 3709 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags); 3710 3711 hci_key_change_cfm(conn, ev->status); 3712 } 3713 3714 hci_dev_unlock(hdev); 3715 } 3716 3717 static void hci_remote_features_evt(struct hci_dev *hdev, void *data, 3718 struct sk_buff *skb) 3719 { 3720 struct hci_ev_remote_features *ev = data; 3721 struct hci_conn *conn; 3722 3723 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 3724 3725 hci_dev_lock(hdev); 3726 3727 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 3728 if (!conn) 3729 goto unlock; 3730 3731 if (!ev->status) 3732 memcpy(conn->features[0], ev->features, 8); 3733 3734 if (conn->state != BT_CONFIG) 3735 goto unlock; 3736 3737 if (!ev->status && lmp_ext_feat_capable(hdev) && 3738 lmp_ext_feat_capable(conn)) { 3739 struct hci_cp_read_remote_ext_features cp; 3740 cp.handle = ev->handle; 3741 cp.page = 0x01; 3742 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES, 3743 sizeof(cp), &cp); 3744 goto unlock; 3745 } 3746 3747 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) { 3748 struct hci_cp_remote_name_req cp; 3749 memset(&cp, 0, sizeof(cp)); 3750 bacpy(&cp.bdaddr, &conn->dst); 3751 cp.pscan_rep_mode = 0x02; 3752 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp); 3753 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) 3754 mgmt_device_connected(hdev, conn, NULL, 0); 3755 3756 if (!hci_outgoing_auth_needed(hdev, conn)) { 3757 conn->state = BT_CONNECTED; 3758 hci_connect_cfm(conn, ev->status); 3759 hci_conn_drop(conn); 3760 } 3761 3762 unlock: 3763 hci_dev_unlock(hdev); 
3764 } 3765 3766 static inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev, u8 ncmd) 3767 { 3768 cancel_delayed_work(&hdev->cmd_timer); 3769 3770 rcu_read_lock(); 3771 if (!test_bit(HCI_RESET, &hdev->flags)) { 3772 if (ncmd) { 3773 cancel_delayed_work(&hdev->ncmd_timer); 3774 atomic_set(&hdev->cmd_cnt, 1); 3775 } else { 3776 if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE)) 3777 queue_delayed_work(hdev->workqueue, &hdev->ncmd_timer, 3778 HCI_NCMD_TIMEOUT); 3779 } 3780 } 3781 rcu_read_unlock(); 3782 } 3783 3784 static u8 hci_cc_le_read_buffer_size_v2(struct hci_dev *hdev, void *data, 3785 struct sk_buff *skb) 3786 { 3787 struct hci_rp_le_read_buffer_size_v2 *rp = data; 3788 3789 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 3790 3791 if (rp->status) 3792 return rp->status; 3793 3794 hdev->le_mtu = __le16_to_cpu(rp->acl_mtu); 3795 hdev->le_pkts = rp->acl_max_pkt; 3796 hdev->iso_mtu = __le16_to_cpu(rp->iso_mtu); 3797 hdev->iso_pkts = rp->iso_max_pkt; 3798 3799 hdev->le_cnt = hdev->le_pkts; 3800 hdev->iso_cnt = hdev->iso_pkts; 3801 3802 BT_DBG("%s le mtu %d:%d iso mtu %d:%d", hdev->name, hdev->le_mtu, 3803 hdev->le_pkts, hdev->iso_mtu, hdev->iso_pkts); 3804 3805 return rp->status; 3806 } 3807 3808 static u8 hci_cc_le_set_cig_params(struct hci_dev *hdev, void *data, 3809 struct sk_buff *skb) 3810 { 3811 struct hci_rp_le_set_cig_params *rp = data; 3812 struct hci_conn *conn; 3813 int i = 0; 3814 3815 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 3816 3817 hci_dev_lock(hdev); 3818 3819 if (rp->status) { 3820 while ((conn = hci_conn_hash_lookup_cig(hdev, rp->cig_id))) { 3821 conn->state = BT_CLOSED; 3822 hci_connect_cfm(conn, rp->status); 3823 hci_conn_del(conn); 3824 } 3825 goto unlock; 3826 } 3827 3828 rcu_read_lock(); 3829 3830 list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) { 3831 if (conn->type != ISO_LINK || conn->iso_qos.cig != rp->cig_id || 3832 conn->state == BT_CONNECTED) 3833 continue; 3834 3835 conn->handle = __le16_to_cpu(rp->handle[i++]); 3836 3837 bt_dev_dbg(hdev, "%p handle 0x%4.4x link %p", conn, 3838 conn->handle, conn->link); 3839 3840 /* Create CIS if LE is already connected */ 3841 if (conn->link && conn->link->state == BT_CONNECTED) { 3842 rcu_read_unlock(); 3843 hci_le_create_cis(conn->link); 3844 rcu_read_lock(); 3845 } 3846 3847 if (i == rp->num_handles) 3848 break; 3849 } 3850 3851 rcu_read_unlock(); 3852 3853 unlock: 3854 hci_dev_unlock(hdev); 3855 3856 return rp->status; 3857 } 3858 3859 static u8 hci_cc_le_setup_iso_path(struct hci_dev *hdev, void *data, 3860 struct sk_buff *skb) 3861 { 3862 struct hci_rp_le_setup_iso_path *rp = data; 3863 struct hci_cp_le_setup_iso_path *cp; 3864 struct hci_conn *conn; 3865 3866 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 3867 3868 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SETUP_ISO_PATH); 3869 if (!cp) 3870 return rp->status; 3871 3872 hci_dev_lock(hdev); 3873 3874 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 3875 if (!conn) 3876 goto unlock; 3877 3878 if (rp->status) { 3879 hci_connect_cfm(conn, rp->status); 3880 hci_conn_del(conn); 3881 goto unlock; 3882 } 3883 3884 switch (cp->direction) { 3885 /* Input (Host to Controller) */ 3886 case 0x00: 3887 /* Only confirm connection if output only */ 3888 if (conn->iso_qos.out.sdu && !conn->iso_qos.in.sdu) 3889 hci_connect_cfm(conn, rp->status); 3890 break; 3891 /* Output (Controller to Host) */ 3892 case 0x01: 3893 /* Confirm connection since conn->iso_qos is always configured 3894 * last. 
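* This should ensure hci_connect_cfm() is called only once per CIS, after its final path has been set up.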
3895 */ 3896 hci_connect_cfm(conn, rp->status); 3897 break; 3898 } 3899 3900 unlock: 3901 hci_dev_unlock(hdev); 3902 return rp->status; 3903 } 3904 3905 static void hci_cs_le_create_big(struct hci_dev *hdev, u8 status) 3906 { 3907 bt_dev_dbg(hdev, "status 0x%2.2x", status); 3908 } 3909 3910 static u8 hci_cc_set_per_adv_param(struct hci_dev *hdev, void *data, 3911 struct sk_buff *skb) 3912 { 3913 struct hci_ev_status *rp = data; 3914 struct hci_cp_le_set_per_adv_params *cp; 3915 3916 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 3917 3918 if (rp->status) 3919 return rp->status; 3920 3921 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_PARAMS); 3922 if (!cp) 3923 return rp->status; 3924 3925 /* TODO: set the conn state */ 3926 return rp->status; 3927 } 3928 3929 static u8 hci_cc_le_set_per_adv_enable(struct hci_dev *hdev, void *data, 3930 struct sk_buff *skb) 3931 { 3932 struct hci_ev_status *rp = data; 3933 __u8 *sent; 3934 3935 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 3936 3937 if (rp->status) 3938 return rp->status; 3939 3940 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE); 3941 if (!sent) 3942 return rp->status; 3943 3944 hci_dev_lock(hdev); 3945 3946 if (*sent) 3947 hci_dev_set_flag(hdev, HCI_LE_PER_ADV); 3948 else 3949 hci_dev_clear_flag(hdev, HCI_LE_PER_ADV); 3950 3951 hci_dev_unlock(hdev); 3952 3953 return rp->status; 3954 } 3955 3956 #define HCI_CC_VL(_op, _func, _min, _max) \ 3957 { \ 3958 .op = _op, \ 3959 .func = _func, \ 3960 .min_len = _min, \ 3961 .max_len = _max, \ 3962 } 3963 3964 #define HCI_CC(_op, _func, _len) \ 3965 HCI_CC_VL(_op, _func, _len, _len) 3966 3967 #define HCI_CC_STATUS(_op, _func) \ 3968 HCI_CC(_op, _func, sizeof(struct hci_ev_status)) 3969 3970 static const struct hci_cc { 3971 u16 op; 3972 u8 (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb); 3973 u16 min_len; 3974 u16 max_len; 3975 } hci_cc_table[] = { 3976 HCI_CC_STATUS(HCI_OP_INQUIRY_CANCEL, hci_cc_inquiry_cancel), 3977 HCI_CC_STATUS(HCI_OP_PERIODIC_INQ, hci_cc_periodic_inq), 3978 HCI_CC_STATUS(HCI_OP_EXIT_PERIODIC_INQ, hci_cc_exit_periodic_inq), 3979 HCI_CC_STATUS(HCI_OP_REMOTE_NAME_REQ_CANCEL, 3980 hci_cc_remote_name_req_cancel), 3981 HCI_CC(HCI_OP_ROLE_DISCOVERY, hci_cc_role_discovery, 3982 sizeof(struct hci_rp_role_discovery)), 3983 HCI_CC(HCI_OP_READ_LINK_POLICY, hci_cc_read_link_policy, 3984 sizeof(struct hci_rp_read_link_policy)), 3985 HCI_CC(HCI_OP_WRITE_LINK_POLICY, hci_cc_write_link_policy, 3986 sizeof(struct hci_rp_write_link_policy)), 3987 HCI_CC(HCI_OP_READ_DEF_LINK_POLICY, hci_cc_read_def_link_policy, 3988 sizeof(struct hci_rp_read_def_link_policy)), 3989 HCI_CC_STATUS(HCI_OP_WRITE_DEF_LINK_POLICY, 3990 hci_cc_write_def_link_policy), 3991 HCI_CC_STATUS(HCI_OP_RESET, hci_cc_reset), 3992 HCI_CC(HCI_OP_READ_STORED_LINK_KEY, hci_cc_read_stored_link_key, 3993 sizeof(struct hci_rp_read_stored_link_key)), 3994 HCI_CC(HCI_OP_DELETE_STORED_LINK_KEY, hci_cc_delete_stored_link_key, 3995 sizeof(struct hci_rp_delete_stored_link_key)), 3996 HCI_CC_STATUS(HCI_OP_WRITE_LOCAL_NAME, hci_cc_write_local_name), 3997 HCI_CC(HCI_OP_READ_LOCAL_NAME, hci_cc_read_local_name, 3998 sizeof(struct hci_rp_read_local_name)), 3999 HCI_CC_STATUS(HCI_OP_WRITE_AUTH_ENABLE, hci_cc_write_auth_enable), 4000 HCI_CC_STATUS(HCI_OP_WRITE_ENCRYPT_MODE, hci_cc_write_encrypt_mode), 4001 HCI_CC_STATUS(HCI_OP_WRITE_SCAN_ENABLE, hci_cc_write_scan_enable), 4002 HCI_CC_STATUS(HCI_OP_SET_EVENT_FLT, hci_cc_set_event_filter), 4003 HCI_CC(HCI_OP_READ_CLASS_OF_DEV, hci_cc_read_class_of_dev, 4004 
sizeof(struct hci_rp_read_class_of_dev)), 4005 HCI_CC_STATUS(HCI_OP_WRITE_CLASS_OF_DEV, hci_cc_write_class_of_dev), 4006 HCI_CC(HCI_OP_READ_VOICE_SETTING, hci_cc_read_voice_setting, 4007 sizeof(struct hci_rp_read_voice_setting)), 4008 HCI_CC_STATUS(HCI_OP_WRITE_VOICE_SETTING, hci_cc_write_voice_setting), 4009 HCI_CC(HCI_OP_READ_NUM_SUPPORTED_IAC, hci_cc_read_num_supported_iac, 4010 sizeof(struct hci_rp_read_num_supported_iac)), 4011 HCI_CC_STATUS(HCI_OP_WRITE_SSP_MODE, hci_cc_write_ssp_mode), 4012 HCI_CC_STATUS(HCI_OP_WRITE_SC_SUPPORT, hci_cc_write_sc_support), 4013 HCI_CC(HCI_OP_READ_AUTH_PAYLOAD_TO, hci_cc_read_auth_payload_timeout, 4014 sizeof(struct hci_rp_read_auth_payload_to)), 4015 HCI_CC(HCI_OP_WRITE_AUTH_PAYLOAD_TO, hci_cc_write_auth_payload_timeout, 4016 sizeof(struct hci_rp_write_auth_payload_to)), 4017 HCI_CC(HCI_OP_READ_LOCAL_VERSION, hci_cc_read_local_version, 4018 sizeof(struct hci_rp_read_local_version)), 4019 HCI_CC(HCI_OP_READ_LOCAL_COMMANDS, hci_cc_read_local_commands, 4020 sizeof(struct hci_rp_read_local_commands)), 4021 HCI_CC(HCI_OP_READ_LOCAL_FEATURES, hci_cc_read_local_features, 4022 sizeof(struct hci_rp_read_local_features)), 4023 HCI_CC(HCI_OP_READ_LOCAL_EXT_FEATURES, hci_cc_read_local_ext_features, 4024 sizeof(struct hci_rp_read_local_ext_features)), 4025 HCI_CC(HCI_OP_READ_BUFFER_SIZE, hci_cc_read_buffer_size, 4026 sizeof(struct hci_rp_read_buffer_size)), 4027 HCI_CC(HCI_OP_READ_BD_ADDR, hci_cc_read_bd_addr, 4028 sizeof(struct hci_rp_read_bd_addr)), 4029 HCI_CC(HCI_OP_READ_LOCAL_PAIRING_OPTS, hci_cc_read_local_pairing_opts, 4030 sizeof(struct hci_rp_read_local_pairing_opts)), 4031 HCI_CC(HCI_OP_READ_PAGE_SCAN_ACTIVITY, hci_cc_read_page_scan_activity, 4032 sizeof(struct hci_rp_read_page_scan_activity)), 4033 HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_ACTIVITY, 4034 hci_cc_write_page_scan_activity), 4035 HCI_CC(HCI_OP_READ_PAGE_SCAN_TYPE, hci_cc_read_page_scan_type, 4036 sizeof(struct hci_rp_read_page_scan_type)), 4037 HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_TYPE, hci_cc_write_page_scan_type), 4038 HCI_CC(HCI_OP_READ_DATA_BLOCK_SIZE, hci_cc_read_data_block_size, 4039 sizeof(struct hci_rp_read_data_block_size)), 4040 HCI_CC(HCI_OP_READ_FLOW_CONTROL_MODE, hci_cc_read_flow_control_mode, 4041 sizeof(struct hci_rp_read_flow_control_mode)), 4042 HCI_CC(HCI_OP_READ_LOCAL_AMP_INFO, hci_cc_read_local_amp_info, 4043 sizeof(struct hci_rp_read_local_amp_info)), 4044 HCI_CC(HCI_OP_READ_CLOCK, hci_cc_read_clock, 4045 sizeof(struct hci_rp_read_clock)), 4046 HCI_CC(HCI_OP_READ_ENC_KEY_SIZE, hci_cc_read_enc_key_size, 4047 sizeof(struct hci_rp_read_enc_key_size)), 4048 HCI_CC(HCI_OP_READ_INQ_RSP_TX_POWER, hci_cc_read_inq_rsp_tx_power, 4049 sizeof(struct hci_rp_read_inq_rsp_tx_power)), 4050 HCI_CC(HCI_OP_READ_DEF_ERR_DATA_REPORTING, 4051 hci_cc_read_def_err_data_reporting, 4052 sizeof(struct hci_rp_read_def_err_data_reporting)), 4053 HCI_CC_STATUS(HCI_OP_WRITE_DEF_ERR_DATA_REPORTING, 4054 hci_cc_write_def_err_data_reporting), 4055 HCI_CC(HCI_OP_PIN_CODE_REPLY, hci_cc_pin_code_reply, 4056 sizeof(struct hci_rp_pin_code_reply)), 4057 HCI_CC(HCI_OP_PIN_CODE_NEG_REPLY, hci_cc_pin_code_neg_reply, 4058 sizeof(struct hci_rp_pin_code_neg_reply)), 4059 HCI_CC(HCI_OP_READ_LOCAL_OOB_DATA, hci_cc_read_local_oob_data, 4060 sizeof(struct hci_rp_read_local_oob_data)), 4061 HCI_CC(HCI_OP_READ_LOCAL_OOB_EXT_DATA, hci_cc_read_local_oob_ext_data, 4062 sizeof(struct hci_rp_read_local_oob_ext_data)), 4063 HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE, hci_cc_le_read_buffer_size, 4064 sizeof(struct 
hci_rp_le_read_buffer_size)), 4065 HCI_CC(HCI_OP_LE_READ_LOCAL_FEATURES, hci_cc_le_read_local_features, 4066 sizeof(struct hci_rp_le_read_local_features)), 4067 HCI_CC(HCI_OP_LE_READ_ADV_TX_POWER, hci_cc_le_read_adv_tx_power, 4068 sizeof(struct hci_rp_le_read_adv_tx_power)), 4069 HCI_CC(HCI_OP_USER_CONFIRM_REPLY, hci_cc_user_confirm_reply, 4070 sizeof(struct hci_rp_user_confirm_reply)), 4071 HCI_CC(HCI_OP_USER_CONFIRM_NEG_REPLY, hci_cc_user_confirm_neg_reply, 4072 sizeof(struct hci_rp_user_confirm_reply)), 4073 HCI_CC(HCI_OP_USER_PASSKEY_REPLY, hci_cc_user_passkey_reply, 4074 sizeof(struct hci_rp_user_confirm_reply)), 4075 HCI_CC(HCI_OP_USER_PASSKEY_NEG_REPLY, hci_cc_user_passkey_neg_reply, 4076 sizeof(struct hci_rp_user_confirm_reply)), 4077 HCI_CC_STATUS(HCI_OP_LE_SET_RANDOM_ADDR, hci_cc_le_set_random_addr), 4078 HCI_CC_STATUS(HCI_OP_LE_SET_ADV_ENABLE, hci_cc_le_set_adv_enable), 4079 HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_PARAM, hci_cc_le_set_scan_param), 4080 HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_ENABLE, hci_cc_le_set_scan_enable), 4081 HCI_CC(HCI_OP_LE_READ_ACCEPT_LIST_SIZE, 4082 hci_cc_le_read_accept_list_size, 4083 sizeof(struct hci_rp_le_read_accept_list_size)), 4084 HCI_CC_STATUS(HCI_OP_LE_CLEAR_ACCEPT_LIST, hci_cc_le_clear_accept_list), 4085 HCI_CC_STATUS(HCI_OP_LE_ADD_TO_ACCEPT_LIST, 4086 hci_cc_le_add_to_accept_list), 4087 HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_ACCEPT_LIST, 4088 hci_cc_le_del_from_accept_list), 4089 HCI_CC(HCI_OP_LE_READ_SUPPORTED_STATES, hci_cc_le_read_supported_states, 4090 sizeof(struct hci_rp_le_read_supported_states)), 4091 HCI_CC(HCI_OP_LE_READ_DEF_DATA_LEN, hci_cc_le_read_def_data_len, 4092 sizeof(struct hci_rp_le_read_def_data_len)), 4093 HCI_CC_STATUS(HCI_OP_LE_WRITE_DEF_DATA_LEN, 4094 hci_cc_le_write_def_data_len), 4095 HCI_CC_STATUS(HCI_OP_LE_ADD_TO_RESOLV_LIST, 4096 hci_cc_le_add_to_resolv_list), 4097 HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_RESOLV_LIST, 4098 hci_cc_le_del_from_resolv_list), 4099 HCI_CC_STATUS(HCI_OP_LE_CLEAR_RESOLV_LIST, 4100 hci_cc_le_clear_resolv_list), 4101 HCI_CC(HCI_OP_LE_READ_RESOLV_LIST_SIZE, hci_cc_le_read_resolv_list_size, 4102 sizeof(struct hci_rp_le_read_resolv_list_size)), 4103 HCI_CC_STATUS(HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 4104 hci_cc_le_set_addr_resolution_enable), 4105 HCI_CC(HCI_OP_LE_READ_MAX_DATA_LEN, hci_cc_le_read_max_data_len, 4106 sizeof(struct hci_rp_le_read_max_data_len)), 4107 HCI_CC_STATUS(HCI_OP_WRITE_LE_HOST_SUPPORTED, 4108 hci_cc_write_le_host_supported), 4109 HCI_CC_STATUS(HCI_OP_LE_SET_ADV_PARAM, hci_cc_set_adv_param), 4110 HCI_CC(HCI_OP_READ_RSSI, hci_cc_read_rssi, 4111 sizeof(struct hci_rp_read_rssi)), 4112 HCI_CC(HCI_OP_READ_TX_POWER, hci_cc_read_tx_power, 4113 sizeof(struct hci_rp_read_tx_power)), 4114 HCI_CC_STATUS(HCI_OP_WRITE_SSP_DEBUG_MODE, hci_cc_write_ssp_debug_mode), 4115 HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_PARAMS, 4116 hci_cc_le_set_ext_scan_param), 4117 HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_ENABLE, 4118 hci_cc_le_set_ext_scan_enable), 4119 HCI_CC_STATUS(HCI_OP_LE_SET_DEFAULT_PHY, hci_cc_le_set_default_phy), 4120 HCI_CC(HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS, 4121 hci_cc_le_read_num_adv_sets, 4122 sizeof(struct hci_rp_le_read_num_supported_adv_sets)), 4123 HCI_CC(HCI_OP_LE_SET_EXT_ADV_PARAMS, hci_cc_set_ext_adv_param, 4124 sizeof(struct hci_rp_le_set_ext_adv_params)), 4125 HCI_CC_STATUS(HCI_OP_LE_SET_EXT_ADV_ENABLE, 4126 hci_cc_le_set_ext_adv_enable), 4127 HCI_CC_STATUS(HCI_OP_LE_SET_ADV_SET_RAND_ADDR, 4128 hci_cc_le_set_adv_set_random_addr), 4129 HCI_CC_STATUS(HCI_OP_LE_REMOVE_ADV_SET, hci_cc_le_remove_adv_set), 
4130 HCI_CC_STATUS(HCI_OP_LE_CLEAR_ADV_SETS, hci_cc_le_clear_adv_sets), 4131 HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_PARAMS, hci_cc_set_per_adv_param), 4132 HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_ENABLE, 4133 hci_cc_le_set_per_adv_enable), 4134 HCI_CC(HCI_OP_LE_READ_TRANSMIT_POWER, hci_cc_le_read_transmit_power, 4135 sizeof(struct hci_rp_le_read_transmit_power)), 4136 HCI_CC_STATUS(HCI_OP_LE_SET_PRIVACY_MODE, hci_cc_le_set_privacy_mode), 4137 HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE_V2, hci_cc_le_read_buffer_size_v2, 4138 sizeof(struct hci_rp_le_read_buffer_size_v2)), 4139 HCI_CC_VL(HCI_OP_LE_SET_CIG_PARAMS, hci_cc_le_set_cig_params, 4140 sizeof(struct hci_rp_le_set_cig_params), HCI_MAX_EVENT_SIZE), 4141 HCI_CC(HCI_OP_LE_SETUP_ISO_PATH, hci_cc_le_setup_iso_path, 4142 sizeof(struct hci_rp_le_setup_iso_path)), 4143 }; 4144 4145 static u8 hci_cc_func(struct hci_dev *hdev, const struct hci_cc *cc, 4146 struct sk_buff *skb) 4147 { 4148 void *data; 4149 4150 if (skb->len < cc->min_len) { 4151 bt_dev_err(hdev, "unexpected cc 0x%4.4x length: %u < %u", 4152 cc->op, skb->len, cc->min_len); 4153 return HCI_ERROR_UNSPECIFIED; 4154 } 4155 4156 /* Just warn if the length is over max_len, since it may still be 4157 * possible to partially parse the cc, so leave it to the callback to 4158 * decide if that is acceptable. 4159 */ 4160 if (skb->len > cc->max_len) 4161 bt_dev_warn(hdev, "unexpected cc 0x%4.4x length: %u > %u", 4162 cc->op, skb->len, cc->max_len); 4163 4164 data = hci_cc_skb_pull(hdev, skb, cc->op, cc->min_len); 4165 if (!data) 4166 return HCI_ERROR_UNSPECIFIED; 4167 4168 return cc->func(hdev, data, skb); 4169 } 4170 4171 static void hci_cmd_complete_evt(struct hci_dev *hdev, void *data, 4172 struct sk_buff *skb, u16 *opcode, u8 *status, 4173 hci_req_complete_t *req_complete, 4174 hci_req_complete_skb_t *req_complete_skb) 4175 { 4176 struct hci_ev_cmd_complete *ev = data; 4177 int i; 4178 4179 *opcode = __le16_to_cpu(ev->opcode); 4180 4181 bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode); 4182 4183 for (i = 0; i < ARRAY_SIZE(hci_cc_table); i++) { 4184 if (hci_cc_table[i].op == *opcode) { 4185 *status = hci_cc_func(hdev, &hci_cc_table[i], skb); 4186 break; 4187 } 4188 } 4189 4190 if (i == ARRAY_SIZE(hci_cc_table)) { 4191 /* Unknown opcode, assume byte 0 contains the status, so 4192 * that e.g. __hci_cmd_sync() properly returns errors 4193 * for vendor specific commands sent by HCI drivers. 4194 * If a vendor doesn't actually follow this convention we may 4195 * need to introduce a vendor CC table in order to properly set 4196 * the status. 
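* (For standard commands the status is the first byte of the Command Complete return parameters, which is why reading byte 0 works here.)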
4197 */ 4198 *status = skb->data[0]; 4199 } 4200 4201 handle_cmd_cnt_and_timer(hdev, ev->ncmd); 4202 4203 hci_req_cmd_complete(hdev, *opcode, *status, req_complete, 4204 req_complete_skb); 4205 4206 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) { 4207 bt_dev_err(hdev, 4208 "unexpected event for opcode 0x%4.4x", *opcode); 4209 return; 4210 } 4211 4212 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q)) 4213 queue_work(hdev->workqueue, &hdev->cmd_work); 4214 } 4215 4216 static void hci_cs_le_create_cis(struct hci_dev *hdev, u8 status) 4217 { 4218 struct hci_cp_le_create_cis *cp; 4219 int i; 4220 4221 bt_dev_dbg(hdev, "status 0x%2.2x", status); 4222 4223 if (!status) 4224 return; 4225 4226 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CIS); 4227 if (!cp) 4228 return; 4229 4230 hci_dev_lock(hdev); 4231 4232 /* Remove connection if command failed */ 4233 for (i = 0; cp->num_cis; cp->num_cis--, i++) { 4234 struct hci_conn *conn; 4235 u16 handle; 4236 4237 handle = __le16_to_cpu(cp->cis[i].cis_handle); 4238 4239 conn = hci_conn_hash_lookup_handle(hdev, handle); 4240 if (conn) { 4241 conn->state = BT_CLOSED; 4242 hci_connect_cfm(conn, status); 4243 hci_conn_del(conn); 4244 } 4245 } 4246 4247 hci_dev_unlock(hdev); 4248 } 4249 4250 #define HCI_CS(_op, _func) \ 4251 { \ 4252 .op = _op, \ 4253 .func = _func, \ 4254 } 4255 4256 static const struct hci_cs { 4257 u16 op; 4258 void (*func)(struct hci_dev *hdev, __u8 status); 4259 } hci_cs_table[] = { 4260 HCI_CS(HCI_OP_INQUIRY, hci_cs_inquiry), 4261 HCI_CS(HCI_OP_CREATE_CONN, hci_cs_create_conn), 4262 HCI_CS(HCI_OP_DISCONNECT, hci_cs_disconnect), 4263 HCI_CS(HCI_OP_ADD_SCO, hci_cs_add_sco), 4264 HCI_CS(HCI_OP_AUTH_REQUESTED, hci_cs_auth_requested), 4265 HCI_CS(HCI_OP_SET_CONN_ENCRYPT, hci_cs_set_conn_encrypt), 4266 HCI_CS(HCI_OP_REMOTE_NAME_REQ, hci_cs_remote_name_req), 4267 HCI_CS(HCI_OP_READ_REMOTE_FEATURES, hci_cs_read_remote_features), 4268 HCI_CS(HCI_OP_READ_REMOTE_EXT_FEATURES, 4269 hci_cs_read_remote_ext_features), 4270 HCI_CS(HCI_OP_SETUP_SYNC_CONN, hci_cs_setup_sync_conn), 4271 HCI_CS(HCI_OP_ENHANCED_SETUP_SYNC_CONN, 4272 hci_cs_enhanced_setup_sync_conn), 4273 HCI_CS(HCI_OP_SNIFF_MODE, hci_cs_sniff_mode), 4274 HCI_CS(HCI_OP_EXIT_SNIFF_MODE, hci_cs_exit_sniff_mode), 4275 HCI_CS(HCI_OP_SWITCH_ROLE, hci_cs_switch_role), 4276 HCI_CS(HCI_OP_LE_CREATE_CONN, hci_cs_le_create_conn), 4277 HCI_CS(HCI_OP_LE_READ_REMOTE_FEATURES, hci_cs_le_read_remote_features), 4278 HCI_CS(HCI_OP_LE_START_ENC, hci_cs_le_start_enc), 4279 HCI_CS(HCI_OP_LE_EXT_CREATE_CONN, hci_cs_le_ext_create_conn), 4280 HCI_CS(HCI_OP_LE_CREATE_CIS, hci_cs_le_create_cis), 4281 HCI_CS(HCI_OP_LE_CREATE_BIG, hci_cs_le_create_big), 4282 }; 4283 4284 static void hci_cmd_status_evt(struct hci_dev *hdev, void *data, 4285 struct sk_buff *skb, u16 *opcode, u8 *status, 4286 hci_req_complete_t *req_complete, 4287 hci_req_complete_skb_t *req_complete_skb) 4288 { 4289 struct hci_ev_cmd_status *ev = data; 4290 int i; 4291 4292 *opcode = __le16_to_cpu(ev->opcode); 4293 *status = ev->status; 4294 4295 bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode); 4296 4297 for (i = 0; i < ARRAY_SIZE(hci_cs_table); i++) { 4298 if (hci_cs_table[i].op == *opcode) { 4299 hci_cs_table[i].func(hdev, ev->status); 4300 break; 4301 } 4302 } 4303 4304 handle_cmd_cnt_and_timer(hdev, ev->ncmd); 4305 4306 /* Indicate request completion if the command failed. 
Also, if 4307 * we're not waiting for a special event and we get a success 4308 * command status, we should try to flag the request as completed 4309 * (since for this kind of command there will not be a command 4310 * complete event). 4311 */ 4312 if (ev->status || (hdev->sent_cmd && !hci_skb_event(hdev->sent_cmd))) { 4313 hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete, 4314 req_complete_skb); 4315 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) { 4316 bt_dev_err(hdev, "unexpected event for opcode 0x%4.4x", 4317 *opcode); 4318 return; 4319 } 4320 } 4321 4322 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q)) 4323 queue_work(hdev->workqueue, &hdev->cmd_work); 4324 } 4325 4326 static void hci_hardware_error_evt(struct hci_dev *hdev, void *data, 4327 struct sk_buff *skb) 4328 { 4329 struct hci_ev_hardware_error *ev = data; 4330 4331 bt_dev_dbg(hdev, "code 0x%2.2x", ev->code); 4332 4333 hdev->hw_error_code = ev->code; 4334 4335 queue_work(hdev->req_workqueue, &hdev->error_reset); 4336 } 4337 4338 static void hci_role_change_evt(struct hci_dev *hdev, void *data, 4339 struct sk_buff *skb) 4340 { 4341 struct hci_ev_role_change *ev = data; 4342 struct hci_conn *conn; 4343 4344 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 4345 4346 hci_dev_lock(hdev); 4347 4348 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 4349 if (conn) { 4350 if (!ev->status) 4351 conn->role = ev->role; 4352 4353 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags); 4354 4355 hci_role_switch_cfm(conn, ev->status, ev->role); 4356 } 4357 4358 hci_dev_unlock(hdev); 4359 } 4360 4361 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data, 4362 struct sk_buff *skb) 4363 { 4364 struct hci_ev_num_comp_pkts *ev = data; 4365 int i; 4366 4367 if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_PKTS, 4368 flex_array_size(ev, handles, ev->num))) 4369 return; 4370 4371 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) { 4372 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode); 4373 return; 4374 } 4375 4376 bt_dev_dbg(hdev, "num %d", ev->num); 4377 4378 for (i = 0; i < ev->num; i++) { 4379 struct hci_comp_pkts_info *info = &ev->handles[i]; 4380 struct hci_conn *conn; 4381 __u16 handle, count; 4382 4383 handle = __le16_to_cpu(info->handle); 4384 count = __le16_to_cpu(info->count); 4385 4386 conn = hci_conn_hash_lookup_handle(hdev, handle); 4387 if (!conn) 4388 continue; 4389 4390 conn->sent -= count; 4391 4392 switch (conn->type) { 4393 case ACL_LINK: 4394 hdev->acl_cnt += count; 4395 if (hdev->acl_cnt > hdev->acl_pkts) 4396 hdev->acl_cnt = hdev->acl_pkts; 4397 break; 4398 4399 case LE_LINK: 4400 if (hdev->le_pkts) { 4401 hdev->le_cnt += count; 4402 if (hdev->le_cnt > hdev->le_pkts) 4403 hdev->le_cnt = hdev->le_pkts; 4404 } else { 4405 hdev->acl_cnt += count; 4406 if (hdev->acl_cnt > hdev->acl_pkts) 4407 hdev->acl_cnt = hdev->acl_pkts; 4408 } 4409 break; 4410 4411 case SCO_LINK: 4412 hdev->sco_cnt += count; 4413 if (hdev->sco_cnt > hdev->sco_pkts) 4414 hdev->sco_cnt = hdev->sco_pkts; 4415 break; 4416 4417 case ISO_LINK: 4418 if (hdev->iso_pkts) { 4419 hdev->iso_cnt += count; 4420 if (hdev->iso_cnt > hdev->iso_pkts) 4421 hdev->iso_cnt = hdev->iso_pkts; 4422 } else if (hdev->le_pkts) { 4423 hdev->le_cnt += count; 4424 if (hdev->le_cnt > hdev->le_pkts) 4425 hdev->le_cnt = hdev->le_pkts; 4426 } else { 4427 hdev->acl_cnt += count; 4428 if (hdev->acl_cnt > hdev->acl_pkts) 4429 hdev->acl_cnt = hdev->acl_pkts; 4430 } 4431 break; 4432 4433 default: 4434 bt_dev_err(hdev, "unknown type
%d conn %p", 4435 conn->type, conn); 4436 break; 4437 } 4438 } 4439 4440 queue_work(hdev->workqueue, &hdev->tx_work); 4441 } 4442 4443 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev, 4444 __u16 handle) 4445 { 4446 struct hci_chan *chan; 4447 4448 switch (hdev->dev_type) { 4449 case HCI_PRIMARY: 4450 return hci_conn_hash_lookup_handle(hdev, handle); 4451 case HCI_AMP: 4452 chan = hci_chan_lookup_handle(hdev, handle); 4453 if (chan) 4454 return chan->conn; 4455 break; 4456 default: 4457 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type); 4458 break; 4459 } 4460 4461 return NULL; 4462 } 4463 4464 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, void *data, 4465 struct sk_buff *skb) 4466 { 4467 struct hci_ev_num_comp_blocks *ev = data; 4468 int i; 4469 4470 if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_BLOCKS, 4471 flex_array_size(ev, handles, ev->num_hndl))) 4472 return; 4473 4474 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) { 4475 bt_dev_err(hdev, "wrong event for mode %d", 4476 hdev->flow_ctl_mode); 4477 return; 4478 } 4479 4480 bt_dev_dbg(hdev, "num_blocks %d num_hndl %d", ev->num_blocks, 4481 ev->num_hndl); 4482 4483 for (i = 0; i < ev->num_hndl; i++) { 4484 struct hci_comp_blocks_info *info = &ev->handles[i]; 4485 struct hci_conn *conn = NULL; 4486 __u16 handle, block_count; 4487 4488 handle = __le16_to_cpu(info->handle); 4489 block_count = __le16_to_cpu(info->blocks); 4490 4491 conn = __hci_conn_lookup_handle(hdev, handle); 4492 if (!conn) 4493 continue; 4494 4495 conn->sent -= block_count; 4496 4497 switch (conn->type) { 4498 case ACL_LINK: 4499 case AMP_LINK: 4500 hdev->block_cnt += block_count; 4501 if (hdev->block_cnt > hdev->num_blocks) 4502 hdev->block_cnt = hdev->num_blocks; 4503 break; 4504 4505 default: 4506 bt_dev_err(hdev, "unknown type %d conn %p", 4507 conn->type, conn); 4508 break; 4509 } 4510 } 4511 4512 queue_work(hdev->workqueue, &hdev->tx_work); 4513 } 4514 4515 static void hci_mode_change_evt(struct hci_dev *hdev, void *data, 4516 struct sk_buff *skb) 4517 { 4518 struct hci_ev_mode_change *ev = data; 4519 struct hci_conn *conn; 4520 4521 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 4522 4523 hci_dev_lock(hdev); 4524 4525 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 4526 if (conn) { 4527 conn->mode = ev->mode; 4528 4529 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, 4530 &conn->flags)) { 4531 if (conn->mode == HCI_CM_ACTIVE) 4532 set_bit(HCI_CONN_POWER_SAVE, &conn->flags); 4533 else 4534 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags); 4535 } 4536 4537 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags)) 4538 hci_sco_setup(conn, ev->status); 4539 } 4540 4541 hci_dev_unlock(hdev); 4542 } 4543 4544 static void hci_pin_code_request_evt(struct hci_dev *hdev, void *data, 4545 struct sk_buff *skb) 4546 { 4547 struct hci_ev_pin_code_req *ev = data; 4548 struct hci_conn *conn; 4549 4550 bt_dev_dbg(hdev, ""); 4551 4552 hci_dev_lock(hdev); 4553 4554 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 4555 if (!conn) 4556 goto unlock; 4557 4558 if (conn->state == BT_CONNECTED) { 4559 hci_conn_hold(conn); 4560 conn->disc_timeout = HCI_PAIRING_TIMEOUT; 4561 hci_conn_drop(conn); 4562 } 4563 4564 if (!hci_dev_test_flag(hdev, HCI_BONDABLE) && 4565 !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) { 4566 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY, 4567 sizeof(ev->bdaddr), &ev->bdaddr); 4568 } else if (hci_dev_test_flag(hdev, HCI_MGMT)) { 4569 u8 secure; 4570 4571 if 
(conn->pending_sec_level == BT_SECURITY_HIGH) 4572 secure = 1; 4573 else 4574 secure = 0; 4575 4576 mgmt_pin_code_request(hdev, &ev->bdaddr, secure); 4577 } 4578 4579 unlock: 4580 hci_dev_unlock(hdev); 4581 } 4582 4583 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len) 4584 { 4585 if (key_type == HCI_LK_CHANGED_COMBINATION) 4586 return; 4587 4588 conn->pin_length = pin_len; 4589 conn->key_type = key_type; 4590 4591 switch (key_type) { 4592 case HCI_LK_LOCAL_UNIT: 4593 case HCI_LK_REMOTE_UNIT: 4594 case HCI_LK_DEBUG_COMBINATION: 4595 return; 4596 case HCI_LK_COMBINATION: 4597 if (pin_len == 16) 4598 conn->pending_sec_level = BT_SECURITY_HIGH; 4599 else 4600 conn->pending_sec_level = BT_SECURITY_MEDIUM; 4601 break; 4602 case HCI_LK_UNAUTH_COMBINATION_P192: 4603 case HCI_LK_UNAUTH_COMBINATION_P256: 4604 conn->pending_sec_level = BT_SECURITY_MEDIUM; 4605 break; 4606 case HCI_LK_AUTH_COMBINATION_P192: 4607 conn->pending_sec_level = BT_SECURITY_HIGH; 4608 break; 4609 case HCI_LK_AUTH_COMBINATION_P256: 4610 conn->pending_sec_level = BT_SECURITY_FIPS; 4611 break; 4612 } 4613 } 4614 4615 static void hci_link_key_request_evt(struct hci_dev *hdev, void *data, 4616 struct sk_buff *skb) 4617 { 4618 struct hci_ev_link_key_req *ev = data; 4619 struct hci_cp_link_key_reply cp; 4620 struct hci_conn *conn; 4621 struct link_key *key; 4622 4623 bt_dev_dbg(hdev, ""); 4624 4625 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 4626 return; 4627 4628 hci_dev_lock(hdev); 4629 4630 key = hci_find_link_key(hdev, &ev->bdaddr); 4631 if (!key) { 4632 bt_dev_dbg(hdev, "link key not found for %pMR", &ev->bdaddr); 4633 goto not_found; 4634 } 4635 4636 bt_dev_dbg(hdev, "found key type %u for %pMR", key->type, &ev->bdaddr); 4637 4638 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 4639 if (conn) { 4640 clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags); 4641 4642 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 || 4643 key->type == HCI_LK_UNAUTH_COMBINATION_P256) && 4644 conn->auth_type != 0xff && (conn->auth_type & 0x01)) { 4645 bt_dev_dbg(hdev, "ignoring unauthenticated key"); 4646 goto not_found; 4647 } 4648 4649 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 && 4650 (conn->pending_sec_level == BT_SECURITY_HIGH || 4651 conn->pending_sec_level == BT_SECURITY_FIPS)) { 4652 bt_dev_dbg(hdev, "ignoring key unauthenticated for high security"); 4653 goto not_found; 4654 } 4655 4656 conn_set_key(conn, key->type, key->pin_len); 4657 } 4658 4659 bacpy(&cp.bdaddr, &ev->bdaddr); 4660 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE); 4661 4662 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp); 4663 4664 hci_dev_unlock(hdev); 4665 4666 return; 4667 4668 not_found: 4669 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr); 4670 hci_dev_unlock(hdev); 4671 } 4672 4673 static void hci_link_key_notify_evt(struct hci_dev *hdev, void *data, 4674 struct sk_buff *skb) 4675 { 4676 struct hci_ev_link_key_notify *ev = data; 4677 struct hci_conn *conn; 4678 struct link_key *key; 4679 bool persistent; 4680 u8 pin_len = 0; 4681 4682 bt_dev_dbg(hdev, ""); 4683 4684 hci_dev_lock(hdev); 4685 4686 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 4687 if (!conn) 4688 goto unlock; 4689 4690 hci_conn_hold(conn); 4691 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 4692 hci_conn_drop(conn); 4693 4694 set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags); 4695 conn_set_key(conn, ev->key_type, conn->pin_length); 4696 4697 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 4698 goto unlock; 4699 4700 key = 
hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key, 4701 ev->key_type, pin_len, &persistent); 4702 if (!key) 4703 goto unlock; 4704 4705 /* Update connection information since adding the key will have 4706 * fixed up the type in the case of changed combination keys. 4707 */ 4708 if (ev->key_type == HCI_LK_CHANGED_COMBINATION) 4709 conn_set_key(conn, key->type, key->pin_len); 4710 4711 mgmt_new_link_key(hdev, key, persistent); 4712 4713 /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag 4714 * is set. If it's not set simply remove the key from the kernel 4715 * list (we've still notified user space about it but with 4716 * store_hint being 0). 4717 */ 4718 if (key->type == HCI_LK_DEBUG_COMBINATION && 4719 !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) { 4720 list_del_rcu(&key->list); 4721 kfree_rcu(key, rcu); 4722 goto unlock; 4723 } 4724 4725 if (persistent) 4726 clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags); 4727 else 4728 set_bit(HCI_CONN_FLUSH_KEY, &conn->flags); 4729 4730 unlock: 4731 hci_dev_unlock(hdev); 4732 } 4733 4734 static void hci_clock_offset_evt(struct hci_dev *hdev, void *data, 4735 struct sk_buff *skb) 4736 { 4737 struct hci_ev_clock_offset *ev = data; 4738 struct hci_conn *conn; 4739 4740 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 4741 4742 hci_dev_lock(hdev); 4743 4744 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 4745 if (conn && !ev->status) { 4746 struct inquiry_entry *ie; 4747 4748 ie = hci_inquiry_cache_lookup(hdev, &conn->dst); 4749 if (ie) { 4750 ie->data.clock_offset = ev->clock_offset; 4751 ie->timestamp = jiffies; 4752 } 4753 } 4754 4755 hci_dev_unlock(hdev); 4756 } 4757 4758 static void hci_pkt_type_change_evt(struct hci_dev *hdev, void *data, 4759 struct sk_buff *skb) 4760 { 4761 struct hci_ev_pkt_type_change *ev = data; 4762 struct hci_conn *conn; 4763 4764 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 4765 4766 hci_dev_lock(hdev); 4767 4768 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 4769 if (conn && !ev->status) 4770 conn->pkt_type = __le16_to_cpu(ev->pkt_type); 4771 4772 hci_dev_unlock(hdev); 4773 } 4774 4775 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, void *data, 4776 struct sk_buff *skb) 4777 { 4778 struct hci_ev_pscan_rep_mode *ev = data; 4779 struct inquiry_entry *ie; 4780 4781 bt_dev_dbg(hdev, ""); 4782 4783 hci_dev_lock(hdev); 4784 4785 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr); 4786 if (ie) { 4787 ie->data.pscan_rep_mode = ev->pscan_rep_mode; 4788 ie->timestamp = jiffies; 4789 } 4790 4791 hci_dev_unlock(hdev); 4792 } 4793 4794 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, void *edata, 4795 struct sk_buff *skb) 4796 { 4797 struct hci_ev_inquiry_result_rssi *ev = edata; 4798 struct inquiry_data data; 4799 int i; 4800 4801 bt_dev_dbg(hdev, "num_rsp %d", ev->num); 4802 4803 if (!ev->num) 4804 return; 4805 4806 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) 4807 return; 4808 4809 hci_dev_lock(hdev); 4810 4811 if (skb->len == array_size(ev->num, 4812 sizeof(struct inquiry_info_rssi_pscan))) { 4813 struct inquiry_info_rssi_pscan *info; 4814 4815 for (i = 0; i < ev->num; i++) { 4816 u32 flags; 4817 4818 info = hci_ev_skb_pull(hdev, skb, 4819 HCI_EV_INQUIRY_RESULT_WITH_RSSI, 4820 sizeof(*info)); 4821 if (!info) { 4822 bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x", 4823 HCI_EV_INQUIRY_RESULT_WITH_RSSI); 4824 goto unlock; 4825 } 4826 4827 bacpy(&data.bdaddr, &info->bdaddr); 4828 data.pscan_rep_mode = info->pscan_rep_mode; 4829 data.pscan_period_mode 
= info->pscan_period_mode; 4830 data.pscan_mode = info->pscan_mode; 4831 memcpy(data.dev_class, info->dev_class, 3); 4832 data.clock_offset = info->clock_offset; 4833 data.rssi = info->rssi; 4834 data.ssp_mode = 0x00; 4835 4836 flags = hci_inquiry_cache_update(hdev, &data, false); 4837 4838 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 4839 info->dev_class, info->rssi, 4840 flags, NULL, 0, NULL, 0, 0); 4841 } 4842 } else if (skb->len == array_size(ev->num, 4843 sizeof(struct inquiry_info_rssi))) { 4844 struct inquiry_info_rssi *info; 4845 4846 for (i = 0; i < ev->num; i++) { 4847 u32 flags; 4848 4849 info = hci_ev_skb_pull(hdev, skb, 4850 HCI_EV_INQUIRY_RESULT_WITH_RSSI, 4851 sizeof(*info)); 4852 if (!info) { 4853 bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x", 4854 HCI_EV_INQUIRY_RESULT_WITH_RSSI); 4855 goto unlock; 4856 } 4857 4858 bacpy(&data.bdaddr, &info->bdaddr); 4859 data.pscan_rep_mode = info->pscan_rep_mode; 4860 data.pscan_period_mode = info->pscan_period_mode; 4861 data.pscan_mode = 0x00; 4862 memcpy(data.dev_class, info->dev_class, 3); 4863 data.clock_offset = info->clock_offset; 4864 data.rssi = info->rssi; 4865 data.ssp_mode = 0x00; 4866 4867 flags = hci_inquiry_cache_update(hdev, &data, false); 4868 4869 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 4870 info->dev_class, info->rssi, 4871 flags, NULL, 0, NULL, 0, 0); 4872 } 4873 } else { 4874 bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x", 4875 HCI_EV_INQUIRY_RESULT_WITH_RSSI); 4876 } 4877 unlock: 4878 hci_dev_unlock(hdev); 4879 } 4880 4881 static void hci_remote_ext_features_evt(struct hci_dev *hdev, void *data, 4882 struct sk_buff *skb) 4883 { 4884 struct hci_ev_remote_ext_features *ev = data; 4885 struct hci_conn *conn; 4886 4887 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 4888 4889 hci_dev_lock(hdev); 4890 4891 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 4892 if (!conn) 4893 goto unlock; 4894 4895 if (ev->page < HCI_MAX_PAGES) 4896 memcpy(conn->features[ev->page], ev->features, 8); 4897 4898 if (!ev->status && ev->page == 0x01) { 4899 struct inquiry_entry *ie; 4900 4901 ie = hci_inquiry_cache_lookup(hdev, &conn->dst); 4902 if (ie) 4903 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP); 4904 4905 if (ev->features[0] & LMP_HOST_SSP) { 4906 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags); 4907 } else { 4908 /* It is mandatory by the Bluetooth specification that 4909 * Extended Inquiry Results are only used when Secure 4910 * Simple Pairing is enabled, but some devices violate 4911 * this. 
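* (Such devices deliver EIR data even though their remote host features leave the Host SSP bit cleared.)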
4912 * 4913 * To make these devices work, the internal SSP 4914 * enabled flag needs to be cleared if the remote host 4915 * features do not indicate SSP support. */ 4916 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags); 4917 } 4918 4919 if (ev->features[0] & LMP_HOST_SC) 4920 set_bit(HCI_CONN_SC_ENABLED, &conn->flags); 4921 } 4922 4923 if (conn->state != BT_CONFIG) 4924 goto unlock; 4925 4926 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) { 4927 struct hci_cp_remote_name_req cp; 4928 memset(&cp, 0, sizeof(cp)); 4929 bacpy(&cp.bdaddr, &conn->dst); 4930 cp.pscan_rep_mode = 0x02; 4931 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp); 4932 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) 4933 mgmt_device_connected(hdev, conn, NULL, 0); 4934 4935 if (!hci_outgoing_auth_needed(hdev, conn)) { 4936 conn->state = BT_CONNECTED; 4937 hci_connect_cfm(conn, ev->status); 4938 hci_conn_drop(conn); 4939 } 4940 4941 unlock: 4942 hci_dev_unlock(hdev); 4943 } 4944 4945 static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data, 4946 struct sk_buff *skb) 4947 { 4948 struct hci_ev_sync_conn_complete *ev = data; 4949 struct hci_conn *conn; 4950 u8 status = ev->status; 4951 4952 switch (ev->link_type) { 4953 case SCO_LINK: 4954 case ESCO_LINK: 4955 break; 4956 default: 4957 /* As per Core 5.3 Vol 4 Part E 7.7.35 (p.2219), Link_Type 4958 * for HCI_Synchronous_Connection_Complete is limited to 4959 * either SCO or eSCO. 4960 */ 4961 bt_dev_err(hdev, "Ignoring connect complete event for invalid link type"); 4962 return; 4963 } 4964 4965 bt_dev_dbg(hdev, "status 0x%2.2x", status); 4966 4967 hci_dev_lock(hdev); 4968 4969 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); 4970 if (!conn) { 4971 if (ev->link_type == ESCO_LINK) 4972 goto unlock; 4973 4974 /* When the link type in the event indicates SCO connection 4975 * and lookup of the connection object fails, then check 4976 * if an eSCO connection object exists. 4977 * 4978 * The core limits the synchronous connections to either 4979 * SCO or eSCO. The eSCO connection is preferred and 4980 * attempted first; until it is successfully established, 4981 * the link type will be hinted as eSCO. 4982 */ 4983 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr); 4984 if (!conn) 4985 goto unlock; 4986 } 4987 4988 /* The HCI_Synchronous_Connection_Complete event is only sent once per connection. 4989 * Processing it more than once per connection can corrupt kernel memory. 4990 * 4991 * As the connection handle is set here for the first time, it indicates 4992 * whether the connection is already set up.
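* (conn->handle is initialized to HCI_CONN_HANDLE_UNSET when the connection object is created, so any other value means this event was already processed for the connection.)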
4993 */ 4994 if (conn->handle != HCI_CONN_HANDLE_UNSET) { 4995 bt_dev_err(hdev, "Ignoring HCI_Sync_Conn_Complete event for existing connection"); 4996 goto unlock; 4997 } 4998 4999 switch (status) { 5000 case 0x00: 5001 conn->handle = __le16_to_cpu(ev->handle); 5002 if (conn->handle > HCI_CONN_HANDLE_MAX) { 5003 bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x", 5004 conn->handle, HCI_CONN_HANDLE_MAX); 5005 status = HCI_ERROR_INVALID_PARAMETERS; 5006 conn->state = BT_CLOSED; 5007 break; 5008 } 5009 5010 conn->state = BT_CONNECTED; 5011 conn->type = ev->link_type; 5012 5013 hci_debugfs_create_conn(conn); 5014 hci_conn_add_sysfs(conn); 5015 break; 5016 5017 case 0x10: /* Connection Accept Timeout */ 5018 case 0x0d: /* Connection Rejected due to Limited Resources */ 5019 case 0x11: /* Unsupported Feature or Parameter Value */ 5020 case 0x1c: /* SCO interval rejected */ 5021 case 0x1a: /* Unsupported Remote Feature */ 5022 case 0x1e: /* Invalid LMP Parameters */ 5023 case 0x1f: /* Unspecified error */ 5024 case 0x20: /* Unsupported LMP Parameter value */ 5025 if (conn->out) { 5026 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) | 5027 (hdev->esco_type & EDR_ESCO_MASK); 5028 if (hci_setup_sync(conn, conn->link->handle)) 5029 goto unlock; 5030 } 5031 fallthrough; 5032 5033 default: 5034 conn->state = BT_CLOSED; 5035 break; 5036 } 5037 5038 bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode); 5039 /* Notify only in case of SCO over the HCI transport data path, which 5040 * is indicated by a data path value of zero; a non-zero value means a 5041 * non-HCI transport data path. */ 5042 if (conn->codec.data_path == 0 && hdev->notify) { 5043 switch (ev->air_mode) { 5044 case 0x02: 5045 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD); 5046 break; 5047 case 0x03: 5048 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP); 5049 break; 5050 } 5051 } 5052 5053 hci_connect_cfm(conn, status); 5054 if (status) 5055 hci_conn_del(conn); 5056 5057 unlock: 5058 hci_dev_unlock(hdev); 5059 } 5060 5061 static inline size_t eir_get_length(u8 *eir, size_t eir_len) 5062 { 5063 size_t parsed = 0; 5064 5065 while (parsed < eir_len) { 5066 u8 field_len = eir[0]; 5067 5068 if (field_len == 0) 5069 return parsed; 5070 5071 parsed += field_len + 1; 5072 eir += field_len + 1; 5073 } 5074 5075 return eir_len; 5076 } 5077 5078 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, void *edata, 5079 struct sk_buff *skb) 5080 { 5081 struct hci_ev_ext_inquiry_result *ev = edata; 5082 struct inquiry_data data; 5083 size_t eir_len; 5084 int i; 5085 5086 if (!hci_ev_skb_pull(hdev, skb, HCI_EV_EXTENDED_INQUIRY_RESULT, 5087 flex_array_size(ev, info, ev->num))) 5088 return; 5089 5090 bt_dev_dbg(hdev, "num %d", ev->num); 5091 5092 if (!ev->num) 5093 return; 5094 5095 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) 5096 return; 5097 5098 hci_dev_lock(hdev); 5099 5100 for (i = 0; i < ev->num; i++) { 5101 struct extended_inquiry_info *info = &ev->info[i]; 5102 u32 flags; 5103 bool name_known; 5104 5105 bacpy(&data.bdaddr, &info->bdaddr); 5106 data.pscan_rep_mode = info->pscan_rep_mode; 5107 data.pscan_period_mode = info->pscan_period_mode; 5108 data.pscan_mode = 0x00; 5109 memcpy(data.dev_class, info->dev_class, 3); 5110 data.clock_offset = info->clock_offset; 5111 data.rssi = info->rssi; 5112 data.ssp_mode = 0x01; 5113 5114 if (hci_dev_test_flag(hdev, HCI_MGMT)) 5115 name_known = eir_get_data(info->data, 5116 sizeof(info->data), 5117 EIR_NAME_COMPLETE, NULL); 5118 else 5119 name_known = true; 5120 5121 flags = hci_inquiry_cache_update(hdev, &data,
name_known); 5122 5123 eir_len = eir_get_length(info->data, sizeof(info->data)); 5124 5125 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 5126 info->dev_class, info->rssi, 5127 flags, info->data, eir_len, NULL, 0, 0); 5128 } 5129 5130 hci_dev_unlock(hdev); 5131 } 5132 5133 static void hci_key_refresh_complete_evt(struct hci_dev *hdev, void *data, 5134 struct sk_buff *skb) 5135 { 5136 struct hci_ev_key_refresh_complete *ev = data; 5137 struct hci_conn *conn; 5138 5139 bt_dev_dbg(hdev, "status 0x%2.2x handle 0x%4.4x", ev->status, 5140 __le16_to_cpu(ev->handle)); 5141 5142 hci_dev_lock(hdev); 5143 5144 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 5145 if (!conn) 5146 goto unlock; 5147 5148 /* For BR/EDR the necessary steps are taken through the 5149 * auth_complete event. 5150 */ 5151 if (conn->type != LE_LINK) 5152 goto unlock; 5153 5154 if (!ev->status) 5155 conn->sec_level = conn->pending_sec_level; 5156 5157 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); 5158 5159 if (ev->status && conn->state == BT_CONNECTED) { 5160 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE); 5161 hci_conn_drop(conn); 5162 goto unlock; 5163 } 5164 5165 if (conn->state == BT_CONFIG) { 5166 if (!ev->status) 5167 conn->state = BT_CONNECTED; 5168 5169 hci_connect_cfm(conn, ev->status); 5170 hci_conn_drop(conn); 5171 } else { 5172 hci_auth_cfm(conn, ev->status); 5173 5174 hci_conn_hold(conn); 5175 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 5176 hci_conn_drop(conn); 5177 } 5178 5179 unlock: 5180 hci_dev_unlock(hdev); 5181 } 5182 5183 static u8 hci_get_auth_req(struct hci_conn *conn) 5184 { 5185 /* If remote requests no-bonding follow that lead */ 5186 if (conn->remote_auth == HCI_AT_NO_BONDING || 5187 conn->remote_auth == HCI_AT_NO_BONDING_MITM) 5188 return conn->remote_auth | (conn->auth_type & 0x01); 5189 5190 /* If both remote and local have enough IO capabilities, require 5191 * MITM protection 5192 */ 5193 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT && 5194 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT) 5195 return conn->remote_auth | 0x01; 5196 5197 /* No MITM protection possible so ignore remote requirement */ 5198 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01); 5199 } 5200 5201 static u8 bredr_oob_data_present(struct hci_conn *conn) 5202 { 5203 struct hci_dev *hdev = conn->hdev; 5204 struct oob_data *data; 5205 5206 data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR); 5207 if (!data) 5208 return 0x00; 5209 5210 if (bredr_sc_enabled(hdev)) { 5211 /* When Secure Connections is enabled, then just 5212 * return the present value stored with the OOB 5213 * data. The stored value contains the right present 5214 * information. However, it can only be trusted when 5215 * not in Secure Connections Only mode. 5216 */ 5217 if (!hci_dev_test_flag(hdev, HCI_SC_ONLY)) 5218 return data->present; 5219 5220 /* When Secure Connections Only mode is enabled, then 5221 * the P-256 values are required. If they are not 5222 * available, then do not declare that OOB data is 5223 * present. 5224 */ 5225 if (!memcmp(data->rand256, ZERO_KEY, 16) || 5226 !memcmp(data->hash256, ZERO_KEY, 16)) 5227 return 0x00; 5228 5229 return 0x02; 5230 } 5231 5232 /* When Secure Connections is not enabled or actually 5233 * not supported by the hardware, then check if 5234 * P-192 data values are present.
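* (All-zero hash or randomizer values are treated as absent data, hence the ZERO_KEY comparisons below.)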
5235 */ 5236 if (!memcmp(data->rand192, ZERO_KEY, 16) || 5237 !memcmp(data->hash192, ZERO_KEY, 16)) 5238 return 0x00; 5239 5240 return 0x01; 5241 } 5242 5243 static void hci_io_capa_request_evt(struct hci_dev *hdev, void *data, 5244 struct sk_buff *skb) 5245 { 5246 struct hci_ev_io_capa_request *ev = data; 5247 struct hci_conn *conn; 5248 5249 bt_dev_dbg(hdev, ""); 5250 5251 hci_dev_lock(hdev); 5252 5253 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 5254 if (!conn) 5255 goto unlock; 5256 5257 hci_conn_hold(conn); 5258 5259 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 5260 goto unlock; 5261 5262 /* Allow pairing if we're pairable, if we're the initiators of the 5263 * pairing, or if the remote is not requesting bonding. 5264 */ 5265 if (hci_dev_test_flag(hdev, HCI_BONDABLE) || 5266 test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) || 5267 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) { 5268 struct hci_cp_io_capability_reply cp; 5269 5270 bacpy(&cp.bdaddr, &ev->bdaddr); 5271 /* Change the IO capability from KeyboardDisplay 5272 * to DisplayYesNo as it is not supported by the BT spec. */ 5273 cp.capability = (conn->io_capability == 0x04) ? 5274 HCI_IO_DISPLAY_YESNO : conn->io_capability; 5275 5276 /* If we are the initiators, there is no remote information yet */ 5277 if (conn->remote_auth == 0xff) { 5278 /* Request MITM protection if our IO caps allow it 5279 * except for the no-bonding case. 5280 */ 5281 if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT && 5282 conn->auth_type != HCI_AT_NO_BONDING) 5283 conn->auth_type |= 0x01; 5284 } else { 5285 conn->auth_type = hci_get_auth_req(conn); 5286 } 5287 5288 /* If we're not bondable, force one of the non-bondable 5289 * authentication requirement values. 5290 */ 5291 if (!hci_dev_test_flag(hdev, HCI_BONDABLE)) 5292 conn->auth_type &= HCI_AT_NO_BONDING_MITM; 5293 5294 cp.authentication = conn->auth_type; 5295 cp.oob_data = bredr_oob_data_present(conn); 5296 5297 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY, 5298 sizeof(cp), &cp); 5299 } else { 5300 struct hci_cp_io_capability_neg_reply cp; 5301 5302 bacpy(&cp.bdaddr, &ev->bdaddr); 5303 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED; 5304 5305 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY, 5306 sizeof(cp), &cp); 5307 } 5308 5309 unlock: 5310 hci_dev_unlock(hdev); 5311 } 5312 5313 static void hci_io_capa_reply_evt(struct hci_dev *hdev, void *data, 5314 struct sk_buff *skb) 5315 { 5316 struct hci_ev_io_capa_reply *ev = data; 5317 struct hci_conn *conn; 5318 5319 bt_dev_dbg(hdev, ""); 5320 5321 hci_dev_lock(hdev); 5322 5323 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 5324 if (!conn) 5325 goto unlock; 5326 5327 conn->remote_cap = ev->capability; 5328 conn->remote_auth = ev->authentication; 5329 5330 unlock: 5331 hci_dev_unlock(hdev); 5332 } 5333 5334 static void hci_user_confirm_request_evt(struct hci_dev *hdev, void *data, 5335 struct sk_buff *skb) 5336 { 5337 struct hci_ev_user_confirm_req *ev = data; 5338 int loc_mitm, rem_mitm, confirm_hint = 0; 5339 struct hci_conn *conn; 5340 5341 bt_dev_dbg(hdev, ""); 5342 5343 hci_dev_lock(hdev); 5344 5345 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 5346 goto unlock; 5347 5348 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 5349 if (!conn) 5350 goto unlock; 5351 5352 loc_mitm = (conn->auth_type & 0x01); 5353 rem_mitm = (conn->remote_auth & 0x01); 5354 5355 /* If we require MITM but the remote device can't provide that 5356 * (it has NoInputNoOutput), then reject the confirmation 5357 * request.
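* A high security level cannot be reached without MITM protection, which in turn requires remote input/output capabilities.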
We check the security level here since it doesn't 5358 * necessarily match conn->auth_type. 5359 */ 5360 if (conn->pending_sec_level > BT_SECURITY_MEDIUM && 5361 conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) { 5362 bt_dev_dbg(hdev, "Rejecting request: remote device can't provide MITM"); 5363 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY, 5364 sizeof(ev->bdaddr), &ev->bdaddr); 5365 goto unlock; 5366 } 5367 5368 /* If neither side requires MITM protection, auto-accept */ 5369 if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) && 5370 (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) { 5371 5372 /* If we're not the initiators, request authorization to 5373 * proceed from user space (mgmt_user_confirm with 5374 * confirm_hint set to 1). The exception is if neither 5375 * side had MITM or if the local IO capability is 5376 * NoInputNoOutput, in which case we do auto-accept. 5377 */ 5378 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && 5379 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT && 5380 (loc_mitm || rem_mitm)) { 5381 bt_dev_dbg(hdev, "Confirming auto-accept as acceptor"); 5382 confirm_hint = 1; 5383 goto confirm; 5384 } 5385 5386 /* If a link key already exists in the local host, leave the 5387 * decision to user space since the remote device could be 5388 * legitimate or malicious. 5389 */ 5390 if (hci_find_link_key(hdev, &ev->bdaddr)) { 5391 bt_dev_dbg(hdev, "Local host already has link key"); 5392 confirm_hint = 1; 5393 goto confirm; 5394 } 5395 5396 BT_DBG("Auto-accept of user confirmation with %ums delay", 5397 hdev->auto_accept_delay); 5398 5399 if (hdev->auto_accept_delay > 0) { 5400 int delay = msecs_to_jiffies(hdev->auto_accept_delay); 5401 queue_delayed_work(conn->hdev->workqueue, 5402 &conn->auto_accept_work, delay); 5403 goto unlock; 5404 } 5405 5406 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, 5407 sizeof(ev->bdaddr), &ev->bdaddr); 5408 goto unlock; 5409 } 5410 5411 confirm: 5412 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, 5413 le32_to_cpu(ev->passkey), confirm_hint); 5414 5415 unlock: 5416 hci_dev_unlock(hdev); 5417 } 5418 5419 static void hci_user_passkey_request_evt(struct hci_dev *hdev, void *data, 5420 struct sk_buff *skb) 5421 { 5422 struct hci_ev_user_passkey_req *ev = data; 5423 5424 bt_dev_dbg(hdev, ""); 5425 5426 if (hci_dev_test_flag(hdev, HCI_MGMT)) 5427 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0); 5428 } 5429 5430 static void hci_user_passkey_notify_evt(struct hci_dev *hdev, void *data, 5431 struct sk_buff *skb) 5432 { 5433 struct hci_ev_user_passkey_notify *ev = data; 5434 struct hci_conn *conn; 5435 5436 bt_dev_dbg(hdev, ""); 5437 5438 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 5439 if (!conn) 5440 return; 5441 5442 conn->passkey_notify = __le32_to_cpu(ev->passkey); 5443 conn->passkey_entered = 0; 5444 5445 if (hci_dev_test_flag(hdev, HCI_MGMT)) 5446 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type, 5447 conn->dst_type, conn->passkey_notify, 5448 conn->passkey_entered); 5449 } 5450 5451 static void hci_keypress_notify_evt(struct hci_dev *hdev, void *data, 5452 struct sk_buff *skb) 5453 { 5454 struct hci_ev_keypress_notify *ev = data; 5455 struct hci_conn *conn; 5456 5457 bt_dev_dbg(hdev, ""); 5458 5459 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 5460 if (!conn) 5461 return; 5462 5463 switch (ev->type) { 5464 case HCI_KEYPRESS_STARTED: 5465 conn->passkey_entered = 0; 5466 return; 5467 5468 case HCI_KEYPRESS_ENTERED: 5469 conn->passkey_entered++; 5470 break; 5471 5472 case
HCI_KEYPRESS_ERASED: 5473 conn->passkey_entered--; 5474 break; 5475 5476 case HCI_KEYPRESS_CLEARED: 5477 conn->passkey_entered = 0; 5478 break; 5479 5480 case HCI_KEYPRESS_COMPLETED: 5481 return; 5482 } 5483 5484 if (hci_dev_test_flag(hdev, HCI_MGMT)) 5485 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type, 5486 conn->dst_type, conn->passkey_notify, 5487 conn->passkey_entered); 5488 } 5489 5490 static void hci_simple_pair_complete_evt(struct hci_dev *hdev, void *data, 5491 struct sk_buff *skb) 5492 { 5493 struct hci_ev_simple_pair_complete *ev = data; 5494 struct hci_conn *conn; 5495 5496 bt_dev_dbg(hdev, ""); 5497 5498 hci_dev_lock(hdev); 5499 5500 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 5501 if (!conn) 5502 goto unlock; 5503 5504 /* Reset the authentication requirement to unknown */ 5505 conn->remote_auth = 0xff; 5506 5507 /* To avoid duplicate auth_failed events to user space we check 5508 * the HCI_CONN_AUTH_PEND flag which will be set if we 5509 * initiated the authentication. A traditional auth_complete 5510 * event is always produced as the initiator and is also mapped to 5511 * the mgmt_auth_failed event. */ 5512 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status) 5513 mgmt_auth_failed(conn, ev->status); 5514 5515 hci_conn_drop(conn); 5516 5517 unlock: 5518 hci_dev_unlock(hdev); 5519 } 5520 5521 static void hci_remote_host_features_evt(struct hci_dev *hdev, void *data, 5522 struct sk_buff *skb) 5523 { 5524 struct hci_ev_remote_host_features *ev = data; 5525 struct inquiry_entry *ie; 5526 struct hci_conn *conn; 5527 5528 bt_dev_dbg(hdev, ""); 5529 5530 hci_dev_lock(hdev); 5531 5532 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 5533 if (conn) 5534 memcpy(conn->features[1], ev->features, 8); 5535 5536 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr); 5537 if (ie) 5538 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP); 5539 5540 hci_dev_unlock(hdev); 5541 } 5542 5543 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev, void *edata, 5544 struct sk_buff *skb) 5545 { 5546 struct hci_ev_remote_oob_data_request *ev = edata; 5547 struct oob_data *data; 5548 5549 bt_dev_dbg(hdev, ""); 5550 5551 hci_dev_lock(hdev); 5552 5553 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 5554 goto unlock; 5555 5556 data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR); 5557 if (!data) { 5558 struct hci_cp_remote_oob_data_neg_reply cp; 5559 5560 bacpy(&cp.bdaddr, &ev->bdaddr); 5561 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, 5562 sizeof(cp), &cp); 5563 goto unlock; 5564 } 5565 5566 if (bredr_sc_enabled(hdev)) { 5567 struct hci_cp_remote_oob_ext_data_reply cp; 5568 5569 bacpy(&cp.bdaddr, &ev->bdaddr); 5570 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) { 5571 memset(cp.hash192, 0, sizeof(cp.hash192)); 5572 memset(cp.rand192, 0, sizeof(cp.rand192)); 5573 } else { 5574 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192)); 5575 memcpy(cp.rand192, data->rand192, sizeof(cp.rand192)); 5576 } 5577 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256)); 5578 memcpy(cp.rand256, data->rand256, sizeof(cp.rand256)); 5579 5580 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY, 5581 sizeof(cp), &cp); 5582 } else { 5583 struct hci_cp_remote_oob_data_reply cp; 5584 5585 bacpy(&cp.bdaddr, &ev->bdaddr); 5586 memcpy(cp.hash, data->hash192, sizeof(cp.hash)); 5587 memcpy(cp.rand, data->rand192, sizeof(cp.rand)); 5588 5589 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, 5590 sizeof(cp), &cp); 5591 } 5592 5593 unlock: 5594 hci_dev_unlock(hdev); 5595 } 5596 5597
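/* The handlers below implement Bluetooth High Speed (AMP) support and are only built when CONFIG_BT_HS is enabled. */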
#if IS_ENABLED(CONFIG_BT_HS) 5598 static void hci_chan_selected_evt(struct hci_dev *hdev, void *data, 5599 struct sk_buff *skb) 5600 { 5601 struct hci_ev_channel_selected *ev = data; 5602 struct hci_conn *hcon; 5603 5604 bt_dev_dbg(hdev, "handle 0x%2.2x", ev->phy_handle); 5605 5606 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle); 5607 if (!hcon) 5608 return; 5609 5610 amp_read_loc_assoc_final_data(hdev, hcon); 5611 } 5612 5613 static void hci_phy_link_complete_evt(struct hci_dev *hdev, void *data, 5614 struct sk_buff *skb) 5615 { 5616 struct hci_ev_phy_link_complete *ev = data; 5617 struct hci_conn *hcon, *bredr_hcon; 5618 5619 bt_dev_dbg(hdev, "handle 0x%2.2x status 0x%2.2x", ev->phy_handle, 5620 ev->status); 5621 5622 hci_dev_lock(hdev); 5623 5624 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle); 5625 if (!hcon) 5626 goto unlock; 5627 5628 if (!hcon->amp_mgr) 5629 goto unlock; 5630 5631 if (ev->status) { 5632 hci_conn_del(hcon); 5633 goto unlock; 5634 } 5635 5636 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon; 5637 5638 hcon->state = BT_CONNECTED; 5639 bacpy(&hcon->dst, &bredr_hcon->dst); 5640 5641 hci_conn_hold(hcon); 5642 hcon->disc_timeout = HCI_DISCONN_TIMEOUT; 5643 hci_conn_drop(hcon); 5644 5645 hci_debugfs_create_conn(hcon); 5646 hci_conn_add_sysfs(hcon); 5647 5648 amp_physical_cfm(bredr_hcon, hcon); 5649 5650 unlock: 5651 hci_dev_unlock(hdev); 5652 } 5653 5654 static void hci_loglink_complete_evt(struct hci_dev *hdev, void *data, 5655 struct sk_buff *skb) 5656 { 5657 struct hci_ev_logical_link_complete *ev = data; 5658 struct hci_conn *hcon; 5659 struct hci_chan *hchan; 5660 struct amp_mgr *mgr; 5661 5662 bt_dev_dbg(hdev, "log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x", 5663 le16_to_cpu(ev->handle), ev->phy_handle, ev->status); 5664 5665 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle); 5666 if (!hcon) 5667 return; 5668 5669 /* Create AMP hchan */ 5670 hchan = hci_chan_create(hcon); 5671 if (!hchan) 5672 return; 5673 5674 hchan->handle = le16_to_cpu(ev->handle); 5675 hchan->amp = true; 5676 5677 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan); 5678 5679 mgr = hcon->amp_mgr; 5680 if (mgr && mgr->bredr_chan) { 5681 struct l2cap_chan *bredr_chan = mgr->bredr_chan; 5682 5683 l2cap_chan_lock(bredr_chan); 5684 5685 bredr_chan->conn->mtu = hdev->block_mtu; 5686 l2cap_logical_cfm(bredr_chan, hchan, 0); 5687 hci_conn_hold(hcon); 5688 5689 l2cap_chan_unlock(bredr_chan); 5690 } 5691 } 5692 5693 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev, void *data, 5694 struct sk_buff *skb) 5695 { 5696 struct hci_ev_disconn_logical_link_complete *ev = data; 5697 struct hci_chan *hchan; 5698 5699 bt_dev_dbg(hdev, "handle 0x%4.4x status 0x%2.2x", 5700 le16_to_cpu(ev->handle), ev->status); 5701 5702 if (ev->status) 5703 return; 5704 5705 hci_dev_lock(hdev); 5706 5707 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle)); 5708 if (!hchan || !hchan->amp) 5709 goto unlock; 5710 5711 amp_destroy_logical_link(hchan, ev->reason); 5712 5713 unlock: 5714 hci_dev_unlock(hdev); 5715 } 5716 5717 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev, void *data, 5718 struct sk_buff *skb) 5719 { 5720 struct hci_ev_disconn_phy_link_complete *ev = data; 5721 struct hci_conn *hcon; 5722 5723 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 5724 5725 if (ev->status) 5726 return; 5727 5728 hci_dev_lock(hdev); 5729 5730 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle); 5731 if (hcon && hcon->type == AMP_LINK) { 5732 hcon->state = 
BT_CLOSED; 5733 hci_disconn_cfm(hcon, ev->reason); 5734 hci_conn_del(hcon); 5735 } 5736 5737 hci_dev_unlock(hdev); 5738 } 5739 #endif 5740 5741 static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr, 5742 u8 bdaddr_type, bdaddr_t *local_rpa) 5743 { 5744 if (conn->out) { 5745 conn->dst_type = bdaddr_type; 5746 conn->resp_addr_type = bdaddr_type; 5747 bacpy(&conn->resp_addr, bdaddr); 5748 5749 /* If the controller has set a Local RPA, then it must be 5750 * used instead of hdev->rpa. 5751 */ 5752 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) { 5753 conn->init_addr_type = ADDR_LE_DEV_RANDOM; 5754 bacpy(&conn->init_addr, local_rpa); 5755 } else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) { 5756 conn->init_addr_type = ADDR_LE_DEV_RANDOM; 5757 bacpy(&conn->init_addr, &conn->hdev->rpa); 5758 } else { 5759 hci_copy_identity_address(conn->hdev, &conn->init_addr, 5760 &conn->init_addr_type); 5761 } 5762 } else { 5763 conn->resp_addr_type = conn->hdev->adv_addr_type; 5764 /* If the controller has set a Local RPA, then it must be 5765 * used instead of hdev->rpa. 5766 */ 5767 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) { 5768 conn->resp_addr_type = ADDR_LE_DEV_RANDOM; 5769 bacpy(&conn->resp_addr, local_rpa); 5770 } else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) { 5771 /* In case of ext adv, resp_addr will be updated in 5772 * Adv Terminated event. 5773 */ 5774 if (!ext_adv_capable(conn->hdev)) 5775 bacpy(&conn->resp_addr, 5776 &conn->hdev->random_addr); 5777 } else { 5778 bacpy(&conn->resp_addr, &conn->hdev->bdaddr); 5779 } 5780 5781 conn->init_addr_type = bdaddr_type; 5782 bacpy(&conn->init_addr, bdaddr); 5783 5784 /* For incoming connections, set the default minimum 5785 * and maximum connection interval. They will be used 5786 * to check if the parameters are in range and if not 5787 * trigger the connection update procedure. 5788 */ 5789 conn->le_conn_min_interval = conn->hdev->le_conn_min_interval; 5790 conn->le_conn_max_interval = conn->hdev->le_conn_max_interval; 5791 } 5792 } 5793 5794 static void le_conn_complete_evt(struct hci_dev *hdev, u8 status, 5795 bdaddr_t *bdaddr, u8 bdaddr_type, 5796 bdaddr_t *local_rpa, u8 role, u16 handle, 5797 u16 interval, u16 latency, 5798 u16 supervision_timeout) 5799 { 5800 struct hci_conn_params *params; 5801 struct hci_conn *conn; 5802 struct smp_irk *irk; 5803 u8 addr_type; 5804 5805 hci_dev_lock(hdev); 5806 5807 /* All controllers implicitly stop advertising in the event of a 5808 * connection, so ensure that the state bit is cleared. 5809 */ 5810 hci_dev_clear_flag(hdev, HCI_LE_ADV); 5811 5812 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr); 5813 if (!conn) { 5814 /* In case of an error status and no connection pending, 5815 * just unlock as there is nothing to clean up. 5816 */ 5817 if (status) 5818 goto unlock; 5819 5820 conn = hci_conn_add(hdev, LE_LINK, bdaddr, role); 5821 if (!conn) { 5822 bt_dev_err(hdev, "no memory for new connection"); 5823 goto unlock; 5824 } 5825 5826 conn->dst_type = bdaddr_type; 5827 5828 /* If we didn't have a hci_conn object previously 5829 * but we're in central role, this must be something 5830 * initiated using an accept list. Since accept list based 5831 * connections are not "first class citizens" we don't 5832 * have full tracking of them. Therefore, we go ahead 5833 * with a "best effort" approach of determining the 5834 * initiator address based on the HCI_PRIVACY flag.
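* (With HCI_PRIVACY set the controller used hdev->rpa as the initiator address; otherwise the public/static identity address is assumed.)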
5835 */ 5836 if (conn->out) { 5837 conn->resp_addr_type = bdaddr_type; 5838 bacpy(&conn->resp_addr, bdaddr); 5839 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) { 5840 conn->init_addr_type = ADDR_LE_DEV_RANDOM; 5841 bacpy(&conn->init_addr, &hdev->rpa); 5842 } else { 5843 hci_copy_identity_address(hdev, 5844 &conn->init_addr, 5845 &conn->init_addr_type); 5846 } 5847 } 5848 } else { 5849 cancel_delayed_work(&conn->le_conn_timeout); 5850 } 5851 5852 /* The HCI_LE_Connection_Complete event is only sent once per connection. 5853 * Processing it more than once per connection can corrupt kernel memory. 5854 * 5855 * As the connection handle is set here for the first time, it indicates 5856 * whether the connection is already set up. 5857 */ 5858 if (conn->handle != HCI_CONN_HANDLE_UNSET) { 5859 bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection"); 5860 goto unlock; 5861 } 5862 5863 le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa); 5864 5865 /* Look up the identity address from the stored connection 5866 * address and address type. 5867 * 5868 * When establishing connections to an identity address, the 5869 * connection procedure will store the resolvable random 5870 * address first. Now if it can be converted back into the 5871 * identity address, start using the identity address from 5872 * now on. 5873 */ 5874 irk = hci_get_irk(hdev, &conn->dst, conn->dst_type); 5875 if (irk) { 5876 bacpy(&conn->dst, &irk->bdaddr); 5877 conn->dst_type = irk->addr_type; 5878 } 5879 5880 conn->dst_type = ev_bdaddr_type(hdev, conn->dst_type, NULL); 5881 5882 if (handle > HCI_CONN_HANDLE_MAX) { 5883 bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x", handle, 5884 HCI_CONN_HANDLE_MAX); 5885 status = HCI_ERROR_INVALID_PARAMETERS; 5886 } 5887 5888 /* All connection failure handling is taken care of by the 5889 * hci_conn_failed function which is triggered by the HCI 5890 * request completion callbacks used for connecting. 5891 */ 5892 if (status) 5893 goto unlock; 5894 5895 /* Drop the connection if it has been aborted */ 5896 if (test_bit(HCI_CONN_CANCEL, &conn->flags)) { 5897 hci_conn_drop(conn); 5898 goto unlock; 5899 } 5900 5901 if (conn->dst_type == ADDR_LE_DEV_PUBLIC) 5902 addr_type = BDADDR_LE_PUBLIC; 5903 else 5904 addr_type = BDADDR_LE_RANDOM; 5905 5906 /* Drop the connection if the device is blocked */ 5907 if (hci_bdaddr_list_lookup(&hdev->reject_list, &conn->dst, addr_type)) { 5908 hci_conn_drop(conn); 5909 goto unlock; 5910 } 5911 5912 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) 5913 mgmt_device_connected(hdev, conn, NULL, 0); 5914 5915 conn->sec_level = BT_SECURITY_LOW; 5916 conn->handle = handle; 5917 conn->state = BT_CONFIG; 5918 5919 /* Store current advertising instance as connection advertising instance 5920 * when software rotation is in use so it can be re-enabled when 5921 * disconnected. 5922 */ 5923 if (!ext_adv_capable(hdev)) 5924 conn->adv_instance = hdev->cur_adv_instance; 5925 5926 conn->le_conn_interval = interval; 5927 conn->le_conn_latency = latency; 5928 conn->le_supv_timeout = supervision_timeout; 5929 5930 hci_debugfs_create_conn(conn); 5931 hci_conn_add_sysfs(conn); 5932 5933 /* The remote features procedure is defined for central 5934 * role only. So only in case of an initiated connection, 5935 * request the remote features. 5936 * 5937 * If the local controller supports peripheral-initiated features 5938 * exchange, then requesting the remote features in peripheral 5939 * role is possible.
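* (This support is indicated by the HCI_LE_PERIPHERAL_FEATURES bit in le_features[0], checked below.)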
Otherwise just transition into the 5940 * connected state without requesting the remote features. 5941 */ 5942 if (conn->out || 5943 (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) { 5944 struct hci_cp_le_read_remote_features cp; 5945 5946 cp.handle = __cpu_to_le16(conn->handle); 5947 5948 hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES, 5949 sizeof(cp), &cp); 5950 5951 hci_conn_hold(conn); 5952 } else { 5953 conn->state = BT_CONNECTED; 5954 hci_connect_cfm(conn, status); 5955 } 5956 5957 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst, 5958 conn->dst_type); 5959 if (params) { 5960 list_del_init(¶ms->action); 5961 if (params->conn) { 5962 hci_conn_drop(params->conn); 5963 hci_conn_put(params->conn); 5964 params->conn = NULL; 5965 } 5966 } 5967 5968 unlock: 5969 hci_update_passive_scan(hdev); 5970 hci_dev_unlock(hdev); 5971 } 5972 5973 static void hci_le_conn_complete_evt(struct hci_dev *hdev, void *data, 5974 struct sk_buff *skb) 5975 { 5976 struct hci_ev_le_conn_complete *ev = data; 5977 5978 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 5979 5980 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type, 5981 NULL, ev->role, le16_to_cpu(ev->handle), 5982 le16_to_cpu(ev->interval), 5983 le16_to_cpu(ev->latency), 5984 le16_to_cpu(ev->supervision_timeout)); 5985 } 5986 5987 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev, void *data, 5988 struct sk_buff *skb) 5989 { 5990 struct hci_ev_le_enh_conn_complete *ev = data; 5991 5992 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 5993 5994 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type, 5995 &ev->local_rpa, ev->role, le16_to_cpu(ev->handle), 5996 le16_to_cpu(ev->interval), 5997 le16_to_cpu(ev->latency), 5998 le16_to_cpu(ev->supervision_timeout)); 5999 } 6000 6001 static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, void *data, 6002 struct sk_buff *skb) 6003 { 6004 struct hci_evt_le_ext_adv_set_term *ev = data; 6005 struct hci_conn *conn; 6006 struct adv_info *adv, *n; 6007 6008 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 6009 6010 /* The Bluetooth Core 5.3 specification clearly states that this event 6011 * shall not be sent when the Host disables the advertising set. So in 6012 * case of HCI_ERROR_CANCELLED_BY_HOST, just ignore the event. 6013 * 6014 * When the Host disables an advertising set, all cleanup is done via 6015 * its command callback and not needed to be duplicated here. 6016 */ 6017 if (ev->status == HCI_ERROR_CANCELLED_BY_HOST) { 6018 bt_dev_warn_ratelimited(hdev, "Unexpected advertising set terminated event"); 6019 return; 6020 } 6021 6022 hci_dev_lock(hdev); 6023 6024 adv = hci_find_adv_instance(hdev, ev->handle); 6025 6026 if (ev->status) { 6027 if (!adv) 6028 goto unlock; 6029 6030 /* Remove advertising as it has been terminated */ 6031 hci_remove_adv_instance(hdev, ev->handle); 6032 mgmt_advertising_removed(NULL, hdev, ev->handle); 6033 6034 list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) { 6035 if (adv->enabled) 6036 goto unlock; 6037 } 6038 6039 /* We are no longer advertising, clear HCI_LE_ADV */ 6040 hci_dev_clear_flag(hdev, HCI_LE_ADV); 6041 goto unlock; 6042 } 6043 6044 if (adv) 6045 adv->enabled = false; 6046 6047 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle)); 6048 if (conn) { 6049 /* Store handle in the connection so the correct advertising 6050 * instance can be re-enabled when disconnected. 
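* (ev->handle here is the advertising handle of the set that produced the connection.)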
6051 */ 6052 conn->adv_instance = ev->handle; 6053 6054 if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM || 6055 bacmp(&conn->resp_addr, BDADDR_ANY)) 6056 goto unlock; 6057 6058 if (!ev->handle) { 6059 bacpy(&conn->resp_addr, &hdev->random_addr); 6060 goto unlock; 6061 } 6062 6063 if (adv) 6064 bacpy(&conn->resp_addr, &adv->random_addr); 6065 } 6066 6067 unlock: 6068 hci_dev_unlock(hdev); 6069 } 6070 6071 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev, void *data, 6072 struct sk_buff *skb) 6073 { 6074 struct hci_ev_le_conn_update_complete *ev = data; 6075 struct hci_conn *conn; 6076 6077 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 6078 6079 if (ev->status) 6080 return; 6081 6082 hci_dev_lock(hdev); 6083 6084 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 6085 if (conn) { 6086 conn->le_conn_interval = le16_to_cpu(ev->interval); 6087 conn->le_conn_latency = le16_to_cpu(ev->latency); 6088 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout); 6089 } 6090 6091 hci_dev_unlock(hdev); 6092 } 6093 6094 /* This function requires the caller holds hdev->lock */ 6095 static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev, 6096 bdaddr_t *addr, 6097 u8 addr_type, bool addr_resolved, 6098 u8 adv_type) 6099 { 6100 struct hci_conn *conn; 6101 struct hci_conn_params *params; 6102 6103 /* If the event is not connectable, don't proceed further */ 6104 if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND) 6105 return NULL; 6106 6107 /* Ignore if the device is blocked or hdev is suspended */ 6108 if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type) || 6109 hdev->suspended) 6110 return NULL; 6111 6112 /* Most controllers will fail if we try to create new connections 6113 * while we have an existing one in peripheral role. 6114 */ 6115 if (hdev->conn_hash.le_num_peripheral > 0 && 6116 (!test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) || 6117 !(hdev->le_states[3] & 0x10))) 6118 return NULL; 6119 6120 /* If we're not connectable, only connect to devices that we have in 6121 * our pend_le_conns list. 6122 */ 6123 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr, 6124 addr_type); 6125 if (!params) 6126 return NULL; 6127 6128 if (!params->explicit_connect) { 6129 switch (params->auto_connect) { 6130 case HCI_AUTO_CONN_DIRECT: 6131 /* Only devices advertising with ADV_DIRECT_IND 6132 * trigger a connection attempt. This allows 6133 * incoming connections from peripheral devices. 6134 */ 6135 if (adv_type != LE_ADV_DIRECT_IND) 6136 return NULL; 6137 break; 6138 case HCI_AUTO_CONN_ALWAYS: 6139 /* Devices advertising with ADV_IND or ADV_DIRECT_IND 6140 * trigger a connection attempt. This means 6141 * that incoming connections from peripheral devices are 6142 * accepted and also outgoing connections to peripheral 6143 * devices are established when found. 6144 */ 6145 break; 6146 default: 6147 return NULL; 6148 } 6149 } 6150 6151 conn = hci_connect_le(hdev, addr, addr_type, addr_resolved, 6152 BT_SECURITY_LOW, hdev->def_le_autoconnect_timeout, 6153 HCI_ROLE_MASTER); 6154 if (!IS_ERR(conn)) { 6155 /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned 6156 * by the higher layer that tried to connect; if not, then 6157 * store the pointer since we don't really have any 6158 * other owner of the object besides the params that 6159 * triggered it. This way we can abort the connection if 6160 * the parameters get removed and keep the reference 6161 * count consistent once the connection is established.
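* (le_conn_complete_evt() later drops this reference again via hci_conn_put() once the connection completes.)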
6162 */ 6163 6164 if (!params->explicit_connect) 6165 params->conn = hci_conn_get(conn); 6166 6167 return conn; 6168 } 6169 6170 switch (PTR_ERR(conn)) { 6171 case -EBUSY: 6172 /* If hci_connect() returns -EBUSY it means there is already 6173 * an LE connection attempt going on. Since controllers don't 6174 * support more than one connection attempt at a time, we 6175 * don't consider this an error case. 6176 */ 6177 break; 6178 default: 6179 BT_DBG("Failed to connect: err %ld", PTR_ERR(conn)); 6180 return NULL; 6181 } 6182 6183 return NULL; 6184 } 6185 6186 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, 6187 u8 bdaddr_type, bdaddr_t *direct_addr, 6188 u8 direct_addr_type, s8 rssi, u8 *data, u8 len, 6189 bool ext_adv, bool ctl_time, u64 instant) 6190 { 6191 struct discovery_state *d = &hdev->discovery; 6192 struct smp_irk *irk; 6193 struct hci_conn *conn; 6194 bool match, bdaddr_resolved; 6195 u32 flags; 6196 u8 *ptr; 6197 6198 switch (type) { 6199 case LE_ADV_IND: 6200 case LE_ADV_DIRECT_IND: 6201 case LE_ADV_SCAN_IND: 6202 case LE_ADV_NONCONN_IND: 6203 case LE_ADV_SCAN_RSP: 6204 break; 6205 default: 6206 bt_dev_err_ratelimited(hdev, "unknown advertising packet " 6207 "type: 0x%02x", type); 6208 return; 6209 } 6210 6211 if (!ext_adv && len > HCI_MAX_AD_LENGTH) { 6212 bt_dev_err_ratelimited(hdev, "legacy adv larger than 31 bytes"); 6213 return; 6214 } 6215 6216 /* Find the end of the data in case the report contains padded zero 6217 * bytes at the end causing an invalid length value. 6218 * 6219 * When data is NULL, len is 0 so there is no need for an extra ptr 6220 * check, as 'ptr < data + 0' is already false in that case. 6221 */ 6222 for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) { 6223 if (ptr + 1 + *ptr > data + len) 6224 break; 6225 } 6226 6227 /* Adjust for actual length. This handles the case when remote 6228 * device is advertising with incorrect data length. 6229 */ 6230 len = ptr - data; 6231 6232 /* If the direct address is present, then this report is from 6233 * an LE Direct Advertising Report event. In that case it is 6234 * important to see if the address matches the local 6235 * controller address. 6236 */ 6237 if (!hci_dev_test_flag(hdev, HCI_MESH) && direct_addr) { 6238 direct_addr_type = ev_bdaddr_type(hdev, direct_addr_type, 6239 &bdaddr_resolved); 6240 6241 /* Only resolvable random addresses are valid for these 6242 * kinds of reports and others can be ignored. 6243 */ 6244 if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type)) 6245 return; 6246 6247 /* If the controller is not using resolvable random 6248 * addresses, then this report can be ignored. 6249 */ 6250 if (!hci_dev_test_flag(hdev, HCI_PRIVACY)) 6251 return; 6252 6253 /* If the local IRK of the controller does not match 6254 * with the resolvable random address provided, then 6255 * this report can be ignored. 6256 */ 6257 if (!smp_irk_matches(hdev, hdev->irk, direct_addr)) 6258 return; 6259 } 6260 6261 /* Check if we need to convert to identity address */ 6262 irk = hci_get_irk(hdev, bdaddr, bdaddr_type); 6263 if (irk) { 6264 bdaddr = &irk->bdaddr; 6265 bdaddr_type = irk->addr_type; 6266 } 6267 6268 bdaddr_type = ev_bdaddr_type(hdev, bdaddr_type, &bdaddr_resolved); 6269 6270 /* Check if we have been requested to connect to this device. 6271 * 6272 * direct_addr is set only for directed advertising reports (it is NULL 6273 * for advertising reports) and is already verified to be RPA above.
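* (check_pending_le_conn() itself ignores event types that are not connectable, so passing scan responses here is harmless.)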
static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
			       u8 bdaddr_type, bdaddr_t *direct_addr,
			       u8 direct_addr_type, s8 rssi, u8 *data, u8 len,
			       bool ext_adv, bool ctl_time, u64 instant)
{
	struct discovery_state *d = &hdev->discovery;
	struct smp_irk *irk;
	struct hci_conn *conn;
	bool match, bdaddr_resolved;
	u32 flags;
	u8 *ptr;

	switch (type) {
	case LE_ADV_IND:
	case LE_ADV_DIRECT_IND:
	case LE_ADV_SCAN_IND:
	case LE_ADV_NONCONN_IND:
	case LE_ADV_SCAN_RSP:
		break;
	default:
		bt_dev_err_ratelimited(hdev, "unknown advertising packet "
				       "type: 0x%02x", type);
		return;
	}

	if (!ext_adv && len > HCI_MAX_AD_LENGTH) {
		bt_dev_err_ratelimited(hdev, "legacy adv larger than 31 bytes");
		return;
	}

	/* Find the end of the data in case the report contains padded zero
	 * bytes at the end causing an invalid length value.
	 *
	 * When data is NULL, len is 0 so there is no need for extra ptr
	 * check as 'ptr < data + 0' is already false in such case.
	 */
	for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
		if (ptr + 1 + *ptr > data + len)
			break;
	}

	/* Adjust for actual length. This handles the case when the remote
	 * device is advertising with an incorrect data length.
	 */
	len = ptr - data;

	/* If the direct address is present, then this report is from
	 * a LE Direct Advertising Report event. In that case it is
	 * important to see if the address is matching the local
	 * controller address.
	 */
	if (!hci_dev_test_flag(hdev, HCI_MESH) && direct_addr) {
		direct_addr_type = ev_bdaddr_type(hdev, direct_addr_type,
						  &bdaddr_resolved);

		/* Only resolvable random addresses are valid for these
		 * kinds of reports and others can be ignored.
		 */
		if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
			return;

		/* If the controller is not using resolvable random
		 * addresses, then this report can be ignored.
		 */
		if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
			return;

		/* If the local IRK of the controller does not match
		 * with the resolvable random address provided, then
		 * this report can be ignored.
		 */
		if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
			return;
	}

	/* Check if we need to convert to identity address */
	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		bdaddr_type = irk->addr_type;
	}

	bdaddr_type = ev_bdaddr_type(hdev, bdaddr_type, &bdaddr_resolved);

	/* Check if we have been requested to connect to this device.
	 *
	 * direct_addr is set only for directed advertising reports (it is NULL
	 * for advertising reports) and is already verified to be RPA above.
	 */
	conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, bdaddr_resolved,
				     type);
	if (!ext_adv && conn && type == LE_ADV_IND &&
	    len <= HCI_MAX_AD_LENGTH) {
		/* Store report for later inclusion by
		 * mgmt_device_connected
		 */
		memcpy(conn->le_adv_data, data, len);
		conn->le_adv_data_len = len;
	}

	if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
	else
		flags = 0;

	/* All scan results should be sent up for Mesh systems */
	if (hci_dev_test_flag(hdev, HCI_MESH)) {
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0, instant);
		return;
	}

	/* Passive scanning shouldn't trigger any device found events,
	 * except for devices marked as CONN_REPORT for which we do send
	 * device found events, or advertisement monitoring requested.
	 */
	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
		if (type == LE_ADV_DIRECT_IND)
			return;

		if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
					       bdaddr, bdaddr_type) &&
		    idr_is_empty(&hdev->adv_monitors_idr))
			return;

		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0, 0);
		return;
	}

	/* When receiving non-connectable or scannable undirected
	 * advertising reports, this means that the remote device is
	 * not connectable, so clearly indicate this in the
	 * device found event.
	 *
	 * When receiving a scan response, then there is no way to
	 * know if the remote device is connectable or not. However
	 * since scan responses are merged with a previously seen
	 * advertising report, the flags field from that report
	 * will be used.
	 *
	 * In the really unlikely case that a controller gets confused
	 * and just sends a scan response event, then it is marked as
	 * not connectable as well.
	 */
	if (type == LE_ADV_SCAN_RSP)
		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;

	/* If there's nothing pending either store the data from this
	 * event or send an immediate device found event if the data
	 * should not be stored for later.
	 */
	if (!ext_adv && !has_pending_adv_report(hdev)) {
		/* If the report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0, 0);
		return;
	}

	/* Check if the pending report is for the same device as the new one */
	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
		 bdaddr_type == d->last_adv_addr_type);

	/* If the pending data doesn't match this report or this isn't a
	 * scan response (e.g. we got a duplicate ADV_IND) then force
	 * sending of the pending data.
	 */
	if (type != LE_ADV_SCAN_RSP || !match) {
		/* Send out whatever is in the cache, but skip duplicates */
		if (!match)
			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0, 0);

		/* If the new report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (!ext_adv && (type == LE_ADV_IND ||
				 type == LE_ADV_SCAN_IND)) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		/* The advertising reports cannot be merged, so clear
		 * the pending report and send out a device found event.
		 */
		clear_pending_adv_report(hdev);
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0, 0);
		return;
	}

	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
	 * the new event is a SCAN_RSP. We can therefore proceed with
	 * sending a merged device found event.
	 */
	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
			  d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
			  d->last_adv_data, d->last_adv_data_len, data, len, 0);
	clear_pending_adv_report(hdev);
}

static void hci_le_adv_report_evt(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_le_advertising_report *ev = data;
	u64 instant = jiffies;

	if (!ev->num)
		return;

	hci_dev_lock(hdev);

	while (ev->num--) {
		struct hci_ev_le_advertising_info *info;
		s8 rssi;

		info = hci_le_ev_skb_pull(hdev, skb,
					  HCI_EV_LE_ADVERTISING_REPORT,
					  sizeof(*info));
		if (!info)
			break;

		if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_ADVERTISING_REPORT,
					info->length + 1))
			break;

		if (info->length <= HCI_MAX_AD_LENGTH) {
			rssi = info->data[info->length];
			process_adv_report(hdev, info->type, &info->bdaddr,
					   info->bdaddr_type, NULL, 0, rssi,
					   info->data, info->length, false,
					   false, instant);
		} else {
			bt_dev_err(hdev, "Dropping invalid advertising data");
		}
	}

	hci_dev_unlock(hdev);
}

static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
{
	if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
		switch (evt_type) {
		case LE_LEGACY_ADV_IND:
			return LE_ADV_IND;
		case LE_LEGACY_ADV_DIRECT_IND:
			return LE_ADV_DIRECT_IND;
		case LE_LEGACY_ADV_SCAN_IND:
			return LE_ADV_SCAN_IND;
		case LE_LEGACY_NONCONN_IND:
			return LE_ADV_NONCONN_IND;
		case LE_LEGACY_SCAN_RSP_ADV:
		case LE_LEGACY_SCAN_RSP_ADV_SCAN:
			return LE_ADV_SCAN_RSP;
		}

		goto invalid;
	}

	if (evt_type & LE_EXT_ADV_CONN_IND) {
		if (evt_type & LE_EXT_ADV_DIRECT_IND)
			return LE_ADV_DIRECT_IND;

		return LE_ADV_IND;
	}

	if (evt_type & LE_EXT_ADV_SCAN_RSP)
		return LE_ADV_SCAN_RSP;

	if (evt_type & LE_EXT_ADV_SCAN_IND)
		return LE_ADV_SCAN_IND;

	if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
	    evt_type & LE_EXT_ADV_DIRECT_IND)
		return LE_ADV_NONCONN_IND;

invalid:
	bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x",
			       evt_type);

	return LE_ADV_INVALID;
}
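/* For reference, a few example mappings performed by ext_evt_type_to_legacy()
 * above, using the event type bit values defined in
 * include/net/bluetooth/hci.h (examples only, not an exhaustive list):
 *
 *	0x0013 (LE_LEGACY_ADV_IND, legacy PDU bit set)  -> LE_ADV_IND
 *	0x0001 (extended, connectable)                  -> LE_ADV_IND
 *	0x0005 (extended, connectable + directed)       -> LE_ADV_DIRECT_IND
 *	0x0000 (extended, non-connectable)              -> LE_ADV_NONCONN_IND
 */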
static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_le_ext_adv_report *ev = data;
	u64 instant = jiffies;

	if (!ev->num)
		return;

	hci_dev_lock(hdev);

	while (ev->num--) {
		struct hci_ev_le_ext_adv_info *info;
		u8 legacy_evt_type;
		u16 evt_type;

		info = hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
					  sizeof(*info));
		if (!info)
			break;

		if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
					info->length))
			break;

		evt_type = __le16_to_cpu(info->type) & LE_EXT_ADV_EVT_TYPE_MASK;
		legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);
		if (legacy_evt_type != LE_ADV_INVALID) {
			process_adv_report(hdev, legacy_evt_type, &info->bdaddr,
					   info->bdaddr_type, NULL, 0,
					   info->rssi, info->data, info->length,
					   !(evt_type & LE_EXT_ADV_LEGACY_PDU),
					   false, instant);
		}
	}

	hci_dev_unlock(hdev);
}

static int hci_le_pa_term_sync(struct hci_dev *hdev, __le16 handle)
{
	struct hci_cp_le_pa_term_sync cp;

	memset(&cp, 0, sizeof(cp));
	cp.handle = handle;

	return hci_send_cmd(hdev, HCI_OP_LE_PA_TERM_SYNC, sizeof(cp), &cp);
}

static void hci_le_pa_sync_established_evt(struct hci_dev *hdev, void *data,
					   struct sk_buff *skb)
{
	struct hci_ev_le_pa_sync_established *ev = data;
	int mask = hdev->link_mode;
	__u8 flags = 0;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	if (ev->status)
		return;

	hci_dev_lock(hdev);

	hci_dev_clear_flag(hdev, HCI_PA_SYNC);

	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ISO_LINK, &flags);
	if (!(mask & HCI_LM_ACCEPT))
		hci_le_pa_term_sync(hdev, ev->handle);

	hci_dev_unlock(hdev);
}
static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_ev_le_remote_feat_complete *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		if (!ev->status)
			memcpy(conn->features[0], ev->features, 8);

		if (conn->state == BT_CONFIG) {
			__u8 status;

			/* If the local controller supports peripheral-initiated
			 * features exchange, but the remote controller does
			 * not, then it is possible that the error code 0x1a
			 * for unsupported remote feature gets returned.
			 *
			 * In this specific case, allow the connection to
			 * transition into connected state and mark it as
			 * successful.
			 */
			if (!conn->out && ev->status == 0x1a &&
			    (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES))
				status = 0x00;
			else
				status = ev->status;

			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
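/* A short illustration of the key matching rules enforced below: for an LTK
 * created with LE Secure Connections pairing, the controller must deliver
 * EDiv = 0x0000 and Rand = 0x0000000000000000, so any non-zero value means
 * the request does not refer to the stored key. For legacy pairing the
 * opposite holds: the EDiv/Rand pair carried in the event has to match the
 * values stored with the key, otherwise the request is answered with an LTK
 * negative reply.
 */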
static void hci_le_ltk_request_evt(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
	if (!ltk)
		goto not_found;

	if (smp_ltk_is_sc(ltk)) {
		/* With SC both EDiv and Rand are set to zero */
		if (ev->ediv || ev->rand)
			goto not_found;
	} else {
		/* For non-SC keys check that EDiv and Rand match */
		if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
			goto not_found;
	}

	memcpy(cp.ltk, ltk->val, ltk->enc_size);
	memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
	cp.handle = cpu_to_le16(conn->handle);

	conn->pending_sec_level = smp_ltk_sec_level(ltk);

	conn->enc_key_size = ltk->enc_size;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
	 * temporary key used to encrypt a connection following
	 * pairing. It is used during the Encrypted Session Setup to
	 * distribute the keys. Later, security can be re-established
	 * using a distributed LTK.
	 */
	if (ltk->type == SMP_STK) {
		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
		list_del_rcu(&ltk->list);
		kfree_rcu(ltk, rcu);
	} else {
		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}

static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
				      u8 reason)
{
	struct hci_cp_le_conn_param_req_neg_reply cp;

	cp.handle = cpu_to_le16(handle);
	cp.reason = reason;

	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
		     &cp);
}

static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev, void *data,
					     struct sk_buff *skb)
{
	struct hci_ev_le_remote_conn_param_req *ev = data;
	struct hci_cp_le_conn_param_req_reply cp;
	struct hci_conn *hcon;
	u16 handle, min, max, latency, timeout;

	bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));

	handle = le16_to_cpu(ev->handle);
	min = le16_to_cpu(ev->interval_min);
	max = le16_to_cpu(ev->interval_max);
	latency = le16_to_cpu(ev->latency);
	timeout = le16_to_cpu(ev->timeout);

	hcon = hci_conn_hash_lookup_handle(hdev, handle);
	if (!hcon || hcon->state != BT_CONNECTED)
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_UNKNOWN_CONN_ID);

	if (hci_check_conn_params(min, max, latency, timeout))
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_INVALID_LL_PARAMS);

	if (hcon->role == HCI_ROLE_MASTER) {
		struct hci_conn_params *params;
		u8 store_hint;

		hci_dev_lock(hdev);

		params = hci_conn_params_lookup(hdev, &hcon->dst,
						hcon->dst_type);
		if (params) {
			params->conn_min_interval = min;
			params->conn_max_interval = max;
			params->conn_latency = latency;
			params->supervision_timeout = timeout;
			store_hint = 0x01;
		} else {
			store_hint = 0x00;
		}

		hci_dev_unlock(hdev);

		mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency, timeout);
	}

	cp.handle = ev->handle;
	cp.interval_min = ev->interval_min;
	cp.interval_max = ev->interval_max;
	cp.latency = ev->latency;
	cp.timeout = ev->timeout;
	cp.min_ce_len = 0;
	cp.max_ce_len = 0;

	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
}
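/* To make the validation above concrete, one parameter set that
 * hci_check_conn_params() accepts (example values only): interval_min =
 * 0x0018 (24 * 1.25 ms = 30 ms), interval_max = 0x0028 (50 ms), latency = 4
 * and timeout = 0x01f4 (500 * 10 ms = 5 s). The supervision timeout has to
 * outlive a full latency window, i.e. 5000 ms > (1 + 4) * 50 ms * 2 = 500 ms,
 * so such a request is answered with a positive reply.
 */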
"status 0x%2.2x", ev->status); 6767 6768 if (ev->status) 6769 return; 6770 6771 hci_dev_lock(hdev); 6772 6773 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 6774 if (!conn) 6775 goto unlock; 6776 6777 conn->le_tx_phy = ev->tx_phy; 6778 conn->le_rx_phy = ev->rx_phy; 6779 6780 unlock: 6781 hci_dev_unlock(hdev); 6782 } 6783 6784 static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data, 6785 struct sk_buff *skb) 6786 { 6787 struct hci_evt_le_cis_established *ev = data; 6788 struct hci_conn *conn; 6789 u16 handle = __le16_to_cpu(ev->handle); 6790 6791 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 6792 6793 hci_dev_lock(hdev); 6794 6795 conn = hci_conn_hash_lookup_handle(hdev, handle); 6796 if (!conn) { 6797 bt_dev_err(hdev, 6798 "Unable to find connection with handle 0x%4.4x", 6799 handle); 6800 goto unlock; 6801 } 6802 6803 if (conn->type != ISO_LINK) { 6804 bt_dev_err(hdev, 6805 "Invalid connection link type handle 0x%4.4x", 6806 handle); 6807 goto unlock; 6808 } 6809 6810 if (conn->role == HCI_ROLE_SLAVE) { 6811 __le32 interval; 6812 6813 memset(&interval, 0, sizeof(interval)); 6814 6815 memcpy(&interval, ev->c_latency, sizeof(ev->c_latency)); 6816 conn->iso_qos.in.interval = le32_to_cpu(interval); 6817 memcpy(&interval, ev->p_latency, sizeof(ev->p_latency)); 6818 conn->iso_qos.out.interval = le32_to_cpu(interval); 6819 conn->iso_qos.in.latency = le16_to_cpu(ev->interval); 6820 conn->iso_qos.out.latency = le16_to_cpu(ev->interval); 6821 conn->iso_qos.in.sdu = le16_to_cpu(ev->c_mtu); 6822 conn->iso_qos.out.sdu = le16_to_cpu(ev->p_mtu); 6823 conn->iso_qos.in.phy = ev->c_phy; 6824 conn->iso_qos.out.phy = ev->p_phy; 6825 } 6826 6827 if (!ev->status) { 6828 conn->state = BT_CONNECTED; 6829 hci_debugfs_create_conn(conn); 6830 hci_conn_add_sysfs(conn); 6831 hci_iso_setup_path(conn); 6832 goto unlock; 6833 } 6834 6835 hci_connect_cfm(conn, ev->status); 6836 hci_conn_del(conn); 6837 6838 unlock: 6839 hci_dev_unlock(hdev); 6840 } 6841 6842 static void hci_le_reject_cis(struct hci_dev *hdev, __le16 handle) 6843 { 6844 struct hci_cp_le_reject_cis cp; 6845 6846 memset(&cp, 0, sizeof(cp)); 6847 cp.handle = handle; 6848 cp.reason = HCI_ERROR_REJ_BAD_ADDR; 6849 hci_send_cmd(hdev, HCI_OP_LE_REJECT_CIS, sizeof(cp), &cp); 6850 } 6851 6852 static void hci_le_accept_cis(struct hci_dev *hdev, __le16 handle) 6853 { 6854 struct hci_cp_le_accept_cis cp; 6855 6856 memset(&cp, 0, sizeof(cp)); 6857 cp.handle = handle; 6858 hci_send_cmd(hdev, HCI_OP_LE_ACCEPT_CIS, sizeof(cp), &cp); 6859 } 6860 6861 static void hci_le_cis_req_evt(struct hci_dev *hdev, void *data, 6862 struct sk_buff *skb) 6863 { 6864 struct hci_evt_le_cis_req *ev = data; 6865 u16 acl_handle, cis_handle; 6866 struct hci_conn *acl, *cis; 6867 int mask; 6868 __u8 flags = 0; 6869 6870 acl_handle = __le16_to_cpu(ev->acl_handle); 6871 cis_handle = __le16_to_cpu(ev->cis_handle); 6872 6873 bt_dev_dbg(hdev, "acl 0x%4.4x handle 0x%4.4x cig 0x%2.2x cis 0x%2.2x", 6874 acl_handle, cis_handle, ev->cig_id, ev->cis_id); 6875 6876 hci_dev_lock(hdev); 6877 6878 acl = hci_conn_hash_lookup_handle(hdev, acl_handle); 6879 if (!acl) 6880 goto unlock; 6881 6882 mask = hci_proto_connect_ind(hdev, &acl->dst, ISO_LINK, &flags); 6883 if (!(mask & HCI_LM_ACCEPT)) { 6884 hci_le_reject_cis(hdev, ev->cis_handle); 6885 goto unlock; 6886 } 6887 6888 cis = hci_conn_hash_lookup_handle(hdev, cis_handle); 6889 if (!cis) { 6890 cis = hci_conn_add(hdev, ISO_LINK, &acl->dst, HCI_ROLE_SLAVE); 6891 if (!cis) { 6892 hci_le_reject_cis(hdev, ev->cis_handle); 
static void hci_le_cis_req_evt(struct hci_dev *hdev, void *data,
			       struct sk_buff *skb)
{
	struct hci_evt_le_cis_req *ev = data;
	u16 acl_handle, cis_handle;
	struct hci_conn *acl, *cis;
	int mask;
	__u8 flags = 0;

	acl_handle = __le16_to_cpu(ev->acl_handle);
	cis_handle = __le16_to_cpu(ev->cis_handle);

	bt_dev_dbg(hdev, "acl 0x%4.4x handle 0x%4.4x cig 0x%2.2x cis 0x%2.2x",
		   acl_handle, cis_handle, ev->cig_id, ev->cis_id);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, acl_handle);
	if (!acl)
		goto unlock;

	mask = hci_proto_connect_ind(hdev, &acl->dst, ISO_LINK, &flags);
	if (!(mask & HCI_LM_ACCEPT)) {
		hci_le_reject_cis(hdev, ev->cis_handle);
		goto unlock;
	}

	cis = hci_conn_hash_lookup_handle(hdev, cis_handle);
	if (!cis) {
		cis = hci_conn_add(hdev, ISO_LINK, &acl->dst, HCI_ROLE_SLAVE);
		if (!cis) {
			hci_le_reject_cis(hdev, ev->cis_handle);
			goto unlock;
		}
		cis->handle = cis_handle;
	}

	cis->iso_qos.cig = ev->cig_id;
	cis->iso_qos.cis = ev->cis_id;

	if (!(flags & HCI_PROTO_DEFER)) {
		hci_le_accept_cis(hdev, ev->cis_handle);
	} else {
		cis->state = BT_CONNECT2;
		hci_connect_cfm(cis, 0);
	}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_le_create_big_complete_evt(struct hci_dev *hdev, void *data,
					   struct sk_buff *skb)
{
	struct hci_evt_le_create_big_complete *ev = data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_CREATE_BIG_COMPLETE,
				flex_array_size(ev, bis_handle, ev->num_bis)))
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_big(hdev, ev->handle);
	if (!conn)
		goto unlock;

	if (conn->type != ISO_LINK) {
		bt_dev_err(hdev,
			   "Invalid connection link type handle 0x%2.2x",
			   ev->handle);
		goto unlock;
	}

	if (ev->num_bis)
		conn->handle = __le16_to_cpu(ev->bis_handle[0]);

	if (!ev->status) {
		conn->state = BT_CONNECTED;
		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);
		hci_iso_setup_path(conn);
		goto unlock;
	}

	hci_connect_cfm(conn, ev->status);
	hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}

static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_evt_le_big_sync_estabilished *ev = data;
	struct hci_conn *bis;
	int i;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_BIG_SYNC_ESTABILISHED,
				flex_array_size(ev, bis, ev->num_bis)))
		return;

	if (ev->status)
		return;

	hci_dev_lock(hdev);

	for (i = 0; i < ev->num_bis; i++) {
		u16 handle = le16_to_cpu(ev->bis[i]);
		__le32 interval;

		bis = hci_conn_hash_lookup_handle(hdev, handle);
		if (!bis) {
			bis = hci_conn_add(hdev, ISO_LINK, BDADDR_ANY,
					   HCI_ROLE_SLAVE);
			if (!bis)
				continue;
			bis->handle = handle;
		}

		bis->iso_qos.big = ev->handle;
		memset(&interval, 0, sizeof(interval));
		memcpy(&interval, ev->latency, sizeof(ev->latency));
		bis->iso_qos.in.interval = le32_to_cpu(interval);
		/* Convert ISO Interval (1.25 ms slots) to latency (ms) */
		bis->iso_qos.in.latency = le16_to_cpu(ev->interval) * 125 / 100;
		bis->iso_qos.in.sdu = le16_to_cpu(ev->max_pdu);

		hci_iso_setup_path(bis);
	}

	hci_dev_unlock(hdev);
}

static void hci_le_big_info_adv_report_evt(struct hci_dev *hdev, void *data,
					   struct sk_buff *skb)
{
	struct hci_evt_le_big_info_adv_report *ev = data;
	int mask = hdev->link_mode;
	__u8 flags = 0;

	bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle));

	hci_dev_lock(hdev);

	mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, ISO_LINK, &flags);
	if (!(mask & HCI_LM_ACCEPT))
		hci_le_pa_term_sync(hdev, ev->sync_handle);

	hci_dev_unlock(hdev);
}

#define HCI_LE_EV_VL(_op, _func, _min_len, _max_len) \
[_op] = { \
	.func = _func, \
	.min_len = _min_len, \
	.max_len = _max_len, \
}

#define HCI_LE_EV(_op, _func, _len) \
	HCI_LE_EV_VL(_op, _func, _len, _len)

#define HCI_LE_EV_STATUS(_op, _func) \
	HCI_LE_EV(_op, _func, sizeof(struct hci_ev_status))
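/* As an illustration of the macros above, the entry for the LE Connection
 * Complete subevent in the table below:
 *
 *	HCI_LE_EV(HCI_EV_LE_CONN_COMPLETE, hci_le_conn_complete_evt,
 *		  sizeof(struct hci_ev_le_conn_complete))
 *
 * expands to the designated initializer:
 *
 *	[HCI_EV_LE_CONN_COMPLETE] = {
 *		.func = hci_le_conn_complete_evt,
 *		.min_len = sizeof(struct hci_ev_le_conn_complete),
 *		.max_len = sizeof(struct hci_ev_le_conn_complete),
 *	}
 *
 * placing the handler at index 0x01 of hci_le_ev_table.
 */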
/* Entries in this table shall have their position according to the subevent
 * opcode they handle, so use of the macros above is recommended since they
 * initialize each entry at its proper index using designated initializers;
 * that way subevents without a callback function can simply be omitted.
 */
static const struct hci_le_ev {
	void (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
	u16 min_len;
	u16 max_len;
} hci_le_ev_table[U8_MAX + 1] = {
	/* [0x01 = HCI_EV_LE_CONN_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_CONN_COMPLETE, hci_le_conn_complete_evt,
		  sizeof(struct hci_ev_le_conn_complete)),
	/* [0x02 = HCI_EV_LE_ADVERTISING_REPORT] */
	HCI_LE_EV_VL(HCI_EV_LE_ADVERTISING_REPORT, hci_le_adv_report_evt,
		     sizeof(struct hci_ev_le_advertising_report),
		     HCI_MAX_EVENT_SIZE),
	/* [0x03 = HCI_EV_LE_CONN_UPDATE_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_CONN_UPDATE_COMPLETE,
		  hci_le_conn_update_complete_evt,
		  sizeof(struct hci_ev_le_conn_update_complete)),
	/* [0x04 = HCI_EV_LE_REMOTE_FEAT_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_REMOTE_FEAT_COMPLETE,
		  hci_le_remote_feat_complete_evt,
		  sizeof(struct hci_ev_le_remote_feat_complete)),
	/* [0x05 = HCI_EV_LE_LTK_REQ] */
	HCI_LE_EV(HCI_EV_LE_LTK_REQ, hci_le_ltk_request_evt,
		  sizeof(struct hci_ev_le_ltk_req)),
	/* [0x06 = HCI_EV_LE_REMOTE_CONN_PARAM_REQ] */
	HCI_LE_EV(HCI_EV_LE_REMOTE_CONN_PARAM_REQ,
		  hci_le_remote_conn_param_req_evt,
		  sizeof(struct hci_ev_le_remote_conn_param_req)),
	/* [0x0a = HCI_EV_LE_ENHANCED_CONN_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_ENHANCED_CONN_COMPLETE,
		  hci_le_enh_conn_complete_evt,
		  sizeof(struct hci_ev_le_enh_conn_complete)),
	/* [0x0b = HCI_EV_LE_DIRECT_ADV_REPORT] */
	HCI_LE_EV_VL(HCI_EV_LE_DIRECT_ADV_REPORT, hci_le_direct_adv_report_evt,
		     sizeof(struct hci_ev_le_direct_adv_report),
		     HCI_MAX_EVENT_SIZE),
	/* [0x0c = HCI_EV_LE_PHY_UPDATE_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_PHY_UPDATE_COMPLETE, hci_le_phy_update_evt,
		  sizeof(struct hci_ev_le_phy_update_complete)),
	/* [0x0d = HCI_EV_LE_EXT_ADV_REPORT] */
	HCI_LE_EV_VL(HCI_EV_LE_EXT_ADV_REPORT, hci_le_ext_adv_report_evt,
		     sizeof(struct hci_ev_le_ext_adv_report),
		     HCI_MAX_EVENT_SIZE),
	/* [0x0e = HCI_EV_LE_PA_SYNC_ESTABLISHED] */
	HCI_LE_EV(HCI_EV_LE_PA_SYNC_ESTABLISHED,
		  hci_le_pa_sync_established_evt,
		  sizeof(struct hci_ev_le_pa_sync_established)),
	/* [0x12 = HCI_EV_LE_EXT_ADV_SET_TERM] */
	HCI_LE_EV(HCI_EV_LE_EXT_ADV_SET_TERM, hci_le_ext_adv_term_evt,
		  sizeof(struct hci_evt_le_ext_adv_set_term)),
	/* [0x19 = HCI_EVT_LE_CIS_ESTABLISHED] */
	HCI_LE_EV(HCI_EVT_LE_CIS_ESTABLISHED, hci_le_cis_established_evt,
		  sizeof(struct hci_evt_le_cis_established)),
	/* [0x1a = HCI_EVT_LE_CIS_REQ] */
	HCI_LE_EV(HCI_EVT_LE_CIS_REQ, hci_le_cis_req_evt,
		  sizeof(struct hci_evt_le_cis_req)),
	/* [0x1b = HCI_EVT_LE_CREATE_BIG_COMPLETE] */
	HCI_LE_EV_VL(HCI_EVT_LE_CREATE_BIG_COMPLETE,
		     hci_le_create_big_complete_evt,
		     sizeof(struct hci_evt_le_create_big_complete),
		     HCI_MAX_EVENT_SIZE),
	/* [0x1d = HCI_EVT_LE_BIG_SYNC_ESTABILISHED] */
	HCI_LE_EV_VL(HCI_EVT_LE_BIG_SYNC_ESTABILISHED,
		     hci_le_big_sync_established_evt,
		     sizeof(struct hci_evt_le_big_sync_estabilished),
		     HCI_MAX_EVENT_SIZE),
	/* [0x22 = HCI_EVT_LE_BIG_INFO_ADV_REPORT] */
	HCI_LE_EV_VL(HCI_EVT_LE_BIG_INFO_ADV_REPORT,
		     hci_le_big_info_adv_report_evt,
		     sizeof(struct hci_evt_le_big_info_adv_report),
		     HCI_MAX_EVENT_SIZE),
};
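/* Dispatch illustration: an LE Meta event whose first parameter byte is 0x02
 * (HCI_EV_LE_ADVERTISING_REPORT) is routed by hci_le_meta_evt() below to
 * hci_le_adv_report_evt(), after the subevent header has been pulled and the
 * remaining length has been checked against the min_len/max_len bounds from
 * the table above.
 */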
static void hci_le_meta_evt(struct hci_dev *hdev, void *data,
			    struct sk_buff *skb, u16 *opcode, u8 *status,
			    hci_req_complete_t *req_complete,
			    hci_req_complete_skb_t *req_complete_skb)
{
	struct hci_ev_le_meta *ev = data;
	const struct hci_le_ev *subev;

	bt_dev_dbg(hdev, "subevent 0x%2.2x", ev->subevent);

	/* Only match event if command OGF is for LE */
	if (hdev->sent_cmd &&
	    hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) == 0x08 &&
	    hci_skb_event(hdev->sent_cmd) == ev->subevent) {
		*opcode = hci_skb_opcode(hdev->sent_cmd);
		hci_req_cmd_complete(hdev, *opcode, 0x00, req_complete,
				     req_complete_skb);
	}

	subev = &hci_le_ev_table[ev->subevent];
	if (!subev->func)
		return;

	if (skb->len < subev->min_len) {
		bt_dev_err(hdev, "unexpected subevent 0x%2.2x length: %u < %u",
			   ev->subevent, skb->len, subev->min_len);
		return;
	}

	/* Just warn if the length is over max_len, since it may still be
	 * possible to partially parse the event, so leave it to the callback
	 * to decide if that is acceptable.
	 */
	if (skb->len > subev->max_len)
		bt_dev_warn(hdev, "unexpected subevent 0x%2.2x length: %u > %u",
			    ev->subevent, skb->len, subev->max_len);

	data = hci_le_ev_skb_pull(hdev, skb, ev->subevent, subev->min_len);
	if (!data)
		return;

	subev->func(hdev, data, skb);
}

static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
				 u8 event, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;

	if (!skb)
		return false;

	hdr = hci_ev_skb_pull(hdev, skb, event, sizeof(*hdr));
	if (!hdr)
		return false;

	if (event) {
		if (hdr->evt != event)
			return false;
		return true;
	}

	/* Check if request ended in Command Status - no way to retrieve
	 * any extra parameters in this case.
	 */
	if (hdr->evt == HCI_EV_CMD_STATUS)
		return false;

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
			   hdr->evt);
		return false;
	}

	ev = hci_cc_skb_pull(hdev, skb, opcode, sizeof(*ev));
	if (!ev)
		return false;

	if (opcode != __le16_to_cpu(ev->opcode)) {
		BT_DBG("opcode doesn't match (0x%4.4x != 0x%4.4x)", opcode,
		       __le16_to_cpu(ev->opcode));
		return false;
	}

	return true;
}
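/* For context (a summary, not an exhaustive list): the wake_reason values
 * stored below are the ones defined for the mgmt interface, e.g.
 * MGMT_WAKE_REASON_REMOTE_WAKE for an expected reconnection or advertising
 * event, and MGMT_WAKE_REASON_UNEXPECTED for any other event that arrives
 * while suspended; see the BlueZ mgmt API documentation for the full list.
 */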
static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
				  struct sk_buff *skb)
{
	struct hci_ev_le_advertising_info *adv;
	struct hci_ev_le_direct_adv_info *direct_adv;
	struct hci_ev_le_ext_adv_info *ext_adv;
	const struct hci_ev_conn_complete *conn_complete = (void *)skb->data;
	const struct hci_ev_conn_request *conn_request = (void *)skb->data;

	hci_dev_lock(hdev);

	/* If we are currently suspended and this is the first BT event seen,
	 * save the wake reason associated with the event.
	 */
	if (!hdev->suspended || hdev->wake_reason)
		goto unlock;

	/* Default to remote wake. Values for wake_reason are documented in the
	 * BlueZ mgmt API docs.
	 */
	hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE;

	/* Once configured for remote wakeup, we should only wake up for
	 * reconnections. It's useful to see which device is waking us up so
	 * keep track of the bdaddr of the connection event that woke us up.
	 */
	if (event == HCI_EV_CONN_REQUEST) {
		bacpy(&hdev->wake_addr, &conn_request->bdaddr);
		hdev->wake_addr_type = BDADDR_BREDR;
	} else if (event == HCI_EV_CONN_COMPLETE) {
		bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
		hdev->wake_addr_type = BDADDR_BREDR;
	} else if (event == HCI_EV_LE_META) {
		struct hci_ev_le_meta *le_ev = (void *)skb->data;
		u8 subevent = le_ev->subevent;
		u8 *ptr = &skb->data[sizeof(*le_ev)];
		u8 num_reports = *ptr;

		if ((subevent == HCI_EV_LE_ADVERTISING_REPORT ||
		     subevent == HCI_EV_LE_DIRECT_ADV_REPORT ||
		     subevent == HCI_EV_LE_EXT_ADV_REPORT) &&
		    num_reports) {
			adv = (void *)(ptr + 1);
			direct_adv = (void *)(ptr + 1);
			ext_adv = (void *)(ptr + 1);

			switch (subevent) {
			case HCI_EV_LE_ADVERTISING_REPORT:
				bacpy(&hdev->wake_addr, &adv->bdaddr);
				hdev->wake_addr_type = adv->bdaddr_type;
				break;
			case HCI_EV_LE_DIRECT_ADV_REPORT:
				bacpy(&hdev->wake_addr, &direct_adv->bdaddr);
				hdev->wake_addr_type = direct_adv->bdaddr_type;
				break;
			case HCI_EV_LE_EXT_ADV_REPORT:
				bacpy(&hdev->wake_addr, &ext_adv->bdaddr);
				hdev->wake_addr_type = ext_adv->bdaddr_type;
				break;
			}
		}
	} else {
		hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED;
	}

unlock:
	hci_dev_unlock(hdev);
}

#define HCI_EV_VL(_op, _func, _min_len, _max_len) \
[_op] = { \
	.req = false, \
	.func = _func, \
	.min_len = _min_len, \
	.max_len = _max_len, \
}

#define HCI_EV(_op, _func, _len) \
	HCI_EV_VL(_op, _func, _len, _len)

#define HCI_EV_STATUS(_op, _func) \
	HCI_EV(_op, _func, sizeof(struct hci_ev_status))

#define HCI_EV_REQ_VL(_op, _func, _min_len, _max_len) \
[_op] = { \
	.req = true, \
	.func_req = _func, \
	.min_len = _min_len, \
	.max_len = _max_len, \
}

#define HCI_EV_REQ(_op, _func, _len) \
	HCI_EV_REQ_VL(_op, _func, _len, _len)
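/* Two entry flavours exist in the table that follows: plain handlers and
 * request-aware handlers. As an example, HCI_EV_REQ(HCI_EV_CMD_STATUS, ...)
 * sets .req = true and stores the callback in .func_req, so the dispatcher
 * can hand it the extra opcode/status/req_complete arguments needed to
 * finish a pending command, while HCI_EV() entries only ever receive the
 * hdev/data/skb triple.
 */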
/* Entries in this table shall have their position according to the event
 * opcode they handle, so use of the macros above is recommended since they
 * initialize each entry at its proper index using designated initializers;
 * that way events without a callback function can simply be omitted.
 */
static const struct hci_ev {
	bool req;
	union {
		void (*func)(struct hci_dev *hdev, void *data,
			     struct sk_buff *skb);
		void (*func_req)(struct hci_dev *hdev, void *data,
				 struct sk_buff *skb, u16 *opcode, u8 *status,
				 hci_req_complete_t *req_complete,
				 hci_req_complete_skb_t *req_complete_skb);
	};
	u16 min_len;
	u16 max_len;
} hci_ev_table[U8_MAX + 1] = {
	/* [0x01 = HCI_EV_INQUIRY_COMPLETE] */
	HCI_EV_STATUS(HCI_EV_INQUIRY_COMPLETE, hci_inquiry_complete_evt),
	/* [0x02 = HCI_EV_INQUIRY_RESULT] */
	HCI_EV_VL(HCI_EV_INQUIRY_RESULT, hci_inquiry_result_evt,
		  sizeof(struct hci_ev_inquiry_result), HCI_MAX_EVENT_SIZE),
	/* [0x03 = HCI_EV_CONN_COMPLETE] */
	HCI_EV(HCI_EV_CONN_COMPLETE, hci_conn_complete_evt,
	       sizeof(struct hci_ev_conn_complete)),
	/* [0x04 = HCI_EV_CONN_REQUEST] */
	HCI_EV(HCI_EV_CONN_REQUEST, hci_conn_request_evt,
	       sizeof(struct hci_ev_conn_request)),
	/* [0x05 = HCI_EV_DISCONN_COMPLETE] */
	HCI_EV(HCI_EV_DISCONN_COMPLETE, hci_disconn_complete_evt,
	       sizeof(struct hci_ev_disconn_complete)),
	/* [0x06 = HCI_EV_AUTH_COMPLETE] */
	HCI_EV(HCI_EV_AUTH_COMPLETE, hci_auth_complete_evt,
	       sizeof(struct hci_ev_auth_complete)),
	/* [0x07 = HCI_EV_REMOTE_NAME] */
	HCI_EV(HCI_EV_REMOTE_NAME, hci_remote_name_evt,
	       sizeof(struct hci_ev_remote_name)),
	/* [0x08 = HCI_EV_ENCRYPT_CHANGE] */
	HCI_EV(HCI_EV_ENCRYPT_CHANGE, hci_encrypt_change_evt,
	       sizeof(struct hci_ev_encrypt_change)),
	/* [0x09 = HCI_EV_CHANGE_LINK_KEY_COMPLETE] */
	HCI_EV(HCI_EV_CHANGE_LINK_KEY_COMPLETE,
	       hci_change_link_key_complete_evt,
	       sizeof(struct hci_ev_change_link_key_complete)),
	/* [0x0b = HCI_EV_REMOTE_FEATURES] */
	HCI_EV(HCI_EV_REMOTE_FEATURES, hci_remote_features_evt,
	       sizeof(struct hci_ev_remote_features)),
	/* [0x0e = HCI_EV_CMD_COMPLETE] */
	HCI_EV_REQ_VL(HCI_EV_CMD_COMPLETE, hci_cmd_complete_evt,
		      sizeof(struct hci_ev_cmd_complete), HCI_MAX_EVENT_SIZE),
	/* [0x0f = HCI_EV_CMD_STATUS] */
	HCI_EV_REQ(HCI_EV_CMD_STATUS, hci_cmd_status_evt,
		   sizeof(struct hci_ev_cmd_status)),
	/* [0x10 = HCI_EV_HARDWARE_ERROR] */
	HCI_EV(HCI_EV_HARDWARE_ERROR, hci_hardware_error_evt,
	       sizeof(struct hci_ev_hardware_error)),
	/* [0x12 = HCI_EV_ROLE_CHANGE] */
	HCI_EV(HCI_EV_ROLE_CHANGE, hci_role_change_evt,
	       sizeof(struct hci_ev_role_change)),
	/* [0x13 = HCI_EV_NUM_COMP_PKTS] */
	HCI_EV_VL(HCI_EV_NUM_COMP_PKTS, hci_num_comp_pkts_evt,
		  sizeof(struct hci_ev_num_comp_pkts), HCI_MAX_EVENT_SIZE),
	/* [0x14 = HCI_EV_MODE_CHANGE] */
	HCI_EV(HCI_EV_MODE_CHANGE, hci_mode_change_evt,
	       sizeof(struct hci_ev_mode_change)),
	/* [0x16 = HCI_EV_PIN_CODE_REQ] */
	HCI_EV(HCI_EV_PIN_CODE_REQ, hci_pin_code_request_evt,
	       sizeof(struct hci_ev_pin_code_req)),
	/* [0x17 = HCI_EV_LINK_KEY_REQ] */
	HCI_EV(HCI_EV_LINK_KEY_REQ, hci_link_key_request_evt,
	       sizeof(struct hci_ev_link_key_req)),
	/* [0x18 = HCI_EV_LINK_KEY_NOTIFY] */
	HCI_EV(HCI_EV_LINK_KEY_NOTIFY, hci_link_key_notify_evt,
	       sizeof(struct hci_ev_link_key_notify)),
	/* [0x1c = HCI_EV_CLOCK_OFFSET] */
	HCI_EV(HCI_EV_CLOCK_OFFSET, hci_clock_offset_evt,
	       sizeof(struct hci_ev_clock_offset)),
	/* [0x1d = HCI_EV_PKT_TYPE_CHANGE] */
	HCI_EV(HCI_EV_PKT_TYPE_CHANGE, hci_pkt_type_change_evt,
	       sizeof(struct hci_ev_pkt_type_change)),
	/* [0x20 = HCI_EV_PSCAN_REP_MODE] */
	HCI_EV(HCI_EV_PSCAN_REP_MODE, hci_pscan_rep_mode_evt,
	       sizeof(struct hci_ev_pscan_rep_mode)),
	/* [0x22 = HCI_EV_INQUIRY_RESULT_WITH_RSSI] */
	HCI_EV_VL(HCI_EV_INQUIRY_RESULT_WITH_RSSI,
		  hci_inquiry_result_with_rssi_evt,
		  sizeof(struct hci_ev_inquiry_result_rssi),
		  HCI_MAX_EVENT_SIZE),
	/* [0x23 = HCI_EV_REMOTE_EXT_FEATURES] */
	HCI_EV(HCI_EV_REMOTE_EXT_FEATURES, hci_remote_ext_features_evt,
	       sizeof(struct hci_ev_remote_ext_features)),
	/* [0x2c = HCI_EV_SYNC_CONN_COMPLETE] */
	HCI_EV(HCI_EV_SYNC_CONN_COMPLETE, hci_sync_conn_complete_evt,
	       sizeof(struct hci_ev_sync_conn_complete)),
	/* [0x2d = HCI_EV_EXTENDED_INQUIRY_RESULT] */
	HCI_EV_VL(HCI_EV_EXTENDED_INQUIRY_RESULT,
		  hci_extended_inquiry_result_evt,
		  sizeof(struct hci_ev_ext_inquiry_result), HCI_MAX_EVENT_SIZE),
	/* [0x30 = HCI_EV_KEY_REFRESH_COMPLETE] */
	HCI_EV(HCI_EV_KEY_REFRESH_COMPLETE, hci_key_refresh_complete_evt,
	       sizeof(struct hci_ev_key_refresh_complete)),
	/* [0x31 = HCI_EV_IO_CAPA_REQUEST] */
	HCI_EV(HCI_EV_IO_CAPA_REQUEST, hci_io_capa_request_evt,
	       sizeof(struct hci_ev_io_capa_request)),
	/* [0x32 = HCI_EV_IO_CAPA_REPLY] */
	HCI_EV(HCI_EV_IO_CAPA_REPLY, hci_io_capa_reply_evt,
	       sizeof(struct hci_ev_io_capa_reply)),
	/* [0x33 = HCI_EV_USER_CONFIRM_REQUEST] */
	HCI_EV(HCI_EV_USER_CONFIRM_REQUEST, hci_user_confirm_request_evt,
	       sizeof(struct hci_ev_user_confirm_req)),
	/* [0x34 = HCI_EV_USER_PASSKEY_REQUEST] */
	HCI_EV(HCI_EV_USER_PASSKEY_REQUEST, hci_user_passkey_request_evt,
	       sizeof(struct hci_ev_user_passkey_req)),
	/* [0x35 = HCI_EV_REMOTE_OOB_DATA_REQUEST] */
	HCI_EV(HCI_EV_REMOTE_OOB_DATA_REQUEST, hci_remote_oob_data_request_evt,
	       sizeof(struct hci_ev_remote_oob_data_request)),
	/* [0x36 = HCI_EV_SIMPLE_PAIR_COMPLETE] */
	HCI_EV(HCI_EV_SIMPLE_PAIR_COMPLETE, hci_simple_pair_complete_evt,
	       sizeof(struct hci_ev_simple_pair_complete)),
	/* [0x3b = HCI_EV_USER_PASSKEY_NOTIFY] */
	HCI_EV(HCI_EV_USER_PASSKEY_NOTIFY, hci_user_passkey_notify_evt,
	       sizeof(struct hci_ev_user_passkey_notify)),
	/* [0x3c = HCI_EV_KEYPRESS_NOTIFY] */
	HCI_EV(HCI_EV_KEYPRESS_NOTIFY, hci_keypress_notify_evt,
	       sizeof(struct hci_ev_keypress_notify)),
	/* [0x3d = HCI_EV_REMOTE_HOST_FEATURES] */
	HCI_EV(HCI_EV_REMOTE_HOST_FEATURES, hci_remote_host_features_evt,
	       sizeof(struct hci_ev_remote_host_features)),
	/* [0x3e = HCI_EV_LE_META] */
	HCI_EV_REQ_VL(HCI_EV_LE_META, hci_le_meta_evt,
		      sizeof(struct hci_ev_le_meta), HCI_MAX_EVENT_SIZE),
#if IS_ENABLED(CONFIG_BT_HS)
	/* [0x40 = HCI_EV_PHY_LINK_COMPLETE] */
	HCI_EV(HCI_EV_PHY_LINK_COMPLETE, hci_phy_link_complete_evt,
	       sizeof(struct hci_ev_phy_link_complete)),
	/* [0x41 = HCI_EV_CHANNEL_SELECTED] */
	HCI_EV(HCI_EV_CHANNEL_SELECTED, hci_chan_selected_evt,
	       sizeof(struct hci_ev_channel_selected)),
	/* [0x42 = HCI_EV_DISCONN_PHY_LINK_COMPLETE] */
	HCI_EV(HCI_EV_DISCONN_PHY_LINK_COMPLETE,
	       hci_disconn_phylink_complete_evt,
	       sizeof(struct hci_ev_disconn_phy_link_complete)),
	/* [0x45 = HCI_EV_LOGICAL_LINK_COMPLETE] */
	HCI_EV(HCI_EV_LOGICAL_LINK_COMPLETE, hci_loglink_complete_evt,
	       sizeof(struct hci_ev_logical_link_complete)),
	/* [0x46 = HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE] */
	HCI_EV(HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE,
	       hci_disconn_loglink_complete_evt,
	       sizeof(struct hci_ev_disconn_logical_link_complete)),
#endif
	/* [0x48 = HCI_EV_NUM_COMP_BLOCKS] */
	HCI_EV(HCI_EV_NUM_COMP_BLOCKS, hci_num_comp_blocks_evt,
	       sizeof(struct hci_ev_num_comp_blocks)),
	/* [0xff = HCI_EV_VENDOR] */
	HCI_EV_VL(HCI_EV_VENDOR, msft_vendor_evt, 0, HCI_MAX_EVENT_SIZE),
};

static void hci_event_func(struct hci_dev *hdev, u8 event, struct sk_buff *skb,
			   u16 *opcode, u8 *status,
			   hci_req_complete_t *req_complete,
			   hci_req_complete_skb_t *req_complete_skb)
{
	const struct hci_ev *ev = &hci_ev_table[event];
	void *data;

	if (!ev->func)
		return;

	if (skb->len < ev->min_len) {
		bt_dev_err(hdev, "unexpected event 0x%2.2x length: %u < %u",
			   event, skb->len, ev->min_len);
		return;
	}

	/* Just warn if the length is over max_len, since it may still be
	 * possible to partially parse the event, so leave it to the callback
	 * to decide if that is acceptable.
	 */
	if (skb->len > ev->max_len)
		bt_dev_warn_ratelimited(hdev,
					"unexpected event 0x%2.2x length: %u > %u",
					event, skb->len, ev->max_len);

	data = hci_ev_skb_pull(hdev, skb, event, ev->min_len);
	if (!data)
		return;

	if (ev->req)
		ev->func_req(hdev, data, skb, opcode, status, req_complete,
			     req_complete_skb);
	else
		ev->func(hdev, data, skb);
}

void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	hci_req_complete_t req_complete = NULL;
	hci_req_complete_skb_t req_complete_skb = NULL;
	struct sk_buff *orig_skb = NULL;
	u8 status = 0, event, req_evt = 0;
	u16 opcode = HCI_OP_NOP;

	if (skb->len < sizeof(*hdr)) {
		bt_dev_err(hdev, "Malformed HCI Event");
		goto done;
	}

	kfree_skb(hdev->recv_event);
	hdev->recv_event = skb_clone(skb, GFP_KERNEL);

	event = hdr->evt;
	if (!event) {
		bt_dev_warn(hdev, "Received unexpected HCI Event 0x%2.2x",
			    event);
		goto done;
	}

	/* Only match event if command OGF is not for LE */
	if (hdev->sent_cmd &&
	    hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) != 0x08 &&
	    hci_skb_event(hdev->sent_cmd) == event) {
		hci_req_cmd_complete(hdev, hci_skb_opcode(hdev->sent_cmd),
				     status, &req_complete, &req_complete_skb);
		req_evt = event;
	}

	/* If it looks like we might end up having to call
	 * req_complete_skb, store a pristine copy of the skb since the
	 * various handlers may modify the original one through
	 * skb_pull() calls, etc.
	 */
	if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
	    event == HCI_EV_CMD_COMPLETE)
		orig_skb = skb_clone(skb, GFP_KERNEL);

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* Store wake reason if we're suspended */
	hci_store_wake_reason(hdev, event, skb);

	bt_dev_dbg(hdev, "event 0x%2.2x", event);

	hci_event_func(hdev, event, skb, &opcode, &status, &req_complete,
		       &req_complete_skb);

	if (req_complete) {
		req_complete(hdev, status, opcode);
	} else if (req_complete_skb) {
		if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
			kfree_skb(orig_skb);
			orig_skb = NULL;
		}
		req_complete_skb(hdev, status, opcode, orig_skb);
	}

done:
	kfree_skb(orig_skb);
	kfree_skb(skb);
	hdev->stat.evt_rx++;
}
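/* End-to-end sketch of the receive path implemented above (the exact call
 * chain lives in hci_core.c, so take this as orientation rather than a
 * contract): the driver queues an event skb, hci_rx_work() hands it to
 * hci_event_packet(), which strips the header, records a possible wake
 * reason, dispatches through hci_ev_table via hci_event_func() (and, for LE
 * Meta events, through hci_le_ev_table via hci_le_meta_evt()), and finally
 * completes any pending request through the req_complete callbacks.
 */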