1 /* 2 BlueZ - Bluetooth protocol stack for Linux 3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved. 4 5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> 6 7 This program is free software; you can redistribute it and/or modify 8 it under the terms of the GNU General Public License version 2 as 9 published by the Free Software Foundation; 10 11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. 14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY 15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES 16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 19 20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, 21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS 22 SOFTWARE IS DISCLAIMED. 23 */ 24 25 /* Bluetooth HCI event handling. */ 26 27 #include <asm/unaligned.h> 28 29 #include <net/bluetooth/bluetooth.h> 30 #include <net/bluetooth/hci_core.h> 31 #include <net/bluetooth/mgmt.h> 32 33 #include "hci_request.h" 34 #include "hci_debugfs.h" 35 #include "a2mp.h" 36 #include "amp.h" 37 #include "smp.h" 38 #include "msft.h" 39 #include "eir.h" 40 41 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \ 42 "\x00\x00\x00\x00\x00\x00\x00\x00" 43 44 #define secs_to_jiffies(_secs) msecs_to_jiffies((_secs) * 1000) 45 46 /* Handle HCI Event packets */ 47 48 static void *hci_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb, 49 u8 ev, size_t len) 50 { 51 void *data; 52 53 data = skb_pull_data(skb, len); 54 if (!data) 55 bt_dev_err(hdev, "Malformed Event: 0x%2.2x", ev); 56 57 return data; 58 } 59 60 static void *hci_cc_skb_pull(struct hci_dev *hdev, struct sk_buff *skb, 61 u16 op, size_t len) 62 { 63 void *data; 64 65 data = skb_pull_data(skb, len); 66 if (!data) 67 bt_dev_err(hdev, "Malformed Command Complete: 0x%4.4x", op); 68 69 return data; 70 } 71 72 static void *hci_le_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb, 73 u8 ev, size_t len) 74 { 75 void *data; 76 77 data = skb_pull_data(skb, len); 78 if (!data) 79 bt_dev_err(hdev, "Malformed LE Event: 0x%2.2x", ev); 80 81 return data; 82 } 83 84 static u8 hci_cc_inquiry_cancel(struct hci_dev *hdev, void *data, 85 struct sk_buff *skb) 86 { 87 struct hci_ev_status *rp = data; 88 89 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 90 91 /* It is possible that we receive Inquiry Complete event right 92 * before we receive Inquiry Cancel Command Complete event, in 93 * which case the latter event should have status of Command 94 * Disallowed (0x0c). This should not be treated as error, since 95 * we actually achieve what Inquiry Cancel wants to achieve, 96 * which is to end the last Inquiry session. 97 */ 98 if (rp->status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) { 99 bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command"); 100 rp->status = 0x00; 101 } 102 103 if (rp->status) 104 return rp->status; 105 106 clear_bit(HCI_INQUIRY, &hdev->flags); 107 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */ 108 wake_up_bit(&hdev->flags, HCI_INQUIRY); 109 110 hci_dev_lock(hdev); 111 /* Set discovery state to stopped if we're not doing LE active 112 * scanning. 113 */ 114 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) || 115 hdev->le_scan_type != LE_SCAN_ACTIVE) 116 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 117 hci_dev_unlock(hdev); 118 119 hci_conn_check_pending(hdev); 120 121 return rp->status; 122 } 123 124 static u8 hci_cc_periodic_inq(struct hci_dev *hdev, void *data, 125 struct sk_buff *skb) 126 { 127 struct hci_ev_status *rp = data; 128 129 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 130 131 if (rp->status) 132 return rp->status; 133 134 hci_dev_set_flag(hdev, HCI_PERIODIC_INQ); 135 136 return rp->status; 137 } 138 139 static u8 hci_cc_exit_periodic_inq(struct hci_dev *hdev, void *data, 140 struct sk_buff *skb) 141 { 142 struct hci_ev_status *rp = data; 143 144 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 145 146 if (rp->status) 147 return rp->status; 148 149 hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ); 150 151 hci_conn_check_pending(hdev); 152 153 return rp->status; 154 } 155 156 static u8 hci_cc_remote_name_req_cancel(struct hci_dev *hdev, void *data, 157 struct sk_buff *skb) 158 { 159 struct hci_ev_status *rp = data; 160 161 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 162 163 return rp->status; 164 } 165 166 static u8 hci_cc_role_discovery(struct hci_dev *hdev, void *data, 167 struct sk_buff *skb) 168 { 169 struct hci_rp_role_discovery *rp = data; 170 struct hci_conn *conn; 171 172 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 173 174 if (rp->status) 175 return rp->status; 176 177 hci_dev_lock(hdev); 178 179 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); 180 if (conn) 181 conn->role = rp->role; 182 183 hci_dev_unlock(hdev); 184 185 return rp->status; 186 } 187 188 static u8 hci_cc_read_link_policy(struct hci_dev *hdev, void *data, 189 struct sk_buff *skb) 190 { 191 struct hci_rp_read_link_policy *rp = data; 192 struct hci_conn *conn; 193 194 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 195 196 if (rp->status) 197 return rp->status; 198 199 hci_dev_lock(hdev); 200 201 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); 202 if (conn) 203 conn->link_policy = __le16_to_cpu(rp->policy); 204 205 hci_dev_unlock(hdev); 206 207 return rp->status; 208 } 209 210 static u8 hci_cc_write_link_policy(struct hci_dev *hdev, void *data, 211 struct sk_buff *skb) 212 { 213 struct hci_rp_write_link_policy *rp = data; 214 struct hci_conn *conn; 215 void *sent; 216 217 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 218 219 if (rp->status) 220 return rp->status; 221 222 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY); 223 if (!sent) 224 return rp->status; 225 226 hci_dev_lock(hdev); 227 228 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); 229 if (conn) 230 conn->link_policy = get_unaligned_le16(sent + 2); 231 232 hci_dev_unlock(hdev); 233 234 return rp->status; 235 } 236 237 static u8 hci_cc_read_def_link_policy(struct hci_dev *hdev, void *data, 238 struct sk_buff *skb) 239 { 240 struct hci_rp_read_def_link_policy *rp = data; 241 242 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 243 244 if (rp->status) 245 return rp->status; 246 247 hdev->link_policy = __le16_to_cpu(rp->policy); 248 249 return rp->status; 250 } 251 252 static u8 hci_cc_write_def_link_policy(struct hci_dev *hdev, void *data, 253 struct sk_buff *skb) 254 { 255 struct hci_ev_status *rp = data; 256 void *sent; 257 258 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 259 260 if (rp->status) 261 return rp->status; 262 263 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY); 264 if (!sent) 265 return rp->status; 266 267 hdev->link_policy = get_unaligned_le16(sent); 268 269 return rp->status; 270 } 271 272 static u8 hci_cc_reset(struct hci_dev *hdev, void *data, struct sk_buff *skb) 273 { 274 struct hci_ev_status *rp = data; 275 276 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 277 278 clear_bit(HCI_RESET, &hdev->flags); 279 280 if (rp->status) 281 return rp->status; 282 283 /* Reset all non-persistent flags */ 284 hci_dev_clear_volatile_flags(hdev); 285 286 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 287 288 hdev->inq_tx_power = HCI_TX_POWER_INVALID; 289 hdev->adv_tx_power = HCI_TX_POWER_INVALID; 290 291 memset(hdev->adv_data, 0, sizeof(hdev->adv_data)); 292 hdev->adv_data_len = 0; 293 294 memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data)); 295 hdev->scan_rsp_data_len = 0; 296 297 hdev->le_scan_type = LE_SCAN_PASSIVE; 298 299 hdev->ssp_debug_mode = 0; 300 301 hci_bdaddr_list_clear(&hdev->le_accept_list); 302 hci_bdaddr_list_clear(&hdev->le_resolv_list); 303 304 return rp->status; 305 } 306 307 static u8 hci_cc_read_stored_link_key(struct hci_dev *hdev, void *data, 308 struct sk_buff *skb) 309 { 310 struct hci_rp_read_stored_link_key *rp = data; 311 struct hci_cp_read_stored_link_key *sent; 312 313 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 314 315 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY); 316 if (!sent) 317 return rp->status; 318 319 if (!rp->status && sent->read_all == 0x01) { 320 hdev->stored_max_keys = le16_to_cpu(rp->max_keys); 321 hdev->stored_num_keys = le16_to_cpu(rp->num_keys); 322 } 323 324 return rp->status; 325 } 326 327 static u8 hci_cc_delete_stored_link_key(struct hci_dev *hdev, void *data, 328 struct sk_buff *skb) 329 { 330 struct hci_rp_delete_stored_link_key *rp = data; 331 332 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 333 334 if (rp->status) 335 return rp->status; 336 337 if (rp->num_keys <= hdev->stored_num_keys) 338 hdev->stored_num_keys -= le16_to_cpu(rp->num_keys); 339 else 340 hdev->stored_num_keys = 0; 341 342 return rp->status; 343 } 344 345 static u8 hci_cc_write_local_name(struct hci_dev *hdev, void *data, 346 struct sk_buff *skb) 347 { 348 struct hci_ev_status *rp = data; 349 void *sent; 350 351 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 352 353 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME); 354 if (!sent) 355 return rp->status; 356 357 hci_dev_lock(hdev); 358 359 if (hci_dev_test_flag(hdev, HCI_MGMT)) 360 mgmt_set_local_name_complete(hdev, sent, rp->status); 361 else if (!rp->status) 362 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH); 363 364 hci_dev_unlock(hdev); 365 366 return rp->status; 367 } 368 369 static u8 hci_cc_read_local_name(struct hci_dev *hdev, void *data, 370 struct sk_buff *skb) 371 { 372 struct hci_rp_read_local_name *rp = data; 373 374 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 375 376 if (rp->status) 377 return rp->status; 378 379 if (hci_dev_test_flag(hdev, HCI_SETUP) || 380 hci_dev_test_flag(hdev, HCI_CONFIG)) 381 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH); 382 383 return rp->status; 384 } 385 386 static u8 hci_cc_write_auth_enable(struct hci_dev *hdev, void *data, 387 struct sk_buff *skb) 388 { 389 struct hci_ev_status *rp = data; 390 void *sent; 391 392 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 393 394 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE); 395 if (!sent) 396 return rp->status; 397 398 hci_dev_lock(hdev); 399 400 if (!rp->status) { 401 __u8 param = *((__u8 *) sent); 402 403 if (param == AUTH_ENABLED) 404 set_bit(HCI_AUTH, &hdev->flags); 405 else 406 clear_bit(HCI_AUTH, &hdev->flags); 407 } 408 409 if (hci_dev_test_flag(hdev, HCI_MGMT)) 410 mgmt_auth_enable_complete(hdev, rp->status); 411 412 hci_dev_unlock(hdev); 413 414 return rp->status; 415 } 416 417 static u8 hci_cc_write_encrypt_mode(struct hci_dev *hdev, void *data, 418 struct sk_buff *skb) 419 { 420 struct hci_ev_status *rp = data; 421 __u8 param; 422 void *sent; 423 424 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 425 426 if (rp->status) 427 return rp->status; 428 429 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE); 430 if (!sent) 431 return rp->status; 432 433 param = *((__u8 *) sent); 434 435 if (param) 436 set_bit(HCI_ENCRYPT, &hdev->flags); 437 else 438 clear_bit(HCI_ENCRYPT, &hdev->flags); 439 440 return rp->status; 441 } 442 443 static u8 hci_cc_write_scan_enable(struct hci_dev *hdev, void *data, 444 struct sk_buff *skb) 445 { 446 struct hci_ev_status *rp = data; 447 __u8 param; 448 void *sent; 449 450 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 451 452 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE); 453 if (!sent) 454 return rp->status; 455 456 param = *((__u8 *) sent); 457 458 hci_dev_lock(hdev); 459 460 if (rp->status) { 461 hdev->discov_timeout = 0; 462 goto done; 463 } 464 465 if (param & SCAN_INQUIRY) 466 set_bit(HCI_ISCAN, &hdev->flags); 467 else 468 clear_bit(HCI_ISCAN, &hdev->flags); 469 470 if (param & SCAN_PAGE) 471 set_bit(HCI_PSCAN, &hdev->flags); 472 else 473 clear_bit(HCI_PSCAN, &hdev->flags); 474 475 done: 476 hci_dev_unlock(hdev); 477 478 return rp->status; 479 } 480 481 static u8 hci_cc_set_event_filter(struct hci_dev *hdev, void *data, 482 struct sk_buff *skb) 483 { 484 struct hci_ev_status *rp = data; 485 struct hci_cp_set_event_filter *cp; 486 void *sent; 487 488 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 489 490 if (rp->status) 491 return rp->status; 492 493 sent = hci_sent_cmd_data(hdev, HCI_OP_SET_EVENT_FLT); 494 if (!sent) 495 return rp->status; 496 497 cp = (struct hci_cp_set_event_filter *)sent; 498 499 if (cp->flt_type == HCI_FLT_CLEAR_ALL) 500 hci_dev_clear_flag(hdev, HCI_EVENT_FILTER_CONFIGURED); 501 else 502 hci_dev_set_flag(hdev, HCI_EVENT_FILTER_CONFIGURED); 503 504 return rp->status; 505 } 506 507 static u8 hci_cc_read_class_of_dev(struct hci_dev *hdev, void *data, 508 struct sk_buff *skb) 509 { 510 struct hci_rp_read_class_of_dev *rp = data; 511 512 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 513 514 if (rp->status) 515 return rp->status; 516 517 memcpy(hdev->dev_class, rp->dev_class, 3); 518 519 bt_dev_dbg(hdev, "class 0x%.2x%.2x%.2x", hdev->dev_class[2], 520 hdev->dev_class[1], hdev->dev_class[0]); 521 522 return rp->status; 523 } 524 525 static u8 hci_cc_write_class_of_dev(struct hci_dev *hdev, void *data, 526 struct sk_buff *skb) 527 { 528 struct hci_ev_status *rp = data; 529 void *sent; 530 531 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 532 533 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV); 534 if (!sent) 535 return rp->status; 536 537 hci_dev_lock(hdev); 538 539 if (!rp->status) 540 memcpy(hdev->dev_class, sent, 3); 541 542 if (hci_dev_test_flag(hdev, HCI_MGMT)) 543 mgmt_set_class_of_dev_complete(hdev, sent, rp->status); 544 545 hci_dev_unlock(hdev); 546 547 return rp->status; 548 } 549 550 static u8 hci_cc_read_voice_setting(struct hci_dev *hdev, void *data, 551 struct sk_buff *skb) 552 { 553 struct hci_rp_read_voice_setting *rp = data; 554 __u16 setting; 555 556 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 557 558 if (rp->status) 559 return rp->status; 560 561 setting = __le16_to_cpu(rp->voice_setting); 562 563 if (hdev->voice_setting == setting) 564 return rp->status; 565 566 hdev->voice_setting = setting; 567 568 bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting); 569 570 if (hdev->notify) 571 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING); 572 573 return rp->status; 574 } 575 576 static u8 hci_cc_write_voice_setting(struct hci_dev *hdev, void *data, 577 struct sk_buff *skb) 578 { 579 struct hci_ev_status *rp = data; 580 __u16 setting; 581 void *sent; 582 583 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 584 585 if (rp->status) 586 return rp->status; 587 588 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING); 589 if (!sent) 590 return rp->status; 591 592 setting = get_unaligned_le16(sent); 593 594 if (hdev->voice_setting == setting) 595 return rp->status; 596 597 hdev->voice_setting = setting; 598 599 bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting); 600 601 if (hdev->notify) 602 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING); 603 604 return rp->status; 605 } 606 607 static u8 hci_cc_read_num_supported_iac(struct hci_dev *hdev, void *data, 608 struct sk_buff *skb) 609 { 610 struct hci_rp_read_num_supported_iac *rp = data; 611 612 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 613 614 if (rp->status) 615 return rp->status; 616 617 hdev->num_iac = rp->num_iac; 618 619 bt_dev_dbg(hdev, "num iac %d", hdev->num_iac); 620 621 return rp->status; 622 } 623 624 static u8 hci_cc_write_ssp_mode(struct hci_dev *hdev, void *data, 625 struct sk_buff *skb) 626 { 627 struct hci_ev_status *rp = data; 628 struct hci_cp_write_ssp_mode *sent; 629 630 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 631 632 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE); 633 if (!sent) 634 return rp->status; 635 636 hci_dev_lock(hdev); 637 638 if (!rp->status) { 639 if (sent->mode) 640 hdev->features[1][0] |= LMP_HOST_SSP; 641 else 642 hdev->features[1][0] &= ~LMP_HOST_SSP; 643 } 644 645 if (!rp->status) { 646 if (sent->mode) 647 hci_dev_set_flag(hdev, HCI_SSP_ENABLED); 648 else 649 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED); 650 } 651 652 hci_dev_unlock(hdev); 653 654 return rp->status; 655 } 656 657 static u8 hci_cc_write_sc_support(struct hci_dev *hdev, void *data, 658 struct sk_buff *skb) 659 { 660 struct hci_ev_status *rp = data; 661 struct hci_cp_write_sc_support *sent; 662 663 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 664 665 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT); 666 if (!sent) 667 return rp->status; 668 669 hci_dev_lock(hdev); 670 671 if (!rp->status) { 672 if (sent->support) 673 hdev->features[1][0] |= LMP_HOST_SC; 674 else 675 hdev->features[1][0] &= ~LMP_HOST_SC; 676 } 677 678 if (!hci_dev_test_flag(hdev, HCI_MGMT) && !rp->status) { 679 if (sent->support) 680 hci_dev_set_flag(hdev, HCI_SC_ENABLED); 681 else 682 hci_dev_clear_flag(hdev, HCI_SC_ENABLED); 683 } 684 685 hci_dev_unlock(hdev); 686 687 return rp->status; 688 } 689 690 static u8 hci_cc_read_local_version(struct hci_dev *hdev, void *data, 691 struct sk_buff *skb) 692 { 693 struct hci_rp_read_local_version *rp = data; 694 695 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 696 697 if (rp->status) 698 return rp->status; 699 700 if (hci_dev_test_flag(hdev, HCI_SETUP) || 701 hci_dev_test_flag(hdev, HCI_CONFIG)) { 702 hdev->hci_ver = rp->hci_ver; 703 hdev->hci_rev = __le16_to_cpu(rp->hci_rev); 704 hdev->lmp_ver = rp->lmp_ver; 705 hdev->manufacturer = __le16_to_cpu(rp->manufacturer); 706 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver); 707 } 708 709 return rp->status; 710 } 711 712 static u8 hci_cc_read_local_commands(struct hci_dev *hdev, void *data, 713 struct sk_buff *skb) 714 { 715 struct hci_rp_read_local_commands *rp = data; 716 717 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 718 719 if (rp->status) 720 return rp->status; 721 722 if (hci_dev_test_flag(hdev, HCI_SETUP) || 723 hci_dev_test_flag(hdev, HCI_CONFIG)) 724 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands)); 725 726 return rp->status; 727 } 728 729 static u8 hci_cc_read_auth_payload_timeout(struct hci_dev *hdev, void *data, 730 struct sk_buff *skb) 731 { 732 struct hci_rp_read_auth_payload_to *rp = data; 733 struct hci_conn *conn; 734 735 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 736 737 if (rp->status) 738 return rp->status; 739 740 hci_dev_lock(hdev); 741 742 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); 743 if (conn) 744 conn->auth_payload_timeout = __le16_to_cpu(rp->timeout); 745 746 hci_dev_unlock(hdev); 747 748 return rp->status; 749 } 750 751 static u8 hci_cc_write_auth_payload_timeout(struct hci_dev *hdev, void *data, 752 struct sk_buff *skb) 753 { 754 struct hci_rp_write_auth_payload_to *rp = data; 755 struct hci_conn *conn; 756 void *sent; 757 758 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 759 760 if (rp->status) 761 return rp->status; 762 763 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO); 764 if (!sent) 765 return rp->status; 766 767 hci_dev_lock(hdev); 768 769 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); 770 if (conn) 771 conn->auth_payload_timeout = get_unaligned_le16(sent + 2); 772 773 hci_dev_unlock(hdev); 774 775 return rp->status; 776 } 777 778 static u8 hci_cc_read_local_features(struct hci_dev *hdev, void *data, 779 struct sk_buff *skb) 780 { 781 struct hci_rp_read_local_features *rp = data; 782 783 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 784 785 if (rp->status) 786 return rp->status; 787 788 memcpy(hdev->features, rp->features, 8); 789 790 /* Adjust default settings according to features 791 * supported by device. */ 792 793 if (hdev->features[0][0] & LMP_3SLOT) 794 hdev->pkt_type |= (HCI_DM3 | HCI_DH3); 795 796 if (hdev->features[0][0] & LMP_5SLOT) 797 hdev->pkt_type |= (HCI_DM5 | HCI_DH5); 798 799 if (hdev->features[0][1] & LMP_HV2) { 800 hdev->pkt_type |= (HCI_HV2); 801 hdev->esco_type |= (ESCO_HV2); 802 } 803 804 if (hdev->features[0][1] & LMP_HV3) { 805 hdev->pkt_type |= (HCI_HV3); 806 hdev->esco_type |= (ESCO_HV3); 807 } 808 809 if (lmp_esco_capable(hdev)) 810 hdev->esco_type |= (ESCO_EV3); 811 812 if (hdev->features[0][4] & LMP_EV4) 813 hdev->esco_type |= (ESCO_EV4); 814 815 if (hdev->features[0][4] & LMP_EV5) 816 hdev->esco_type |= (ESCO_EV5); 817 818 if (hdev->features[0][5] & LMP_EDR_ESCO_2M) 819 hdev->esco_type |= (ESCO_2EV3); 820 821 if (hdev->features[0][5] & LMP_EDR_ESCO_3M) 822 hdev->esco_type |= (ESCO_3EV3); 823 824 if (hdev->features[0][5] & LMP_EDR_3S_ESCO) 825 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5); 826 827 return rp->status; 828 } 829 830 static u8 hci_cc_read_local_ext_features(struct hci_dev *hdev, void *data, 831 struct sk_buff *skb) 832 { 833 struct hci_rp_read_local_ext_features *rp = data; 834 835 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 836 837 if (rp->status) 838 return rp->status; 839 840 if (hdev->max_page < rp->max_page) 841 hdev->max_page = rp->max_page; 842 843 if (rp->page < HCI_MAX_PAGES) 844 memcpy(hdev->features[rp->page], rp->features, 8); 845 846 return rp->status; 847 } 848 849 static u8 hci_cc_read_flow_control_mode(struct hci_dev *hdev, void *data, 850 struct sk_buff *skb) 851 { 852 struct hci_rp_read_flow_control_mode *rp = data; 853 854 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 855 856 if (rp->status) 857 return rp->status; 858 859 hdev->flow_ctl_mode = rp->mode; 860 861 return rp->status; 862 } 863 864 static u8 hci_cc_read_buffer_size(struct hci_dev *hdev, void *data, 865 struct sk_buff *skb) 866 { 867 struct hci_rp_read_buffer_size *rp = data; 868 869 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 870 871 if (rp->status) 872 return rp->status; 873 874 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu); 875 hdev->sco_mtu = rp->sco_mtu; 876 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt); 877 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt); 878 879 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) { 880 hdev->sco_mtu = 64; 881 hdev->sco_pkts = 8; 882 } 883 884 hdev->acl_cnt = hdev->acl_pkts; 885 hdev->sco_cnt = hdev->sco_pkts; 886 887 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu, 888 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts); 889 890 return rp->status; 891 } 892 893 static u8 hci_cc_read_bd_addr(struct hci_dev *hdev, void *data, 894 struct sk_buff *skb) 895 { 896 struct hci_rp_read_bd_addr *rp = data; 897 898 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 899 900 if (rp->status) 901 return rp->status; 902 903 if (test_bit(HCI_INIT, &hdev->flags)) 904 bacpy(&hdev->bdaddr, &rp->bdaddr); 905 906 if (hci_dev_test_flag(hdev, HCI_SETUP)) 907 bacpy(&hdev->setup_addr, &rp->bdaddr); 908 909 return rp->status; 910 } 911 912 static u8 hci_cc_read_local_pairing_opts(struct hci_dev *hdev, void *data, 913 struct sk_buff *skb) 914 { 915 struct hci_rp_read_local_pairing_opts *rp = data; 916 917 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 918 919 if (rp->status) 920 return rp->status; 921 922 if (hci_dev_test_flag(hdev, HCI_SETUP) || 923 hci_dev_test_flag(hdev, HCI_CONFIG)) { 924 hdev->pairing_opts = rp->pairing_opts; 925 hdev->max_enc_key_size = rp->max_key_size; 926 } 927 928 return rp->status; 929 } 930 931 static u8 hci_cc_read_page_scan_activity(struct hci_dev *hdev, void *data, 932 struct sk_buff *skb) 933 { 934 struct hci_rp_read_page_scan_activity *rp = data; 935 936 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 937 938 if (rp->status) 939 return rp->status; 940 941 if (test_bit(HCI_INIT, &hdev->flags)) { 942 hdev->page_scan_interval = __le16_to_cpu(rp->interval); 943 hdev->page_scan_window = __le16_to_cpu(rp->window); 944 } 945 946 return rp->status; 947 } 948 949 static u8 hci_cc_write_page_scan_activity(struct hci_dev *hdev, void *data, 950 struct sk_buff *skb) 951 { 952 struct hci_ev_status *rp = data; 953 struct hci_cp_write_page_scan_activity *sent; 954 955 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 956 957 if (rp->status) 958 return rp->status; 959 960 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY); 961 if (!sent) 962 return rp->status; 963 964 hdev->page_scan_interval = __le16_to_cpu(sent->interval); 965 hdev->page_scan_window = __le16_to_cpu(sent->window); 966 967 return rp->status; 968 } 969 970 static u8 hci_cc_read_page_scan_type(struct hci_dev *hdev, void *data, 971 struct sk_buff *skb) 972 { 973 struct hci_rp_read_page_scan_type *rp = data; 974 975 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 976 977 if (rp->status) 978 return rp->status; 979 980 if (test_bit(HCI_INIT, &hdev->flags)) 981 hdev->page_scan_type = rp->type; 982 983 return rp->status; 984 } 985 986 static u8 hci_cc_write_page_scan_type(struct hci_dev *hdev, void *data, 987 struct sk_buff *skb) 988 { 989 struct hci_ev_status *rp = data; 990 u8 *type; 991 992 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 993 994 if (rp->status) 995 return rp->status; 996 997 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE); 998 if (type) 999 hdev->page_scan_type = *type; 1000 1001 return rp->status; 1002 } 1003 1004 static u8 hci_cc_read_data_block_size(struct hci_dev *hdev, void *data, 1005 struct sk_buff *skb) 1006 { 1007 struct hci_rp_read_data_block_size *rp = data; 1008 1009 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1010 1011 if (rp->status) 1012 return rp->status; 1013 1014 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len); 1015 hdev->block_len = __le16_to_cpu(rp->block_len); 1016 hdev->num_blocks = __le16_to_cpu(rp->num_blocks); 1017 1018 hdev->block_cnt = hdev->num_blocks; 1019 1020 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu, 1021 hdev->block_cnt, hdev->block_len); 1022 1023 return rp->status; 1024 } 1025 1026 static u8 hci_cc_read_clock(struct hci_dev *hdev, void *data, 1027 struct sk_buff *skb) 1028 { 1029 struct hci_rp_read_clock *rp = data; 1030 struct hci_cp_read_clock *cp; 1031 struct hci_conn *conn; 1032 1033 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1034 1035 if (rp->status) 1036 return rp->status; 1037 1038 hci_dev_lock(hdev); 1039 1040 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK); 1041 if (!cp) 1042 goto unlock; 1043 1044 if (cp->which == 0x00) { 1045 hdev->clock = le32_to_cpu(rp->clock); 1046 goto unlock; 1047 } 1048 1049 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); 1050 if (conn) { 1051 conn->clock = le32_to_cpu(rp->clock); 1052 conn->clock_accuracy = le16_to_cpu(rp->accuracy); 1053 } 1054 1055 unlock: 1056 hci_dev_unlock(hdev); 1057 return rp->status; 1058 } 1059 1060 static u8 hci_cc_read_local_amp_info(struct hci_dev *hdev, void *data, 1061 struct sk_buff *skb) 1062 { 1063 struct hci_rp_read_local_amp_info *rp = data; 1064 1065 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1066 1067 if (rp->status) 1068 return rp->status; 1069 1070 hdev->amp_status = rp->amp_status; 1071 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw); 1072 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw); 1073 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency); 1074 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu); 1075 hdev->amp_type = rp->amp_type; 1076 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap); 1077 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size); 1078 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to); 1079 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to); 1080 1081 return rp->status; 1082 } 1083 1084 static u8 hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, void *data, 1085 struct sk_buff *skb) 1086 { 1087 struct hci_rp_read_inq_rsp_tx_power *rp = data; 1088 1089 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1090 1091 if (rp->status) 1092 return rp->status; 1093 1094 hdev->inq_tx_power = rp->tx_power; 1095 1096 return rp->status; 1097 } 1098 1099 static u8 hci_cc_read_def_err_data_reporting(struct hci_dev *hdev, void *data, 1100 struct sk_buff *skb) 1101 { 1102 struct hci_rp_read_def_err_data_reporting *rp = data; 1103 1104 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1105 1106 if (rp->status) 1107 return rp->status; 1108 1109 hdev->err_data_reporting = rp->err_data_reporting; 1110 1111 return rp->status; 1112 } 1113 1114 static u8 hci_cc_write_def_err_data_reporting(struct hci_dev *hdev, void *data, 1115 struct sk_buff *skb) 1116 { 1117 struct hci_ev_status *rp = data; 1118 struct hci_cp_write_def_err_data_reporting *cp; 1119 1120 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1121 1122 if (rp->status) 1123 return rp->status; 1124 1125 cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING); 1126 if (!cp) 1127 return rp->status; 1128 1129 hdev->err_data_reporting = cp->err_data_reporting; 1130 1131 return rp->status; 1132 } 1133 1134 static u8 hci_cc_pin_code_reply(struct hci_dev *hdev, void *data, 1135 struct sk_buff *skb) 1136 { 1137 struct hci_rp_pin_code_reply *rp = data; 1138 struct hci_cp_pin_code_reply *cp; 1139 struct hci_conn *conn; 1140 1141 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1142 1143 hci_dev_lock(hdev); 1144 1145 if (hci_dev_test_flag(hdev, HCI_MGMT)) 1146 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status); 1147 1148 if (rp->status) 1149 goto unlock; 1150 1151 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY); 1152 if (!cp) 1153 goto unlock; 1154 1155 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); 1156 if (conn) 1157 conn->pin_length = cp->pin_len; 1158 1159 unlock: 1160 hci_dev_unlock(hdev); 1161 return rp->status; 1162 } 1163 1164 static u8 hci_cc_pin_code_neg_reply(struct hci_dev *hdev, void *data, 1165 struct sk_buff *skb) 1166 { 1167 struct hci_rp_pin_code_neg_reply *rp = data; 1168 1169 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1170 1171 hci_dev_lock(hdev); 1172 1173 if (hci_dev_test_flag(hdev, HCI_MGMT)) 1174 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr, 1175 rp->status); 1176 1177 hci_dev_unlock(hdev); 1178 1179 return rp->status; 1180 } 1181 1182 static u8 hci_cc_le_read_buffer_size(struct hci_dev *hdev, void *data, 1183 struct sk_buff *skb) 1184 { 1185 struct hci_rp_le_read_buffer_size *rp = data; 1186 1187 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1188 1189 if (rp->status) 1190 return rp->status; 1191 1192 hdev->le_mtu = __le16_to_cpu(rp->le_mtu); 1193 hdev->le_pkts = rp->le_max_pkt; 1194 1195 hdev->le_cnt = hdev->le_pkts; 1196 1197 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts); 1198 1199 return rp->status; 1200 } 1201 1202 static u8 hci_cc_le_read_local_features(struct hci_dev *hdev, void *data, 1203 struct sk_buff *skb) 1204 { 1205 struct hci_rp_le_read_local_features *rp = data; 1206 1207 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 1208 1209 if (rp->status) 1210 return rp->status; 1211 1212 memcpy(hdev->le_features, rp->features, 8); 1213 1214 return rp->status; 1215 } 1216 1217 static u8 hci_cc_le_read_adv_tx_power(struct hci_dev *hdev, void *data, 1218 struct sk_buff *skb) 1219 { 1220 struct hci_rp_le_read_adv_tx_power *rp = data; 1221 1222 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1223 1224 if (rp->status) 1225 return rp->status; 1226 1227 hdev->adv_tx_power = rp->tx_power; 1228 1229 return rp->status; 1230 } 1231 1232 static u8 hci_cc_user_confirm_reply(struct hci_dev *hdev, void *data, 1233 struct sk_buff *skb) 1234 { 1235 struct hci_rp_user_confirm_reply *rp = data; 1236 1237 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1238 1239 hci_dev_lock(hdev); 1240 1241 if (hci_dev_test_flag(hdev, HCI_MGMT)) 1242 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0, 1243 rp->status); 1244 1245 hci_dev_unlock(hdev); 1246 1247 return rp->status; 1248 } 1249 1250 static u8 hci_cc_user_confirm_neg_reply(struct hci_dev *hdev, void *data, 1251 struct sk_buff *skb) 1252 { 1253 struct hci_rp_user_confirm_reply *rp = data; 1254 1255 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1256 1257 hci_dev_lock(hdev); 1258 1259 if (hci_dev_test_flag(hdev, HCI_MGMT)) 1260 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr, 1261 ACL_LINK, 0, rp->status); 1262 1263 hci_dev_unlock(hdev); 1264 1265 return rp->status; 1266 } 1267 1268 static u8 hci_cc_user_passkey_reply(struct hci_dev *hdev, void *data, 1269 struct sk_buff *skb) 1270 { 1271 struct hci_rp_user_confirm_reply *rp = data; 1272 1273 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1274 1275 hci_dev_lock(hdev); 1276 1277 if (hci_dev_test_flag(hdev, HCI_MGMT)) 1278 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 1279 0, rp->status); 1280 1281 hci_dev_unlock(hdev); 1282 1283 return rp->status; 1284 } 1285 1286 static u8 hci_cc_user_passkey_neg_reply(struct hci_dev *hdev, void *data, 1287 struct sk_buff *skb) 1288 { 1289 struct hci_rp_user_confirm_reply *rp = data; 1290 1291 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1292 1293 hci_dev_lock(hdev); 1294 1295 if (hci_dev_test_flag(hdev, HCI_MGMT)) 1296 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr, 1297 ACL_LINK, 0, rp->status); 1298 1299 hci_dev_unlock(hdev); 1300 1301 return rp->status; 1302 } 1303 1304 static u8 hci_cc_read_local_oob_data(struct hci_dev *hdev, void *data, 1305 struct sk_buff *skb) 1306 { 1307 struct hci_rp_read_local_oob_data *rp = data; 1308 1309 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1310 1311 return rp->status; 1312 } 1313 1314 static u8 hci_cc_read_local_oob_ext_data(struct hci_dev *hdev, void *data, 1315 struct sk_buff *skb) 1316 { 1317 struct hci_rp_read_local_oob_ext_data *rp = data; 1318 1319 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1320 1321 return rp->status; 1322 } 1323 1324 static u8 hci_cc_le_set_random_addr(struct hci_dev *hdev, void *data, 1325 struct sk_buff *skb) 1326 { 1327 struct hci_ev_status *rp = data; 1328 bdaddr_t *sent; 1329 1330 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1331 1332 if (rp->status) 1333 return rp->status; 1334 1335 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR); 1336 if (!sent) 1337 return rp->status; 1338 1339 hci_dev_lock(hdev); 1340 1341 bacpy(&hdev->random_addr, sent); 1342 1343 if (!bacmp(&hdev->rpa, sent)) { 1344 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED); 1345 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, 1346 secs_to_jiffies(hdev->rpa_timeout)); 1347 } 1348 1349 hci_dev_unlock(hdev); 1350 1351 return rp->status; 1352 } 1353 1354 static u8 hci_cc_le_set_default_phy(struct hci_dev *hdev, void *data, 1355 struct sk_buff *skb) 1356 { 1357 struct hci_ev_status *rp = data; 1358 struct hci_cp_le_set_default_phy *cp; 1359 1360 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1361 1362 if (rp->status) 1363 return rp->status; 1364 1365 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY); 1366 if (!cp) 1367 return rp->status; 1368 1369 hci_dev_lock(hdev); 1370 1371 hdev->le_tx_def_phys = cp->tx_phys; 1372 hdev->le_rx_def_phys = cp->rx_phys; 1373 1374 hci_dev_unlock(hdev); 1375 1376 return rp->status; 1377 } 1378 1379 static u8 hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev, void *data, 1380 struct sk_buff *skb) 1381 { 1382 struct hci_ev_status *rp = data; 1383 struct hci_cp_le_set_adv_set_rand_addr *cp; 1384 struct adv_info *adv; 1385 1386 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1387 1388 if (rp->status) 1389 return rp->status; 1390 1391 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR); 1392 /* Update only in case the adv instance since handle 0x00 shall be using 1393 * HCI_OP_LE_SET_RANDOM_ADDR since that allows both extended and 1394 * non-extended adverting. 1395 */ 1396 if (!cp || !cp->handle) 1397 return rp->status; 1398 1399 hci_dev_lock(hdev); 1400 1401 adv = hci_find_adv_instance(hdev, cp->handle); 1402 if (adv) { 1403 bacpy(&adv->random_addr, &cp->bdaddr); 1404 if (!bacmp(&hdev->rpa, &cp->bdaddr)) { 1405 adv->rpa_expired = false; 1406 queue_delayed_work(hdev->workqueue, 1407 &adv->rpa_expired_cb, 1408 secs_to_jiffies(hdev->rpa_timeout)); 1409 } 1410 } 1411 1412 hci_dev_unlock(hdev); 1413 1414 return rp->status; 1415 } 1416 1417 static u8 hci_cc_le_remove_adv_set(struct hci_dev *hdev, void *data, 1418 struct sk_buff *skb) 1419 { 1420 struct hci_ev_status *rp = data; 1421 u8 *instance; 1422 int err; 1423 1424 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1425 1426 if (rp->status) 1427 return rp->status; 1428 1429 instance = hci_sent_cmd_data(hdev, HCI_OP_LE_REMOVE_ADV_SET); 1430 if (!instance) 1431 return rp->status; 1432 1433 hci_dev_lock(hdev); 1434 1435 err = hci_remove_adv_instance(hdev, *instance); 1436 if (!err) 1437 mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd), hdev, 1438 *instance); 1439 1440 hci_dev_unlock(hdev); 1441 1442 return rp->status; 1443 } 1444 1445 static u8 hci_cc_le_clear_adv_sets(struct hci_dev *hdev, void *data, 1446 struct sk_buff *skb) 1447 { 1448 struct hci_ev_status *rp = data; 1449 struct adv_info *adv, *n; 1450 int err; 1451 1452 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1453 1454 if (rp->status) 1455 return rp->status; 1456 1457 if (!hci_sent_cmd_data(hdev, HCI_OP_LE_CLEAR_ADV_SETS)) 1458 return rp->status; 1459 1460 hci_dev_lock(hdev); 1461 1462 list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) { 1463 u8 instance = adv->instance; 1464 1465 err = hci_remove_adv_instance(hdev, instance); 1466 if (!err) 1467 mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd), 1468 hdev, instance); 1469 } 1470 1471 hci_dev_unlock(hdev); 1472 1473 return rp->status; 1474 } 1475 1476 static u8 hci_cc_le_read_transmit_power(struct hci_dev *hdev, void *data, 1477 struct sk_buff *skb) 1478 { 1479 struct hci_rp_le_read_transmit_power *rp = data; 1480 1481 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1482 1483 if (rp->status) 1484 return rp->status; 1485 1486 hdev->min_le_tx_power = rp->min_le_tx_power; 1487 hdev->max_le_tx_power = rp->max_le_tx_power; 1488 1489 return rp->status; 1490 } 1491 1492 static u8 hci_cc_le_set_privacy_mode(struct hci_dev *hdev, void *data, 1493 struct sk_buff *skb) 1494 { 1495 struct hci_ev_status *rp = data; 1496 struct hci_cp_le_set_privacy_mode *cp; 1497 struct hci_conn_params *params; 1498 1499 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1500 1501 if (rp->status) 1502 return rp->status; 1503 1504 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PRIVACY_MODE); 1505 if (!cp) 1506 return rp->status; 1507 1508 hci_dev_lock(hdev); 1509 1510 params = hci_conn_params_lookup(hdev, &cp->bdaddr, cp->bdaddr_type); 1511 if (params) 1512 params->privacy_mode = cp->mode; 1513 1514 hci_dev_unlock(hdev); 1515 1516 return rp->status; 1517 } 1518 1519 static u8 hci_cc_le_set_adv_enable(struct hci_dev *hdev, void *data, 1520 struct sk_buff *skb) 1521 { 1522 struct hci_ev_status *rp = data; 1523 __u8 *sent; 1524 1525 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1526 1527 if (rp->status) 1528 return rp->status; 1529 1530 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE); 1531 if (!sent) 1532 return rp->status; 1533 1534 hci_dev_lock(hdev); 1535 1536 /* If we're doing connection initiation as peripheral. Set a 1537 * timeout in case something goes wrong. 1538 */ 1539 if (*sent) { 1540 struct hci_conn *conn; 1541 1542 hci_dev_set_flag(hdev, HCI_LE_ADV); 1543 1544 conn = hci_lookup_le_connect(hdev); 1545 if (conn) 1546 queue_delayed_work(hdev->workqueue, 1547 &conn->le_conn_timeout, 1548 conn->conn_timeout); 1549 } else { 1550 hci_dev_clear_flag(hdev, HCI_LE_ADV); 1551 } 1552 1553 hci_dev_unlock(hdev); 1554 1555 return rp->status; 1556 } 1557 1558 static u8 hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev, void *data, 1559 struct sk_buff *skb) 1560 { 1561 struct hci_cp_le_set_ext_adv_enable *cp; 1562 struct hci_cp_ext_adv_set *set; 1563 struct adv_info *adv = NULL, *n; 1564 struct hci_ev_status *rp = data; 1565 1566 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1567 1568 if (rp->status) 1569 return rp->status; 1570 1571 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE); 1572 if (!cp) 1573 return rp->status; 1574 1575 set = (void *)cp->data; 1576 1577 hci_dev_lock(hdev); 1578 1579 if (cp->num_of_sets) 1580 adv = hci_find_adv_instance(hdev, set->handle); 1581 1582 if (cp->enable) { 1583 struct hci_conn *conn; 1584 1585 hci_dev_set_flag(hdev, HCI_LE_ADV); 1586 1587 if (adv) 1588 adv->enabled = true; 1589 1590 conn = hci_lookup_le_connect(hdev); 1591 if (conn) 1592 queue_delayed_work(hdev->workqueue, 1593 &conn->le_conn_timeout, 1594 conn->conn_timeout); 1595 } else { 1596 if (cp->num_of_sets) { 1597 if (adv) 1598 adv->enabled = false; 1599 1600 /* If just one instance was disabled check if there are 1601 * any other instance enabled before clearing HCI_LE_ADV 1602 */ 1603 list_for_each_entry_safe(adv, n, &hdev->adv_instances, 1604 list) { 1605 if (adv->enabled) 1606 goto unlock; 1607 } 1608 } else { 1609 /* All instances shall be considered disabled */ 1610 list_for_each_entry_safe(adv, n, &hdev->adv_instances, 1611 list) 1612 adv->enabled = false; 1613 } 1614 1615 hci_dev_clear_flag(hdev, HCI_LE_ADV); 1616 } 1617 1618 unlock: 1619 hci_dev_unlock(hdev); 1620 return rp->status; 1621 } 1622 1623 static u8 hci_cc_le_set_scan_param(struct hci_dev *hdev, void *data, 1624 struct sk_buff *skb) 1625 { 1626 struct hci_cp_le_set_scan_param *cp; 1627 struct hci_ev_status *rp = data; 1628 1629 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1630 1631 if (rp->status) 1632 return rp->status; 1633 1634 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM); 1635 if (!cp) 1636 return rp->status; 1637 1638 hci_dev_lock(hdev); 1639 1640 hdev->le_scan_type = cp->type; 1641 1642 hci_dev_unlock(hdev); 1643 1644 return rp->status; 1645 } 1646 1647 static u8 hci_cc_le_set_ext_scan_param(struct hci_dev *hdev, void *data, 1648 struct sk_buff *skb) 1649 { 1650 struct hci_cp_le_set_ext_scan_params *cp; 1651 struct hci_ev_status *rp = data; 1652 struct hci_cp_le_scan_phy_params *phy_param; 1653 1654 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1655 1656 if (rp->status) 1657 return rp->status; 1658 1659 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS); 1660 if (!cp) 1661 return rp->status; 1662 1663 phy_param = (void *)cp->data; 1664 1665 hci_dev_lock(hdev); 1666 1667 hdev->le_scan_type = phy_param->type; 1668 1669 hci_dev_unlock(hdev); 1670 1671 return rp->status; 1672 } 1673 1674 static bool has_pending_adv_report(struct hci_dev *hdev) 1675 { 1676 struct discovery_state *d = &hdev->discovery; 1677 1678 return bacmp(&d->last_adv_addr, BDADDR_ANY); 1679 } 1680 1681 static void clear_pending_adv_report(struct hci_dev *hdev) 1682 { 1683 struct discovery_state *d = &hdev->discovery; 1684 1685 bacpy(&d->last_adv_addr, BDADDR_ANY); 1686 d->last_adv_data_len = 0; 1687 } 1688 1689 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr, 1690 u8 bdaddr_type, s8 rssi, u32 flags, 1691 u8 *data, u8 len) 1692 { 1693 struct discovery_state *d = &hdev->discovery; 1694 1695 if (len > HCI_MAX_AD_LENGTH) 1696 return; 1697 1698 bacpy(&d->last_adv_addr, bdaddr); 1699 d->last_adv_addr_type = bdaddr_type; 1700 d->last_adv_rssi = rssi; 1701 d->last_adv_flags = flags; 1702 memcpy(d->last_adv_data, data, len); 1703 d->last_adv_data_len = len; 1704 } 1705 1706 static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable) 1707 { 1708 hci_dev_lock(hdev); 1709 1710 switch (enable) { 1711 case LE_SCAN_ENABLE: 1712 hci_dev_set_flag(hdev, HCI_LE_SCAN); 1713 if (hdev->le_scan_type == LE_SCAN_ACTIVE) 1714 clear_pending_adv_report(hdev); 1715 break; 1716 1717 case LE_SCAN_DISABLE: 1718 /* We do this here instead of when setting DISCOVERY_STOPPED 1719 * since the latter would potentially require waiting for 1720 * inquiry to stop too. 1721 */ 1722 if (has_pending_adv_report(hdev)) { 1723 struct discovery_state *d = &hdev->discovery; 1724 1725 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK, 1726 d->last_adv_addr_type, NULL, 1727 d->last_adv_rssi, d->last_adv_flags, 1728 d->last_adv_data, 1729 d->last_adv_data_len, NULL, 0); 1730 } 1731 1732 /* Cancel this timer so that we don't try to disable scanning 1733 * when it's already disabled. 1734 */ 1735 cancel_delayed_work(&hdev->le_scan_disable); 1736 1737 hci_dev_clear_flag(hdev, HCI_LE_SCAN); 1738 1739 /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we 1740 * interrupted scanning due to a connect request. Mark 1741 * therefore discovery as stopped. 1742 */ 1743 if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED)) 1744 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 1745 1746 break; 1747 1748 default: 1749 bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d", 1750 enable); 1751 break; 1752 } 1753 1754 hci_dev_unlock(hdev); 1755 } 1756 1757 static u8 hci_cc_le_set_scan_enable(struct hci_dev *hdev, void *data, 1758 struct sk_buff *skb) 1759 { 1760 struct hci_cp_le_set_scan_enable *cp; 1761 struct hci_ev_status *rp = data; 1762 1763 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1764 1765 if (rp->status) 1766 return rp->status; 1767 1768 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE); 1769 if (!cp) 1770 return rp->status; 1771 1772 le_set_scan_enable_complete(hdev, cp->enable); 1773 1774 return rp->status; 1775 } 1776 1777 static u8 hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev, void *data, 1778 struct sk_buff *skb) 1779 { 1780 struct hci_cp_le_set_ext_scan_enable *cp; 1781 struct hci_ev_status *rp = data; 1782 1783 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1784 1785 if (rp->status) 1786 return rp->status; 1787 1788 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE); 1789 if (!cp) 1790 return rp->status; 1791 1792 le_set_scan_enable_complete(hdev, cp->enable); 1793 1794 return rp->status; 1795 } 1796 1797 static u8 hci_cc_le_read_num_adv_sets(struct hci_dev *hdev, void *data, 1798 struct sk_buff *skb) 1799 { 1800 struct hci_rp_le_read_num_supported_adv_sets *rp = data; 1801 1802 bt_dev_dbg(hdev, "status 0x%2.2x No of Adv sets %u", rp->status, 1803 rp->num_of_sets); 1804 1805 if (rp->status) 1806 return rp->status; 1807 1808 hdev->le_num_of_adv_sets = rp->num_of_sets; 1809 1810 return rp->status; 1811 } 1812 1813 static u8 hci_cc_le_read_accept_list_size(struct hci_dev *hdev, void *data, 1814 struct sk_buff *skb) 1815 { 1816 struct hci_rp_le_read_accept_list_size *rp = data; 1817 1818 bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size); 1819 1820 if (rp->status) 1821 return rp->status; 1822 1823 hdev->le_accept_list_size = rp->size; 1824 1825 return rp->status; 1826 } 1827 1828 static u8 hci_cc_le_clear_accept_list(struct hci_dev *hdev, void *data, 1829 struct sk_buff *skb) 1830 { 1831 struct hci_ev_status *rp = data; 1832 1833 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1834 1835 if (rp->status) 1836 return rp->status; 1837 1838 hci_bdaddr_list_clear(&hdev->le_accept_list); 1839 1840 return rp->status; 1841 } 1842 1843 static u8 hci_cc_le_add_to_accept_list(struct hci_dev *hdev, void *data, 1844 struct sk_buff *skb) 1845 { 1846 struct hci_cp_le_add_to_accept_list *sent; 1847 struct hci_ev_status *rp = data; 1848 1849 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1850 1851 if (rp->status) 1852 return rp->status; 1853 1854 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST); 1855 if (!sent) 1856 return rp->status; 1857 1858 hci_bdaddr_list_add(&hdev->le_accept_list, &sent->bdaddr, 1859 sent->bdaddr_type); 1860 1861 return rp->status; 1862 } 1863 1864 static u8 hci_cc_le_del_from_accept_list(struct hci_dev *hdev, void *data, 1865 struct sk_buff *skb) 1866 { 1867 struct hci_cp_le_del_from_accept_list *sent; 1868 struct hci_ev_status *rp = data; 1869 1870 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1871 1872 if (rp->status) 1873 return rp->status; 1874 1875 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST); 1876 if (!sent) 1877 return rp->status; 1878 1879 hci_bdaddr_list_del(&hdev->le_accept_list, &sent->bdaddr, 1880 sent->bdaddr_type); 1881 1882 return rp->status; 1883 } 1884 1885 static u8 hci_cc_le_read_supported_states(struct hci_dev *hdev, void *data, 1886 struct sk_buff *skb) 1887 { 1888 struct hci_rp_le_read_supported_states *rp = data; 1889 1890 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1891 1892 if (rp->status) 1893 return rp->status; 1894 1895 memcpy(hdev->le_states, rp->le_states, 8); 1896 1897 return rp->status; 1898 } 1899 1900 static u8 hci_cc_le_read_def_data_len(struct hci_dev *hdev, void *data, 1901 struct sk_buff *skb) 1902 { 1903 struct hci_rp_le_read_def_data_len *rp = data; 1904 1905 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1906 1907 if (rp->status) 1908 return rp->status; 1909 1910 hdev->le_def_tx_len = le16_to_cpu(rp->tx_len); 1911 hdev->le_def_tx_time = le16_to_cpu(rp->tx_time); 1912 1913 return rp->status; 1914 } 1915 1916 static u8 hci_cc_le_write_def_data_len(struct hci_dev *hdev, void *data, 1917 struct sk_buff *skb) 1918 { 1919 struct hci_cp_le_write_def_data_len *sent; 1920 struct hci_ev_status *rp = data; 1921 1922 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1923 1924 if (rp->status) 1925 return rp->status; 1926 1927 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN); 1928 if (!sent) 1929 return rp->status; 1930 1931 hdev->le_def_tx_len = le16_to_cpu(sent->tx_len); 1932 hdev->le_def_tx_time = le16_to_cpu(sent->tx_time); 1933 1934 return rp->status; 1935 } 1936 1937 static u8 hci_cc_le_add_to_resolv_list(struct hci_dev *hdev, void *data, 1938 struct sk_buff *skb) 1939 { 1940 struct hci_cp_le_add_to_resolv_list *sent; 1941 struct hci_ev_status *rp = data; 1942 1943 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1944 1945 if (rp->status) 1946 return rp->status; 1947 1948 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST); 1949 if (!sent) 1950 return rp->status; 1951 1952 hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr, 1953 sent->bdaddr_type, sent->peer_irk, 1954 sent->local_irk); 1955 1956 return rp->status; 1957 } 1958 1959 static u8 hci_cc_le_del_from_resolv_list(struct hci_dev *hdev, void *data, 1960 struct sk_buff *skb) 1961 { 1962 struct hci_cp_le_del_from_resolv_list *sent; 1963 struct hci_ev_status *rp = data; 1964 1965 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1966 1967 if (rp->status) 1968 return rp->status; 1969 1970 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST); 1971 if (!sent) 1972 return rp->status; 1973 1974 hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr, 1975 sent->bdaddr_type); 1976 1977 return rp->status; 1978 } 1979 1980 static u8 hci_cc_le_clear_resolv_list(struct hci_dev *hdev, void *data, 1981 struct sk_buff *skb) 1982 { 1983 struct hci_ev_status *rp = data; 1984 1985 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 1986 1987 if (rp->status) 1988 return rp->status; 1989 1990 hci_bdaddr_list_clear(&hdev->le_resolv_list); 1991 1992 return rp->status; 1993 } 1994 1995 static u8 hci_cc_le_read_resolv_list_size(struct hci_dev *hdev, void *data, 1996 struct sk_buff *skb) 1997 { 1998 struct hci_rp_le_read_resolv_list_size *rp = data; 1999 2000 bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size); 2001 2002 if (rp->status) 2003 return rp->status; 2004 2005 hdev->le_resolv_list_size = rp->size; 2006 2007 return rp->status; 2008 } 2009 2010 static u8 hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev, void *data, 2011 struct sk_buff *skb) 2012 { 2013 struct hci_ev_status *rp = data; 2014 __u8 *sent; 2015 2016 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 2017 2018 if (rp->status) 2019 return rp->status; 2020 2021 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE); 2022 if (!sent) 2023 return rp->status; 2024 2025 hci_dev_lock(hdev); 2026 2027 if (*sent) 2028 hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION); 2029 else 2030 hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION); 2031 2032 hci_dev_unlock(hdev); 2033 2034 return rp->status; 2035 } 2036 2037 static u8 hci_cc_le_read_max_data_len(struct hci_dev *hdev, void *data, 2038 struct sk_buff *skb) 2039 { 2040 struct hci_rp_le_read_max_data_len *rp = data; 2041 2042 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 2043 2044 if (rp->status) 2045 return rp->status; 2046 2047 hdev->le_max_tx_len = le16_to_cpu(rp->tx_len); 2048 hdev->le_max_tx_time = le16_to_cpu(rp->tx_time); 2049 hdev->le_max_rx_len = le16_to_cpu(rp->rx_len); 2050 hdev->le_max_rx_time = le16_to_cpu(rp->rx_time); 2051 2052 return rp->status; 2053 } 2054 2055 static u8 hci_cc_write_le_host_supported(struct hci_dev *hdev, void *data, 2056 struct sk_buff *skb) 2057 { 2058 struct hci_cp_write_le_host_supported *sent; 2059 struct hci_ev_status *rp = data; 2060 2061 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 2062 2063 if (rp->status) 2064 return rp->status; 2065 2066 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED); 2067 if (!sent) 2068 return rp->status; 2069 2070 hci_dev_lock(hdev); 2071 2072 if (sent->le) { 2073 hdev->features[1][0] |= LMP_HOST_LE; 2074 hci_dev_set_flag(hdev, HCI_LE_ENABLED); 2075 } else { 2076 hdev->features[1][0] &= ~LMP_HOST_LE; 2077 hci_dev_clear_flag(hdev, HCI_LE_ENABLED); 2078 hci_dev_clear_flag(hdev, HCI_ADVERTISING); 2079 } 2080 2081 if (sent->simul) 2082 hdev->features[1][0] |= LMP_HOST_LE_BREDR; 2083 else 2084 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR; 2085 2086 hci_dev_unlock(hdev); 2087 2088 return rp->status; 2089 } 2090 2091 static u8 hci_cc_set_adv_param(struct hci_dev *hdev, void *data, 2092 struct sk_buff *skb) 2093 { 2094 struct hci_cp_le_set_adv_param *cp; 2095 struct hci_ev_status *rp = data; 2096 2097 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 2098 2099 if (rp->status) 2100 return rp->status; 2101 2102 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM); 2103 if (!cp) 2104 return rp->status; 2105 2106 hci_dev_lock(hdev); 2107 hdev->adv_addr_type = cp->own_address_type; 2108 hci_dev_unlock(hdev); 2109 2110 return rp->status; 2111 } 2112 2113 static u8 hci_cc_set_ext_adv_param(struct hci_dev *hdev, void *data, 2114 struct sk_buff *skb) 2115 { 2116 struct hci_rp_le_set_ext_adv_params *rp = data; 2117 struct hci_cp_le_set_ext_adv_params *cp; 2118 struct adv_info *adv_instance; 2119 2120 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 2121 2122 if (rp->status) 2123 return rp->status; 2124 2125 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS); 2126 if (!cp) 2127 return rp->status; 2128 2129 hci_dev_lock(hdev); 2130 hdev->adv_addr_type = cp->own_addr_type; 2131 if (!cp->handle) { 2132 /* Store in hdev for instance 0 */ 2133 hdev->adv_tx_power = rp->tx_power; 2134 } else { 2135 adv_instance = hci_find_adv_instance(hdev, cp->handle); 2136 if (adv_instance) 2137 adv_instance->tx_power = rp->tx_power; 2138 } 2139 /* Update adv data as tx power is known now */ 2140 hci_req_update_adv_data(hdev, cp->handle); 2141 2142 hci_dev_unlock(hdev); 2143 2144 return rp->status; 2145 } 2146 2147 static u8 hci_cc_read_rssi(struct hci_dev *hdev, void *data, 2148 struct sk_buff *skb) 2149 { 2150 struct hci_rp_read_rssi *rp = data; 2151 struct hci_conn *conn; 2152 2153 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 2154 2155 if (rp->status) 2156 return rp->status; 2157 2158 hci_dev_lock(hdev); 2159 2160 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); 2161 if (conn) 2162 conn->rssi = rp->rssi; 2163 2164 hci_dev_unlock(hdev); 2165 2166 return rp->status; 2167 } 2168 2169 static u8 hci_cc_read_tx_power(struct hci_dev *hdev, void *data, 2170 struct sk_buff *skb) 2171 { 2172 struct hci_cp_read_tx_power *sent; 2173 struct hci_rp_read_tx_power *rp = data; 2174 struct hci_conn *conn; 2175 2176 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 2177 2178 if (rp->status) 2179 return rp->status; 2180 2181 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER); 2182 if (!sent) 2183 return rp->status; 2184 2185 hci_dev_lock(hdev); 2186 2187 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); 2188 if (!conn) 2189 goto unlock; 2190 2191 switch (sent->type) { 2192 case 0x00: 2193 conn->tx_power = rp->tx_power; 2194 break; 2195 case 0x01: 2196 conn->max_tx_power = rp->tx_power; 2197 break; 2198 } 2199 2200 unlock: 2201 hci_dev_unlock(hdev); 2202 return rp->status; 2203 } 2204 2205 static u8 hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, void *data, 2206 struct sk_buff *skb) 2207 { 2208 struct hci_ev_status *rp = data; 2209 u8 *mode; 2210 2211 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); 2212 2213 if (rp->status) 2214 return rp->status; 2215 2216 mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE); 2217 if (mode) 2218 hdev->ssp_debug_mode = *mode; 2219 2220 return rp->status; 2221 } 2222 2223 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status) 2224 { 2225 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2226 2227 if (status) { 2228 hci_conn_check_pending(hdev); 2229 return; 2230 } 2231 2232 set_bit(HCI_INQUIRY, &hdev->flags); 2233 } 2234 2235 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status) 2236 { 2237 struct hci_cp_create_conn *cp; 2238 struct hci_conn *conn; 2239 2240 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2241 2242 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN); 2243 if (!cp) 2244 return; 2245 2246 hci_dev_lock(hdev); 2247 2248 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); 2249 2250 bt_dev_dbg(hdev, "bdaddr %pMR hcon %p", &cp->bdaddr, conn); 2251 2252 if (status) { 2253 if (conn && conn->state == BT_CONNECT) { 2254 if (status != 0x0c || conn->attempt > 2) { 2255 conn->state = BT_CLOSED; 2256 hci_connect_cfm(conn, status); 2257 hci_conn_del(conn); 2258 } else 2259 conn->state = BT_CONNECT2; 2260 } 2261 } else { 2262 if (!conn) { 2263 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr, 2264 HCI_ROLE_MASTER); 2265 if (!conn) 2266 bt_dev_err(hdev, "no memory for new connection"); 2267 } 2268 } 2269 2270 hci_dev_unlock(hdev); 2271 } 2272 2273 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status) 2274 { 2275 struct hci_cp_add_sco *cp; 2276 struct hci_conn *acl, *sco; 2277 __u16 handle; 2278 2279 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2280 2281 if (!status) 2282 return; 2283 2284 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO); 2285 if (!cp) 2286 return; 2287 2288 handle = __le16_to_cpu(cp->handle); 2289 2290 bt_dev_dbg(hdev, "handle 0x%4.4x", handle); 2291 2292 hci_dev_lock(hdev); 2293 2294 acl = hci_conn_hash_lookup_handle(hdev, handle); 2295 if (acl) { 2296 sco = acl->link; 2297 if (sco) { 2298 sco->state = BT_CLOSED; 2299 2300 hci_connect_cfm(sco, status); 2301 hci_conn_del(sco); 2302 } 2303 } 2304 2305 hci_dev_unlock(hdev); 2306 } 2307 2308 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status) 2309 { 2310 struct hci_cp_auth_requested *cp; 2311 struct hci_conn *conn; 2312 2313 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2314 2315 if (!status) 2316 return; 2317 2318 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED); 2319 if (!cp) 2320 return; 2321 2322 hci_dev_lock(hdev); 2323 2324 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2325 if (conn) { 2326 if (conn->state == BT_CONFIG) { 2327 hci_connect_cfm(conn, status); 2328 hci_conn_drop(conn); 2329 } 2330 } 2331 2332 hci_dev_unlock(hdev); 2333 } 2334 2335 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status) 2336 { 2337 struct hci_cp_set_conn_encrypt *cp; 2338 struct hci_conn *conn; 2339 2340 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2341 2342 if (!status) 2343 return; 2344 2345 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT); 2346 if (!cp) 2347 return; 2348 2349 hci_dev_lock(hdev); 2350 2351 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2352 if (conn) { 2353 if (conn->state == BT_CONFIG) { 2354 hci_connect_cfm(conn, status); 2355 hci_conn_drop(conn); 2356 } 2357 } 2358 2359 hci_dev_unlock(hdev); 2360 } 2361 2362 static int hci_outgoing_auth_needed(struct hci_dev *hdev, 2363 struct hci_conn *conn) 2364 { 2365 if (conn->state != BT_CONFIG || !conn->out) 2366 return 0; 2367 2368 if (conn->pending_sec_level == BT_SECURITY_SDP) 2369 return 0; 2370 2371 /* Only request authentication for SSP connections or non-SSP 2372 * devices with sec_level MEDIUM or HIGH or if MITM protection 2373 * is requested. 2374 */ 2375 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) && 2376 conn->pending_sec_level != BT_SECURITY_FIPS && 2377 conn->pending_sec_level != BT_SECURITY_HIGH && 2378 conn->pending_sec_level != BT_SECURITY_MEDIUM) 2379 return 0; 2380 2381 return 1; 2382 } 2383 2384 static int hci_resolve_name(struct hci_dev *hdev, 2385 struct inquiry_entry *e) 2386 { 2387 struct hci_cp_remote_name_req cp; 2388 2389 memset(&cp, 0, sizeof(cp)); 2390 2391 bacpy(&cp.bdaddr, &e->data.bdaddr); 2392 cp.pscan_rep_mode = e->data.pscan_rep_mode; 2393 cp.pscan_mode = e->data.pscan_mode; 2394 cp.clock_offset = e->data.clock_offset; 2395 2396 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp); 2397 } 2398 2399 static bool hci_resolve_next_name(struct hci_dev *hdev) 2400 { 2401 struct discovery_state *discov = &hdev->discovery; 2402 struct inquiry_entry *e; 2403 2404 if (list_empty(&discov->resolve)) 2405 return false; 2406 2407 /* We should stop if we already spent too much time resolving names. */ 2408 if (time_after(jiffies, discov->name_resolve_timeout)) { 2409 bt_dev_warn_ratelimited(hdev, "Name resolve takes too long."); 2410 return false; 2411 } 2412 2413 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED); 2414 if (!e) 2415 return false; 2416 2417 if (hci_resolve_name(hdev, e) == 0) { 2418 e->name_state = NAME_PENDING; 2419 return true; 2420 } 2421 2422 return false; 2423 } 2424 2425 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn, 2426 bdaddr_t *bdaddr, u8 *name, u8 name_len) 2427 { 2428 struct discovery_state *discov = &hdev->discovery; 2429 struct inquiry_entry *e; 2430 2431 /* Update the mgmt connected state if necessary. Be careful with 2432 * conn objects that exist but are not (yet) connected however. 2433 * Only those in BT_CONFIG or BT_CONNECTED states can be 2434 * considered connected. 2435 */ 2436 if (conn && 2437 (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) && 2438 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) 2439 mgmt_device_connected(hdev, conn, name, name_len); 2440 2441 if (discov->state == DISCOVERY_STOPPED) 2442 return; 2443 2444 if (discov->state == DISCOVERY_STOPPING) 2445 goto discov_complete; 2446 2447 if (discov->state != DISCOVERY_RESOLVING) 2448 return; 2449 2450 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING); 2451 /* If the device was not found in a list of found devices names of which 2452 * are pending. there is no need to continue resolving a next name as it 2453 * will be done upon receiving another Remote Name Request Complete 2454 * Event */ 2455 if (!e) 2456 return; 2457 2458 list_del(&e->list); 2459 2460 e->name_state = name ? NAME_KNOWN : NAME_NOT_KNOWN; 2461 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00, e->data.rssi, 2462 name, name_len); 2463 2464 if (hci_resolve_next_name(hdev)) 2465 return; 2466 2467 discov_complete: 2468 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 2469 } 2470 2471 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status) 2472 { 2473 struct hci_cp_remote_name_req *cp; 2474 struct hci_conn *conn; 2475 2476 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2477 2478 /* If successful wait for the name req complete event before 2479 * checking for the need to do authentication */ 2480 if (!status) 2481 return; 2482 2483 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ); 2484 if (!cp) 2485 return; 2486 2487 hci_dev_lock(hdev); 2488 2489 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); 2490 2491 if (hci_dev_test_flag(hdev, HCI_MGMT)) 2492 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0); 2493 2494 if (!conn) 2495 goto unlock; 2496 2497 if (!hci_outgoing_auth_needed(hdev, conn)) 2498 goto unlock; 2499 2500 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) { 2501 struct hci_cp_auth_requested auth_cp; 2502 2503 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags); 2504 2505 auth_cp.handle = __cpu_to_le16(conn->handle); 2506 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, 2507 sizeof(auth_cp), &auth_cp); 2508 } 2509 2510 unlock: 2511 hci_dev_unlock(hdev); 2512 } 2513 2514 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status) 2515 { 2516 struct hci_cp_read_remote_features *cp; 2517 struct hci_conn *conn; 2518 2519 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2520 2521 if (!status) 2522 return; 2523 2524 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES); 2525 if (!cp) 2526 return; 2527 2528 hci_dev_lock(hdev); 2529 2530 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2531 if (conn) { 2532 if (conn->state == BT_CONFIG) { 2533 hci_connect_cfm(conn, status); 2534 hci_conn_drop(conn); 2535 } 2536 } 2537 2538 hci_dev_unlock(hdev); 2539 } 2540 2541 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status) 2542 { 2543 struct hci_cp_read_remote_ext_features *cp; 2544 struct hci_conn *conn; 2545 2546 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2547 2548 if (!status) 2549 return; 2550 2551 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES); 2552 if (!cp) 2553 return; 2554 2555 hci_dev_lock(hdev); 2556 2557 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2558 if (conn) { 2559 if (conn->state == BT_CONFIG) { 2560 hci_connect_cfm(conn, status); 2561 hci_conn_drop(conn); 2562 } 2563 } 2564 2565 hci_dev_unlock(hdev); 2566 } 2567 2568 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status) 2569 { 2570 struct hci_cp_setup_sync_conn *cp; 2571 struct hci_conn *acl, *sco; 2572 __u16 handle; 2573 2574 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2575 2576 if (!status) 2577 return; 2578 2579 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN); 2580 if (!cp) 2581 return; 2582 2583 handle = __le16_to_cpu(cp->handle); 2584 2585 bt_dev_dbg(hdev, "handle 0x%4.4x", handle); 2586 2587 hci_dev_lock(hdev); 2588 2589 acl = hci_conn_hash_lookup_handle(hdev, handle); 2590 if (acl) { 2591 sco = acl->link; 2592 if (sco) { 2593 sco->state = BT_CLOSED; 2594 2595 hci_connect_cfm(sco, status); 2596 hci_conn_del(sco); 2597 } 2598 } 2599 2600 hci_dev_unlock(hdev); 2601 } 2602 2603 static void hci_cs_enhanced_setup_sync_conn(struct hci_dev *hdev, __u8 status) 2604 { 2605 struct hci_cp_enhanced_setup_sync_conn *cp; 2606 struct hci_conn *acl, *sco; 2607 __u16 handle; 2608 2609 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2610 2611 if (!status) 2612 return; 2613 2614 cp = hci_sent_cmd_data(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN); 2615 if (!cp) 2616 return; 2617 2618 handle = __le16_to_cpu(cp->handle); 2619 2620 bt_dev_dbg(hdev, "handle 0x%4.4x", handle); 2621 2622 hci_dev_lock(hdev); 2623 2624 acl = hci_conn_hash_lookup_handle(hdev, handle); 2625 if (acl) { 2626 sco = acl->link; 2627 if (sco) { 2628 sco->state = BT_CLOSED; 2629 2630 hci_connect_cfm(sco, status); 2631 hci_conn_del(sco); 2632 } 2633 } 2634 2635 hci_dev_unlock(hdev); 2636 } 2637 2638 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status) 2639 { 2640 struct hci_cp_sniff_mode *cp; 2641 struct hci_conn *conn; 2642 2643 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2644 2645 if (!status) 2646 return; 2647 2648 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE); 2649 if (!cp) 2650 return; 2651 2652 hci_dev_lock(hdev); 2653 2654 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2655 if (conn) { 2656 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags); 2657 2658 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags)) 2659 hci_sco_setup(conn, status); 2660 } 2661 2662 hci_dev_unlock(hdev); 2663 } 2664 2665 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status) 2666 { 2667 struct hci_cp_exit_sniff_mode *cp; 2668 struct hci_conn *conn; 2669 2670 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2671 2672 if (!status) 2673 return; 2674 2675 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE); 2676 if (!cp) 2677 return; 2678 2679 hci_dev_lock(hdev); 2680 2681 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2682 if (conn) { 2683 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags); 2684 2685 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags)) 2686 hci_sco_setup(conn, status); 2687 } 2688 2689 hci_dev_unlock(hdev); 2690 } 2691 2692 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status) 2693 { 2694 struct hci_cp_disconnect *cp; 2695 struct hci_conn_params *params; 2696 struct hci_conn *conn; 2697 bool mgmt_conn; 2698 2699 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2700 2701 /* Wait for HCI_EV_DISCONN_COMPLETE if status 0x00 and not suspended 2702 * otherwise cleanup the connection immediately. 2703 */ 2704 if (!status && !hdev->suspended) 2705 return; 2706 2707 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT); 2708 if (!cp) 2709 return; 2710 2711 hci_dev_lock(hdev); 2712 2713 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2714 if (!conn) 2715 goto unlock; 2716 2717 if (status) { 2718 mgmt_disconnect_failed(hdev, &conn->dst, conn->type, 2719 conn->dst_type, status); 2720 2721 if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) { 2722 hdev->cur_adv_instance = conn->adv_instance; 2723 hci_enable_advertising(hdev); 2724 } 2725 2726 goto done; 2727 } 2728 2729 mgmt_conn = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags); 2730 2731 if (conn->type == ACL_LINK) { 2732 if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags)) 2733 hci_remove_link_key(hdev, &conn->dst); 2734 } 2735 2736 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type); 2737 if (params) { 2738 switch (params->auto_connect) { 2739 case HCI_AUTO_CONN_LINK_LOSS: 2740 if (cp->reason != HCI_ERROR_CONNECTION_TIMEOUT) 2741 break; 2742 fallthrough; 2743 2744 case HCI_AUTO_CONN_DIRECT: 2745 case HCI_AUTO_CONN_ALWAYS: 2746 list_del_init(¶ms->action); 2747 list_add(¶ms->action, &hdev->pend_le_conns); 2748 break; 2749 2750 default: 2751 break; 2752 } 2753 } 2754 2755 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type, 2756 cp->reason, mgmt_conn); 2757 2758 hci_disconn_cfm(conn, cp->reason); 2759 2760 done: 2761 /* If the disconnection failed for any reason, the upper layer 2762 * does not retry to disconnect in current implementation. 2763 * Hence, we need to do some basic cleanup here and re-enable 2764 * advertising if necessary. 2765 */ 2766 hci_conn_del(conn); 2767 unlock: 2768 hci_dev_unlock(hdev); 2769 } 2770 2771 static u8 ev_bdaddr_type(struct hci_dev *hdev, u8 type, bool *resolved) 2772 { 2773 /* When using controller based address resolution, then the new 2774 * address types 0x02 and 0x03 are used. These types need to be 2775 * converted back into either public address or random address type 2776 */ 2777 switch (type) { 2778 case ADDR_LE_DEV_PUBLIC_RESOLVED: 2779 if (resolved) 2780 *resolved = true; 2781 return ADDR_LE_DEV_PUBLIC; 2782 case ADDR_LE_DEV_RANDOM_RESOLVED: 2783 if (resolved) 2784 *resolved = true; 2785 return ADDR_LE_DEV_RANDOM; 2786 } 2787 2788 if (resolved) 2789 *resolved = false; 2790 return type; 2791 } 2792 2793 static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr, 2794 u8 peer_addr_type, u8 own_address_type, 2795 u8 filter_policy) 2796 { 2797 struct hci_conn *conn; 2798 2799 conn = hci_conn_hash_lookup_le(hdev, peer_addr, 2800 peer_addr_type); 2801 if (!conn) 2802 return; 2803 2804 own_address_type = ev_bdaddr_type(hdev, own_address_type, NULL); 2805 2806 /* Store the initiator and responder address information which 2807 * is needed for SMP. These values will not change during the 2808 * lifetime of the connection. 2809 */ 2810 conn->init_addr_type = own_address_type; 2811 if (own_address_type == ADDR_LE_DEV_RANDOM) 2812 bacpy(&conn->init_addr, &hdev->random_addr); 2813 else 2814 bacpy(&conn->init_addr, &hdev->bdaddr); 2815 2816 conn->resp_addr_type = peer_addr_type; 2817 bacpy(&conn->resp_addr, peer_addr); 2818 2819 /* We don't want the connection attempt to stick around 2820 * indefinitely since LE doesn't have a page timeout concept 2821 * like BR/EDR. Set a timer for any connection that doesn't use 2822 * the accept list for connecting. 2823 */ 2824 if (filter_policy == HCI_LE_USE_PEER_ADDR) 2825 queue_delayed_work(conn->hdev->workqueue, 2826 &conn->le_conn_timeout, 2827 conn->conn_timeout); 2828 } 2829 2830 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status) 2831 { 2832 struct hci_cp_le_create_conn *cp; 2833 2834 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2835 2836 /* All connection failure handling is taken care of by the 2837 * hci_le_conn_failed function which is triggered by the HCI 2838 * request completion callbacks used for connecting. 2839 */ 2840 if (status) 2841 return; 2842 2843 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN); 2844 if (!cp) 2845 return; 2846 2847 hci_dev_lock(hdev); 2848 2849 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type, 2850 cp->own_address_type, cp->filter_policy); 2851 2852 hci_dev_unlock(hdev); 2853 } 2854 2855 static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status) 2856 { 2857 struct hci_cp_le_ext_create_conn *cp; 2858 2859 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2860 2861 /* All connection failure handling is taken care of by the 2862 * hci_le_conn_failed function which is triggered by the HCI 2863 * request completion callbacks used for connecting. 2864 */ 2865 if (status) 2866 return; 2867 2868 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN); 2869 if (!cp) 2870 return; 2871 2872 hci_dev_lock(hdev); 2873 2874 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type, 2875 cp->own_addr_type, cp->filter_policy); 2876 2877 hci_dev_unlock(hdev); 2878 } 2879 2880 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status) 2881 { 2882 struct hci_cp_le_read_remote_features *cp; 2883 struct hci_conn *conn; 2884 2885 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2886 2887 if (!status) 2888 return; 2889 2890 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES); 2891 if (!cp) 2892 return; 2893 2894 hci_dev_lock(hdev); 2895 2896 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2897 if (conn) { 2898 if (conn->state == BT_CONFIG) { 2899 hci_connect_cfm(conn, status); 2900 hci_conn_drop(conn); 2901 } 2902 } 2903 2904 hci_dev_unlock(hdev); 2905 } 2906 2907 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status) 2908 { 2909 struct hci_cp_le_start_enc *cp; 2910 struct hci_conn *conn; 2911 2912 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2913 2914 if (!status) 2915 return; 2916 2917 hci_dev_lock(hdev); 2918 2919 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC); 2920 if (!cp) 2921 goto unlock; 2922 2923 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2924 if (!conn) 2925 goto unlock; 2926 2927 if (conn->state != BT_CONNECTED) 2928 goto unlock; 2929 2930 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE); 2931 hci_conn_drop(conn); 2932 2933 unlock: 2934 hci_dev_unlock(hdev); 2935 } 2936 2937 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status) 2938 { 2939 struct hci_cp_switch_role *cp; 2940 struct hci_conn *conn; 2941 2942 BT_DBG("%s status 0x%2.2x", hdev->name, status); 2943 2944 if (!status) 2945 return; 2946 2947 cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE); 2948 if (!cp) 2949 return; 2950 2951 hci_dev_lock(hdev); 2952 2953 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); 2954 if (conn) 2955 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags); 2956 2957 hci_dev_unlock(hdev); 2958 } 2959 2960 static void hci_inquiry_complete_evt(struct hci_dev *hdev, void *data, 2961 struct sk_buff *skb) 2962 { 2963 struct hci_ev_status *ev = data; 2964 struct discovery_state *discov = &hdev->discovery; 2965 struct inquiry_entry *e; 2966 2967 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 2968 2969 hci_conn_check_pending(hdev); 2970 2971 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags)) 2972 return; 2973 2974 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */ 2975 wake_up_bit(&hdev->flags, HCI_INQUIRY); 2976 2977 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 2978 return; 2979 2980 hci_dev_lock(hdev); 2981 2982 if (discov->state != DISCOVERY_FINDING) 2983 goto unlock; 2984 2985 if (list_empty(&discov->resolve)) { 2986 /* When BR/EDR inquiry is active and no LE scanning is in 2987 * progress, then change discovery state to indicate completion. 2988 * 2989 * When running LE scanning and BR/EDR inquiry simultaneously 2990 * and the LE scan already finished, then change the discovery 2991 * state to indicate completion. 2992 */ 2993 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) || 2994 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) 2995 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 2996 goto unlock; 2997 } 2998 2999 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED); 3000 if (e && hci_resolve_name(hdev, e) == 0) { 3001 e->name_state = NAME_PENDING; 3002 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING); 3003 discov->name_resolve_timeout = jiffies + NAME_RESOLVE_DURATION; 3004 } else { 3005 /* When BR/EDR inquiry is active and no LE scanning is in 3006 * progress, then change discovery state to indicate completion. 3007 * 3008 * When running LE scanning and BR/EDR inquiry simultaneously 3009 * and the LE scan already finished, then change the discovery 3010 * state to indicate completion. 3011 */ 3012 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) || 3013 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) 3014 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 3015 } 3016 3017 unlock: 3018 hci_dev_unlock(hdev); 3019 } 3020 3021 static void hci_inquiry_result_evt(struct hci_dev *hdev, void *edata, 3022 struct sk_buff *skb) 3023 { 3024 struct hci_ev_inquiry_result *ev = edata; 3025 struct inquiry_data data; 3026 int i; 3027 3028 if (!hci_ev_skb_pull(hdev, skb, HCI_EV_INQUIRY_RESULT, 3029 flex_array_size(ev, info, ev->num))) 3030 return; 3031 3032 bt_dev_dbg(hdev, "num %d", ev->num); 3033 3034 if (!ev->num) 3035 return; 3036 3037 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) 3038 return; 3039 3040 hci_dev_lock(hdev); 3041 3042 for (i = 0; i < ev->num; i++) { 3043 struct inquiry_info *info = &ev->info[i]; 3044 u32 flags; 3045 3046 bacpy(&data.bdaddr, &info->bdaddr); 3047 data.pscan_rep_mode = info->pscan_rep_mode; 3048 data.pscan_period_mode = info->pscan_period_mode; 3049 data.pscan_mode = info->pscan_mode; 3050 memcpy(data.dev_class, info->dev_class, 3); 3051 data.clock_offset = info->clock_offset; 3052 data.rssi = HCI_RSSI_INVALID; 3053 data.ssp_mode = 0x00; 3054 3055 flags = hci_inquiry_cache_update(hdev, &data, false); 3056 3057 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 3058 info->dev_class, HCI_RSSI_INVALID, 3059 flags, NULL, 0, NULL, 0); 3060 } 3061 3062 hci_dev_unlock(hdev); 3063 } 3064 3065 static void hci_conn_complete_evt(struct hci_dev *hdev, void *data, 3066 struct sk_buff *skb) 3067 { 3068 struct hci_ev_conn_complete *ev = data; 3069 struct hci_conn *conn; 3070 3071 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 3072 3073 hci_dev_lock(hdev); 3074 3075 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); 3076 if (!conn) { 3077 /* Connection may not exist if auto-connected. Check the bredr 3078 * allowlist to see if this device is allowed to auto connect. 3079 * If link is an ACL type, create a connection class 3080 * automatically. 3081 * 3082 * Auto-connect will only occur if the event filter is 3083 * programmed with a given address. Right now, event filter is 3084 * only used during suspend. 3085 */ 3086 if (ev->link_type == ACL_LINK && 3087 hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, 3088 &ev->bdaddr, 3089 BDADDR_BREDR)) { 3090 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr, 3091 HCI_ROLE_SLAVE); 3092 if (!conn) { 3093 bt_dev_err(hdev, "no memory for new conn"); 3094 goto unlock; 3095 } 3096 } else { 3097 if (ev->link_type != SCO_LINK) 3098 goto unlock; 3099 3100 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, 3101 &ev->bdaddr); 3102 if (!conn) 3103 goto unlock; 3104 3105 conn->type = SCO_LINK; 3106 } 3107 } 3108 3109 if (!ev->status) { 3110 conn->handle = __le16_to_cpu(ev->handle); 3111 3112 if (conn->type == ACL_LINK) { 3113 conn->state = BT_CONFIG; 3114 hci_conn_hold(conn); 3115 3116 if (!conn->out && !hci_conn_ssp_enabled(conn) && 3117 !hci_find_link_key(hdev, &ev->bdaddr)) 3118 conn->disc_timeout = HCI_PAIRING_TIMEOUT; 3119 else 3120 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 3121 } else 3122 conn->state = BT_CONNECTED; 3123 3124 hci_debugfs_create_conn(conn); 3125 hci_conn_add_sysfs(conn); 3126 3127 if (test_bit(HCI_AUTH, &hdev->flags)) 3128 set_bit(HCI_CONN_AUTH, &conn->flags); 3129 3130 if (test_bit(HCI_ENCRYPT, &hdev->flags)) 3131 set_bit(HCI_CONN_ENCRYPT, &conn->flags); 3132 3133 /* Get remote features */ 3134 if (conn->type == ACL_LINK) { 3135 struct hci_cp_read_remote_features cp; 3136 cp.handle = ev->handle; 3137 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES, 3138 sizeof(cp), &cp); 3139 3140 hci_req_update_scan(hdev); 3141 } 3142 3143 /* Set packet type for incoming connection */ 3144 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) { 3145 struct hci_cp_change_conn_ptype cp; 3146 cp.handle = ev->handle; 3147 cp.pkt_type = cpu_to_le16(conn->pkt_type); 3148 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp), 3149 &cp); 3150 } 3151 } else { 3152 conn->state = BT_CLOSED; 3153 if (conn->type == ACL_LINK) 3154 mgmt_connect_failed(hdev, &conn->dst, conn->type, 3155 conn->dst_type, ev->status); 3156 } 3157 3158 if (conn->type == ACL_LINK) 3159 hci_sco_setup(conn, ev->status); 3160 3161 if (ev->status) { 3162 hci_connect_cfm(conn, ev->status); 3163 hci_conn_del(conn); 3164 } else if (ev->link_type == SCO_LINK) { 3165 switch (conn->setting & SCO_AIRMODE_MASK) { 3166 case SCO_AIRMODE_CVSD: 3167 if (hdev->notify) 3168 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD); 3169 break; 3170 } 3171 3172 hci_connect_cfm(conn, ev->status); 3173 } 3174 3175 unlock: 3176 hci_dev_unlock(hdev); 3177 3178 hci_conn_check_pending(hdev); 3179 } 3180 3181 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr) 3182 { 3183 struct hci_cp_reject_conn_req cp; 3184 3185 bacpy(&cp.bdaddr, bdaddr); 3186 cp.reason = HCI_ERROR_REJ_BAD_ADDR; 3187 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp); 3188 } 3189 3190 static void hci_conn_request_evt(struct hci_dev *hdev, void *data, 3191 struct sk_buff *skb) 3192 { 3193 struct hci_ev_conn_request *ev = data; 3194 int mask = hdev->link_mode; 3195 struct inquiry_entry *ie; 3196 struct hci_conn *conn; 3197 __u8 flags = 0; 3198 3199 bt_dev_dbg(hdev, "bdaddr %pMR type 0x%x", &ev->bdaddr, ev->link_type); 3200 3201 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type, 3202 &flags); 3203 3204 if (!(mask & HCI_LM_ACCEPT)) { 3205 hci_reject_conn(hdev, &ev->bdaddr); 3206 return; 3207 } 3208 3209 if (hci_bdaddr_list_lookup(&hdev->reject_list, &ev->bdaddr, 3210 BDADDR_BREDR)) { 3211 hci_reject_conn(hdev, &ev->bdaddr); 3212 return; 3213 } 3214 3215 /* Require HCI_CONNECTABLE or an accept list entry to accept the 3216 * connection. These features are only touched through mgmt so 3217 * only do the checks if HCI_MGMT is set. 3218 */ 3219 if (hci_dev_test_flag(hdev, HCI_MGMT) && 3220 !hci_dev_test_flag(hdev, HCI_CONNECTABLE) && 3221 !hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr, 3222 BDADDR_BREDR)) { 3223 hci_reject_conn(hdev, &ev->bdaddr); 3224 return; 3225 } 3226 3227 /* Connection accepted */ 3228 3229 hci_dev_lock(hdev); 3230 3231 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr); 3232 if (ie) 3233 memcpy(ie->data.dev_class, ev->dev_class, 3); 3234 3235 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, 3236 &ev->bdaddr); 3237 if (!conn) { 3238 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr, 3239 HCI_ROLE_SLAVE); 3240 if (!conn) { 3241 bt_dev_err(hdev, "no memory for new connection"); 3242 hci_dev_unlock(hdev); 3243 return; 3244 } 3245 } 3246 3247 memcpy(conn->dev_class, ev->dev_class, 3); 3248 3249 hci_dev_unlock(hdev); 3250 3251 if (ev->link_type == ACL_LINK || 3252 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) { 3253 struct hci_cp_accept_conn_req cp; 3254 conn->state = BT_CONNECT; 3255 3256 bacpy(&cp.bdaddr, &ev->bdaddr); 3257 3258 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER)) 3259 cp.role = 0x00; /* Become central */ 3260 else 3261 cp.role = 0x01; /* Remain peripheral */ 3262 3263 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp); 3264 } else if (!(flags & HCI_PROTO_DEFER)) { 3265 struct hci_cp_accept_sync_conn_req cp; 3266 conn->state = BT_CONNECT; 3267 3268 bacpy(&cp.bdaddr, &ev->bdaddr); 3269 cp.pkt_type = cpu_to_le16(conn->pkt_type); 3270 3271 cp.tx_bandwidth = cpu_to_le32(0x00001f40); 3272 cp.rx_bandwidth = cpu_to_le32(0x00001f40); 3273 cp.max_latency = cpu_to_le16(0xffff); 3274 cp.content_format = cpu_to_le16(hdev->voice_setting); 3275 cp.retrans_effort = 0xff; 3276 3277 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp), 3278 &cp); 3279 } else { 3280 conn->state = BT_CONNECT2; 3281 hci_connect_cfm(conn, 0); 3282 } 3283 } 3284 3285 static u8 hci_to_mgmt_reason(u8 err) 3286 { 3287 switch (err) { 3288 case HCI_ERROR_CONNECTION_TIMEOUT: 3289 return MGMT_DEV_DISCONN_TIMEOUT; 3290 case HCI_ERROR_REMOTE_USER_TERM: 3291 case HCI_ERROR_REMOTE_LOW_RESOURCES: 3292 case HCI_ERROR_REMOTE_POWER_OFF: 3293 return MGMT_DEV_DISCONN_REMOTE; 3294 case HCI_ERROR_LOCAL_HOST_TERM: 3295 return MGMT_DEV_DISCONN_LOCAL_HOST; 3296 default: 3297 return MGMT_DEV_DISCONN_UNKNOWN; 3298 } 3299 } 3300 3301 static void hci_disconn_complete_evt(struct hci_dev *hdev, void *data, 3302 struct sk_buff *skb) 3303 { 3304 struct hci_ev_disconn_complete *ev = data; 3305 u8 reason; 3306 struct hci_conn_params *params; 3307 struct hci_conn *conn; 3308 bool mgmt_connected; 3309 3310 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 3311 3312 hci_dev_lock(hdev); 3313 3314 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 3315 if (!conn) 3316 goto unlock; 3317 3318 if (ev->status) { 3319 mgmt_disconnect_failed(hdev, &conn->dst, conn->type, 3320 conn->dst_type, ev->status); 3321 goto unlock; 3322 } 3323 3324 conn->state = BT_CLOSED; 3325 3326 mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags); 3327 3328 if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags)) 3329 reason = MGMT_DEV_DISCONN_AUTH_FAILURE; 3330 else 3331 reason = hci_to_mgmt_reason(ev->reason); 3332 3333 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type, 3334 reason, mgmt_connected); 3335 3336 if (conn->type == ACL_LINK) { 3337 if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags)) 3338 hci_remove_link_key(hdev, &conn->dst); 3339 3340 hci_req_update_scan(hdev); 3341 } 3342 3343 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type); 3344 if (params) { 3345 switch (params->auto_connect) { 3346 case HCI_AUTO_CONN_LINK_LOSS: 3347 if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT) 3348 break; 3349 fallthrough; 3350 3351 case HCI_AUTO_CONN_DIRECT: 3352 case HCI_AUTO_CONN_ALWAYS: 3353 list_del_init(¶ms->action); 3354 list_add(¶ms->action, &hdev->pend_le_conns); 3355 hci_update_passive_scan(hdev); 3356 break; 3357 3358 default: 3359 break; 3360 } 3361 } 3362 3363 hci_disconn_cfm(conn, ev->reason); 3364 3365 /* Re-enable advertising if necessary, since it might 3366 * have been disabled by the connection. From the 3367 * HCI_LE_Set_Advertise_Enable command description in 3368 * the core specification (v4.0): 3369 * "The Controller shall continue advertising until the Host 3370 * issues an LE_Set_Advertise_Enable command with 3371 * Advertising_Enable set to 0x00 (Advertising is disabled) 3372 * or until a connection is created or until the Advertising 3373 * is timed out due to Directed Advertising." 3374 */ 3375 if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) { 3376 hdev->cur_adv_instance = conn->adv_instance; 3377 hci_enable_advertising(hdev); 3378 } 3379 3380 hci_conn_del(conn); 3381 3382 unlock: 3383 hci_dev_unlock(hdev); 3384 } 3385 3386 static void hci_auth_complete_evt(struct hci_dev *hdev, void *data, 3387 struct sk_buff *skb) 3388 { 3389 struct hci_ev_auth_complete *ev = data; 3390 struct hci_conn *conn; 3391 3392 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 3393 3394 hci_dev_lock(hdev); 3395 3396 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 3397 if (!conn) 3398 goto unlock; 3399 3400 if (!ev->status) { 3401 clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags); 3402 3403 if (!hci_conn_ssp_enabled(conn) && 3404 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) { 3405 bt_dev_info(hdev, "re-auth of legacy device is not possible."); 3406 } else { 3407 set_bit(HCI_CONN_AUTH, &conn->flags); 3408 conn->sec_level = conn->pending_sec_level; 3409 } 3410 } else { 3411 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING) 3412 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags); 3413 3414 mgmt_auth_failed(conn, ev->status); 3415 } 3416 3417 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags); 3418 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags); 3419 3420 if (conn->state == BT_CONFIG) { 3421 if (!ev->status && hci_conn_ssp_enabled(conn)) { 3422 struct hci_cp_set_conn_encrypt cp; 3423 cp.handle = ev->handle; 3424 cp.encrypt = 0x01; 3425 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), 3426 &cp); 3427 } else { 3428 conn->state = BT_CONNECTED; 3429 hci_connect_cfm(conn, ev->status); 3430 hci_conn_drop(conn); 3431 } 3432 } else { 3433 hci_auth_cfm(conn, ev->status); 3434 3435 hci_conn_hold(conn); 3436 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 3437 hci_conn_drop(conn); 3438 } 3439 3440 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) { 3441 if (!ev->status) { 3442 struct hci_cp_set_conn_encrypt cp; 3443 cp.handle = ev->handle; 3444 cp.encrypt = 0x01; 3445 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), 3446 &cp); 3447 } else { 3448 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); 3449 hci_encrypt_cfm(conn, ev->status); 3450 } 3451 } 3452 3453 unlock: 3454 hci_dev_unlock(hdev); 3455 } 3456 3457 static void hci_remote_name_evt(struct hci_dev *hdev, void *data, 3458 struct sk_buff *skb) 3459 { 3460 struct hci_ev_remote_name *ev = data; 3461 struct hci_conn *conn; 3462 3463 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 3464 3465 hci_conn_check_pending(hdev); 3466 3467 hci_dev_lock(hdev); 3468 3469 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 3470 3471 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 3472 goto check_auth; 3473 3474 if (ev->status == 0) 3475 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name, 3476 strnlen(ev->name, HCI_MAX_NAME_LENGTH)); 3477 else 3478 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0); 3479 3480 check_auth: 3481 if (!conn) 3482 goto unlock; 3483 3484 if (!hci_outgoing_auth_needed(hdev, conn)) 3485 goto unlock; 3486 3487 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) { 3488 struct hci_cp_auth_requested cp; 3489 3490 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags); 3491 3492 cp.handle = __cpu_to_le16(conn->handle); 3493 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp); 3494 } 3495 3496 unlock: 3497 hci_dev_unlock(hdev); 3498 } 3499 3500 static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status, 3501 u16 opcode, struct sk_buff *skb) 3502 { 3503 const struct hci_rp_read_enc_key_size *rp; 3504 struct hci_conn *conn; 3505 u16 handle; 3506 3507 BT_DBG("%s status 0x%02x", hdev->name, status); 3508 3509 if (!skb || skb->len < sizeof(*rp)) { 3510 bt_dev_err(hdev, "invalid read key size response"); 3511 return; 3512 } 3513 3514 rp = (void *)skb->data; 3515 handle = le16_to_cpu(rp->handle); 3516 3517 hci_dev_lock(hdev); 3518 3519 conn = hci_conn_hash_lookup_handle(hdev, handle); 3520 if (!conn) 3521 goto unlock; 3522 3523 /* While unexpected, the read_enc_key_size command may fail. The most 3524 * secure approach is to then assume the key size is 0 to force a 3525 * disconnection. 3526 */ 3527 if (rp->status) { 3528 bt_dev_err(hdev, "failed to read key size for handle %u", 3529 handle); 3530 conn->enc_key_size = 0; 3531 } else { 3532 conn->enc_key_size = rp->key_size; 3533 } 3534 3535 hci_encrypt_cfm(conn, 0); 3536 3537 unlock: 3538 hci_dev_unlock(hdev); 3539 } 3540 3541 static void hci_encrypt_change_evt(struct hci_dev *hdev, void *data, 3542 struct sk_buff *skb) 3543 { 3544 struct hci_ev_encrypt_change *ev = data; 3545 struct hci_conn *conn; 3546 3547 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 3548 3549 hci_dev_lock(hdev); 3550 3551 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 3552 if (!conn) 3553 goto unlock; 3554 3555 if (!ev->status) { 3556 if (ev->encrypt) { 3557 /* Encryption implies authentication */ 3558 set_bit(HCI_CONN_AUTH, &conn->flags); 3559 set_bit(HCI_CONN_ENCRYPT, &conn->flags); 3560 conn->sec_level = conn->pending_sec_level; 3561 3562 /* P-256 authentication key implies FIPS */ 3563 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256) 3564 set_bit(HCI_CONN_FIPS, &conn->flags); 3565 3566 if ((conn->type == ACL_LINK && ev->encrypt == 0x02) || 3567 conn->type == LE_LINK) 3568 set_bit(HCI_CONN_AES_CCM, &conn->flags); 3569 } else { 3570 clear_bit(HCI_CONN_ENCRYPT, &conn->flags); 3571 clear_bit(HCI_CONN_AES_CCM, &conn->flags); 3572 } 3573 } 3574 3575 /* We should disregard the current RPA and generate a new one 3576 * whenever the encryption procedure fails. 3577 */ 3578 if (ev->status && conn->type == LE_LINK) { 3579 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED); 3580 hci_adv_instances_set_rpa_expired(hdev, true); 3581 } 3582 3583 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); 3584 3585 /* Check link security requirements are met */ 3586 if (!hci_conn_check_link_mode(conn)) 3587 ev->status = HCI_ERROR_AUTH_FAILURE; 3588 3589 if (ev->status && conn->state == BT_CONNECTED) { 3590 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING) 3591 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags); 3592 3593 /* Notify upper layers so they can cleanup before 3594 * disconnecting. 3595 */ 3596 hci_encrypt_cfm(conn, ev->status); 3597 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE); 3598 hci_conn_drop(conn); 3599 goto unlock; 3600 } 3601 3602 /* Try reading the encryption key size for encrypted ACL links */ 3603 if (!ev->status && ev->encrypt && conn->type == ACL_LINK) { 3604 struct hci_cp_read_enc_key_size cp; 3605 struct hci_request req; 3606 3607 /* Only send HCI_Read_Encryption_Key_Size if the 3608 * controller really supports it. If it doesn't, assume 3609 * the default size (16). 3610 */ 3611 if (!(hdev->commands[20] & 0x10)) { 3612 conn->enc_key_size = HCI_LINK_KEY_SIZE; 3613 goto notify; 3614 } 3615 3616 hci_req_init(&req, hdev); 3617 3618 cp.handle = cpu_to_le16(conn->handle); 3619 hci_req_add(&req, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp); 3620 3621 if (hci_req_run_skb(&req, read_enc_key_size_complete)) { 3622 bt_dev_err(hdev, "sending read key size failed"); 3623 conn->enc_key_size = HCI_LINK_KEY_SIZE; 3624 goto notify; 3625 } 3626 3627 goto unlock; 3628 } 3629 3630 /* Set the default Authenticated Payload Timeout after 3631 * an LE Link is established. As per Core Spec v5.0, Vol 2, Part B 3632 * Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be 3633 * sent when the link is active and Encryption is enabled, the conn 3634 * type can be either LE or ACL and controller must support LMP Ping. 3635 * Ensure for AES-CCM encryption as well. 3636 */ 3637 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) && 3638 test_bit(HCI_CONN_AES_CCM, &conn->flags) && 3639 ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) || 3640 (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) { 3641 struct hci_cp_write_auth_payload_to cp; 3642 3643 cp.handle = cpu_to_le16(conn->handle); 3644 cp.timeout = cpu_to_le16(hdev->auth_payload_timeout); 3645 hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO, 3646 sizeof(cp), &cp); 3647 } 3648 3649 notify: 3650 hci_encrypt_cfm(conn, ev->status); 3651 3652 unlock: 3653 hci_dev_unlock(hdev); 3654 } 3655 3656 static void hci_change_link_key_complete_evt(struct hci_dev *hdev, void *data, 3657 struct sk_buff *skb) 3658 { 3659 struct hci_ev_change_link_key_complete *ev = data; 3660 struct hci_conn *conn; 3661 3662 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 3663 3664 hci_dev_lock(hdev); 3665 3666 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 3667 if (conn) { 3668 if (!ev->status) 3669 set_bit(HCI_CONN_SECURE, &conn->flags); 3670 3671 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags); 3672 3673 hci_key_change_cfm(conn, ev->status); 3674 } 3675 3676 hci_dev_unlock(hdev); 3677 } 3678 3679 static void hci_remote_features_evt(struct hci_dev *hdev, void *data, 3680 struct sk_buff *skb) 3681 { 3682 struct hci_ev_remote_features *ev = data; 3683 struct hci_conn *conn; 3684 3685 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 3686 3687 hci_dev_lock(hdev); 3688 3689 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 3690 if (!conn) 3691 goto unlock; 3692 3693 if (!ev->status) 3694 memcpy(conn->features[0], ev->features, 8); 3695 3696 if (conn->state != BT_CONFIG) 3697 goto unlock; 3698 3699 if (!ev->status && lmp_ext_feat_capable(hdev) && 3700 lmp_ext_feat_capable(conn)) { 3701 struct hci_cp_read_remote_ext_features cp; 3702 cp.handle = ev->handle; 3703 cp.page = 0x01; 3704 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES, 3705 sizeof(cp), &cp); 3706 goto unlock; 3707 } 3708 3709 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) { 3710 struct hci_cp_remote_name_req cp; 3711 memset(&cp, 0, sizeof(cp)); 3712 bacpy(&cp.bdaddr, &conn->dst); 3713 cp.pscan_rep_mode = 0x02; 3714 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp); 3715 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) 3716 mgmt_device_connected(hdev, conn, NULL, 0); 3717 3718 if (!hci_outgoing_auth_needed(hdev, conn)) { 3719 conn->state = BT_CONNECTED; 3720 hci_connect_cfm(conn, ev->status); 3721 hci_conn_drop(conn); 3722 } 3723 3724 unlock: 3725 hci_dev_unlock(hdev); 3726 } 3727 3728 static inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev, u8 ncmd) 3729 { 3730 cancel_delayed_work(&hdev->cmd_timer); 3731 3732 if (!test_bit(HCI_RESET, &hdev->flags)) { 3733 if (ncmd) { 3734 cancel_delayed_work(&hdev->ncmd_timer); 3735 atomic_set(&hdev->cmd_cnt, 1); 3736 } else { 3737 schedule_delayed_work(&hdev->ncmd_timer, 3738 HCI_NCMD_TIMEOUT); 3739 } 3740 } 3741 } 3742 3743 #define HCI_CC_VL(_op, _func, _min, _max) \ 3744 { \ 3745 .op = _op, \ 3746 .func = _func, \ 3747 .min_len = _min, \ 3748 .max_len = _max, \ 3749 } 3750 3751 #define HCI_CC(_op, _func, _len) \ 3752 HCI_CC_VL(_op, _func, _len, _len) 3753 3754 #define HCI_CC_STATUS(_op, _func) \ 3755 HCI_CC(_op, _func, sizeof(struct hci_ev_status)) 3756 3757 static const struct hci_cc { 3758 u16 op; 3759 u8 (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb); 3760 u16 min_len; 3761 u16 max_len; 3762 } hci_cc_table[] = { 3763 HCI_CC_STATUS(HCI_OP_INQUIRY_CANCEL, hci_cc_inquiry_cancel), 3764 HCI_CC_STATUS(HCI_OP_PERIODIC_INQ, hci_cc_periodic_inq), 3765 HCI_CC_STATUS(HCI_OP_EXIT_PERIODIC_INQ, hci_cc_exit_periodic_inq), 3766 HCI_CC_STATUS(HCI_OP_REMOTE_NAME_REQ_CANCEL, 3767 hci_cc_remote_name_req_cancel), 3768 HCI_CC(HCI_OP_ROLE_DISCOVERY, hci_cc_role_discovery, 3769 sizeof(struct hci_rp_role_discovery)), 3770 HCI_CC(HCI_OP_READ_LINK_POLICY, hci_cc_read_link_policy, 3771 sizeof(struct hci_rp_read_link_policy)), 3772 HCI_CC(HCI_OP_WRITE_LINK_POLICY, hci_cc_write_link_policy, 3773 sizeof(struct hci_rp_write_link_policy)), 3774 HCI_CC(HCI_OP_READ_DEF_LINK_POLICY, hci_cc_read_def_link_policy, 3775 sizeof(struct hci_rp_read_def_link_policy)), 3776 HCI_CC_STATUS(HCI_OP_WRITE_DEF_LINK_POLICY, 3777 hci_cc_write_def_link_policy), 3778 HCI_CC_STATUS(HCI_OP_RESET, hci_cc_reset), 3779 HCI_CC(HCI_OP_READ_STORED_LINK_KEY, hci_cc_read_stored_link_key, 3780 sizeof(struct hci_rp_read_stored_link_key)), 3781 HCI_CC(HCI_OP_DELETE_STORED_LINK_KEY, hci_cc_delete_stored_link_key, 3782 sizeof(struct hci_rp_delete_stored_link_key)), 3783 HCI_CC_STATUS(HCI_OP_WRITE_LOCAL_NAME, hci_cc_write_local_name), 3784 HCI_CC(HCI_OP_READ_LOCAL_NAME, hci_cc_read_local_name, 3785 sizeof(struct hci_rp_read_local_name)), 3786 HCI_CC_STATUS(HCI_OP_WRITE_AUTH_ENABLE, hci_cc_write_auth_enable), 3787 HCI_CC_STATUS(HCI_OP_WRITE_ENCRYPT_MODE, hci_cc_write_encrypt_mode), 3788 HCI_CC_STATUS(HCI_OP_WRITE_SCAN_ENABLE, hci_cc_write_scan_enable), 3789 HCI_CC_STATUS(HCI_OP_SET_EVENT_FLT, hci_cc_set_event_filter), 3790 HCI_CC(HCI_OP_READ_CLASS_OF_DEV, hci_cc_read_class_of_dev, 3791 sizeof(struct hci_rp_read_class_of_dev)), 3792 HCI_CC_STATUS(HCI_OP_WRITE_CLASS_OF_DEV, hci_cc_write_class_of_dev), 3793 HCI_CC(HCI_OP_READ_VOICE_SETTING, hci_cc_read_voice_setting, 3794 sizeof(struct hci_rp_read_voice_setting)), 3795 HCI_CC_STATUS(HCI_OP_WRITE_VOICE_SETTING, hci_cc_write_voice_setting), 3796 HCI_CC(HCI_OP_READ_NUM_SUPPORTED_IAC, hci_cc_read_num_supported_iac, 3797 sizeof(struct hci_rp_read_num_supported_iac)), 3798 HCI_CC_STATUS(HCI_OP_WRITE_SSP_MODE, hci_cc_write_ssp_mode), 3799 HCI_CC_STATUS(HCI_OP_WRITE_SC_SUPPORT, hci_cc_write_sc_support), 3800 HCI_CC(HCI_OP_READ_AUTH_PAYLOAD_TO, hci_cc_read_auth_payload_timeout, 3801 sizeof(struct hci_rp_read_auth_payload_to)), 3802 HCI_CC(HCI_OP_WRITE_AUTH_PAYLOAD_TO, hci_cc_write_auth_payload_timeout, 3803 sizeof(struct hci_rp_write_auth_payload_to)), 3804 HCI_CC(HCI_OP_READ_LOCAL_VERSION, hci_cc_read_local_version, 3805 sizeof(struct hci_rp_read_local_version)), 3806 HCI_CC(HCI_OP_READ_LOCAL_COMMANDS, hci_cc_read_local_commands, 3807 sizeof(struct hci_rp_read_local_commands)), 3808 HCI_CC(HCI_OP_READ_LOCAL_FEATURES, hci_cc_read_local_features, 3809 sizeof(struct hci_rp_read_local_features)), 3810 HCI_CC(HCI_OP_READ_LOCAL_EXT_FEATURES, hci_cc_read_local_ext_features, 3811 sizeof(struct hci_rp_read_local_ext_features)), 3812 HCI_CC(HCI_OP_READ_BUFFER_SIZE, hci_cc_read_buffer_size, 3813 sizeof(struct hci_rp_read_buffer_size)), 3814 HCI_CC(HCI_OP_READ_BD_ADDR, hci_cc_read_bd_addr, 3815 sizeof(struct hci_rp_read_bd_addr)), 3816 HCI_CC(HCI_OP_READ_LOCAL_PAIRING_OPTS, hci_cc_read_local_pairing_opts, 3817 sizeof(struct hci_rp_read_local_pairing_opts)), 3818 HCI_CC(HCI_OP_READ_PAGE_SCAN_ACTIVITY, hci_cc_read_page_scan_activity, 3819 sizeof(struct hci_rp_read_page_scan_activity)), 3820 HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_ACTIVITY, 3821 hci_cc_write_page_scan_activity), 3822 HCI_CC(HCI_OP_READ_PAGE_SCAN_TYPE, hci_cc_read_page_scan_type, 3823 sizeof(struct hci_rp_read_page_scan_type)), 3824 HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_TYPE, hci_cc_write_page_scan_type), 3825 HCI_CC(HCI_OP_READ_DATA_BLOCK_SIZE, hci_cc_read_data_block_size, 3826 sizeof(struct hci_rp_read_data_block_size)), 3827 HCI_CC(HCI_OP_READ_FLOW_CONTROL_MODE, hci_cc_read_flow_control_mode, 3828 sizeof(struct hci_rp_read_flow_control_mode)), 3829 HCI_CC(HCI_OP_READ_LOCAL_AMP_INFO, hci_cc_read_local_amp_info, 3830 sizeof(struct hci_rp_read_local_amp_info)), 3831 HCI_CC(HCI_OP_READ_CLOCK, hci_cc_read_clock, 3832 sizeof(struct hci_rp_read_clock)), 3833 HCI_CC(HCI_OP_READ_INQ_RSP_TX_POWER, hci_cc_read_inq_rsp_tx_power, 3834 sizeof(struct hci_rp_read_inq_rsp_tx_power)), 3835 HCI_CC(HCI_OP_READ_DEF_ERR_DATA_REPORTING, 3836 hci_cc_read_def_err_data_reporting, 3837 sizeof(struct hci_rp_read_def_err_data_reporting)), 3838 HCI_CC_STATUS(HCI_OP_WRITE_DEF_ERR_DATA_REPORTING, 3839 hci_cc_write_def_err_data_reporting), 3840 HCI_CC(HCI_OP_PIN_CODE_REPLY, hci_cc_pin_code_reply, 3841 sizeof(struct hci_rp_pin_code_reply)), 3842 HCI_CC(HCI_OP_PIN_CODE_NEG_REPLY, hci_cc_pin_code_neg_reply, 3843 sizeof(struct hci_rp_pin_code_neg_reply)), 3844 HCI_CC(HCI_OP_READ_LOCAL_OOB_DATA, hci_cc_read_local_oob_data, 3845 sizeof(struct hci_rp_read_local_oob_data)), 3846 HCI_CC(HCI_OP_READ_LOCAL_OOB_EXT_DATA, hci_cc_read_local_oob_ext_data, 3847 sizeof(struct hci_rp_read_local_oob_ext_data)), 3848 HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE, hci_cc_le_read_buffer_size, 3849 sizeof(struct hci_rp_le_read_buffer_size)), 3850 HCI_CC(HCI_OP_LE_READ_LOCAL_FEATURES, hci_cc_le_read_local_features, 3851 sizeof(struct hci_rp_le_read_local_features)), 3852 HCI_CC(HCI_OP_LE_READ_ADV_TX_POWER, hci_cc_le_read_adv_tx_power, 3853 sizeof(struct hci_rp_le_read_adv_tx_power)), 3854 HCI_CC(HCI_OP_USER_CONFIRM_REPLY, hci_cc_user_confirm_reply, 3855 sizeof(struct hci_rp_user_confirm_reply)), 3856 HCI_CC(HCI_OP_USER_CONFIRM_NEG_REPLY, hci_cc_user_confirm_neg_reply, 3857 sizeof(struct hci_rp_user_confirm_reply)), 3858 HCI_CC(HCI_OP_USER_PASSKEY_REPLY, hci_cc_user_passkey_reply, 3859 sizeof(struct hci_rp_user_confirm_reply)), 3860 HCI_CC(HCI_OP_USER_PASSKEY_NEG_REPLY, hci_cc_user_passkey_neg_reply, 3861 sizeof(struct hci_rp_user_confirm_reply)), 3862 HCI_CC_STATUS(HCI_OP_LE_SET_RANDOM_ADDR, hci_cc_le_set_random_addr), 3863 HCI_CC_STATUS(HCI_OP_LE_SET_ADV_ENABLE, hci_cc_le_set_adv_enable), 3864 HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_PARAM, hci_cc_le_set_scan_param), 3865 HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_ENABLE, hci_cc_le_set_scan_enable), 3866 HCI_CC(HCI_OP_LE_READ_ACCEPT_LIST_SIZE, 3867 hci_cc_le_read_accept_list_size, 3868 sizeof(struct hci_rp_le_read_accept_list_size)), 3869 HCI_CC_STATUS(HCI_OP_LE_CLEAR_ACCEPT_LIST, hci_cc_le_clear_accept_list), 3870 HCI_CC_STATUS(HCI_OP_LE_ADD_TO_ACCEPT_LIST, 3871 hci_cc_le_add_to_accept_list), 3872 HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_ACCEPT_LIST, 3873 hci_cc_le_del_from_accept_list), 3874 HCI_CC(HCI_OP_LE_READ_SUPPORTED_STATES, hci_cc_le_read_supported_states, 3875 sizeof(struct hci_rp_le_read_supported_states)), 3876 HCI_CC(HCI_OP_LE_READ_DEF_DATA_LEN, hci_cc_le_read_def_data_len, 3877 sizeof(struct hci_rp_le_read_def_data_len)), 3878 HCI_CC_STATUS(HCI_OP_LE_WRITE_DEF_DATA_LEN, 3879 hci_cc_le_write_def_data_len), 3880 HCI_CC_STATUS(HCI_OP_LE_ADD_TO_RESOLV_LIST, 3881 hci_cc_le_add_to_resolv_list), 3882 HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_RESOLV_LIST, 3883 hci_cc_le_del_from_resolv_list), 3884 HCI_CC_STATUS(HCI_OP_LE_CLEAR_RESOLV_LIST, 3885 hci_cc_le_clear_resolv_list), 3886 HCI_CC(HCI_OP_LE_READ_RESOLV_LIST_SIZE, hci_cc_le_read_resolv_list_size, 3887 sizeof(struct hci_rp_le_read_resolv_list_size)), 3888 HCI_CC_STATUS(HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 3889 hci_cc_le_set_addr_resolution_enable), 3890 HCI_CC(HCI_OP_LE_READ_MAX_DATA_LEN, hci_cc_le_read_max_data_len, 3891 sizeof(struct hci_rp_le_read_max_data_len)), 3892 HCI_CC_STATUS(HCI_OP_WRITE_LE_HOST_SUPPORTED, 3893 hci_cc_write_le_host_supported), 3894 HCI_CC_STATUS(HCI_OP_LE_SET_ADV_PARAM, hci_cc_set_adv_param), 3895 HCI_CC(HCI_OP_READ_RSSI, hci_cc_read_rssi, 3896 sizeof(struct hci_rp_read_rssi)), 3897 HCI_CC(HCI_OP_READ_TX_POWER, hci_cc_read_tx_power, 3898 sizeof(struct hci_rp_read_tx_power)), 3899 HCI_CC_STATUS(HCI_OP_WRITE_SSP_DEBUG_MODE, hci_cc_write_ssp_debug_mode), 3900 HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_PARAMS, 3901 hci_cc_le_set_ext_scan_param), 3902 HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_ENABLE, 3903 hci_cc_le_set_ext_scan_enable), 3904 HCI_CC_STATUS(HCI_OP_LE_SET_DEFAULT_PHY, hci_cc_le_set_default_phy), 3905 HCI_CC(HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS, 3906 hci_cc_le_read_num_adv_sets, 3907 sizeof(struct hci_rp_le_read_num_supported_adv_sets)), 3908 HCI_CC(HCI_OP_LE_SET_EXT_ADV_PARAMS, hci_cc_set_ext_adv_param, 3909 sizeof(struct hci_rp_le_set_ext_adv_params)), 3910 HCI_CC_STATUS(HCI_OP_LE_SET_EXT_ADV_ENABLE, 3911 hci_cc_le_set_ext_adv_enable), 3912 HCI_CC_STATUS(HCI_OP_LE_SET_ADV_SET_RAND_ADDR, 3913 hci_cc_le_set_adv_set_random_addr), 3914 HCI_CC_STATUS(HCI_OP_LE_REMOVE_ADV_SET, hci_cc_le_remove_adv_set), 3915 HCI_CC_STATUS(HCI_OP_LE_CLEAR_ADV_SETS, hci_cc_le_clear_adv_sets), 3916 HCI_CC(HCI_OP_LE_READ_TRANSMIT_POWER, hci_cc_le_read_transmit_power, 3917 sizeof(struct hci_rp_le_read_transmit_power)), 3918 HCI_CC_STATUS(HCI_OP_LE_SET_PRIVACY_MODE, hci_cc_le_set_privacy_mode) 3919 }; 3920 3921 static u8 hci_cc_func(struct hci_dev *hdev, const struct hci_cc *cc, 3922 struct sk_buff *skb) 3923 { 3924 void *data; 3925 3926 if (skb->len < cc->min_len) { 3927 bt_dev_err(hdev, "unexpected cc 0x%4.4x length: %u < %u", 3928 cc->op, skb->len, cc->min_len); 3929 return HCI_ERROR_UNSPECIFIED; 3930 } 3931 3932 /* Just warn if the length is over max_len size it still be possible to 3933 * partially parse the cc so leave to callback to decide if that is 3934 * acceptable. 3935 */ 3936 if (skb->len > cc->max_len) 3937 bt_dev_warn(hdev, "unexpected cc 0x%4.4x length: %u > %u", 3938 cc->op, skb->len, cc->max_len); 3939 3940 data = hci_cc_skb_pull(hdev, skb, cc->op, cc->min_len); 3941 if (!data) 3942 return HCI_ERROR_UNSPECIFIED; 3943 3944 return cc->func(hdev, data, skb); 3945 } 3946 3947 static void hci_cmd_complete_evt(struct hci_dev *hdev, void *data, 3948 struct sk_buff *skb, u16 *opcode, u8 *status, 3949 hci_req_complete_t *req_complete, 3950 hci_req_complete_skb_t *req_complete_skb) 3951 { 3952 struct hci_ev_cmd_complete *ev = data; 3953 int i; 3954 3955 *opcode = __le16_to_cpu(ev->opcode); 3956 3957 bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode); 3958 3959 for (i = 0; i < ARRAY_SIZE(hci_cc_table); i++) { 3960 if (hci_cc_table[i].op == *opcode) { 3961 *status = hci_cc_func(hdev, &hci_cc_table[i], skb); 3962 break; 3963 } 3964 } 3965 3966 handle_cmd_cnt_and_timer(hdev, ev->ncmd); 3967 3968 hci_req_cmd_complete(hdev, *opcode, *status, req_complete, 3969 req_complete_skb); 3970 3971 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) { 3972 bt_dev_err(hdev, 3973 "unexpected event for opcode 0x%4.4x", *opcode); 3974 return; 3975 } 3976 3977 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q)) 3978 queue_work(hdev->workqueue, &hdev->cmd_work); 3979 } 3980 3981 #define HCI_CS(_op, _func) \ 3982 { \ 3983 .op = _op, \ 3984 .func = _func, \ 3985 } 3986 3987 static const struct hci_cs { 3988 u16 op; 3989 void (*func)(struct hci_dev *hdev, __u8 status); 3990 } hci_cs_table[] = { 3991 HCI_CS(HCI_OP_INQUIRY, hci_cs_inquiry), 3992 HCI_CS(HCI_OP_CREATE_CONN, hci_cs_create_conn), 3993 HCI_CS(HCI_OP_DISCONNECT, hci_cs_disconnect), 3994 HCI_CS(HCI_OP_ADD_SCO, hci_cs_add_sco), 3995 HCI_CS(HCI_OP_AUTH_REQUESTED, hci_cs_auth_requested), 3996 HCI_CS(HCI_OP_SET_CONN_ENCRYPT, hci_cs_set_conn_encrypt), 3997 HCI_CS(HCI_OP_REMOTE_NAME_REQ, hci_cs_remote_name_req), 3998 HCI_CS(HCI_OP_READ_REMOTE_FEATURES, hci_cs_read_remote_features), 3999 HCI_CS(HCI_OP_READ_REMOTE_EXT_FEATURES, 4000 hci_cs_read_remote_ext_features), 4001 HCI_CS(HCI_OP_SETUP_SYNC_CONN, hci_cs_setup_sync_conn), 4002 HCI_CS(HCI_OP_ENHANCED_SETUP_SYNC_CONN, 4003 hci_cs_enhanced_setup_sync_conn), 4004 HCI_CS(HCI_OP_SNIFF_MODE, hci_cs_sniff_mode), 4005 HCI_CS(HCI_OP_EXIT_SNIFF_MODE, hci_cs_exit_sniff_mode), 4006 HCI_CS(HCI_OP_SWITCH_ROLE, hci_cs_switch_role), 4007 HCI_CS(HCI_OP_LE_CREATE_CONN, hci_cs_le_create_conn), 4008 HCI_CS(HCI_OP_LE_READ_REMOTE_FEATURES, hci_cs_le_read_remote_features), 4009 HCI_CS(HCI_OP_LE_START_ENC, hci_cs_le_start_enc), 4010 HCI_CS(HCI_OP_LE_EXT_CREATE_CONN, hci_cs_le_ext_create_conn) 4011 }; 4012 4013 static void hci_cmd_status_evt(struct hci_dev *hdev, void *data, 4014 struct sk_buff *skb, u16 *opcode, u8 *status, 4015 hci_req_complete_t *req_complete, 4016 hci_req_complete_skb_t *req_complete_skb) 4017 { 4018 struct hci_ev_cmd_status *ev = data; 4019 int i; 4020 4021 *opcode = __le16_to_cpu(ev->opcode); 4022 *status = ev->status; 4023 4024 bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode); 4025 4026 for (i = 0; i < ARRAY_SIZE(hci_cs_table); i++) { 4027 if (hci_cs_table[i].op == *opcode) { 4028 hci_cs_table[i].func(hdev, ev->status); 4029 break; 4030 } 4031 } 4032 4033 handle_cmd_cnt_and_timer(hdev, ev->ncmd); 4034 4035 /* Indicate request completion if the command failed. Also, if 4036 * we're not waiting for a special event and we get a success 4037 * command status we should try to flag the request as completed 4038 * (since for this kind of commands there will not be a command 4039 * complete event). 4040 */ 4041 if (ev->status || (hdev->sent_cmd && !hci_skb_event(hdev->sent_cmd))) { 4042 hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete, 4043 req_complete_skb); 4044 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) { 4045 bt_dev_err(hdev, "unexpected event for opcode 0x%4.4x", 4046 *opcode); 4047 return; 4048 } 4049 } 4050 4051 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q)) 4052 queue_work(hdev->workqueue, &hdev->cmd_work); 4053 } 4054 4055 static void hci_hardware_error_evt(struct hci_dev *hdev, void *data, 4056 struct sk_buff *skb) 4057 { 4058 struct hci_ev_hardware_error *ev = data; 4059 4060 bt_dev_dbg(hdev, "code 0x%2.2x", ev->code); 4061 4062 hdev->hw_error_code = ev->code; 4063 4064 queue_work(hdev->req_workqueue, &hdev->error_reset); 4065 } 4066 4067 static void hci_role_change_evt(struct hci_dev *hdev, void *data, 4068 struct sk_buff *skb) 4069 { 4070 struct hci_ev_role_change *ev = data; 4071 struct hci_conn *conn; 4072 4073 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 4074 4075 hci_dev_lock(hdev); 4076 4077 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 4078 if (conn) { 4079 if (!ev->status) 4080 conn->role = ev->role; 4081 4082 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags); 4083 4084 hci_role_switch_cfm(conn, ev->status, ev->role); 4085 } 4086 4087 hci_dev_unlock(hdev); 4088 } 4089 4090 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data, 4091 struct sk_buff *skb) 4092 { 4093 struct hci_ev_num_comp_pkts *ev = data; 4094 int i; 4095 4096 if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_PKTS, 4097 flex_array_size(ev, handles, ev->num))) 4098 return; 4099 4100 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) { 4101 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode); 4102 return; 4103 } 4104 4105 bt_dev_dbg(hdev, "num %d", ev->num); 4106 4107 for (i = 0; i < ev->num; i++) { 4108 struct hci_comp_pkts_info *info = &ev->handles[i]; 4109 struct hci_conn *conn; 4110 __u16 handle, count; 4111 4112 handle = __le16_to_cpu(info->handle); 4113 count = __le16_to_cpu(info->count); 4114 4115 conn = hci_conn_hash_lookup_handle(hdev, handle); 4116 if (!conn) 4117 continue; 4118 4119 conn->sent -= count; 4120 4121 switch (conn->type) { 4122 case ACL_LINK: 4123 hdev->acl_cnt += count; 4124 if (hdev->acl_cnt > hdev->acl_pkts) 4125 hdev->acl_cnt = hdev->acl_pkts; 4126 break; 4127 4128 case LE_LINK: 4129 if (hdev->le_pkts) { 4130 hdev->le_cnt += count; 4131 if (hdev->le_cnt > hdev->le_pkts) 4132 hdev->le_cnt = hdev->le_pkts; 4133 } else { 4134 hdev->acl_cnt += count; 4135 if (hdev->acl_cnt > hdev->acl_pkts) 4136 hdev->acl_cnt = hdev->acl_pkts; 4137 } 4138 break; 4139 4140 case SCO_LINK: 4141 hdev->sco_cnt += count; 4142 if (hdev->sco_cnt > hdev->sco_pkts) 4143 hdev->sco_cnt = hdev->sco_pkts; 4144 break; 4145 4146 default: 4147 bt_dev_err(hdev, "unknown type %d conn %p", 4148 conn->type, conn); 4149 break; 4150 } 4151 } 4152 4153 queue_work(hdev->workqueue, &hdev->tx_work); 4154 } 4155 4156 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev, 4157 __u16 handle) 4158 { 4159 struct hci_chan *chan; 4160 4161 switch (hdev->dev_type) { 4162 case HCI_PRIMARY: 4163 return hci_conn_hash_lookup_handle(hdev, handle); 4164 case HCI_AMP: 4165 chan = hci_chan_lookup_handle(hdev, handle); 4166 if (chan) 4167 return chan->conn; 4168 break; 4169 default: 4170 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type); 4171 break; 4172 } 4173 4174 return NULL; 4175 } 4176 4177 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, void *data, 4178 struct sk_buff *skb) 4179 { 4180 struct hci_ev_num_comp_blocks *ev = data; 4181 int i; 4182 4183 if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_BLOCKS, 4184 flex_array_size(ev, handles, ev->num_hndl))) 4185 return; 4186 4187 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) { 4188 bt_dev_err(hdev, "wrong event for mode %d", 4189 hdev->flow_ctl_mode); 4190 return; 4191 } 4192 4193 bt_dev_dbg(hdev, "num_blocks %d num_hndl %d", ev->num_blocks, 4194 ev->num_hndl); 4195 4196 for (i = 0; i < ev->num_hndl; i++) { 4197 struct hci_comp_blocks_info *info = &ev->handles[i]; 4198 struct hci_conn *conn = NULL; 4199 __u16 handle, block_count; 4200 4201 handle = __le16_to_cpu(info->handle); 4202 block_count = __le16_to_cpu(info->blocks); 4203 4204 conn = __hci_conn_lookup_handle(hdev, handle); 4205 if (!conn) 4206 continue; 4207 4208 conn->sent -= block_count; 4209 4210 switch (conn->type) { 4211 case ACL_LINK: 4212 case AMP_LINK: 4213 hdev->block_cnt += block_count; 4214 if (hdev->block_cnt > hdev->num_blocks) 4215 hdev->block_cnt = hdev->num_blocks; 4216 break; 4217 4218 default: 4219 bt_dev_err(hdev, "unknown type %d conn %p", 4220 conn->type, conn); 4221 break; 4222 } 4223 } 4224 4225 queue_work(hdev->workqueue, &hdev->tx_work); 4226 } 4227 4228 static void hci_mode_change_evt(struct hci_dev *hdev, void *data, 4229 struct sk_buff *skb) 4230 { 4231 struct hci_ev_mode_change *ev = data; 4232 struct hci_conn *conn; 4233 4234 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 4235 4236 hci_dev_lock(hdev); 4237 4238 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 4239 if (conn) { 4240 conn->mode = ev->mode; 4241 4242 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, 4243 &conn->flags)) { 4244 if (conn->mode == HCI_CM_ACTIVE) 4245 set_bit(HCI_CONN_POWER_SAVE, &conn->flags); 4246 else 4247 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags); 4248 } 4249 4250 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags)) 4251 hci_sco_setup(conn, ev->status); 4252 } 4253 4254 hci_dev_unlock(hdev); 4255 } 4256 4257 static void hci_pin_code_request_evt(struct hci_dev *hdev, void *data, 4258 struct sk_buff *skb) 4259 { 4260 struct hci_ev_pin_code_req *ev = data; 4261 struct hci_conn *conn; 4262 4263 bt_dev_dbg(hdev, ""); 4264 4265 hci_dev_lock(hdev); 4266 4267 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 4268 if (!conn) 4269 goto unlock; 4270 4271 if (conn->state == BT_CONNECTED) { 4272 hci_conn_hold(conn); 4273 conn->disc_timeout = HCI_PAIRING_TIMEOUT; 4274 hci_conn_drop(conn); 4275 } 4276 4277 if (!hci_dev_test_flag(hdev, HCI_BONDABLE) && 4278 !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) { 4279 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY, 4280 sizeof(ev->bdaddr), &ev->bdaddr); 4281 } else if (hci_dev_test_flag(hdev, HCI_MGMT)) { 4282 u8 secure; 4283 4284 if (conn->pending_sec_level == BT_SECURITY_HIGH) 4285 secure = 1; 4286 else 4287 secure = 0; 4288 4289 mgmt_pin_code_request(hdev, &ev->bdaddr, secure); 4290 } 4291 4292 unlock: 4293 hci_dev_unlock(hdev); 4294 } 4295 4296 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len) 4297 { 4298 if (key_type == HCI_LK_CHANGED_COMBINATION) 4299 return; 4300 4301 conn->pin_length = pin_len; 4302 conn->key_type = key_type; 4303 4304 switch (key_type) { 4305 case HCI_LK_LOCAL_UNIT: 4306 case HCI_LK_REMOTE_UNIT: 4307 case HCI_LK_DEBUG_COMBINATION: 4308 return; 4309 case HCI_LK_COMBINATION: 4310 if (pin_len == 16) 4311 conn->pending_sec_level = BT_SECURITY_HIGH; 4312 else 4313 conn->pending_sec_level = BT_SECURITY_MEDIUM; 4314 break; 4315 case HCI_LK_UNAUTH_COMBINATION_P192: 4316 case HCI_LK_UNAUTH_COMBINATION_P256: 4317 conn->pending_sec_level = BT_SECURITY_MEDIUM; 4318 break; 4319 case HCI_LK_AUTH_COMBINATION_P192: 4320 conn->pending_sec_level = BT_SECURITY_HIGH; 4321 break; 4322 case HCI_LK_AUTH_COMBINATION_P256: 4323 conn->pending_sec_level = BT_SECURITY_FIPS; 4324 break; 4325 } 4326 } 4327 4328 static void hci_link_key_request_evt(struct hci_dev *hdev, void *data, 4329 struct sk_buff *skb) 4330 { 4331 struct hci_ev_link_key_req *ev = data; 4332 struct hci_cp_link_key_reply cp; 4333 struct hci_conn *conn; 4334 struct link_key *key; 4335 4336 bt_dev_dbg(hdev, ""); 4337 4338 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 4339 return; 4340 4341 hci_dev_lock(hdev); 4342 4343 key = hci_find_link_key(hdev, &ev->bdaddr); 4344 if (!key) { 4345 bt_dev_dbg(hdev, "link key not found for %pMR", &ev->bdaddr); 4346 goto not_found; 4347 } 4348 4349 bt_dev_dbg(hdev, "found key type %u for %pMR", key->type, &ev->bdaddr); 4350 4351 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 4352 if (conn) { 4353 clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags); 4354 4355 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 || 4356 key->type == HCI_LK_UNAUTH_COMBINATION_P256) && 4357 conn->auth_type != 0xff && (conn->auth_type & 0x01)) { 4358 bt_dev_dbg(hdev, "ignoring unauthenticated key"); 4359 goto not_found; 4360 } 4361 4362 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 && 4363 (conn->pending_sec_level == BT_SECURITY_HIGH || 4364 conn->pending_sec_level == BT_SECURITY_FIPS)) { 4365 bt_dev_dbg(hdev, "ignoring key unauthenticated for high security"); 4366 goto not_found; 4367 } 4368 4369 conn_set_key(conn, key->type, key->pin_len); 4370 } 4371 4372 bacpy(&cp.bdaddr, &ev->bdaddr); 4373 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE); 4374 4375 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp); 4376 4377 hci_dev_unlock(hdev); 4378 4379 return; 4380 4381 not_found: 4382 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr); 4383 hci_dev_unlock(hdev); 4384 } 4385 4386 static void hci_link_key_notify_evt(struct hci_dev *hdev, void *data, 4387 struct sk_buff *skb) 4388 { 4389 struct hci_ev_link_key_notify *ev = data; 4390 struct hci_conn *conn; 4391 struct link_key *key; 4392 bool persistent; 4393 u8 pin_len = 0; 4394 4395 bt_dev_dbg(hdev, ""); 4396 4397 hci_dev_lock(hdev); 4398 4399 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 4400 if (!conn) 4401 goto unlock; 4402 4403 hci_conn_hold(conn); 4404 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 4405 hci_conn_drop(conn); 4406 4407 set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags); 4408 conn_set_key(conn, ev->key_type, conn->pin_length); 4409 4410 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 4411 goto unlock; 4412 4413 key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key, 4414 ev->key_type, pin_len, &persistent); 4415 if (!key) 4416 goto unlock; 4417 4418 /* Update connection information since adding the key will have 4419 * fixed up the type in the case of changed combination keys. 4420 */ 4421 if (ev->key_type == HCI_LK_CHANGED_COMBINATION) 4422 conn_set_key(conn, key->type, key->pin_len); 4423 4424 mgmt_new_link_key(hdev, key, persistent); 4425 4426 /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag 4427 * is set. If it's not set simply remove the key from the kernel 4428 * list (we've still notified user space about it but with 4429 * store_hint being 0). 4430 */ 4431 if (key->type == HCI_LK_DEBUG_COMBINATION && 4432 !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) { 4433 list_del_rcu(&key->list); 4434 kfree_rcu(key, rcu); 4435 goto unlock; 4436 } 4437 4438 if (persistent) 4439 clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags); 4440 else 4441 set_bit(HCI_CONN_FLUSH_KEY, &conn->flags); 4442 4443 unlock: 4444 hci_dev_unlock(hdev); 4445 } 4446 4447 static void hci_clock_offset_evt(struct hci_dev *hdev, void *data, 4448 struct sk_buff *skb) 4449 { 4450 struct hci_ev_clock_offset *ev = data; 4451 struct hci_conn *conn; 4452 4453 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 4454 4455 hci_dev_lock(hdev); 4456 4457 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 4458 if (conn && !ev->status) { 4459 struct inquiry_entry *ie; 4460 4461 ie = hci_inquiry_cache_lookup(hdev, &conn->dst); 4462 if (ie) { 4463 ie->data.clock_offset = ev->clock_offset; 4464 ie->timestamp = jiffies; 4465 } 4466 } 4467 4468 hci_dev_unlock(hdev); 4469 } 4470 4471 static void hci_pkt_type_change_evt(struct hci_dev *hdev, void *data, 4472 struct sk_buff *skb) 4473 { 4474 struct hci_ev_pkt_type_change *ev = data; 4475 struct hci_conn *conn; 4476 4477 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 4478 4479 hci_dev_lock(hdev); 4480 4481 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 4482 if (conn && !ev->status) 4483 conn->pkt_type = __le16_to_cpu(ev->pkt_type); 4484 4485 hci_dev_unlock(hdev); 4486 } 4487 4488 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, void *data, 4489 struct sk_buff *skb) 4490 { 4491 struct hci_ev_pscan_rep_mode *ev = data; 4492 struct inquiry_entry *ie; 4493 4494 bt_dev_dbg(hdev, ""); 4495 4496 hci_dev_lock(hdev); 4497 4498 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr); 4499 if (ie) { 4500 ie->data.pscan_rep_mode = ev->pscan_rep_mode; 4501 ie->timestamp = jiffies; 4502 } 4503 4504 hci_dev_unlock(hdev); 4505 } 4506 4507 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, void *edata, 4508 struct sk_buff *skb) 4509 { 4510 struct hci_ev_inquiry_result_rssi *ev = edata; 4511 struct inquiry_data data; 4512 int i; 4513 4514 bt_dev_dbg(hdev, "num_rsp %d", ev->num); 4515 4516 if (!ev->num) 4517 return; 4518 4519 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) 4520 return; 4521 4522 hci_dev_lock(hdev); 4523 4524 if (skb->len == array_size(ev->num, 4525 sizeof(struct inquiry_info_rssi_pscan))) { 4526 struct inquiry_info_rssi_pscan *info; 4527 4528 for (i = 0; i < ev->num; i++) { 4529 u32 flags; 4530 4531 info = hci_ev_skb_pull(hdev, skb, 4532 HCI_EV_INQUIRY_RESULT_WITH_RSSI, 4533 sizeof(*info)); 4534 if (!info) { 4535 bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x", 4536 HCI_EV_INQUIRY_RESULT_WITH_RSSI); 4537 return; 4538 } 4539 4540 bacpy(&data.bdaddr, &info->bdaddr); 4541 data.pscan_rep_mode = info->pscan_rep_mode; 4542 data.pscan_period_mode = info->pscan_period_mode; 4543 data.pscan_mode = info->pscan_mode; 4544 memcpy(data.dev_class, info->dev_class, 3); 4545 data.clock_offset = info->clock_offset; 4546 data.rssi = info->rssi; 4547 data.ssp_mode = 0x00; 4548 4549 flags = hci_inquiry_cache_update(hdev, &data, false); 4550 4551 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 4552 info->dev_class, info->rssi, 4553 flags, NULL, 0, NULL, 0); 4554 } 4555 } else if (skb->len == array_size(ev->num, 4556 sizeof(struct inquiry_info_rssi))) { 4557 struct inquiry_info_rssi *info; 4558 4559 for (i = 0; i < ev->num; i++) { 4560 u32 flags; 4561 4562 info = hci_ev_skb_pull(hdev, skb, 4563 HCI_EV_INQUIRY_RESULT_WITH_RSSI, 4564 sizeof(*info)); 4565 if (!info) { 4566 bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x", 4567 HCI_EV_INQUIRY_RESULT_WITH_RSSI); 4568 return; 4569 } 4570 4571 bacpy(&data.bdaddr, &info->bdaddr); 4572 data.pscan_rep_mode = info->pscan_rep_mode; 4573 data.pscan_period_mode = info->pscan_period_mode; 4574 data.pscan_mode = 0x00; 4575 memcpy(data.dev_class, info->dev_class, 3); 4576 data.clock_offset = info->clock_offset; 4577 data.rssi = info->rssi; 4578 data.ssp_mode = 0x00; 4579 4580 flags = hci_inquiry_cache_update(hdev, &data, false); 4581 4582 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 4583 info->dev_class, info->rssi, 4584 flags, NULL, 0, NULL, 0); 4585 } 4586 } else { 4587 bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x", 4588 HCI_EV_INQUIRY_RESULT_WITH_RSSI); 4589 } 4590 4591 hci_dev_unlock(hdev); 4592 } 4593 4594 static void hci_remote_ext_features_evt(struct hci_dev *hdev, void *data, 4595 struct sk_buff *skb) 4596 { 4597 struct hci_ev_remote_ext_features *ev = data; 4598 struct hci_conn *conn; 4599 4600 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 4601 4602 hci_dev_lock(hdev); 4603 4604 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 4605 if (!conn) 4606 goto unlock; 4607 4608 if (ev->page < HCI_MAX_PAGES) 4609 memcpy(conn->features[ev->page], ev->features, 8); 4610 4611 if (!ev->status && ev->page == 0x01) { 4612 struct inquiry_entry *ie; 4613 4614 ie = hci_inquiry_cache_lookup(hdev, &conn->dst); 4615 if (ie) 4616 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP); 4617 4618 if (ev->features[0] & LMP_HOST_SSP) { 4619 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags); 4620 } else { 4621 /* It is mandatory by the Bluetooth specification that 4622 * Extended Inquiry Results are only used when Secure 4623 * Simple Pairing is enabled, but some devices violate 4624 * this. 4625 * 4626 * To make these devices work, the internal SSP 4627 * enabled flag needs to be cleared if the remote host 4628 * features do not indicate SSP support */ 4629 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags); 4630 } 4631 4632 if (ev->features[0] & LMP_HOST_SC) 4633 set_bit(HCI_CONN_SC_ENABLED, &conn->flags); 4634 } 4635 4636 if (conn->state != BT_CONFIG) 4637 goto unlock; 4638 4639 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) { 4640 struct hci_cp_remote_name_req cp; 4641 memset(&cp, 0, sizeof(cp)); 4642 bacpy(&cp.bdaddr, &conn->dst); 4643 cp.pscan_rep_mode = 0x02; 4644 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp); 4645 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) 4646 mgmt_device_connected(hdev, conn, NULL, 0); 4647 4648 if (!hci_outgoing_auth_needed(hdev, conn)) { 4649 conn->state = BT_CONNECTED; 4650 hci_connect_cfm(conn, ev->status); 4651 hci_conn_drop(conn); 4652 } 4653 4654 unlock: 4655 hci_dev_unlock(hdev); 4656 } 4657 4658 static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data, 4659 struct sk_buff *skb) 4660 { 4661 struct hci_ev_sync_conn_complete *ev = data; 4662 struct hci_conn *conn; 4663 4664 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 4665 4666 hci_dev_lock(hdev); 4667 4668 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); 4669 if (!conn) { 4670 if (ev->link_type == ESCO_LINK) 4671 goto unlock; 4672 4673 /* When the link type in the event indicates SCO connection 4674 * and lookup of the connection object fails, then check 4675 * if an eSCO connection object exists. 4676 * 4677 * The core limits the synchronous connections to either 4678 * SCO or eSCO. The eSCO connection is preferred and tried 4679 * to be setup first and until successfully established, 4680 * the link type will be hinted as eSCO. 4681 */ 4682 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr); 4683 if (!conn) 4684 goto unlock; 4685 } 4686 4687 switch (ev->status) { 4688 case 0x00: 4689 /* The synchronous connection complete event should only be 4690 * sent once per new connection. Receiving a successful 4691 * complete event when the connection status is already 4692 * BT_CONNECTED means that the device is misbehaving and sent 4693 * multiple complete event packets for the same new connection. 4694 * 4695 * Registering the device more than once can corrupt kernel 4696 * memory, hence upon detecting this invalid event, we report 4697 * an error and ignore the packet. 4698 */ 4699 if (conn->state == BT_CONNECTED) { 4700 bt_dev_err(hdev, "Ignoring connect complete event for existing connection"); 4701 goto unlock; 4702 } 4703 4704 conn->handle = __le16_to_cpu(ev->handle); 4705 conn->state = BT_CONNECTED; 4706 conn->type = ev->link_type; 4707 4708 hci_debugfs_create_conn(conn); 4709 hci_conn_add_sysfs(conn); 4710 break; 4711 4712 case 0x10: /* Connection Accept Timeout */ 4713 case 0x0d: /* Connection Rejected due to Limited Resources */ 4714 case 0x11: /* Unsupported Feature or Parameter Value */ 4715 case 0x1c: /* SCO interval rejected */ 4716 case 0x1a: /* Unsupported Remote Feature */ 4717 case 0x1e: /* Invalid LMP Parameters */ 4718 case 0x1f: /* Unspecified error */ 4719 case 0x20: /* Unsupported LMP Parameter value */ 4720 if (conn->out) { 4721 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) | 4722 (hdev->esco_type & EDR_ESCO_MASK); 4723 if (hci_setup_sync(conn, conn->link->handle)) 4724 goto unlock; 4725 } 4726 fallthrough; 4727 4728 default: 4729 conn->state = BT_CLOSED; 4730 break; 4731 } 4732 4733 bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode); 4734 /* Notify only in case of SCO over HCI transport data path which 4735 * is zero and non-zero value shall be non-HCI transport data path 4736 */ 4737 if (conn->codec.data_path == 0 && hdev->notify) { 4738 switch (ev->air_mode) { 4739 case 0x02: 4740 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD); 4741 break; 4742 case 0x03: 4743 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP); 4744 break; 4745 } 4746 } 4747 4748 hci_connect_cfm(conn, ev->status); 4749 if (ev->status) 4750 hci_conn_del(conn); 4751 4752 unlock: 4753 hci_dev_unlock(hdev); 4754 } 4755 4756 static inline size_t eir_get_length(u8 *eir, size_t eir_len) 4757 { 4758 size_t parsed = 0; 4759 4760 while (parsed < eir_len) { 4761 u8 field_len = eir[0]; 4762 4763 if (field_len == 0) 4764 return parsed; 4765 4766 parsed += field_len + 1; 4767 eir += field_len + 1; 4768 } 4769 4770 return eir_len; 4771 } 4772 4773 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, void *edata, 4774 struct sk_buff *skb) 4775 { 4776 struct hci_ev_ext_inquiry_result *ev = edata; 4777 struct inquiry_data data; 4778 size_t eir_len; 4779 int i; 4780 4781 if (!hci_ev_skb_pull(hdev, skb, HCI_EV_EXTENDED_INQUIRY_RESULT, 4782 flex_array_size(ev, info, ev->num))) 4783 return; 4784 4785 bt_dev_dbg(hdev, "num %d", ev->num); 4786 4787 if (!ev->num) 4788 return; 4789 4790 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) 4791 return; 4792 4793 hci_dev_lock(hdev); 4794 4795 for (i = 0; i < ev->num; i++) { 4796 struct extended_inquiry_info *info = &ev->info[i]; 4797 u32 flags; 4798 bool name_known; 4799 4800 bacpy(&data.bdaddr, &info->bdaddr); 4801 data.pscan_rep_mode = info->pscan_rep_mode; 4802 data.pscan_period_mode = info->pscan_period_mode; 4803 data.pscan_mode = 0x00; 4804 memcpy(data.dev_class, info->dev_class, 3); 4805 data.clock_offset = info->clock_offset; 4806 data.rssi = info->rssi; 4807 data.ssp_mode = 0x01; 4808 4809 if (hci_dev_test_flag(hdev, HCI_MGMT)) 4810 name_known = eir_get_data(info->data, 4811 sizeof(info->data), 4812 EIR_NAME_COMPLETE, NULL); 4813 else 4814 name_known = true; 4815 4816 flags = hci_inquiry_cache_update(hdev, &data, name_known); 4817 4818 eir_len = eir_get_length(info->data, sizeof(info->data)); 4819 4820 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 4821 info->dev_class, info->rssi, 4822 flags, info->data, eir_len, NULL, 0); 4823 } 4824 4825 hci_dev_unlock(hdev); 4826 } 4827 4828 static void hci_key_refresh_complete_evt(struct hci_dev *hdev, void *data, 4829 struct sk_buff *skb) 4830 { 4831 struct hci_ev_key_refresh_complete *ev = data; 4832 struct hci_conn *conn; 4833 4834 bt_dev_dbg(hdev, "status 0x%2.2x handle 0x%4.4x", ev->status, 4835 __le16_to_cpu(ev->handle)); 4836 4837 hci_dev_lock(hdev); 4838 4839 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 4840 if (!conn) 4841 goto unlock; 4842 4843 /* For BR/EDR the necessary steps are taken through the 4844 * auth_complete event. 4845 */ 4846 if (conn->type != LE_LINK) 4847 goto unlock; 4848 4849 if (!ev->status) 4850 conn->sec_level = conn->pending_sec_level; 4851 4852 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); 4853 4854 if (ev->status && conn->state == BT_CONNECTED) { 4855 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE); 4856 hci_conn_drop(conn); 4857 goto unlock; 4858 } 4859 4860 if (conn->state == BT_CONFIG) { 4861 if (!ev->status) 4862 conn->state = BT_CONNECTED; 4863 4864 hci_connect_cfm(conn, ev->status); 4865 hci_conn_drop(conn); 4866 } else { 4867 hci_auth_cfm(conn, ev->status); 4868 4869 hci_conn_hold(conn); 4870 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 4871 hci_conn_drop(conn); 4872 } 4873 4874 unlock: 4875 hci_dev_unlock(hdev); 4876 } 4877 4878 static u8 hci_get_auth_req(struct hci_conn *conn) 4879 { 4880 /* If remote requests no-bonding follow that lead */ 4881 if (conn->remote_auth == HCI_AT_NO_BONDING || 4882 conn->remote_auth == HCI_AT_NO_BONDING_MITM) 4883 return conn->remote_auth | (conn->auth_type & 0x01); 4884 4885 /* If both remote and local have enough IO capabilities, require 4886 * MITM protection 4887 */ 4888 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT && 4889 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT) 4890 return conn->remote_auth | 0x01; 4891 4892 /* No MITM protection possible so ignore remote requirement */ 4893 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01); 4894 } 4895 4896 static u8 bredr_oob_data_present(struct hci_conn *conn) 4897 { 4898 struct hci_dev *hdev = conn->hdev; 4899 struct oob_data *data; 4900 4901 data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR); 4902 if (!data) 4903 return 0x00; 4904 4905 if (bredr_sc_enabled(hdev)) { 4906 /* When Secure Connections is enabled, then just 4907 * return the present value stored with the OOB 4908 * data. The stored value contains the right present 4909 * information. However it can only be trusted when 4910 * not in Secure Connection Only mode. 4911 */ 4912 if (!hci_dev_test_flag(hdev, HCI_SC_ONLY)) 4913 return data->present; 4914 4915 /* When Secure Connections Only mode is enabled, then 4916 * the P-256 values are required. If they are not 4917 * available, then do not declare that OOB data is 4918 * present. 4919 */ 4920 if (!memcmp(data->rand256, ZERO_KEY, 16) || 4921 !memcmp(data->hash256, ZERO_KEY, 16)) 4922 return 0x00; 4923 4924 return 0x02; 4925 } 4926 4927 /* When Secure Connections is not enabled or actually 4928 * not supported by the hardware, then check that if 4929 * P-192 data values are present. 4930 */ 4931 if (!memcmp(data->rand192, ZERO_KEY, 16) || 4932 !memcmp(data->hash192, ZERO_KEY, 16)) 4933 return 0x00; 4934 4935 return 0x01; 4936 } 4937 4938 static void hci_io_capa_request_evt(struct hci_dev *hdev, void *data, 4939 struct sk_buff *skb) 4940 { 4941 struct hci_ev_io_capa_request *ev = data; 4942 struct hci_conn *conn; 4943 4944 bt_dev_dbg(hdev, ""); 4945 4946 hci_dev_lock(hdev); 4947 4948 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 4949 if (!conn) 4950 goto unlock; 4951 4952 hci_conn_hold(conn); 4953 4954 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 4955 goto unlock; 4956 4957 /* Allow pairing if we're pairable, the initiators of the 4958 * pairing or if the remote is not requesting bonding. 4959 */ 4960 if (hci_dev_test_flag(hdev, HCI_BONDABLE) || 4961 test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) || 4962 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) { 4963 struct hci_cp_io_capability_reply cp; 4964 4965 bacpy(&cp.bdaddr, &ev->bdaddr); 4966 /* Change the IO capability from KeyboardDisplay 4967 * to DisplayYesNo as it is not supported by BT spec. */ 4968 cp.capability = (conn->io_capability == 0x04) ? 4969 HCI_IO_DISPLAY_YESNO : conn->io_capability; 4970 4971 /* If we are initiators, there is no remote information yet */ 4972 if (conn->remote_auth == 0xff) { 4973 /* Request MITM protection if our IO caps allow it 4974 * except for the no-bonding case. 4975 */ 4976 if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT && 4977 conn->auth_type != HCI_AT_NO_BONDING) 4978 conn->auth_type |= 0x01; 4979 } else { 4980 conn->auth_type = hci_get_auth_req(conn); 4981 } 4982 4983 /* If we're not bondable, force one of the non-bondable 4984 * authentication requirement values. 4985 */ 4986 if (!hci_dev_test_flag(hdev, HCI_BONDABLE)) 4987 conn->auth_type &= HCI_AT_NO_BONDING_MITM; 4988 4989 cp.authentication = conn->auth_type; 4990 cp.oob_data = bredr_oob_data_present(conn); 4991 4992 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY, 4993 sizeof(cp), &cp); 4994 } else { 4995 struct hci_cp_io_capability_neg_reply cp; 4996 4997 bacpy(&cp.bdaddr, &ev->bdaddr); 4998 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED; 4999 5000 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY, 5001 sizeof(cp), &cp); 5002 } 5003 5004 unlock: 5005 hci_dev_unlock(hdev); 5006 } 5007 5008 static void hci_io_capa_reply_evt(struct hci_dev *hdev, void *data, 5009 struct sk_buff *skb) 5010 { 5011 struct hci_ev_io_capa_reply *ev = data; 5012 struct hci_conn *conn; 5013 5014 bt_dev_dbg(hdev, ""); 5015 5016 hci_dev_lock(hdev); 5017 5018 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 5019 if (!conn) 5020 goto unlock; 5021 5022 conn->remote_cap = ev->capability; 5023 conn->remote_auth = ev->authentication; 5024 5025 unlock: 5026 hci_dev_unlock(hdev); 5027 } 5028 5029 static void hci_user_confirm_request_evt(struct hci_dev *hdev, void *data, 5030 struct sk_buff *skb) 5031 { 5032 struct hci_ev_user_confirm_req *ev = data; 5033 int loc_mitm, rem_mitm, confirm_hint = 0; 5034 struct hci_conn *conn; 5035 5036 bt_dev_dbg(hdev, ""); 5037 5038 hci_dev_lock(hdev); 5039 5040 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 5041 goto unlock; 5042 5043 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 5044 if (!conn) 5045 goto unlock; 5046 5047 loc_mitm = (conn->auth_type & 0x01); 5048 rem_mitm = (conn->remote_auth & 0x01); 5049 5050 /* If we require MITM but the remote device can't provide that 5051 * (it has NoInputNoOutput) then reject the confirmation 5052 * request. We check the security level here since it doesn't 5053 * necessarily match conn->auth_type. 5054 */ 5055 if (conn->pending_sec_level > BT_SECURITY_MEDIUM && 5056 conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) { 5057 bt_dev_dbg(hdev, "Rejecting request: remote device can't provide MITM"); 5058 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY, 5059 sizeof(ev->bdaddr), &ev->bdaddr); 5060 goto unlock; 5061 } 5062 5063 /* If no side requires MITM protection; auto-accept */ 5064 if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) && 5065 (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) { 5066 5067 /* If we're not the initiators request authorization to 5068 * proceed from user space (mgmt_user_confirm with 5069 * confirm_hint set to 1). The exception is if neither 5070 * side had MITM or if the local IO capability is 5071 * NoInputNoOutput, in which case we do auto-accept 5072 */ 5073 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && 5074 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT && 5075 (loc_mitm || rem_mitm)) { 5076 bt_dev_dbg(hdev, "Confirming auto-accept as acceptor"); 5077 confirm_hint = 1; 5078 goto confirm; 5079 } 5080 5081 /* If there already exists link key in local host, leave the 5082 * decision to user space since the remote device could be 5083 * legitimate or malicious. 5084 */ 5085 if (hci_find_link_key(hdev, &ev->bdaddr)) { 5086 bt_dev_dbg(hdev, "Local host already has link key"); 5087 confirm_hint = 1; 5088 goto confirm; 5089 } 5090 5091 BT_DBG("Auto-accept of user confirmation with %ums delay", 5092 hdev->auto_accept_delay); 5093 5094 if (hdev->auto_accept_delay > 0) { 5095 int delay = msecs_to_jiffies(hdev->auto_accept_delay); 5096 queue_delayed_work(conn->hdev->workqueue, 5097 &conn->auto_accept_work, delay); 5098 goto unlock; 5099 } 5100 5101 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, 5102 sizeof(ev->bdaddr), &ev->bdaddr); 5103 goto unlock; 5104 } 5105 5106 confirm: 5107 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, 5108 le32_to_cpu(ev->passkey), confirm_hint); 5109 5110 unlock: 5111 hci_dev_unlock(hdev); 5112 } 5113 5114 static void hci_user_passkey_request_evt(struct hci_dev *hdev, void *data, 5115 struct sk_buff *skb) 5116 { 5117 struct hci_ev_user_passkey_req *ev = data; 5118 5119 bt_dev_dbg(hdev, ""); 5120 5121 if (hci_dev_test_flag(hdev, HCI_MGMT)) 5122 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0); 5123 } 5124 5125 static void hci_user_passkey_notify_evt(struct hci_dev *hdev, void *data, 5126 struct sk_buff *skb) 5127 { 5128 struct hci_ev_user_passkey_notify *ev = data; 5129 struct hci_conn *conn; 5130 5131 bt_dev_dbg(hdev, ""); 5132 5133 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 5134 if (!conn) 5135 return; 5136 5137 conn->passkey_notify = __le32_to_cpu(ev->passkey); 5138 conn->passkey_entered = 0; 5139 5140 if (hci_dev_test_flag(hdev, HCI_MGMT)) 5141 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type, 5142 conn->dst_type, conn->passkey_notify, 5143 conn->passkey_entered); 5144 } 5145 5146 static void hci_keypress_notify_evt(struct hci_dev *hdev, void *data, 5147 struct sk_buff *skb) 5148 { 5149 struct hci_ev_keypress_notify *ev = data; 5150 struct hci_conn *conn; 5151 5152 bt_dev_dbg(hdev, ""); 5153 5154 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 5155 if (!conn) 5156 return; 5157 5158 switch (ev->type) { 5159 case HCI_KEYPRESS_STARTED: 5160 conn->passkey_entered = 0; 5161 return; 5162 5163 case HCI_KEYPRESS_ENTERED: 5164 conn->passkey_entered++; 5165 break; 5166 5167 case HCI_KEYPRESS_ERASED: 5168 conn->passkey_entered--; 5169 break; 5170 5171 case HCI_KEYPRESS_CLEARED: 5172 conn->passkey_entered = 0; 5173 break; 5174 5175 case HCI_KEYPRESS_COMPLETED: 5176 return; 5177 } 5178 5179 if (hci_dev_test_flag(hdev, HCI_MGMT)) 5180 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type, 5181 conn->dst_type, conn->passkey_notify, 5182 conn->passkey_entered); 5183 } 5184 5185 static void hci_simple_pair_complete_evt(struct hci_dev *hdev, void *data, 5186 struct sk_buff *skb) 5187 { 5188 struct hci_ev_simple_pair_complete *ev = data; 5189 struct hci_conn *conn; 5190 5191 bt_dev_dbg(hdev, ""); 5192 5193 hci_dev_lock(hdev); 5194 5195 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 5196 if (!conn) 5197 goto unlock; 5198 5199 /* Reset the authentication requirement to unknown */ 5200 conn->remote_auth = 0xff; 5201 5202 /* To avoid duplicate auth_failed events to user space we check 5203 * the HCI_CONN_AUTH_PEND flag which will be set if we 5204 * initiated the authentication. A traditional auth_complete 5205 * event gets always produced as initiator and is also mapped to 5206 * the mgmt_auth_failed event */ 5207 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status) 5208 mgmt_auth_failed(conn, ev->status); 5209 5210 hci_conn_drop(conn); 5211 5212 unlock: 5213 hci_dev_unlock(hdev); 5214 } 5215 5216 static void hci_remote_host_features_evt(struct hci_dev *hdev, void *data, 5217 struct sk_buff *skb) 5218 { 5219 struct hci_ev_remote_host_features *ev = data; 5220 struct inquiry_entry *ie; 5221 struct hci_conn *conn; 5222 5223 bt_dev_dbg(hdev, ""); 5224 5225 hci_dev_lock(hdev); 5226 5227 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 5228 if (conn) 5229 memcpy(conn->features[1], ev->features, 8); 5230 5231 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr); 5232 if (ie) 5233 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP); 5234 5235 hci_dev_unlock(hdev); 5236 } 5237 5238 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev, void *edata, 5239 struct sk_buff *skb) 5240 { 5241 struct hci_ev_remote_oob_data_request *ev = edata; 5242 struct oob_data *data; 5243 5244 bt_dev_dbg(hdev, ""); 5245 5246 hci_dev_lock(hdev); 5247 5248 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 5249 goto unlock; 5250 5251 data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR); 5252 if (!data) { 5253 struct hci_cp_remote_oob_data_neg_reply cp; 5254 5255 bacpy(&cp.bdaddr, &ev->bdaddr); 5256 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, 5257 sizeof(cp), &cp); 5258 goto unlock; 5259 } 5260 5261 if (bredr_sc_enabled(hdev)) { 5262 struct hci_cp_remote_oob_ext_data_reply cp; 5263 5264 bacpy(&cp.bdaddr, &ev->bdaddr); 5265 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) { 5266 memset(cp.hash192, 0, sizeof(cp.hash192)); 5267 memset(cp.rand192, 0, sizeof(cp.rand192)); 5268 } else { 5269 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192)); 5270 memcpy(cp.rand192, data->rand192, sizeof(cp.rand192)); 5271 } 5272 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256)); 5273 memcpy(cp.rand256, data->rand256, sizeof(cp.rand256)); 5274 5275 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY, 5276 sizeof(cp), &cp); 5277 } else { 5278 struct hci_cp_remote_oob_data_reply cp; 5279 5280 bacpy(&cp.bdaddr, &ev->bdaddr); 5281 memcpy(cp.hash, data->hash192, sizeof(cp.hash)); 5282 memcpy(cp.rand, data->rand192, sizeof(cp.rand)); 5283 5284 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, 5285 sizeof(cp), &cp); 5286 } 5287 5288 unlock: 5289 hci_dev_unlock(hdev); 5290 } 5291 5292 #if IS_ENABLED(CONFIG_BT_HS) 5293 static void hci_chan_selected_evt(struct hci_dev *hdev, void *data, 5294 struct sk_buff *skb) 5295 { 5296 struct hci_ev_channel_selected *ev = data; 5297 struct hci_conn *hcon; 5298 5299 bt_dev_dbg(hdev, "handle 0x%2.2x", ev->phy_handle); 5300 5301 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle); 5302 if (!hcon) 5303 return; 5304 5305 amp_read_loc_assoc_final_data(hdev, hcon); 5306 } 5307 5308 static void hci_phy_link_complete_evt(struct hci_dev *hdev, void *data, 5309 struct sk_buff *skb) 5310 { 5311 struct hci_ev_phy_link_complete *ev = data; 5312 struct hci_conn *hcon, *bredr_hcon; 5313 5314 bt_dev_dbg(hdev, "handle 0x%2.2x status 0x%2.2x", ev->phy_handle, 5315 ev->status); 5316 5317 hci_dev_lock(hdev); 5318 5319 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle); 5320 if (!hcon) 5321 goto unlock; 5322 5323 if (!hcon->amp_mgr) 5324 goto unlock; 5325 5326 if (ev->status) { 5327 hci_conn_del(hcon); 5328 goto unlock; 5329 } 5330 5331 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon; 5332 5333 hcon->state = BT_CONNECTED; 5334 bacpy(&hcon->dst, &bredr_hcon->dst); 5335 5336 hci_conn_hold(hcon); 5337 hcon->disc_timeout = HCI_DISCONN_TIMEOUT; 5338 hci_conn_drop(hcon); 5339 5340 hci_debugfs_create_conn(hcon); 5341 hci_conn_add_sysfs(hcon); 5342 5343 amp_physical_cfm(bredr_hcon, hcon); 5344 5345 unlock: 5346 hci_dev_unlock(hdev); 5347 } 5348 5349 static void hci_loglink_complete_evt(struct hci_dev *hdev, void *data, 5350 struct sk_buff *skb) 5351 { 5352 struct hci_ev_logical_link_complete *ev = data; 5353 struct hci_conn *hcon; 5354 struct hci_chan *hchan; 5355 struct amp_mgr *mgr; 5356 5357 bt_dev_dbg(hdev, "log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x", 5358 le16_to_cpu(ev->handle), ev->phy_handle, ev->status); 5359 5360 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle); 5361 if (!hcon) 5362 return; 5363 5364 /* Create AMP hchan */ 5365 hchan = hci_chan_create(hcon); 5366 if (!hchan) 5367 return; 5368 5369 hchan->handle = le16_to_cpu(ev->handle); 5370 hchan->amp = true; 5371 5372 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan); 5373 5374 mgr = hcon->amp_mgr; 5375 if (mgr && mgr->bredr_chan) { 5376 struct l2cap_chan *bredr_chan = mgr->bredr_chan; 5377 5378 l2cap_chan_lock(bredr_chan); 5379 5380 bredr_chan->conn->mtu = hdev->block_mtu; 5381 l2cap_logical_cfm(bredr_chan, hchan, 0); 5382 hci_conn_hold(hcon); 5383 5384 l2cap_chan_unlock(bredr_chan); 5385 } 5386 } 5387 5388 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev, void *data, 5389 struct sk_buff *skb) 5390 { 5391 struct hci_ev_disconn_logical_link_complete *ev = data; 5392 struct hci_chan *hchan; 5393 5394 bt_dev_dbg(hdev, "handle 0x%4.4x status 0x%2.2x", 5395 le16_to_cpu(ev->handle), ev->status); 5396 5397 if (ev->status) 5398 return; 5399 5400 hci_dev_lock(hdev); 5401 5402 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle)); 5403 if (!hchan || !hchan->amp) 5404 goto unlock; 5405 5406 amp_destroy_logical_link(hchan, ev->reason); 5407 5408 unlock: 5409 hci_dev_unlock(hdev); 5410 } 5411 5412 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev, void *data, 5413 struct sk_buff *skb) 5414 { 5415 struct hci_ev_disconn_phy_link_complete *ev = data; 5416 struct hci_conn *hcon; 5417 5418 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 5419 5420 if (ev->status) 5421 return; 5422 5423 hci_dev_lock(hdev); 5424 5425 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle); 5426 if (hcon) { 5427 hcon->state = BT_CLOSED; 5428 hci_conn_del(hcon); 5429 } 5430 5431 hci_dev_unlock(hdev); 5432 } 5433 #endif 5434 5435 static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr, 5436 u8 bdaddr_type, bdaddr_t *local_rpa) 5437 { 5438 if (conn->out) { 5439 conn->dst_type = bdaddr_type; 5440 conn->resp_addr_type = bdaddr_type; 5441 bacpy(&conn->resp_addr, bdaddr); 5442 5443 /* Check if the controller has set a Local RPA then it must be 5444 * used instead or hdev->rpa. 5445 */ 5446 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) { 5447 conn->init_addr_type = ADDR_LE_DEV_RANDOM; 5448 bacpy(&conn->init_addr, local_rpa); 5449 } else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) { 5450 conn->init_addr_type = ADDR_LE_DEV_RANDOM; 5451 bacpy(&conn->init_addr, &conn->hdev->rpa); 5452 } else { 5453 hci_copy_identity_address(conn->hdev, &conn->init_addr, 5454 &conn->init_addr_type); 5455 } 5456 } else { 5457 conn->resp_addr_type = conn->hdev->adv_addr_type; 5458 /* Check if the controller has set a Local RPA then it must be 5459 * used instead or hdev->rpa. 5460 */ 5461 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) { 5462 conn->resp_addr_type = ADDR_LE_DEV_RANDOM; 5463 bacpy(&conn->resp_addr, local_rpa); 5464 } else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) { 5465 /* In case of ext adv, resp_addr will be updated in 5466 * Adv Terminated event. 5467 */ 5468 if (!ext_adv_capable(conn->hdev)) 5469 bacpy(&conn->resp_addr, 5470 &conn->hdev->random_addr); 5471 } else { 5472 bacpy(&conn->resp_addr, &conn->hdev->bdaddr); 5473 } 5474 5475 conn->init_addr_type = bdaddr_type; 5476 bacpy(&conn->init_addr, bdaddr); 5477 5478 /* For incoming connections, set the default minimum 5479 * and maximum connection interval. They will be used 5480 * to check if the parameters are in range and if not 5481 * trigger the connection update procedure. 5482 */ 5483 conn->le_conn_min_interval = conn->hdev->le_conn_min_interval; 5484 conn->le_conn_max_interval = conn->hdev->le_conn_max_interval; 5485 } 5486 } 5487 5488 static void le_conn_complete_evt(struct hci_dev *hdev, u8 status, 5489 bdaddr_t *bdaddr, u8 bdaddr_type, 5490 bdaddr_t *local_rpa, u8 role, u16 handle, 5491 u16 interval, u16 latency, 5492 u16 supervision_timeout) 5493 { 5494 struct hci_conn_params *params; 5495 struct hci_conn *conn; 5496 struct smp_irk *irk; 5497 u8 addr_type; 5498 5499 hci_dev_lock(hdev); 5500 5501 /* All controllers implicitly stop advertising in the event of a 5502 * connection, so ensure that the state bit is cleared. 5503 */ 5504 hci_dev_clear_flag(hdev, HCI_LE_ADV); 5505 5506 conn = hci_lookup_le_connect(hdev); 5507 if (!conn) { 5508 conn = hci_conn_add(hdev, LE_LINK, bdaddr, role); 5509 if (!conn) { 5510 bt_dev_err(hdev, "no memory for new connection"); 5511 goto unlock; 5512 } 5513 5514 conn->dst_type = bdaddr_type; 5515 5516 /* If we didn't have a hci_conn object previously 5517 * but we're in central role this must be something 5518 * initiated using an accept list. Since accept list based 5519 * connections are not "first class citizens" we don't 5520 * have full tracking of them. Therefore, we go ahead 5521 * with a "best effort" approach of determining the 5522 * initiator address based on the HCI_PRIVACY flag. 5523 */ 5524 if (conn->out) { 5525 conn->resp_addr_type = bdaddr_type; 5526 bacpy(&conn->resp_addr, bdaddr); 5527 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) { 5528 conn->init_addr_type = ADDR_LE_DEV_RANDOM; 5529 bacpy(&conn->init_addr, &hdev->rpa); 5530 } else { 5531 hci_copy_identity_address(hdev, 5532 &conn->init_addr, 5533 &conn->init_addr_type); 5534 } 5535 } 5536 } else { 5537 cancel_delayed_work(&conn->le_conn_timeout); 5538 } 5539 5540 le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa); 5541 5542 /* Lookup the identity address from the stored connection 5543 * address and address type. 5544 * 5545 * When establishing connections to an identity address, the 5546 * connection procedure will store the resolvable random 5547 * address first. Now if it can be converted back into the 5548 * identity address, start using the identity address from 5549 * now on. 5550 */ 5551 irk = hci_get_irk(hdev, &conn->dst, conn->dst_type); 5552 if (irk) { 5553 bacpy(&conn->dst, &irk->bdaddr); 5554 conn->dst_type = irk->addr_type; 5555 } 5556 5557 conn->dst_type = ev_bdaddr_type(hdev, conn->dst_type, NULL); 5558 5559 if (status) { 5560 hci_le_conn_failed(conn, status); 5561 goto unlock; 5562 } 5563 5564 if (conn->dst_type == ADDR_LE_DEV_PUBLIC) 5565 addr_type = BDADDR_LE_PUBLIC; 5566 else 5567 addr_type = BDADDR_LE_RANDOM; 5568 5569 /* Drop the connection if the device is blocked */ 5570 if (hci_bdaddr_list_lookup(&hdev->reject_list, &conn->dst, addr_type)) { 5571 hci_conn_drop(conn); 5572 goto unlock; 5573 } 5574 5575 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) 5576 mgmt_device_connected(hdev, conn, NULL, 0); 5577 5578 conn->sec_level = BT_SECURITY_LOW; 5579 conn->handle = handle; 5580 conn->state = BT_CONFIG; 5581 5582 /* Store current advertising instance as connection advertising instance 5583 * when sotfware rotation is in use so it can be re-enabled when 5584 * disconnected. 5585 */ 5586 if (!ext_adv_capable(hdev)) 5587 conn->adv_instance = hdev->cur_adv_instance; 5588 5589 conn->le_conn_interval = interval; 5590 conn->le_conn_latency = latency; 5591 conn->le_supv_timeout = supervision_timeout; 5592 5593 hci_debugfs_create_conn(conn); 5594 hci_conn_add_sysfs(conn); 5595 5596 /* The remote features procedure is defined for central 5597 * role only. So only in case of an initiated connection 5598 * request the remote features. 5599 * 5600 * If the local controller supports peripheral-initiated features 5601 * exchange, then requesting the remote features in peripheral 5602 * role is possible. Otherwise just transition into the 5603 * connected state without requesting the remote features. 5604 */ 5605 if (conn->out || 5606 (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) { 5607 struct hci_cp_le_read_remote_features cp; 5608 5609 cp.handle = __cpu_to_le16(conn->handle); 5610 5611 hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES, 5612 sizeof(cp), &cp); 5613 5614 hci_conn_hold(conn); 5615 } else { 5616 conn->state = BT_CONNECTED; 5617 hci_connect_cfm(conn, status); 5618 } 5619 5620 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst, 5621 conn->dst_type); 5622 if (params) { 5623 list_del_init(¶ms->action); 5624 if (params->conn) { 5625 hci_conn_drop(params->conn); 5626 hci_conn_put(params->conn); 5627 params->conn = NULL; 5628 } 5629 } 5630 5631 unlock: 5632 hci_update_passive_scan(hdev); 5633 hci_dev_unlock(hdev); 5634 } 5635 5636 static void hci_le_conn_complete_evt(struct hci_dev *hdev, void *data, 5637 struct sk_buff *skb) 5638 { 5639 struct hci_ev_le_conn_complete *ev = data; 5640 5641 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 5642 5643 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type, 5644 NULL, ev->role, le16_to_cpu(ev->handle), 5645 le16_to_cpu(ev->interval), 5646 le16_to_cpu(ev->latency), 5647 le16_to_cpu(ev->supervision_timeout)); 5648 } 5649 5650 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev, void *data, 5651 struct sk_buff *skb) 5652 { 5653 struct hci_ev_le_enh_conn_complete *ev = data; 5654 5655 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 5656 5657 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type, 5658 &ev->local_rpa, ev->role, le16_to_cpu(ev->handle), 5659 le16_to_cpu(ev->interval), 5660 le16_to_cpu(ev->latency), 5661 le16_to_cpu(ev->supervision_timeout)); 5662 } 5663 5664 static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, void *data, 5665 struct sk_buff *skb) 5666 { 5667 struct hci_evt_le_ext_adv_set_term *ev = data; 5668 struct hci_conn *conn; 5669 struct adv_info *adv, *n; 5670 5671 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 5672 5673 adv = hci_find_adv_instance(hdev, ev->handle); 5674 5675 /* The Bluetooth Core 5.3 specification clearly states that this event 5676 * shall not be sent when the Host disables the advertising set. So in 5677 * case of HCI_ERROR_CANCELLED_BY_HOST, just ignore the event. 5678 * 5679 * When the Host disables an advertising set, all cleanup is done via 5680 * its command callback and not needed to be duplicated here. 5681 */ 5682 if (ev->status == HCI_ERROR_CANCELLED_BY_HOST) { 5683 bt_dev_warn_ratelimited(hdev, "Unexpected advertising set terminated event"); 5684 return; 5685 } 5686 5687 if (ev->status) { 5688 if (!adv) 5689 return; 5690 5691 /* Remove advertising as it has been terminated */ 5692 hci_remove_adv_instance(hdev, ev->handle); 5693 mgmt_advertising_removed(NULL, hdev, ev->handle); 5694 5695 list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) { 5696 if (adv->enabled) 5697 return; 5698 } 5699 5700 /* We are no longer advertising, clear HCI_LE_ADV */ 5701 hci_dev_clear_flag(hdev, HCI_LE_ADV); 5702 return; 5703 } 5704 5705 if (adv) 5706 adv->enabled = false; 5707 5708 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle)); 5709 if (conn) { 5710 /* Store handle in the connection so the correct advertising 5711 * instance can be re-enabled when disconnected. 5712 */ 5713 conn->adv_instance = ev->handle; 5714 5715 if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM || 5716 bacmp(&conn->resp_addr, BDADDR_ANY)) 5717 return; 5718 5719 if (!ev->handle) { 5720 bacpy(&conn->resp_addr, &hdev->random_addr); 5721 return; 5722 } 5723 5724 if (adv) 5725 bacpy(&conn->resp_addr, &adv->random_addr); 5726 } 5727 } 5728 5729 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev, void *data, 5730 struct sk_buff *skb) 5731 { 5732 struct hci_ev_le_conn_update_complete *ev = data; 5733 struct hci_conn *conn; 5734 5735 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 5736 5737 if (ev->status) 5738 return; 5739 5740 hci_dev_lock(hdev); 5741 5742 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 5743 if (conn) { 5744 conn->le_conn_interval = le16_to_cpu(ev->interval); 5745 conn->le_conn_latency = le16_to_cpu(ev->latency); 5746 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout); 5747 } 5748 5749 hci_dev_unlock(hdev); 5750 } 5751 5752 /* This function requires the caller holds hdev->lock */ 5753 static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev, 5754 bdaddr_t *addr, 5755 u8 addr_type, bool addr_resolved, 5756 u8 adv_type) 5757 { 5758 struct hci_conn *conn; 5759 struct hci_conn_params *params; 5760 5761 /* If the event is not connectable don't proceed further */ 5762 if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND) 5763 return NULL; 5764 5765 /* Ignore if the device is blocked or hdev is suspended */ 5766 if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type) || 5767 hdev->suspended) 5768 return NULL; 5769 5770 /* Most controller will fail if we try to create new connections 5771 * while we have an existing one in peripheral role. 5772 */ 5773 if (hdev->conn_hash.le_num_peripheral > 0 && 5774 (!test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) || 5775 !(hdev->le_states[3] & 0x10))) 5776 return NULL; 5777 5778 /* If we're not connectable only connect devices that we have in 5779 * our pend_le_conns list. 5780 */ 5781 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr, 5782 addr_type); 5783 if (!params) 5784 return NULL; 5785 5786 if (!params->explicit_connect) { 5787 switch (params->auto_connect) { 5788 case HCI_AUTO_CONN_DIRECT: 5789 /* Only devices advertising with ADV_DIRECT_IND are 5790 * triggering a connection attempt. This is allowing 5791 * incoming connections from peripheral devices. 5792 */ 5793 if (adv_type != LE_ADV_DIRECT_IND) 5794 return NULL; 5795 break; 5796 case HCI_AUTO_CONN_ALWAYS: 5797 /* Devices advertising with ADV_IND or ADV_DIRECT_IND 5798 * are triggering a connection attempt. This means 5799 * that incoming connections from peripheral device are 5800 * accepted and also outgoing connections to peripheral 5801 * devices are established when found. 5802 */ 5803 break; 5804 default: 5805 return NULL; 5806 } 5807 } 5808 5809 conn = hci_connect_le(hdev, addr, addr_type, addr_resolved, 5810 BT_SECURITY_LOW, hdev->def_le_autoconnect_timeout, 5811 HCI_ROLE_MASTER); 5812 if (!IS_ERR(conn)) { 5813 /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned 5814 * by higher layer that tried to connect, if no then 5815 * store the pointer since we don't really have any 5816 * other owner of the object besides the params that 5817 * triggered it. This way we can abort the connection if 5818 * the parameters get removed and keep the reference 5819 * count consistent once the connection is established. 5820 */ 5821 5822 if (!params->explicit_connect) 5823 params->conn = hci_conn_get(conn); 5824 5825 return conn; 5826 } 5827 5828 switch (PTR_ERR(conn)) { 5829 case -EBUSY: 5830 /* If hci_connect() returns -EBUSY it means there is already 5831 * an LE connection attempt going on. Since controllers don't 5832 * support more than one connection attempt at the time, we 5833 * don't consider this an error case. 5834 */ 5835 break; 5836 default: 5837 BT_DBG("Failed to connect: err %ld", PTR_ERR(conn)); 5838 return NULL; 5839 } 5840 5841 return NULL; 5842 } 5843 5844 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, 5845 u8 bdaddr_type, bdaddr_t *direct_addr, 5846 u8 direct_addr_type, s8 rssi, u8 *data, u8 len, 5847 bool ext_adv) 5848 { 5849 struct discovery_state *d = &hdev->discovery; 5850 struct smp_irk *irk; 5851 struct hci_conn *conn; 5852 bool match, bdaddr_resolved; 5853 u32 flags; 5854 u8 *ptr; 5855 5856 switch (type) { 5857 case LE_ADV_IND: 5858 case LE_ADV_DIRECT_IND: 5859 case LE_ADV_SCAN_IND: 5860 case LE_ADV_NONCONN_IND: 5861 case LE_ADV_SCAN_RSP: 5862 break; 5863 default: 5864 bt_dev_err_ratelimited(hdev, "unknown advertising packet " 5865 "type: 0x%02x", type); 5866 return; 5867 } 5868 5869 if (!ext_adv && len > HCI_MAX_AD_LENGTH) { 5870 bt_dev_err_ratelimited(hdev, "legacy adv larger than 31 bytes"); 5871 return; 5872 } 5873 5874 /* Find the end of the data in case the report contains padded zero 5875 * bytes at the end causing an invalid length value. 5876 * 5877 * When data is NULL, len is 0 so there is no need for extra ptr 5878 * check as 'ptr < data + 0' is already false in such case. 5879 */ 5880 for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) { 5881 if (ptr + 1 + *ptr > data + len) 5882 break; 5883 } 5884 5885 /* Adjust for actual length. This handles the case when remote 5886 * device is advertising with incorrect data length. 5887 */ 5888 len = ptr - data; 5889 5890 /* If the direct address is present, then this report is from 5891 * a LE Direct Advertising Report event. In that case it is 5892 * important to see if the address is matching the local 5893 * controller address. 5894 */ 5895 if (direct_addr) { 5896 direct_addr_type = ev_bdaddr_type(hdev, direct_addr_type, 5897 &bdaddr_resolved); 5898 5899 /* Only resolvable random addresses are valid for these 5900 * kind of reports and others can be ignored. 5901 */ 5902 if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type)) 5903 return; 5904 5905 /* If the controller is not using resolvable random 5906 * addresses, then this report can be ignored. 5907 */ 5908 if (!hci_dev_test_flag(hdev, HCI_PRIVACY)) 5909 return; 5910 5911 /* If the local IRK of the controller does not match 5912 * with the resolvable random address provided, then 5913 * this report can be ignored. 5914 */ 5915 if (!smp_irk_matches(hdev, hdev->irk, direct_addr)) 5916 return; 5917 } 5918 5919 /* Check if we need to convert to identity address */ 5920 irk = hci_get_irk(hdev, bdaddr, bdaddr_type); 5921 if (irk) { 5922 bdaddr = &irk->bdaddr; 5923 bdaddr_type = irk->addr_type; 5924 } 5925 5926 bdaddr_type = ev_bdaddr_type(hdev, bdaddr_type, &bdaddr_resolved); 5927 5928 /* Check if we have been requested to connect to this device. 5929 * 5930 * direct_addr is set only for directed advertising reports (it is NULL 5931 * for advertising reports) and is already verified to be RPA above. 5932 */ 5933 conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, bdaddr_resolved, 5934 type); 5935 if (!ext_adv && conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) { 5936 /* Store report for later inclusion by 5937 * mgmt_device_connected 5938 */ 5939 memcpy(conn->le_adv_data, data, len); 5940 conn->le_adv_data_len = len; 5941 } 5942 5943 /* Passive scanning shouldn't trigger any device found events, 5944 * except for devices marked as CONN_REPORT for which we do send 5945 * device found events, or advertisement monitoring requested. 5946 */ 5947 if (hdev->le_scan_type == LE_SCAN_PASSIVE) { 5948 if (type == LE_ADV_DIRECT_IND) 5949 return; 5950 5951 if (!hci_pend_le_action_lookup(&hdev->pend_le_reports, 5952 bdaddr, bdaddr_type) && 5953 idr_is_empty(&hdev->adv_monitors_idr)) 5954 return; 5955 5956 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND) 5957 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE; 5958 else 5959 flags = 0; 5960 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL, 5961 rssi, flags, data, len, NULL, 0); 5962 return; 5963 } 5964 5965 /* When receiving non-connectable or scannable undirected 5966 * advertising reports, this means that the remote device is 5967 * not connectable and then clearly indicate this in the 5968 * device found event. 5969 * 5970 * When receiving a scan response, then there is no way to 5971 * know if the remote device is connectable or not. However 5972 * since scan responses are merged with a previously seen 5973 * advertising report, the flags field from that report 5974 * will be used. 5975 * 5976 * In the really unlikely case that a controller get confused 5977 * and just sends a scan response event, then it is marked as 5978 * not connectable as well. 5979 */ 5980 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND || 5981 type == LE_ADV_SCAN_RSP) 5982 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE; 5983 else 5984 flags = 0; 5985 5986 /* If there's nothing pending either store the data from this 5987 * event or send an immediate device found event if the data 5988 * should not be stored for later. 5989 */ 5990 if (!ext_adv && !has_pending_adv_report(hdev)) { 5991 /* If the report will trigger a SCAN_REQ store it for 5992 * later merging. 5993 */ 5994 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) { 5995 store_pending_adv_report(hdev, bdaddr, bdaddr_type, 5996 rssi, flags, data, len); 5997 return; 5998 } 5999 6000 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL, 6001 rssi, flags, data, len, NULL, 0); 6002 return; 6003 } 6004 6005 /* Check if the pending report is for the same device as the new one */ 6006 match = (!bacmp(bdaddr, &d->last_adv_addr) && 6007 bdaddr_type == d->last_adv_addr_type); 6008 6009 /* If the pending data doesn't match this report or this isn't a 6010 * scan response (e.g. we got a duplicate ADV_IND) then force 6011 * sending of the pending data. 6012 */ 6013 if (type != LE_ADV_SCAN_RSP || !match) { 6014 /* Send out whatever is in the cache, but skip duplicates */ 6015 if (!match) 6016 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK, 6017 d->last_adv_addr_type, NULL, 6018 d->last_adv_rssi, d->last_adv_flags, 6019 d->last_adv_data, 6020 d->last_adv_data_len, NULL, 0); 6021 6022 /* If the new report will trigger a SCAN_REQ store it for 6023 * later merging. 6024 */ 6025 if (!ext_adv && (type == LE_ADV_IND || 6026 type == LE_ADV_SCAN_IND)) { 6027 store_pending_adv_report(hdev, bdaddr, bdaddr_type, 6028 rssi, flags, data, len); 6029 return; 6030 } 6031 6032 /* The advertising reports cannot be merged, so clear 6033 * the pending report and send out a device found event. 6034 */ 6035 clear_pending_adv_report(hdev); 6036 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL, 6037 rssi, flags, data, len, NULL, 0); 6038 return; 6039 } 6040 6041 /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and 6042 * the new event is a SCAN_RSP. We can therefore proceed with 6043 * sending a merged device found event. 6044 */ 6045 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK, 6046 d->last_adv_addr_type, NULL, rssi, d->last_adv_flags, 6047 d->last_adv_data, d->last_adv_data_len, data, len); 6048 clear_pending_adv_report(hdev); 6049 } 6050 6051 static void hci_le_adv_report_evt(struct hci_dev *hdev, void *data, 6052 struct sk_buff *skb) 6053 { 6054 struct hci_ev_le_advertising_report *ev = data; 6055 6056 if (!ev->num) 6057 return; 6058 6059 hci_dev_lock(hdev); 6060 6061 while (ev->num--) { 6062 struct hci_ev_le_advertising_info *info; 6063 s8 rssi; 6064 6065 info = hci_le_ev_skb_pull(hdev, skb, 6066 HCI_EV_LE_ADVERTISING_REPORT, 6067 sizeof(*info)); 6068 if (!info) 6069 break; 6070 6071 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_ADVERTISING_REPORT, 6072 info->length + 1)) 6073 break; 6074 6075 if (info->length <= HCI_MAX_AD_LENGTH) { 6076 rssi = info->data[info->length]; 6077 process_adv_report(hdev, info->type, &info->bdaddr, 6078 info->bdaddr_type, NULL, 0, rssi, 6079 info->data, info->length, false); 6080 } else { 6081 bt_dev_err(hdev, "Dropping invalid advertising data"); 6082 } 6083 } 6084 6085 hci_dev_unlock(hdev); 6086 } 6087 6088 static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type) 6089 { 6090 if (evt_type & LE_EXT_ADV_LEGACY_PDU) { 6091 switch (evt_type) { 6092 case LE_LEGACY_ADV_IND: 6093 return LE_ADV_IND; 6094 case LE_LEGACY_ADV_DIRECT_IND: 6095 return LE_ADV_DIRECT_IND; 6096 case LE_LEGACY_ADV_SCAN_IND: 6097 return LE_ADV_SCAN_IND; 6098 case LE_LEGACY_NONCONN_IND: 6099 return LE_ADV_NONCONN_IND; 6100 case LE_LEGACY_SCAN_RSP_ADV: 6101 case LE_LEGACY_SCAN_RSP_ADV_SCAN: 6102 return LE_ADV_SCAN_RSP; 6103 } 6104 6105 goto invalid; 6106 } 6107 6108 if (evt_type & LE_EXT_ADV_CONN_IND) { 6109 if (evt_type & LE_EXT_ADV_DIRECT_IND) 6110 return LE_ADV_DIRECT_IND; 6111 6112 return LE_ADV_IND; 6113 } 6114 6115 if (evt_type & LE_EXT_ADV_SCAN_RSP) 6116 return LE_ADV_SCAN_RSP; 6117 6118 if (evt_type & LE_EXT_ADV_SCAN_IND) 6119 return LE_ADV_SCAN_IND; 6120 6121 if (evt_type == LE_EXT_ADV_NON_CONN_IND || 6122 evt_type & LE_EXT_ADV_DIRECT_IND) 6123 return LE_ADV_NONCONN_IND; 6124 6125 invalid: 6126 bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x", 6127 evt_type); 6128 6129 return LE_ADV_INVALID; 6130 } 6131 6132 static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, void *data, 6133 struct sk_buff *skb) 6134 { 6135 struct hci_ev_le_ext_adv_report *ev = data; 6136 6137 if (!ev->num) 6138 return; 6139 6140 hci_dev_lock(hdev); 6141 6142 while (ev->num--) { 6143 struct hci_ev_le_ext_adv_info *info; 6144 u8 legacy_evt_type; 6145 u16 evt_type; 6146 6147 info = hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT, 6148 sizeof(*info)); 6149 if (!info) 6150 break; 6151 6152 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT, 6153 info->length)) 6154 break; 6155 6156 evt_type = __le16_to_cpu(info->type); 6157 legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type); 6158 if (legacy_evt_type != LE_ADV_INVALID) { 6159 process_adv_report(hdev, legacy_evt_type, &info->bdaddr, 6160 info->bdaddr_type, NULL, 0, 6161 info->rssi, info->data, info->length, 6162 !(evt_type & LE_EXT_ADV_LEGACY_PDU)); 6163 } 6164 } 6165 6166 hci_dev_unlock(hdev); 6167 } 6168 6169 static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev, void *data, 6170 struct sk_buff *skb) 6171 { 6172 struct hci_ev_le_remote_feat_complete *ev = data; 6173 struct hci_conn *conn; 6174 6175 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 6176 6177 hci_dev_lock(hdev); 6178 6179 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 6180 if (conn) { 6181 if (!ev->status) 6182 memcpy(conn->features[0], ev->features, 8); 6183 6184 if (conn->state == BT_CONFIG) { 6185 __u8 status; 6186 6187 /* If the local controller supports peripheral-initiated 6188 * features exchange, but the remote controller does 6189 * not, then it is possible that the error code 0x1a 6190 * for unsupported remote feature gets returned. 6191 * 6192 * In this specific case, allow the connection to 6193 * transition into connected state and mark it as 6194 * successful. 6195 */ 6196 if (!conn->out && ev->status == 0x1a && 6197 (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) 6198 status = 0x00; 6199 else 6200 status = ev->status; 6201 6202 conn->state = BT_CONNECTED; 6203 hci_connect_cfm(conn, status); 6204 hci_conn_drop(conn); 6205 } 6206 } 6207 6208 hci_dev_unlock(hdev); 6209 } 6210 6211 static void hci_le_ltk_request_evt(struct hci_dev *hdev, void *data, 6212 struct sk_buff *skb) 6213 { 6214 struct hci_ev_le_ltk_req *ev = data; 6215 struct hci_cp_le_ltk_reply cp; 6216 struct hci_cp_le_ltk_neg_reply neg; 6217 struct hci_conn *conn; 6218 struct smp_ltk *ltk; 6219 6220 bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle)); 6221 6222 hci_dev_lock(hdev); 6223 6224 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 6225 if (conn == NULL) 6226 goto not_found; 6227 6228 ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role); 6229 if (!ltk) 6230 goto not_found; 6231 6232 if (smp_ltk_is_sc(ltk)) { 6233 /* With SC both EDiv and Rand are set to zero */ 6234 if (ev->ediv || ev->rand) 6235 goto not_found; 6236 } else { 6237 /* For non-SC keys check that EDiv and Rand match */ 6238 if (ev->ediv != ltk->ediv || ev->rand != ltk->rand) 6239 goto not_found; 6240 } 6241 6242 memcpy(cp.ltk, ltk->val, ltk->enc_size); 6243 memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size); 6244 cp.handle = cpu_to_le16(conn->handle); 6245 6246 conn->pending_sec_level = smp_ltk_sec_level(ltk); 6247 6248 conn->enc_key_size = ltk->enc_size; 6249 6250 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp); 6251 6252 /* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a 6253 * temporary key used to encrypt a connection following 6254 * pairing. It is used during the Encrypted Session Setup to 6255 * distribute the keys. Later, security can be re-established 6256 * using a distributed LTK. 6257 */ 6258 if (ltk->type == SMP_STK) { 6259 set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags); 6260 list_del_rcu(<k->list); 6261 kfree_rcu(ltk, rcu); 6262 } else { 6263 clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags); 6264 } 6265 6266 hci_dev_unlock(hdev); 6267 6268 return; 6269 6270 not_found: 6271 neg.handle = ev->handle; 6272 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg); 6273 hci_dev_unlock(hdev); 6274 } 6275 6276 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle, 6277 u8 reason) 6278 { 6279 struct hci_cp_le_conn_param_req_neg_reply cp; 6280 6281 cp.handle = cpu_to_le16(handle); 6282 cp.reason = reason; 6283 6284 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp), 6285 &cp); 6286 } 6287 6288 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev, void *data, 6289 struct sk_buff *skb) 6290 { 6291 struct hci_ev_le_remote_conn_param_req *ev = data; 6292 struct hci_cp_le_conn_param_req_reply cp; 6293 struct hci_conn *hcon; 6294 u16 handle, min, max, latency, timeout; 6295 6296 bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle)); 6297 6298 handle = le16_to_cpu(ev->handle); 6299 min = le16_to_cpu(ev->interval_min); 6300 max = le16_to_cpu(ev->interval_max); 6301 latency = le16_to_cpu(ev->latency); 6302 timeout = le16_to_cpu(ev->timeout); 6303 6304 hcon = hci_conn_hash_lookup_handle(hdev, handle); 6305 if (!hcon || hcon->state != BT_CONNECTED) 6306 return send_conn_param_neg_reply(hdev, handle, 6307 HCI_ERROR_UNKNOWN_CONN_ID); 6308 6309 if (hci_check_conn_params(min, max, latency, timeout)) 6310 return send_conn_param_neg_reply(hdev, handle, 6311 HCI_ERROR_INVALID_LL_PARAMS); 6312 6313 if (hcon->role == HCI_ROLE_MASTER) { 6314 struct hci_conn_params *params; 6315 u8 store_hint; 6316 6317 hci_dev_lock(hdev); 6318 6319 params = hci_conn_params_lookup(hdev, &hcon->dst, 6320 hcon->dst_type); 6321 if (params) { 6322 params->conn_min_interval = min; 6323 params->conn_max_interval = max; 6324 params->conn_latency = latency; 6325 params->supervision_timeout = timeout; 6326 store_hint = 0x01; 6327 } else { 6328 store_hint = 0x00; 6329 } 6330 6331 hci_dev_unlock(hdev); 6332 6333 mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type, 6334 store_hint, min, max, latency, timeout); 6335 } 6336 6337 cp.handle = ev->handle; 6338 cp.interval_min = ev->interval_min; 6339 cp.interval_max = ev->interval_max; 6340 cp.latency = ev->latency; 6341 cp.timeout = ev->timeout; 6342 cp.min_ce_len = 0; 6343 cp.max_ce_len = 0; 6344 6345 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp); 6346 } 6347 6348 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, void *data, 6349 struct sk_buff *skb) 6350 { 6351 struct hci_ev_le_direct_adv_report *ev = data; 6352 int i; 6353 6354 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_DIRECT_ADV_REPORT, 6355 flex_array_size(ev, info, ev->num))) 6356 return; 6357 6358 if (!ev->num) 6359 return; 6360 6361 hci_dev_lock(hdev); 6362 6363 for (i = 0; i < ev->num; i++) { 6364 struct hci_ev_le_direct_adv_info *info = &ev->info[i]; 6365 6366 process_adv_report(hdev, info->type, &info->bdaddr, 6367 info->bdaddr_type, &info->direct_addr, 6368 info->direct_addr_type, info->rssi, NULL, 0, 6369 false); 6370 } 6371 6372 hci_dev_unlock(hdev); 6373 } 6374 6375 static void hci_le_phy_update_evt(struct hci_dev *hdev, void *data, 6376 struct sk_buff *skb) 6377 { 6378 struct hci_ev_le_phy_update_complete *ev = data; 6379 struct hci_conn *conn; 6380 6381 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 6382 6383 if (ev->status) 6384 return; 6385 6386 hci_dev_lock(hdev); 6387 6388 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 6389 if (!conn) 6390 goto unlock; 6391 6392 conn->le_tx_phy = ev->tx_phy; 6393 conn->le_rx_phy = ev->rx_phy; 6394 6395 unlock: 6396 hci_dev_unlock(hdev); 6397 } 6398 6399 #define HCI_LE_EV_VL(_op, _func, _min_len, _max_len) \ 6400 [_op] = { \ 6401 .func = _func, \ 6402 .min_len = _min_len, \ 6403 .max_len = _max_len, \ 6404 } 6405 6406 #define HCI_LE_EV(_op, _func, _len) \ 6407 HCI_LE_EV_VL(_op, _func, _len, _len) 6408 6409 #define HCI_LE_EV_STATUS(_op, _func) \ 6410 HCI_LE_EV(_op, _func, sizeof(struct hci_ev_status)) 6411 6412 /* Entries in this table shall have their position according to the subevent 6413 * opcode they handle so the use of the macros above is recommend since it does 6414 * attempt to initialize at its proper index using Designated Initializers that 6415 * way events without a callback function can be ommited. 6416 */ 6417 static const struct hci_le_ev { 6418 void (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb); 6419 u16 min_len; 6420 u16 max_len; 6421 } hci_le_ev_table[U8_MAX + 1] = { 6422 /* [0x01 = HCI_EV_LE_CONN_COMPLETE] */ 6423 HCI_LE_EV(HCI_EV_LE_CONN_COMPLETE, hci_le_conn_complete_evt, 6424 sizeof(struct hci_ev_le_conn_complete)), 6425 /* [0x02 = HCI_EV_LE_ADVERTISING_REPORT] */ 6426 HCI_LE_EV_VL(HCI_EV_LE_ADVERTISING_REPORT, hci_le_adv_report_evt, 6427 sizeof(struct hci_ev_le_advertising_report), 6428 HCI_MAX_EVENT_SIZE), 6429 /* [0x03 = HCI_EV_LE_CONN_UPDATE_COMPLETE] */ 6430 HCI_LE_EV(HCI_EV_LE_CONN_UPDATE_COMPLETE, 6431 hci_le_conn_update_complete_evt, 6432 sizeof(struct hci_ev_le_conn_update_complete)), 6433 /* [0x04 = HCI_EV_LE_REMOTE_FEAT_COMPLETE] */ 6434 HCI_LE_EV(HCI_EV_LE_REMOTE_FEAT_COMPLETE, 6435 hci_le_remote_feat_complete_evt, 6436 sizeof(struct hci_ev_le_remote_feat_complete)), 6437 /* [0x05 = HCI_EV_LE_LTK_REQ] */ 6438 HCI_LE_EV(HCI_EV_LE_LTK_REQ, hci_le_ltk_request_evt, 6439 sizeof(struct hci_ev_le_ltk_req)), 6440 /* [0x06 = HCI_EV_LE_REMOTE_CONN_PARAM_REQ] */ 6441 HCI_LE_EV(HCI_EV_LE_REMOTE_CONN_PARAM_REQ, 6442 hci_le_remote_conn_param_req_evt, 6443 sizeof(struct hci_ev_le_remote_conn_param_req)), 6444 /* [0x0a = HCI_EV_LE_ENHANCED_CONN_COMPLETE] */ 6445 HCI_LE_EV(HCI_EV_LE_ENHANCED_CONN_COMPLETE, 6446 hci_le_enh_conn_complete_evt, 6447 sizeof(struct hci_ev_le_enh_conn_complete)), 6448 /* [0x0b = HCI_EV_LE_DIRECT_ADV_REPORT] */ 6449 HCI_LE_EV_VL(HCI_EV_LE_DIRECT_ADV_REPORT, hci_le_direct_adv_report_evt, 6450 sizeof(struct hci_ev_le_direct_adv_report), 6451 HCI_MAX_EVENT_SIZE), 6452 /* [0x0c = HCI_EV_LE_PHY_UPDATE_COMPLETE] */ 6453 HCI_LE_EV(HCI_EV_LE_PHY_UPDATE_COMPLETE, hci_le_phy_update_evt, 6454 sizeof(struct hci_ev_le_phy_update_complete)), 6455 /* [0x0d = HCI_EV_LE_EXT_ADV_REPORT] */ 6456 HCI_LE_EV_VL(HCI_EV_LE_EXT_ADV_REPORT, hci_le_ext_adv_report_evt, 6457 sizeof(struct hci_ev_le_ext_adv_report), 6458 HCI_MAX_EVENT_SIZE), 6459 /* [0x12 = HCI_EV_LE_EXT_ADV_SET_TERM] */ 6460 HCI_LE_EV(HCI_EV_LE_EXT_ADV_SET_TERM, hci_le_ext_adv_term_evt, 6461 sizeof(struct hci_evt_le_ext_adv_set_term)), 6462 }; 6463 6464 static void hci_le_meta_evt(struct hci_dev *hdev, void *data, 6465 struct sk_buff *skb, u16 *opcode, u8 *status, 6466 hci_req_complete_t *req_complete, 6467 hci_req_complete_skb_t *req_complete_skb) 6468 { 6469 struct hci_ev_le_meta *ev = data; 6470 const struct hci_le_ev *subev; 6471 6472 bt_dev_dbg(hdev, "subevent 0x%2.2x", ev->subevent); 6473 6474 /* Only match event if command OGF is for LE */ 6475 if (hdev->sent_cmd && 6476 hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) == 0x08 && 6477 hci_skb_event(hdev->sent_cmd) == ev->subevent) { 6478 *opcode = hci_skb_opcode(hdev->sent_cmd); 6479 hci_req_cmd_complete(hdev, *opcode, 0x00, req_complete, 6480 req_complete_skb); 6481 } 6482 6483 subev = &hci_le_ev_table[ev->subevent]; 6484 if (!subev->func) 6485 return; 6486 6487 if (skb->len < subev->min_len) { 6488 bt_dev_err(hdev, "unexpected subevent 0x%2.2x length: %u < %u", 6489 ev->subevent, skb->len, subev->min_len); 6490 return; 6491 } 6492 6493 /* Just warn if the length is over max_len size it still be 6494 * possible to partially parse the event so leave to callback to 6495 * decide if that is acceptable. 6496 */ 6497 if (skb->len > subev->max_len) 6498 bt_dev_warn(hdev, "unexpected subevent 0x%2.2x length: %u > %u", 6499 ev->subevent, skb->len, subev->max_len); 6500 6501 data = hci_le_ev_skb_pull(hdev, skb, ev->subevent, subev->min_len); 6502 if (!data) 6503 return; 6504 6505 subev->func(hdev, data, skb); 6506 } 6507 6508 static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode, 6509 u8 event, struct sk_buff *skb) 6510 { 6511 struct hci_ev_cmd_complete *ev; 6512 struct hci_event_hdr *hdr; 6513 6514 if (!skb) 6515 return false; 6516 6517 hdr = hci_ev_skb_pull(hdev, skb, event, sizeof(*hdr)); 6518 if (!hdr) 6519 return false; 6520 6521 if (event) { 6522 if (hdr->evt != event) 6523 return false; 6524 return true; 6525 } 6526 6527 /* Check if request ended in Command Status - no way to retrieve 6528 * any extra parameters in this case. 6529 */ 6530 if (hdr->evt == HCI_EV_CMD_STATUS) 6531 return false; 6532 6533 if (hdr->evt != HCI_EV_CMD_COMPLETE) { 6534 bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)", 6535 hdr->evt); 6536 return false; 6537 } 6538 6539 ev = hci_cc_skb_pull(hdev, skb, opcode, sizeof(*ev)); 6540 if (!ev) 6541 return false; 6542 6543 if (opcode != __le16_to_cpu(ev->opcode)) { 6544 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode, 6545 __le16_to_cpu(ev->opcode)); 6546 return false; 6547 } 6548 6549 return true; 6550 } 6551 6552 static void hci_store_wake_reason(struct hci_dev *hdev, u8 event, 6553 struct sk_buff *skb) 6554 { 6555 struct hci_ev_le_advertising_info *adv; 6556 struct hci_ev_le_direct_adv_info *direct_adv; 6557 struct hci_ev_le_ext_adv_info *ext_adv; 6558 const struct hci_ev_conn_complete *conn_complete = (void *)skb->data; 6559 const struct hci_ev_conn_request *conn_request = (void *)skb->data; 6560 6561 hci_dev_lock(hdev); 6562 6563 /* If we are currently suspended and this is the first BT event seen, 6564 * save the wake reason associated with the event. 6565 */ 6566 if (!hdev->suspended || hdev->wake_reason) 6567 goto unlock; 6568 6569 /* Default to remote wake. Values for wake_reason are documented in the 6570 * Bluez mgmt api docs. 6571 */ 6572 hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE; 6573 6574 /* Once configured for remote wakeup, we should only wake up for 6575 * reconnections. It's useful to see which device is waking us up so 6576 * keep track of the bdaddr of the connection event that woke us up. 6577 */ 6578 if (event == HCI_EV_CONN_REQUEST) { 6579 bacpy(&hdev->wake_addr, &conn_complete->bdaddr); 6580 hdev->wake_addr_type = BDADDR_BREDR; 6581 } else if (event == HCI_EV_CONN_COMPLETE) { 6582 bacpy(&hdev->wake_addr, &conn_request->bdaddr); 6583 hdev->wake_addr_type = BDADDR_BREDR; 6584 } else if (event == HCI_EV_LE_META) { 6585 struct hci_ev_le_meta *le_ev = (void *)skb->data; 6586 u8 subevent = le_ev->subevent; 6587 u8 *ptr = &skb->data[sizeof(*le_ev)]; 6588 u8 num_reports = *ptr; 6589 6590 if ((subevent == HCI_EV_LE_ADVERTISING_REPORT || 6591 subevent == HCI_EV_LE_DIRECT_ADV_REPORT || 6592 subevent == HCI_EV_LE_EXT_ADV_REPORT) && 6593 num_reports) { 6594 adv = (void *)(ptr + 1); 6595 direct_adv = (void *)(ptr + 1); 6596 ext_adv = (void *)(ptr + 1); 6597 6598 switch (subevent) { 6599 case HCI_EV_LE_ADVERTISING_REPORT: 6600 bacpy(&hdev->wake_addr, &adv->bdaddr); 6601 hdev->wake_addr_type = adv->bdaddr_type; 6602 break; 6603 case HCI_EV_LE_DIRECT_ADV_REPORT: 6604 bacpy(&hdev->wake_addr, &direct_adv->bdaddr); 6605 hdev->wake_addr_type = direct_adv->bdaddr_type; 6606 break; 6607 case HCI_EV_LE_EXT_ADV_REPORT: 6608 bacpy(&hdev->wake_addr, &ext_adv->bdaddr); 6609 hdev->wake_addr_type = ext_adv->bdaddr_type; 6610 break; 6611 } 6612 } 6613 } else { 6614 hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED; 6615 } 6616 6617 unlock: 6618 hci_dev_unlock(hdev); 6619 } 6620 6621 #define HCI_EV_VL(_op, _func, _min_len, _max_len) \ 6622 [_op] = { \ 6623 .req = false, \ 6624 .func = _func, \ 6625 .min_len = _min_len, \ 6626 .max_len = _max_len, \ 6627 } 6628 6629 #define HCI_EV(_op, _func, _len) \ 6630 HCI_EV_VL(_op, _func, _len, _len) 6631 6632 #define HCI_EV_STATUS(_op, _func) \ 6633 HCI_EV(_op, _func, sizeof(struct hci_ev_status)) 6634 6635 #define HCI_EV_REQ_VL(_op, _func, _min_len, _max_len) \ 6636 [_op] = { \ 6637 .req = true, \ 6638 .func_req = _func, \ 6639 .min_len = _min_len, \ 6640 .max_len = _max_len, \ 6641 } 6642 6643 #define HCI_EV_REQ(_op, _func, _len) \ 6644 HCI_EV_REQ_VL(_op, _func, _len, _len) 6645 6646 /* Entries in this table shall have their position according to the event opcode 6647 * they handle so the use of the macros above is recommend since it does attempt 6648 * to initialize at its proper index using Designated Initializers that way 6649 * events without a callback function don't have entered. 6650 */ 6651 static const struct hci_ev { 6652 bool req; 6653 union { 6654 void (*func)(struct hci_dev *hdev, void *data, 6655 struct sk_buff *skb); 6656 void (*func_req)(struct hci_dev *hdev, void *data, 6657 struct sk_buff *skb, u16 *opcode, u8 *status, 6658 hci_req_complete_t *req_complete, 6659 hci_req_complete_skb_t *req_complete_skb); 6660 }; 6661 u16 min_len; 6662 u16 max_len; 6663 } hci_ev_table[U8_MAX + 1] = { 6664 /* [0x01 = HCI_EV_INQUIRY_COMPLETE] */ 6665 HCI_EV_STATUS(HCI_EV_INQUIRY_COMPLETE, hci_inquiry_complete_evt), 6666 /* [0x02 = HCI_EV_INQUIRY_RESULT] */ 6667 HCI_EV_VL(HCI_EV_INQUIRY_RESULT, hci_inquiry_result_evt, 6668 sizeof(struct hci_ev_inquiry_result), HCI_MAX_EVENT_SIZE), 6669 /* [0x03 = HCI_EV_CONN_COMPLETE] */ 6670 HCI_EV(HCI_EV_CONN_COMPLETE, hci_conn_complete_evt, 6671 sizeof(struct hci_ev_conn_complete)), 6672 /* [0x04 = HCI_EV_CONN_REQUEST] */ 6673 HCI_EV(HCI_EV_CONN_REQUEST, hci_conn_request_evt, 6674 sizeof(struct hci_ev_conn_request)), 6675 /* [0x05 = HCI_EV_DISCONN_COMPLETE] */ 6676 HCI_EV(HCI_EV_DISCONN_COMPLETE, hci_disconn_complete_evt, 6677 sizeof(struct hci_ev_disconn_complete)), 6678 /* [0x06 = HCI_EV_AUTH_COMPLETE] */ 6679 HCI_EV(HCI_EV_AUTH_COMPLETE, hci_auth_complete_evt, 6680 sizeof(struct hci_ev_auth_complete)), 6681 /* [0x07 = HCI_EV_REMOTE_NAME] */ 6682 HCI_EV(HCI_EV_REMOTE_NAME, hci_remote_name_evt, 6683 sizeof(struct hci_ev_remote_name)), 6684 /* [0x08 = HCI_EV_ENCRYPT_CHANGE] */ 6685 HCI_EV(HCI_EV_ENCRYPT_CHANGE, hci_encrypt_change_evt, 6686 sizeof(struct hci_ev_encrypt_change)), 6687 /* [0x09 = HCI_EV_CHANGE_LINK_KEY_COMPLETE] */ 6688 HCI_EV(HCI_EV_CHANGE_LINK_KEY_COMPLETE, 6689 hci_change_link_key_complete_evt, 6690 sizeof(struct hci_ev_change_link_key_complete)), 6691 /* [0x0b = HCI_EV_REMOTE_FEATURES] */ 6692 HCI_EV(HCI_EV_REMOTE_FEATURES, hci_remote_features_evt, 6693 sizeof(struct hci_ev_remote_features)), 6694 /* [0x0e = HCI_EV_CMD_COMPLETE] */ 6695 HCI_EV_REQ_VL(HCI_EV_CMD_COMPLETE, hci_cmd_complete_evt, 6696 sizeof(struct hci_ev_cmd_complete), HCI_MAX_EVENT_SIZE), 6697 /* [0x0f = HCI_EV_CMD_STATUS] */ 6698 HCI_EV_REQ(HCI_EV_CMD_STATUS, hci_cmd_status_evt, 6699 sizeof(struct hci_ev_cmd_status)), 6700 /* [0x10 = HCI_EV_CMD_STATUS] */ 6701 HCI_EV(HCI_EV_HARDWARE_ERROR, hci_hardware_error_evt, 6702 sizeof(struct hci_ev_hardware_error)), 6703 /* [0x12 = HCI_EV_ROLE_CHANGE] */ 6704 HCI_EV(HCI_EV_ROLE_CHANGE, hci_role_change_evt, 6705 sizeof(struct hci_ev_role_change)), 6706 /* [0x13 = HCI_EV_NUM_COMP_PKTS] */ 6707 HCI_EV_VL(HCI_EV_NUM_COMP_PKTS, hci_num_comp_pkts_evt, 6708 sizeof(struct hci_ev_num_comp_pkts), HCI_MAX_EVENT_SIZE), 6709 /* [0x14 = HCI_EV_MODE_CHANGE] */ 6710 HCI_EV(HCI_EV_MODE_CHANGE, hci_mode_change_evt, 6711 sizeof(struct hci_ev_mode_change)), 6712 /* [0x16 = HCI_EV_PIN_CODE_REQ] */ 6713 HCI_EV(HCI_EV_PIN_CODE_REQ, hci_pin_code_request_evt, 6714 sizeof(struct hci_ev_pin_code_req)), 6715 /* [0x17 = HCI_EV_LINK_KEY_REQ] */ 6716 HCI_EV(HCI_EV_LINK_KEY_REQ, hci_link_key_request_evt, 6717 sizeof(struct hci_ev_link_key_req)), 6718 /* [0x18 = HCI_EV_LINK_KEY_NOTIFY] */ 6719 HCI_EV(HCI_EV_LINK_KEY_NOTIFY, hci_link_key_notify_evt, 6720 sizeof(struct hci_ev_link_key_notify)), 6721 /* [0x1c = HCI_EV_CLOCK_OFFSET] */ 6722 HCI_EV(HCI_EV_CLOCK_OFFSET, hci_clock_offset_evt, 6723 sizeof(struct hci_ev_clock_offset)), 6724 /* [0x1d = HCI_EV_PKT_TYPE_CHANGE] */ 6725 HCI_EV(HCI_EV_PKT_TYPE_CHANGE, hci_pkt_type_change_evt, 6726 sizeof(struct hci_ev_pkt_type_change)), 6727 /* [0x20 = HCI_EV_PSCAN_REP_MODE] */ 6728 HCI_EV(HCI_EV_PSCAN_REP_MODE, hci_pscan_rep_mode_evt, 6729 sizeof(struct hci_ev_pscan_rep_mode)), 6730 /* [0x22 = HCI_EV_INQUIRY_RESULT_WITH_RSSI] */ 6731 HCI_EV_VL(HCI_EV_INQUIRY_RESULT_WITH_RSSI, 6732 hci_inquiry_result_with_rssi_evt, 6733 sizeof(struct hci_ev_inquiry_result_rssi), 6734 HCI_MAX_EVENT_SIZE), 6735 /* [0x23 = HCI_EV_REMOTE_EXT_FEATURES] */ 6736 HCI_EV(HCI_EV_REMOTE_EXT_FEATURES, hci_remote_ext_features_evt, 6737 sizeof(struct hci_ev_remote_ext_features)), 6738 /* [0x2c = HCI_EV_SYNC_CONN_COMPLETE] */ 6739 HCI_EV(HCI_EV_SYNC_CONN_COMPLETE, hci_sync_conn_complete_evt, 6740 sizeof(struct hci_ev_sync_conn_complete)), 6741 /* [0x2d = HCI_EV_EXTENDED_INQUIRY_RESULT] */ 6742 HCI_EV_VL(HCI_EV_EXTENDED_INQUIRY_RESULT, 6743 hci_extended_inquiry_result_evt, 6744 sizeof(struct hci_ev_ext_inquiry_result), HCI_MAX_EVENT_SIZE), 6745 /* [0x30 = HCI_EV_KEY_REFRESH_COMPLETE] */ 6746 HCI_EV(HCI_EV_KEY_REFRESH_COMPLETE, hci_key_refresh_complete_evt, 6747 sizeof(struct hci_ev_key_refresh_complete)), 6748 /* [0x31 = HCI_EV_IO_CAPA_REQUEST] */ 6749 HCI_EV(HCI_EV_IO_CAPA_REQUEST, hci_io_capa_request_evt, 6750 sizeof(struct hci_ev_io_capa_request)), 6751 /* [0x32 = HCI_EV_IO_CAPA_REPLY] */ 6752 HCI_EV(HCI_EV_IO_CAPA_REPLY, hci_io_capa_reply_evt, 6753 sizeof(struct hci_ev_io_capa_reply)), 6754 /* [0x33 = HCI_EV_USER_CONFIRM_REQUEST] */ 6755 HCI_EV(HCI_EV_USER_CONFIRM_REQUEST, hci_user_confirm_request_evt, 6756 sizeof(struct hci_ev_user_confirm_req)), 6757 /* [0x34 = HCI_EV_USER_PASSKEY_REQUEST] */ 6758 HCI_EV(HCI_EV_USER_PASSKEY_REQUEST, hci_user_passkey_request_evt, 6759 sizeof(struct hci_ev_user_passkey_req)), 6760 /* [0x35 = HCI_EV_REMOTE_OOB_DATA_REQUEST] */ 6761 HCI_EV(HCI_EV_REMOTE_OOB_DATA_REQUEST, hci_remote_oob_data_request_evt, 6762 sizeof(struct hci_ev_remote_oob_data_request)), 6763 /* [0x36 = HCI_EV_SIMPLE_PAIR_COMPLETE] */ 6764 HCI_EV(HCI_EV_SIMPLE_PAIR_COMPLETE, hci_simple_pair_complete_evt, 6765 sizeof(struct hci_ev_simple_pair_complete)), 6766 /* [0x3b = HCI_EV_USER_PASSKEY_NOTIFY] */ 6767 HCI_EV(HCI_EV_USER_PASSKEY_NOTIFY, hci_user_passkey_notify_evt, 6768 sizeof(struct hci_ev_user_passkey_notify)), 6769 /* [0x3c = HCI_EV_KEYPRESS_NOTIFY] */ 6770 HCI_EV(HCI_EV_KEYPRESS_NOTIFY, hci_keypress_notify_evt, 6771 sizeof(struct hci_ev_keypress_notify)), 6772 /* [0x3d = HCI_EV_REMOTE_HOST_FEATURES] */ 6773 HCI_EV(HCI_EV_REMOTE_HOST_FEATURES, hci_remote_host_features_evt, 6774 sizeof(struct hci_ev_remote_host_features)), 6775 /* [0x3e = HCI_EV_LE_META] */ 6776 HCI_EV_REQ_VL(HCI_EV_LE_META, hci_le_meta_evt, 6777 sizeof(struct hci_ev_le_meta), HCI_MAX_EVENT_SIZE), 6778 #if IS_ENABLED(CONFIG_BT_HS) 6779 /* [0x40 = HCI_EV_PHY_LINK_COMPLETE] */ 6780 HCI_EV(HCI_EV_PHY_LINK_COMPLETE, hci_phy_link_complete_evt, 6781 sizeof(struct hci_ev_phy_link_complete)), 6782 /* [0x41 = HCI_EV_CHANNEL_SELECTED] */ 6783 HCI_EV(HCI_EV_CHANNEL_SELECTED, hci_chan_selected_evt, 6784 sizeof(struct hci_ev_channel_selected)), 6785 /* [0x42 = HCI_EV_DISCONN_PHY_LINK_COMPLETE] */ 6786 HCI_EV(HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE, 6787 hci_disconn_loglink_complete_evt, 6788 sizeof(struct hci_ev_disconn_logical_link_complete)), 6789 /* [0x45 = HCI_EV_LOGICAL_LINK_COMPLETE] */ 6790 HCI_EV(HCI_EV_LOGICAL_LINK_COMPLETE, hci_loglink_complete_evt, 6791 sizeof(struct hci_ev_logical_link_complete)), 6792 /* [0x46 = HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE] */ 6793 HCI_EV(HCI_EV_DISCONN_PHY_LINK_COMPLETE, 6794 hci_disconn_phylink_complete_evt, 6795 sizeof(struct hci_ev_disconn_phy_link_complete)), 6796 #endif 6797 /* [0x48 = HCI_EV_NUM_COMP_BLOCKS] */ 6798 HCI_EV(HCI_EV_NUM_COMP_BLOCKS, hci_num_comp_blocks_evt, 6799 sizeof(struct hci_ev_num_comp_blocks)), 6800 /* [0xff = HCI_EV_VENDOR] */ 6801 HCI_EV(HCI_EV_VENDOR, msft_vendor_evt, 0), 6802 }; 6803 6804 static void hci_event_func(struct hci_dev *hdev, u8 event, struct sk_buff *skb, 6805 u16 *opcode, u8 *status, 6806 hci_req_complete_t *req_complete, 6807 hci_req_complete_skb_t *req_complete_skb) 6808 { 6809 const struct hci_ev *ev = &hci_ev_table[event]; 6810 void *data; 6811 6812 if (!ev->func) 6813 return; 6814 6815 if (skb->len < ev->min_len) { 6816 bt_dev_err(hdev, "unexpected event 0x%2.2x length: %u < %u", 6817 event, skb->len, ev->min_len); 6818 return; 6819 } 6820 6821 /* Just warn if the length is over max_len size it still be 6822 * possible to partially parse the event so leave to callback to 6823 * decide if that is acceptable. 6824 */ 6825 if (skb->len > ev->max_len) 6826 bt_dev_warn(hdev, "unexpected event 0x%2.2x length: %u > %u", 6827 event, skb->len, ev->max_len); 6828 6829 data = hci_ev_skb_pull(hdev, skb, event, ev->min_len); 6830 if (!data) 6831 return; 6832 6833 if (ev->req) 6834 ev->func_req(hdev, data, skb, opcode, status, req_complete, 6835 req_complete_skb); 6836 else 6837 ev->func(hdev, data, skb); 6838 } 6839 6840 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) 6841 { 6842 struct hci_event_hdr *hdr = (void *) skb->data; 6843 hci_req_complete_t req_complete = NULL; 6844 hci_req_complete_skb_t req_complete_skb = NULL; 6845 struct sk_buff *orig_skb = NULL; 6846 u8 status = 0, event, req_evt = 0; 6847 u16 opcode = HCI_OP_NOP; 6848 6849 if (skb->len < sizeof(*hdr)) { 6850 bt_dev_err(hdev, "Malformed HCI Event"); 6851 goto done; 6852 } 6853 6854 event = hdr->evt; 6855 if (!event) { 6856 bt_dev_warn(hdev, "Received unexpected HCI Event 0x%2.2x", 6857 event); 6858 goto done; 6859 } 6860 6861 /* Only match event if command OGF is not for LE */ 6862 if (hdev->sent_cmd && 6863 hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) != 0x08 && 6864 hci_skb_event(hdev->sent_cmd) == event) { 6865 hci_req_cmd_complete(hdev, hci_skb_opcode(hdev->sent_cmd), 6866 status, &req_complete, &req_complete_skb); 6867 req_evt = event; 6868 } 6869 6870 /* If it looks like we might end up having to call 6871 * req_complete_skb, store a pristine copy of the skb since the 6872 * various handlers may modify the original one through 6873 * skb_pull() calls, etc. 6874 */ 6875 if (req_complete_skb || event == HCI_EV_CMD_STATUS || 6876 event == HCI_EV_CMD_COMPLETE) 6877 orig_skb = skb_clone(skb, GFP_KERNEL); 6878 6879 skb_pull(skb, HCI_EVENT_HDR_SIZE); 6880 6881 /* Store wake reason if we're suspended */ 6882 hci_store_wake_reason(hdev, event, skb); 6883 6884 bt_dev_dbg(hdev, "event 0x%2.2x", event); 6885 6886 hci_event_func(hdev, event, skb, &opcode, &status, &req_complete, 6887 &req_complete_skb); 6888 6889 if (req_complete) { 6890 req_complete(hdev, status, opcode); 6891 } else if (req_complete_skb) { 6892 if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) { 6893 kfree_skb(orig_skb); 6894 orig_skb = NULL; 6895 } 6896 req_complete_skb(hdev, status, opcode, orig_skb); 6897 } 6898 6899 done: 6900 kfree_skb(orig_skb); 6901 kfree_skb(skb); 6902 hdev->stat.evt_rx++; 6903 } 6904