/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI event handling. */

#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "a2mp.h"
#include "amp.h"
#include "smp.h"
#include "msft.h"
#include "eir.h"

#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"

#define secs_to_jiffies(_secs) msecs_to_jiffies((_secs) * 1000)

/* Handle HCI Event packets */

static void *hci_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
			     u8 ev, size_t len)
{
	void *data;

	data = skb_pull_data(skb, len);
	if (!data)
		bt_dev_err(hdev, "Malformed Event: 0x%2.2x", ev);

	return data;
}

static void *hci_cc_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
			     u16 op, size_t len)
{
	void *data;

	data = skb_pull_data(skb, len);
	if (!data)
		bt_dev_err(hdev, "Malformed Command Complete: 0x%4.4x", op);

	return data;
}

static void *hci_le_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
				u8 ev, size_t len)
{
	void *data;

	data = skb_pull_data(skb, len);
	if (!data)
		bt_dev_err(hdev, "Malformed LE Event: 0x%2.2x", ev);

	return data;
}
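
/* Each hci_cc_* handler below services the Command Complete event of one
 * HCI command. By the time a handler runs, the return parameters have been
 * length-checked and detached from the skb with hci_cc_skb_pull() above,
 * so the handler only interprets the payload and returns the HCI status
 * byte for the request-completion machinery.
 */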

static u8 hci_cc_inquiry_cancel(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	/* It is possible that we receive Inquiry Complete event right
	 * before we receive Inquiry Cancel Command Complete event, in
	 * which case the latter event should have status of Command
	 * Disallowed (0x0c). This should not be treated as an error, since
	 * we actually achieve what Inquiry Cancel wants to achieve,
	 * which is to end the last Inquiry session.
	 */
	if (rp->status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
		bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
		rp->status = 0x00;
	}

	if (rp->status)
		return rp->status;

	clear_bit(HCI_INQUIRY, &hdev->flags);
	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	hci_dev_lock(hdev);
	/* Set discovery state to stopped if we're not doing LE active
	 * scanning.
	 */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
	    hdev->le_scan_type != LE_SCAN_ACTIVE)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);

	return rp->status;
}

static u8 hci_cc_periodic_inq(struct hci_dev *hdev, void *data,
			      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);

	return rp->status;
}

static u8 hci_cc_exit_periodic_inq(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);

	hci_conn_check_pending(hdev);

	return rp->status;
}

static u8 hci_cc_remote_name_req_cancel(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	return rp->status;
}

static u8 hci_cc_role_discovery(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_rp_role_discovery *rp = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->role = rp->role;

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_link_policy(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_rp_read_link_policy *rp = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = __le16_to_cpu(rp->policy);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_write_link_policy(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = data;
	struct hci_conn *conn;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_def_link_policy(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_read_def_link_policy *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->link_policy = __le16_to_cpu(rp->policy);

	return rp->status;
}
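
/* Note on the write-type handlers below: a Command Complete event for an
 * HCI write command carries only a status, so the value that was written
 * must be recovered from the command still held in hdev->sent_cmd. The
 * common shape is therefore (sketch, not a real handler):
 *
 *	sent = hci_sent_cmd_data(hdev, HCI_OP_...);
 *	if (!sent)
 *		return rp->status;
 *	... cache the parameters from sent on hdev or the connection ...
 */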

static u8 hci_cc_write_def_link_policy(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
	if (!sent)
		return rp->status;

	hdev->link_policy = get_unaligned_le16(sent);

	return rp->status;
}

static u8 hci_cc_reset(struct hci_dev *hdev, void *data, struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	clear_bit(HCI_RESET, &hdev->flags);

	if (rp->status)
		return rp->status;

	/* Reset all non-persistent flags */
	hci_dev_clear_volatile_flags(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;

	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
	hdev->scan_rsp_data_len = 0;

	hdev->le_scan_type = LE_SCAN_PASSIVE;

	hdev->ssp_debug_mode = 0;

	hci_bdaddr_list_clear(&hdev->le_accept_list);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);

	return rp->status;
}

static u8 hci_cc_read_stored_link_key(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_read_stored_link_key *rp = data;
	struct hci_cp_read_stored_link_key *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
	if (!sent)
		return rp->status;

	if (!rp->status && sent->read_all == 0x01) {
		hdev->stored_max_keys = le16_to_cpu(rp->max_keys);
		hdev->stored_num_keys = le16_to_cpu(rp->num_keys);
	}

	return rp->status;
}

static u8 hci_cc_delete_stored_link_key(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_delete_stored_link_key *rp = data;
	u16 num_keys;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	/* Convert from little endian before comparing; comparing the raw
	 * __le16 against the host-order counter would be wrong on
	 * big-endian systems.
	 */
	num_keys = le16_to_cpu(rp->num_keys);
	if (num_keys <= hdev->stored_num_keys)
		hdev->stored_num_keys -= num_keys;
	else
		hdev->stored_num_keys = 0;

	return rp->status;
}

static u8 hci_cc_write_local_name(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_local_name_complete(hdev, sent, rp->status);
	else if (!rp->status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_local_name(struct hci_dev *hdev, void *data,
				 struct sk_buff *skb)
{
	struct hci_rp_read_local_name *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG))
		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);

	return rp->status;
}

static u8 hci_cc_write_auth_enable(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (!rp->status) {
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_auth_enable_complete(hdev, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_write_encrypt_mode(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u8 param;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
	if (!sent)
		return rp->status;

	param = *((__u8 *) sent);

	if (param)
		set_bit(HCI_ENCRYPT, &hdev->flags);
	else
		clear_bit(HCI_ENCRYPT, &hdev->flags);

	return rp->status;
}

static u8 hci_cc_write_scan_enable(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u8 param;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return rp->status;

	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	if (rp->status) {
		hdev->discov_timeout = 0;
		goto done;
	}

	if (param & SCAN_INQUIRY)
		set_bit(HCI_ISCAN, &hdev->flags);
	else
		clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_PAGE)
		set_bit(HCI_PSCAN, &hdev->flags);
	else
		clear_bit(HCI_PSCAN, &hdev->flags);

done:
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_set_event_filter(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_set_event_filter *cp;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_SET_EVENT_FLT);
	if (!sent)
		return rp->status;

	cp = (struct hci_cp_set_event_filter *)sent;

	if (cp->flt_type == HCI_FLT_CLEAR_ALL)
		hci_dev_clear_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
	else
		hci_dev_set_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);

	return rp->status;
}

static u8 hci_cc_read_class_of_dev(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_read_class_of_dev *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	memcpy(hdev->dev_class, rp->dev_class, 3);

	bt_dev_dbg(hdev, "class 0x%.2x%.2x%.2x", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);

	return rp->status;
}

static u8 hci_cc_write_class_of_dev(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (!rp->status)
		memcpy(hdev->dev_class, sent, 3);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_class_of_dev_complete(hdev, sent, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_voice_setting(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_read_voice_setting *rp = data;
	__u16 setting;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	setting = __le16_to_cpu(rp->voice_setting);

	if (hdev->voice_setting == setting)
		return rp->status;

	hdev->voice_setting = setting;

	bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);

	return rp->status;
}

static u8 hci_cc_write_voice_setting(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u16 setting;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
	if (!sent)
		return rp->status;

	setting = get_unaligned_le16(sent);

	if (hdev->voice_setting == setting)
		return rp->status;

	hdev->voice_setting = setting;

	bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);

	return rp->status;
}

static u8 hci_cc_read_num_supported_iac(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_read_num_supported_iac *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->num_iac = rp->num_iac;

	bt_dev_dbg(hdev, "num iac %d", hdev->num_iac);

	return rp->status;
}

static u8 hci_cc_write_ssp_mode(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_write_ssp_mode *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (!rp->status) {
		if (sent->mode)
			hdev->features[1][0] |= LMP_HOST_SSP;
		else
			hdev->features[1][0] &= ~LMP_HOST_SSP;
	}

	if (!rp->status) {
		if (sent->mode)
			hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
	}

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_write_sc_support(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_write_sc_support *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (!rp->status) {
		if (sent->support)
			hdev->features[1][0] |= LMP_HOST_SC;
		else
			hdev->features[1][0] &= ~LMP_HOST_SC;
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT) && !rp->status) {
		if (sent->support)
			hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
	}

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_local_version(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_read_local_version *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hdev->hci_ver = rp->hci_ver;
		hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
		hdev->lmp_ver = rp->lmp_ver;
		hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
	}

	return rp->status;
}
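
/* hdev->commands is the 64 octet Supported Commands bitmask reported by
 * the controller; the rest of the stack tests individual bits in it
 * before issuing optional HCI commands.
 */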

static u8 hci_cc_read_local_commands(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_local_commands *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG))
		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));

	return rp->status;
}

static u8 hci_cc_read_auth_payload_timeout(struct hci_dev *hdev, void *data,
					   struct sk_buff *skb)
{
	struct hci_rp_read_auth_payload_to *rp = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_write_auth_payload_timeout(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_rp_write_auth_payload_to *rp = data;
	struct hci_conn *conn;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->auth_payload_timeout = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);

	return rp->status;
}
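
/* hdev->features holds the LMP feature pages: page 0 is filled in by
 * hci_cc_read_local_features() below, higher pages (up to HCI_MAX_PAGES)
 * by hci_cc_read_local_ext_features(). Page 0 also determines which
 * ACL/SCO/eSCO packet types may be used with this controller.
 */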

static u8 hci_cc_read_local_features(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device.
	 */

	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);

	return rp->status;
}

static u8 hci_cc_read_local_ext_features(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_rp_read_local_ext_features *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (hdev->max_page < rp->max_page)
		hdev->max_page = rp->max_page;

	if (rp->page < HCI_MAX_PAGES)
		memcpy(hdev->features[rp->page], rp->features, 8);

	return rp->status;
}

static u8 hci_cc_read_flow_control_mode(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_read_flow_control_mode *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->flow_ctl_mode = rp->mode;

	return rp->status;
}

static u8 hci_cc_read_buffer_size(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
		hdev->sco_mtu = 64;
		hdev->sco_pkts = 8;
	}

	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);

	return rp->status;
}

static u8 hci_cc_read_bd_addr(struct hci_dev *hdev, void *data,
			      struct sk_buff *skb)
{
	struct hci_rp_read_bd_addr *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (test_bit(HCI_INIT, &hdev->flags))
		bacpy(&hdev->bdaddr, &rp->bdaddr);

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		bacpy(&hdev->setup_addr, &rp->bdaddr);

	return rp->status;
}

static u8 hci_cc_read_local_pairing_opts(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_rp_read_local_pairing_opts *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hdev->pairing_opts = rp->pairing_opts;
		hdev->max_enc_key_size = rp->max_key_size;
	}

	return rp->status;
}

static u8 hci_cc_read_page_scan_activity(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_activity *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (test_bit(HCI_INIT, &hdev->flags)) {
		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
		hdev->page_scan_window = __le16_to_cpu(rp->window);
	}

	return rp->status;
}

static u8 hci_cc_write_page_scan_activity(struct hci_dev *hdev, void *data,
					  struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_write_page_scan_activity *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
	if (!sent)
		return rp->status;

	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
	hdev->page_scan_window = __le16_to_cpu(sent->window);

	return rp->status;
}

static u8 hci_cc_read_page_scan_type(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_type *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->page_scan_type = rp->type;

	return rp->status;
}

static u8 hci_cc_write_page_scan_type(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	u8 *type;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
	if (type)
		hdev->page_scan_type = *type;

	return rp->status;
}

static u8 hci_cc_read_data_block_size(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_read_data_block_size *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
	hdev->block_len = __le16_to_cpu(rp->block_len);
	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);

	hdev->block_cnt = hdev->num_blocks;

	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
	       hdev->block_cnt, hdev->block_len);

	return rp->status;
}

static u8 hci_cc_read_clock(struct hci_dev *hdev, void *data,
			    struct sk_buff *skb)
{
	struct hci_rp_read_clock *rp = data;
	struct hci_cp_read_clock *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!cp)
		goto unlock;

	if (cp->which == 0x00) {
		hdev->clock = le32_to_cpu(rp->clock);
		goto unlock;
	}

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		conn->clock = le32_to_cpu(rp->clock);
		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
	}

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}

static u8 hci_cc_read_local_amp_info(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_info *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->amp_status = rp->amp_status;
	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
	hdev->amp_type = rp->amp_type;
	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);

	return rp->status;
}

static u8 hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_rp_read_inq_rsp_tx_power *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->inq_tx_power = rp->tx_power;

	return rp->status;
}

static u8 hci_cc_read_def_err_data_reporting(struct hci_dev *hdev, void *data,
					     struct sk_buff *skb)
{
	struct hci_rp_read_def_err_data_reporting *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->err_data_reporting = rp->err_data_reporting;

	return rp->status;
}

static u8 hci_cc_write_def_err_data_reporting(struct hci_dev *hdev, void *data,
					      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_write_def_err_data_reporting *cp;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
	if (!cp)
		return rp->status;

	hdev->err_data_reporting = cp->err_data_reporting;

	return rp->status;
}

static u8 hci_cc_pin_code_reply(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}

static u8 hci_cc_pin_code_neg_reply(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_pin_code_neg_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
						 rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_read_buffer_size(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_le_read_buffer_size *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
	hdev->le_pkts = rp->le_max_pkt;

	hdev->le_cnt = hdev->le_pkts;

	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);

	return rp->status;
}
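
/* hdev->le_cnt, initialised above from the LE buffer counts, tracks how
 * many ACL data packets the controller can still queue for LE links; it
 * is consumed on transmit and replenished by the Number of Completed
 * Packets event handled later in this file.
 */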

static u8 hci_cc_le_read_local_features(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_local_features *rp = data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return rp->status;

	memcpy(hdev->le_features, rp->features, 8);

	return rp->status;
}

static u8 hci_cc_le_read_adv_tx_power(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_le_read_adv_tx_power *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->adv_tx_power = rp->tx_power;

	return rp->status;
}

static u8 hci_cc_user_confirm_reply(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
						 rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_user_confirm_neg_reply(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_user_passkey_reply(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
						 0, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_user_passkey_neg_reply(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_local_oob_data(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_data *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	return rp->status;
}

static u8 hci_cc_read_local_oob_ext_data(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_ext_data *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	return rp->status;
}
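
/* Whenever the random address just written matches the locally generated
 * RPA, the handlers below re-arm the corresponding rpa_expired delayed
 * work so that a fresh RPA is generated after hdev->rpa_timeout seconds.
 */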

static u8 hci_cc_le_set_random_addr(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	bdaddr_t *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	bacpy(&hdev->random_addr, sent);

	if (!bacmp(&hdev->rpa, sent)) {
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired,
				   secs_to_jiffies(hdev->rpa_timeout));
	}

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_set_default_phy(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_default_phy *cp;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	hdev->le_tx_def_phys = cp->tx_phys;
	hdev->le_rx_def_phys = cp->rx_phys;

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_adv_set_rand_addr *cp;
	struct adv_info *adv;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
	/* Update only the adv instance, since handle 0x00 shall use
	 * HCI_OP_LE_SET_RANDOM_ADDR instead: that command covers both
	 * extended and non-extended advertising.
	 */
	if (!cp || !cp->handle)
		return rp->status;

	hci_dev_lock(hdev);

	adv = hci_find_adv_instance(hdev, cp->handle);
	if (adv) {
		bacpy(&adv->random_addr, &cp->bdaddr);
		if (!bacmp(&hdev->rpa, &cp->bdaddr)) {
			adv->rpa_expired = false;
			queue_delayed_work(hdev->workqueue,
					   &adv->rpa_expired_cb,
					   secs_to_jiffies(hdev->rpa_timeout));
		}
	}

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_remove_adv_set(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	u8 *instance;
	int err;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	instance = hci_sent_cmd_data(hdev, HCI_OP_LE_REMOVE_ADV_SET);
	if (!instance)
		return rp->status;

	hci_dev_lock(hdev);

	err = hci_remove_adv_instance(hdev, *instance);
	if (!err)
		mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd), hdev,
					 *instance);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_clear_adv_sets(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct adv_info *adv, *n;
	int err;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (!hci_sent_cmd_data(hdev, HCI_OP_LE_CLEAR_ADV_SETS))
		return rp->status;

	hci_dev_lock(hdev);

	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
		u8 instance = adv->instance;

		err = hci_remove_adv_instance(hdev, instance);
		if (!err)
			mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd),
						 hdev, instance);
	}

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_read_transmit_power(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_transmit_power *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->min_le_tx_power = rp->min_le_tx_power;
	hdev->max_le_tx_power = rp->max_le_tx_power;

	return rp->status;
}

static u8 hci_cc_le_set_privacy_mode(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_privacy_mode *cp;
	struct hci_conn_params *params;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PRIVACY_MODE);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	params = hci_conn_params_lookup(hdev, &cp->bdaddr, cp->bdaddr_type);
	if (params)
		params->privacy_mode = cp->mode;

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_set_adv_enable(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u8 *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	/* If we're doing connection initiation as peripheral, set a
	 * timeout in case something goes wrong.
	 */
	if (*sent) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_adv_enable *cp;
	struct hci_cp_ext_adv_set *set;
	struct adv_info *adv = NULL, *n;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
	if (!cp)
		return rp->status;

	set = (void *)cp->data;

	hci_dev_lock(hdev);

	if (cp->num_of_sets)
		adv = hci_find_adv_instance(hdev, set->handle);

	if (cp->enable) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		if (adv)
			adv->enabled = true;

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		if (cp->num_of_sets) {
			if (adv)
				adv->enabled = false;

			/* If just one instance was disabled, check if any
			 * other instance is still enabled before clearing
			 * HCI_LE_ADV.
			 */
			list_for_each_entry_safe(adv, n, &hdev->adv_instances,
						 list) {
				if (adv->enabled)
					goto unlock;
			}
		} else {
			/* All instances shall be considered disabled */
			list_for_each_entry_safe(adv, n, &hdev->adv_instances,
						 list)
				adv->enabled = false;
		}

		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}
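
/* The legacy and extended scan parameter handlers below both record the
 * scan type; hci_cc_inquiry_cancel() above consults hdev->le_scan_type to
 * tell whether an LE active scan is still in progress.
 */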

static u8 hci_cc_le_set_scan_param(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_param *cp;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	hdev->le_scan_type = cp->type;

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_set_ext_scan_param(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_scan_params *cp;
	struct hci_ev_status *rp = data;
	struct hci_cp_le_scan_phy_params *phy_param;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
	if (!cp)
		return rp->status;

	phy_param = (void *)cp->data;

	hci_dev_lock(hdev);

	hdev->le_scan_type = phy_param->type;

	hci_dev_unlock(hdev);

	return rp->status;
}

static bool has_pending_adv_report(struct hci_dev *hdev)
{
	struct discovery_state *d = &hdev->discovery;

	return bacmp(&d->last_adv_addr, BDADDR_ANY);
}

static void clear_pending_adv_report(struct hci_dev *hdev)
{
	struct discovery_state *d = &hdev->discovery;

	bacpy(&d->last_adv_addr, BDADDR_ANY);
	d->last_adv_data_len = 0;
}

static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 bdaddr_type, s8 rssi, u32 flags,
				     u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;

	if (len > HCI_MAX_AD_LENGTH)
		return;

	bacpy(&d->last_adv_addr, bdaddr);
	d->last_adv_addr_type = bdaddr_type;
	d->last_adv_rssi = rssi;
	d->last_adv_flags = flags;
	memcpy(d->last_adv_data, data, len);
	d->last_adv_data_len = len;
}
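
/* While an active scan is running, the most recent advertising report is
 * parked in the discovery state (store_pending_adv_report() above) so it
 * can be combined with the matching scan response; disabling the scan
 * below flushes whatever report is still pending to mgmt.
 */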

static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
{
	hci_dev_lock(hdev);

	switch (enable) {
	case LE_SCAN_ENABLE:
		hci_dev_set_flag(hdev, HCI_LE_SCAN);
		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
			clear_pending_adv_report(hdev);
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		hci_dev_clear_flag(hdev, HCI_LE_SCAN);

		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request. Therefore
		 * mark discovery as stopped.
		 */
		if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

		break;

	default:
		bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
			   enable);
		break;
	}

	hci_dev_unlock(hdev);
}

static u8 hci_cc_le_set_scan_enable(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return rp->status;

	le_set_scan_enable_complete(hdev, cp->enable);

	return rp->status;
}

static u8 hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_scan_enable *cp;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
	if (!cp)
		return rp->status;

	le_set_scan_enable_complete(hdev, cp->enable);

	return rp->status;
}

static u8 hci_cc_le_read_num_adv_sets(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_le_read_num_supported_adv_sets *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x No of Adv sets %u", rp->status,
		   rp->num_of_sets);

	if (rp->status)
		return rp->status;

	hdev->le_num_of_adv_sets = rp->num_of_sets;

	return rp->status;
}

static u8 hci_cc_le_read_accept_list_size(struct hci_dev *hdev, void *data,
					  struct sk_buff *skb)
{
	struct hci_rp_le_read_accept_list_size *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);

	if (rp->status)
		return rp->status;

	hdev->le_accept_list_size = rp->size;

	return rp->status;
}

static u8 hci_cc_le_clear_accept_list(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_bdaddr_list_clear(&hdev->le_accept_list);

	return rp->status;
}

static u8 hci_cc_le_add_to_accept_list(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_add_to_accept_list *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
	if (!sent)
		return rp->status;

	hci_bdaddr_list_add(&hdev->le_accept_list, &sent->bdaddr,
			    sent->bdaddr_type);

	return rp->status;
}

static u8 hci_cc_le_del_from_accept_list(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_cp_le_del_from_accept_list *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST);
	if (!sent)
		return rp->status;

	hci_bdaddr_list_del(&hdev->le_accept_list, &sent->bdaddr,
			    sent->bdaddr_type);

	return rp->status;
}
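
/* The filter accept list handlers above keep hdev->le_accept_list, the
 * host-side mirror of the controller's list, in sync by updating it only
 * once the controller has reported success.
 */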

static u8 hci_cc_le_read_supported_states(struct hci_dev *hdev, void *data,
					  struct sk_buff *skb)
{
	struct hci_rp_le_read_supported_states *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	memcpy(hdev->le_states, rp->le_states, 8);

	return rp->status;
}

static u8 hci_cc_le_read_def_data_len(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_le_read_def_data_len *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
	hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);

	return rp->status;
}

static u8 hci_cc_le_write_def_data_len(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_write_def_data_len *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
	if (!sent)
		return rp->status;

	hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
	hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);

	return rp->status;
}

static u8 hci_cc_le_add_to_resolv_list(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_cp_le_add_to_resolv_list *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
	if (!sent)
		return rp->status;

	hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
				     sent->bdaddr_type, sent->peer_irk,
				     sent->local_irk);

	return rp->status;
}

static u8 hci_cc_le_del_from_resolv_list(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_cp_le_del_from_resolv_list *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
	if (!sent)
		return rp->status;

	hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
				     sent->bdaddr_type);

	return rp->status;
}

static u8 hci_cc_le_clear_resolv_list(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_bdaddr_list_clear(&hdev->le_resolv_list);

	return rp->status;
}

static u8 hci_cc_le_read_resolv_list_size(struct hci_dev *hdev, void *data,
					  struct sk_buff *skb)
{
	struct hci_rp_le_read_resolv_list_size *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);

	if (rp->status)
		return rp->status;

	hdev->le_resolv_list_size = rp->size;

	return rp->status;
}
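
/* Controller-based address resolution: the resolving list handlers above
 * mirror the controller state in hdev->le_resolv_list, while the handler
 * below tracks via HCI_LL_RPA_RESOLUTION whether the link layer is
 * currently resolving RPAs on its own.
 */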

static u8 hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev, void *data,
					       struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u8 *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (*sent)
		hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
	else
		hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_le_read_max_data_len(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_le_read_max_data_len *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
	hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
	hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
	hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);

	return rp->status;
}

static u8 hci_cc_write_le_host_supported(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_cp_write_le_host_supported *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (sent->le) {
		hdev->features[1][0] |= LMP_HOST_LE;
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	} else {
		hdev->features[1][0] &= ~LMP_HOST_LE;
		hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);
	}

	if (sent->simul)
		hdev->features[1][0] |= LMP_HOST_LE_BREDR;
	else
		hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_set_adv_param(struct hci_dev *hdev, void *data,
			       struct sk_buff *skb)
{
	struct hci_cp_le_set_adv_param *cp;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);
	hdev->adv_addr_type = cp->own_address_type;
	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_set_ext_adv_param(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_le_set_ext_adv_params *rp = data;
	struct hci_cp_le_set_ext_adv_params *cp;
	struct adv_info *adv_instance;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);
	hdev->adv_addr_type = cp->own_addr_type;
	if (!cp->handle) {
		/* Store in hdev for instance 0 */
		hdev->adv_tx_power = rp->tx_power;
	} else {
		adv_instance = hci_find_adv_instance(hdev, cp->handle);
		if (adv_instance)
			adv_instance->tx_power = rp->tx_power;
	}
	/* Update adv data as tx power is known now */
	hci_req_update_adv_data(hdev, cp->handle);

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_rssi(struct hci_dev *hdev, void *data,
			   struct sk_buff *skb)
{
	struct hci_rp_read_rssi *rp = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->rssi = rp->rssi;

	hci_dev_unlock(hdev);

	return rp->status;
}

static u8 hci_cc_read_tx_power(struct hci_dev *hdev, void *data,
			       struct sk_buff *skb)
{
	struct hci_cp_read_tx_power *sent;
	struct hci_rp_read_tx_power *rp = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (!conn)
		goto unlock;

	switch (sent->type) {
	case 0x00:
		conn->tx_power = rp->tx_power;
		break;
	case 0x01:
		conn->max_tx_power = rp->tx_power;
		break;
	}

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}

static u8 hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	u8 *mode;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
	if (mode)
		hdev->ssp_debug_mode = *mode;

	return rp->status;
}

static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
{
	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (status) {
		hci_conn_check_pending(hdev);
		return;
	}

	set_bit(HCI_INQUIRY, &hdev->flags);
}

static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	bt_dev_dbg(hdev, "bdaddr %pMR hcon %p", &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
					    HCI_ROLE_MASTER);
			if (!conn)
				bt_dev_err(hdev, "no memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_add_sco *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

	bt_dev_dbg(hdev, "handle 0x%4.4x", handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}
*hdev, __u8 status) 2309 { 2310 struct hci_cp_auth_requested *cp; 2311 struct hci_conn *conn; 2312 2313 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2314 2315 if (!status) 2316 return; 2317 2318 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED); 2319 if (!cp) 2320 return; 2321 2322 hci_dev_lock(hdev); 2323 2324 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2325 if (conn) { 2326 if (conn->state == BT_CONFIG) { 2327 hci_connect_cfm(conn, status); 2328 hci_conn_drop(conn); 2329 } 2330 } 2331 2332 hci_dev_unlock(hdev); 2333 } 2334 2335 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status) 2336 { 2337 struct hci_cp_set_conn_encrypt *cp; 2338 struct hci_conn *conn; 2339 2340 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2341 2342 if (!status) 2343 return; 2344 2345 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT); 2346 if (!cp) 2347 return; 2348 2349 hci_dev_lock(hdev); 2350 2351 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2352 if (conn) { 2353 if (conn->state == BT_CONFIG) { 2354 hci_connect_cfm(conn, status); 2355 hci_conn_drop(conn); 2356 } 2357 } 2358 2359 hci_dev_unlock(hdev); 2360 } 2361 2362 static int hci_outgoing_auth_needed(struct hci_dev *hdev, 2363 struct hci_conn *conn) 2364 { 2365 if (conn->state != BT_CONFIG || !conn->out) 2366 return 0; 2367 2368 if (conn->pending_sec_level == BT_SECURITY_SDP) 2369 return 0; 2370 2371 /* Only request authentication for SSP connections or non-SSP 2372 * devices with sec_level MEDIUM or HIGH or if MITM protection 2373 * is requested. 2374 */ 2375 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) && 2376 conn->pending_sec_level != BT_SECURITY_FIPS && 2377 conn->pending_sec_level != BT_SECURITY_HIGH && 2378 conn->pending_sec_level != BT_SECURITY_MEDIUM) 2379 return 0; 2380 2381 return 1; 2382 } 2383 2384 static int hci_resolve_name(struct hci_dev *hdev, 2385 struct inquiry_entry *e) 2386 { 2387 struct hci_cp_remote_name_req cp; 2388 2389 memset(&cp, 0, sizeof(cp)); 2390 2391 bacpy(&cp.bdaddr, &e->data.bdaddr); 2392 cp.pscan_rep_mode = e->data.pscan_rep_mode; 2393 cp.pscan_mode = e->data.pscan_mode; 2394 cp.clock_offset = e->data.clock_offset; 2395 2396 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp); 2397 } 2398 2399 static bool hci_resolve_next_name(struct hci_dev *hdev) 2400 { 2401 struct discovery_state *discov = &hdev->discovery; 2402 struct inquiry_entry *e; 2403 2404 if (list_empty(&discov->resolve)) 2405 return false; 2406 2407 /* We should stop if we already spent too much time resolving names. */ 2408 if (time_after(jiffies, discov->name_resolve_timeout)) { 2409 bt_dev_warn_ratelimited(hdev, "Name resolve takes too long."); 2410 return false; 2411 } 2412 2413 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED); 2414 if (!e) 2415 return false; 2416 2417 if (hci_resolve_name(hdev, e) == 0) { 2418 e->name_state = NAME_PENDING; 2419 return true; 2420 } 2421 2422 return false; 2423 } 2424 2425 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn, 2426 bdaddr_t *bdaddr, u8 *name, u8 name_len) 2427 { 2428 struct discovery_state *discov = &hdev->discovery; 2429 struct inquiry_entry *e; 2430 2431 /* Update the mgmt connected state if necessary. Be careful with 2432 * conn objects that exist but are not (yet) connected however. 2433 * Only those in BT_CONFIG or BT_CONNECTED states can be 2434 * considered connected. 
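* (A conn object still in, e.g., BT_CONNECT state only records an in-flight connection attempt, so reporting it to mgmt at this point would be premature.)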
2435 */ 2436 if (conn && 2437 (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) && 2438 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) 2439 mgmt_device_connected(hdev, conn, name, name_len); 2440 2441 if (discov->state == DISCOVERY_STOPPED) 2442 return; 2443 2444 if (discov->state == DISCOVERY_STOPPING) 2445 goto discov_complete; 2446 2447 if (discov->state != DISCOVERY_RESOLVING) 2448 return; 2449 2450 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING); 2451 /* If the device was not found in the list of devices whose names 2452 * are pending resolution, there is no need to resolve the next name, 2453 * as that will be done upon receiving another Remote Name Request 2454 * Complete event. */ 2455 if (!e) 2456 return; 2457 2458 list_del(&e->list); 2459 2460 e->name_state = name ? NAME_KNOWN : NAME_NOT_KNOWN; 2461 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00, e->data.rssi, 2462 name, name_len); 2463 2464 if (hci_resolve_next_name(hdev)) 2465 return; 2466 2467 discov_complete: 2468 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 2469 } 2470 2471 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status) 2472 { 2473 struct hci_cp_remote_name_req *cp; 2474 struct hci_conn *conn; 2475 2476 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2477 2478 /* If successful, wait for the name request complete event before 2479 * checking for the need to do authentication. */ 2480 if (!status) 2481 return; 2482 2483 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ); 2484 if (!cp) 2485 return; 2486 2487 hci_dev_lock(hdev); 2488 2489 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); 2490 2491 if (hci_dev_test_flag(hdev, HCI_MGMT)) 2492 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0); 2493 2494 if (!conn) 2495 goto unlock; 2496 2497 if (!hci_outgoing_auth_needed(hdev, conn)) 2498 goto unlock; 2499 2500 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) { 2501 struct hci_cp_auth_requested auth_cp; 2502 2503 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags); 2504 2505 auth_cp.handle = __cpu_to_le16(conn->handle); 2506 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, 2507 sizeof(auth_cp), &auth_cp); 2508 } 2509 2510 unlock: 2511 hci_dev_unlock(hdev); 2512 } 2513 2514 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status) 2515 { 2516 struct hci_cp_read_remote_features *cp; 2517 struct hci_conn *conn; 2518 2519 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2520 2521 if (!status) 2522 return; 2523 2524 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES); 2525 if (!cp) 2526 return; 2527 2528 hci_dev_lock(hdev); 2529 2530 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2531 if (conn) { 2532 if (conn->state == BT_CONFIG) { 2533 hci_connect_cfm(conn, status); 2534 hci_conn_drop(conn); 2535 } 2536 } 2537 2538 hci_dev_unlock(hdev); 2539 } 2540 2541 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status) 2542 { 2543 struct hci_cp_read_remote_ext_features *cp; 2544 struct hci_conn *conn; 2545 2546 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2547 2548 if (!status) 2549 return; 2550 2551 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES); 2552 if (!cp) 2553 return; 2554 2555 hci_dev_lock(hdev); 2556 2557 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2558 if (conn) { 2559 if (conn->state == BT_CONFIG) { 2560 hci_connect_cfm(conn, status); 2561 hci_conn_drop(conn); 2562 } 2563 } 2564 2565 hci_dev_unlock(hdev); 2566 } 2567 2568 static void hci_cs_setup_sync_conn(struct
hci_dev *hdev, __u8 status) 2569 { 2570 struct hci_cp_setup_sync_conn *cp; 2571 struct hci_conn *acl, *sco; 2572 __u16 handle; 2573 2574 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2575 2576 if (!status) 2577 return; 2578 2579 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN); 2580 if (!cp) 2581 return; 2582 2583 handle = __le16_to_cpu(cp->handle); 2584 2585 bt_dev_dbg(hdev, "handle 0x%4.4x", handle); 2586 2587 hci_dev_lock(hdev); 2588 2589 acl = hci_conn_hash_lookup_handle(hdev, handle); 2590 if (acl) { 2591 sco = acl->link; 2592 if (sco) { 2593 sco->state = BT_CLOSED; 2594 2595 hci_connect_cfm(sco, status); 2596 hci_conn_del(sco); 2597 } 2598 } 2599 2600 hci_dev_unlock(hdev); 2601 } 2602 2603 static void hci_cs_enhanced_setup_sync_conn(struct hci_dev *hdev, __u8 status) 2604 { 2605 struct hci_cp_enhanced_setup_sync_conn *cp; 2606 struct hci_conn *acl, *sco; 2607 __u16 handle; 2608 2609 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2610 2611 if (!status) 2612 return; 2613 2614 cp = hci_sent_cmd_data(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN); 2615 if (!cp) 2616 return; 2617 2618 handle = __le16_to_cpu(cp->handle); 2619 2620 bt_dev_dbg(hdev, "handle 0x%4.4x", handle); 2621 2622 hci_dev_lock(hdev); 2623 2624 acl = hci_conn_hash_lookup_handle(hdev, handle); 2625 if (acl) { 2626 sco = acl->link; 2627 if (sco) { 2628 sco->state = BT_CLOSED; 2629 2630 hci_connect_cfm(sco, status); 2631 hci_conn_del(sco); 2632 } 2633 } 2634 2635 hci_dev_unlock(hdev); 2636 } 2637 2638 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status) 2639 { 2640 struct hci_cp_sniff_mode *cp; 2641 struct hci_conn *conn; 2642 2643 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2644 2645 if (!status) 2646 return; 2647 2648 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE); 2649 if (!cp) 2650 return; 2651 2652 hci_dev_lock(hdev); 2653 2654 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2655 if (conn) { 2656 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags); 2657 2658 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags)) 2659 hci_sco_setup(conn, status); 2660 } 2661 2662 hci_dev_unlock(hdev); 2663 } 2664 2665 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status) 2666 { 2667 struct hci_cp_exit_sniff_mode *cp; 2668 struct hci_conn *conn; 2669 2670 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2671 2672 if (!status) 2673 return; 2674 2675 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE); 2676 if (!cp) 2677 return; 2678 2679 hci_dev_lock(hdev); 2680 2681 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2682 if (conn) { 2683 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags); 2684 2685 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags)) 2686 hci_sco_setup(conn, status); 2687 } 2688 2689 hci_dev_unlock(hdev); 2690 } 2691 2692 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status) 2693 { 2694 struct hci_cp_disconnect *cp; 2695 struct hci_conn_params *params; 2696 struct hci_conn *conn; 2697 bool mgmt_conn; 2698 2699 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2700 2701 /* Wait for HCI_EV_DISCONN_COMPLETE if status 0x00 and not suspended 2702 * otherwise cleanup the connection immediately. 
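* While suspended most events get masked out, so the Disconnection * Complete event may never arrive; in that case the cleanup below has * to run immediately.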
2703 */ 2704 if (!status && !hdev->suspended) 2705 return; 2706 2707 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT); 2708 if (!cp) 2709 return; 2710 2711 hci_dev_lock(hdev); 2712 2713 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2714 if (!conn) 2715 goto unlock; 2716 2717 if (status) { 2718 mgmt_disconnect_failed(hdev, &conn->dst, conn->type, 2719 conn->dst_type, status); 2720 2721 if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) { 2722 hdev->cur_adv_instance = conn->adv_instance; 2723 hci_enable_advertising(hdev); 2724 } 2725 2726 goto done; 2727 } 2728 2729 mgmt_conn = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags); 2730 2731 if (conn->type == ACL_LINK) { 2732 if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags)) 2733 hci_remove_link_key(hdev, &conn->dst); 2734 } 2735 2736 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type); 2737 if (params) { 2738 switch (params->auto_connect) { 2739 case HCI_AUTO_CONN_LINK_LOSS: 2740 if (cp->reason != HCI_ERROR_CONNECTION_TIMEOUT) 2741 break; 2742 fallthrough; 2743 2744 case HCI_AUTO_CONN_DIRECT: 2745 case HCI_AUTO_CONN_ALWAYS: 2746 list_del_init(¶ms->action); 2747 list_add(¶ms->action, &hdev->pend_le_conns); 2748 break; 2749 2750 default: 2751 break; 2752 } 2753 } 2754 2755 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type, 2756 cp->reason, mgmt_conn); 2757 2758 hci_disconn_cfm(conn, cp->reason); 2759 2760 done: 2761 /* If the disconnection failed for any reason, the upper layer 2762 * does not retry to disconnect in current implementation. 2763 * Hence, we need to do some basic cleanup here and re-enable 2764 * advertising if necessary. 2765 */ 2766 hci_conn_del(conn); 2767 unlock: 2768 hci_dev_unlock(hdev); 2769 } 2770 2771 static u8 ev_bdaddr_type(struct hci_dev *hdev, u8 type, bool *resolved) 2772 { 2773 /* When using controller based address resolution, then the new 2774 * address types 0x02 and 0x03 are used. These types need to be 2775 * converted back into either public address or random address type 2776 */ 2777 switch (type) { 2778 case ADDR_LE_DEV_PUBLIC_RESOLVED: 2779 if (resolved) 2780 *resolved = true; 2781 return ADDR_LE_DEV_PUBLIC; 2782 case ADDR_LE_DEV_RANDOM_RESOLVED: 2783 if (resolved) 2784 *resolved = true; 2785 return ADDR_LE_DEV_RANDOM; 2786 } 2787 2788 if (resolved) 2789 *resolved = false; 2790 return type; 2791 } 2792 2793 static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr, 2794 u8 peer_addr_type, u8 own_address_type, 2795 u8 filter_policy) 2796 { 2797 struct hci_conn *conn; 2798 2799 conn = hci_conn_hash_lookup_le(hdev, peer_addr, 2800 peer_addr_type); 2801 if (!conn) 2802 return; 2803 2804 own_address_type = ev_bdaddr_type(hdev, own_address_type, NULL); 2805 2806 /* Store the initiator and responder address information which 2807 * is needed for SMP. These values will not change during the 2808 * lifetime of the connection. 2809 */ 2810 conn->init_addr_type = own_address_type; 2811 if (own_address_type == ADDR_LE_DEV_RANDOM) 2812 bacpy(&conn->init_addr, &hdev->random_addr); 2813 else 2814 bacpy(&conn->init_addr, &hdev->bdaddr); 2815 2816 conn->resp_addr_type = peer_addr_type; 2817 bacpy(&conn->resp_addr, peer_addr); 2818 2819 /* We don't want the connection attempt to stick around 2820 * indefinitely since LE doesn't have a page timeout concept 2821 * like BR/EDR. Set a timer for any connection that doesn't use 2822 * the accept list for connecting. 
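* HCI_LE_USE_PEER_ADDR (filter policy 0x00) means the controller was * given one explicit peer address instead of the accept list, so the * host itself has to bound how long the attempt may run.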
2823 */ 2824 if (filter_policy == HCI_LE_USE_PEER_ADDR) 2825 queue_delayed_work(conn->hdev->workqueue, 2826 &conn->le_conn_timeout, 2827 conn->conn_timeout); 2828 } 2829 2830 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status) 2831 { 2832 struct hci_cp_le_create_conn *cp; 2833 2834 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2835 2836 /* All connection failure handling is taken care of by the 2837 * hci_conn_failed function which is triggered by the HCI 2838 * request completion callbacks used for connecting. 2839 */ 2840 if (status) 2841 return; 2842 2843 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN); 2844 if (!cp) 2845 return; 2846 2847 hci_dev_lock(hdev); 2848 2849 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type, 2850 cp->own_address_type, cp->filter_policy); 2851 2852 hci_dev_unlock(hdev); 2853 } 2854 2855 static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status) 2856 { 2857 struct hci_cp_le_ext_create_conn *cp; 2858 2859 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2860 2861 /* All connection failure handling is taken care of by the 2862 * hci_conn_failed function which is triggered by the HCI 2863 * request completion callbacks used for connecting. 2864 */ 2865 if (status) 2866 return; 2867 2868 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN); 2869 if (!cp) 2870 return; 2871 2872 hci_dev_lock(hdev); 2873 2874 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type, 2875 cp->own_addr_type, cp->filter_policy); 2876 2877 hci_dev_unlock(hdev); 2878 } 2879 2880 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status) 2881 { 2882 struct hci_cp_le_read_remote_features *cp; 2883 struct hci_conn *conn; 2884 2885 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2886 2887 if (!status) 2888 return; 2889 2890 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES); 2891 if (!cp) 2892 return; 2893 2894 hci_dev_lock(hdev); 2895 2896 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2897 if (conn) { 2898 if (conn->state == BT_CONFIG) { 2899 hci_connect_cfm(conn, status); 2900 hci_conn_drop(conn); 2901 } 2902 } 2903 2904 hci_dev_unlock(hdev); 2905 } 2906 2907 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status) 2908 { 2909 struct hci_cp_le_start_enc *cp; 2910 struct hci_conn *conn; 2911 2912 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2913 2914 if (!status) 2915 return; 2916 2917 hci_dev_lock(hdev); 2918 2919 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC); 2920 if (!cp) 2921 goto unlock; 2922 2923 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2924 if (!conn) 2925 goto unlock; 2926 2927 if (conn->state != BT_CONNECTED) 2928 goto unlock; 2929 2930 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE); 2931 hci_conn_drop(conn); 2932 2933 unlock: 2934 hci_dev_unlock(hdev); 2935 } 2936 2937 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status) 2938 { 2939 struct hci_cp_switch_role *cp; 2940 struct hci_conn *conn; 2941 2942 BT_DBG("%s status 0x%2.2x", hdev->name, status); 2943 2944 if (!status) 2945 return; 2946 2947 cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE); 2948 if (!cp) 2949 return; 2950 2951 hci_dev_lock(hdev); 2952 2953 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); 2954 if (conn) 2955 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags); 2956 2957 hci_dev_unlock(hdev); 2958 } 2959 2960 static void hci_inquiry_complete_evt(struct hci_dev *hdev, void *data, 2961 struct sk_buff *skb) 2962 { 2963 struct hci_ev_status *ev = data; 2964 struct discovery_state 
*discov = &hdev->discovery; 2965 struct inquiry_entry *e; 2966 2967 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 2968 2969 hci_conn_check_pending(hdev); 2970 2971 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags)) 2972 return; 2973 2974 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */ 2975 wake_up_bit(&hdev->flags, HCI_INQUIRY); 2976 2977 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 2978 return; 2979 2980 hci_dev_lock(hdev); 2981 2982 if (discov->state != DISCOVERY_FINDING) 2983 goto unlock; 2984 2985 if (list_empty(&discov->resolve)) { 2986 /* When BR/EDR inquiry is active and no LE scanning is in 2987 * progress, then change discovery state to indicate completion. 2988 * 2989 * When running LE scanning and BR/EDR inquiry simultaneously 2990 * and the LE scan already finished, then change the discovery 2991 * state to indicate completion. 2992 */ 2993 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) || 2994 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) 2995 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 2996 goto unlock; 2997 } 2998 2999 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED); 3000 if (e && hci_resolve_name(hdev, e) == 0) { 3001 e->name_state = NAME_PENDING; 3002 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING); 3003 discov->name_resolve_timeout = jiffies + NAME_RESOLVE_DURATION; 3004 } else { 3005 /* When BR/EDR inquiry is active and no LE scanning is in 3006 * progress, then change discovery state to indicate completion. 3007 * 3008 * When running LE scanning and BR/EDR inquiry simultaneously 3009 * and the LE scan already finished, then change the discovery 3010 * state to indicate completion. 3011 */ 3012 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) || 3013 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) 3014 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 3015 } 3016 3017 unlock: 3018 hci_dev_unlock(hdev); 3019 } 3020 3021 static void hci_inquiry_result_evt(struct hci_dev *hdev, void *edata, 3022 struct sk_buff *skb) 3023 { 3024 struct hci_ev_inquiry_result *ev = edata; 3025 struct inquiry_data data; 3026 int i; 3027 3028 if (!hci_ev_skb_pull(hdev, skb, HCI_EV_INQUIRY_RESULT, 3029 flex_array_size(ev, info, ev->num))) 3030 return; 3031 3032 bt_dev_dbg(hdev, "num %d", ev->num); 3033 3034 if (!ev->num) 3035 return; 3036 3037 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) 3038 return; 3039 3040 hci_dev_lock(hdev); 3041 3042 for (i = 0; i < ev->num; i++) { 3043 struct inquiry_info *info = &ev->info[i]; 3044 u32 flags; 3045 3046 bacpy(&data.bdaddr, &info->bdaddr); 3047 data.pscan_rep_mode = info->pscan_rep_mode; 3048 data.pscan_period_mode = info->pscan_period_mode; 3049 data.pscan_mode = info->pscan_mode; 3050 memcpy(data.dev_class, info->dev_class, 3); 3051 data.clock_offset = info->clock_offset; 3052 data.rssi = HCI_RSSI_INVALID; 3053 data.ssp_mode = 0x00; 3054 3055 flags = hci_inquiry_cache_update(hdev, &data, false); 3056 3057 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 3058 info->dev_class, HCI_RSSI_INVALID, 3059 flags, NULL, 0, NULL, 0); 3060 } 3061 3062 hci_dev_unlock(hdev); 3063 } 3064 3065 static void hci_conn_complete_evt(struct hci_dev *hdev, void *data, 3066 struct sk_buff *skb) 3067 { 3068 struct hci_ev_conn_complete *ev = data; 3069 struct hci_conn *conn; 3070 u8 status = ev->status; 3071 3072 bt_dev_dbg(hdev, "status 0x%2.2x", status); 3073 3074 hci_dev_lock(hdev); 3075 3076 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); 3077 if (!conn) { 3078 /* In case of error status and 
no connection pending, 3079 * just unlock, as there is nothing to clean up. 3080 */ 3081 if (ev->status) 3082 goto unlock; 3083 3084 /* Connection may not exist if auto-connected. Check the BR/EDR 3085 * accept list to see if this device is allowed to auto-connect. 3086 * If the link is an ACL type, create the connection object 3087 * automatically. 3088 * 3089 * Auto-connect will only occur if the event filter is 3090 * programmed with a given address. Right now, the event filter is 3091 * only used during suspend. 3092 */ 3093 if (ev->link_type == ACL_LINK && 3094 hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, 3095 &ev->bdaddr, 3096 BDADDR_BREDR)) { 3097 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr, 3098 HCI_ROLE_SLAVE); 3099 if (!conn) { 3100 bt_dev_err(hdev, "no memory for new conn"); 3101 goto unlock; 3102 } 3103 } else { 3104 if (ev->link_type != SCO_LINK) 3105 goto unlock; 3106 3107 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, 3108 &ev->bdaddr); 3109 if (!conn) 3110 goto unlock; 3111 3112 conn->type = SCO_LINK; 3113 } 3114 } 3115 3116 /* The HCI_Connection_Complete event is only sent once per connection. 3117 * Processing it more than once per connection can corrupt kernel memory. 3118 * 3119 * As the connection handle is set here for the first time, it indicates 3120 * whether the connection is already set up. 3121 */ 3122 if (conn->handle != HCI_CONN_HANDLE_UNSET) { 3123 bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection"); 3124 goto unlock; 3125 } 3126 3127 if (!status) { 3128 conn->handle = __le16_to_cpu(ev->handle); 3129 if (conn->handle > HCI_CONN_HANDLE_MAX) { 3130 bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x", 3131 conn->handle, HCI_CONN_HANDLE_MAX); 3132 status = HCI_ERROR_INVALID_PARAMETERS; 3133 goto done; 3134 } 3135 3136 if (conn->type == ACL_LINK) { 3137 conn->state = BT_CONFIG; 3138 hci_conn_hold(conn); 3139 3140 if (!conn->out && !hci_conn_ssp_enabled(conn) && 3141 !hci_find_link_key(hdev, &ev->bdaddr)) 3142 conn->disc_timeout = HCI_PAIRING_TIMEOUT; 3143 else 3144 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 3145 } else 3146 conn->state = BT_CONNECTED; 3147 3148 hci_debugfs_create_conn(conn); 3149 hci_conn_add_sysfs(conn); 3150 3151 if (test_bit(HCI_AUTH, &hdev->flags)) 3152 set_bit(HCI_CONN_AUTH, &conn->flags); 3153 3154 if (test_bit(HCI_ENCRYPT, &hdev->flags)) 3155 set_bit(HCI_CONN_ENCRYPT, &conn->flags); 3156 3157 /* Get remote features */ 3158 if (conn->type == ACL_LINK) { 3159 struct hci_cp_read_remote_features cp; 3160 cp.handle = ev->handle; 3161 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES, 3162 sizeof(cp), &cp); 3163 3164 hci_req_update_scan(hdev); 3165 } 3166 3167 /* Set packet type for incoming connection */ 3168 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) { 3169 struct hci_cp_change_conn_ptype cp; 3170 cp.handle = ev->handle; 3171 cp.pkt_type = cpu_to_le16(conn->pkt_type); 3172 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp), 3173 &cp); 3174 } 3175 } 3176 3177 if (conn->type == ACL_LINK) 3178 hci_sco_setup(conn, ev->status); 3179 3180 done: 3181 if (status) { 3182 hci_conn_failed(conn, status); 3183 } else if (ev->link_type == SCO_LINK) { 3184 switch (conn->setting & SCO_AIRMODE_MASK) { 3185 case SCO_AIRMODE_CVSD: 3186 if (hdev->notify) 3187 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD); 3188 break; 3189 } 3190 3191 hci_connect_cfm(conn, status); 3192 } 3193 3194 unlock: 3195 hci_dev_unlock(hdev); 3196 3197 hci_conn_check_pending(hdev); 3198 } 3199 3200 static void hci_reject_conn(struct hci_dev
*hdev, bdaddr_t *bdaddr) 3201 { 3202 struct hci_cp_reject_conn_req cp; 3203 3204 bacpy(&cp.bdaddr, bdaddr); 3205 cp.reason = HCI_ERROR_REJ_BAD_ADDR; 3206 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp); 3207 } 3208 3209 static void hci_conn_request_evt(struct hci_dev *hdev, void *data, 3210 struct sk_buff *skb) 3211 { 3212 struct hci_ev_conn_request *ev = data; 3213 int mask = hdev->link_mode; 3214 struct inquiry_entry *ie; 3215 struct hci_conn *conn; 3216 __u8 flags = 0; 3217 3218 bt_dev_dbg(hdev, "bdaddr %pMR type 0x%x", &ev->bdaddr, ev->link_type); 3219 3220 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type, 3221 &flags); 3222 3223 if (!(mask & HCI_LM_ACCEPT)) { 3224 hci_reject_conn(hdev, &ev->bdaddr); 3225 return; 3226 } 3227 3228 if (hci_bdaddr_list_lookup(&hdev->reject_list, &ev->bdaddr, 3229 BDADDR_BREDR)) { 3230 hci_reject_conn(hdev, &ev->bdaddr); 3231 return; 3232 } 3233 3234 /* Require HCI_CONNECTABLE or an accept list entry to accept the 3235 * connection. These features are only touched through mgmt so 3236 * only do the checks if HCI_MGMT is set. 3237 */ 3238 if (hci_dev_test_flag(hdev, HCI_MGMT) && 3239 !hci_dev_test_flag(hdev, HCI_CONNECTABLE) && 3240 !hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr, 3241 BDADDR_BREDR)) { 3242 hci_reject_conn(hdev, &ev->bdaddr); 3243 return; 3244 } 3245 3246 /* Connection accepted */ 3247 3248 hci_dev_lock(hdev); 3249 3250 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr); 3251 if (ie) 3252 memcpy(ie->data.dev_class, ev->dev_class, 3); 3253 3254 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, 3255 &ev->bdaddr); 3256 if (!conn) { 3257 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr, 3258 HCI_ROLE_SLAVE); 3259 if (!conn) { 3260 bt_dev_err(hdev, "no memory for new connection"); 3261 hci_dev_unlock(hdev); 3262 return; 3263 } 3264 } 3265 3266 memcpy(conn->dev_class, ev->dev_class, 3); 3267 3268 hci_dev_unlock(hdev); 3269 3270 if (ev->link_type == ACL_LINK || 3271 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) { 3272 struct hci_cp_accept_conn_req cp; 3273 conn->state = BT_CONNECT; 3274 3275 bacpy(&cp.bdaddr, &ev->bdaddr); 3276 3277 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER)) 3278 cp.role = 0x00; /* Become central */ 3279 else 3280 cp.role = 0x01; /* Remain peripheral */ 3281 3282 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp); 3283 } else if (!(flags & HCI_PROTO_DEFER)) { 3284 struct hci_cp_accept_sync_conn_req cp; 3285 conn->state = BT_CONNECT; 3286 3287 bacpy(&cp.bdaddr, &ev->bdaddr); 3288 cp.pkt_type = cpu_to_le16(conn->pkt_type); 3289 3290 cp.tx_bandwidth = cpu_to_le32(0x00001f40); 3291 cp.rx_bandwidth = cpu_to_le32(0x00001f40); 3292 cp.max_latency = cpu_to_le16(0xffff); 3293 cp.content_format = cpu_to_le16(hdev->voice_setting); 3294 cp.retrans_effort = 0xff; 3295 3296 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp), 3297 &cp); 3298 } else { 3299 conn->state = BT_CONNECT2; 3300 hci_connect_cfm(conn, 0); 3301 } 3302 } 3303 3304 static u8 hci_to_mgmt_reason(u8 err) 3305 { 3306 switch (err) { 3307 case HCI_ERROR_CONNECTION_TIMEOUT: 3308 return MGMT_DEV_DISCONN_TIMEOUT; 3309 case HCI_ERROR_REMOTE_USER_TERM: 3310 case HCI_ERROR_REMOTE_LOW_RESOURCES: 3311 case HCI_ERROR_REMOTE_POWER_OFF: 3312 return MGMT_DEV_DISCONN_REMOTE; 3313 case HCI_ERROR_LOCAL_HOST_TERM: 3314 return MGMT_DEV_DISCONN_LOCAL_HOST; 3315 default: 3316 return MGMT_DEV_DISCONN_UNKNOWN; 3317 } 3318 } 3319 3320 static void hci_disconn_complete_evt(struct hci_dev *hdev, void *data, 3321 
struct sk_buff *skb) 3322 { 3323 struct hci_ev_disconn_complete *ev = data; 3324 u8 reason; 3325 struct hci_conn_params *params; 3326 struct hci_conn *conn; 3327 bool mgmt_connected; 3328 3329 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 3330 3331 hci_dev_lock(hdev); 3332 3333 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 3334 if (!conn) 3335 goto unlock; 3336 3337 if (ev->status) { 3338 mgmt_disconnect_failed(hdev, &conn->dst, conn->type, 3339 conn->dst_type, ev->status); 3340 goto unlock; 3341 } 3342 3343 conn->state = BT_CLOSED; 3344 3345 mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags); 3346 3347 if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags)) 3348 reason = MGMT_DEV_DISCONN_AUTH_FAILURE; 3349 else 3350 reason = hci_to_mgmt_reason(ev->reason); 3351 3352 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type, 3353 reason, mgmt_connected); 3354 3355 if (conn->type == ACL_LINK) { 3356 if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags)) 3357 hci_remove_link_key(hdev, &conn->dst); 3358 3359 hci_req_update_scan(hdev); 3360 } 3361 3362 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type); 3363 if (params) { 3364 switch (params->auto_connect) { 3365 case HCI_AUTO_CONN_LINK_LOSS: 3366 if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT) 3367 break; 3368 fallthrough; 3369 3370 case HCI_AUTO_CONN_DIRECT: 3371 case HCI_AUTO_CONN_ALWAYS: 3372 list_del_init(¶ms->action); 3373 list_add(¶ms->action, &hdev->pend_le_conns); 3374 hci_update_passive_scan(hdev); 3375 break; 3376 3377 default: 3378 break; 3379 } 3380 } 3381 3382 hci_disconn_cfm(conn, ev->reason); 3383 3384 /* Re-enable advertising if necessary, since it might 3385 * have been disabled by the connection. From the 3386 * HCI_LE_Set_Advertise_Enable command description in 3387 * the core specification (v4.0): 3388 * "The Controller shall continue advertising until the Host 3389 * issues an LE_Set_Advertise_Enable command with 3390 * Advertising_Enable set to 0x00 (Advertising is disabled) 3391 * or until a connection is created or until the Advertising 3392 * is timed out due to Directed Advertising." 
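* In other words, the controller stopped advertising by itself when * this connection was created, so when acting as peripheral the host * must re-enable advertising on its own, as done below.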
3393 */ 3394 if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) { 3395 hdev->cur_adv_instance = conn->adv_instance; 3396 hci_enable_advertising(hdev); 3397 } 3398 3399 hci_conn_del(conn); 3400 3401 unlock: 3402 hci_dev_unlock(hdev); 3403 } 3404 3405 static void hci_auth_complete_evt(struct hci_dev *hdev, void *data, 3406 struct sk_buff *skb) 3407 { 3408 struct hci_ev_auth_complete *ev = data; 3409 struct hci_conn *conn; 3410 3411 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 3412 3413 hci_dev_lock(hdev); 3414 3415 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 3416 if (!conn) 3417 goto unlock; 3418 3419 if (!ev->status) { 3420 clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags); 3421 3422 if (!hci_conn_ssp_enabled(conn) && 3423 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) { 3424 bt_dev_info(hdev, "re-auth of legacy device is not possible."); 3425 } else { 3426 set_bit(HCI_CONN_AUTH, &conn->flags); 3427 conn->sec_level = conn->pending_sec_level; 3428 } 3429 } else { 3430 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING) 3431 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags); 3432 3433 mgmt_auth_failed(conn, ev->status); 3434 } 3435 3436 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags); 3437 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags); 3438 3439 if (conn->state == BT_CONFIG) { 3440 if (!ev->status && hci_conn_ssp_enabled(conn)) { 3441 struct hci_cp_set_conn_encrypt cp; 3442 cp.handle = ev->handle; 3443 cp.encrypt = 0x01; 3444 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), 3445 &cp); 3446 } else { 3447 conn->state = BT_CONNECTED; 3448 hci_connect_cfm(conn, ev->status); 3449 hci_conn_drop(conn); 3450 } 3451 } else { 3452 hci_auth_cfm(conn, ev->status); 3453 3454 hci_conn_hold(conn); 3455 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 3456 hci_conn_drop(conn); 3457 } 3458 3459 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) { 3460 if (!ev->status) { 3461 struct hci_cp_set_conn_encrypt cp; 3462 cp.handle = ev->handle; 3463 cp.encrypt = 0x01; 3464 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), 3465 &cp); 3466 } else { 3467 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); 3468 hci_encrypt_cfm(conn, ev->status); 3469 } 3470 } 3471 3472 unlock: 3473 hci_dev_unlock(hdev); 3474 } 3475 3476 static void hci_remote_name_evt(struct hci_dev *hdev, void *data, 3477 struct sk_buff *skb) 3478 { 3479 struct hci_ev_remote_name *ev = data; 3480 struct hci_conn *conn; 3481 3482 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 3483 3484 hci_conn_check_pending(hdev); 3485 3486 hci_dev_lock(hdev); 3487 3488 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 3489 3490 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 3491 goto check_auth; 3492 3493 if (ev->status == 0) 3494 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name, 3495 strnlen(ev->name, HCI_MAX_NAME_LENGTH)); 3496 else 3497 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0); 3498 3499 check_auth: 3500 if (!conn) 3501 goto unlock; 3502 3503 if (!hci_outgoing_auth_needed(hdev, conn)) 3504 goto unlock; 3505 3506 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) { 3507 struct hci_cp_auth_requested cp; 3508 3509 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags); 3510 3511 cp.handle = __cpu_to_le16(conn->handle); 3512 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp); 3513 } 3514 3515 unlock: 3516 hci_dev_unlock(hdev); 3517 } 3518 3519 static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status, 3520 u16 opcode, struct sk_buff *skb) 3521 { 3522 const struct hci_rp_read_enc_key_size *rp; 3523 
struct hci_conn *conn; 3524 u16 handle; 3525 3526 BT_DBG("%s status 0x%02x", hdev->name, status); 3527 3528 if (!skb || skb->len < sizeof(*rp)) { 3529 bt_dev_err(hdev, "invalid read key size response"); 3530 return; 3531 } 3532 3533 rp = (void *)skb->data; 3534 handle = le16_to_cpu(rp->handle); 3535 3536 hci_dev_lock(hdev); 3537 3538 conn = hci_conn_hash_lookup_handle(hdev, handle); 3539 if (!conn) 3540 goto unlock; 3541 3542 /* While unexpected, the read_enc_key_size command may fail. The most 3543 * secure approach is to then assume the key size is 0 to force a 3544 * disconnection. 3545 */ 3546 if (rp->status) { 3547 bt_dev_err(hdev, "failed to read key size for handle %u", 3548 handle); 3549 conn->enc_key_size = 0; 3550 } else { 3551 conn->enc_key_size = rp->key_size; 3552 } 3553 3554 hci_encrypt_cfm(conn, 0); 3555 3556 unlock: 3557 hci_dev_unlock(hdev); 3558 } 3559 3560 static void hci_encrypt_change_evt(struct hci_dev *hdev, void *data, 3561 struct sk_buff *skb) 3562 { 3563 struct hci_ev_encrypt_change *ev = data; 3564 struct hci_conn *conn; 3565 3566 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 3567 3568 hci_dev_lock(hdev); 3569 3570 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 3571 if (!conn) 3572 goto unlock; 3573 3574 if (!ev->status) { 3575 if (ev->encrypt) { 3576 /* Encryption implies authentication */ 3577 set_bit(HCI_CONN_AUTH, &conn->flags); 3578 set_bit(HCI_CONN_ENCRYPT, &conn->flags); 3579 conn->sec_level = conn->pending_sec_level; 3580 3581 /* P-256 authentication key implies FIPS */ 3582 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256) 3583 set_bit(HCI_CONN_FIPS, &conn->flags); 3584 3585 if ((conn->type == ACL_LINK && ev->encrypt == 0x02) || 3586 conn->type == LE_LINK) 3587 set_bit(HCI_CONN_AES_CCM, &conn->flags); 3588 } else { 3589 clear_bit(HCI_CONN_ENCRYPT, &conn->flags); 3590 clear_bit(HCI_CONN_AES_CCM, &conn->flags); 3591 } 3592 } 3593 3594 /* We should disregard the current RPA and generate a new one 3595 * whenever the encryption procedure fails. 3596 */ 3597 if (ev->status && conn->type == LE_LINK) { 3598 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED); 3599 hci_adv_instances_set_rpa_expired(hdev, true); 3600 } 3601 3602 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); 3603 3604 /* Check link security requirements are met */ 3605 if (!hci_conn_check_link_mode(conn)) 3606 ev->status = HCI_ERROR_AUTH_FAILURE; 3607 3608 if (ev->status && conn->state == BT_CONNECTED) { 3609 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING) 3610 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags); 3611 3612 /* Notify upper layers so they can cleanup before 3613 * disconnecting. 3614 */ 3615 hci_encrypt_cfm(conn, ev->status); 3616 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE); 3617 hci_conn_drop(conn); 3618 goto unlock; 3619 } 3620 3621 /* Try reading the encryption key size for encrypted ACL links */ 3622 if (!ev->status && ev->encrypt && conn->type == ACL_LINK) { 3623 struct hci_cp_read_enc_key_size cp; 3624 struct hci_request req; 3625 3626 /* Only send HCI_Read_Encryption_Key_Size if the 3627 * controller really supports it. If it doesn't, assume 3628 * the default size (16). 
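* (commands[20] & 0x10 is the supported-commands bit for * HCI_Read_Encryption_Key_Size, octet 20 bit 4 of the core * specification's supported commands bitmap.)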
3629 */ 3630 if (!(hdev->commands[20] & 0x10)) { 3631 conn->enc_key_size = HCI_LINK_KEY_SIZE; 3632 goto notify; 3633 } 3634 3635 hci_req_init(&req, hdev); 3636 3637 cp.handle = cpu_to_le16(conn->handle); 3638 hci_req_add(&req, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp); 3639 3640 if (hci_req_run_skb(&req, read_enc_key_size_complete)) { 3641 bt_dev_err(hdev, "sending read key size failed"); 3642 conn->enc_key_size = HCI_LINK_KEY_SIZE; 3643 goto notify; 3644 } 3645 3646 goto unlock; 3647 } 3648 3649 /* Set the default Authenticated Payload Timeout after an LE link 3650 * is established. As per Core Spec v5.0, Vol 2, Part B, Section 3.3, 3651 * the WRITE_AUTH_PAYLOAD_TIMEOUT command should be sent when the link 3652 * is active and encryption is enabled, the connection type can be 3653 * either LE or ACL, and the controller must support LMP Ping. 3654 * AES-CCM encryption is additionally required here. 3655 */ 3656 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) && 3657 test_bit(HCI_CONN_AES_CCM, &conn->flags) && 3658 ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) || 3659 (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) { 3660 struct hci_cp_write_auth_payload_to cp; 3661 3662 cp.handle = cpu_to_le16(conn->handle); 3663 cp.timeout = cpu_to_le16(hdev->auth_payload_timeout); 3664 hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO, 3665 sizeof(cp), &cp); 3666 } 3667 3668 notify: 3669 hci_encrypt_cfm(conn, ev->status); 3670 3671 unlock: 3672 hci_dev_unlock(hdev); 3673 } 3674 3675 static void hci_change_link_key_complete_evt(struct hci_dev *hdev, void *data, 3676 struct sk_buff *skb) 3677 { 3678 struct hci_ev_change_link_key_complete *ev = data; 3679 struct hci_conn *conn; 3680 3681 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 3682 3683 hci_dev_lock(hdev); 3684 3685 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 3686 if (conn) { 3687 if (!ev->status) 3688 set_bit(HCI_CONN_SECURE, &conn->flags); 3689 3690 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags); 3691 3692 hci_key_change_cfm(conn, ev->status); 3693 } 3694 3695 hci_dev_unlock(hdev); 3696 } 3697 3698 static void hci_remote_features_evt(struct hci_dev *hdev, void *data, 3699 struct sk_buff *skb) 3700 { 3701 struct hci_ev_remote_features *ev = data; 3702 struct hci_conn *conn; 3703 3704 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 3705 3706 hci_dev_lock(hdev); 3707 3708 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 3709 if (!conn) 3710 goto unlock; 3711 3712 if (!ev->status) 3713 memcpy(conn->features[0], ev->features, 8); 3714 3715 if (conn->state != BT_CONFIG) 3716 goto unlock; 3717 3718 if (!ev->status && lmp_ext_feat_capable(hdev) && 3719 lmp_ext_feat_capable(conn)) { 3720 struct hci_cp_read_remote_ext_features cp; 3721 cp.handle = ev->handle; 3722 cp.page = 0x01; 3723 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES, 3724 sizeof(cp), &cp); 3725 goto unlock; 3726 } 3727 3728 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) { 3729 struct hci_cp_remote_name_req cp; 3730 memset(&cp, 0, sizeof(cp)); 3731 bacpy(&cp.bdaddr, &conn->dst); 3732 cp.pscan_rep_mode = 0x02; 3733 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp); 3734 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) 3735 mgmt_device_connected(hdev, conn, NULL, 0); 3736 3737 if (!hci_outgoing_auth_needed(hdev, conn)) { 3738 conn->state = BT_CONNECTED; 3739 hci_connect_cfm(conn, ev->status); 3740 hci_conn_drop(conn); 3741 } 3742 3743 unlock: 3744 hci_dev_unlock(hdev); 3745 } 3746 3747 static
inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev, u8 ncmd) 3748 { 3749 cancel_delayed_work(&hdev->cmd_timer); 3750 3751 if (!test_bit(HCI_RESET, &hdev->flags)) { 3752 if (ncmd) { 3753 cancel_delayed_work(&hdev->ncmd_timer); 3754 atomic_set(&hdev->cmd_cnt, 1); 3755 } else { 3756 schedule_delayed_work(&hdev->ncmd_timer, 3757 HCI_NCMD_TIMEOUT); 3758 } 3759 } 3760 } 3761 3762 #define HCI_CC_VL(_op, _func, _min, _max) \ 3763 { \ 3764 .op = _op, \ 3765 .func = _func, \ 3766 .min_len = _min, \ 3767 .max_len = _max, \ 3768 } 3769 3770 #define HCI_CC(_op, _func, _len) \ 3771 HCI_CC_VL(_op, _func, _len, _len) 3772 3773 #define HCI_CC_STATUS(_op, _func) \ 3774 HCI_CC(_op, _func, sizeof(struct hci_ev_status)) 3775 3776 static const struct hci_cc { 3777 u16 op; 3778 u8 (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb); 3779 u16 min_len; 3780 u16 max_len; 3781 } hci_cc_table[] = { 3782 HCI_CC_STATUS(HCI_OP_INQUIRY_CANCEL, hci_cc_inquiry_cancel), 3783 HCI_CC_STATUS(HCI_OP_PERIODIC_INQ, hci_cc_periodic_inq), 3784 HCI_CC_STATUS(HCI_OP_EXIT_PERIODIC_INQ, hci_cc_exit_periodic_inq), 3785 HCI_CC_STATUS(HCI_OP_REMOTE_NAME_REQ_CANCEL, 3786 hci_cc_remote_name_req_cancel), 3787 HCI_CC(HCI_OP_ROLE_DISCOVERY, hci_cc_role_discovery, 3788 sizeof(struct hci_rp_role_discovery)), 3789 HCI_CC(HCI_OP_READ_LINK_POLICY, hci_cc_read_link_policy, 3790 sizeof(struct hci_rp_read_link_policy)), 3791 HCI_CC(HCI_OP_WRITE_LINK_POLICY, hci_cc_write_link_policy, 3792 sizeof(struct hci_rp_write_link_policy)), 3793 HCI_CC(HCI_OP_READ_DEF_LINK_POLICY, hci_cc_read_def_link_policy, 3794 sizeof(struct hci_rp_read_def_link_policy)), 3795 HCI_CC_STATUS(HCI_OP_WRITE_DEF_LINK_POLICY, 3796 hci_cc_write_def_link_policy), 3797 HCI_CC_STATUS(HCI_OP_RESET, hci_cc_reset), 3798 HCI_CC(HCI_OP_READ_STORED_LINK_KEY, hci_cc_read_stored_link_key, 3799 sizeof(struct hci_rp_read_stored_link_key)), 3800 HCI_CC(HCI_OP_DELETE_STORED_LINK_KEY, hci_cc_delete_stored_link_key, 3801 sizeof(struct hci_rp_delete_stored_link_key)), 3802 HCI_CC_STATUS(HCI_OP_WRITE_LOCAL_NAME, hci_cc_write_local_name), 3803 HCI_CC(HCI_OP_READ_LOCAL_NAME, hci_cc_read_local_name, 3804 sizeof(struct hci_rp_read_local_name)), 3805 HCI_CC_STATUS(HCI_OP_WRITE_AUTH_ENABLE, hci_cc_write_auth_enable), 3806 HCI_CC_STATUS(HCI_OP_WRITE_ENCRYPT_MODE, hci_cc_write_encrypt_mode), 3807 HCI_CC_STATUS(HCI_OP_WRITE_SCAN_ENABLE, hci_cc_write_scan_enable), 3808 HCI_CC_STATUS(HCI_OP_SET_EVENT_FLT, hci_cc_set_event_filter), 3809 HCI_CC(HCI_OP_READ_CLASS_OF_DEV, hci_cc_read_class_of_dev, 3810 sizeof(struct hci_rp_read_class_of_dev)), 3811 HCI_CC_STATUS(HCI_OP_WRITE_CLASS_OF_DEV, hci_cc_write_class_of_dev), 3812 HCI_CC(HCI_OP_READ_VOICE_SETTING, hci_cc_read_voice_setting, 3813 sizeof(struct hci_rp_read_voice_setting)), 3814 HCI_CC_STATUS(HCI_OP_WRITE_VOICE_SETTING, hci_cc_write_voice_setting), 3815 HCI_CC(HCI_OP_READ_NUM_SUPPORTED_IAC, hci_cc_read_num_supported_iac, 3816 sizeof(struct hci_rp_read_num_supported_iac)), 3817 HCI_CC_STATUS(HCI_OP_WRITE_SSP_MODE, hci_cc_write_ssp_mode), 3818 HCI_CC_STATUS(HCI_OP_WRITE_SC_SUPPORT, hci_cc_write_sc_support), 3819 HCI_CC(HCI_OP_READ_AUTH_PAYLOAD_TO, hci_cc_read_auth_payload_timeout, 3820 sizeof(struct hci_rp_read_auth_payload_to)), 3821 HCI_CC(HCI_OP_WRITE_AUTH_PAYLOAD_TO, hci_cc_write_auth_payload_timeout, 3822 sizeof(struct hci_rp_write_auth_payload_to)), 3823 HCI_CC(HCI_OP_READ_LOCAL_VERSION, hci_cc_read_local_version, 3824 sizeof(struct hci_rp_read_local_version)), 3825 HCI_CC(HCI_OP_READ_LOCAL_COMMANDS, hci_cc_read_local_commands, 3826 
sizeof(struct hci_rp_read_local_commands)), 3827 HCI_CC(HCI_OP_READ_LOCAL_FEATURES, hci_cc_read_local_features, 3828 sizeof(struct hci_rp_read_local_features)), 3829 HCI_CC(HCI_OP_READ_LOCAL_EXT_FEATURES, hci_cc_read_local_ext_features, 3830 sizeof(struct hci_rp_read_local_ext_features)), 3831 HCI_CC(HCI_OP_READ_BUFFER_SIZE, hci_cc_read_buffer_size, 3832 sizeof(struct hci_rp_read_buffer_size)), 3833 HCI_CC(HCI_OP_READ_BD_ADDR, hci_cc_read_bd_addr, 3834 sizeof(struct hci_rp_read_bd_addr)), 3835 HCI_CC(HCI_OP_READ_LOCAL_PAIRING_OPTS, hci_cc_read_local_pairing_opts, 3836 sizeof(struct hci_rp_read_local_pairing_opts)), 3837 HCI_CC(HCI_OP_READ_PAGE_SCAN_ACTIVITY, hci_cc_read_page_scan_activity, 3838 sizeof(struct hci_rp_read_page_scan_activity)), 3839 HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_ACTIVITY, 3840 hci_cc_write_page_scan_activity), 3841 HCI_CC(HCI_OP_READ_PAGE_SCAN_TYPE, hci_cc_read_page_scan_type, 3842 sizeof(struct hci_rp_read_page_scan_type)), 3843 HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_TYPE, hci_cc_write_page_scan_type), 3844 HCI_CC(HCI_OP_READ_DATA_BLOCK_SIZE, hci_cc_read_data_block_size, 3845 sizeof(struct hci_rp_read_data_block_size)), 3846 HCI_CC(HCI_OP_READ_FLOW_CONTROL_MODE, hci_cc_read_flow_control_mode, 3847 sizeof(struct hci_rp_read_flow_control_mode)), 3848 HCI_CC(HCI_OP_READ_LOCAL_AMP_INFO, hci_cc_read_local_amp_info, 3849 sizeof(struct hci_rp_read_local_amp_info)), 3850 HCI_CC(HCI_OP_READ_CLOCK, hci_cc_read_clock, 3851 sizeof(struct hci_rp_read_clock)), 3852 HCI_CC(HCI_OP_READ_INQ_RSP_TX_POWER, hci_cc_read_inq_rsp_tx_power, 3853 sizeof(struct hci_rp_read_inq_rsp_tx_power)), 3854 HCI_CC(HCI_OP_READ_DEF_ERR_DATA_REPORTING, 3855 hci_cc_read_def_err_data_reporting, 3856 sizeof(struct hci_rp_read_def_err_data_reporting)), 3857 HCI_CC_STATUS(HCI_OP_WRITE_DEF_ERR_DATA_REPORTING, 3858 hci_cc_write_def_err_data_reporting), 3859 HCI_CC(HCI_OP_PIN_CODE_REPLY, hci_cc_pin_code_reply, 3860 sizeof(struct hci_rp_pin_code_reply)), 3861 HCI_CC(HCI_OP_PIN_CODE_NEG_REPLY, hci_cc_pin_code_neg_reply, 3862 sizeof(struct hci_rp_pin_code_neg_reply)), 3863 HCI_CC(HCI_OP_READ_LOCAL_OOB_DATA, hci_cc_read_local_oob_data, 3864 sizeof(struct hci_rp_read_local_oob_data)), 3865 HCI_CC(HCI_OP_READ_LOCAL_OOB_EXT_DATA, hci_cc_read_local_oob_ext_data, 3866 sizeof(struct hci_rp_read_local_oob_ext_data)), 3867 HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE, hci_cc_le_read_buffer_size, 3868 sizeof(struct hci_rp_le_read_buffer_size)), 3869 HCI_CC(HCI_OP_LE_READ_LOCAL_FEATURES, hci_cc_le_read_local_features, 3870 sizeof(struct hci_rp_le_read_local_features)), 3871 HCI_CC(HCI_OP_LE_READ_ADV_TX_POWER, hci_cc_le_read_adv_tx_power, 3872 sizeof(struct hci_rp_le_read_adv_tx_power)), 3873 HCI_CC(HCI_OP_USER_CONFIRM_REPLY, hci_cc_user_confirm_reply, 3874 sizeof(struct hci_rp_user_confirm_reply)), 3875 HCI_CC(HCI_OP_USER_CONFIRM_NEG_REPLY, hci_cc_user_confirm_neg_reply, 3876 sizeof(struct hci_rp_user_confirm_reply)), 3877 HCI_CC(HCI_OP_USER_PASSKEY_REPLY, hci_cc_user_passkey_reply, 3878 sizeof(struct hci_rp_user_confirm_reply)), 3879 HCI_CC(HCI_OP_USER_PASSKEY_NEG_REPLY, hci_cc_user_passkey_neg_reply, 3880 sizeof(struct hci_rp_user_confirm_reply)), 3881 HCI_CC_STATUS(HCI_OP_LE_SET_RANDOM_ADDR, hci_cc_le_set_random_addr), 3882 HCI_CC_STATUS(HCI_OP_LE_SET_ADV_ENABLE, hci_cc_le_set_adv_enable), 3883 HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_PARAM, hci_cc_le_set_scan_param), 3884 HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_ENABLE, hci_cc_le_set_scan_enable), 3885 HCI_CC(HCI_OP_LE_READ_ACCEPT_LIST_SIZE, 3886 hci_cc_le_read_accept_list_size, 3887 sizeof(struct 
hci_rp_le_read_accept_list_size)), 3888 HCI_CC_STATUS(HCI_OP_LE_CLEAR_ACCEPT_LIST, hci_cc_le_clear_accept_list), 3889 HCI_CC_STATUS(HCI_OP_LE_ADD_TO_ACCEPT_LIST, 3890 hci_cc_le_add_to_accept_list), 3891 HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_ACCEPT_LIST, 3892 hci_cc_le_del_from_accept_list), 3893 HCI_CC(HCI_OP_LE_READ_SUPPORTED_STATES, hci_cc_le_read_supported_states, 3894 sizeof(struct hci_rp_le_read_supported_states)), 3895 HCI_CC(HCI_OP_LE_READ_DEF_DATA_LEN, hci_cc_le_read_def_data_len, 3896 sizeof(struct hci_rp_le_read_def_data_len)), 3897 HCI_CC_STATUS(HCI_OP_LE_WRITE_DEF_DATA_LEN, 3898 hci_cc_le_write_def_data_len), 3899 HCI_CC_STATUS(HCI_OP_LE_ADD_TO_RESOLV_LIST, 3900 hci_cc_le_add_to_resolv_list), 3901 HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_RESOLV_LIST, 3902 hci_cc_le_del_from_resolv_list), 3903 HCI_CC_STATUS(HCI_OP_LE_CLEAR_RESOLV_LIST, 3904 hci_cc_le_clear_resolv_list), 3905 HCI_CC(HCI_OP_LE_READ_RESOLV_LIST_SIZE, hci_cc_le_read_resolv_list_size, 3906 sizeof(struct hci_rp_le_read_resolv_list_size)), 3907 HCI_CC_STATUS(HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 3908 hci_cc_le_set_addr_resolution_enable), 3909 HCI_CC(HCI_OP_LE_READ_MAX_DATA_LEN, hci_cc_le_read_max_data_len, 3910 sizeof(struct hci_rp_le_read_max_data_len)), 3911 HCI_CC_STATUS(HCI_OP_WRITE_LE_HOST_SUPPORTED, 3912 hci_cc_write_le_host_supported), 3913 HCI_CC_STATUS(HCI_OP_LE_SET_ADV_PARAM, hci_cc_set_adv_param), 3914 HCI_CC(HCI_OP_READ_RSSI, hci_cc_read_rssi, 3915 sizeof(struct hci_rp_read_rssi)), 3916 HCI_CC(HCI_OP_READ_TX_POWER, hci_cc_read_tx_power, 3917 sizeof(struct hci_rp_read_tx_power)), 3918 HCI_CC_STATUS(HCI_OP_WRITE_SSP_DEBUG_MODE, hci_cc_write_ssp_debug_mode), 3919 HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_PARAMS, 3920 hci_cc_le_set_ext_scan_param), 3921 HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_ENABLE, 3922 hci_cc_le_set_ext_scan_enable), 3923 HCI_CC_STATUS(HCI_OP_LE_SET_DEFAULT_PHY, hci_cc_le_set_default_phy), 3924 HCI_CC(HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS, 3925 hci_cc_le_read_num_adv_sets, 3926 sizeof(struct hci_rp_le_read_num_supported_adv_sets)), 3927 HCI_CC(HCI_OP_LE_SET_EXT_ADV_PARAMS, hci_cc_set_ext_adv_param, 3928 sizeof(struct hci_rp_le_set_ext_adv_params)), 3929 HCI_CC_STATUS(HCI_OP_LE_SET_EXT_ADV_ENABLE, 3930 hci_cc_le_set_ext_adv_enable), 3931 HCI_CC_STATUS(HCI_OP_LE_SET_ADV_SET_RAND_ADDR, 3932 hci_cc_le_set_adv_set_random_addr), 3933 HCI_CC_STATUS(HCI_OP_LE_REMOVE_ADV_SET, hci_cc_le_remove_adv_set), 3934 HCI_CC_STATUS(HCI_OP_LE_CLEAR_ADV_SETS, hci_cc_le_clear_adv_sets), 3935 HCI_CC(HCI_OP_LE_READ_TRANSMIT_POWER, hci_cc_le_read_transmit_power, 3936 sizeof(struct hci_rp_le_read_transmit_power)), 3937 HCI_CC_STATUS(HCI_OP_LE_SET_PRIVACY_MODE, hci_cc_le_set_privacy_mode) 3938 }; 3939 3940 static u8 hci_cc_func(struct hci_dev *hdev, const struct hci_cc *cc, 3941 struct sk_buff *skb) 3942 { 3943 void *data; 3944 3945 if (skb->len < cc->min_len) { 3946 bt_dev_err(hdev, "unexpected cc 0x%4.4x length: %u < %u", 3947 cc->op, skb->len, cc->min_len); 3948 return HCI_ERROR_UNSPECIFIED; 3949 } 3950 3951 /* Just warn if the length is over max_len, since it may still be 3952 * possible to partially parse the cc, so leave it to the callback to 3953 * decide whether that is acceptable.
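* (Entries registered with HCI_CC() have min_len == max_len, so only * replies registered through HCI_CC_VL() are expected to vary in * length.)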
3954 */ 3955 if (skb->len > cc->max_len) 3956 bt_dev_warn(hdev, "unexpected cc 0x%4.4x length: %u > %u", 3957 cc->op, skb->len, cc->max_len); 3958 3959 data = hci_cc_skb_pull(hdev, skb, cc->op, cc->min_len); 3960 if (!data) 3961 return HCI_ERROR_UNSPECIFIED; 3962 3963 return cc->func(hdev, data, skb); 3964 } 3965 3966 static void hci_cmd_complete_evt(struct hci_dev *hdev, void *data, 3967 struct sk_buff *skb, u16 *opcode, u8 *status, 3968 hci_req_complete_t *req_complete, 3969 hci_req_complete_skb_t *req_complete_skb) 3970 { 3971 struct hci_ev_cmd_complete *ev = data; 3972 int i; 3973 3974 *opcode = __le16_to_cpu(ev->opcode); 3975 3976 bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode); 3977 3978 for (i = 0; i < ARRAY_SIZE(hci_cc_table); i++) { 3979 if (hci_cc_table[i].op == *opcode) { 3980 *status = hci_cc_func(hdev, &hci_cc_table[i], skb); 3981 break; 3982 } 3983 } 3984 3985 handle_cmd_cnt_and_timer(hdev, ev->ncmd); 3986 3987 hci_req_cmd_complete(hdev, *opcode, *status, req_complete, 3988 req_complete_skb); 3989 3990 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) { 3991 bt_dev_err(hdev, 3992 "unexpected event for opcode 0x%4.4x", *opcode); 3993 return; 3994 } 3995 3996 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q)) 3997 queue_work(hdev->workqueue, &hdev->cmd_work); 3998 } 3999 4000 #define HCI_CS(_op, _func) \ 4001 { \ 4002 .op = _op, \ 4003 .func = _func, \ 4004 } 4005 4006 static const struct hci_cs { 4007 u16 op; 4008 void (*func)(struct hci_dev *hdev, __u8 status); 4009 } hci_cs_table[] = { 4010 HCI_CS(HCI_OP_INQUIRY, hci_cs_inquiry), 4011 HCI_CS(HCI_OP_CREATE_CONN, hci_cs_create_conn), 4012 HCI_CS(HCI_OP_DISCONNECT, hci_cs_disconnect), 4013 HCI_CS(HCI_OP_ADD_SCO, hci_cs_add_sco), 4014 HCI_CS(HCI_OP_AUTH_REQUESTED, hci_cs_auth_requested), 4015 HCI_CS(HCI_OP_SET_CONN_ENCRYPT, hci_cs_set_conn_encrypt), 4016 HCI_CS(HCI_OP_REMOTE_NAME_REQ, hci_cs_remote_name_req), 4017 HCI_CS(HCI_OP_READ_REMOTE_FEATURES, hci_cs_read_remote_features), 4018 HCI_CS(HCI_OP_READ_REMOTE_EXT_FEATURES, 4019 hci_cs_read_remote_ext_features), 4020 HCI_CS(HCI_OP_SETUP_SYNC_CONN, hci_cs_setup_sync_conn), 4021 HCI_CS(HCI_OP_ENHANCED_SETUP_SYNC_CONN, 4022 hci_cs_enhanced_setup_sync_conn), 4023 HCI_CS(HCI_OP_SNIFF_MODE, hci_cs_sniff_mode), 4024 HCI_CS(HCI_OP_EXIT_SNIFF_MODE, hci_cs_exit_sniff_mode), 4025 HCI_CS(HCI_OP_SWITCH_ROLE, hci_cs_switch_role), 4026 HCI_CS(HCI_OP_LE_CREATE_CONN, hci_cs_le_create_conn), 4027 HCI_CS(HCI_OP_LE_READ_REMOTE_FEATURES, hci_cs_le_read_remote_features), 4028 HCI_CS(HCI_OP_LE_START_ENC, hci_cs_le_start_enc), 4029 HCI_CS(HCI_OP_LE_EXT_CREATE_CONN, hci_cs_le_ext_create_conn) 4030 }; 4031 4032 static void hci_cmd_status_evt(struct hci_dev *hdev, void *data, 4033 struct sk_buff *skb, u16 *opcode, u8 *status, 4034 hci_req_complete_t *req_complete, 4035 hci_req_complete_skb_t *req_complete_skb) 4036 { 4037 struct hci_ev_cmd_status *ev = data; 4038 int i; 4039 4040 *opcode = __le16_to_cpu(ev->opcode); 4041 *status = ev->status; 4042 4043 bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode); 4044 4045 for (i = 0; i < ARRAY_SIZE(hci_cs_table); i++) { 4046 if (hci_cs_table[i].op == *opcode) { 4047 hci_cs_table[i].func(hdev, ev->status); 4048 break; 4049 } 4050 } 4051 4052 handle_cmd_cnt_and_timer(hdev, ev->ncmd); 4053 4054 /* Indicate request completion if the command failed. 
Also, if 4055 * we're not waiting for a special event and we get a successful 4056 * command status, we should try to flag the request as completed 4057 * (since for this kind of command there will not be a command 4058 * complete event). 4059 */ 4060 if (ev->status || (hdev->sent_cmd && !hci_skb_event(hdev->sent_cmd))) { 4061 hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete, 4062 req_complete_skb); 4063 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) { 4064 bt_dev_err(hdev, "unexpected event for opcode 0x%4.4x", 4065 *opcode); 4066 return; 4067 } 4068 } 4069 4070 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q)) 4071 queue_work(hdev->workqueue, &hdev->cmd_work); 4072 } 4073 4074 static void hci_hardware_error_evt(struct hci_dev *hdev, void *data, 4075 struct sk_buff *skb) 4076 { 4077 struct hci_ev_hardware_error *ev = data; 4078 4079 bt_dev_dbg(hdev, "code 0x%2.2x", ev->code); 4080 4081 hdev->hw_error_code = ev->code; 4082 4083 queue_work(hdev->req_workqueue, &hdev->error_reset); 4084 } 4085 4086 static void hci_role_change_evt(struct hci_dev *hdev, void *data, 4087 struct sk_buff *skb) 4088 { 4089 struct hci_ev_role_change *ev = data; 4090 struct hci_conn *conn; 4091 4092 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 4093 4094 hci_dev_lock(hdev); 4095 4096 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 4097 if (conn) { 4098 if (!ev->status) 4099 conn->role = ev->role; 4100 4101 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags); 4102 4103 hci_role_switch_cfm(conn, ev->status, ev->role); 4104 } 4105 4106 hci_dev_unlock(hdev); 4107 } 4108 4109 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data, 4110 struct sk_buff *skb) 4111 { 4112 struct hci_ev_num_comp_pkts *ev = data; 4113 int i; 4114 4115 if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_PKTS, 4116 flex_array_size(ev, handles, ev->num))) 4117 return; 4118 4119 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) { 4120 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode); 4121 return; 4122 } 4123 4124 bt_dev_dbg(hdev, "num %d", ev->num); 4125 4126 for (i = 0; i < ev->num; i++) { 4127 struct hci_comp_pkts_info *info = &ev->handles[i]; 4128 struct hci_conn *conn; 4129 __u16 handle, count; 4130 4131 handle = __le16_to_cpu(info->handle); 4132 count = __le16_to_cpu(info->count); 4133 4134 conn = hci_conn_hash_lookup_handle(hdev, handle); 4135 if (!conn) 4136 continue; 4137 4138 conn->sent -= count; 4139 4140 switch (conn->type) { 4141 case ACL_LINK: 4142 hdev->acl_cnt += count; 4143 if (hdev->acl_cnt > hdev->acl_pkts) 4144 hdev->acl_cnt = hdev->acl_pkts; 4145 break; 4146 4147 case LE_LINK: 4148 if (hdev->le_pkts) { 4149 hdev->le_cnt += count; 4150 if (hdev->le_cnt > hdev->le_pkts) 4151 hdev->le_cnt = hdev->le_pkts; 4152 } else { 4153 hdev->acl_cnt += count; 4154 if (hdev->acl_cnt > hdev->acl_pkts) 4155 hdev->acl_cnt = hdev->acl_pkts; 4156 } 4157 break; 4158 4159 case SCO_LINK: 4160 hdev->sco_cnt += count; 4161 if (hdev->sco_cnt > hdev->sco_pkts) 4162 hdev->sco_cnt = hdev->sco_pkts; 4163 break; 4164 4165 default: 4166 bt_dev_err(hdev, "unknown type %d conn %p", 4167 conn->type, conn); 4168 break; 4169 } 4170 } 4171 4172 queue_work(hdev->workqueue, &hdev->tx_work); 4173 } 4174 4175 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev, 4176 __u16 handle) 4177 { 4178 struct hci_chan *chan; 4179 4180 switch (hdev->dev_type) { 4181 case HCI_PRIMARY: 4182 return hci_conn_hash_lookup_handle(hdev, handle); 4183 case HCI_AMP: 4184 chan = hci_chan_lookup_handle(hdev,
handle); 4185 if (chan) 4186 return chan->conn; 4187 break; 4188 default: 4189 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type); 4190 break; 4191 } 4192 4193 return NULL; 4194 } 4195 4196 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, void *data, 4197 struct sk_buff *skb) 4198 { 4199 struct hci_ev_num_comp_blocks *ev = data; 4200 int i; 4201 4202 if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_BLOCKS, 4203 flex_array_size(ev, handles, ev->num_hndl))) 4204 return; 4205 4206 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) { 4207 bt_dev_err(hdev, "wrong event for mode %d", 4208 hdev->flow_ctl_mode); 4209 return; 4210 } 4211 4212 bt_dev_dbg(hdev, "num_blocks %d num_hndl %d", ev->num_blocks, 4213 ev->num_hndl); 4214 4215 for (i = 0; i < ev->num_hndl; i++) { 4216 struct hci_comp_blocks_info *info = &ev->handles[i]; 4217 struct hci_conn *conn = NULL; 4218 __u16 handle, block_count; 4219 4220 handle = __le16_to_cpu(info->handle); 4221 block_count = __le16_to_cpu(info->blocks); 4222 4223 conn = __hci_conn_lookup_handle(hdev, handle); 4224 if (!conn) 4225 continue; 4226 4227 conn->sent -= block_count; 4228 4229 switch (conn->type) { 4230 case ACL_LINK: 4231 case AMP_LINK: 4232 hdev->block_cnt += block_count; 4233 if (hdev->block_cnt > hdev->num_blocks) 4234 hdev->block_cnt = hdev->num_blocks; 4235 break; 4236 4237 default: 4238 bt_dev_err(hdev, "unknown type %d conn %p", 4239 conn->type, conn); 4240 break; 4241 } 4242 } 4243 4244 queue_work(hdev->workqueue, &hdev->tx_work); 4245 } 4246 4247 static void hci_mode_change_evt(struct hci_dev *hdev, void *data, 4248 struct sk_buff *skb) 4249 { 4250 struct hci_ev_mode_change *ev = data; 4251 struct hci_conn *conn; 4252 4253 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 4254 4255 hci_dev_lock(hdev); 4256 4257 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 4258 if (conn) { 4259 conn->mode = ev->mode; 4260 4261 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, 4262 &conn->flags)) { 4263 if (conn->mode == HCI_CM_ACTIVE) 4264 set_bit(HCI_CONN_POWER_SAVE, &conn->flags); 4265 else 4266 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags); 4267 } 4268 4269 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags)) 4270 hci_sco_setup(conn, ev->status); 4271 } 4272 4273 hci_dev_unlock(hdev); 4274 } 4275 4276 static void hci_pin_code_request_evt(struct hci_dev *hdev, void *data, 4277 struct sk_buff *skb) 4278 { 4279 struct hci_ev_pin_code_req *ev = data; 4280 struct hci_conn *conn; 4281 4282 bt_dev_dbg(hdev, ""); 4283 4284 hci_dev_lock(hdev); 4285 4286 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 4287 if (!conn) 4288 goto unlock; 4289 4290 if (conn->state == BT_CONNECTED) { 4291 hci_conn_hold(conn); 4292 conn->disc_timeout = HCI_PAIRING_TIMEOUT; 4293 hci_conn_drop(conn); 4294 } 4295 4296 if (!hci_dev_test_flag(hdev, HCI_BONDABLE) && 4297 !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) { 4298 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY, 4299 sizeof(ev->bdaddr), &ev->bdaddr); 4300 } else if (hci_dev_test_flag(hdev, HCI_MGMT)) { 4301 u8 secure; 4302 4303 if (conn->pending_sec_level == BT_SECURITY_HIGH) 4304 secure = 1; 4305 else 4306 secure = 0; 4307 4308 mgmt_pin_code_request(hdev, &ev->bdaddr, secure); 4309 } 4310 4311 unlock: 4312 hci_dev_unlock(hdev); 4313 } 4314 4315 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len) 4316 { 4317 if (key_type == HCI_LK_CHANGED_COMBINATION) 4318 return; 4319 4320 conn->pin_length = pin_len; 4321 conn->key_type = key_type; 4322 4323 switch 
(key_type) { 4324 case HCI_LK_LOCAL_UNIT: 4325 case HCI_LK_REMOTE_UNIT: 4326 case HCI_LK_DEBUG_COMBINATION: 4327 return; 4328 case HCI_LK_COMBINATION: 4329 if (pin_len == 16) 4330 conn->pending_sec_level = BT_SECURITY_HIGH; 4331 else 4332 conn->pending_sec_level = BT_SECURITY_MEDIUM; 4333 break; 4334 case HCI_LK_UNAUTH_COMBINATION_P192: 4335 case HCI_LK_UNAUTH_COMBINATION_P256: 4336 conn->pending_sec_level = BT_SECURITY_MEDIUM; 4337 break; 4338 case HCI_LK_AUTH_COMBINATION_P192: 4339 conn->pending_sec_level = BT_SECURITY_HIGH; 4340 break; 4341 case HCI_LK_AUTH_COMBINATION_P256: 4342 conn->pending_sec_level = BT_SECURITY_FIPS; 4343 break; 4344 } 4345 } 4346 4347 static void hci_link_key_request_evt(struct hci_dev *hdev, void *data, 4348 struct sk_buff *skb) 4349 { 4350 struct hci_ev_link_key_req *ev = data; 4351 struct hci_cp_link_key_reply cp; 4352 struct hci_conn *conn; 4353 struct link_key *key; 4354 4355 bt_dev_dbg(hdev, ""); 4356 4357 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 4358 return; 4359 4360 hci_dev_lock(hdev); 4361 4362 key = hci_find_link_key(hdev, &ev->bdaddr); 4363 if (!key) { 4364 bt_dev_dbg(hdev, "link key not found for %pMR", &ev->bdaddr); 4365 goto not_found; 4366 } 4367 4368 bt_dev_dbg(hdev, "found key type %u for %pMR", key->type, &ev->bdaddr); 4369 4370 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 4371 if (conn) { 4372 clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags); 4373 4374 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 || 4375 key->type == HCI_LK_UNAUTH_COMBINATION_P256) && 4376 conn->auth_type != 0xff && (conn->auth_type & 0x01)) { 4377 bt_dev_dbg(hdev, "ignoring unauthenticated key"); 4378 goto not_found; 4379 } 4380 4381 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 && 4382 (conn->pending_sec_level == BT_SECURITY_HIGH || 4383 conn->pending_sec_level == BT_SECURITY_FIPS)) { 4384 bt_dev_dbg(hdev, "ignoring key unauthenticated for high security"); 4385 goto not_found; 4386 } 4387 4388 conn_set_key(conn, key->type, key->pin_len); 4389 } 4390 4391 bacpy(&cp.bdaddr, &ev->bdaddr); 4392 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE); 4393 4394 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp); 4395 4396 hci_dev_unlock(hdev); 4397 4398 return; 4399 4400 not_found: 4401 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr); 4402 hci_dev_unlock(hdev); 4403 } 4404 4405 static void hci_link_key_notify_evt(struct hci_dev *hdev, void *data, 4406 struct sk_buff *skb) 4407 { 4408 struct hci_ev_link_key_notify *ev = data; 4409 struct hci_conn *conn; 4410 struct link_key *key; 4411 bool persistent; 4412 u8 pin_len = 0; 4413 4414 bt_dev_dbg(hdev, ""); 4415 4416 hci_dev_lock(hdev); 4417 4418 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 4419 if (!conn) 4420 goto unlock; 4421 4422 hci_conn_hold(conn); 4423 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 4424 hci_conn_drop(conn); 4425 4426 set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags); 4427 conn_set_key(conn, ev->key_type, conn->pin_length); 4428 4429 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 4430 goto unlock; 4431 4432 key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key, 4433 ev->key_type, pin_len, &persistent); 4434 if (!key) 4435 goto unlock; 4436 4437 /* Update connection information since adding the key will have 4438 * fixed up the type in the case of changed combination keys. 
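 * conn_set_key() ignores HCI_LK_CHANGED_COMBINATION, so the refreshed
 * type stored by hci_add_link_key() is applied here instead.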
4439 */ 4440 if (ev->key_type == HCI_LK_CHANGED_COMBINATION) 4441 conn_set_key(conn, key->type, key->pin_len); 4442 4443 mgmt_new_link_key(hdev, key, persistent); 4444 4445 /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag 4446 * is set. If it's not set simply remove the key from the kernel 4447 * list (we've still notified user space about it but with 4448 * store_hint being 0). 4449 */ 4450 if (key->type == HCI_LK_DEBUG_COMBINATION && 4451 !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) { 4452 list_del_rcu(&key->list); 4453 kfree_rcu(key, rcu); 4454 goto unlock; 4455 } 4456 4457 if (persistent) 4458 clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags); 4459 else 4460 set_bit(HCI_CONN_FLUSH_KEY, &conn->flags); 4461 4462 unlock: 4463 hci_dev_unlock(hdev); 4464 } 4465 4466 static void hci_clock_offset_evt(struct hci_dev *hdev, void *data, 4467 struct sk_buff *skb) 4468 { 4469 struct hci_ev_clock_offset *ev = data; 4470 struct hci_conn *conn; 4471 4472 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 4473 4474 hci_dev_lock(hdev); 4475 4476 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 4477 if (conn && !ev->status) { 4478 struct inquiry_entry *ie; 4479 4480 ie = hci_inquiry_cache_lookup(hdev, &conn->dst); 4481 if (ie) { 4482 ie->data.clock_offset = ev->clock_offset; 4483 ie->timestamp = jiffies; 4484 } 4485 } 4486 4487 hci_dev_unlock(hdev); 4488 } 4489 4490 static void hci_pkt_type_change_evt(struct hci_dev *hdev, void *data, 4491 struct sk_buff *skb) 4492 { 4493 struct hci_ev_pkt_type_change *ev = data; 4494 struct hci_conn *conn; 4495 4496 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 4497 4498 hci_dev_lock(hdev); 4499 4500 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 4501 if (conn && !ev->status) 4502 conn->pkt_type = __le16_to_cpu(ev->pkt_type); 4503 4504 hci_dev_unlock(hdev); 4505 } 4506 4507 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, void *data, 4508 struct sk_buff *skb) 4509 { 4510 struct hci_ev_pscan_rep_mode *ev = data; 4511 struct inquiry_entry *ie; 4512 4513 bt_dev_dbg(hdev, ""); 4514 4515 hci_dev_lock(hdev); 4516 4517 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr); 4518 if (ie) { 4519 ie->data.pscan_rep_mode = ev->pscan_rep_mode; 4520 ie->timestamp = jiffies; 4521 } 4522 4523 hci_dev_unlock(hdev); 4524 } 4525 4526 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, void *edata, 4527 struct sk_buff *skb) 4528 { 4529 struct hci_ev_inquiry_result_rssi *ev = edata; 4530 struct inquiry_data data; 4531 int i; 4532 4533 bt_dev_dbg(hdev, "num_rsp %d", ev->num); 4534 4535 if (!ev->num) 4536 return; 4537 4538 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) 4539 return; 4540 4541 hci_dev_lock(hdev); 4542 4543 if (skb->len == array_size(ev->num, 4544 sizeof(struct inquiry_info_rssi_pscan))) { 4545 struct inquiry_info_rssi_pscan *info; 4546 4547 for (i = 0; i < ev->num; i++) { 4548 u32 flags; 4549 4550 info = hci_ev_skb_pull(hdev, skb, 4551 HCI_EV_INQUIRY_RESULT_WITH_RSSI, 4552 sizeof(*info)); 4553 if (!info) { 4554 bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x", 4555 HCI_EV_INQUIRY_RESULT_WITH_RSSI); 4556 goto unlock; 4557 } 4558 4559 bacpy(&data.bdaddr, &info->bdaddr); 4560 data.pscan_rep_mode = info->pscan_rep_mode; 4561 data.pscan_period_mode = info->pscan_period_mode; 4562 data.pscan_mode = info->pscan_mode; 4563 memcpy(data.dev_class, info->dev_class, 3); 4564 data.clock_offset = info->clock_offset; 4565 data.rssi = info->rssi; 4566 data.ssp_mode = 0x00; 4567 4568 flags = hci_inquiry_cache_update(hdev, 
&data, false); 4569 4570 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 4571 info->dev_class, info->rssi, 4572 flags, NULL, 0, NULL, 0); 4573 } 4574 } else if (skb->len == array_size(ev->num, 4575 sizeof(struct inquiry_info_rssi))) { 4576 struct inquiry_info_rssi *info; 4577 4578 for (i = 0; i < ev->num; i++) { 4579 u32 flags; 4580 4581 info = hci_ev_skb_pull(hdev, skb, 4582 HCI_EV_INQUIRY_RESULT_WITH_RSSI, 4583 sizeof(*info)); 4584 if (!info) { 4585 bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x", 4586 HCI_EV_INQUIRY_RESULT_WITH_RSSI); 4587 goto unlock; 4588 } 4589 4590 bacpy(&data.bdaddr, &info->bdaddr); 4591 data.pscan_rep_mode = info->pscan_rep_mode; 4592 data.pscan_period_mode = info->pscan_period_mode; 4593 data.pscan_mode = 0x00; 4594 memcpy(data.dev_class, info->dev_class, 3); 4595 data.clock_offset = info->clock_offset; 4596 data.rssi = info->rssi; 4597 data.ssp_mode = 0x00; 4598 4599 flags = hci_inquiry_cache_update(hdev, &data, false); 4600 4601 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 4602 info->dev_class, info->rssi, 4603 flags, NULL, 0, NULL, 0); 4604 } 4605 } else { 4606 bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x", 4607 HCI_EV_INQUIRY_RESULT_WITH_RSSI); 4608 } 4609 unlock: 4610 hci_dev_unlock(hdev); 4611 } 4612 4613 static void hci_remote_ext_features_evt(struct hci_dev *hdev, void *data, 4614 struct sk_buff *skb) 4615 { 4616 struct hci_ev_remote_ext_features *ev = data; 4617 struct hci_conn *conn; 4618 4619 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 4620 4621 hci_dev_lock(hdev); 4622 4623 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 4624 if (!conn) 4625 goto unlock; 4626 4627 if (ev->page < HCI_MAX_PAGES) 4628 memcpy(conn->features[ev->page], ev->features, 8); 4629 4630 if (!ev->status && ev->page == 0x01) { 4631 struct inquiry_entry *ie; 4632 4633 ie = hci_inquiry_cache_lookup(hdev, &conn->dst); 4634 if (ie) 4635 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP); 4636 4637 if (ev->features[0] & LMP_HOST_SSP) { 4638 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags); 4639 } else { 4640 /* It is mandatory by the Bluetooth specification that 4641 * Extended Inquiry Results are only used when Secure 4642 * Simple Pairing is enabled, but some devices violate 4643 * this. 
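 * (such devices expose EIR data while leaving the SSP Host Support
 * bit unset in their remote host features).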
4644 * 4645 * To make these devices work, the internal SSP 4646 * enabled flag needs to be cleared if the remote host 4647 * features do not indicate SSP support */ 4648 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags); 4649 } 4650 4651 if (ev->features[0] & LMP_HOST_SC) 4652 set_bit(HCI_CONN_SC_ENABLED, &conn->flags); 4653 } 4654 4655 if (conn->state != BT_CONFIG) 4656 goto unlock; 4657 4658 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) { 4659 struct hci_cp_remote_name_req cp; 4660 memset(&cp, 0, sizeof(cp)); 4661 bacpy(&cp.bdaddr, &conn->dst); 4662 cp.pscan_rep_mode = 0x02; 4663 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp); 4664 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) 4665 mgmt_device_connected(hdev, conn, NULL, 0); 4666 4667 if (!hci_outgoing_auth_needed(hdev, conn)) { 4668 conn->state = BT_CONNECTED; 4669 hci_connect_cfm(conn, ev->status); 4670 hci_conn_drop(conn); 4671 } 4672 4673 unlock: 4674 hci_dev_unlock(hdev); 4675 } 4676 4677 static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data, 4678 struct sk_buff *skb) 4679 { 4680 struct hci_ev_sync_conn_complete *ev = data; 4681 struct hci_conn *conn; 4682 u8 status = ev->status; 4683 4684 switch (ev->link_type) { 4685 case SCO_LINK: 4686 case ESCO_LINK: 4687 break; 4688 default: 4689 /* As per Core 5.3 Vol 4 Part E 7.7.35 (p.2219), Link_Type 4690 * for HCI_Synchronous_Connection_Complete is limited to 4691 * either SCO or eSCO. 4692 */ 4693 bt_dev_err(hdev, "Ignoring connect complete event for invalid link type"); 4694 return; 4695 } 4696 4697 bt_dev_dbg(hdev, "status 0x%2.2x", status); 4698 4699 hci_dev_lock(hdev); 4700 4701 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); 4702 if (!conn) { 4703 if (ev->link_type == ESCO_LINK) 4704 goto unlock; 4705 4706 /* When the link type in the event indicates a SCO connection 4707 * and lookup of the connection object fails, then check 4708 * if an eSCO connection object exists. 4709 * 4710 * The core limits the synchronous connections to either 4711 * SCO or eSCO. The eSCO connection is preferred and 4712 * attempted first; until it is successfully established, 4713 * the link type will be hinted as eSCO. 4714 */ 4715 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr); 4716 if (!conn) 4717 goto unlock; 4718 } 4719 4720 /* The HCI_Synchronous_Connection_Complete event is only sent once per connection. 4721 * Processing it more than once per connection can corrupt kernel memory. 4722 * 4723 * As the connection handle is set here for the first time, it indicates 4724 * whether the connection is already set up.
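 * A conn->handle other than HCI_CONN_HANDLE_UNSET therefore means the
 * event was already processed for this connection.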
4725 */ 4726 if (conn->handle != HCI_CONN_HANDLE_UNSET) { 4727 bt_dev_err(hdev, "Ignoring HCI_Sync_Conn_Complete event for existing connection"); 4728 goto unlock; 4729 } 4730 4731 switch (status) { 4732 case 0x00: 4733 conn->handle = __le16_to_cpu(ev->handle); 4734 if (conn->handle > HCI_CONN_HANDLE_MAX) { 4735 bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x", 4736 conn->handle, HCI_CONN_HANDLE_MAX); 4737 status = HCI_ERROR_INVALID_PARAMETERS; 4738 conn->state = BT_CLOSED; 4739 break; 4740 } 4741 4742 conn->state = BT_CONNECTED; 4743 conn->type = ev->link_type; 4744 4745 hci_debugfs_create_conn(conn); 4746 hci_conn_add_sysfs(conn); 4747 break; 4748 4749 case 0x10: /* Connection Accept Timeout */ 4750 case 0x0d: /* Connection Rejected due to Limited Resources */ 4751 case 0x11: /* Unsupported Feature or Parameter Value */ 4752 case 0x1c: /* SCO interval rejected */ 4753 case 0x1a: /* Unsupported Remote Feature */ 4754 case 0x1e: /* Invalid LMP Parameters */ 4755 case 0x1f: /* Unspecified error */ 4756 case 0x20: /* Unsupported LMP Parameter value */ 4757 if (conn->out) { 4758 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) | 4759 (hdev->esco_type & EDR_ESCO_MASK); 4760 if (hci_setup_sync(conn, conn->link->handle)) 4761 goto unlock; 4762 } 4763 fallthrough; 4764 4765 default: 4766 conn->state = BT_CLOSED; 4767 break; 4768 } 4769 4770 bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode); 4771 /* Notify only for SCO over the HCI transport data path, which 4772 * is zero; a non-zero value indicates a non-HCI transport data path. 4773 */ 4774 if (conn->codec.data_path == 0 && hdev->notify) { 4775 switch (ev->air_mode) { 4776 case 0x02: 4777 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD); 4778 break; 4779 case 0x03: 4780 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP); 4781 break; 4782 } 4783 } 4784 4785 hci_connect_cfm(conn, status); 4786 if (status) 4787 hci_conn_del(conn); 4788 4789 unlock: 4790 hci_dev_unlock(hdev); 4791 } 4792 4793 static inline size_t eir_get_length(u8 *eir, size_t eir_len) 4794 { 4795 size_t parsed = 0; 4796 4797 while (parsed < eir_len) { 4798 u8 field_len = eir[0]; 4799 4800 if (field_len == 0) 4801 return parsed; 4802 4803 parsed += field_len + 1; 4804 eir += field_len + 1; 4805 } 4806 4807 return eir_len; 4808 } 4809 4810 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, void *edata, 4811 struct sk_buff *skb) 4812 { 4813 struct hci_ev_ext_inquiry_result *ev = edata; 4814 struct inquiry_data data; 4815 size_t eir_len; 4816 int i; 4817 4818 if (!hci_ev_skb_pull(hdev, skb, HCI_EV_EXTENDED_INQUIRY_RESULT, 4819 flex_array_size(ev, info, ev->num))) 4820 return; 4821 4822 bt_dev_dbg(hdev, "num %d", ev->num); 4823 4824 if (!ev->num) 4825 return; 4826 4827 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) 4828 return; 4829 4830 hci_dev_lock(hdev); 4831 4832 for (i = 0; i < ev->num; i++) { 4833 struct extended_inquiry_info *info = &ev->info[i]; 4834 u32 flags; 4835 bool name_known; 4836 4837 bacpy(&data.bdaddr, &info->bdaddr); 4838 data.pscan_rep_mode = info->pscan_rep_mode; 4839 data.pscan_period_mode = info->pscan_period_mode; 4840 data.pscan_mode = 0x00; 4841 memcpy(data.dev_class, info->dev_class, 3); 4842 data.clock_offset = info->clock_offset; 4843 data.rssi = info->rssi; 4844 data.ssp_mode = 0x01; 4845 4846 if (hci_dev_test_flag(hdev, HCI_MGMT)) 4847 name_known = eir_get_data(info->data, 4848 sizeof(info->data), 4849 EIR_NAME_COMPLETE, NULL); 4850 else 4851 name_known = true; 4852 4853 flags = hci_inquiry_cache_update(hdev, &data,
name_known); 4854 4855 eir_len = eir_get_length(info->data, sizeof(info->data)); 4856 4857 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 4858 info->dev_class, info->rssi, 4859 flags, info->data, eir_len, NULL, 0); 4860 } 4861 4862 hci_dev_unlock(hdev); 4863 } 4864 4865 static void hci_key_refresh_complete_evt(struct hci_dev *hdev, void *data, 4866 struct sk_buff *skb) 4867 { 4868 struct hci_ev_key_refresh_complete *ev = data; 4869 struct hci_conn *conn; 4870 4871 bt_dev_dbg(hdev, "status 0x%2.2x handle 0x%4.4x", ev->status, 4872 __le16_to_cpu(ev->handle)); 4873 4874 hci_dev_lock(hdev); 4875 4876 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 4877 if (!conn) 4878 goto unlock; 4879 4880 /* For BR/EDR the necessary steps are taken through the 4881 * auth_complete event. 4882 */ 4883 if (conn->type != LE_LINK) 4884 goto unlock; 4885 4886 if (!ev->status) 4887 conn->sec_level = conn->pending_sec_level; 4888 4889 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); 4890 4891 if (ev->status && conn->state == BT_CONNECTED) { 4892 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE); 4893 hci_conn_drop(conn); 4894 goto unlock; 4895 } 4896 4897 if (conn->state == BT_CONFIG) { 4898 if (!ev->status) 4899 conn->state = BT_CONNECTED; 4900 4901 hci_connect_cfm(conn, ev->status); 4902 hci_conn_drop(conn); 4903 } else { 4904 hci_auth_cfm(conn, ev->status); 4905 4906 hci_conn_hold(conn); 4907 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 4908 hci_conn_drop(conn); 4909 } 4910 4911 unlock: 4912 hci_dev_unlock(hdev); 4913 } 4914 4915 static u8 hci_get_auth_req(struct hci_conn *conn) 4916 { 4917 /* If remote requests no-bonding, follow that lead */ 4918 if (conn->remote_auth == HCI_AT_NO_BONDING || 4919 conn->remote_auth == HCI_AT_NO_BONDING_MITM) 4920 return conn->remote_auth | (conn->auth_type & 0x01); 4921 4922 /* If both remote and local have enough IO capabilities, require 4923 * MITM protection 4924 */ 4925 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT && 4926 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT) 4927 return conn->remote_auth | 0x01; 4928 4929 /* No MITM protection possible, so ignore remote requirement */ 4930 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01); 4931 } 4932 4933 static u8 bredr_oob_data_present(struct hci_conn *conn) 4934 { 4935 struct hci_dev *hdev = conn->hdev; 4936 struct oob_data *data; 4937 4938 data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR); 4939 if (!data) 4940 return 0x00; 4941 4942 if (bredr_sc_enabled(hdev)) { 4943 /* When Secure Connections is enabled, then just 4944 * return the present value stored with the OOB 4945 * data. The stored value contains the right present 4946 * information. However, it can only be trusted when 4947 * not in Secure Connection Only mode. 4948 */ 4949 if (!hci_dev_test_flag(hdev, HCI_SC_ONLY)) 4950 return data->present; 4951 4952 /* When Secure Connections Only mode is enabled, then 4953 * the P-256 values are required. If they are not 4954 * available, then do not declare that OOB data is 4955 * present. 4956 */ 4957 if (!memcmp(data->rand256, ZERO_KEY, 16) || 4958 !memcmp(data->hash256, ZERO_KEY, 16)) 4959 return 0x00; 4960 4961 return 0x02; 4962 } 4963 4964 /* When Secure Connections is not enabled or actually 4965 * not supported by the hardware, then check if the 4966 * P-192 data values are present.
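 * (The value returned here becomes cp.oob_data in the IO Capability
 * Reply below; 0x01 is assumed to mean that only P-192 data is
 * available.)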
4967 */ 4968 if (!memcmp(data->rand192, ZERO_KEY, 16) || 4969 !memcmp(data->hash192, ZERO_KEY, 16)) 4970 return 0x00; 4971 4972 return 0x01; 4973 } 4974 4975 static void hci_io_capa_request_evt(struct hci_dev *hdev, void *data, 4976 struct sk_buff *skb) 4977 { 4978 struct hci_ev_io_capa_request *ev = data; 4979 struct hci_conn *conn; 4980 4981 bt_dev_dbg(hdev, ""); 4982 4983 hci_dev_lock(hdev); 4984 4985 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 4986 if (!conn) 4987 goto unlock; 4988 4989 hci_conn_hold(conn); 4990 4991 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 4992 goto unlock; 4993 4994 /* Allow pairing if we're bondable, if we are the initiators 4995 * of the pairing, or if the remote is not requesting bonding. 4996 */ 4997 if (hci_dev_test_flag(hdev, HCI_BONDABLE) || 4998 test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) || 4999 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) { 5000 struct hci_cp_io_capability_reply cp; 5001 5002 bacpy(&cp.bdaddr, &ev->bdaddr); 5003 /* Change the IO capability from KeyboardDisplay to DisplayYesNo, 5004 * as KeyboardDisplay is not a valid value per the BT spec. */ 5005 cp.capability = (conn->io_capability == 0x04) ? 5006 HCI_IO_DISPLAY_YESNO : conn->io_capability; 5007 5008 /* If we are the initiators, there is no remote information yet */ 5009 if (conn->remote_auth == 0xff) { 5010 /* Request MITM protection if our IO caps allow it 5011 * except for the no-bonding case. 5012 */ 5013 if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT && 5014 conn->auth_type != HCI_AT_NO_BONDING) 5015 conn->auth_type |= 0x01; 5016 } else { 5017 conn->auth_type = hci_get_auth_req(conn); 5018 } 5019 5020 /* If we're not bondable, force one of the non-bondable 5021 * authentication requirement values. 5022 */ 5023 if (!hci_dev_test_flag(hdev, HCI_BONDABLE)) 5024 conn->auth_type &= HCI_AT_NO_BONDING_MITM; 5025 5026 cp.authentication = conn->auth_type; 5027 cp.oob_data = bredr_oob_data_present(conn); 5028 5029 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY, 5030 sizeof(cp), &cp); 5031 } else { 5032 struct hci_cp_io_capability_neg_reply cp; 5033 5034 bacpy(&cp.bdaddr, &ev->bdaddr); 5035 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED; 5036 5037 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY, 5038 sizeof(cp), &cp); 5039 } 5040 5041 unlock: 5042 hci_dev_unlock(hdev); 5043 } 5044 5045 static void hci_io_capa_reply_evt(struct hci_dev *hdev, void *data, 5046 struct sk_buff *skb) 5047 { 5048 struct hci_ev_io_capa_reply *ev = data; 5049 struct hci_conn *conn; 5050 5051 bt_dev_dbg(hdev, ""); 5052 5053 hci_dev_lock(hdev); 5054 5055 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 5056 if (!conn) 5057 goto unlock; 5058 5059 conn->remote_cap = ev->capability; 5060 conn->remote_auth = ev->authentication; 5061 5062 unlock: 5063 hci_dev_unlock(hdev); 5064 } 5065 5066 static void hci_user_confirm_request_evt(struct hci_dev *hdev, void *data, 5067 struct sk_buff *skb) 5068 { 5069 struct hci_ev_user_confirm_req *ev = data; 5070 int loc_mitm, rem_mitm, confirm_hint = 0; 5071 struct hci_conn *conn; 5072 5073 bt_dev_dbg(hdev, ""); 5074 5075 hci_dev_lock(hdev); 5076 5077 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 5078 goto unlock; 5079 5080 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 5081 if (!conn) 5082 goto unlock; 5083 5084 loc_mitm = (conn->auth_type & 0x01); 5085 rem_mitm = (conn->remote_auth & 0x01); 5086 5087 /* If we require MITM but the remote device can't provide that 5088 * (it has NoInputNoOutput) then reject the confirmation 5089 * request.
We check the security level here since it doesn't 5090 * necessarily match conn->auth_type. 5091 */ 5092 if (conn->pending_sec_level > BT_SECURITY_MEDIUM && 5093 conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) { 5094 bt_dev_dbg(hdev, "Rejecting request: remote device can't provide MITM"); 5095 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY, 5096 sizeof(ev->bdaddr), &ev->bdaddr); 5097 goto unlock; 5098 } 5099 5100 /* If no side requires MITM protection, auto-accept */ 5101 if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) && 5102 (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) { 5103 5104 /* If we're not the initiators, request authorization to 5105 * proceed from user space (mgmt_user_confirm with 5106 * confirm_hint set to 1). The exception is if neither 5107 * side had MITM or if the local IO capability is 5108 * NoInputNoOutput, in which case we do auto-accept. 5109 */ 5110 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && 5111 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT && 5112 (loc_mitm || rem_mitm)) { 5113 bt_dev_dbg(hdev, "Confirming auto-accept as acceptor"); 5114 confirm_hint = 1; 5115 goto confirm; 5116 } 5117 5118 /* If a link key already exists on the local host, leave the 5119 * decision to user space since the remote device could be 5120 * legitimate or malicious. 5121 */ 5122 if (hci_find_link_key(hdev, &ev->bdaddr)) { 5123 bt_dev_dbg(hdev, "Local host already has link key"); 5124 confirm_hint = 1; 5125 goto confirm; 5126 } 5127 5128 BT_DBG("Auto-accept of user confirmation with %ums delay", 5129 hdev->auto_accept_delay); 5130 5131 if (hdev->auto_accept_delay > 0) { 5132 int delay = msecs_to_jiffies(hdev->auto_accept_delay); 5133 queue_delayed_work(conn->hdev->workqueue, 5134 &conn->auto_accept_work, delay); 5135 goto unlock; 5136 } 5137 5138 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, 5139 sizeof(ev->bdaddr), &ev->bdaddr); 5140 goto unlock; 5141 } 5142 5143 confirm: 5144 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, 5145 le32_to_cpu(ev->passkey), confirm_hint); 5146 5147 unlock: 5148 hci_dev_unlock(hdev); 5149 } 5150 5151 static void hci_user_passkey_request_evt(struct hci_dev *hdev, void *data, 5152 struct sk_buff *skb) 5153 { 5154 struct hci_ev_user_passkey_req *ev = data; 5155 5156 bt_dev_dbg(hdev, ""); 5157 5158 if (hci_dev_test_flag(hdev, HCI_MGMT)) 5159 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0); 5160 } 5161 5162 static void hci_user_passkey_notify_evt(struct hci_dev *hdev, void *data, 5163 struct sk_buff *skb) 5164 { 5165 struct hci_ev_user_passkey_notify *ev = data; 5166 struct hci_conn *conn; 5167 5168 bt_dev_dbg(hdev, ""); 5169 5170 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 5171 if (!conn) 5172 return; 5173 5174 conn->passkey_notify = __le32_to_cpu(ev->passkey); 5175 conn->passkey_entered = 0; 5176 5177 if (hci_dev_test_flag(hdev, HCI_MGMT)) 5178 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type, 5179 conn->dst_type, conn->passkey_notify, 5180 conn->passkey_entered); 5181 } 5182 5183 static void hci_keypress_notify_evt(struct hci_dev *hdev, void *data, 5184 struct sk_buff *skb) 5185 { 5186 struct hci_ev_keypress_notify *ev = data; 5187 struct hci_conn *conn; 5188 5189 bt_dev_dbg(hdev, ""); 5190 5191 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 5192 if (!conn) 5193 return; 5194 5195 switch (ev->type) { 5196 case HCI_KEYPRESS_STARTED: 5197 conn->passkey_entered = 0; 5198 return; 5199 5200 case HCI_KEYPRESS_ENTERED: 5201 conn->passkey_entered++; 5202 break; 5203 5204 case
HCI_KEYPRESS_ERASED: 5205 conn->passkey_entered--; 5206 break; 5207 5208 case HCI_KEYPRESS_CLEARED: 5209 conn->passkey_entered = 0; 5210 break; 5211 5212 case HCI_KEYPRESS_COMPLETED: 5213 return; 5214 } 5215 5216 if (hci_dev_test_flag(hdev, HCI_MGMT)) 5217 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type, 5218 conn->dst_type, conn->passkey_notify, 5219 conn->passkey_entered); 5220 } 5221 5222 static void hci_simple_pair_complete_evt(struct hci_dev *hdev, void *data, 5223 struct sk_buff *skb) 5224 { 5225 struct hci_ev_simple_pair_complete *ev = data; 5226 struct hci_conn *conn; 5227 5228 bt_dev_dbg(hdev, ""); 5229 5230 hci_dev_lock(hdev); 5231 5232 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 5233 if (!conn) 5234 goto unlock; 5235 5236 /* Reset the authentication requirement to unknown */ 5237 conn->remote_auth = 0xff; 5238 5239 /* To avoid duplicate auth_failed events to user space we check 5240 * the HCI_CONN_AUTH_PEND flag, which will be set if we 5241 * initiated the authentication. A traditional auth_complete 5242 * event is always produced as initiator and is also mapped to 5243 * the mgmt_auth_failed event. */ 5244 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status) 5245 mgmt_auth_failed(conn, ev->status); 5246 5247 hci_conn_drop(conn); 5248 5249 unlock: 5250 hci_dev_unlock(hdev); 5251 } 5252 5253 static void hci_remote_host_features_evt(struct hci_dev *hdev, void *data, 5254 struct sk_buff *skb) 5255 { 5256 struct hci_ev_remote_host_features *ev = data; 5257 struct inquiry_entry *ie; 5258 struct hci_conn *conn; 5259 5260 bt_dev_dbg(hdev, ""); 5261 5262 hci_dev_lock(hdev); 5263 5264 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 5265 if (conn) 5266 memcpy(conn->features[1], ev->features, 8); 5267 5268 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr); 5269 if (ie) 5270 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP); 5271 5272 hci_dev_unlock(hdev); 5273 } 5274 5275 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev, void *edata, 5276 struct sk_buff *skb) 5277 { 5278 struct hci_ev_remote_oob_data_request *ev = edata; 5279 struct oob_data *data; 5280 5281 bt_dev_dbg(hdev, ""); 5282 5283 hci_dev_lock(hdev); 5284 5285 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 5286 goto unlock; 5287 5288 data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR); 5289 if (!data) { 5290 struct hci_cp_remote_oob_data_neg_reply cp; 5291 5292 bacpy(&cp.bdaddr, &ev->bdaddr); 5293 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, 5294 sizeof(cp), &cp); 5295 goto unlock; 5296 } 5297 5298 if (bredr_sc_enabled(hdev)) { 5299 struct hci_cp_remote_oob_ext_data_reply cp; 5300 5301 bacpy(&cp.bdaddr, &ev->bdaddr); 5302 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) { 5303 memset(cp.hash192, 0, sizeof(cp.hash192)); 5304 memset(cp.rand192, 0, sizeof(cp.rand192)); 5305 } else { 5306 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192)); 5307 memcpy(cp.rand192, data->rand192, sizeof(cp.rand192)); 5308 } 5309 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256)); 5310 memcpy(cp.rand256, data->rand256, sizeof(cp.rand256)); 5311 5312 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY, 5313 sizeof(cp), &cp); 5314 } else { 5315 struct hci_cp_remote_oob_data_reply cp; 5316 5317 bacpy(&cp.bdaddr, &ev->bdaddr); 5318 memcpy(cp.hash, data->hash192, sizeof(cp.hash)); 5319 memcpy(cp.rand, data->rand192, sizeof(cp.rand)); 5320 5321 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, 5322 sizeof(cp), &cp); 5323 } 5324 5325 unlock: 5326 hci_dev_unlock(hdev); 5327 } 5328 5329
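/* The handlers below deal with AMP physical and logical links; they are
 * only compiled in when High Speed support (CONFIG_BT_HS) is enabled.
 */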
#if IS_ENABLED(CONFIG_BT_HS) 5330 static void hci_chan_selected_evt(struct hci_dev *hdev, void *data, 5331 struct sk_buff *skb) 5332 { 5333 struct hci_ev_channel_selected *ev = data; 5334 struct hci_conn *hcon; 5335 5336 bt_dev_dbg(hdev, "handle 0x%2.2x", ev->phy_handle); 5337 5338 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle); 5339 if (!hcon) 5340 return; 5341 5342 amp_read_loc_assoc_final_data(hdev, hcon); 5343 } 5344 5345 static void hci_phy_link_complete_evt(struct hci_dev *hdev, void *data, 5346 struct sk_buff *skb) 5347 { 5348 struct hci_ev_phy_link_complete *ev = data; 5349 struct hci_conn *hcon, *bredr_hcon; 5350 5351 bt_dev_dbg(hdev, "handle 0x%2.2x status 0x%2.2x", ev->phy_handle, 5352 ev->status); 5353 5354 hci_dev_lock(hdev); 5355 5356 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle); 5357 if (!hcon) 5358 goto unlock; 5359 5360 if (!hcon->amp_mgr) 5361 goto unlock; 5362 5363 if (ev->status) { 5364 hci_conn_del(hcon); 5365 goto unlock; 5366 } 5367 5368 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon; 5369 5370 hcon->state = BT_CONNECTED; 5371 bacpy(&hcon->dst, &bredr_hcon->dst); 5372 5373 hci_conn_hold(hcon); 5374 hcon->disc_timeout = HCI_DISCONN_TIMEOUT; 5375 hci_conn_drop(hcon); 5376 5377 hci_debugfs_create_conn(hcon); 5378 hci_conn_add_sysfs(hcon); 5379 5380 amp_physical_cfm(bredr_hcon, hcon); 5381 5382 unlock: 5383 hci_dev_unlock(hdev); 5384 } 5385 5386 static void hci_loglink_complete_evt(struct hci_dev *hdev, void *data, 5387 struct sk_buff *skb) 5388 { 5389 struct hci_ev_logical_link_complete *ev = data; 5390 struct hci_conn *hcon; 5391 struct hci_chan *hchan; 5392 struct amp_mgr *mgr; 5393 5394 bt_dev_dbg(hdev, "log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x", 5395 le16_to_cpu(ev->handle), ev->phy_handle, ev->status); 5396 5397 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle); 5398 if (!hcon) 5399 return; 5400 5401 /* Create AMP hchan */ 5402 hchan = hci_chan_create(hcon); 5403 if (!hchan) 5404 return; 5405 5406 hchan->handle = le16_to_cpu(ev->handle); 5407 hchan->amp = true; 5408 5409 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan); 5410 5411 mgr = hcon->amp_mgr; 5412 if (mgr && mgr->bredr_chan) { 5413 struct l2cap_chan *bredr_chan = mgr->bredr_chan; 5414 5415 l2cap_chan_lock(bredr_chan); 5416 5417 bredr_chan->conn->mtu = hdev->block_mtu; 5418 l2cap_logical_cfm(bredr_chan, hchan, 0); 5419 hci_conn_hold(hcon); 5420 5421 l2cap_chan_unlock(bredr_chan); 5422 } 5423 } 5424 5425 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev, void *data, 5426 struct sk_buff *skb) 5427 { 5428 struct hci_ev_disconn_logical_link_complete *ev = data; 5429 struct hci_chan *hchan; 5430 5431 bt_dev_dbg(hdev, "handle 0x%4.4x status 0x%2.2x", 5432 le16_to_cpu(ev->handle), ev->status); 5433 5434 if (ev->status) 5435 return; 5436 5437 hci_dev_lock(hdev); 5438 5439 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle)); 5440 if (!hchan || !hchan->amp) 5441 goto unlock; 5442 5443 amp_destroy_logical_link(hchan, ev->reason); 5444 5445 unlock: 5446 hci_dev_unlock(hdev); 5447 } 5448 5449 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev, void *data, 5450 struct sk_buff *skb) 5451 { 5452 struct hci_ev_disconn_phy_link_complete *ev = data; 5453 struct hci_conn *hcon; 5454 5455 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 5456 5457 if (ev->status) 5458 return; 5459 5460 hci_dev_lock(hdev); 5461 5462 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle); 5463 if (hcon && hcon->type == AMP_LINK) { 5464 hcon->state = 
BT_CLOSED; 5465 hci_disconn_cfm(hcon, ev->reason); 5466 hci_conn_del(hcon); 5467 } 5468 5469 hci_dev_unlock(hdev); 5470 } 5471 #endif 5472 5473 static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr, 5474 u8 bdaddr_type, bdaddr_t *local_rpa) 5475 { 5476 if (conn->out) { 5477 conn->dst_type = bdaddr_type; 5478 conn->resp_addr_type = bdaddr_type; 5479 bacpy(&conn->resp_addr, bdaddr); 5480 5481 /* If the controller has set a Local RPA, then it must be 5482 * used instead of hdev->rpa. 5483 */ 5484 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) { 5485 conn->init_addr_type = ADDR_LE_DEV_RANDOM; 5486 bacpy(&conn->init_addr, local_rpa); 5487 } else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) { 5488 conn->init_addr_type = ADDR_LE_DEV_RANDOM; 5489 bacpy(&conn->init_addr, &conn->hdev->rpa); 5490 } else { 5491 hci_copy_identity_address(conn->hdev, &conn->init_addr, 5492 &conn->init_addr_type); 5493 } 5494 } else { 5495 conn->resp_addr_type = conn->hdev->adv_addr_type; 5496 /* If the controller has set a Local RPA, then it must be 5497 * used instead of hdev->rpa. 5498 */ 5499 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) { 5500 conn->resp_addr_type = ADDR_LE_DEV_RANDOM; 5501 bacpy(&conn->resp_addr, local_rpa); 5502 } else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) { 5503 /* In case of ext adv, resp_addr will be updated in 5504 * Adv Terminated event. 5505 */ 5506 if (!ext_adv_capable(conn->hdev)) 5507 bacpy(&conn->resp_addr, 5508 &conn->hdev->random_addr); 5509 } else { 5510 bacpy(&conn->resp_addr, &conn->hdev->bdaddr); 5511 } 5512 5513 conn->init_addr_type = bdaddr_type; 5514 bacpy(&conn->init_addr, bdaddr); 5515 5516 /* For incoming connections, set the default minimum 5517 * and maximum connection interval. They will be used 5518 * to check if the parameters are in range and if not 5519 * trigger the connection update procedure. 5520 */ 5521 conn->le_conn_min_interval = conn->hdev->le_conn_min_interval; 5522 conn->le_conn_max_interval = conn->hdev->le_conn_max_interval; 5523 } 5524 } 5525 5526 static void le_conn_complete_evt(struct hci_dev *hdev, u8 status, 5527 bdaddr_t *bdaddr, u8 bdaddr_type, 5528 bdaddr_t *local_rpa, u8 role, u16 handle, 5529 u16 interval, u16 latency, 5530 u16 supervision_timeout) 5531 { 5532 struct hci_conn_params *params; 5533 struct hci_conn *conn; 5534 struct smp_irk *irk; 5535 u8 addr_type; 5536 5537 hci_dev_lock(hdev); 5538 5539 /* All controllers implicitly stop advertising in the event of a 5540 * connection, so ensure that the state bit is cleared. 5541 */ 5542 hci_dev_clear_flag(hdev, HCI_LE_ADV); 5543 5544 conn = hci_lookup_le_connect(hdev); 5545 if (!conn) { 5546 /* In case of an error status, if there is no connection pending 5547 * just unlock as there is nothing to clean up. 5548 */ 5549 if (status) 5550 goto unlock; 5551 5552 conn = hci_conn_add(hdev, LE_LINK, bdaddr, role); 5553 if (!conn) { 5554 bt_dev_err(hdev, "no memory for new connection"); 5555 goto unlock; 5556 } 5557 5558 conn->dst_type = bdaddr_type; 5559 5560 /* If we didn't have a hci_conn object previously 5561 * but we're in the central role, this must be something 5562 * initiated using an accept list. Since accept list based 5563 * connections are not "first class citizens" we don't 5564 * have full tracking of them. Therefore, we go ahead 5565 * with a "best effort" approach of determining the 5566 * initiator address based on the HCI_PRIVACY flag.
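 * (le_conn_update_addr() is called below as well and refines these
 * addresses once the Local RPA from the event, if any, is known.)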
5567 */ 5568 if (conn->out) { 5569 conn->resp_addr_type = bdaddr_type; 5570 bacpy(&conn->resp_addr, bdaddr); 5571 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) { 5572 conn->init_addr_type = ADDR_LE_DEV_RANDOM; 5573 bacpy(&conn->init_addr, &hdev->rpa); 5574 } else { 5575 hci_copy_identity_address(hdev, 5576 &conn->init_addr, 5577 &conn->init_addr_type); 5578 } 5579 } 5580 } else { 5581 cancel_delayed_work(&conn->le_conn_timeout); 5582 } 5583 5584 /* The HCI_LE_Connection_Complete event is only sent once per connection. 5585 * Processing it more than once per connection can corrupt kernel memory. 5586 * 5587 * As the connection handle is set here for the first time, it indicates 5588 * whether the connection is already set up. 5589 */ 5590 if (conn->handle != HCI_CONN_HANDLE_UNSET) { 5591 bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection"); 5592 goto unlock; 5593 } 5594 5595 le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa); 5596 5597 /* Lookup the identity address from the stored connection 5598 * address and address type. 5599 * 5600 * When establishing connections to an identity address, the 5601 * connection procedure will store the resolvable random 5602 * address first. Now if it can be converted back into the 5603 * identity address, start using the identity address from 5604 * now on. 5605 */ 5606 irk = hci_get_irk(hdev, &conn->dst, conn->dst_type); 5607 if (irk) { 5608 bacpy(&conn->dst, &irk->bdaddr); 5609 conn->dst_type = irk->addr_type; 5610 } 5611 5612 conn->dst_type = ev_bdaddr_type(hdev, conn->dst_type, NULL); 5613 5614 if (handle > HCI_CONN_HANDLE_MAX) { 5615 bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x", handle, 5616 HCI_CONN_HANDLE_MAX); 5617 status = HCI_ERROR_INVALID_PARAMETERS; 5618 } 5619 5620 if (status) { 5621 hci_conn_failed(conn, status); 5622 goto unlock; 5623 } 5624 5625 if (conn->dst_type == ADDR_LE_DEV_PUBLIC) 5626 addr_type = BDADDR_LE_PUBLIC; 5627 else 5628 addr_type = BDADDR_LE_RANDOM; 5629 5630 /* Drop the connection if the device is blocked */ 5631 if (hci_bdaddr_list_lookup(&hdev->reject_list, &conn->dst, addr_type)) { 5632 hci_conn_drop(conn); 5633 goto unlock; 5634 } 5635 5636 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) 5637 mgmt_device_connected(hdev, conn, NULL, 0); 5638 5639 conn->sec_level = BT_SECURITY_LOW; 5640 conn->handle = handle; 5641 conn->state = BT_CONFIG; 5642 5643 /* Store the current advertising instance as the connection's 5644 * advertising instance when software rotation is in use so it 5645 * can be re-enabled when disconnected. 5646 */ 5647 if (!ext_adv_capable(hdev)) 5648 conn->adv_instance = hdev->cur_adv_instance; 5649 5650 conn->le_conn_interval = interval; 5651 conn->le_conn_latency = latency; 5652 conn->le_supv_timeout = supervision_timeout; 5653 5654 hci_debugfs_create_conn(conn); 5655 hci_conn_add_sysfs(conn); 5656 5657 /* The remote features procedure is defined for the central 5658 * role only, so only request the remote features for a 5659 * connection that we initiated. 5660 * 5661 * If the local controller supports peripheral-initiated features 5662 * exchange, then requesting the remote features in peripheral 5663 * role is possible. Otherwise just transition into the 5664 * connected state without requesting the remote features.
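 * (A remote controller lacking support for this exchange may return
 * error 0x1a, which hci_le_remote_feat_complete_evt() below treats
 * as success for this case.)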
5665 */ 5666 if (conn->out || 5667 (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) { 5668 struct hci_cp_le_read_remote_features cp; 5669 5670 cp.handle = __cpu_to_le16(conn->handle); 5671 5672 hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES, 5673 sizeof(cp), &cp); 5674 5675 hci_conn_hold(conn); 5676 } else { 5677 conn->state = BT_CONNECTED; 5678 hci_connect_cfm(conn, status); 5679 } 5680 5681 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst, 5682 conn->dst_type); 5683 if (params) { 5684 list_del_init(&params->action); 5685 if (params->conn) { 5686 hci_conn_drop(params->conn); 5687 hci_conn_put(params->conn); 5688 params->conn = NULL; 5689 } 5690 } 5691 5692 unlock: 5693 hci_update_passive_scan(hdev); 5694 hci_dev_unlock(hdev); 5695 } 5696 5697 static void hci_le_conn_complete_evt(struct hci_dev *hdev, void *data, 5698 struct sk_buff *skb) 5699 { 5700 struct hci_ev_le_conn_complete *ev = data; 5701 5702 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 5703 5704 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type, 5705 NULL, ev->role, le16_to_cpu(ev->handle), 5706 le16_to_cpu(ev->interval), 5707 le16_to_cpu(ev->latency), 5708 le16_to_cpu(ev->supervision_timeout)); 5709 } 5710 5711 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev, void *data, 5712 struct sk_buff *skb) 5713 { 5714 struct hci_ev_le_enh_conn_complete *ev = data; 5715 5716 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 5717 5718 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type, 5719 &ev->local_rpa, ev->role, le16_to_cpu(ev->handle), 5720 le16_to_cpu(ev->interval), 5721 le16_to_cpu(ev->latency), 5722 le16_to_cpu(ev->supervision_timeout)); 5723 } 5724 5725 static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, void *data, 5726 struct sk_buff *skb) 5727 { 5728 struct hci_evt_le_ext_adv_set_term *ev = data; 5729 struct hci_conn *conn; 5730 struct adv_info *adv, *n; 5731 5732 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 5733 5734 /* The Bluetooth Core 5.3 specification clearly states that this event 5735 * shall not be sent when the Host disables the advertising set. So in 5736 * case of HCI_ERROR_CANCELLED_BY_HOST, just ignore the event. 5737 * 5738 * When the Host disables an advertising set, all cleanup is done via 5739 * its command callback and does not need to be duplicated here. 5740 */ 5741 if (ev->status == HCI_ERROR_CANCELLED_BY_HOST) { 5742 bt_dev_warn_ratelimited(hdev, "Unexpected advertising set terminated event"); 5743 return; 5744 } 5745 5746 hci_dev_lock(hdev); 5747 5748 adv = hci_find_adv_instance(hdev, ev->handle); 5749 5750 if (ev->status) { 5751 if (!adv) 5752 goto unlock; 5753 5754 /* Remove advertising as it has been terminated */ 5755 hci_remove_adv_instance(hdev, ev->handle); 5756 mgmt_advertising_removed(NULL, hdev, ev->handle); 5757 5758 list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) { 5759 if (adv->enabled) 5760 goto unlock; 5761 } 5762 5763 /* We are no longer advertising, clear HCI_LE_ADV */ 5764 hci_dev_clear_flag(hdev, HCI_LE_ADV); 5765 goto unlock; 5766 } 5767 5768 if (adv) 5769 adv->enabled = false; 5770 5771 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle)); 5772 if (conn) { 5773 /* Store handle in the connection so the correct advertising 5774 * instance can be re-enabled when disconnected.
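 * (A handle of 0x00 denotes the default instance, whose random
 * address is the controller-wide hdev->random_addr; see the check
 * below.)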
5775 */ 5776 conn->adv_instance = ev->handle; 5777 5778 if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM || 5779 bacmp(&conn->resp_addr, BDADDR_ANY)) 5780 goto unlock; 5781 5782 if (!ev->handle) { 5783 bacpy(&conn->resp_addr, &hdev->random_addr); 5784 goto unlock; 5785 } 5786 5787 if (adv) 5788 bacpy(&conn->resp_addr, &adv->random_addr); 5789 } 5790 5791 unlock: 5792 hci_dev_unlock(hdev); 5793 } 5794 5795 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev, void *data, 5796 struct sk_buff *skb) 5797 { 5798 struct hci_ev_le_conn_update_complete *ev = data; 5799 struct hci_conn *conn; 5800 5801 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); 5802 5803 if (ev->status) 5804 return; 5805 5806 hci_dev_lock(hdev); 5807 5808 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 5809 if (conn) { 5810 conn->le_conn_interval = le16_to_cpu(ev->interval); 5811 conn->le_conn_latency = le16_to_cpu(ev->latency); 5812 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout); 5813 } 5814 5815 hci_dev_unlock(hdev); 5816 } 5817 5818 /* This function requires the caller holds hdev->lock */ 5819 static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev, 5820 bdaddr_t *addr, 5821 u8 addr_type, bool addr_resolved, 5822 u8 adv_type) 5823 { 5824 struct hci_conn *conn; 5825 struct hci_conn_params *params; 5826 5827 /* If the event is not connectable don't proceed further */ 5828 if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND) 5829 return NULL; 5830 5831 /* Ignore if the device is blocked or hdev is suspended */ 5832 if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type) || 5833 hdev->suspended) 5834 return NULL; 5835 5836 /* Most controllers will fail if we try to create new connections 5837 * while we have an existing one in peripheral role. 5838 */ 5839 if (hdev->conn_hash.le_num_peripheral > 0 && 5840 (!test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) || 5841 !(hdev->le_states[3] & 0x10))) 5842 return NULL; 5843 5844 /* If we're not connectable, only connect to devices that we have 5845 * in our pend_le_conns list. 5846 */ 5847 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr, 5848 addr_type); 5849 if (!params) 5850 return NULL; 5851 5852 if (!params->explicit_connect) { 5853 switch (params->auto_connect) { 5854 case HCI_AUTO_CONN_DIRECT: 5855 /* Only devices advertising with ADV_DIRECT_IND 5856 * trigger a connection attempt. This allows 5857 * incoming connections from peripheral devices. 5858 */ 5859 if (adv_type != LE_ADV_DIRECT_IND) 5860 return NULL; 5861 break; 5862 case HCI_AUTO_CONN_ALWAYS: 5863 /* Devices advertising with ADV_IND or ADV_DIRECT_IND 5864 * trigger a connection attempt. This means 5865 * that incoming connections from peripheral devices 5866 * are accepted and also outgoing connections to 5867 * peripheral devices are established when found. 5868 */ 5869 break; 5870 default: 5871 return NULL; 5872 } 5873 } 5874 5875 conn = hci_connect_le(hdev, addr, addr_type, addr_resolved, 5876 BT_SECURITY_LOW, hdev->def_le_autoconnect_timeout, 5877 HCI_ROLE_MASTER); 5878 if (!IS_ERR(conn)) { 5879 /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned 5880 * by the higher layer that tried to connect; if not, then 5881 * store the pointer since we don't really have any 5882 * other owner of the object besides the params that 5883 * triggered it. This way we can abort the connection if 5884 * the parameters get removed and keep the reference 5885 * count consistent once the connection is established.
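 * (The matching hci_conn_drop()/hci_conn_put() happens in
 * le_conn_complete_evt() once the pending action is removed.)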
5886 */ 5887 5888 if (!params->explicit_connect) 5889 params->conn = hci_conn_get(conn); 5890 5891 return conn; 5892 } 5893 5894 switch (PTR_ERR(conn)) { 5895 case -EBUSY: 5896 /* If hci_connect() returns -EBUSY it means there is already 5897 * an LE connection attempt going on. Since controllers don't 5898 * support more than one connection attempt at a time, we 5899 * don't consider this an error case. 5900 */ 5901 break; 5902 default: 5903 BT_DBG("Failed to connect: err %ld", PTR_ERR(conn)); 5904 return NULL; 5905 } 5906 5907 return NULL; 5908 } 5909 5910 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, 5911 u8 bdaddr_type, bdaddr_t *direct_addr, 5912 u8 direct_addr_type, s8 rssi, u8 *data, u8 len, 5913 bool ext_adv) 5914 { 5915 struct discovery_state *d = &hdev->discovery; 5916 struct smp_irk *irk; 5917 struct hci_conn *conn; 5918 bool match, bdaddr_resolved; 5919 u32 flags; 5920 u8 *ptr; 5921 5922 switch (type) { 5923 case LE_ADV_IND: 5924 case LE_ADV_DIRECT_IND: 5925 case LE_ADV_SCAN_IND: 5926 case LE_ADV_NONCONN_IND: 5927 case LE_ADV_SCAN_RSP: 5928 break; 5929 default: 5930 bt_dev_err_ratelimited(hdev, "unknown advertising packet " 5931 "type: 0x%02x", type); 5932 return; 5933 } 5934 5935 if (!ext_adv && len > HCI_MAX_AD_LENGTH) { 5936 bt_dev_err_ratelimited(hdev, "legacy adv larger than 31 bytes"); 5937 return; 5938 } 5939 5940 /* Find the end of the data in case the report contains padded zero 5941 * bytes at the end causing an invalid length value. 5942 * 5943 * When data is NULL, len is 0 so there is no need for extra ptr 5944 * check as 'ptr < data + 0' is already false in such a case. 5945 */ 5946 for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) { 5947 if (ptr + 1 + *ptr > data + len) 5948 break; 5949 } 5950 5951 /* Adjust for actual length. This handles the case when the remote 5952 * device is advertising with an incorrect data length. 5953 */ 5954 len = ptr - data; 5955 5956 /* If the direct address is present, then this report is from 5957 * a LE Direct Advertising Report event. In that case it is 5958 * important to see if the address matches the local 5959 * controller address. 5960 */ 5961 if (direct_addr) { 5962 direct_addr_type = ev_bdaddr_type(hdev, direct_addr_type, 5963 &bdaddr_resolved); 5964 5965 /* Only resolvable random addresses are valid for these 5966 * kinds of reports and others can be ignored. 5967 */ 5968 if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type)) 5969 return; 5970 5971 /* If the controller is not using resolvable random 5972 * addresses, then this report can be ignored. 5973 */ 5974 if (!hci_dev_test_flag(hdev, HCI_PRIVACY)) 5975 return; 5976 5977 /* If the local IRK of the controller does not match 5978 * with the resolvable random address provided, then 5979 * this report can be ignored. 5980 */ 5981 if (!smp_irk_matches(hdev, hdev->irk, direct_addr)) 5982 return; 5983 } 5984 5985 /* Check if we need to convert to identity address */ 5986 irk = hci_get_irk(hdev, bdaddr, bdaddr_type); 5987 if (irk) { 5988 bdaddr = &irk->bdaddr; 5989 bdaddr_type = irk->addr_type; 5990 } 5991 5992 bdaddr_type = ev_bdaddr_type(hdev, bdaddr_type, &bdaddr_resolved); 5993 5994 /* Check if we have been requested to connect to this device. 5995 * 5996 * direct_addr is set only for directed advertising reports (it is NULL 5997 * for advertising reports) and is already verified to be RPA above.
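 * (check_pending_le_conn() additionally requires a connectable
 * advertising type, ADV_IND or ADV_DIRECT_IND, before it attempts
 * a connection.)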
5998 */ 5999 conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, bdaddr_resolved, 6000 type); 6001 if (!ext_adv && conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) { 6002 /* Store report for later inclusion by 6003 * mgmt_device_connected 6004 */ 6005 memcpy(conn->le_adv_data, data, len); 6006 conn->le_adv_data_len = len; 6007 } 6008 6009 /* Passive scanning shouldn't trigger any device found events, 6010 * except for devices marked as CONN_REPORT, for which we do send 6011 * device found events, or when advertisement monitoring is requested. 6012 */ 6013 if (hdev->le_scan_type == LE_SCAN_PASSIVE) { 6014 if (type == LE_ADV_DIRECT_IND) 6015 return; 6016 6017 if (!hci_pend_le_action_lookup(&hdev->pend_le_reports, 6018 bdaddr, bdaddr_type) && 6019 idr_is_empty(&hdev->adv_monitors_idr)) 6020 return; 6021 6022 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND) 6023 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE; 6024 else 6025 flags = 0; 6026 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL, 6027 rssi, flags, data, len, NULL, 0); 6028 return; 6029 } 6030 6031 /* When receiving non-connectable or scannable undirected 6032 * advertising reports, this means that the remote device is 6033 * not connectable, so clearly indicate this in the 6034 * device found event. 6035 * 6036 * When receiving a scan response, then there is no way to 6037 * know if the remote device is connectable or not. However, 6038 * since scan responses are merged with a previously seen 6039 * advertising report, the flags field from that report 6040 * will be used. 6041 * 6042 * In the really unlikely case that a controller gets confused 6043 * and just sends a scan response event, then it is marked as 6044 * not connectable as well. 6045 */ 6046 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND || 6047 type == LE_ADV_SCAN_RSP) 6048 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE; 6049 else 6050 flags = 0; 6051 6052 /* If there's nothing pending, either store the data from this 6053 * event or send an immediate device found event if the data 6054 * should not be stored for later. 6055 */ 6056 if (!ext_adv && !has_pending_adv_report(hdev)) { 6057 /* If the report will trigger a SCAN_REQ, store it for 6058 * later merging. 6059 */ 6060 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) { 6061 store_pending_adv_report(hdev, bdaddr, bdaddr_type, 6062 rssi, flags, data, len); 6063 return; 6064 } 6065 6066 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL, 6067 rssi, flags, data, len, NULL, 0); 6068 return; 6069 } 6070 6071 /* Check if the pending report is for the same device as the new one */ 6072 match = (!bacmp(bdaddr, &d->last_adv_addr) && 6073 bdaddr_type == d->last_adv_addr_type); 6074 6075 /* If the pending data doesn't match this report or this isn't a 6076 * scan response (e.g. we got a duplicate ADV_IND) then force 6077 * sending of the pending data. 6078 */ 6079 if (type != LE_ADV_SCAN_RSP || !match) { 6080 /* Send out whatever is in the cache, but skip duplicates */ 6081 if (!match) 6082 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK, 6083 d->last_adv_addr_type, NULL, 6084 d->last_adv_rssi, d->last_adv_flags, 6085 d->last_adv_data, 6086 d->last_adv_data_len, NULL, 0); 6087 6088 /* If the new report will trigger a SCAN_REQ, store it for 6089 * later merging.
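 * (Only ADV_IND and ADV_SCAN_IND are scannable PDU types, hence
 * the type check that follows.)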
6090 */ 6091 if (!ext_adv && (type == LE_ADV_IND || 6092 type == LE_ADV_SCAN_IND)) { 6093 store_pending_adv_report(hdev, bdaddr, bdaddr_type, 6094 rssi, flags, data, len); 6095 return; 6096 } 6097 6098 /* The advertising reports cannot be merged, so clear 6099 * the pending report and send out a device found event. 6100 */ 6101 clear_pending_adv_report(hdev); 6102 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL, 6103 rssi, flags, data, len, NULL, 0); 6104 return; 6105 } 6106 6107 /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and 6108 * the new event is a SCAN_RSP. We can therefore proceed with 6109 * sending a merged device found event. 6110 */ 6111 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK, 6112 d->last_adv_addr_type, NULL, rssi, d->last_adv_flags, 6113 d->last_adv_data, d->last_adv_data_len, data, len); 6114 clear_pending_adv_report(hdev); 6115 } 6116 6117 static void hci_le_adv_report_evt(struct hci_dev *hdev, void *data, 6118 struct sk_buff *skb) 6119 { 6120 struct hci_ev_le_advertising_report *ev = data; 6121 6122 if (!ev->num) 6123 return; 6124 6125 hci_dev_lock(hdev); 6126 6127 while (ev->num--) { 6128 struct hci_ev_le_advertising_info *info; 6129 s8 rssi; 6130 6131 info = hci_le_ev_skb_pull(hdev, skb, 6132 HCI_EV_LE_ADVERTISING_REPORT, 6133 sizeof(*info)); 6134 if (!info) 6135 break; 6136 6137 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_ADVERTISING_REPORT, 6138 info->length + 1)) 6139 break; 6140 6141 if (info->length <= HCI_MAX_AD_LENGTH) { 6142 rssi = info->data[info->length]; 6143 process_adv_report(hdev, info->type, &info->bdaddr, 6144 info->bdaddr_type, NULL, 0, rssi, 6145 info->data, info->length, false); 6146 } else { 6147 bt_dev_err(hdev, "Dropping invalid advertising data"); 6148 } 6149 } 6150 6151 hci_dev_unlock(hdev); 6152 } 6153 6154 static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type) 6155 { 6156 if (evt_type & LE_EXT_ADV_LEGACY_PDU) { 6157 switch (evt_type) { 6158 case LE_LEGACY_ADV_IND: 6159 return LE_ADV_IND; 6160 case LE_LEGACY_ADV_DIRECT_IND: 6161 return LE_ADV_DIRECT_IND; 6162 case LE_LEGACY_ADV_SCAN_IND: 6163 return LE_ADV_SCAN_IND; 6164 case LE_LEGACY_NONCONN_IND: 6165 return LE_ADV_NONCONN_IND; 6166 case LE_LEGACY_SCAN_RSP_ADV: 6167 case LE_LEGACY_SCAN_RSP_ADV_SCAN: 6168 return LE_ADV_SCAN_RSP; 6169 } 6170 6171 goto invalid; 6172 } 6173 6174 if (evt_type & LE_EXT_ADV_CONN_IND) { 6175 if (evt_type & LE_EXT_ADV_DIRECT_IND) 6176 return LE_ADV_DIRECT_IND; 6177 6178 return LE_ADV_IND; 6179 } 6180 6181 if (evt_type & LE_EXT_ADV_SCAN_RSP) 6182 return LE_ADV_SCAN_RSP; 6183 6184 if (evt_type & LE_EXT_ADV_SCAN_IND) 6185 return LE_ADV_SCAN_IND; 6186 6187 if (evt_type == LE_EXT_ADV_NON_CONN_IND || 6188 evt_type & LE_EXT_ADV_DIRECT_IND) 6189 return LE_ADV_NONCONN_IND; 6190 6191 invalid: 6192 bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x", 6193 evt_type); 6194 6195 return LE_ADV_INVALID; 6196 } 6197 6198 static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, void *data, 6199 struct sk_buff *skb) 6200 { 6201 struct hci_ev_le_ext_adv_report *ev = data; 6202 6203 if (!ev->num) 6204 return; 6205 6206 hci_dev_lock(hdev); 6207 6208 while (ev->num--) { 6209 struct hci_ev_le_ext_adv_info *info; 6210 u8 legacy_evt_type; 6211 u16 evt_type; 6212 6213 info = hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT, 6214 sizeof(*info)); 6215 if (!info) 6216 break; 6217 6218 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT, 6219 info->length)) 6220 break; 6221 6222 evt_type = 
		evt_type = __le16_to_cpu(info->type);
		legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);
		if (legacy_evt_type != LE_ADV_INVALID) {
			process_adv_report(hdev, legacy_evt_type, &info->bdaddr,
					   info->bdaddr_type, NULL, 0,
					   info->rssi, info->data, info->length,
					   !(evt_type & LE_EXT_ADV_LEGACY_PDU));
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_ev_le_remote_feat_complete *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		if (!ev->status)
			memcpy(conn->features[0], ev->features, 8);

		if (conn->state == BT_CONFIG) {
			__u8 status;

			/* If the local controller supports peripheral-initiated
			 * features exchange, but the remote controller does
			 * not, then it is possible that the error code 0x1a
			 * for unsupported remote feature gets returned.
			 *
			 * In this specific case, allow the connection to
			 * transition into connected state and mark it as
			 * successful.
			 */
			if (!conn->out && ev->status == 0x1a &&
			    (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES))
				status = 0x00;
			else
				status = ev->status;

			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_le_ltk_request_evt(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto not_found;

	ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
	if (!ltk)
		goto not_found;

	if (smp_ltk_is_sc(ltk)) {
		/* With SC both EDiv and Rand are set to zero */
		if (ev->ediv || ev->rand)
			goto not_found;
	} else {
		/* For non-SC keys check that EDiv and Rand match */
		if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
			goto not_found;
	}

	memcpy(cp.ltk, ltk->val, ltk->enc_size);
	memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
	cp.handle = cpu_to_le16(conn->handle);

	conn->pending_sec_level = smp_ltk_sec_level(ltk);

	conn->enc_key_size = ltk->enc_size;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
	 * temporary key used to encrypt a connection following
	 * pairing. It is used during the Encrypted Session Setup to
	 * distribute the keys. Later, security can be re-established
	 * using a distributed LTK.
	 */
	if (ltk->type == SMP_STK) {
		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
		list_del_rcu(&ltk->list);
		kfree_rcu(ltk, rcu);
	} else {
		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}
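/* Connection parameters in the LE Remote Connection Parameter Request
 * event handled below use the usual Core Specification units:
 * interval_min/max in 1.25 ms steps, latency in connection events and
 * timeout in 10 ms steps. For example, min = 0x0018 and max = 0x0028
 * with a timeout of 0x01f4 requests a 30-50 ms connection interval and
 * a 5 s supervision timeout; parameters outside the spec-defined ranges
 * are rejected by hci_check_conn_params() and answered with
 * HCI_ERROR_INVALID_LL_PARAMS.
 */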
static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
				      u8 reason)
{
	struct hci_cp_le_conn_param_req_neg_reply cp;

	cp.handle = cpu_to_le16(handle);
	cp.reason = reason;

	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
		     &cp);
}

static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev, void *data,
					     struct sk_buff *skb)
{
	struct hci_ev_le_remote_conn_param_req *ev = data;
	struct hci_cp_le_conn_param_req_reply cp;
	struct hci_conn *hcon;
	u16 handle, min, max, latency, timeout;

	bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));

	handle = le16_to_cpu(ev->handle);
	min = le16_to_cpu(ev->interval_min);
	max = le16_to_cpu(ev->interval_max);
	latency = le16_to_cpu(ev->latency);
	timeout = le16_to_cpu(ev->timeout);

	hcon = hci_conn_hash_lookup_handle(hdev, handle);
	if (!hcon || hcon->state != BT_CONNECTED)
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_UNKNOWN_CONN_ID);

	if (hci_check_conn_params(min, max, latency, timeout))
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_INVALID_LL_PARAMS);

	if (hcon->role == HCI_ROLE_MASTER) {
		struct hci_conn_params *params;
		u8 store_hint;

		hci_dev_lock(hdev);

		params = hci_conn_params_lookup(hdev, &hcon->dst,
						hcon->dst_type);
		if (params) {
			params->conn_min_interval = min;
			params->conn_max_interval = max;
			params->conn_latency = latency;
			params->supervision_timeout = timeout;
			store_hint = 0x01;
		} else {
			store_hint = 0x00;
		}

		hci_dev_unlock(hdev);

		mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency, timeout);
	}

	cp.handle = ev->handle;
	cp.interval_min = ev->interval_min;
	cp.interval_max = ev->interval_max;
	cp.latency = ev->latency;
	cp.timeout = ev->timeout;
	cp.min_ce_len = 0;
	cp.max_ce_len = 0;

	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
}
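/* Directed advertising reports carry no advertising data payload, only
 * the advertiser address, the address the advertisement was directed at
 * and the RSSI, which is why process_adv_report() is called with a NULL
 * data pointer and a zero length below.
 */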
static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_ev_le_direct_adv_report *ev = data;
	int i;

	if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_DIRECT_ADV_REPORT,
				flex_array_size(ev, info, ev->num)))
		return;

	if (!ev->num)
		return;

	hci_dev_lock(hdev);

	for (i = 0; i < ev->num; i++) {
		struct hci_ev_le_direct_adv_info *info = &ev->info[i];

		process_adv_report(hdev, info->type, &info->bdaddr,
				   info->bdaddr_type, &info->direct_addr,
				   info->direct_addr_type, info->rssi, NULL, 0,
				   false);
	}

	hci_dev_unlock(hdev);
}

static void hci_le_phy_update_evt(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_le_phy_update_complete *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	if (ev->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	conn->le_tx_phy = ev->tx_phy;
	conn->le_rx_phy = ev->rx_phy;

unlock:
	hci_dev_unlock(hdev);
}

#define HCI_LE_EV_VL(_op, _func, _min_len, _max_len) \
[_op] = { \
	.func = _func, \
	.min_len = _min_len, \
	.max_len = _max_len, \
}

#define HCI_LE_EV(_op, _func, _len) \
	HCI_LE_EV_VL(_op, _func, _len, _len)

#define HCI_LE_EV_STATUS(_op, _func) \
	HCI_LE_EV(_op, _func, sizeof(struct hci_ev_status))

/* Entries in this table shall be placed at the index matching the
 * subevent opcode they handle, so use of the macros above is
 * recommended: they initialize each entry at its proper index using
 * designated initializers, which lets subevents without a callback
 * function be omitted entirely.
 */
static const struct hci_le_ev {
	void (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
	u16 min_len;
	u16 max_len;
} hci_le_ev_table[U8_MAX + 1] = {
	/* [0x01 = HCI_EV_LE_CONN_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_CONN_COMPLETE, hci_le_conn_complete_evt,
		  sizeof(struct hci_ev_le_conn_complete)),
	/* [0x02 = HCI_EV_LE_ADVERTISING_REPORT] */
	HCI_LE_EV_VL(HCI_EV_LE_ADVERTISING_REPORT, hci_le_adv_report_evt,
		     sizeof(struct hci_ev_le_advertising_report),
		     HCI_MAX_EVENT_SIZE),
	/* [0x03 = HCI_EV_LE_CONN_UPDATE_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_CONN_UPDATE_COMPLETE,
		  hci_le_conn_update_complete_evt,
		  sizeof(struct hci_ev_le_conn_update_complete)),
	/* [0x04 = HCI_EV_LE_REMOTE_FEAT_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_REMOTE_FEAT_COMPLETE,
		  hci_le_remote_feat_complete_evt,
		  sizeof(struct hci_ev_le_remote_feat_complete)),
	/* [0x05 = HCI_EV_LE_LTK_REQ] */
	HCI_LE_EV(HCI_EV_LE_LTK_REQ, hci_le_ltk_request_evt,
		  sizeof(struct hci_ev_le_ltk_req)),
	/* [0x06 = HCI_EV_LE_REMOTE_CONN_PARAM_REQ] */
	HCI_LE_EV(HCI_EV_LE_REMOTE_CONN_PARAM_REQ,
		  hci_le_remote_conn_param_req_evt,
		  sizeof(struct hci_ev_le_remote_conn_param_req)),
	/* [0x0a = HCI_EV_LE_ENHANCED_CONN_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_ENHANCED_CONN_COMPLETE,
		  hci_le_enh_conn_complete_evt,
		  sizeof(struct hci_ev_le_enh_conn_complete)),
	/* [0x0b = HCI_EV_LE_DIRECT_ADV_REPORT] */
	HCI_LE_EV_VL(HCI_EV_LE_DIRECT_ADV_REPORT, hci_le_direct_adv_report_evt,
		     sizeof(struct hci_ev_le_direct_adv_report),
		     HCI_MAX_EVENT_SIZE),
	/* [0x0c = HCI_EV_LE_PHY_UPDATE_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_PHY_UPDATE_COMPLETE, hci_le_phy_update_evt,
		  sizeof(struct hci_ev_le_phy_update_complete)),
	/* [0x0d = HCI_EV_LE_EXT_ADV_REPORT] */
	HCI_LE_EV_VL(HCI_EV_LE_EXT_ADV_REPORT, hci_le_ext_adv_report_evt,
		     sizeof(struct hci_ev_le_ext_adv_report),
		     HCI_MAX_EVENT_SIZE),
	/* [0x12 = HCI_EV_LE_EXT_ADV_SET_TERM] */
	HCI_LE_EV(HCI_EV_LE_EXT_ADV_SET_TERM, hci_le_ext_adv_term_evt,
		  sizeof(struct hci_evt_le_ext_adv_set_term)),
};
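/* For illustration, an entry such as
 *
 *	HCI_LE_EV(HCI_EV_LE_LTK_REQ, hci_le_ltk_request_evt,
 *		  sizeof(struct hci_ev_le_ltk_req))
 *
 * expands to the designated initializer
 *
 *	[HCI_EV_LE_LTK_REQ] = {
 *		.func = hci_le_ltk_request_evt,
 *		.min_len = sizeof(struct hci_ev_le_ltk_req),
 *		.max_len = sizeof(struct hci_ev_le_ltk_req),
 *	}
 *
 * so hci_le_meta_evt() below can index the table directly by subevent
 * code and validate the packet length before calling the handler.
 */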
static void hci_le_meta_evt(struct hci_dev *hdev, void *data,
			    struct sk_buff *skb, u16 *opcode, u8 *status,
			    hci_req_complete_t *req_complete,
			    hci_req_complete_skb_t *req_complete_skb)
{
	struct hci_ev_le_meta *ev = data;
	const struct hci_le_ev *subev;

	bt_dev_dbg(hdev, "subevent 0x%2.2x", ev->subevent);

	/* Only match event if command OGF is for LE */
	if (hdev->sent_cmd &&
	    hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) == 0x08 &&
	    hci_skb_event(hdev->sent_cmd) == ev->subevent) {
		*opcode = hci_skb_opcode(hdev->sent_cmd);
		hci_req_cmd_complete(hdev, *opcode, 0x00, req_complete,
				     req_complete_skb);
	}

	subev = &hci_le_ev_table[ev->subevent];
	if (!subev->func)
		return;

	if (skb->len < subev->min_len) {
		bt_dev_err(hdev, "unexpected subevent 0x%2.2x length: %u < %u",
			   ev->subevent, skb->len, subev->min_len);
		return;
	}

	/* Just warn if the length is over max_len, since it may still be
	 * possible to partially parse the event; leave it to the callback
	 * to decide whether that is acceptable.
	 */
	if (skb->len > subev->max_len)
		bt_dev_warn(hdev, "unexpected subevent 0x%2.2x length: %u > %u",
			    ev->subevent, skb->len, subev->max_len);

	data = hci_le_ev_skb_pull(hdev, skb, ev->subevent, subev->min_len);
	if (!data)
		return;

	subev->func(hdev, data, skb);
}

static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
				 u8 event, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;

	if (!skb)
		return false;

	hdr = hci_ev_skb_pull(hdev, skb, event, sizeof(*hdr));
	if (!hdr)
		return false;

	if (event) {
		if (hdr->evt != event)
			return false;
		return true;
	}

	/* Check if request ended in Command Status - no way to retrieve
	 * any extra parameters in this case.
	 */
	if (hdr->evt == HCI_EV_CMD_STATUS)
		return false;

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
			   hdr->evt);
		return false;
	}

	ev = hci_cc_skb_pull(hdev, skb, opcode, sizeof(*ev));
	if (!ev)
		return false;

	if (opcode != __le16_to_cpu(ev->opcode)) {
		BT_DBG("opcode doesn't match (0x%4.4x != 0x%4.4x)", opcode,
		       __le16_to_cpu(ev->opcode));
		return false;
	}

	return true;
}
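/* Record which event (and which peer, when one can be identified) woke
 * the controller while the host was suspended. For example, if the
 * first event seen after suspend is an HCI_EV_CONN_REQUEST, the wake
 * reason is set to MGMT_WAKE_REASON_REMOTE_WAKE and the peer's BR/EDR
 * address is saved; events that should not occur during suspend are
 * flagged as MGMT_WAKE_REASON_UNEXPECTED instead.
 */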
static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
				  struct sk_buff *skb)
{
	struct hci_ev_le_advertising_info *adv;
	struct hci_ev_le_direct_adv_info *direct_adv;
	struct hci_ev_le_ext_adv_info *ext_adv;
	const struct hci_ev_conn_complete *conn_complete = (void *)skb->data;
	const struct hci_ev_conn_request *conn_request = (void *)skb->data;

	hci_dev_lock(hdev);

	/* If we are currently suspended and this is the first BT event seen,
	 * save the wake reason associated with the event.
	 */
	if (!hdev->suspended || hdev->wake_reason)
		goto unlock;

	/* Default to remote wake. Values for wake_reason are documented in the
	 * Bluez mgmt api docs.
	 */
	hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE;

	/* Once configured for remote wakeup, we should only wake up for
	 * reconnections. It's useful to see which device is waking us up so
	 * keep track of the bdaddr of the connection event that woke us up.
	 */
	if (event == HCI_EV_CONN_REQUEST) {
		bacpy(&hdev->wake_addr, &conn_request->bdaddr);
		hdev->wake_addr_type = BDADDR_BREDR;
	} else if (event == HCI_EV_CONN_COMPLETE) {
		bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
		hdev->wake_addr_type = BDADDR_BREDR;
	} else if (event == HCI_EV_LE_META) {
		struct hci_ev_le_meta *le_ev = (void *)skb->data;
		u8 subevent = le_ev->subevent;
		u8 *ptr = &skb->data[sizeof(*le_ev)];
		u8 num_reports = *ptr;

		if ((subevent == HCI_EV_LE_ADVERTISING_REPORT ||
		     subevent == HCI_EV_LE_DIRECT_ADV_REPORT ||
		     subevent == HCI_EV_LE_EXT_ADV_REPORT) &&
		    num_reports) {
			adv = (void *)(ptr + 1);
			direct_adv = (void *)(ptr + 1);
			ext_adv = (void *)(ptr + 1);

			switch (subevent) {
			case HCI_EV_LE_ADVERTISING_REPORT:
				bacpy(&hdev->wake_addr, &adv->bdaddr);
				hdev->wake_addr_type = adv->bdaddr_type;
				break;
			case HCI_EV_LE_DIRECT_ADV_REPORT:
				bacpy(&hdev->wake_addr, &direct_adv->bdaddr);
				hdev->wake_addr_type = direct_adv->bdaddr_type;
				break;
			case HCI_EV_LE_EXT_ADV_REPORT:
				bacpy(&hdev->wake_addr, &ext_adv->bdaddr);
				hdev->wake_addr_type = ext_adv->bdaddr_type;
				break;
			}
		}
	} else {
		hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED;
	}

unlock:
	hci_dev_unlock(hdev);
}

#define HCI_EV_VL(_op, _func, _min_len, _max_len) \
[_op] = { \
	.req = false, \
	.func = _func, \
	.min_len = _min_len, \
	.max_len = _max_len, \
}

#define HCI_EV(_op, _func, _len) \
	HCI_EV_VL(_op, _func, _len, _len)

#define HCI_EV_STATUS(_op, _func) \
	HCI_EV(_op, _func, sizeof(struct hci_ev_status))

#define HCI_EV_REQ_VL(_op, _func, _min_len, _max_len) \
[_op] = { \
	.req = true, \
	.func_req = _func, \
	.min_len = _min_len, \
	.max_len = _max_len, \
}

#define HCI_EV_REQ(_op, _func, _len) \
	HCI_EV_REQ_VL(_op, _func, _len, _len)
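/* The two entry flavors differ only in the callback signature stored in
 * the union below: plain HCI_EV()/HCI_EV_VL() entries take just
 * (hdev, data, skb), while HCI_EV_REQ()/HCI_EV_REQ_VL() entries - used
 * for Command Complete, Command Status and LE Meta - also receive the
 * opcode/status out-parameters and the request completion callbacks.
 */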
/* Entries in this table shall be placed at the index matching the event
 * opcode they handle, so use of the macros above is recommended: they
 * initialize each entry at its proper index using designated
 * initializers, which lets events without a callback function be
 * omitted entirely.
 */
static const struct hci_ev {
	bool req;
	union {
		void (*func)(struct hci_dev *hdev, void *data,
			     struct sk_buff *skb);
		void (*func_req)(struct hci_dev *hdev, void *data,
				 struct sk_buff *skb, u16 *opcode, u8 *status,
				 hci_req_complete_t *req_complete,
				 hci_req_complete_skb_t *req_complete_skb);
	};
	u16 min_len;
	u16 max_len;
} hci_ev_table[U8_MAX + 1] = {
	/* [0x01 = HCI_EV_INQUIRY_COMPLETE] */
	HCI_EV_STATUS(HCI_EV_INQUIRY_COMPLETE, hci_inquiry_complete_evt),
	/* [0x02 = HCI_EV_INQUIRY_RESULT] */
	HCI_EV_VL(HCI_EV_INQUIRY_RESULT, hci_inquiry_result_evt,
		  sizeof(struct hci_ev_inquiry_result), HCI_MAX_EVENT_SIZE),
	/* [0x03 = HCI_EV_CONN_COMPLETE] */
	HCI_EV(HCI_EV_CONN_COMPLETE, hci_conn_complete_evt,
	       sizeof(struct hci_ev_conn_complete)),
	/* [0x04 = HCI_EV_CONN_REQUEST] */
	HCI_EV(HCI_EV_CONN_REQUEST, hci_conn_request_evt,
	       sizeof(struct hci_ev_conn_request)),
	/* [0x05 = HCI_EV_DISCONN_COMPLETE] */
	HCI_EV(HCI_EV_DISCONN_COMPLETE, hci_disconn_complete_evt,
	       sizeof(struct hci_ev_disconn_complete)),
	/* [0x06 = HCI_EV_AUTH_COMPLETE] */
	HCI_EV(HCI_EV_AUTH_COMPLETE, hci_auth_complete_evt,
	       sizeof(struct hci_ev_auth_complete)),
	/* [0x07 = HCI_EV_REMOTE_NAME] */
	HCI_EV(HCI_EV_REMOTE_NAME, hci_remote_name_evt,
	       sizeof(struct hci_ev_remote_name)),
	/* [0x08 = HCI_EV_ENCRYPT_CHANGE] */
	HCI_EV(HCI_EV_ENCRYPT_CHANGE, hci_encrypt_change_evt,
	       sizeof(struct hci_ev_encrypt_change)),
	/* [0x09 = HCI_EV_CHANGE_LINK_KEY_COMPLETE] */
	HCI_EV(HCI_EV_CHANGE_LINK_KEY_COMPLETE,
	       hci_change_link_key_complete_evt,
	       sizeof(struct hci_ev_change_link_key_complete)),
	/* [0x0b = HCI_EV_REMOTE_FEATURES] */
	HCI_EV(HCI_EV_REMOTE_FEATURES, hci_remote_features_evt,
	       sizeof(struct hci_ev_remote_features)),
	/* [0x0e = HCI_EV_CMD_COMPLETE] */
	HCI_EV_REQ_VL(HCI_EV_CMD_COMPLETE, hci_cmd_complete_evt,
		      sizeof(struct hci_ev_cmd_complete), HCI_MAX_EVENT_SIZE),
	/* [0x0f = HCI_EV_CMD_STATUS] */
	HCI_EV_REQ(HCI_EV_CMD_STATUS, hci_cmd_status_evt,
		   sizeof(struct hci_ev_cmd_status)),
	/* [0x10 = HCI_EV_HARDWARE_ERROR] */
	HCI_EV(HCI_EV_HARDWARE_ERROR, hci_hardware_error_evt,
	       sizeof(struct hci_ev_hardware_error)),
	/* [0x12 = HCI_EV_ROLE_CHANGE] */
	HCI_EV(HCI_EV_ROLE_CHANGE, hci_role_change_evt,
	       sizeof(struct hci_ev_role_change)),
	/* [0x13 = HCI_EV_NUM_COMP_PKTS] */
	HCI_EV_VL(HCI_EV_NUM_COMP_PKTS, hci_num_comp_pkts_evt,
		  sizeof(struct hci_ev_num_comp_pkts), HCI_MAX_EVENT_SIZE),
	/* [0x14 = HCI_EV_MODE_CHANGE] */
	HCI_EV(HCI_EV_MODE_CHANGE, hci_mode_change_evt,
	       sizeof(struct hci_ev_mode_change)),
	/* [0x16 = HCI_EV_PIN_CODE_REQ] */
	HCI_EV(HCI_EV_PIN_CODE_REQ, hci_pin_code_request_evt,
	       sizeof(struct hci_ev_pin_code_req)),
	/* [0x17 = HCI_EV_LINK_KEY_REQ] */
	HCI_EV(HCI_EV_LINK_KEY_REQ, hci_link_key_request_evt,
	       sizeof(struct hci_ev_link_key_req)),
	/* [0x18 = HCI_EV_LINK_KEY_NOTIFY] */
	HCI_EV(HCI_EV_LINK_KEY_NOTIFY, hci_link_key_notify_evt,
	       sizeof(struct hci_ev_link_key_notify)),
	/* [0x1c = HCI_EV_CLOCK_OFFSET] */
	HCI_EV(HCI_EV_CLOCK_OFFSET, hci_clock_offset_evt,
	       sizeof(struct hci_ev_clock_offset)),
	/* [0x1d = HCI_EV_PKT_TYPE_CHANGE] */
	HCI_EV(HCI_EV_PKT_TYPE_CHANGE, hci_pkt_type_change_evt,
	       sizeof(struct hci_ev_pkt_type_change)),
	/* [0x20 = HCI_EV_PSCAN_REP_MODE] */
	HCI_EV(HCI_EV_PSCAN_REP_MODE, hci_pscan_rep_mode_evt,
	       sizeof(struct hci_ev_pscan_rep_mode)),
	/* [0x22 = HCI_EV_INQUIRY_RESULT_WITH_RSSI] */
	HCI_EV_VL(HCI_EV_INQUIRY_RESULT_WITH_RSSI,
		  hci_inquiry_result_with_rssi_evt,
		  sizeof(struct hci_ev_inquiry_result_rssi),
		  HCI_MAX_EVENT_SIZE),
	/* [0x23 = HCI_EV_REMOTE_EXT_FEATURES] */
	HCI_EV(HCI_EV_REMOTE_EXT_FEATURES, hci_remote_ext_features_evt,
	       sizeof(struct hci_ev_remote_ext_features)),
	/* [0x2c = HCI_EV_SYNC_CONN_COMPLETE] */
	HCI_EV(HCI_EV_SYNC_CONN_COMPLETE, hci_sync_conn_complete_evt,
	       sizeof(struct hci_ev_sync_conn_complete)),
	/* [0x2d = HCI_EV_EXTENDED_INQUIRY_RESULT] */
	HCI_EV_VL(HCI_EV_EXTENDED_INQUIRY_RESULT,
		  hci_extended_inquiry_result_evt,
		  sizeof(struct hci_ev_ext_inquiry_result), HCI_MAX_EVENT_SIZE),
	/* [0x30 = HCI_EV_KEY_REFRESH_COMPLETE] */
	HCI_EV(HCI_EV_KEY_REFRESH_COMPLETE, hci_key_refresh_complete_evt,
	       sizeof(struct hci_ev_key_refresh_complete)),
	/* [0x31 = HCI_EV_IO_CAPA_REQUEST] */
	HCI_EV(HCI_EV_IO_CAPA_REQUEST, hci_io_capa_request_evt,
	       sizeof(struct hci_ev_io_capa_request)),
	/* [0x32 = HCI_EV_IO_CAPA_REPLY] */
	HCI_EV(HCI_EV_IO_CAPA_REPLY, hci_io_capa_reply_evt,
	       sizeof(struct hci_ev_io_capa_reply)),
	/* [0x33 = HCI_EV_USER_CONFIRM_REQUEST] */
	HCI_EV(HCI_EV_USER_CONFIRM_REQUEST, hci_user_confirm_request_evt,
	       sizeof(struct hci_ev_user_confirm_req)),
	/* [0x34 = HCI_EV_USER_PASSKEY_REQUEST] */
	HCI_EV(HCI_EV_USER_PASSKEY_REQUEST, hci_user_passkey_request_evt,
	       sizeof(struct hci_ev_user_passkey_req)),
	/* [0x35 = HCI_EV_REMOTE_OOB_DATA_REQUEST] */
	HCI_EV(HCI_EV_REMOTE_OOB_DATA_REQUEST, hci_remote_oob_data_request_evt,
	       sizeof(struct hci_ev_remote_oob_data_request)),
	/* [0x36 = HCI_EV_SIMPLE_PAIR_COMPLETE] */
	HCI_EV(HCI_EV_SIMPLE_PAIR_COMPLETE, hci_simple_pair_complete_evt,
	       sizeof(struct hci_ev_simple_pair_complete)),
	/* [0x3b = HCI_EV_USER_PASSKEY_NOTIFY] */
	HCI_EV(HCI_EV_USER_PASSKEY_NOTIFY, hci_user_passkey_notify_evt,
	       sizeof(struct hci_ev_user_passkey_notify)),
	/* [0x3c = HCI_EV_KEYPRESS_NOTIFY] */
	HCI_EV(HCI_EV_KEYPRESS_NOTIFY, hci_keypress_notify_evt,
	       sizeof(struct hci_ev_keypress_notify)),
	/* [0x3d = HCI_EV_REMOTE_HOST_FEATURES] */
	HCI_EV(HCI_EV_REMOTE_HOST_FEATURES, hci_remote_host_features_evt,
	       sizeof(struct hci_ev_remote_host_features)),
	/* [0x3e = HCI_EV_LE_META] */
	HCI_EV_REQ_VL(HCI_EV_LE_META, hci_le_meta_evt,
		      sizeof(struct hci_ev_le_meta), HCI_MAX_EVENT_SIZE),
#if IS_ENABLED(CONFIG_BT_HS)
	/* [0x40 = HCI_EV_PHY_LINK_COMPLETE] */
	HCI_EV(HCI_EV_PHY_LINK_COMPLETE, hci_phy_link_complete_evt,
	       sizeof(struct hci_ev_phy_link_complete)),
	/* [0x41 = HCI_EV_CHANNEL_SELECTED] */
	HCI_EV(HCI_EV_CHANNEL_SELECTED, hci_chan_selected_evt,
	       sizeof(struct hci_ev_channel_selected)),
	/* [0x42 = HCI_EV_DISCONN_PHY_LINK_COMPLETE] */
	HCI_EV(HCI_EV_DISCONN_PHY_LINK_COMPLETE,
	       hci_disconn_phylink_complete_evt,
	       sizeof(struct hci_ev_disconn_phy_link_complete)),
	/* [0x45 = HCI_EV_LOGICAL_LINK_COMPLETE] */
	HCI_EV(HCI_EV_LOGICAL_LINK_COMPLETE, hci_loglink_complete_evt,
	       sizeof(struct hci_ev_logical_link_complete)),
	/* [0x46 = HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE] */
	HCI_EV(HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE,
	       hci_disconn_loglink_complete_evt,
	       sizeof(struct hci_ev_disconn_logical_link_complete)),
#endif
	/* [0x48 = HCI_EV_NUM_COMP_BLOCKS] */
	HCI_EV(HCI_EV_NUM_COMP_BLOCKS, hci_num_comp_blocks_evt,
	       sizeof(struct hci_ev_num_comp_blocks)),
	/* [0xff = HCI_EV_VENDOR] */
	HCI_EV_VL(HCI_EV_VENDOR, msft_vendor_evt, 0, HCI_MAX_EVENT_SIZE),
};

static void hci_event_func(struct hci_dev *hdev, u8 event, struct sk_buff *skb,
			   u16 *opcode, u8 *status,
			   hci_req_complete_t *req_complete,
			   hci_req_complete_skb_t *req_complete_skb)
{
	const struct hci_ev *ev = &hci_ev_table[event];
	void *data;

	if (!ev->func)
		return;

	if (skb->len < ev->min_len) {
		bt_dev_err(hdev, "unexpected event 0x%2.2x length: %u < %u",
			   event, skb->len, ev->min_len);
		return;
	}

	/* Just warn if the length is over max_len, since it may still be
	 * possible to partially parse the event; leave it to the callback
	 * to decide whether that is acceptable.
	 */
	if (skb->len > ev->max_len)
		bt_dev_warn_ratelimited(hdev,
					"unexpected event 0x%2.2x length: %u > %u",
					event, skb->len, ev->max_len);

	data = hci_ev_skb_pull(hdev, skb, event, ev->min_len);
	if (!data)
		return;

	if (ev->req)
		ev->func_req(hdev, data, skb, opcode, status, req_complete,
			     req_complete_skb);
	else
		ev->func(hdev, data, skb);
}

void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	hci_req_complete_t req_complete = NULL;
	hci_req_complete_skb_t req_complete_skb = NULL;
	struct sk_buff *orig_skb = NULL;
	u8 status = 0, event, req_evt = 0;
	u16 opcode = HCI_OP_NOP;

	if (skb->len < sizeof(*hdr)) {
		bt_dev_err(hdev, "Malformed HCI Event");
		goto done;
	}

	event = hdr->evt;
	if (!event) {
		bt_dev_warn(hdev, "Received unexpected HCI Event 0x%2.2x",
			    event);
		goto done;
	}

	/* Only match event if command OGF is not for LE */
	if (hdev->sent_cmd &&
	    hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) != 0x08 &&
	    hci_skb_event(hdev->sent_cmd) == event) {
		hci_req_cmd_complete(hdev, hci_skb_opcode(hdev->sent_cmd),
				     status, &req_complete, &req_complete_skb);
		req_evt = event;
	}

	/* If it looks like we might end up having to call
	 * req_complete_skb, store a pristine copy of the skb since the
	 * various handlers may modify the original one through
	 * skb_pull() calls, etc.
	 */
	if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
	    event == HCI_EV_CMD_COMPLETE)
		orig_skb = skb_clone(skb, GFP_KERNEL);

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* Store wake reason if we're suspended */
	hci_store_wake_reason(hdev, event, skb);

	bt_dev_dbg(hdev, "event 0x%2.2x", event);

	hci_event_func(hdev, event, skb, &opcode, &status, &req_complete,
		       &req_complete_skb);

	if (req_complete) {
		req_complete(hdev, status, opcode);
	} else if (req_complete_skb) {
		if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
			kfree_skb(orig_skb);
			orig_skb = NULL;
		}
		req_complete_skb(hdev, status, opcode, orig_skb);
	}

done:
	kfree_skb(orig_skb);
	kfree_skb(skb);
	hdev->stat.evt_rx++;
}
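/* For illustration, a Command Complete event flows through
 * hci_event_packet() roughly as follows:
 *
 *   1. The event header is validated and the event code extracted.
 *   2. Because the event is HCI_EV_CMD_COMPLETE, a pristine clone of
 *      the skb is kept in orig_skb, since the handlers may consume the
 *      original via skb_pull().
 *   3. hci_event_func() length-checks the packet against hci_ev_table
 *      and invokes hci_cmd_complete_evt(), which fills in opcode and
 *      status and may set req_complete/req_complete_skb.
 *   4. Whichever completion callback was set is then invoked, and both
 *      skbs are freed.
 */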