/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI event handling. */

#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "a2mp.h"
#include "amp.h"
#include "smp.h"
#include "msft.h"
#include "eir.h"

#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"

#define secs_to_jiffies(_secs) msecs_to_jiffies((_secs) * 1000)

/* Handle HCI Event packets */

static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb,
				  u8 *new_status)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* It is possible that we receive Inquiry Complete event right
	 * before we receive Inquiry Cancel Command Complete event, in
	 * which case the latter event should have status of Command
	 * Disallowed (0x0c). This should not be treated as error, since
	 * we actually achieve what Inquiry Cancel wants to achieve,
	 * which is to end the last Inquiry session.
	 */
	if (status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
		bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
		status = 0x00;
	}

	*new_status = status;

	if (status)
		return;

	clear_bit(HCI_INQUIRY, &hdev->flags);
	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	hci_dev_lock(hdev);
	/* Set discovery state to stopped if we're not doing LE active
	 * scanning.
	 */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
	    hdev->le_scan_type != LE_SCAN_ACTIVE)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}

static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
}

static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);

	hci_conn_check_pending(hdev);
}

static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}

static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_role_discovery *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->role = rp->role;

	hci_dev_unlock(hdev);
}

static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = __le16_to_cpu(rp->policy);

	hci_dev_unlock(hdev);
}

static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_def_link_policy *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->link_policy = __le16_to_cpu(rp->policy);
}

static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
	if (!sent)
		return;

	hdev->link_policy = get_unaligned_le16(sent);
}
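/* Command Complete for HCI_Reset: the controller has just lost all of its
 * state, so clear the host-side mirrors of it as well (volatile flags,
 * cached advertising/scan response data, accept and resolving lists).
 */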
static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	clear_bit(HCI_RESET, &hdev->flags);

	if (status)
		return;

	/* Reset all non-persistent flags */
	hci_dev_clear_volatile_flags(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;

	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
	hdev->scan_rsp_data_len = 0;

	hdev->le_scan_type = LE_SCAN_PASSIVE;

	hdev->ssp_debug_mode = 0;

	hci_bdaddr_list_clear(&hdev->le_accept_list);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);
}

static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
	struct hci_cp_read_stored_link_key *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
	if (!sent)
		return;

	if (!rp->status && sent->read_all == 0x01) {
		hdev->stored_max_keys = rp->max_keys;
		hdev->stored_num_keys = rp->num_keys;
	}
}

static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (rp->num_keys <= hdev->stored_num_keys)
		hdev->stored_num_keys -= rp->num_keys;
	else
		hdev->stored_num_keys = 0;
}

static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_local_name_complete(hdev, sent, status);
	else if (!status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_name *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG))
		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
}

static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_auth_enable_complete(hdev, status);

	hci_dev_unlock(hdev);
}

static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u8 param;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
	if (!sent)
		return;

	param = *((__u8 *) sent);

	if (param)
		set_bit(HCI_ENCRYPT, &hdev->flags);
	else
		clear_bit(HCI_ENCRYPT, &hdev->flags);
}
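/* Command Complete for HCI_Write_Scan_Enable: mirror the controller's
 * inquiry scan and page scan state in the HCI_ISCAN and HCI_PSCAN flags.
 */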
static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u8 param;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return;

	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	if (status) {
		hdev->discov_timeout = 0;
		goto done;
	}

	if (param & SCAN_INQUIRY)
		set_bit(HCI_ISCAN, &hdev->flags);
	else
		clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_PAGE)
		set_bit(HCI_PSCAN, &hdev->flags);
	else
		clear_bit(HCI_PSCAN, &hdev->flags);

done:
	hci_dev_unlock(hdev);
}

static void hci_cc_set_event_filter(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *)skb->data);
	struct hci_cp_set_event_filter *cp;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_SET_EVENT_FLT);
	if (!sent)
		return;

	cp = (struct hci_cp_set_event_filter *)sent;

	if (cp->flt_type == HCI_FLT_CLEAR_ALL)
		hci_dev_clear_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
	else
		hci_dev_set_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
}

static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_class_of_dev *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->dev_class, rp->dev_class, 3);

	BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
	       hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
}

static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (status == 0)
		memcpy(hdev->dev_class, sent, 3);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_class_of_dev_complete(hdev, sent, status);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_voice_setting *rp = (void *) skb->data;
	__u16 setting;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	setting = __le16_to_cpu(rp->voice_setting);

	if (hdev->voice_setting == setting)
		return;

	hdev->voice_setting = setting;

	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
}

static void hci_cc_write_voice_setting(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u16 setting;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
	if (!sent)
		return;

	setting = get_unaligned_le16(sent);

	if (hdev->voice_setting == setting)
		return;

	hdev->voice_setting = setting;

	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
}
static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->num_iac = rp->num_iac;

	BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
}

static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_write_ssp_mode *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		if (sent->mode)
			hdev->features[1][0] |= LMP_HOST_SSP;
		else
			hdev->features[1][0] &= ~LMP_HOST_SSP;
	}

	if (!status) {
		if (sent->mode)
			hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	struct hci_cp_write_sc_support *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		if (sent->support)
			hdev->features[1][0] |= LMP_HOST_SC;
		else
			hdev->features[1][0] &= ~LMP_HOST_SC;
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT) && !status) {
		if (sent->support)
			hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_version *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hdev->hci_ver = rp->hci_ver;
		hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
		hdev->lmp_ver = rp->lmp_ver;
		hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
	}
}

static void hci_cc_read_local_commands(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_commands *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG))
		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
}

static void hci_cc_read_auth_payload_timeout(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct hci_rp_read_auth_payload_to *rp = (void *)skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);

	hci_dev_unlock(hdev);
}
static void hci_cc_write_auth_payload_timeout(struct hci_dev *hdev,
					      struct sk_buff *skb)
{
	struct hci_rp_write_auth_payload_to *rp = (void *)skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->auth_payload_timeout = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_local_features(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
}

static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hdev->max_page < rp->max_page)
		hdev->max_page = rp->max_page;

	if (rp->page < HCI_MAX_PAGES)
		memcpy(hdev->features[rp->page], rp->features, 8);
}

static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->flow_ctl_mode = rp->mode;
}

static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
		hdev->sco_mtu = 64;
		hdev->sco_pkts = 8;
	}

	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
}
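/* Command Complete for HCI_Read_BD_ADDR: cache the controller's public
 * address; the copy taken while HCI_SETUP is set is additionally
 * preserved in setup_addr.
 */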
static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_bd_addr *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_INIT, &hdev->flags))
		bacpy(&hdev->bdaddr, &rp->bdaddr);

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		bacpy(&hdev->setup_addr, &rp->bdaddr);
}

static void hci_cc_read_local_pairing_opts(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_pairing_opts *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hdev->pairing_opts = rp->pairing_opts;
		hdev->max_enc_key_size = rp->max_key_size;
	}
}

static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_INIT, &hdev->flags)) {
		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
		hdev->page_scan_window = __le16_to_cpu(rp->window);
	}
}

static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	struct hci_cp_write_page_scan_activity *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
	if (!sent)
		return;

	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
	hdev->page_scan_window = __le16_to_cpu(sent->window);
}

static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_type *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->page_scan_type = rp->type;
}

static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	u8 *type;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
	if (type)
		hdev->page_scan_type = *type;
}

static void hci_cc_read_data_block_size(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_data_block_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
	hdev->block_len = __le16_to_cpu(rp->block_len);
	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);

	hdev->block_cnt = hdev->num_blocks;

	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
	       hdev->block_cnt, hdev->block_len);
}

static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_clock *rp = (void *) skb->data;
	struct hci_cp_read_clock *cp;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	if (skb->len < sizeof(*rp))
		return;

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!cp)
		goto unlock;

	if (cp->which == 0x00) {
		hdev->clock = le32_to_cpu(rp->clock);
		goto unlock;
	}

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		conn->clock = le32_to_cpu(rp->clock);
		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
	}

unlock:
	hci_dev_unlock(hdev);
}
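/* Command Complete for HCI_Read_Local_AMP_Info: cache the capabilities
 * (bandwidth, latency and flush timeouts) reported by an AMP controller.
 */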
static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->amp_status = rp->amp_status;
	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
	hdev->amp_type = rp->amp_type;
	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
}

static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->inq_tx_power = rp->tx_power;
}

static void hci_cc_read_def_err_data_reporting(struct hci_dev *hdev,
					       struct sk_buff *skb)
{
	struct hci_rp_read_def_err_data_reporting *rp = (void *)skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->err_data_reporting = rp->err_data_reporting;
}

static void hci_cc_write_def_err_data_reporting(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	__u8 status = *((__u8 *)skb->data);
	struct hci_cp_write_def_err_data_reporting *cp;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
	if (!cp)
		return;

	hdev->err_data_reporting = cp->err_data_reporting;
}

static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
}

static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
						 rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
	hdev->le_pkts = rp->le_max_pkt;

	hdev->le_cnt = hdev->le_pkts;

	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
}
static void hci_cc_le_read_local_features(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_le_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->le_features, rp->features, 8);
}

static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->adv_tx_power = rp->tx_power;
}

static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
						 rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
						 0, rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
}

static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
}

static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	bdaddr_t *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	bacpy(&hdev->random_addr, sent);

	if (!bacmp(&hdev->rpa, sent)) {
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired,
				   secs_to_jiffies(hdev->rpa_timeout));
	}

	hci_dev_unlock(hdev);
}
static void hci_cc_le_set_default_phy(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_le_set_default_phy *cp;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	hdev->le_tx_def_phys = cp->tx_phys;
	hdev->le_rx_def_phys = cp->rx_phys;

	hci_dev_unlock(hdev);
}

static void hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev,
					      struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_le_set_adv_set_rand_addr *cp;
	struct adv_info *adv;

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
	/* Update only adv instances other than handle 0x00, since instance
	 * 0x00 shall be using HCI_OP_LE_SET_RANDOM_ADDR, which covers both
	 * extended and non-extended advertising.
	 */
	if (!cp || !cp->handle)
		return;

	hci_dev_lock(hdev);

	adv = hci_find_adv_instance(hdev, cp->handle);
	if (adv) {
		bacpy(&adv->random_addr, &cp->bdaddr);
		if (!bacmp(&hdev->rpa, &cp->bdaddr)) {
			adv->rpa_expired = false;
			queue_delayed_work(hdev->workqueue,
					   &adv->rpa_expired_cb,
					   secs_to_jiffies(hdev->rpa_timeout));
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_le_remove_adv_set(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *)skb->data);
	u8 *instance;
	int err;

	if (status)
		return;

	instance = hci_sent_cmd_data(hdev, HCI_OP_LE_REMOVE_ADV_SET);
	if (!instance)
		return;

	hci_dev_lock(hdev);

	err = hci_remove_adv_instance(hdev, *instance);
	if (!err)
		mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd), hdev,
					 *instance);

	hci_dev_unlock(hdev);
}

static void hci_cc_le_clear_adv_sets(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *)skb->data);
	struct adv_info *adv, *n;
	int err;

	if (status)
		return;

	if (!hci_sent_cmd_data(hdev, HCI_OP_LE_CLEAR_ADV_SETS))
		return;

	hci_dev_lock(hdev);

	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
		u8 instance = adv->instance;

		err = hci_remove_adv_instance(hdev, instance);
		if (!err)
			mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd),
						 hdev, instance);
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_le_read_transmit_power(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_le_read_transmit_power *rp = (void *)skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->min_le_tx_power = rp->min_le_tx_power;
	hdev->max_le_tx_power = rp->max_le_tx_power;
}
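/* Command Complete for HCI_LE_Set_Advertising_Enable: the HCI_LE_ADV flag
 * tracks whether the controller is currently advertising. When advertising
 * is enabled for a connection attempt in the peripheral role, a connection
 * timeout is armed as well.
 */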
static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	/* If we're doing connection initiation as peripheral, set a
	 * timeout in case something goes wrong.
	 */
	if (*sent) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_adv_enable *cp;
	struct hci_cp_ext_adv_set *set;
	__u8 status = *((__u8 *) skb->data);
	struct adv_info *adv = NULL, *n;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
	if (!cp)
		return;

	set = (void *)cp->data;

	hci_dev_lock(hdev);

	if (cp->num_of_sets)
		adv = hci_find_adv_instance(hdev, set->handle);

	if (cp->enable) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		if (adv)
			adv->enabled = true;

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		if (cp->num_of_sets) {
			if (adv)
				adv->enabled = false;

			/* If just one instance was disabled, check if any
			 * other instances are still enabled before clearing
			 * HCI_LE_ADV.
			 */
			list_for_each_entry_safe(adv, n, &hdev->adv_instances,
						 list) {
				if (adv->enabled)
					goto unlock;
			}
		} else {
			/* All instances shall be considered disabled */
			list_for_each_entry_safe(adv, n, &hdev->adv_instances,
						 list)
				adv->enabled = false;
		}

		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_param *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	hdev->le_scan_type = cp->type;

	hci_dev_unlock(hdev);
}

static void hci_cc_le_set_ext_scan_param(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_scan_params *cp;
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_le_scan_phy_params *phy_param;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
	if (!cp)
		return;

	phy_param = (void *)cp->data;

	hci_dev_lock(hdev);

	hdev->le_scan_type = phy_param->type;

	hci_dev_unlock(hdev);
}
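/* During active scanning the most recent advertising report is cached in
 * the discovery state instead of being forwarded to mgmt right away;
 * le_set_scan_enable_complete() flushes it when scanning gets disabled.
 */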
static bool has_pending_adv_report(struct hci_dev *hdev)
{
	struct discovery_state *d = &hdev->discovery;

	return bacmp(&d->last_adv_addr, BDADDR_ANY);
}

static void clear_pending_adv_report(struct hci_dev *hdev)
{
	struct discovery_state *d = &hdev->discovery;

	bacpy(&d->last_adv_addr, BDADDR_ANY);
	d->last_adv_data_len = 0;
}

static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 bdaddr_type, s8 rssi, u32 flags,
				     u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;

	if (len > HCI_MAX_AD_LENGTH)
		return;

	bacpy(&d->last_adv_addr, bdaddr);
	d->last_adv_addr_type = bdaddr_type;
	d->last_adv_rssi = rssi;
	d->last_adv_flags = flags;
	memcpy(d->last_adv_data, data, len);
	d->last_adv_data_len = len;
}

static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
{
	hci_dev_lock(hdev);

	switch (enable) {
	case LE_SCAN_ENABLE:
		hci_dev_set_flag(hdev, HCI_LE_SCAN);
		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
			clear_pending_adv_report(hdev);
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		hci_dev_clear_flag(hdev, HCI_LE_SCAN);

		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request. Therefore,
		 * mark discovery as stopped.
		 */
		if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

		break;

	default:
		bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
			   enable);
		break;
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	le_set_scan_enable_complete(hdev, cp->enable);
}

static void hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
	if (!cp)
		return;

	le_set_scan_enable_complete(hdev, cp->enable);
}

static void hci_cc_le_read_num_adv_sets(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_num_supported_adv_sets *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x No of Adv sets %u", hdev->name, rp->status,
	       rp->num_of_sets);

	if (rp->status)
		return;

	hdev->le_num_of_adv_sets = rp->num_of_sets;
}

static void hci_cc_le_read_accept_list_size(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_rp_le_read_accept_list_size *rp = (void *)skb->data;

	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);

	if (rp->status)
		return;

	hdev->le_accept_list_size = rp->size;
}
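/* The handlers below keep hdev->le_accept_list in sync with the
 * controller's filter accept list: every successful clear, add or delete
 * command is mirrored on the host-side copy.
 */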
static void hci_cc_le_clear_accept_list(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	hci_bdaddr_list_clear(&hdev->le_accept_list);
}

static void hci_cc_le_add_to_accept_list(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_add_to_accept_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
	if (!sent)
		return;

	hci_bdaddr_list_add(&hdev->le_accept_list, &sent->bdaddr,
			    sent->bdaddr_type);
}

static void hci_cc_le_del_from_accept_list(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_cp_le_del_from_accept_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST);
	if (!sent)
		return;

	hci_bdaddr_list_del(&hdev->le_accept_list, &sent->bdaddr,
			    sent->bdaddr_type);
}

static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_rp_le_read_supported_states *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->le_states, rp->le_states, 8);
}

static void hci_cc_le_read_def_data_len(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
	hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
}

static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_write_def_data_len *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
	if (!sent)
		return;

	hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
	hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
}

static void hci_cc_le_add_to_resolv_list(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_add_to_resolv_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
	if (!sent)
		return;

	hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
				     sent->bdaddr_type, sent->peer_irk,
				     sent->local_irk);
}

static void hci_cc_le_del_from_resolv_list(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_cp_le_del_from_resolv_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
	if (!sent)
		return;

	hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
				     sent->bdaddr_type);
}
static void hci_cc_le_clear_resolv_list(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	hci_bdaddr_list_clear(&hdev->le_resolv_list);
}

static void hci_cc_le_read_resolv_list_size(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_rp_le_read_resolv_list_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);

	if (rp->status)
		return;

	hdev->le_resolv_list_size = rp->size;
}

static void hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev,
						 struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (*sent)
		hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
	else
		hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);

	hci_dev_unlock(hdev);
}

static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
	hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
	hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
	hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
}

static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_cp_write_le_host_supported *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (sent->le) {
		hdev->features[1][0] |= LMP_HOST_LE;
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	} else {
		hdev->features[1][0] &= ~LMP_HOST_LE;
		hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);
	}

	if (sent->simul)
		hdev->features[1][0] |= LMP_HOST_LE_BREDR;
	else
		hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;

	hci_dev_unlock(hdev);
}

static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_le_set_adv_param *cp;
	u8 status = *((u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
	if (!cp)
		return;

	hci_dev_lock(hdev);
	hdev->adv_addr_type = cp->own_address_type;
	hci_dev_unlock(hdev);
}
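/* Command Complete for HCI_LE_Set_Extended_Advertising_Parameters: the
 * reply carries the TX power actually selected by the controller, which
 * is stored per advertising instance (instance 0x00 lives in hdev itself)
 * before the advertising data is updated with the now-known TX power.
 */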
static void hci_cc_set_ext_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_le_set_ext_adv_params *rp = (void *) skb->data;
	struct hci_cp_le_set_ext_adv_params *cp;
	struct adv_info *adv_instance;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
	if (!cp)
		return;

	hci_dev_lock(hdev);
	hdev->adv_addr_type = cp->own_addr_type;
	if (!cp->handle) {
		/* Store in hdev for instance 0 */
		hdev->adv_tx_power = rp->tx_power;
	} else {
		adv_instance = hci_find_adv_instance(hdev, cp->handle);
		if (adv_instance)
			adv_instance->tx_power = rp->tx_power;
	}
	/* Update adv data as tx power is known now */
	hci_req_update_adv_data(hdev, cp->handle);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_rssi *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->rssi = rp->rssi;

	hci_dev_unlock(hdev);
}

static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_read_tx_power *sent;
	struct hci_rp_read_tx_power *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (!conn)
		goto unlock;

	switch (sent->type) {
	case 0x00:
		conn->tx_power = rp->tx_power;
		break;
	case 0x01:
		conn->max_tx_power = rp->tx_power;
		break;
	}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	u8 *mode;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
	if (mode)
		hdev->ssp_debug_mode = *mode;
}
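/* Handlers for HCI Command Status events. Most of them only need to act
 * when the command failed (non-zero status), in which case the operation
 * will never complete and any state set up for it has to be unwound.
 */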
static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status) {
		hci_conn_check_pending(hdev);
		return;
	}

	set_bit(HCI_INQUIRY, &hdev->flags);
}

static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
					    HCI_ROLE_MASTER);
			if (!conn)
				bt_dev_err(hdev, "no memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_add_sco *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_auth_requested *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_set_conn_encrypt *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}

static int hci_outgoing_auth_needed(struct hci_dev *hdev,
				    struct hci_conn *conn)
{
	if (conn->state != BT_CONFIG || !conn->out)
		return 0;

	if (conn->pending_sec_level == BT_SECURITY_SDP)
		return 0;

	/* Only request authentication for SSP connections or non-SSP
	 * devices with sec_level MEDIUM or HIGH or if MITM protection
	 * is requested.
	 */
	if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
	    conn->pending_sec_level != BT_SECURITY_FIPS &&
	    conn->pending_sec_level != BT_SECURITY_HIGH &&
	    conn->pending_sec_level != BT_SECURITY_MEDIUM)
		return 0;

	return 1;
}

static int hci_resolve_name(struct hci_dev *hdev,
			    struct inquiry_entry *e)
{
	struct hci_cp_remote_name_req cp;

	memset(&cp, 0, sizeof(cp));

	bacpy(&cp.bdaddr, &e->data.bdaddr);
	cp.pscan_rep_mode = e->data.pscan_rep_mode;
	cp.pscan_mode = e->data.pscan_mode;
	cp.clock_offset = e->data.clock_offset;

	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
}

static bool hci_resolve_next_name(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	if (list_empty(&discov->resolve))
		return false;

	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (!e)
		return false;

	if (hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		return true;
	}

	return false;
}
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	/* Update the mgmt connected state if necessary. Be careful with
	 * conn objects that exist but are not (yet) connected however.
	 * Only those in BT_CONFIG or BT_CONNECTED states can be
	 * considered connected.
	 */
	if (conn &&
	    (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, name, name_len);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in the list of devices whose names
	 * are pending resolution, there is no need to continue resolving
	 * the next name, as that will be done upon receiving another
	 * Remote Name Request Complete event.
	 */
	if (!e)
		return;

	list_del(&e->list);
	if (name) {
		e->name_state = NAME_KNOWN;
		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
				 e->data.rssi, name, name_len);
	} else {
		e->name_state = NAME_NOT_KNOWN;
	}

	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}

static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* If successful, wait for the name req complete event before
	 * checking for the need to do authentication.
	 */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested auth_cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		auth_cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(auth_cp), &auth_cp);
	}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_read_remote_features *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
__le16_to_cpu(cp->handle)); 2280 if (conn) { 2281 if (conn->state == BT_CONFIG) { 2282 hci_connect_cfm(conn, status); 2283 hci_conn_drop(conn); 2284 } 2285 } 2286 2287 hci_dev_unlock(hdev); 2288 } 2289 2290 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status) 2291 { 2292 struct hci_cp_setup_sync_conn *cp; 2293 struct hci_conn *acl, *sco; 2294 __u16 handle; 2295 2296 BT_DBG("%s status 0x%2.2x", hdev->name, status); 2297 2298 if (!status) 2299 return; 2300 2301 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN); 2302 if (!cp) 2303 return; 2304 2305 handle = __le16_to_cpu(cp->handle); 2306 2307 BT_DBG("%s handle 0x%4.4x", hdev->name, handle); 2308 2309 hci_dev_lock(hdev); 2310 2311 acl = hci_conn_hash_lookup_handle(hdev, handle); 2312 if (acl) { 2313 sco = acl->link; 2314 if (sco) { 2315 sco->state = BT_CLOSED; 2316 2317 hci_connect_cfm(sco, status); 2318 hci_conn_del(sco); 2319 } 2320 } 2321 2322 hci_dev_unlock(hdev); 2323 } 2324 2325 static void hci_cs_enhanced_setup_sync_conn(struct hci_dev *hdev, __u8 status) 2326 { 2327 struct hci_cp_enhanced_setup_sync_conn *cp; 2328 struct hci_conn *acl, *sco; 2329 __u16 handle; 2330 2331 bt_dev_dbg(hdev, "status 0x%2.2x", status); 2332 2333 if (!status) 2334 return; 2335 2336 cp = hci_sent_cmd_data(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN); 2337 if (!cp) 2338 return; 2339 2340 handle = __le16_to_cpu(cp->handle); 2341 2342 bt_dev_dbg(hdev, "handle 0x%4.4x", handle); 2343 2344 hci_dev_lock(hdev); 2345 2346 acl = hci_conn_hash_lookup_handle(hdev, handle); 2347 if (acl) { 2348 sco = acl->link; 2349 if (sco) { 2350 sco->state = BT_CLOSED; 2351 2352 hci_connect_cfm(sco, status); 2353 hci_conn_del(sco); 2354 } 2355 } 2356 2357 hci_dev_unlock(hdev); 2358 } 2359 2360 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status) 2361 { 2362 struct hci_cp_sniff_mode *cp; 2363 struct hci_conn *conn; 2364 2365 BT_DBG("%s status 0x%2.2x", hdev->name, status); 2366 2367 if (!status) 2368 return; 2369 2370 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE); 2371 if (!cp) 2372 return; 2373 2374 hci_dev_lock(hdev); 2375 2376 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2377 if (conn) { 2378 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags); 2379 2380 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags)) 2381 hci_sco_setup(conn, status); 2382 } 2383 2384 hci_dev_unlock(hdev); 2385 } 2386 2387 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status) 2388 { 2389 struct hci_cp_exit_sniff_mode *cp; 2390 struct hci_conn *conn; 2391 2392 BT_DBG("%s status 0x%2.2x", hdev->name, status); 2393 2394 if (!status) 2395 return; 2396 2397 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE); 2398 if (!cp) 2399 return; 2400 2401 hci_dev_lock(hdev); 2402 2403 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2404 if (conn) { 2405 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags); 2406 2407 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags)) 2408 hci_sco_setup(conn, status); 2409 } 2410 2411 hci_dev_unlock(hdev); 2412 } 2413 2414 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status) 2415 { 2416 struct hci_cp_disconnect *cp; 2417 struct hci_conn_params *params; 2418 struct hci_conn *conn; 2419 bool mgmt_conn; 2420 2421 /* Wait for HCI_EV_DISCONN_COMPLETE if the status is 0x00 and we are 2422 * not suspended; otherwise clean up the connection immediately.
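* (Rationale, inferred from the code below: while suspended the Disconnection Complete event may never be processed before the host powers down, so waiting for it could leave a stale hci_conn object behind.)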
2423 */ 2424 if (!status && !hdev->suspended) 2425 return; 2426 2427 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT); 2428 if (!cp) 2429 return; 2430 2431 hci_dev_lock(hdev); 2432 2433 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2434 if (!conn) 2435 goto unlock; 2436 2437 if (status) { 2438 mgmt_disconnect_failed(hdev, &conn->dst, conn->type, 2439 conn->dst_type, status); 2440 2441 if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) { 2442 hdev->cur_adv_instance = conn->adv_instance; 2443 hci_enable_advertising(hdev); 2444 } 2445 2446 goto done; 2447 } 2448 2449 mgmt_conn = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags); 2450 2451 if (conn->type == ACL_LINK) { 2452 if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags)) 2453 hci_remove_link_key(hdev, &conn->dst); 2454 } 2455 2456 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type); 2457 if (params) { 2458 switch (params->auto_connect) { 2459 case HCI_AUTO_CONN_LINK_LOSS: 2460 if (cp->reason != HCI_ERROR_CONNECTION_TIMEOUT) 2461 break; 2462 fallthrough; 2463 2464 case HCI_AUTO_CONN_DIRECT: 2465 case HCI_AUTO_CONN_ALWAYS: 2466 list_del_init(&params->action); 2467 list_add(&params->action, &hdev->pend_le_conns); 2468 break; 2469 2470 default: 2471 break; 2472 } 2473 } 2474 2475 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type, 2476 cp->reason, mgmt_conn); 2477 2478 hci_disconn_cfm(conn, cp->reason); 2479 2480 done: 2481 /* If the disconnection failed for any reason, the upper layer 2482 * does not retry the disconnection in the current implementation. 2483 * Hence, we need to do some basic cleanup here and re-enable 2484 * advertising if necessary. 2485 */ 2486 hci_conn_del(conn); 2487 unlock: 2488 hci_dev_unlock(hdev); 2489 } 2490 2491 static u8 ev_bdaddr_type(struct hci_dev *hdev, u8 type, bool *resolved) 2492 { 2493 /* When controller-based address resolution is in use, the new 2494 * address types 0x02 and 0x03 are used. These types need to be 2495 * converted back into either the public or the random address type. 2496 */ 2497 switch (type) { 2498 case ADDR_LE_DEV_PUBLIC_RESOLVED: 2499 if (resolved) 2500 *resolved = true; 2501 return ADDR_LE_DEV_PUBLIC; 2502 case ADDR_LE_DEV_RANDOM_RESOLVED: 2503 if (resolved) 2504 *resolved = true; 2505 return ADDR_LE_DEV_RANDOM; 2506 } 2507 2508 if (resolved) 2509 *resolved = false; 2510 return type; 2511 } 2512 2513 static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr, 2514 u8 peer_addr_type, u8 own_address_type, 2515 u8 filter_policy) 2516 { 2517 struct hci_conn *conn; 2518 2519 conn = hci_conn_hash_lookup_le(hdev, peer_addr, 2520 peer_addr_type); 2521 if (!conn) 2522 return; 2523 2524 own_address_type = ev_bdaddr_type(hdev, own_address_type, NULL); 2525 2526 /* Store the initiator and responder address information which 2527 * is needed for SMP. These values will not change during the 2528 * lifetime of the connection. 2529 */ 2530 conn->init_addr_type = own_address_type; 2531 if (own_address_type == ADDR_LE_DEV_RANDOM) 2532 bacpy(&conn->init_addr, &hdev->random_addr); 2533 else 2534 bacpy(&conn->init_addr, &hdev->bdaddr); 2535 2536 conn->resp_addr_type = peer_addr_type; 2537 bacpy(&conn->resp_addr, peer_addr); 2538 2539 /* We don't want the connection attempt to stick around 2540 * indefinitely since LE doesn't have a page timeout concept 2541 * like BR/EDR. Set a timer for any connection that doesn't use 2542 * the accept list for connecting.
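* The timer armed below is the conn->le_conn_timeout delayed work; when it fires, the stalled attempt is aborted and handled as a connection failure (presumably cancelling the LE Create Connection), which is the closest LE equivalent of a BR/EDR page timeout.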
2543 */ 2544 if (filter_policy == HCI_LE_USE_PEER_ADDR) 2545 queue_delayed_work(conn->hdev->workqueue, 2546 &conn->le_conn_timeout, 2547 conn->conn_timeout); 2548 } 2549 2550 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status) 2551 { 2552 struct hci_cp_le_create_conn *cp; 2553 2554 BT_DBG("%s status 0x%2.2x", hdev->name, status); 2555 2556 /* All connection failure handling is taken care of by the 2557 * hci_le_conn_failed function which is triggered by the HCI 2558 * request completion callbacks used for connecting. 2559 */ 2560 if (status) 2561 return; 2562 2563 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN); 2564 if (!cp) 2565 return; 2566 2567 hci_dev_lock(hdev); 2568 2569 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type, 2570 cp->own_address_type, cp->filter_policy); 2571 2572 hci_dev_unlock(hdev); 2573 } 2574 2575 static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status) 2576 { 2577 struct hci_cp_le_ext_create_conn *cp; 2578 2579 BT_DBG("%s status 0x%2.2x", hdev->name, status); 2580 2581 /* All connection failure handling is taken care of by the 2582 * hci_le_conn_failed function which is triggered by the HCI 2583 * request completion callbacks used for connecting. 2584 */ 2585 if (status) 2586 return; 2587 2588 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN); 2589 if (!cp) 2590 return; 2591 2592 hci_dev_lock(hdev); 2593 2594 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type, 2595 cp->own_addr_type, cp->filter_policy); 2596 2597 hci_dev_unlock(hdev); 2598 } 2599 2600 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status) 2601 { 2602 struct hci_cp_le_read_remote_features *cp; 2603 struct hci_conn *conn; 2604 2605 BT_DBG("%s status 0x%2.2x", hdev->name, status); 2606 2607 if (!status) 2608 return; 2609 2610 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES); 2611 if (!cp) 2612 return; 2613 2614 hci_dev_lock(hdev); 2615 2616 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2617 if (conn) { 2618 if (conn->state == BT_CONFIG) { 2619 hci_connect_cfm(conn, status); 2620 hci_conn_drop(conn); 2621 } 2622 } 2623 2624 hci_dev_unlock(hdev); 2625 } 2626 2627 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status) 2628 { 2629 struct hci_cp_le_start_enc *cp; 2630 struct hci_conn *conn; 2631 2632 BT_DBG("%s status 0x%2.2x", hdev->name, status); 2633 2634 if (!status) 2635 return; 2636 2637 hci_dev_lock(hdev); 2638 2639 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC); 2640 if (!cp) 2641 goto unlock; 2642 2643 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2644 if (!conn) 2645 goto unlock; 2646 2647 if (conn->state != BT_CONNECTED) 2648 goto unlock; 2649 2650 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE); 2651 hci_conn_drop(conn); 2652 2653 unlock: 2654 hci_dev_unlock(hdev); 2655 } 2656 2657 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status) 2658 { 2659 struct hci_cp_switch_role *cp; 2660 struct hci_conn *conn; 2661 2662 BT_DBG("%s status 0x%2.2x", hdev->name, status); 2663 2664 if (!status) 2665 return; 2666 2667 cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE); 2668 if (!cp) 2669 return; 2670 2671 hci_dev_lock(hdev); 2672 2673 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); 2674 if (conn) 2675 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags); 2676 2677 hci_dev_unlock(hdev); 2678 } 2679 2680 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 2681 { 2682 __u8 status = *((__u8 *) skb->data); 2683 struct 
discovery_state *discov = &hdev->discovery; 2684 struct inquiry_entry *e; 2685 2686 BT_DBG("%s status 0x%2.2x", hdev->name, status); 2687 2688 hci_conn_check_pending(hdev); 2689 2690 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags)) 2691 return; 2692 2693 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */ 2694 wake_up_bit(&hdev->flags, HCI_INQUIRY); 2695 2696 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 2697 return; 2698 2699 hci_dev_lock(hdev); 2700 2701 if (discov->state != DISCOVERY_FINDING) 2702 goto unlock; 2703 2704 if (list_empty(&discov->resolve)) { 2705 /* When BR/EDR inquiry is active and no LE scanning is in 2706 * progress, then change the discovery state to indicate completion. 2707 * 2708 * When running LE scanning and BR/EDR inquiry simultaneously 2709 * and the LE scan already finished, then change the discovery 2710 * state to indicate completion. 2711 */ 2712 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) || 2713 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) 2714 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 2715 goto unlock; 2716 } 2717 2718 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED); 2719 if (e && hci_resolve_name(hdev, e) == 0) { 2720 e->name_state = NAME_PENDING; 2721 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING); 2722 } else { 2723 /* When BR/EDR inquiry is active and no LE scanning is in 2724 * progress, then change the discovery state to indicate completion. 2725 * 2726 * When running LE scanning and BR/EDR inquiry simultaneously 2727 * and the LE scan already finished, then change the discovery 2728 * state to indicate completion. 2729 */ 2730 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) || 2731 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) 2732 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 2733 } 2734 2735 unlock: 2736 hci_dev_unlock(hdev); 2737 } 2738 2739 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb) 2740 { 2741 struct inquiry_data data; 2742 struct inquiry_info *info = (void *) (skb->data + 1); 2743 int num_rsp = *((__u8 *) skb->data); 2744 2745 BT_DBG("%s num_rsp %d", hdev->name, num_rsp); 2746 2747 if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1) 2748 return; 2749 2750 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) 2751 return; 2752 2753 hci_dev_lock(hdev); 2754 2755 for (; num_rsp; num_rsp--, info++) { 2756 u32 flags; 2757 2758 bacpy(&data.bdaddr, &info->bdaddr); 2759 data.pscan_rep_mode = info->pscan_rep_mode; 2760 data.pscan_period_mode = info->pscan_period_mode; 2761 data.pscan_mode = info->pscan_mode; 2762 memcpy(data.dev_class, info->dev_class, 3); 2763 data.clock_offset = info->clock_offset; 2764 data.rssi = HCI_RSSI_INVALID; 2765 data.ssp_mode = 0x00; 2766 2767 flags = hci_inquiry_cache_update(hdev, &data, false); 2768 2769 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 2770 info->dev_class, HCI_RSSI_INVALID, 2771 flags, NULL, 0, NULL, 0); 2772 } 2773 2774 hci_dev_unlock(hdev); 2775 } 2776 2777 static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 2778 { 2779 struct hci_ev_conn_complete *ev = (void *) skb->data; 2780 struct hci_conn *conn; 2781 2782 BT_DBG("%s", hdev->name); 2783 2784 hci_dev_lock(hdev); 2785 2786 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); 2787 if (!conn) { 2788 /* Connection may not exist if auto-connected. Check the bredr 2789 * allowlist to see if this device is allowed to auto connect. 2790 * If the link is an ACL type, create a connection object 2791 * automatically.
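* For example, a peer that was added to hdev->accept_list so it can wake the host during suspend may complete a connection the host never initiated; the hci_conn_add() call below creates the missing object for it.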
2792 * 2793 * Auto-connect will only occur if the event filter is 2794 * programmed with a given address. Right now, event filter is 2795 * only used during suspend. 2796 */ 2797 if (ev->link_type == ACL_LINK && 2798 hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, 2799 &ev->bdaddr, 2800 BDADDR_BREDR)) { 2801 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr, 2802 HCI_ROLE_SLAVE); 2803 if (!conn) { 2804 bt_dev_err(hdev, "no memory for new conn"); 2805 goto unlock; 2806 } 2807 } else { 2808 if (ev->link_type != SCO_LINK) 2809 goto unlock; 2810 2811 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, 2812 &ev->bdaddr); 2813 if (!conn) 2814 goto unlock; 2815 2816 conn->type = SCO_LINK; 2817 } 2818 } 2819 2820 if (!ev->status) { 2821 conn->handle = __le16_to_cpu(ev->handle); 2822 2823 if (conn->type == ACL_LINK) { 2824 conn->state = BT_CONFIG; 2825 hci_conn_hold(conn); 2826 2827 if (!conn->out && !hci_conn_ssp_enabled(conn) && 2828 !hci_find_link_key(hdev, &ev->bdaddr)) 2829 conn->disc_timeout = HCI_PAIRING_TIMEOUT; 2830 else 2831 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 2832 } else 2833 conn->state = BT_CONNECTED; 2834 2835 hci_debugfs_create_conn(conn); 2836 hci_conn_add_sysfs(conn); 2837 2838 if (test_bit(HCI_AUTH, &hdev->flags)) 2839 set_bit(HCI_CONN_AUTH, &conn->flags); 2840 2841 if (test_bit(HCI_ENCRYPT, &hdev->flags)) 2842 set_bit(HCI_CONN_ENCRYPT, &conn->flags); 2843 2844 /* Get remote features */ 2845 if (conn->type == ACL_LINK) { 2846 struct hci_cp_read_remote_features cp; 2847 cp.handle = ev->handle; 2848 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES, 2849 sizeof(cp), &cp); 2850 2851 hci_req_update_scan(hdev); 2852 } 2853 2854 /* Set packet type for incoming connection */ 2855 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) { 2856 struct hci_cp_change_conn_ptype cp; 2857 cp.handle = ev->handle; 2858 cp.pkt_type = cpu_to_le16(conn->pkt_type); 2859 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp), 2860 &cp); 2861 } 2862 } else { 2863 conn->state = BT_CLOSED; 2864 if (conn->type == ACL_LINK) 2865 mgmt_connect_failed(hdev, &conn->dst, conn->type, 2866 conn->dst_type, ev->status); 2867 } 2868 2869 if (conn->type == ACL_LINK) 2870 hci_sco_setup(conn, ev->status); 2871 2872 if (ev->status) { 2873 hci_connect_cfm(conn, ev->status); 2874 hci_conn_del(conn); 2875 } else if (ev->link_type == SCO_LINK) { 2876 switch (conn->setting & SCO_AIRMODE_MASK) { 2877 case SCO_AIRMODE_CVSD: 2878 if (hdev->notify) 2879 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD); 2880 break; 2881 } 2882 2883 hci_connect_cfm(conn, ev->status); 2884 } 2885 2886 unlock: 2887 hci_dev_unlock(hdev); 2888 2889 hci_conn_check_pending(hdev); 2890 } 2891 2892 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr) 2893 { 2894 struct hci_cp_reject_conn_req cp; 2895 2896 bacpy(&cp.bdaddr, bdaddr); 2897 cp.reason = HCI_ERROR_REJ_BAD_ADDR; 2898 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp); 2899 } 2900 2901 static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb) 2902 { 2903 struct hci_ev_conn_request *ev = (void *) skb->data; 2904 int mask = hdev->link_mode; 2905 struct inquiry_entry *ie; 2906 struct hci_conn *conn; 2907 __u8 flags = 0; 2908 2909 BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr, 2910 ev->link_type); 2911 2912 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type, 2913 &flags); 2914 2915 if (!(mask & HCI_LM_ACCEPT)) { 2916 hci_reject_conn(hdev, &ev->bdaddr); 2917 return; 2918 } 2919 2920 if (hci_bdaddr_list_lookup(&hdev->reject_list, 
&ev->bdaddr, 2921 BDADDR_BREDR)) { 2922 hci_reject_conn(hdev, &ev->bdaddr); 2923 return; 2924 } 2925 2926 /* Require HCI_CONNECTABLE or an accept list entry to accept the 2927 * connection. These features are only touched through mgmt so 2928 * only do the checks if HCI_MGMT is set. 2929 */ 2930 if (hci_dev_test_flag(hdev, HCI_MGMT) && 2931 !hci_dev_test_flag(hdev, HCI_CONNECTABLE) && 2932 !hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr, 2933 BDADDR_BREDR)) { 2934 hci_reject_conn(hdev, &ev->bdaddr); 2935 return; 2936 } 2937 2938 /* Connection accepted */ 2939 2940 hci_dev_lock(hdev); 2941 2942 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr); 2943 if (ie) 2944 memcpy(ie->data.dev_class, ev->dev_class, 3); 2945 2946 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, 2947 &ev->bdaddr); 2948 if (!conn) { 2949 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr, 2950 HCI_ROLE_SLAVE); 2951 if (!conn) { 2952 bt_dev_err(hdev, "no memory for new connection"); 2953 hci_dev_unlock(hdev); 2954 return; 2955 } 2956 } 2957 2958 memcpy(conn->dev_class, ev->dev_class, 3); 2959 2960 hci_dev_unlock(hdev); 2961 2962 if (ev->link_type == ACL_LINK || 2963 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) { 2964 struct hci_cp_accept_conn_req cp; 2965 conn->state = BT_CONNECT; 2966 2967 bacpy(&cp.bdaddr, &ev->bdaddr); 2968 2969 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER)) 2970 cp.role = 0x00; /* Become central */ 2971 else 2972 cp.role = 0x01; /* Remain peripheral */ 2973 2974 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp); 2975 } else if (!(flags & HCI_PROTO_DEFER)) { 2976 struct hci_cp_accept_sync_conn_req cp; 2977 conn->state = BT_CONNECT; 2978 2979 bacpy(&cp.bdaddr, &ev->bdaddr); 2980 cp.pkt_type = cpu_to_le16(conn->pkt_type); 2981 2982 cp.tx_bandwidth = cpu_to_le32(0x00001f40); 2983 cp.rx_bandwidth = cpu_to_le32(0x00001f40); 2984 cp.max_latency = cpu_to_le16(0xffff); 2985 cp.content_format = cpu_to_le16(hdev->voice_setting); 2986 cp.retrans_effort = 0xff; 2987 2988 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp), 2989 &cp); 2990 } else { 2991 conn->state = BT_CONNECT2; 2992 hci_connect_cfm(conn, 0); 2993 } 2994 } 2995 2996 static u8 hci_to_mgmt_reason(u8 err) 2997 { 2998 switch (err) { 2999 case HCI_ERROR_CONNECTION_TIMEOUT: 3000 return MGMT_DEV_DISCONN_TIMEOUT; 3001 case HCI_ERROR_REMOTE_USER_TERM: 3002 case HCI_ERROR_REMOTE_LOW_RESOURCES: 3003 case HCI_ERROR_REMOTE_POWER_OFF: 3004 return MGMT_DEV_DISCONN_REMOTE; 3005 case HCI_ERROR_LOCAL_HOST_TERM: 3006 return MGMT_DEV_DISCONN_LOCAL_HOST; 3007 default: 3008 return MGMT_DEV_DISCONN_UNKNOWN; 3009 } 3010 } 3011 3012 static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 3013 { 3014 struct hci_ev_disconn_complete *ev = (void *) skb->data; 3015 u8 reason; 3016 struct hci_conn_params *params; 3017 struct hci_conn *conn; 3018 bool mgmt_connected; 3019 3020 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 3021 3022 hci_dev_lock(hdev); 3023 3024 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 3025 if (!conn) 3026 goto unlock; 3027 3028 if (ev->status) { 3029 mgmt_disconnect_failed(hdev, &conn->dst, conn->type, 3030 conn->dst_type, ev->status); 3031 goto unlock; 3032 } 3033 3034 conn->state = BT_CLOSED; 3035 3036 mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags); 3037 3038 if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags)) 3039 reason = MGMT_DEV_DISCONN_AUTH_FAILURE; 3040 else 3041 reason = hci_to_mgmt_reason(ev->reason); 3042 
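/* Let user space know about the disconnection, using the mgmt reason computed above (an authentication failure takes precedence over the raw HCI reason code). */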
3043 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type, 3044 reason, mgmt_connected); 3045 3046 if (conn->type == ACL_LINK) { 3047 if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags)) 3048 hci_remove_link_key(hdev, &conn->dst); 3049 3050 hci_req_update_scan(hdev); 3051 } 3052 3053 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type); 3054 if (params) { 3055 switch (params->auto_connect) { 3056 case HCI_AUTO_CONN_LINK_LOSS: 3057 if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT) 3058 break; 3059 fallthrough; 3060 3061 case HCI_AUTO_CONN_DIRECT: 3062 case HCI_AUTO_CONN_ALWAYS: 3063 list_del_init(&params->action); 3064 list_add(&params->action, &hdev->pend_le_conns); 3065 hci_update_passive_scan(hdev); 3066 break; 3067 3068 default: 3069 break; 3070 } 3071 } 3072 3073 hci_disconn_cfm(conn, ev->reason); 3074 3075 /* Re-enable advertising if necessary, since it might 3076 * have been disabled by the connection. From the 3077 * HCI_LE_Set_Advertise_Enable command description in 3078 * the core specification (v4.0): 3079 * "The Controller shall continue advertising until the Host 3080 * issues an LE_Set_Advertise_Enable command with 3081 * Advertising_Enable set to 0x00 (Advertising is disabled) 3082 * or until a connection is created or until the Advertising 3083 * is timed out due to Directed Advertising." 3084 */ 3085 if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) { 3086 hdev->cur_adv_instance = conn->adv_instance; 3087 hci_enable_advertising(hdev); 3088 } 3089 3090 hci_conn_del(conn); 3091 3092 unlock: 3093 hci_dev_unlock(hdev); 3094 } 3095 3096 static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 3097 { 3098 struct hci_ev_auth_complete *ev = (void *) skb->data; 3099 struct hci_conn *conn; 3100 3101 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 3102 3103 hci_dev_lock(hdev); 3104 3105 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 3106 if (!conn) 3107 goto unlock; 3108 3109 if (!ev->status) { 3110 clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags); 3111 3112 if (!hci_conn_ssp_enabled(conn) && 3113 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) { 3114 bt_dev_info(hdev, "re-auth of legacy device is not possible."); 3115 } else { 3116 set_bit(HCI_CONN_AUTH, &conn->flags); 3117 conn->sec_level = conn->pending_sec_level; 3118 } 3119 } else { 3120 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING) 3121 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags); 3122 3123 mgmt_auth_failed(conn, ev->status); 3124 } 3125 3126 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags); 3127 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags); 3128 3129 if (conn->state == BT_CONFIG) { 3130 if (!ev->status && hci_conn_ssp_enabled(conn)) { 3131 struct hci_cp_set_conn_encrypt cp; 3132 cp.handle = ev->handle; 3133 cp.encrypt = 0x01; 3134 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), 3135 &cp); 3136 } else { 3137 conn->state = BT_CONNECTED; 3138 hci_connect_cfm(conn, ev->status); 3139 hci_conn_drop(conn); 3140 } 3141 } else { 3142 hci_auth_cfm(conn, ev->status); 3143 3144 hci_conn_hold(conn); 3145 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 3146 hci_conn_drop(conn); 3147 } 3148 3149 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) { 3150 if (!ev->status) { 3151 struct hci_cp_set_conn_encrypt cp; 3152 cp.handle = ev->handle; 3153 cp.encrypt = 0x01; 3154 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), 3155 &cp); 3156 } else { 3157 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); 3158 hci_encrypt_cfm(conn, ev->status); 3159 } 3160 } 3161 3162 unlock: 3163
hci_dev_unlock(hdev); 3164 } 3165 3166 static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb) 3167 { 3168 struct hci_ev_remote_name *ev = (void *) skb->data; 3169 struct hci_conn *conn; 3170 3171 BT_DBG("%s", hdev->name); 3172 3173 hci_conn_check_pending(hdev); 3174 3175 hci_dev_lock(hdev); 3176 3177 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 3178 3179 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 3180 goto check_auth; 3181 3182 if (ev->status == 0) 3183 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name, 3184 strnlen(ev->name, HCI_MAX_NAME_LENGTH)); 3185 else 3186 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0); 3187 3188 check_auth: 3189 if (!conn) 3190 goto unlock; 3191 3192 if (!hci_outgoing_auth_needed(hdev, conn)) 3193 goto unlock; 3194 3195 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) { 3196 struct hci_cp_auth_requested cp; 3197 3198 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags); 3199 3200 cp.handle = __cpu_to_le16(conn->handle); 3201 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp); 3202 } 3203 3204 unlock: 3205 hci_dev_unlock(hdev); 3206 } 3207 3208 static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status, 3209 u16 opcode, struct sk_buff *skb) 3210 { 3211 const struct hci_rp_read_enc_key_size *rp; 3212 struct hci_conn *conn; 3213 u16 handle; 3214 3215 BT_DBG("%s status 0x%02x", hdev->name, status); 3216 3217 if (!skb || skb->len < sizeof(*rp)) { 3218 bt_dev_err(hdev, "invalid read key size response"); 3219 return; 3220 } 3221 3222 rp = (void *)skb->data; 3223 handle = le16_to_cpu(rp->handle); 3224 3225 hci_dev_lock(hdev); 3226 3227 conn = hci_conn_hash_lookup_handle(hdev, handle); 3228 if (!conn) 3229 goto unlock; 3230 3231 /* While unexpected, the read_enc_key_size command may fail. The most 3232 * secure approach is to then assume the key size is 0 to force a 3233 * disconnection. 3234 */ 3235 if (rp->status) { 3236 bt_dev_err(hdev, "failed to read key size for handle %u", 3237 handle); 3238 conn->enc_key_size = 0; 3239 } else { 3240 conn->enc_key_size = rp->key_size; 3241 } 3242 3243 hci_encrypt_cfm(conn, 0); 3244 3245 unlock: 3246 hci_dev_unlock(hdev); 3247 } 3248 3249 static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb) 3250 { 3251 struct hci_ev_encrypt_change *ev = (void *) skb->data; 3252 struct hci_conn *conn; 3253 3254 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 3255 3256 hci_dev_lock(hdev); 3257 3258 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 3259 if (!conn) 3260 goto unlock; 3261 3262 if (!ev->status) { 3263 if (ev->encrypt) { 3264 /* Encryption implies authentication */ 3265 set_bit(HCI_CONN_AUTH, &conn->flags); 3266 set_bit(HCI_CONN_ENCRYPT, &conn->flags); 3267 conn->sec_level = conn->pending_sec_level; 3268 3269 /* P-256 authentication key implies FIPS */ 3270 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256) 3271 set_bit(HCI_CONN_FIPS, &conn->flags); 3272 3273 if ((conn->type == ACL_LINK && ev->encrypt == 0x02) || 3274 conn->type == LE_LINK) 3275 set_bit(HCI_CONN_AES_CCM, &conn->flags); 3276 } else { 3277 clear_bit(HCI_CONN_ENCRYPT, &conn->flags); 3278 clear_bit(HCI_CONN_AES_CCM, &conn->flags); 3279 } 3280 } 3281 3282 /* We should disregard the current RPA and generate a new one 3283 * whenever the encryption procedure fails. 
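* (Rationale, hedged: a failed encryption procedure may mean the peer no longer holds valid keys for our current resolvable private address, so expiring the RPA here forces a fresh address, and with it a clean new pairing attempt, instead of repeated failures against a stale identity.)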
3284 */ 3285 if (ev->status && conn->type == LE_LINK) { 3286 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED); 3287 hci_adv_instances_set_rpa_expired(hdev, true); 3288 } 3289 3290 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); 3291 3292 /* Check that the link security requirements are met */ 3293 if (!hci_conn_check_link_mode(conn)) 3294 ev->status = HCI_ERROR_AUTH_FAILURE; 3295 3296 if (ev->status && conn->state == BT_CONNECTED) { 3297 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING) 3298 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags); 3299 3300 /* Notify upper layers so they can clean up before 3301 * disconnecting. 3302 */ 3303 hci_encrypt_cfm(conn, ev->status); 3304 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE); 3305 hci_conn_drop(conn); 3306 goto unlock; 3307 } 3308 3309 /* Try reading the encryption key size for encrypted ACL links */ 3310 if (!ev->status && ev->encrypt && conn->type == ACL_LINK) { 3311 struct hci_cp_read_enc_key_size cp; 3312 struct hci_request req; 3313 3314 /* Only send HCI_Read_Encryption_Key_Size if the 3315 * controller really supports it. If it doesn't, assume 3316 * the default size (16). 3317 */ 3318 if (!(hdev->commands[20] & 0x10)) { 3319 conn->enc_key_size = HCI_LINK_KEY_SIZE; 3320 goto notify; 3321 } 3322 3323 hci_req_init(&req, hdev); 3324 3325 cp.handle = cpu_to_le16(conn->handle); 3326 hci_req_add(&req, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp); 3327 3328 if (hci_req_run_skb(&req, read_enc_key_size_complete)) { 3329 bt_dev_err(hdev, "sending read key size failed"); 3330 conn->enc_key_size = HCI_LINK_KEY_SIZE; 3331 goto notify; 3332 } 3333 3334 goto unlock; 3335 } 3336 3337 /* Set the default Authenticated Payload Timeout once 3338 * an LE link is established. As per Core Spec v5.0, Vol 2, Part B, 3339 * Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be 3340 * sent when the link is active and encryption is enabled. The conn 3341 * type can be either LE or ACL, the controller must support LMP Ping, 3342 * and the encryption in use must be AES-CCM.
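* The condition below encodes exactly that: HCI_CONN_ENCRYPT and HCI_CONN_AES_CCM must both be set, plus lmp_ping_capable() for an ACL link or the HCI_LE_PING feature bit for an LE link.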
3343 */ 3344 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) && 3345 test_bit(HCI_CONN_AES_CCM, &conn->flags) && 3346 ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) || 3347 (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) { 3348 struct hci_cp_write_auth_payload_to cp; 3349 3350 cp.handle = cpu_to_le16(conn->handle); 3351 cp.timeout = cpu_to_le16(hdev->auth_payload_timeout); 3352 hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO, 3353 sizeof(cp), &cp); 3354 } 3355 3356 notify: 3357 hci_encrypt_cfm(conn, ev->status); 3358 3359 unlock: 3360 hci_dev_unlock(hdev); 3361 } 3362 3363 static void hci_change_link_key_complete_evt(struct hci_dev *hdev, 3364 struct sk_buff *skb) 3365 { 3366 struct hci_ev_change_link_key_complete *ev = (void *) skb->data; 3367 struct hci_conn *conn; 3368 3369 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 3370 3371 hci_dev_lock(hdev); 3372 3373 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 3374 if (conn) { 3375 if (!ev->status) 3376 set_bit(HCI_CONN_SECURE, &conn->flags); 3377 3378 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags); 3379 3380 hci_key_change_cfm(conn, ev->status); 3381 } 3382 3383 hci_dev_unlock(hdev); 3384 } 3385 3386 static void hci_remote_features_evt(struct hci_dev *hdev, 3387 struct sk_buff *skb) 3388 { 3389 struct hci_ev_remote_features *ev = (void *) skb->data; 3390 struct hci_conn *conn; 3391 3392 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 3393 3394 hci_dev_lock(hdev); 3395 3396 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 3397 if (!conn) 3398 goto unlock; 3399 3400 if (!ev->status) 3401 memcpy(conn->features[0], ev->features, 8); 3402 3403 if (conn->state != BT_CONFIG) 3404 goto unlock; 3405 3406 if (!ev->status && lmp_ext_feat_capable(hdev) && 3407 lmp_ext_feat_capable(conn)) { 3408 struct hci_cp_read_remote_ext_features cp; 3409 cp.handle = ev->handle; 3410 cp.page = 0x01; 3411 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES, 3412 sizeof(cp), &cp); 3413 goto unlock; 3414 } 3415 3416 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) { 3417 struct hci_cp_remote_name_req cp; 3418 memset(&cp, 0, sizeof(cp)); 3419 bacpy(&cp.bdaddr, &conn->dst); 3420 cp.pscan_rep_mode = 0x02; 3421 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp); 3422 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) 3423 mgmt_device_connected(hdev, conn, NULL, 0); 3424 3425 if (!hci_outgoing_auth_needed(hdev, conn)) { 3426 conn->state = BT_CONNECTED; 3427 hci_connect_cfm(conn, ev->status); 3428 hci_conn_drop(conn); 3429 } 3430 3431 unlock: 3432 hci_dev_unlock(hdev); 3433 } 3434 3435 static inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev, u8 ncmd) 3436 { 3437 cancel_delayed_work(&hdev->cmd_timer); 3438 3439 if (!test_bit(HCI_RESET, &hdev->flags)) { 3440 if (ncmd) { 3441 cancel_delayed_work(&hdev->ncmd_timer); 3442 atomic_set(&hdev->cmd_cnt, 1); 3443 } else { 3444 schedule_delayed_work(&hdev->ncmd_timer, 3445 HCI_NCMD_TIMEOUT); 3446 } 3447 } 3448 } 3449 3450 static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb, 3451 u16 *opcode, u8 *status, 3452 hci_req_complete_t *req_complete, 3453 hci_req_complete_skb_t *req_complete_skb) 3454 { 3455 struct hci_ev_cmd_complete *ev = (void *) skb->data; 3456 3457 *opcode = __le16_to_cpu(ev->opcode); 3458 *status = skb->data[sizeof(*ev)]; 3459 3460 skb_pull(skb, sizeof(*ev)); 3461 3462 switch (*opcode) { 3463 case HCI_OP_INQUIRY_CANCEL: 3464 hci_cc_inquiry_cancel(hdev, skb, status); 3465 
break; 3466 3467 case HCI_OP_PERIODIC_INQ: 3468 hci_cc_periodic_inq(hdev, skb); 3469 break; 3470 3471 case HCI_OP_EXIT_PERIODIC_INQ: 3472 hci_cc_exit_periodic_inq(hdev, skb); 3473 break; 3474 3475 case HCI_OP_REMOTE_NAME_REQ_CANCEL: 3476 hci_cc_remote_name_req_cancel(hdev, skb); 3477 break; 3478 3479 case HCI_OP_ROLE_DISCOVERY: 3480 hci_cc_role_discovery(hdev, skb); 3481 break; 3482 3483 case HCI_OP_READ_LINK_POLICY: 3484 hci_cc_read_link_policy(hdev, skb); 3485 break; 3486 3487 case HCI_OP_WRITE_LINK_POLICY: 3488 hci_cc_write_link_policy(hdev, skb); 3489 break; 3490 3491 case HCI_OP_READ_DEF_LINK_POLICY: 3492 hci_cc_read_def_link_policy(hdev, skb); 3493 break; 3494 3495 case HCI_OP_WRITE_DEF_LINK_POLICY: 3496 hci_cc_write_def_link_policy(hdev, skb); 3497 break; 3498 3499 case HCI_OP_RESET: 3500 hci_cc_reset(hdev, skb); 3501 break; 3502 3503 case HCI_OP_READ_STORED_LINK_KEY: 3504 hci_cc_read_stored_link_key(hdev, skb); 3505 break; 3506 3507 case HCI_OP_DELETE_STORED_LINK_KEY: 3508 hci_cc_delete_stored_link_key(hdev, skb); 3509 break; 3510 3511 case HCI_OP_WRITE_LOCAL_NAME: 3512 hci_cc_write_local_name(hdev, skb); 3513 break; 3514 3515 case HCI_OP_READ_LOCAL_NAME: 3516 hci_cc_read_local_name(hdev, skb); 3517 break; 3518 3519 case HCI_OP_WRITE_AUTH_ENABLE: 3520 hci_cc_write_auth_enable(hdev, skb); 3521 break; 3522 3523 case HCI_OP_WRITE_ENCRYPT_MODE: 3524 hci_cc_write_encrypt_mode(hdev, skb); 3525 break; 3526 3527 case HCI_OP_WRITE_SCAN_ENABLE: 3528 hci_cc_write_scan_enable(hdev, skb); 3529 break; 3530 3531 case HCI_OP_SET_EVENT_FLT: 3532 hci_cc_set_event_filter(hdev, skb); 3533 break; 3534 3535 case HCI_OP_READ_CLASS_OF_DEV: 3536 hci_cc_read_class_of_dev(hdev, skb); 3537 break; 3538 3539 case HCI_OP_WRITE_CLASS_OF_DEV: 3540 hci_cc_write_class_of_dev(hdev, skb); 3541 break; 3542 3543 case HCI_OP_READ_VOICE_SETTING: 3544 hci_cc_read_voice_setting(hdev, skb); 3545 break; 3546 3547 case HCI_OP_WRITE_VOICE_SETTING: 3548 hci_cc_write_voice_setting(hdev, skb); 3549 break; 3550 3551 case HCI_OP_READ_NUM_SUPPORTED_IAC: 3552 hci_cc_read_num_supported_iac(hdev, skb); 3553 break; 3554 3555 case HCI_OP_WRITE_SSP_MODE: 3556 hci_cc_write_ssp_mode(hdev, skb); 3557 break; 3558 3559 case HCI_OP_WRITE_SC_SUPPORT: 3560 hci_cc_write_sc_support(hdev, skb); 3561 break; 3562 3563 case HCI_OP_READ_AUTH_PAYLOAD_TO: 3564 hci_cc_read_auth_payload_timeout(hdev, skb); 3565 break; 3566 3567 case HCI_OP_WRITE_AUTH_PAYLOAD_TO: 3568 hci_cc_write_auth_payload_timeout(hdev, skb); 3569 break; 3570 3571 case HCI_OP_READ_LOCAL_VERSION: 3572 hci_cc_read_local_version(hdev, skb); 3573 break; 3574 3575 case HCI_OP_READ_LOCAL_COMMANDS: 3576 hci_cc_read_local_commands(hdev, skb); 3577 break; 3578 3579 case HCI_OP_READ_LOCAL_FEATURES: 3580 hci_cc_read_local_features(hdev, skb); 3581 break; 3582 3583 case HCI_OP_READ_LOCAL_EXT_FEATURES: 3584 hci_cc_read_local_ext_features(hdev, skb); 3585 break; 3586 3587 case HCI_OP_READ_BUFFER_SIZE: 3588 hci_cc_read_buffer_size(hdev, skb); 3589 break; 3590 3591 case HCI_OP_READ_BD_ADDR: 3592 hci_cc_read_bd_addr(hdev, skb); 3593 break; 3594 3595 case HCI_OP_READ_LOCAL_PAIRING_OPTS: 3596 hci_cc_read_local_pairing_opts(hdev, skb); 3597 break; 3598 3599 case HCI_OP_READ_PAGE_SCAN_ACTIVITY: 3600 hci_cc_read_page_scan_activity(hdev, skb); 3601 break; 3602 3603 case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY: 3604 hci_cc_write_page_scan_activity(hdev, skb); 3605 break; 3606 3607 case HCI_OP_READ_PAGE_SCAN_TYPE: 3608 hci_cc_read_page_scan_type(hdev, skb); 3609 break; 3610 3611 case HCI_OP_WRITE_PAGE_SCAN_TYPE: 3612 
hci_cc_write_page_scan_type(hdev, skb); 3613 break; 3614 3615 case HCI_OP_READ_DATA_BLOCK_SIZE: 3616 hci_cc_read_data_block_size(hdev, skb); 3617 break; 3618 3619 case HCI_OP_READ_FLOW_CONTROL_MODE: 3620 hci_cc_read_flow_control_mode(hdev, skb); 3621 break; 3622 3623 case HCI_OP_READ_LOCAL_AMP_INFO: 3624 hci_cc_read_local_amp_info(hdev, skb); 3625 break; 3626 3627 case HCI_OP_READ_CLOCK: 3628 hci_cc_read_clock(hdev, skb); 3629 break; 3630 3631 case HCI_OP_READ_INQ_RSP_TX_POWER: 3632 hci_cc_read_inq_rsp_tx_power(hdev, skb); 3633 break; 3634 3635 case HCI_OP_READ_DEF_ERR_DATA_REPORTING: 3636 hci_cc_read_def_err_data_reporting(hdev, skb); 3637 break; 3638 3639 case HCI_OP_WRITE_DEF_ERR_DATA_REPORTING: 3640 hci_cc_write_def_err_data_reporting(hdev, skb); 3641 break; 3642 3643 case HCI_OP_PIN_CODE_REPLY: 3644 hci_cc_pin_code_reply(hdev, skb); 3645 break; 3646 3647 case HCI_OP_PIN_CODE_NEG_REPLY: 3648 hci_cc_pin_code_neg_reply(hdev, skb); 3649 break; 3650 3651 case HCI_OP_READ_LOCAL_OOB_DATA: 3652 hci_cc_read_local_oob_data(hdev, skb); 3653 break; 3654 3655 case HCI_OP_READ_LOCAL_OOB_EXT_DATA: 3656 hci_cc_read_local_oob_ext_data(hdev, skb); 3657 break; 3658 3659 case HCI_OP_LE_READ_BUFFER_SIZE: 3660 hci_cc_le_read_buffer_size(hdev, skb); 3661 break; 3662 3663 case HCI_OP_LE_READ_LOCAL_FEATURES: 3664 hci_cc_le_read_local_features(hdev, skb); 3665 break; 3666 3667 case HCI_OP_LE_READ_ADV_TX_POWER: 3668 hci_cc_le_read_adv_tx_power(hdev, skb); 3669 break; 3670 3671 case HCI_OP_USER_CONFIRM_REPLY: 3672 hci_cc_user_confirm_reply(hdev, skb); 3673 break; 3674 3675 case HCI_OP_USER_CONFIRM_NEG_REPLY: 3676 hci_cc_user_confirm_neg_reply(hdev, skb); 3677 break; 3678 3679 case HCI_OP_USER_PASSKEY_REPLY: 3680 hci_cc_user_passkey_reply(hdev, skb); 3681 break; 3682 3683 case HCI_OP_USER_PASSKEY_NEG_REPLY: 3684 hci_cc_user_passkey_neg_reply(hdev, skb); 3685 break; 3686 3687 case HCI_OP_LE_SET_RANDOM_ADDR: 3688 hci_cc_le_set_random_addr(hdev, skb); 3689 break; 3690 3691 case HCI_OP_LE_SET_ADV_ENABLE: 3692 hci_cc_le_set_adv_enable(hdev, skb); 3693 break; 3694 3695 case HCI_OP_LE_SET_SCAN_PARAM: 3696 hci_cc_le_set_scan_param(hdev, skb); 3697 break; 3698 3699 case HCI_OP_LE_SET_SCAN_ENABLE: 3700 hci_cc_le_set_scan_enable(hdev, skb); 3701 break; 3702 3703 case HCI_OP_LE_READ_ACCEPT_LIST_SIZE: 3704 hci_cc_le_read_accept_list_size(hdev, skb); 3705 break; 3706 3707 case HCI_OP_LE_CLEAR_ACCEPT_LIST: 3708 hci_cc_le_clear_accept_list(hdev, skb); 3709 break; 3710 3711 case HCI_OP_LE_ADD_TO_ACCEPT_LIST: 3712 hci_cc_le_add_to_accept_list(hdev, skb); 3713 break; 3714 3715 case HCI_OP_LE_DEL_FROM_ACCEPT_LIST: 3716 hci_cc_le_del_from_accept_list(hdev, skb); 3717 break; 3718 3719 case HCI_OP_LE_READ_SUPPORTED_STATES: 3720 hci_cc_le_read_supported_states(hdev, skb); 3721 break; 3722 3723 case HCI_OP_LE_READ_DEF_DATA_LEN: 3724 hci_cc_le_read_def_data_len(hdev, skb); 3725 break; 3726 3727 case HCI_OP_LE_WRITE_DEF_DATA_LEN: 3728 hci_cc_le_write_def_data_len(hdev, skb); 3729 break; 3730 3731 case HCI_OP_LE_ADD_TO_RESOLV_LIST: 3732 hci_cc_le_add_to_resolv_list(hdev, skb); 3733 break; 3734 3735 case HCI_OP_LE_DEL_FROM_RESOLV_LIST: 3736 hci_cc_le_del_from_resolv_list(hdev, skb); 3737 break; 3738 3739 case HCI_OP_LE_CLEAR_RESOLV_LIST: 3740 hci_cc_le_clear_resolv_list(hdev, skb); 3741 break; 3742 3743 case HCI_OP_LE_READ_RESOLV_LIST_SIZE: 3744 hci_cc_le_read_resolv_list_size(hdev, skb); 3745 break; 3746 3747 case HCI_OP_LE_SET_ADDR_RESOLV_ENABLE: 3748 hci_cc_le_set_addr_resolution_enable(hdev, skb); 3749 break; 3750 3751 case 
HCI_OP_LE_READ_MAX_DATA_LEN: 3752 hci_cc_le_read_max_data_len(hdev, skb); 3753 break; 3754 3755 case HCI_OP_WRITE_LE_HOST_SUPPORTED: 3756 hci_cc_write_le_host_supported(hdev, skb); 3757 break; 3758 3759 case HCI_OP_LE_SET_ADV_PARAM: 3760 hci_cc_set_adv_param(hdev, skb); 3761 break; 3762 3763 case HCI_OP_READ_RSSI: 3764 hci_cc_read_rssi(hdev, skb); 3765 break; 3766 3767 case HCI_OP_READ_TX_POWER: 3768 hci_cc_read_tx_power(hdev, skb); 3769 break; 3770 3771 case HCI_OP_WRITE_SSP_DEBUG_MODE: 3772 hci_cc_write_ssp_debug_mode(hdev, skb); 3773 break; 3774 3775 case HCI_OP_LE_SET_EXT_SCAN_PARAMS: 3776 hci_cc_le_set_ext_scan_param(hdev, skb); 3777 break; 3778 3779 case HCI_OP_LE_SET_EXT_SCAN_ENABLE: 3780 hci_cc_le_set_ext_scan_enable(hdev, skb); 3781 break; 3782 3783 case HCI_OP_LE_SET_DEFAULT_PHY: 3784 hci_cc_le_set_default_phy(hdev, skb); 3785 break; 3786 3787 case HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS: 3788 hci_cc_le_read_num_adv_sets(hdev, skb); 3789 break; 3790 3791 case HCI_OP_LE_SET_EXT_ADV_PARAMS: 3792 hci_cc_set_ext_adv_param(hdev, skb); 3793 break; 3794 3795 case HCI_OP_LE_SET_EXT_ADV_ENABLE: 3796 hci_cc_le_set_ext_adv_enable(hdev, skb); 3797 break; 3798 3799 case HCI_OP_LE_SET_ADV_SET_RAND_ADDR: 3800 hci_cc_le_set_adv_set_random_addr(hdev, skb); 3801 break; 3802 3803 case HCI_OP_LE_REMOVE_ADV_SET: 3804 hci_cc_le_remove_adv_set(hdev, skb); 3805 break; 3806 3807 case HCI_OP_LE_CLEAR_ADV_SETS: 3808 hci_cc_le_clear_adv_sets(hdev, skb); 3809 break; 3810 3811 case HCI_OP_LE_READ_TRANSMIT_POWER: 3812 hci_cc_le_read_transmit_power(hdev, skb); 3813 break; 3814 3815 default: 3816 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode); 3817 break; 3818 } 3819 3820 handle_cmd_cnt_and_timer(hdev, ev->ncmd); 3821 3822 hci_req_cmd_complete(hdev, *opcode, *status, req_complete, 3823 req_complete_skb); 3824 3825 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) { 3826 bt_dev_err(hdev, 3827 "unexpected event for opcode 0x%4.4x", *opcode); 3828 return; 3829 } 3830 3831 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q)) 3832 queue_work(hdev->workqueue, &hdev->cmd_work); 3833 } 3834 3835 static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb, 3836 u16 *opcode, u8 *status, 3837 hci_req_complete_t *req_complete, 3838 hci_req_complete_skb_t *req_complete_skb) 3839 { 3840 struct hci_ev_cmd_status *ev = (void *) skb->data; 3841 3842 skb_pull(skb, sizeof(*ev)); 3843 3844 *opcode = __le16_to_cpu(ev->opcode); 3845 *status = ev->status; 3846 3847 switch (*opcode) { 3848 case HCI_OP_INQUIRY: 3849 hci_cs_inquiry(hdev, ev->status); 3850 break; 3851 3852 case HCI_OP_CREATE_CONN: 3853 hci_cs_create_conn(hdev, ev->status); 3854 break; 3855 3856 case HCI_OP_DISCONNECT: 3857 hci_cs_disconnect(hdev, ev->status); 3858 break; 3859 3860 case HCI_OP_ADD_SCO: 3861 hci_cs_add_sco(hdev, ev->status); 3862 break; 3863 3864 case HCI_OP_AUTH_REQUESTED: 3865 hci_cs_auth_requested(hdev, ev->status); 3866 break; 3867 3868 case HCI_OP_SET_CONN_ENCRYPT: 3869 hci_cs_set_conn_encrypt(hdev, ev->status); 3870 break; 3871 3872 case HCI_OP_REMOTE_NAME_REQ: 3873 hci_cs_remote_name_req(hdev, ev->status); 3874 break; 3875 3876 case HCI_OP_READ_REMOTE_FEATURES: 3877 hci_cs_read_remote_features(hdev, ev->status); 3878 break; 3879 3880 case HCI_OP_READ_REMOTE_EXT_FEATURES: 3881 hci_cs_read_remote_ext_features(hdev, ev->status); 3882 break; 3883 3884 case HCI_OP_SETUP_SYNC_CONN: 3885 hci_cs_setup_sync_conn(hdev, ev->status); 3886 break; 3887 3888 case HCI_OP_ENHANCED_SETUP_SYNC_CONN: 3889 hci_cs_enhanced_setup_sync_conn(hdev, 
ev->status); 3890 break; 3891 3892 case HCI_OP_SNIFF_MODE: 3893 hci_cs_sniff_mode(hdev, ev->status); 3894 break; 3895 3896 case HCI_OP_EXIT_SNIFF_MODE: 3897 hci_cs_exit_sniff_mode(hdev, ev->status); 3898 break; 3899 3900 case HCI_OP_SWITCH_ROLE: 3901 hci_cs_switch_role(hdev, ev->status); 3902 break; 3903 3904 case HCI_OP_LE_CREATE_CONN: 3905 hci_cs_le_create_conn(hdev, ev->status); 3906 break; 3907 3908 case HCI_OP_LE_READ_REMOTE_FEATURES: 3909 hci_cs_le_read_remote_features(hdev, ev->status); 3910 break; 3911 3912 case HCI_OP_LE_START_ENC: 3913 hci_cs_le_start_enc(hdev, ev->status); 3914 break; 3915 3916 case HCI_OP_LE_EXT_CREATE_CONN: 3917 hci_cs_le_ext_create_conn(hdev, ev->status); 3918 break; 3919 3920 default: 3921 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode); 3922 break; 3923 } 3924 3925 handle_cmd_cnt_and_timer(hdev, ev->ncmd); 3926 3927 /* Indicate request completion if the command failed. Also, if 3928 * we're not waiting for a special event and we get a success 3929 * command status, we should try to flag the request as completed 3930 * (since for this kind of command there will not be a command 3931 * complete event). 3932 */ 3933 if (ev->status || 3934 (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->hci.req_event)) 3935 hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete, 3936 req_complete_skb); 3937 3938 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) { 3939 bt_dev_err(hdev, 3940 "unexpected event for opcode 0x%4.4x", *opcode); 3941 return; 3942 } 3943 3944 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q)) 3945 queue_work(hdev->workqueue, &hdev->cmd_work); 3946 } 3947 3948 static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb) 3949 { 3950 struct hci_ev_hardware_error *ev = (void *) skb->data; 3951 3952 hdev->hw_error_code = ev->code; 3953 3954 queue_work(hdev->req_workqueue, &hdev->error_reset); 3955 } 3956 3957 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb) 3958 { 3959 struct hci_ev_role_change *ev = (void *) skb->data; 3960 struct hci_conn *conn; 3961 3962 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 3963 3964 hci_dev_lock(hdev); 3965 3966 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 3967 if (conn) { 3968 if (!ev->status) 3969 conn->role = ev->role; 3970 3971 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags); 3972 3973 hci_role_switch_cfm(conn, ev->status, ev->role); 3974 } 3975 3976 hci_dev_unlock(hdev); 3977 } 3978 3979 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb) 3980 { 3981 struct hci_ev_num_comp_pkts *ev = (void *) skb->data; 3982 int i; 3983 3984 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) { 3985 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode); 3986 return; 3987 } 3988 3989 if (skb->len < sizeof(*ev) || 3990 skb->len < struct_size(ev, handles, ev->num_hndl)) { 3991 BT_DBG("%s bad parameters", hdev->name); 3992 return; 3993 } 3994 3995 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl); 3996 3997 for (i = 0; i < ev->num_hndl; i++) { 3998 struct hci_comp_pkts_info *info = &ev->handles[i]; 3999 struct hci_conn *conn; 4000 __u16 handle, count; 4001 4002 handle = __le16_to_cpu(info->handle); 4003 count = __le16_to_cpu(info->count); 4004 4005 conn = hci_conn_hash_lookup_handle(hdev, handle); 4006 if (!conn) 4007 continue; 4008 4009 conn->sent -= count; 4010 4011 switch (conn->type) { 4012 case ACL_LINK: 4013 hdev->acl_cnt += count; 4014 if (hdev->acl_cnt > hdev->acl_pkts) 4015 hdev->acl_cnt = hdev->acl_pkts; 4016
break; 4017 4018 case LE_LINK: 4019 if (hdev->le_pkts) { 4020 hdev->le_cnt += count; 4021 if (hdev->le_cnt > hdev->le_pkts) 4022 hdev->le_cnt = hdev->le_pkts; 4023 } else { 4024 hdev->acl_cnt += count; 4025 if (hdev->acl_cnt > hdev->acl_pkts) 4026 hdev->acl_cnt = hdev->acl_pkts; 4027 } 4028 break; 4029 4030 case SCO_LINK: 4031 hdev->sco_cnt += count; 4032 if (hdev->sco_cnt > hdev->sco_pkts) 4033 hdev->sco_cnt = hdev->sco_pkts; 4034 break; 4035 4036 default: 4037 bt_dev_err(hdev, "unknown type %d conn %p", 4038 conn->type, conn); 4039 break; 4040 } 4041 } 4042 4043 queue_work(hdev->workqueue, &hdev->tx_work); 4044 } 4045 4046 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev, 4047 __u16 handle) 4048 { 4049 struct hci_chan *chan; 4050 4051 switch (hdev->dev_type) { 4052 case HCI_PRIMARY: 4053 return hci_conn_hash_lookup_handle(hdev, handle); 4054 case HCI_AMP: 4055 chan = hci_chan_lookup_handle(hdev, handle); 4056 if (chan) 4057 return chan->conn; 4058 break; 4059 default: 4060 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type); 4061 break; 4062 } 4063 4064 return NULL; 4065 } 4066 4067 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb) 4068 { 4069 struct hci_ev_num_comp_blocks *ev = (void *) skb->data; 4070 int i; 4071 4072 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) { 4073 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode); 4074 return; 4075 } 4076 4077 if (skb->len < sizeof(*ev) || 4078 skb->len < struct_size(ev, handles, ev->num_hndl)) { 4079 BT_DBG("%s bad parameters", hdev->name); 4080 return; 4081 } 4082 4083 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks, 4084 ev->num_hndl); 4085 4086 for (i = 0; i < ev->num_hndl; i++) { 4087 struct hci_comp_blocks_info *info = &ev->handles[i]; 4088 struct hci_conn *conn = NULL; 4089 __u16 handle, block_count; 4090 4091 handle = __le16_to_cpu(info->handle); 4092 block_count = __le16_to_cpu(info->blocks); 4093 4094 conn = __hci_conn_lookup_handle(hdev, handle); 4095 if (!conn) 4096 continue; 4097 4098 conn->sent -= block_count; 4099 4100 switch (conn->type) { 4101 case ACL_LINK: 4102 case AMP_LINK: 4103 hdev->block_cnt += block_count; 4104 if (hdev->block_cnt > hdev->num_blocks) 4105 hdev->block_cnt = hdev->num_blocks; 4106 break; 4107 4108 default: 4109 bt_dev_err(hdev, "unknown type %d conn %p", 4110 conn->type, conn); 4111 break; 4112 } 4113 } 4114 4115 queue_work(hdev->workqueue, &hdev->tx_work); 4116 } 4117 4118 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb) 4119 { 4120 struct hci_ev_mode_change *ev = (void *) skb->data; 4121 struct hci_conn *conn; 4122 4123 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 4124 4125 hci_dev_lock(hdev); 4126 4127 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 4128 if (conn) { 4129 conn->mode = ev->mode; 4130 4131 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, 4132 &conn->flags)) { 4133 if (conn->mode == HCI_CM_ACTIVE) 4134 set_bit(HCI_CONN_POWER_SAVE, &conn->flags); 4135 else 4136 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags); 4137 } 4138 4139 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags)) 4140 hci_sco_setup(conn, ev->status); 4141 } 4142 4143 hci_dev_unlock(hdev); 4144 } 4145 4146 static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb) 4147 { 4148 struct hci_ev_pin_code_req *ev = (void *) skb->data; 4149 struct hci_conn *conn; 4150 4151 BT_DBG("%s", hdev->name); 4152 4153 hci_dev_lock(hdev); 4154 4155 conn = 
hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 4156 if (!conn) 4157 goto unlock; 4158 4159 if (conn->state == BT_CONNECTED) { 4160 hci_conn_hold(conn); 4161 conn->disc_timeout = HCI_PAIRING_TIMEOUT; 4162 hci_conn_drop(conn); 4163 } 4164 4165 if (!hci_dev_test_flag(hdev, HCI_BONDABLE) && 4166 !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) { 4167 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY, 4168 sizeof(ev->bdaddr), &ev->bdaddr); 4169 } else if (hci_dev_test_flag(hdev, HCI_MGMT)) { 4170 u8 secure; 4171 4172 if (conn->pending_sec_level == BT_SECURITY_HIGH) 4173 secure = 1; 4174 else 4175 secure = 0; 4176 4177 mgmt_pin_code_request(hdev, &ev->bdaddr, secure); 4178 } 4179 4180 unlock: 4181 hci_dev_unlock(hdev); 4182 } 4183 4184 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len) 4185 { 4186 if (key_type == HCI_LK_CHANGED_COMBINATION) 4187 return; 4188 4189 conn->pin_length = pin_len; 4190 conn->key_type = key_type; 4191 4192 switch (key_type) { 4193 case HCI_LK_LOCAL_UNIT: 4194 case HCI_LK_REMOTE_UNIT: 4195 case HCI_LK_DEBUG_COMBINATION: 4196 return; 4197 case HCI_LK_COMBINATION: 4198 if (pin_len == 16) 4199 conn->pending_sec_level = BT_SECURITY_HIGH; 4200 else 4201 conn->pending_sec_level = BT_SECURITY_MEDIUM; 4202 break; 4203 case HCI_LK_UNAUTH_COMBINATION_P192: 4204 case HCI_LK_UNAUTH_COMBINATION_P256: 4205 conn->pending_sec_level = BT_SECURITY_MEDIUM; 4206 break; 4207 case HCI_LK_AUTH_COMBINATION_P192: 4208 conn->pending_sec_level = BT_SECURITY_HIGH; 4209 break; 4210 case HCI_LK_AUTH_COMBINATION_P256: 4211 conn->pending_sec_level = BT_SECURITY_FIPS; 4212 break; 4213 } 4214 } 4215 4216 static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb) 4217 { 4218 struct hci_ev_link_key_req *ev = (void *) skb->data; 4219 struct hci_cp_link_key_reply cp; 4220 struct hci_conn *conn; 4221 struct link_key *key; 4222 4223 BT_DBG("%s", hdev->name); 4224 4225 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 4226 return; 4227 4228 hci_dev_lock(hdev); 4229 4230 key = hci_find_link_key(hdev, &ev->bdaddr); 4231 if (!key) { 4232 BT_DBG("%s link key not found for %pMR", hdev->name, 4233 &ev->bdaddr); 4234 goto not_found; 4235 } 4236 4237 BT_DBG("%s found key type %u for %pMR", hdev->name, key->type, 4238 &ev->bdaddr); 4239 4240 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 4241 if (conn) { 4242 clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags); 4243 4244 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 || 4245 key->type == HCI_LK_UNAUTH_COMBINATION_P256) && 4246 conn->auth_type != 0xff && (conn->auth_type & 0x01)) { 4247 BT_DBG("%s ignoring unauthenticated key", hdev->name); 4248 goto not_found; 4249 } 4250 4251 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 && 4252 (conn->pending_sec_level == BT_SECURITY_HIGH || 4253 conn->pending_sec_level == BT_SECURITY_FIPS)) { 4254 BT_DBG("%s ignoring key unauthenticated for high security", 4255 hdev->name); 4256 goto not_found; 4257 } 4258 4259 conn_set_key(conn, key->type, key->pin_len); 4260 } 4261 4262 bacpy(&cp.bdaddr, &ev->bdaddr); 4263 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE); 4264 4265 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp); 4266 4267 hci_dev_unlock(hdev); 4268 4269 return; 4270 4271 not_found: 4272 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr); 4273 hci_dev_unlock(hdev); 4274 } 4275 4276 static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb) 4277 { 4278 struct hci_ev_link_key_notify *ev = (void *) skb->data; 4279 struct hci_conn *conn; 
4280 struct link_key *key; 4281 bool persistent; 4282 u8 pin_len = 0; 4283 4284 BT_DBG("%s", hdev->name); 4285 4286 hci_dev_lock(hdev); 4287 4288 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 4289 if (!conn) 4290 goto unlock; 4291 4292 hci_conn_hold(conn); 4293 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 4294 hci_conn_drop(conn); 4295 4296 set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags); 4297 conn_set_key(conn, ev->key_type, conn->pin_length); 4298 4299 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 4300 goto unlock; 4301 4302 key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key, 4303 ev->key_type, pin_len, &persistent); 4304 if (!key) 4305 goto unlock; 4306 4307 /* Update connection information since adding the key will have 4308 * fixed up the type in the case of changed combination keys. 4309 */ 4310 if (ev->key_type == HCI_LK_CHANGED_COMBINATION) 4311 conn_set_key(conn, key->type, key->pin_len); 4312 4313 mgmt_new_link_key(hdev, key, persistent); 4314 4315 /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag 4316 * is set. If it's not set simply remove the key from the kernel 4317 * list (we've still notified user space about it but with 4318 * store_hint being 0). 4319 */ 4320 if (key->type == HCI_LK_DEBUG_COMBINATION && 4321 !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) { 4322 list_del_rcu(&key->list); 4323 kfree_rcu(key, rcu); 4324 goto unlock; 4325 } 4326 4327 if (persistent) 4328 clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags); 4329 else 4330 set_bit(HCI_CONN_FLUSH_KEY, &conn->flags); 4331 4332 unlock: 4333 hci_dev_unlock(hdev); 4334 } 4335 4336 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb) 4337 { 4338 struct hci_ev_clock_offset *ev = (void *) skb->data; 4339 struct hci_conn *conn; 4340 4341 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 4342 4343 hci_dev_lock(hdev); 4344 4345 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 4346 if (conn && !ev->status) { 4347 struct inquiry_entry *ie; 4348 4349 ie = hci_inquiry_cache_lookup(hdev, &conn->dst); 4350 if (ie) { 4351 ie->data.clock_offset = ev->clock_offset; 4352 ie->timestamp = jiffies; 4353 } 4354 } 4355 4356 hci_dev_unlock(hdev); 4357 } 4358 4359 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb) 4360 { 4361 struct hci_ev_pkt_type_change *ev = (void *) skb->data; 4362 struct hci_conn *conn; 4363 4364 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 4365 4366 hci_dev_lock(hdev); 4367 4368 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 4369 if (conn && !ev->status) 4370 conn->pkt_type = __le16_to_cpu(ev->pkt_type); 4371 4372 hci_dev_unlock(hdev); 4373 } 4374 4375 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb) 4376 { 4377 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data; 4378 struct inquiry_entry *ie; 4379 4380 BT_DBG("%s", hdev->name); 4381 4382 hci_dev_lock(hdev); 4383 4384 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr); 4385 if (ie) { 4386 ie->data.pscan_rep_mode = ev->pscan_rep_mode; 4387 ie->timestamp = jiffies; 4388 } 4389 4390 hci_dev_unlock(hdev); 4391 } 4392 4393 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, 4394 struct sk_buff *skb) 4395 { 4396 struct inquiry_data data; 4397 int num_rsp = *((__u8 *) skb->data); 4398 4399 BT_DBG("%s num_rsp %d", hdev->name, num_rsp); 4400 4401 if (!num_rsp) 4402 return; 4403 4404 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) 4405 return; 4406 4407 hci_dev_lock(hdev); 4408 4409 if ((skb->len - 1) / num_rsp 
!= sizeof(struct inquiry_info_with_rssi)) { 4410 struct inquiry_info_with_rssi_and_pscan_mode *info; 4411 info = (void *) (skb->data + 1); 4412 4413 if (skb->len < num_rsp * sizeof(*info) + 1) 4414 goto unlock; 4415 4416 for (; num_rsp; num_rsp--, info++) { 4417 u32 flags; 4418 4419 bacpy(&data.bdaddr, &info->bdaddr); 4420 data.pscan_rep_mode = info->pscan_rep_mode; 4421 data.pscan_period_mode = info->pscan_period_mode; 4422 data.pscan_mode = info->pscan_mode; 4423 memcpy(data.dev_class, info->dev_class, 3); 4424 data.clock_offset = info->clock_offset; 4425 data.rssi = info->rssi; 4426 data.ssp_mode = 0x00; 4427 4428 flags = hci_inquiry_cache_update(hdev, &data, false); 4429 4430 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 4431 info->dev_class, info->rssi, 4432 flags, NULL, 0, NULL, 0); 4433 } 4434 } else { 4435 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1); 4436 4437 if (skb->len < num_rsp * sizeof(*info) + 1) 4438 goto unlock; 4439 4440 for (; num_rsp; num_rsp--, info++) { 4441 u32 flags; 4442 4443 bacpy(&data.bdaddr, &info->bdaddr); 4444 data.pscan_rep_mode = info->pscan_rep_mode; 4445 data.pscan_period_mode = info->pscan_period_mode; 4446 data.pscan_mode = 0x00; 4447 memcpy(data.dev_class, info->dev_class, 3); 4448 data.clock_offset = info->clock_offset; 4449 data.rssi = info->rssi; 4450 data.ssp_mode = 0x00; 4451 4452 flags = hci_inquiry_cache_update(hdev, &data, false); 4453 4454 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 4455 info->dev_class, info->rssi, 4456 flags, NULL, 0, NULL, 0); 4457 } 4458 } 4459 4460 unlock: 4461 hci_dev_unlock(hdev); 4462 } 4463 4464 static void hci_remote_ext_features_evt(struct hci_dev *hdev, 4465 struct sk_buff *skb) 4466 { 4467 struct hci_ev_remote_ext_features *ev = (void *) skb->data; 4468 struct hci_conn *conn; 4469 4470 BT_DBG("%s", hdev->name); 4471 4472 hci_dev_lock(hdev); 4473 4474 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 4475 if (!conn) 4476 goto unlock; 4477 4478 if (ev->page < HCI_MAX_PAGES) 4479 memcpy(conn->features[ev->page], ev->features, 8); 4480 4481 if (!ev->status && ev->page == 0x01) { 4482 struct inquiry_entry *ie; 4483 4484 ie = hci_inquiry_cache_lookup(hdev, &conn->dst); 4485 if (ie) 4486 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP); 4487 4488 if (ev->features[0] & LMP_HOST_SSP) { 4489 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags); 4490 } else { 4491 /* It is mandatory by the Bluetooth specification that 4492 * Extended Inquiry Results are only used when Secure 4493 * Simple Pairing is enabled, but some devices violate 4494 * this. 
4495 * 4496 * To make these devices work, the internal SSP 4497 * enabled flag needs to be cleared if the remote host 4498 * features do not indicate SSP support. */ 4499 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags); 4500 } 4501 4502 if (ev->features[0] & LMP_HOST_SC) 4503 set_bit(HCI_CONN_SC_ENABLED, &conn->flags); 4504 } 4505 4506 if (conn->state != BT_CONFIG) 4507 goto unlock; 4508 4509 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) { 4510 struct hci_cp_remote_name_req cp; 4511 memset(&cp, 0, sizeof(cp)); 4512 bacpy(&cp.bdaddr, &conn->dst); 4513 cp.pscan_rep_mode = 0x02; 4514 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp); 4515 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) 4516 mgmt_device_connected(hdev, conn, NULL, 0); 4517 4518 if (!hci_outgoing_auth_needed(hdev, conn)) { 4519 conn->state = BT_CONNECTED; 4520 hci_connect_cfm(conn, ev->status); 4521 hci_conn_drop(conn); 4522 } 4523 4524 unlock: 4525 hci_dev_unlock(hdev); 4526 } 4527 4528 static void hci_sync_conn_complete_evt(struct hci_dev *hdev, 4529 struct sk_buff *skb) 4530 { 4531 struct hci_ev_sync_conn_complete *ev = (void *) skb->data; 4532 struct hci_conn *conn; 4533 4534 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 4535 4536 hci_dev_lock(hdev); 4537 4538 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); 4539 if (!conn) { 4540 if (ev->link_type == ESCO_LINK) 4541 goto unlock; 4542 4543 /* When the link type in the event indicates SCO connection 4544 * and lookup of the connection object fails, then check 4545 * if an eSCO connection object exists. 4546 * 4547 * The core limits the synchronous connections to either 4548 * SCO or eSCO. The eSCO connection is preferred and its 4549 * setup is attempted first; until successfully established, 4550 * the link type will be hinted as eSCO. 4551 */ 4552 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr); 4553 if (!conn) 4554 goto unlock; 4555 } 4556 4557 switch (ev->status) { 4558 case 0x00: 4559 /* The synchronous connection complete event should only be 4560 * sent once per new connection. Receiving a successful 4561 * complete event when the connection status is already 4562 * BT_CONNECTED means that the device is misbehaving and sent 4563 * multiple complete event packets for the same new connection. 4564 * 4565 * Registering the device more than once can corrupt kernel 4566 * memory, hence upon detecting this invalid event, we report 4567 * an error and ignore the packet.
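 *
 * A hypothetical trace illustrating the problem (handle 0x0001 is
 * made up for the example): the controller sends
 * HCI_EV_SYNC_CONN_COMPLETE (status 0x00, handle 0x0001) twice for
 * the same link; without this check, the second event would run
 * hci_debugfs_create_conn()/hci_conn_add_sysfs() again for an
 * already registered hci_conn.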
4568 */ 4569 if (conn->state == BT_CONNECTED) { 4570 bt_dev_err(hdev, "Ignoring connect complete event for existing connection"); 4571 goto unlock; 4572 } 4573 4574 conn->handle = __le16_to_cpu(ev->handle); 4575 conn->state = BT_CONNECTED; 4576 conn->type = ev->link_type; 4577 4578 hci_debugfs_create_conn(conn); 4579 hci_conn_add_sysfs(conn); 4580 break; 4581 4582 case 0x10: /* Connection Accept Timeout */ 4583 case 0x0d: /* Connection Rejected due to Limited Resources */ 4584 case 0x11: /* Unsupported Feature or Parameter Value */ 4585 case 0x1c: /* SCO interval rejected */ 4586 case 0x1a: /* Unsupported Remote Feature */ 4587 case 0x1e: /* Invalid LMP Parameters */ 4588 case 0x1f: /* Unspecified error */ 4589 case 0x20: /* Unsupported LMP Parameter value */ 4590 if (conn->out) { 4591 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) | 4592 (hdev->esco_type & EDR_ESCO_MASK); 4593 if (hci_setup_sync(conn, conn->link->handle)) 4594 goto unlock; 4595 } 4596 fallthrough; 4597 4598 default: 4599 conn->state = BT_CLOSED; 4600 break; 4601 } 4602 4603 bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode); 4604 /* Notify only when the SCO data path goes over the HCI transport, 4605 * indicated by a data_path of zero; any non-zero value denotes a 4606 * non-HCI transport data path. 4607 */ if (conn->codec.data_path == 0 && hdev->notify) { 4608 switch (ev->air_mode) { 4609 case 0x02: 4610 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD); 4611 break; 4612 case 0x03: 4613 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP); 4614 break; 4615 } 4616 } 4617 4618 hci_connect_cfm(conn, ev->status); 4619 if (ev->status) 4620 hci_conn_del(conn); 4621 4622 unlock: 4623 hci_dev_unlock(hdev); 4624 } 4625 4626 static inline size_t eir_get_length(u8 *eir, size_t eir_len) 4627 { 4628 size_t parsed = 0; 4629 4630 while (parsed < eir_len) { 4631 u8 field_len = eir[0]; 4632 4633 if (field_len == 0) 4634 return parsed; 4635 4636 parsed += field_len + 1; 4637 eir += field_len + 1; 4638 } 4639 4640 return eir_len; 4641 } 4642 4643 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, 4644 struct sk_buff *skb) 4645 { 4646 struct inquiry_data data; 4647 struct extended_inquiry_info *info = (void *) (skb->data + 1); 4648 int num_rsp = *((__u8 *) skb->data); 4649 size_t eir_len; 4650 4651 BT_DBG("%s num_rsp %d", hdev->name, num_rsp); 4652 4653 if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1) 4654 return; 4655 4656 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) 4657 return; 4658 4659 hci_dev_lock(hdev); 4660 4661 for (; num_rsp; num_rsp--, info++) { 4662 u32 flags; 4663 bool name_known; 4664 4665 bacpy(&data.bdaddr, &info->bdaddr); 4666 data.pscan_rep_mode = info->pscan_rep_mode; 4667 data.pscan_period_mode = info->pscan_period_mode; 4668 data.pscan_mode = 0x00; 4669 memcpy(data.dev_class, info->dev_class, 3); 4670 data.clock_offset = info->clock_offset; 4671 data.rssi = info->rssi; 4672 data.ssp_mode = 0x01; 4673 4674 if (hci_dev_test_flag(hdev, HCI_MGMT)) 4675 name_known = eir_get_data(info->data, 4676 sizeof(info->data), 4677 EIR_NAME_COMPLETE, NULL); 4678 else 4679 name_known = true; 4680 4681 flags = hci_inquiry_cache_update(hdev, &data, name_known); 4682 4683 eir_len = eir_get_length(info->data, sizeof(info->data)); 4684 4685 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 4686 info->dev_class, info->rssi, 4687 flags, info->data, eir_len, NULL, 0); 4688 } 4689 4690 hci_dev_unlock(hdev); 4691 } 4692 4693 static void hci_key_refresh_complete_evt(struct hci_dev *hdev, 4694 struct sk_buff *skb) 4695 { 4696 struct
hci_ev_key_refresh_complete *ev = (void *) skb->data; 4697 struct hci_conn *conn; 4698 4699 BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status, 4700 __le16_to_cpu(ev->handle)); 4701 4702 hci_dev_lock(hdev); 4703 4704 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 4705 if (!conn) 4706 goto unlock; 4707 4708 /* For BR/EDR the necessary steps are taken through the 4709 * auth_complete event. 4710 */ 4711 if (conn->type != LE_LINK) 4712 goto unlock; 4713 4714 if (!ev->status) 4715 conn->sec_level = conn->pending_sec_level; 4716 4717 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); 4718 4719 if (ev->status && conn->state == BT_CONNECTED) { 4720 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE); 4721 hci_conn_drop(conn); 4722 goto unlock; 4723 } 4724 4725 if (conn->state == BT_CONFIG) { 4726 if (!ev->status) 4727 conn->state = BT_CONNECTED; 4728 4729 hci_connect_cfm(conn, ev->status); 4730 hci_conn_drop(conn); 4731 } else { 4732 hci_auth_cfm(conn, ev->status); 4733 4734 hci_conn_hold(conn); 4735 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 4736 hci_conn_drop(conn); 4737 } 4738 4739 unlock: 4740 hci_dev_unlock(hdev); 4741 } 4742 4743 static u8 hci_get_auth_req(struct hci_conn *conn) 4744 { 4745 /* If the remote requests no-bonding, follow that lead */ 4746 if (conn->remote_auth == HCI_AT_NO_BONDING || 4747 conn->remote_auth == HCI_AT_NO_BONDING_MITM) 4748 return conn->remote_auth | (conn->auth_type & 0x01); 4749 4750 /* If both remote and local have enough IO capabilities, require 4751 * MITM protection 4752 */ 4753 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT && 4754 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT) 4755 return conn->remote_auth | 0x01; 4756 4757 /* No MITM protection possible so ignore remote requirement */ 4758 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01); 4759 } 4760 4761 static u8 bredr_oob_data_present(struct hci_conn *conn) 4762 { 4763 struct hci_dev *hdev = conn->hdev; 4764 struct oob_data *data; 4765 4766 data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR); 4767 if (!data) 4768 return 0x00; 4769 4770 if (bredr_sc_enabled(hdev)) { 4771 /* When Secure Connections is enabled, then just 4772 * return the present value stored with the OOB 4773 * data. The stored value contains the right present 4774 * information. However it can only be trusted when 4775 * not in Secure Connection Only mode. 4776 */ 4777 if (!hci_dev_test_flag(hdev, HCI_SC_ONLY)) 4778 return data->present; 4779 4780 /* When Secure Connections Only mode is enabled, then 4781 * the P-256 values are required. If they are not 4782 * available, then do not declare that OOB data is 4783 * present. 4784 */ 4785 if (!memcmp(data->rand256, ZERO_KEY, 16) || 4786 !memcmp(data->hash256, ZERO_KEY, 16)) 4787 return 0x00; 4788 4789 return 0x02; 4790 } 4791 4792 /* When Secure Connections is not enabled or actually 4793 * not supported by the hardware, then check if the 4794 * P-192 data values are present.
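 *
 * The return value mirrors the OOB Data Present field of the HCI
 * IO Capability Request Reply command: 0x00 = no OOB data,
 * 0x01 = P-192 values only, 0x02 = P-256 values only.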
4795 */ 4796 if (!memcmp(data->rand192, ZERO_KEY, 16) || 4797 !memcmp(data->hash192, ZERO_KEY, 16)) 4798 return 0x00; 4799 4800 return 0x01; 4801 } 4802 4803 static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb) 4804 { 4805 struct hci_ev_io_capa_request *ev = (void *) skb->data; 4806 struct hci_conn *conn; 4807 4808 BT_DBG("%s", hdev->name); 4809 4810 hci_dev_lock(hdev); 4811 4812 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 4813 if (!conn) 4814 goto unlock; 4815 4816 hci_conn_hold(conn); 4817 4818 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 4819 goto unlock; 4820 4821 /* Allow pairing if we're bondable, if we're the initiator of the 4822 * pairing, or if the remote is not requesting bonding. 4823 */ 4824 if (hci_dev_test_flag(hdev, HCI_BONDABLE) || 4825 test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) || 4826 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) { 4827 struct hci_cp_io_capability_reply cp; 4828 4829 bacpy(&cp.bdaddr, &ev->bdaddr); 4830 /* Change the IO capability from KeyboardDisplay to 4831 * DisplayYesNo, as the former is not supported by the BT spec. */ 4832 cp.capability = (conn->io_capability == 0x04) ? 4833 HCI_IO_DISPLAY_YESNO : conn->io_capability; 4834 4835 /* If we are the initiator, there is no remote information yet */ 4836 if (conn->remote_auth == 0xff) { 4837 /* Request MITM protection if our IO caps allow it 4838 * except for the no-bonding case. 4839 */ 4840 if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT && 4841 conn->auth_type != HCI_AT_NO_BONDING) 4842 conn->auth_type |= 0x01; 4843 } else { 4844 conn->auth_type = hci_get_auth_req(conn); 4845 } 4846 4847 /* If we're not bondable, force one of the non-bondable 4848 * authentication requirement values. 4849 */ 4850 if (!hci_dev_test_flag(hdev, HCI_BONDABLE)) 4851 conn->auth_type &= HCI_AT_NO_BONDING_MITM; 4852 4853 cp.authentication = conn->auth_type; 4854 cp.oob_data = bredr_oob_data_present(conn); 4855 4856 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY, 4857 sizeof(cp), &cp); 4858 } else { 4859 struct hci_cp_io_capability_neg_reply cp; 4860 4861 bacpy(&cp.bdaddr, &ev->bdaddr); 4862 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED; 4863 4864 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY, 4865 sizeof(cp), &cp); 4866 } 4867 4868 unlock: 4869 hci_dev_unlock(hdev); 4870 } 4871 4872 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb) 4873 { 4874 struct hci_ev_io_capa_reply *ev = (void *) skb->data; 4875 struct hci_conn *conn; 4876 4877 BT_DBG("%s", hdev->name); 4878 4879 hci_dev_lock(hdev); 4880 4881 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 4882 if (!conn) 4883 goto unlock; 4884 4885 conn->remote_cap = ev->capability; 4886 conn->remote_auth = ev->authentication; 4887 4888 unlock: 4889 hci_dev_unlock(hdev); 4890 } 4891 4892 static void hci_user_confirm_request_evt(struct hci_dev *hdev, 4893 struct sk_buff *skb) 4894 { 4895 struct hci_ev_user_confirm_req *ev = (void *) skb->data; 4896 int loc_mitm, rem_mitm, confirm_hint = 0; 4897 struct hci_conn *conn; 4898 4899 BT_DBG("%s", hdev->name); 4900 4901 hci_dev_lock(hdev); 4902 4903 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 4904 goto unlock; 4905 4906 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 4907 if (!conn) 4908 goto unlock; 4909 4910 loc_mitm = (conn->auth_type & 0x01); 4911 rem_mitm = (conn->remote_auth & 0x01); 4912 4913 /* If we require MITM but the remote device can't provide that 4914 * (it has NoInputNoOutput) then reject the confirmation 4915 * request.
We check the security level here since it doesn't 4916 * necessarily match conn->auth_type. 4917 */ 4918 if (conn->pending_sec_level > BT_SECURITY_MEDIUM && 4919 conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) { 4920 BT_DBG("Rejecting request: remote device can't provide MITM"); 4921 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY, 4922 sizeof(ev->bdaddr), &ev->bdaddr); 4923 goto unlock; 4924 } 4925 4926 /* If neither side requires MITM protection, auto-accept */ 4927 if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) && 4928 (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) { 4929 4930 /* If we're not the initiator, request authorization to 4931 * proceed from user space (mgmt_user_confirm with 4932 * confirm_hint set to 1). The exception is if neither 4933 * side had MITM or if the local IO capability is 4934 * NoInputNoOutput, in which case we do auto-accept 4935 */ 4936 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && 4937 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT && 4938 (loc_mitm || rem_mitm)) { 4939 BT_DBG("Confirming auto-accept as acceptor"); 4940 confirm_hint = 1; 4941 goto confirm; 4942 } 4943 4944 /* If a link key already exists on the local host, leave the 4945 * decision to user space since the remote device could be 4946 * legitimate or malicious. 4947 */ 4948 if (hci_find_link_key(hdev, &ev->bdaddr)) { 4949 bt_dev_dbg(hdev, "Local host already has link key"); 4950 confirm_hint = 1; 4951 goto confirm; 4952 } 4953 4954 BT_DBG("Auto-accept of user confirmation with %ums delay", 4955 hdev->auto_accept_delay); 4956 4957 if (hdev->auto_accept_delay > 0) { 4958 int delay = msecs_to_jiffies(hdev->auto_accept_delay); 4959 queue_delayed_work(conn->hdev->workqueue, 4960 &conn->auto_accept_work, delay); 4961 goto unlock; 4962 } 4963 4964 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, 4965 sizeof(ev->bdaddr), &ev->bdaddr); 4966 goto unlock; 4967 } 4968 4969 confirm: 4970 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, 4971 le32_to_cpu(ev->passkey), confirm_hint); 4972 4973 unlock: 4974 hci_dev_unlock(hdev); 4975 } 4976 4977 static void hci_user_passkey_request_evt(struct hci_dev *hdev, 4978 struct sk_buff *skb) 4979 { 4980 struct hci_ev_user_passkey_req *ev = (void *) skb->data; 4981 4982 BT_DBG("%s", hdev->name); 4983 4984 if (hci_dev_test_flag(hdev, HCI_MGMT)) 4985 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0); 4986 } 4987 4988 static void hci_user_passkey_notify_evt(struct hci_dev *hdev, 4989 struct sk_buff *skb) 4990 { 4991 struct hci_ev_user_passkey_notify *ev = (void *) skb->data; 4992 struct hci_conn *conn; 4993 4994 BT_DBG("%s", hdev->name); 4995 4996 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 4997 if (!conn) 4998 return; 4999 5000 conn->passkey_notify = __le32_to_cpu(ev->passkey); 5001 conn->passkey_entered = 0; 5002 5003 if (hci_dev_test_flag(hdev, HCI_MGMT)) 5004 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type, 5005 conn->dst_type, conn->passkey_notify, 5006 conn->passkey_entered); 5007 } 5008 5009 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb) 5010 { 5011 struct hci_ev_keypress_notify *ev = (void *) skb->data; 5012 struct hci_conn *conn; 5013 5014 BT_DBG("%s", hdev->name); 5015 5016 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 5017 if (!conn) 5018 return; 5019 5020 switch (ev->type) { 5021 case HCI_KEYPRESS_STARTED: 5022 conn->passkey_entered = 0; 5023 return; 5024 5025 case HCI_KEYPRESS_ENTERED: 5026 conn->passkey_entered++; 5027 break; 5028 5029 case
HCI_KEYPRESS_ERASED: 5030 conn->passkey_entered--; 5031 break; 5032 5033 case HCI_KEYPRESS_CLEARED: 5034 conn->passkey_entered = 0; 5035 break; 5036 5037 case HCI_KEYPRESS_COMPLETED: 5038 return; 5039 } 5040 5041 if (hci_dev_test_flag(hdev, HCI_MGMT)) 5042 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type, 5043 conn->dst_type, conn->passkey_notify, 5044 conn->passkey_entered); 5045 } 5046 5047 static void hci_simple_pair_complete_evt(struct hci_dev *hdev, 5048 struct sk_buff *skb) 5049 { 5050 struct hci_ev_simple_pair_complete *ev = (void *) skb->data; 5051 struct hci_conn *conn; 5052 5053 BT_DBG("%s", hdev->name); 5054 5055 hci_dev_lock(hdev); 5056 5057 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 5058 if (!conn) 5059 goto unlock; 5060 5061 /* Reset the authentication requirement to unknown */ 5062 conn->remote_auth = 0xff; 5063 5064 /* To avoid duplicate auth_failed events to user space we check 5065 * the HCI_CONN_AUTH_PEND flag which will be set if we 5066 * initiated the authentication. A traditional auth_complete 5067 * event is always produced as initiator and is also mapped to 5068 * the mgmt_auth_failed event */ 5069 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status) 5070 mgmt_auth_failed(conn, ev->status); 5071 5072 hci_conn_drop(conn); 5073 5074 unlock: 5075 hci_dev_unlock(hdev); 5076 } 5077 5078 static void hci_remote_host_features_evt(struct hci_dev *hdev, 5079 struct sk_buff *skb) 5080 { 5081 struct hci_ev_remote_host_features *ev = (void *) skb->data; 5082 struct inquiry_entry *ie; 5083 struct hci_conn *conn; 5084 5085 BT_DBG("%s", hdev->name); 5086 5087 hci_dev_lock(hdev); 5088 5089 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 5090 if (conn) 5091 memcpy(conn->features[1], ev->features, 8); 5092 5093 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr); 5094 if (ie) 5095 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP); 5096 5097 hci_dev_unlock(hdev); 5098 } 5099 5100 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev, 5101 struct sk_buff *skb) 5102 { 5103 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data; 5104 struct oob_data *data; 5105 5106 BT_DBG("%s", hdev->name); 5107 5108 hci_dev_lock(hdev); 5109 5110 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 5111 goto unlock; 5112 5113 data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR); 5114 if (!data) { 5115 struct hci_cp_remote_oob_data_neg_reply cp; 5116 5117 bacpy(&cp.bdaddr, &ev->bdaddr); 5118 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, 5119 sizeof(cp), &cp); 5120 goto unlock; 5121 } 5122 5123 if (bredr_sc_enabled(hdev)) { 5124 struct hci_cp_remote_oob_ext_data_reply cp; 5125 5126 bacpy(&cp.bdaddr, &ev->bdaddr); 5127 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) { 5128 memset(cp.hash192, 0, sizeof(cp.hash192)); 5129 memset(cp.rand192, 0, sizeof(cp.rand192)); 5130 } else { 5131 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192)); 5132 memcpy(cp.rand192, data->rand192, sizeof(cp.rand192)); 5133 } 5134 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256)); 5135 memcpy(cp.rand256, data->rand256, sizeof(cp.rand256)); 5136 5137 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY, 5138 sizeof(cp), &cp); 5139 } else { 5140 struct hci_cp_remote_oob_data_reply cp; 5141 5142 bacpy(&cp.bdaddr, &ev->bdaddr); 5143 memcpy(cp.hash, data->hash192, sizeof(cp.hash)); 5144 memcpy(cp.rand, data->rand192, sizeof(cp.rand)); 5145 5146 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, 5147 sizeof(cp), &cp); 5148 } 5149 5150 unlock: 5151 hci_dev_unlock(hdev);
5152 } 5153 5154 #if IS_ENABLED(CONFIG_BT_HS) 5155 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb) 5156 { 5157 struct hci_ev_channel_selected *ev = (void *)skb->data; 5158 struct hci_conn *hcon; 5159 5160 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle); 5161 5162 skb_pull(skb, sizeof(*ev)); 5163 5164 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle); 5165 if (!hcon) 5166 return; 5167 5168 amp_read_loc_assoc_final_data(hdev, hcon); 5169 } 5170 5171 static void hci_phy_link_complete_evt(struct hci_dev *hdev, 5172 struct sk_buff *skb) 5173 { 5174 struct hci_ev_phy_link_complete *ev = (void *) skb->data; 5175 struct hci_conn *hcon, *bredr_hcon; 5176 5177 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle, 5178 ev->status); 5179 5180 hci_dev_lock(hdev); 5181 5182 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle); 5183 if (!hcon) 5184 goto unlock; 5185 5186 if (!hcon->amp_mgr) 5187 goto unlock; 5188 5189 if (ev->status) { 5190 hci_conn_del(hcon); 5191 goto unlock; 5192 } 5193 5194 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon; 5195 5196 hcon->state = BT_CONNECTED; 5197 bacpy(&hcon->dst, &bredr_hcon->dst); 5198 5199 hci_conn_hold(hcon); 5200 hcon->disc_timeout = HCI_DISCONN_TIMEOUT; 5201 hci_conn_drop(hcon); 5202 5203 hci_debugfs_create_conn(hcon); 5204 hci_conn_add_sysfs(hcon); 5205 5206 amp_physical_cfm(bredr_hcon, hcon); 5207 5208 unlock: 5209 hci_dev_unlock(hdev); 5210 } 5211 5212 static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 5213 { 5214 struct hci_ev_logical_link_complete *ev = (void *) skb->data; 5215 struct hci_conn *hcon; 5216 struct hci_chan *hchan; 5217 struct amp_mgr *mgr; 5218 5219 BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x", 5220 hdev->name, le16_to_cpu(ev->handle), ev->phy_handle, 5221 ev->status); 5222 5223 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle); 5224 if (!hcon) 5225 return; 5226 5227 /* Create AMP hchan */ 5228 hchan = hci_chan_create(hcon); 5229 if (!hchan) 5230 return; 5231 5232 hchan->handle = le16_to_cpu(ev->handle); 5233 hchan->amp = true; 5234 5235 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan); 5236 5237 mgr = hcon->amp_mgr; 5238 if (mgr && mgr->bredr_chan) { 5239 struct l2cap_chan *bredr_chan = mgr->bredr_chan; 5240 5241 l2cap_chan_lock(bredr_chan); 5242 5243 bredr_chan->conn->mtu = hdev->block_mtu; 5244 l2cap_logical_cfm(bredr_chan, hchan, 0); 5245 hci_conn_hold(hcon); 5246 5247 l2cap_chan_unlock(bredr_chan); 5248 } 5249 } 5250 5251 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev, 5252 struct sk_buff *skb) 5253 { 5254 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data; 5255 struct hci_chan *hchan; 5256 5257 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name, 5258 le16_to_cpu(ev->handle), ev->status); 5259 5260 if (ev->status) 5261 return; 5262 5263 hci_dev_lock(hdev); 5264 5265 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle)); 5266 if (!hchan || !hchan->amp) 5267 goto unlock; 5268 5269 amp_destroy_logical_link(hchan, ev->reason); 5270 5271 unlock: 5272 hci_dev_unlock(hdev); 5273 } 5274 5275 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev, 5276 struct sk_buff *skb) 5277 { 5278 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data; 5279 struct hci_conn *hcon; 5280 5281 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 5282 5283 if (ev->status) 5284 return; 5285 5286 hci_dev_lock(hdev); 5287 5288 hcon = 
hci_conn_hash_lookup_handle(hdev, ev->phy_handle); 5289 if (hcon) { 5290 hcon->state = BT_CLOSED; 5291 hci_conn_del(hcon); 5292 } 5293 5294 hci_dev_unlock(hdev); 5295 } 5296 #endif 5297 5298 static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr, 5299 u8 bdaddr_type, bdaddr_t *local_rpa) 5300 { 5301 if (conn->out) { 5302 conn->dst_type = bdaddr_type; 5303 conn->resp_addr_type = bdaddr_type; 5304 bacpy(&conn->resp_addr, bdaddr); 5305 5306 /* If the controller has set a Local RPA, then it must be 5307 * used instead of hdev->rpa. 5308 */ 5309 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) { 5310 conn->init_addr_type = ADDR_LE_DEV_RANDOM; 5311 bacpy(&conn->init_addr, local_rpa); 5312 } else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) { 5313 conn->init_addr_type = ADDR_LE_DEV_RANDOM; 5314 bacpy(&conn->init_addr, &conn->hdev->rpa); 5315 } else { 5316 hci_copy_identity_address(conn->hdev, &conn->init_addr, 5317 &conn->init_addr_type); 5318 } 5319 } else { 5320 conn->resp_addr_type = conn->hdev->adv_addr_type; 5321 /* If the controller has set a Local RPA, then it must be 5322 * used instead of hdev->rpa. 5323 */ 5324 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) { 5325 conn->resp_addr_type = ADDR_LE_DEV_RANDOM; 5326 bacpy(&conn->resp_addr, local_rpa); 5327 } else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) { 5328 /* In case of ext adv, resp_addr will be updated in 5329 * Adv Terminated event. 5330 */ 5331 if (!ext_adv_capable(conn->hdev)) 5332 bacpy(&conn->resp_addr, 5333 &conn->hdev->random_addr); 5334 } else { 5335 bacpy(&conn->resp_addr, &conn->hdev->bdaddr); 5336 } 5337 5338 conn->init_addr_type = bdaddr_type; 5339 bacpy(&conn->init_addr, bdaddr); 5340 5341 /* For incoming connections, set the default minimum 5342 * and maximum connection interval. They will be used 5343 * to check if the parameters are in range and if not 5344 * trigger the connection update procedure. 5345 */ 5346 conn->le_conn_min_interval = conn->hdev->le_conn_min_interval; 5347 conn->le_conn_max_interval = conn->hdev->le_conn_max_interval; 5348 } 5349 } 5350 5351 static void le_conn_complete_evt(struct hci_dev *hdev, u8 status, 5352 bdaddr_t *bdaddr, u8 bdaddr_type, 5353 bdaddr_t *local_rpa, u8 role, u16 handle, 5354 u16 interval, u16 latency, 5355 u16 supervision_timeout) 5356 { 5357 struct hci_conn_params *params; 5358 struct hci_conn *conn; 5359 struct smp_irk *irk; 5360 u8 addr_type; 5361 5362 hci_dev_lock(hdev); 5363 5364 /* All controllers implicitly stop advertising in the event of a 5365 * connection, so ensure that the state bit is cleared. 5366 */ 5367 hci_dev_clear_flag(hdev, HCI_LE_ADV); 5368 5369 conn = hci_lookup_le_connect(hdev); 5370 if (!conn) { 5371 conn = hci_conn_add(hdev, LE_LINK, bdaddr, role); 5372 if (!conn) { 5373 bt_dev_err(hdev, "no memory for new connection"); 5374 goto unlock; 5375 } 5376 5377 conn->dst_type = bdaddr_type; 5378 5379 /* If we didn't have a hci_conn object previously 5380 * but we're in central role this must be something 5381 * initiated using an accept list. Since accept list based 5382 * connections are not "first class citizens" we don't 5383 * have full tracking of them. Therefore, we go ahead 5384 * with a "best effort" approach of determining the 5385 * initiator address based on the HCI_PRIVACY flag.
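 *
 * A sketch of that fallback (it mirrors le_conn_update_addr() above):
 * HCI_PRIVACY set  -> init_addr = hdev->rpa (random address type),
 * otherwise        -> init_addr = our identity address.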
5386 */ 5387 if (conn->out) { 5388 conn->resp_addr_type = bdaddr_type; 5389 bacpy(&conn->resp_addr, bdaddr); 5390 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) { 5391 conn->init_addr_type = ADDR_LE_DEV_RANDOM; 5392 bacpy(&conn->init_addr, &hdev->rpa); 5393 } else { 5394 hci_copy_identity_address(hdev, 5395 &conn->init_addr, 5396 &conn->init_addr_type); 5397 } 5398 } 5399 } else { 5400 cancel_delayed_work(&conn->le_conn_timeout); 5401 } 5402 5403 le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa); 5404 5405 /* Look up the identity address from the stored connection 5406 * address and address type. 5407 * 5408 * When establishing connections to an identity address, the 5409 * connection procedure will store the resolvable random 5410 * address first. Now if it can be converted back into the 5411 * identity address, start using the identity address from 5412 * now on. 5413 */ 5414 irk = hci_get_irk(hdev, &conn->dst, conn->dst_type); 5415 if (irk) { 5416 bacpy(&conn->dst, &irk->bdaddr); 5417 conn->dst_type = irk->addr_type; 5418 } 5419 5420 conn->dst_type = ev_bdaddr_type(hdev, conn->dst_type, NULL); 5421 5422 if (status) { 5423 hci_le_conn_failed(conn, status); 5424 goto unlock; 5425 } 5426 5427 if (conn->dst_type == ADDR_LE_DEV_PUBLIC) 5428 addr_type = BDADDR_LE_PUBLIC; 5429 else 5430 addr_type = BDADDR_LE_RANDOM; 5431 5432 /* Drop the connection if the device is blocked */ 5433 if (hci_bdaddr_list_lookup(&hdev->reject_list, &conn->dst, addr_type)) { 5434 hci_conn_drop(conn); 5435 goto unlock; 5436 } 5437 5438 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) 5439 mgmt_device_connected(hdev, conn, NULL, 0); 5440 5441 conn->sec_level = BT_SECURITY_LOW; 5442 conn->handle = handle; 5443 conn->state = BT_CONFIG; 5444 5445 /* Store current advertising instance as connection advertising instance 5446 * when software rotation is in use so it can be re-enabled when 5447 * disconnected. 5448 */ 5449 if (!ext_adv_capable(hdev)) 5450 conn->adv_instance = hdev->cur_adv_instance; 5451 5452 conn->le_conn_interval = interval; 5453 conn->le_conn_latency = latency; 5454 conn->le_supv_timeout = supervision_timeout; 5455 5456 hci_debugfs_create_conn(conn); 5457 hci_conn_add_sysfs(conn); 5458 5459 /* The remote features procedure is defined for central 5460 * role only. So the remote features are requested only in 5461 * the case of an initiated (outgoing) connection. 5462 * 5463 * If the local controller supports peripheral-initiated features 5464 * exchange, then requesting the remote features in peripheral 5465 * role is possible. Otherwise just transition into the 5466 * connected state without requesting the remote features.
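 *
 * Roughly:
 *   conn->out (we are central)               -> read remote features
 *   peripheral + HCI_LE_PERIPHERAL_FEATURES  -> read remote features
 *   peripheral otherwise                     -> BT_CONNECTED directly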
5467 */ 5468 if (conn->out || 5469 (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) { 5470 struct hci_cp_le_read_remote_features cp; 5471 5472 cp.handle = __cpu_to_le16(conn->handle); 5473 5474 hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES, 5475 sizeof(cp), &cp); 5476 5477 hci_conn_hold(conn); 5478 } else { 5479 conn->state = BT_CONNECTED; 5480 hci_connect_cfm(conn, status); 5481 } 5482 5483 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst, 5484 conn->dst_type); 5485 if (params) { 5486 list_del_init(&params->action); 5487 if (params->conn) { 5488 hci_conn_drop(params->conn); 5489 hci_conn_put(params->conn); 5490 params->conn = NULL; 5491 } 5492 } 5493 5494 unlock: 5495 hci_update_passive_scan(hdev); 5496 hci_dev_unlock(hdev); 5497 } 5498 5499 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 5500 { 5501 struct hci_ev_le_conn_complete *ev = (void *) skb->data; 5502 5503 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 5504 5505 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type, 5506 NULL, ev->role, le16_to_cpu(ev->handle), 5507 le16_to_cpu(ev->interval), 5508 le16_to_cpu(ev->latency), 5509 le16_to_cpu(ev->supervision_timeout)); 5510 } 5511 5512 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev, 5513 struct sk_buff *skb) 5514 { 5515 struct hci_ev_le_enh_conn_complete *ev = (void *) skb->data; 5516 5517 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 5518 5519 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type, 5520 &ev->local_rpa, ev->role, le16_to_cpu(ev->handle), 5521 le16_to_cpu(ev->interval), 5522 le16_to_cpu(ev->latency), 5523 le16_to_cpu(ev->supervision_timeout)); 5524 } 5525 5526 static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb) 5527 { 5528 struct hci_evt_le_ext_adv_set_term *ev = (void *) skb->data; 5529 struct hci_conn *conn; 5530 struct adv_info *adv, *n; 5531 5532 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 5533 5534 adv = hci_find_adv_instance(hdev, ev->handle); 5535 5536 /* The Bluetooth Core 5.3 specification clearly states that this event 5537 * shall not be sent when the Host disables the advertising set. So in 5538 * case of HCI_ERROR_CANCELLED_BY_HOST, just ignore the event. 5539 * 5540 * When the Host disables an advertising set, all cleanup is done via 5541 * its command callback and does not need to be duplicated here. 5542 */ 5543 if (ev->status == HCI_ERROR_CANCELLED_BY_HOST) { 5544 bt_dev_warn_ratelimited(hdev, "Unexpected advertising set terminated event"); 5545 return; 5546 } 5547 5548 if (ev->status) { 5549 if (!adv) 5550 return; 5551 5552 /* Remove advertising as it has been terminated */ 5553 hci_remove_adv_instance(hdev, ev->handle); 5554 mgmt_advertising_removed(NULL, hdev, ev->handle); 5555 5556 list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) { 5557 if (adv->enabled) 5558 return; 5559 } 5560 5561 /* We are no longer advertising, clear HCI_LE_ADV */ 5562 hci_dev_clear_flag(hdev, HCI_LE_ADV); 5563 return; 5564 } 5565 5566 if (adv) 5567 adv->enabled = false; 5568 5569 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle)); 5570 if (conn) { 5571 /* Store handle in the connection so the correct advertising 5572 * instance can be re-enabled when disconnected.
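 * (When the terminated set has handle 0x00, resp_addr falls back to
 * hdev->random_addr below; otherwise the instance's own random_addr
 * is used.)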
5573 */ 5574 conn->adv_instance = ev->handle; 5575 5576 if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM || 5577 bacmp(&conn->resp_addr, BDADDR_ANY)) 5578 return; 5579 5580 if (!ev->handle) { 5581 bacpy(&conn->resp_addr, &hdev->random_addr); 5582 return; 5583 } 5584 5585 if (adv) 5586 bacpy(&conn->resp_addr, &adv->random_addr); 5587 } 5588 } 5589 5590 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev, 5591 struct sk_buff *skb) 5592 { 5593 struct hci_ev_le_conn_update_complete *ev = (void *) skb->data; 5594 struct hci_conn *conn; 5595 5596 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 5597 5598 if (ev->status) 5599 return; 5600 5601 hci_dev_lock(hdev); 5602 5603 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 5604 if (conn) { 5605 conn->le_conn_interval = le16_to_cpu(ev->interval); 5606 conn->le_conn_latency = le16_to_cpu(ev->latency); 5607 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout); 5608 } 5609 5610 hci_dev_unlock(hdev); 5611 } 5612 5613 /* This function requires the caller holds hdev->lock */ 5614 static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev, 5615 bdaddr_t *addr, 5616 u8 addr_type, bool addr_resolved, 5617 u8 adv_type, bdaddr_t *direct_rpa) 5618 { 5619 struct hci_conn *conn; 5620 struct hci_conn_params *params; 5621 5622 /* If the event is not connectable don't proceed further */ 5623 if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND) 5624 return NULL; 5625 5626 /* Ignore if the device is blocked or hdev is suspended */ 5627 if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type) || 5628 hdev->suspended) 5629 return NULL; 5630 5631 /* Most controllers will fail if we try to create new connections 5632 * while we have an existing one in peripheral role. 5633 */ 5634 if (hdev->conn_hash.le_num_peripheral > 0 && 5635 (!test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) || 5636 !(hdev->le_states[3] & 0x10))) 5637 return NULL; 5638 5639 /* If we're not connectable only connect devices that we have in 5640 * our pend_le_conns list. 5641 */ 5642 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr, 5643 addr_type); 5644 if (!params) 5645 return NULL; 5646 5647 if (!params->explicit_connect) { 5648 switch (params->auto_connect) { 5649 case HCI_AUTO_CONN_DIRECT: 5650 /* Only devices advertising with ADV_DIRECT_IND are 5651 * triggering a connection attempt. This allows 5652 * incoming connections from peripheral devices. 5653 */ 5654 if (adv_type != LE_ADV_DIRECT_IND) 5655 return NULL; 5656 break; 5657 case HCI_AUTO_CONN_ALWAYS: 5658 /* Devices advertising with ADV_IND or ADV_DIRECT_IND 5659 * are triggering a connection attempt. This means 5660 * that incoming connections from peripheral devices are 5661 * accepted and also outgoing connections to peripheral 5662 * devices are established when found. 5663 */ 5664 break; 5665 default: 5666 return NULL; 5667 } 5668 } 5669 5670 conn = hci_connect_le(hdev, addr, addr_type, addr_resolved, 5671 BT_SECURITY_LOW, hdev->def_le_autoconnect_timeout, 5672 HCI_ROLE_MASTER, direct_rpa); 5673 if (!IS_ERR(conn)) { 5674 /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned 5675 * by the higher layer that tried to connect; if not, then 5676 * store the pointer since we don't really have any 5677 * other owner of the object besides the params that 5678 * triggered it. This way we can abort the connection if 5679 * the parameters get removed and keep the reference 5680 * count consistent once the connection is established.
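 *
 * Ownership sketch: explicit_connect -> an upper layer already holds
 * the hci_conn reference; otherwise params->conn takes one via
 * hci_conn_get() below, so removing the params can cleanly abort the
 * pending attempt.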
5681 */ 5682 5683 if (!params->explicit_connect) 5684 params->conn = hci_conn_get(conn); 5685 5686 return conn; 5687 } 5688 5689 switch (PTR_ERR(conn)) { 5690 case -EBUSY: 5691 /* If hci_connect() returns -EBUSY it means there is already 5692 * an LE connection attempt going on. Since controllers don't 5693 * support more than one connection attempt at a time, we 5694 * don't consider this an error case. 5695 */ 5696 break; 5697 default: 5698 BT_DBG("Failed to connect: err %ld", PTR_ERR(conn)); 5699 return NULL; 5700 } 5701 5702 return NULL; 5703 } 5704 5705 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, 5706 u8 bdaddr_type, bdaddr_t *direct_addr, 5707 u8 direct_addr_type, s8 rssi, u8 *data, u8 len, 5708 bool ext_adv) 5709 { 5710 struct discovery_state *d = &hdev->discovery; 5711 struct smp_irk *irk; 5712 struct hci_conn *conn; 5713 bool match, bdaddr_resolved; 5714 u32 flags; 5715 u8 *ptr; 5716 5717 switch (type) { 5718 case LE_ADV_IND: 5719 case LE_ADV_DIRECT_IND: 5720 case LE_ADV_SCAN_IND: 5721 case LE_ADV_NONCONN_IND: 5722 case LE_ADV_SCAN_RSP: 5723 break; 5724 default: 5725 bt_dev_err_ratelimited(hdev, "unknown advertising packet " 5726 "type: 0x%02x", type); 5727 return; 5728 } 5729 5730 if (!ext_adv && len > HCI_MAX_AD_LENGTH) { 5731 bt_dev_err_ratelimited(hdev, "legacy adv larger than 31 bytes"); 5732 return; 5733 } 5734 5735 /* Find the end of the data in case the report contains padded zero 5736 * bytes at the end causing an invalid length value. 5737 * 5738 * When data is NULL, len is 0 so there is no need for extra ptr 5739 * check as 'ptr < data + 0' is already false in such case. 5740 */ 5741 for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) { 5742 if (ptr + 1 + *ptr > data + len) 5743 break; 5744 } 5745 5746 /* Adjust for actual length. This handles the case when remote 5747 * device is advertising with incorrect data length. 5748 */ 5749 len = ptr - data; 5750 5751 /* If the direct address is present, then this report is from 5752 * a LE Direct Advertising Report event. In that case it is 5753 * important to see if the address is matching the local 5754 * controller address. 5755 */ 5756 if (direct_addr) { 5757 direct_addr_type = ev_bdaddr_type(hdev, direct_addr_type, 5758 &bdaddr_resolved); 5759 5760 /* Only resolvable random addresses are valid for these 5761 * kind of reports and others can be ignored. 5762 */ 5763 if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type)) 5764 return; 5765 5766 /* If the controller is not using resolvable random 5767 * addresses, then this report can be ignored. 5768 */ 5769 if (!hci_dev_test_flag(hdev, HCI_PRIVACY)) 5770 return; 5771 5772 /* If the local IRK of the controller does not match 5773 * with the resolvable random address provided, then 5774 * this report can be ignored. 5775 */ 5776 if (!smp_irk_matches(hdev, hdev->irk, direct_addr)) 5777 return; 5778 } 5779 5780 /* Check if we need to convert to identity address */ 5781 irk = hci_get_irk(hdev, bdaddr, bdaddr_type); 5782 if (irk) { 5783 bdaddr = &irk->bdaddr; 5784 bdaddr_type = irk->addr_type; 5785 } 5786 5787 bdaddr_type = ev_bdaddr_type(hdev, bdaddr_type, &bdaddr_resolved); 5788 5789 /* Check if we have been requested to connect to this device. 5790 * 5791 * direct_addr is set only for directed advertising reports (it is NULL 5792 * for advertising reports) and is already verified to be RPA above.
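 *
 * (bdaddr_resolved, filled in by ev_bdaddr_type() above, indicates
 * whether the controller already resolved this address to an identity
 * address; it is simply passed through to check_pending_le_conn().)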
5793 */ 5794 conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, bdaddr_resolved, 5795 type, direct_addr); 5796 if (!ext_adv && conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) { 5797 /* Store report for later inclusion by 5798 * mgmt_device_connected 5799 */ 5800 memcpy(conn->le_adv_data, data, len); 5801 conn->le_adv_data_len = len; 5802 } 5803 5804 /* Passive scanning shouldn't trigger any device found events, 5805 * except for devices marked as CONN_REPORT for which we do send 5806 * device found events, or advertisement monitoring requested. 5807 */ 5808 if (hdev->le_scan_type == LE_SCAN_PASSIVE) { 5809 if (type == LE_ADV_DIRECT_IND) 5810 return; 5811 5812 if (!hci_pend_le_action_lookup(&hdev->pend_le_reports, 5813 bdaddr, bdaddr_type) && 5814 idr_is_empty(&hdev->adv_monitors_idr)) 5815 return; 5816 5817 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND) 5818 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE; 5819 else 5820 flags = 0; 5821 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL, 5822 rssi, flags, data, len, NULL, 0); 5823 return; 5824 } 5825 5826 /* When receiving non-connectable or scannable undirected 5827 * advertising reports, the remote device is not connectable, 5828 * so clearly indicate this in the 5829 * device found event. 5830 * 5831 * When receiving a scan response, then there is no way to 5832 * know if the remote device is connectable or not. However 5833 * since scan responses are merged with a previously seen 5834 * advertising report, the flags field from that report 5835 * will be used. 5836 * 5837 * In the really unlikely case that a controller gets confused 5838 * and just sends a scan response event, then it is marked as 5839 * not connectable as well. 5840 */ 5841 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND || 5842 type == LE_ADV_SCAN_RSP) 5843 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE; 5844 else 5845 flags = 0; 5846 5847 /* If there's nothing pending either store the data from this 5848 * event or send an immediate device found event if the data 5849 * should not be stored for later. 5850 */ 5851 if (!ext_adv && !has_pending_adv_report(hdev)) { 5852 /* If the report will trigger a SCAN_REQ store it for 5853 * later merging. 5854 */ 5855 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) { 5856 store_pending_adv_report(hdev, bdaddr, bdaddr_type, 5857 rssi, flags, data, len); 5858 return; 5859 } 5860 5861 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL, 5862 rssi, flags, data, len, NULL, 0); 5863 return; 5864 } 5865 5866 /* Check if the pending report is for the same device as the new one */ 5867 match = (!bacmp(bdaddr, &d->last_adv_addr) && 5868 bdaddr_type == d->last_adv_addr_type); 5869 5870 /* If the pending data doesn't match this report or this isn't a 5871 * scan response (e.g. we got a duplicate ADV_IND) then force 5872 * sending of the pending data. 5873 */ 5874 if (type != LE_ADV_SCAN_RSP || !match) { 5875 /* Send out whatever is in the cache, but skip duplicates */ 5876 if (!match) 5877 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK, 5878 d->last_adv_addr_type, NULL, 5879 d->last_adv_rssi, d->last_adv_flags, 5880 d->last_adv_data, 5881 d->last_adv_data_len, NULL, 0); 5882 5883 /* If the new report will trigger a SCAN_REQ store it for 5884 * later merging.
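 * (Only ADV_IND and ADV_SCAN_IND are scannable PDU types, so only
 * those reports can be followed by a SCAN_RSP worth merging.)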
5885 */ 5886 if (!ext_adv && (type == LE_ADV_IND || 5887 type == LE_ADV_SCAN_IND)) { 5888 store_pending_adv_report(hdev, bdaddr, bdaddr_type, 5889 rssi, flags, data, len); 5890 return; 5891 } 5892 5893 /* The advertising reports cannot be merged, so clear 5894 * the pending report and send out a device found event. 5895 */ 5896 clear_pending_adv_report(hdev); 5897 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL, 5898 rssi, flags, data, len, NULL, 0); 5899 return; 5900 } 5901 5902 /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and 5903 * the new event is a SCAN_RSP. We can therefore proceed with 5904 * sending a merged device found event. 5905 */ 5906 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK, 5907 d->last_adv_addr_type, NULL, rssi, d->last_adv_flags, 5908 d->last_adv_data, d->last_adv_data_len, data, len); 5909 clear_pending_adv_report(hdev); 5910 } 5911 5912 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb) 5913 { 5914 u8 num_reports = skb->data[0]; 5915 void *ptr = &skb->data[1]; 5916 5917 hci_dev_lock(hdev); 5918 5919 while (num_reports--) { 5920 struct hci_ev_le_advertising_info *ev = ptr; 5921 s8 rssi; 5922 5923 if (ev->length <= HCI_MAX_AD_LENGTH && 5924 ev->data + ev->length <= skb_tail_pointer(skb)) { 5925 rssi = ev->data[ev->length]; 5926 process_adv_report(hdev, ev->evt_type, &ev->bdaddr, 5927 ev->bdaddr_type, NULL, 0, rssi, 5928 ev->data, ev->length, false); 5929 } else { 5930 bt_dev_err(hdev, "Dropping invalid advertising data"); 5931 } 5932 5933 ptr += sizeof(*ev) + ev->length + 1; 5934 5935 if (ptr > (void *) skb_tail_pointer(skb) - sizeof(*ev)) { 5936 bt_dev_err(hdev, "Malicious advertising data. Stopping processing"); 5937 break; 5938 } 5939 } 5940 5941 hci_dev_unlock(hdev); 5942 } 5943 5944 static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type) 5945 { 5946 if (evt_type & LE_EXT_ADV_LEGACY_PDU) { 5947 switch (evt_type) { 5948 case LE_LEGACY_ADV_IND: 5949 return LE_ADV_IND; 5950 case LE_LEGACY_ADV_DIRECT_IND: 5951 return LE_ADV_DIRECT_IND; 5952 case LE_LEGACY_ADV_SCAN_IND: 5953 return LE_ADV_SCAN_IND; 5954 case LE_LEGACY_NONCONN_IND: 5955 return LE_ADV_NONCONN_IND; 5956 case LE_LEGACY_SCAN_RSP_ADV: 5957 case LE_LEGACY_SCAN_RSP_ADV_SCAN: 5958 return LE_ADV_SCAN_RSP; 5959 } 5960 5961 goto invalid; 5962 } 5963 5964 if (evt_type & LE_EXT_ADV_CONN_IND) { 5965 if (evt_type & LE_EXT_ADV_DIRECT_IND) 5966 return LE_ADV_DIRECT_IND; 5967 5968 return LE_ADV_IND; 5969 } 5970 5971 if (evt_type & LE_EXT_ADV_SCAN_RSP) 5972 return LE_ADV_SCAN_RSP; 5973 5974 if (evt_type & LE_EXT_ADV_SCAN_IND) 5975 return LE_ADV_SCAN_IND; 5976 5977 if (evt_type == LE_EXT_ADV_NON_CONN_IND || 5978 evt_type & LE_EXT_ADV_DIRECT_IND) 5979 return LE_ADV_NONCONN_IND; 5980 5981 invalid: 5982 bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x", 5983 evt_type); 5984 5985 return LE_ADV_INVALID; 5986 } 5987 5988 static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb) 5989 { 5990 u8 num_reports = skb->data[0]; 5991 void *ptr = &skb->data[1]; 5992 5993 hci_dev_lock(hdev); 5994 5995 while (num_reports--) { 5996 struct hci_ev_le_ext_adv_report *ev = ptr; 5997 u8 legacy_evt_type; 5998 u16 evt_type; 5999 6000 evt_type = __le16_to_cpu(ev->evt_type); 6001 legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type); 6002 if (legacy_evt_type != LE_ADV_INVALID) { 6003 process_adv_report(hdev, legacy_evt_type, &ev->bdaddr, 6004 ev->bdaddr_type, NULL, 0, ev->rssi, 6005 ev->data, ev->length, 6006 !(evt_type & 
LE_EXT_ADV_LEGACY_PDU)); 6007 } 6008 6009 ptr += sizeof(*ev) + ev->length; 6010 } 6011 6012 hci_dev_unlock(hdev); 6013 } 6014 6015 static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev, 6016 struct sk_buff *skb) 6017 { 6018 struct hci_ev_le_remote_feat_complete *ev = (void *)skb->data; 6019 struct hci_conn *conn; 6020 6021 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 6022 6023 hci_dev_lock(hdev); 6024 6025 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 6026 if (conn) { 6027 if (!ev->status) 6028 memcpy(conn->features[0], ev->features, 8); 6029 6030 if (conn->state == BT_CONFIG) { 6031 __u8 status; 6032 6033 /* If the local controller supports peripheral-initiated 6034 * features exchange, but the remote controller does 6035 * not, then it is possible that the error code 0x1a 6036 * for unsupported remote feature gets returned. 6037 * 6038 * In this specific case, allow the connection to 6039 * transition into connected state and mark it as 6040 * successful. 6041 */ 6042 if (!conn->out && ev->status == 0x1a && 6043 (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) 6044 status = 0x00; 6045 else 6046 status = ev->status; 6047 6048 conn->state = BT_CONNECTED; 6049 hci_connect_cfm(conn, status); 6050 hci_conn_drop(conn); 6051 } 6052 } 6053 6054 hci_dev_unlock(hdev); 6055 } 6056 6057 static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb) 6058 { 6059 struct hci_ev_le_ltk_req *ev = (void *) skb->data; 6060 struct hci_cp_le_ltk_reply cp; 6061 struct hci_cp_le_ltk_neg_reply neg; 6062 struct hci_conn *conn; 6063 struct smp_ltk *ltk; 6064 6065 BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle)); 6066 6067 hci_dev_lock(hdev); 6068 6069 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 6070 if (conn == NULL) 6071 goto not_found; 6072 6073 ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role); 6074 if (!ltk) 6075 goto not_found; 6076 6077 if (smp_ltk_is_sc(ltk)) { 6078 /* With SC both EDiv and Rand are set to zero */ 6079 if (ev->ediv || ev->rand) 6080 goto not_found; 6081 } else { 6082 /* For non-SC keys check that EDiv and Rand match */ 6083 if (ev->ediv != ltk->ediv || ev->rand != ltk->rand) 6084 goto not_found; 6085 } 6086 6087 memcpy(cp.ltk, ltk->val, ltk->enc_size); 6088 memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size); 6089 cp.handle = cpu_to_le16(conn->handle); 6090 6091 conn->pending_sec_level = smp_ltk_sec_level(ltk); 6092 6093 conn->enc_key_size = ltk->enc_size; 6094 6095 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp); 6096 6097 /* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a 6098 * temporary key used to encrypt a connection following 6099 * pairing. It is used during the Encrypted Session Setup to 6100 * distribute the keys. Later, security can be re-established 6101 * using a distributed LTK. 
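 *
 * In short: SMP_STK entries are single-use. After the key value is
 * handed to the controller below, the entry is deleted from the key
 * list, while persistent LTKs are kept for future encryption requests.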
6102 */ 6103 if (ltk->type == SMP_STK) { 6104 set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags); 6105 list_del_rcu(&ltk->list); 6106 kfree_rcu(ltk, rcu); 6107 } else { 6108 clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags); 6109 } 6110 6111 hci_dev_unlock(hdev); 6112 6113 return; 6114 6115 not_found: 6116 neg.handle = ev->handle; 6117 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg); 6118 hci_dev_unlock(hdev); 6119 } 6120 6121 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle, 6122 u8 reason) 6123 { 6124 struct hci_cp_le_conn_param_req_neg_reply cp; 6125 6126 cp.handle = cpu_to_le16(handle); 6127 cp.reason = reason; 6128 6129 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp), 6130 &cp); 6131 } 6132 6133 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev, 6134 struct sk_buff *skb) 6135 { 6136 struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data; 6137 struct hci_cp_le_conn_param_req_reply cp; 6138 struct hci_conn *hcon; 6139 u16 handle, min, max, latency, timeout; 6140 6141 handle = le16_to_cpu(ev->handle); 6142 min = le16_to_cpu(ev->interval_min); 6143 max = le16_to_cpu(ev->interval_max); 6144 latency = le16_to_cpu(ev->latency); 6145 timeout = le16_to_cpu(ev->timeout); 6146 6147 hcon = hci_conn_hash_lookup_handle(hdev, handle); 6148 if (!hcon || hcon->state != BT_CONNECTED) 6149 return send_conn_param_neg_reply(hdev, handle, 6150 HCI_ERROR_UNKNOWN_CONN_ID); 6151 6152 if (hci_check_conn_params(min, max, latency, timeout)) 6153 return send_conn_param_neg_reply(hdev, handle, 6154 HCI_ERROR_INVALID_LL_PARAMS); 6155 6156 if (hcon->role == HCI_ROLE_MASTER) { 6157 struct hci_conn_params *params; 6158 u8 store_hint; 6159 6160 hci_dev_lock(hdev); 6161 6162 params = hci_conn_params_lookup(hdev, &hcon->dst, 6163 hcon->dst_type); 6164 if (params) { 6165 params->conn_min_interval = min; 6166 params->conn_max_interval = max; 6167 params->conn_latency = latency; 6168 params->supervision_timeout = timeout; 6169 store_hint = 0x01; 6170 } else { 6171 store_hint = 0x00; 6172 } 6173 6174 hci_dev_unlock(hdev); 6175 6176 mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type, 6177 store_hint, min, max, latency, timeout); 6178 } 6179 6180 cp.handle = ev->handle; 6181 cp.interval_min = ev->interval_min; 6182 cp.interval_max = ev->interval_max; 6183 cp.latency = ev->latency; 6184 cp.timeout = ev->timeout; 6185 cp.min_ce_len = 0; 6186 cp.max_ce_len = 0; 6187 6188 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp); 6189 } 6190 6191 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, 6192 struct sk_buff *skb) 6193 { 6194 u8 num_reports = skb->data[0]; 6195 struct hci_ev_le_direct_adv_info *ev = (void *)&skb->data[1]; 6196 6197 if (!num_reports || skb->len < num_reports * sizeof(*ev) + 1) 6198 return; 6199 6200 hci_dev_lock(hdev); 6201 6202 for (; num_reports; num_reports--, ev++) 6203 process_adv_report(hdev, ev->evt_type, &ev->bdaddr, 6204 ev->bdaddr_type, &ev->direct_addr, 6205 ev->direct_addr_type, ev->rssi, NULL, 0, 6206 false); 6207 6208 hci_dev_unlock(hdev); 6209 } 6210 6211 static void hci_le_phy_update_evt(struct hci_dev *hdev, struct sk_buff *skb) 6212 { 6213 struct hci_ev_le_phy_update_complete *ev = (void *) skb->data; 6214 struct hci_conn *conn; 6215 6216 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 6217 6218 if (ev->status) 6219 return; 6220 6221 hci_dev_lock(hdev); 6222 6223 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 6224 if (!conn) 6225 goto unlock; 6226 6227 conn->le_tx_phy =
ev->tx_phy; 6228 conn->le_rx_phy = ev->rx_phy; 6229 6230 unlock: 6231 hci_dev_unlock(hdev); 6232 } 6233 6234 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb) 6235 { 6236 struct hci_ev_le_meta *le_ev = (void *) skb->data; 6237 6238 skb_pull(skb, sizeof(*le_ev)); 6239 6240 switch (le_ev->subevent) { 6241 case HCI_EV_LE_CONN_COMPLETE: 6242 hci_le_conn_complete_evt(hdev, skb); 6243 break; 6244 6245 case HCI_EV_LE_CONN_UPDATE_COMPLETE: 6246 hci_le_conn_update_complete_evt(hdev, skb); 6247 break; 6248 6249 case HCI_EV_LE_ADVERTISING_REPORT: 6250 hci_le_adv_report_evt(hdev, skb); 6251 break; 6252 6253 case HCI_EV_LE_REMOTE_FEAT_COMPLETE: 6254 hci_le_remote_feat_complete_evt(hdev, skb); 6255 break; 6256 6257 case HCI_EV_LE_LTK_REQ: 6258 hci_le_ltk_request_evt(hdev, skb); 6259 break; 6260 6261 case HCI_EV_LE_REMOTE_CONN_PARAM_REQ: 6262 hci_le_remote_conn_param_req_evt(hdev, skb); 6263 break; 6264 6265 case HCI_EV_LE_DIRECT_ADV_REPORT: 6266 hci_le_direct_adv_report_evt(hdev, skb); 6267 break; 6268 6269 case HCI_EV_LE_PHY_UPDATE_COMPLETE: 6270 hci_le_phy_update_evt(hdev, skb); 6271 break; 6272 6273 case HCI_EV_LE_EXT_ADV_REPORT: 6274 hci_le_ext_adv_report_evt(hdev, skb); 6275 break; 6276 6277 case HCI_EV_LE_ENHANCED_CONN_COMPLETE: 6278 hci_le_enh_conn_complete_evt(hdev, skb); 6279 break; 6280 6281 case HCI_EV_LE_EXT_ADV_SET_TERM: 6282 hci_le_ext_adv_term_evt(hdev, skb); 6283 break; 6284 6285 default: 6286 break; 6287 } 6288 } 6289 6290 static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode, 6291 u8 event, struct sk_buff *skb) 6292 { 6293 struct hci_ev_cmd_complete *ev; 6294 struct hci_event_hdr *hdr; 6295 6296 if (!skb) 6297 return false; 6298 6299 if (skb->len < sizeof(*hdr)) { 6300 bt_dev_err(hdev, "too short HCI event"); 6301 return false; 6302 } 6303 6304 hdr = (void *) skb->data; 6305 skb_pull(skb, HCI_EVENT_HDR_SIZE); 6306 6307 if (event) { 6308 if (hdr->evt != event) 6309 return false; 6310 return true; 6311 } 6312 6313 /* Check if request ended in Command Status - no way to retrieve 6314 * any extra parameters in this case. 6315 */ 6316 if (hdr->evt == HCI_EV_CMD_STATUS) 6317 return false; 6318 6319 if (hdr->evt != HCI_EV_CMD_COMPLETE) { 6320 bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)", 6321 hdr->evt); 6322 return false; 6323 } 6324 6325 if (skb->len < sizeof(*ev)) { 6326 bt_dev_err(hdev, "too short cmd_complete event"); 6327 return false; 6328 } 6329 6330 ev = (void *) skb->data; 6331 skb_pull(skb, sizeof(*ev)); 6332 6333 if (opcode != __le16_to_cpu(ev->opcode)) { 6334 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode, 6335 __le16_to_cpu(ev->opcode)); 6336 return false; 6337 } 6338 6339 return true; 6340 } 6341 6342 static void hci_store_wake_reason(struct hci_dev *hdev, u8 event, 6343 struct sk_buff *skb) 6344 { 6345 struct hci_ev_le_advertising_info *adv; 6346 struct hci_ev_le_direct_adv_info *direct_adv; 6347 struct hci_ev_le_ext_adv_report *ext_adv; 6348 const struct hci_ev_conn_complete *conn_complete = (void *)skb->data; 6349 const struct hci_ev_conn_request *conn_request = (void *)skb->data; 6350 6351 hci_dev_lock(hdev); 6352 6353 /* If we are currently suspended and this is the first BT event seen, 6354 * save the wake reason associated with the event. 6355 */ 6356 if (!hdev->suspended || hdev->wake_reason) 6357 goto unlock; 6358 6359 /* Default to remote wake. Values for wake_reason are documented in the 6360 * Bluez mgmt api docs. 
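 * (The MGMT_WAKE_REASON_REMOTE_WAKE default set below is overwritten
 * with MGMT_WAKE_REASON_UNEXPECTED when the waking event is neither a
 * connection event nor an advertising report.)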
static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
				 u8 event, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;

	if (!skb)
		return false;

	if (skb->len < sizeof(*hdr)) {
		bt_dev_err(hdev, "too short HCI event");
		return false;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			return false;
		return true;
	}

	/* Check if request ended in Command Status - no way to retrieve
	 * any extra parameters in this case.
	 */
	if (hdr->evt == HCI_EV_CMD_STATUS)
		return false;

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
			   hdr->evt);
		return false;
	}

	if (skb->len < sizeof(*ev)) {
		bt_dev_err(hdev, "too short cmd_complete event");
		return false;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode != __le16_to_cpu(ev->opcode)) {
		BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
		       __le16_to_cpu(ev->opcode));
		return false;
	}

	return true;
}

static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
				  struct sk_buff *skb)
{
	struct hci_ev_le_advertising_info *adv;
	struct hci_ev_le_direct_adv_info *direct_adv;
	struct hci_ev_le_ext_adv_report *ext_adv;
	const struct hci_ev_conn_complete *conn_complete = (void *)skb->data;
	const struct hci_ev_conn_request *conn_request = (void *)skb->data;

	hci_dev_lock(hdev);

	/* If we are currently suspended and this is the first BT event seen,
	 * save the wake reason associated with the event.
	 */
	if (!hdev->suspended || hdev->wake_reason)
		goto unlock;

	/* Default to remote wake. Values for wake_reason are documented in the
	 * Bluez mgmt api docs.
	 */
	hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE;

	/* Once configured for remote wakeup, we should only wake up for
	 * reconnections. It's useful to see which device is waking us up so
	 * keep track of the bdaddr of the connection event that woke us up.
	 */
	if (event == HCI_EV_CONN_REQUEST) {
		bacpy(&hdev->wake_addr, &conn_request->bdaddr);
		hdev->wake_addr_type = BDADDR_BREDR;
	} else if (event == HCI_EV_CONN_COMPLETE) {
		bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
		hdev->wake_addr_type = BDADDR_BREDR;
	} else if (event == HCI_EV_LE_META) {
		struct hci_ev_le_meta *le_ev = (void *)skb->data;
		u8 subevent = le_ev->subevent;
		u8 *ptr = &skb->data[sizeof(*le_ev)];
		u8 num_reports = *ptr;

		if ((subevent == HCI_EV_LE_ADVERTISING_REPORT ||
		     subevent == HCI_EV_LE_DIRECT_ADV_REPORT ||
		     subevent == HCI_EV_LE_EXT_ADV_REPORT) &&
		    num_reports) {
			adv = (void *)(ptr + 1);
			direct_adv = (void *)(ptr + 1);
			ext_adv = (void *)(ptr + 1);

			switch (subevent) {
			case HCI_EV_LE_ADVERTISING_REPORT:
				bacpy(&hdev->wake_addr, &adv->bdaddr);
				hdev->wake_addr_type = adv->bdaddr_type;
				break;
			case HCI_EV_LE_DIRECT_ADV_REPORT:
				bacpy(&hdev->wake_addr, &direct_adv->bdaddr);
				hdev->wake_addr_type = direct_adv->bdaddr_type;
				break;
			case HCI_EV_LE_EXT_ADV_REPORT:
				bacpy(&hdev->wake_addr, &ext_adv->bdaddr);
				hdev->wake_addr_type = ext_adv->bdaddr_type;
				break;
			}
		}
	} else {
		hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED;
	}

unlock:
	hci_dev_unlock(hdev);
}

void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	hci_req_complete_t req_complete = NULL;
	hci_req_complete_skb_t req_complete_skb = NULL;
	struct sk_buff *orig_skb = NULL;
	u8 status = 0, event = hdr->evt, req_evt = 0;
	u16 opcode = HCI_OP_NOP;

	if (!event) {
		bt_dev_warn(hdev, "Received unexpected HCI Event 00000000");
		goto done;
	}

	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) {
		struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
		opcode = __le16_to_cpu(cmd_hdr->opcode);
		hci_req_cmd_complete(hdev, opcode, status, &req_complete,
				     &req_complete_skb);
		req_evt = event;
	}

	/* If it looks like we might end up having to call
	 * req_complete_skb, store a pristine copy of the skb since the
	 * various handlers may modify the original one through
	 * skb_pull() calls, etc.
	 */
	if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
	    event == HCI_EV_CMD_COMPLETE)
		orig_skb = skb_clone(skb, GFP_KERNEL);

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* Store wake reason if we're suspended */
	hci_store_wake_reason(hdev, event, skb);

	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb, &opcode, &status,
				     &req_complete, &req_complete_skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb, &opcode, &status, &req_complete,
				   &req_complete_skb);
		break;

	case HCI_EV_HARDWARE_ERROR:
		hci_hardware_error_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_KEY_REFRESH_COMPLETE:
		hci_key_refresh_complete_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_REQUEST:
		hci_user_passkey_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_NOTIFY:
		hci_user_passkey_notify_evt(hdev, skb);
		break;

	case HCI_EV_KEYPRESS_NOTIFY:
		hci_keypress_notify_evt(hdev, skb);
		break;
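
	/* Secure Simple Pairing, LE and (with CONFIG_BT_HS) AMP events
	 * are handled below.
	 */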
	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

#if IS_ENABLED(CONFIG_BT_HS)
	case HCI_EV_CHANNEL_SELECTED:
		hci_chan_selected_evt(hdev, skb);
		break;

	case HCI_EV_PHY_LINK_COMPLETE:
		hci_phy_link_complete_evt(hdev, skb);
		break;

	case HCI_EV_LOGICAL_LINK_COMPLETE:
		hci_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
		hci_disconn_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
		hci_disconn_phylink_complete_evt(hdev, skb);
		break;
#endif

	case HCI_EV_NUM_COMP_BLOCKS:
		hci_num_comp_blocks_evt(hdev, skb);
		break;

	case HCI_EV_VENDOR:
		msft_vendor_evt(hdev, skb);
		break;

	default:
		BT_DBG("%s event 0x%2.2x", hdev->name, event);
		break;
	}

	if (req_complete) {
		req_complete(hdev, status, opcode);
	} else if (req_complete_skb) {
		if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
			kfree_skb(orig_skb);
			orig_skb = NULL;
		}
		req_complete_skb(hdev, status, opcode, orig_skb);
	}

done:
	kfree_skb(orig_skb);
	kfree_skb(skb);
	hdev->stat.evt_rx++;
}