/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI event handling. */

#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "a2mp.h"
#include "amp.h"
#include "smp.h"
#include "msft.h"
#include "eir.h"

#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"

#define secs_to_jiffies(_secs) msecs_to_jiffies((_secs) * 1000)

/* Handle HCI Event packets */

static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb,
				  u8 *new_status)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* It is possible that we receive Inquiry Complete event right
	 * before we receive Inquiry Cancel Command Complete event, in
	 * which case the latter event should have status of Command
	 * Disallowed (0x0c). This should not be treated as error, since
	 * we actually achieve what Inquiry Cancel wants to achieve,
	 * which is to end the last Inquiry session.
	 */
	if (status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
		bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
		status = 0x00;
	}

	*new_status = status;

	if (status)
		return;

	clear_bit(HCI_INQUIRY, &hdev->flags);
	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	hci_dev_lock(hdev);
	/* Set discovery state to stopped if we're not doing LE active
	 * scanning.
	 */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
	    hdev->le_scan_type != LE_SCAN_ACTIVE)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}

static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
}

static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);

	hci_conn_check_pending(hdev);
}

static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}

static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_role_discovery *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->role = rp->role;

	hci_dev_unlock(hdev);
}

static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = __le16_to_cpu(rp->policy);

	hci_dev_unlock(hdev);
}

static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_def_link_policy *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->link_policy = __le16_to_cpu(rp->policy);
}

static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
	if (!sent)
		return;

	hdev->link_policy = get_unaligned_le16(sent);
}
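
/* HCI_OP_RESET clears all volatile controller state; persistent
 * settings and stored keys survive, everything else is reinitialized
 * below.
 */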
static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	clear_bit(HCI_RESET, &hdev->flags);

	if (status)
		return;

	/* Reset all non-persistent flags */
	hci_dev_clear_volatile_flags(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;

	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
	hdev->scan_rsp_data_len = 0;

	hdev->le_scan_type = LE_SCAN_PASSIVE;

	hdev->ssp_debug_mode = 0;

	hci_bdaddr_list_clear(&hdev->le_accept_list);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);
}

static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
	struct hci_cp_read_stored_link_key *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
	if (!sent)
		return;

	if (!rp->status && sent->read_all == 0x01) {
		hdev->stored_max_keys = le16_to_cpu(rp->max_keys);
		hdev->stored_num_keys = le16_to_cpu(rp->num_keys);
	}
}

static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;
	u16 num_keys;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	/* Convert from little endian before comparing; comparing the raw
	 * __le16 against the host-order counter is wrong on big-endian
	 * hosts.
	 */
	num_keys = le16_to_cpu(rp->num_keys);
	if (num_keys <= hdev->stored_num_keys)
		hdev->stored_num_keys -= num_keys;
	else
		hdev->stored_num_keys = 0;
}

static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_local_name_complete(hdev, sent, status);
	else if (!status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_name *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG))
		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
}

static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_auth_enable_complete(hdev, status);

	hci_dev_unlock(hdev);
}
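
/* Mirror the controller's encryption setting in the HCI_ENCRYPT device
 * flag so the rest of the stack can test it without issuing a new
 * command.
 */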
static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u8 param;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
	if (!sent)
		return;

	param = *((__u8 *) sent);

	if (param)
		set_bit(HCI_ENCRYPT, &hdev->flags);
	else
		clear_bit(HCI_ENCRYPT, &hdev->flags);
}

static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u8 param;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return;

	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	if (status) {
		hdev->discov_timeout = 0;
		goto done;
	}

	if (param & SCAN_INQUIRY)
		set_bit(HCI_ISCAN, &hdev->flags);
	else
		clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_PAGE)
		set_bit(HCI_PSCAN, &hdev->flags);
	else
		clear_bit(HCI_PSCAN, &hdev->flags);

done:
	hci_dev_unlock(hdev);
}

static void hci_cc_set_event_filter(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *)skb->data);
	struct hci_cp_set_event_filter *cp;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_SET_EVENT_FLT);
	if (!sent)
		return;

	cp = (struct hci_cp_set_event_filter *)sent;

	if (cp->flt_type == HCI_FLT_CLEAR_ALL)
		hci_dev_clear_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
	else
		hci_dev_set_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
}

static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_class_of_dev *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->dev_class, rp->dev_class, 3);

	BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
	       hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
}

static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (status == 0)
		memcpy(hdev->dev_class, sent, 3);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_class_of_dev_complete(hdev, sent, status);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_voice_setting *rp = (void *) skb->data;
	__u16 setting;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	setting = __le16_to_cpu(rp->voice_setting);

	if (hdev->voice_setting == setting)
		return;

	hdev->voice_setting = setting;

	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
}

static void hci_cc_write_voice_setting(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u16 setting;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
	if (!sent)
		return;

	setting = get_unaligned_le16(sent);

	if (hdev->voice_setting == setting)
		return;

	hdev->voice_setting = setting;

	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
}

static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->num_iac = rp->num_iac;

	BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
}

static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_write_ssp_mode *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		if (sent->mode) {
			hdev->features[1][0] |= LMP_HOST_SSP;
			hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
		} else {
			hdev->features[1][0] &= ~LMP_HOST_SSP;
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	struct hci_cp_write_sc_support *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		if (sent->support)
			hdev->features[1][0] |= LMP_HOST_SC;
		else
			hdev->features[1][0] &= ~LMP_HOST_SC;
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT) && !status) {
		if (sent->support)
			hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_version *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hdev->hci_ver = rp->hci_ver;
		hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
		hdev->lmp_ver = rp->lmp_ver;
		hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
	}
}

static void hci_cc_read_local_commands(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_commands *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG))
		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
}

static void hci_cc_read_auth_payload_timeout(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct hci_rp_read_auth_payload_to *rp = (void *)skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);

	hci_dev_unlock(hdev);
}

static void hci_cc_write_auth_payload_timeout(struct hci_dev *hdev,
					      struct sk_buff *skb)
{
	struct hci_rp_write_auth_payload_to *rp = (void *)skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->auth_payload_timeout = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_local_features(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device.
	 */

	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
}

static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hdev->max_page < rp->max_page)
		hdev->max_page = rp->max_page;

	if (rp->page < HCI_MAX_PAGES)
		memcpy(hdev->features[rp->page], rp->features, 8);
}

static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->flow_ctl_mode = rp->mode;
}

static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
		hdev->sco_mtu = 64;
		hdev->sco_pkts = 8;
	}

	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
}
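
/* The controller's BD_ADDR is captured while HCI_INIT is set; during
 * HCI_SETUP it is additionally preserved in setup_addr for later
 * reference.
 */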
static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_bd_addr *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_INIT, &hdev->flags))
		bacpy(&hdev->bdaddr, &rp->bdaddr);

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		bacpy(&hdev->setup_addr, &rp->bdaddr);
}

static void hci_cc_read_local_pairing_opts(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_pairing_opts *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hdev->pairing_opts = rp->pairing_opts;
		hdev->max_enc_key_size = rp->max_key_size;
	}
}

static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_INIT, &hdev->flags)) {
		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
		hdev->page_scan_window = __le16_to_cpu(rp->window);
	}
}

static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	struct hci_cp_write_page_scan_activity *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
	if (!sent)
		return;

	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
	hdev->page_scan_window = __le16_to_cpu(sent->window);
}

static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_type *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->page_scan_type = rp->type;
}

static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	u8 *type;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
	if (type)
		hdev->page_scan_type = *type;
}

static void hci_cc_read_data_block_size(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_data_block_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
	hdev->block_len = __le16_to_cpu(rp->block_len);
	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);

	hdev->block_cnt = hdev->num_blocks;

	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
	       hdev->block_cnt, hdev->block_len);
}
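
/* HCI_OP_READ_CLOCK may return either the local clock (which == 0x00,
 * stored in hdev->clock) or a piconet clock tied to the connection
 * handle found in the sent command parameters.
 */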
static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_clock *rp = (void *) skb->data;
	struct hci_cp_read_clock *cp;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	if (skb->len < sizeof(*rp))
		return;

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!cp)
		goto unlock;

	if (cp->which == 0x00) {
		hdev->clock = le32_to_cpu(rp->clock);
		goto unlock;
	}

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		conn->clock = le32_to_cpu(rp->clock);
		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
	}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->amp_status = rp->amp_status;
	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
	hdev->amp_type = rp->amp_type;
	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
}

static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->inq_tx_power = rp->tx_power;
}

static void hci_cc_read_def_err_data_reporting(struct hci_dev *hdev,
					       struct sk_buff *skb)
{
	struct hci_rp_read_def_err_data_reporting *rp = (void *)skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->err_data_reporting = rp->err_data_reporting;
}

static void hci_cc_write_def_err_data_reporting(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	__u8 status = *((__u8 *)skb->data);
	struct hci_cp_write_def_err_data_reporting *cp;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
	if (!cp)
		return;

	hdev->err_data_reporting = cp->err_data_reporting;
}

static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
}

static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
						 rp->status);

	hci_dev_unlock(hdev);
}
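
/* LE buffer accounting: le_mtu/le_pkts come from the controller and
 * le_cnt tracks how many LE ACL packets may currently be in flight.
 */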
static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
	hdev->le_pkts = rp->le_max_pkt;

	hdev->le_cnt = hdev->le_pkts;

	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
}

static void hci_cc_le_read_local_features(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_le_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->le_features, rp->features, 8);
}

static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->adv_tx_power = rp->tx_power;
}

static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
						 rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
						 0, rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
}

static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
}
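
/* If the random address just set matches the locally generated RPA,
 * restart the RPA expiry timer so the address is rotated on schedule.
 */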
static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	bdaddr_t *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	bacpy(&hdev->random_addr, sent);

	if (!bacmp(&hdev->rpa, sent)) {
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired,
				   secs_to_jiffies(hdev->rpa_timeout));
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_le_set_default_phy(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_le_set_default_phy *cp;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	hdev->le_tx_def_phys = cp->tx_phys;
	hdev->le_rx_def_phys = cp->rx_phys;

	hci_dev_unlock(hdev);
}

static void hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev,
					      struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_le_set_adv_set_rand_addr *cp;
	struct adv_info *adv;

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
	/* Update only the adv instance, since handle 0x00 shall use
	 * HCI_OP_LE_SET_RANDOM_ADDR instead, which allows both extended
	 * and non-extended advertising.
	 */
	if (!cp || !cp->handle)
		return;

	hci_dev_lock(hdev);

	adv = hci_find_adv_instance(hdev, cp->handle);
	if (adv) {
		bacpy(&adv->random_addr, &cp->bdaddr);
		if (!bacmp(&hdev->rpa, &cp->bdaddr)) {
			adv->rpa_expired = false;
			queue_delayed_work(hdev->workqueue,
					   &adv->rpa_expired_cb,
					   secs_to_jiffies(hdev->rpa_timeout));
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_le_remove_adv_set(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *)skb->data);
	u8 *instance;
	int err;

	if (status)
		return;

	instance = hci_sent_cmd_data(hdev, HCI_OP_LE_REMOVE_ADV_SET);
	if (!instance)
		return;

	hci_dev_lock(hdev);

	err = hci_remove_adv_instance(hdev, *instance);
	if (!err)
		mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd), hdev,
					 *instance);

	hci_dev_unlock(hdev);
}

static void hci_cc_le_clear_adv_sets(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *)skb->data);
	struct adv_info *adv, *n;
	int err;

	if (status)
		return;

	if (!hci_sent_cmd_data(hdev, HCI_OP_LE_CLEAR_ADV_SETS))
		return;

	hci_dev_lock(hdev);

	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
		u8 instance = adv->instance;

		err = hci_remove_adv_instance(hdev, instance);
		if (!err)
			mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd),
						 hdev, instance);
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_le_read_transmit_power(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_le_read_transmit_power *rp = (void *)skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->min_le_tx_power = rp->min_le_tx_power;
	hdev->max_le_tx_power = rp->max_le_tx_power;
}
static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	/* If we're doing connection initiation as peripheral, set a
	 * timeout in case something goes wrong.
	 */
	if (*sent) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_adv_enable *cp;
	struct hci_cp_ext_adv_set *set;
	__u8 status = *((__u8 *) skb->data);
	struct adv_info *adv = NULL, *n;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
	if (!cp)
		return;

	set = (void *)cp->data;

	hci_dev_lock(hdev);

	if (cp->num_of_sets)
		adv = hci_find_adv_instance(hdev, set->handle);

	if (cp->enable) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		if (adv)
			adv->enabled = true;

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		if (cp->num_of_sets) {
			if (adv)
				adv->enabled = false;

			/* If just one instance was disabled, check if there
			 * are any other instances enabled before clearing
			 * HCI_LE_ADV.
			 */
			list_for_each_entry_safe(adv, n, &hdev->adv_instances,
						 list) {
				if (adv->enabled)
					goto unlock;
			}
		} else {
			/* All instances shall be considered disabled */
			list_for_each_entry_safe(adv, n, &hdev->adv_instances,
						 list)
				adv->enabled = false;
		}

		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_param *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	hdev->le_scan_type = cp->type;

	hci_dev_unlock(hdev);
}

static void hci_cc_le_set_ext_scan_param(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_scan_params *cp;
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_le_scan_phy_params *phy_param;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
	if (!cp)
		return;

	phy_param = (void *)cp->data;

	hci_dev_lock(hdev);

	hdev->le_scan_type = phy_param->type;

	hci_dev_unlock(hdev);
}

static bool has_pending_adv_report(struct hci_dev *hdev)
{
	struct discovery_state *d = &hdev->discovery;

	return bacmp(&d->last_adv_addr, BDADDR_ANY);
}

static void clear_pending_adv_report(struct hci_dev *hdev)
{
	struct discovery_state *d = &hdev->discovery;

	bacpy(&d->last_adv_addr, BDADDR_ANY);
	d->last_adv_data_len = 0;
}
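
/* During active scanning the last advertising report is cached here so
 * it can be merged with the matching scan response before it is
 * reported via mgmt_device_found().
 */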
static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 bdaddr_type, s8 rssi, u32 flags,
				     u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;

	if (len > HCI_MAX_AD_LENGTH)
		return;

	bacpy(&d->last_adv_addr, bdaddr);
	d->last_adv_addr_type = bdaddr_type;
	d->last_adv_rssi = rssi;
	d->last_adv_flags = flags;
	memcpy(d->last_adv_data, data, len);
	d->last_adv_data_len = len;
}

static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
{
	hci_dev_lock(hdev);

	switch (enable) {
	case LE_SCAN_ENABLE:
		hci_dev_set_flag(hdev, HCI_LE_SCAN);
		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
			clear_pending_adv_report(hdev);
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		hci_dev_clear_flag(hdev, HCI_LE_SCAN);

		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request. Therefore,
		 * mark discovery as stopped.
		 */
		if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

		break;

	default:
		bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
			   enable);
		break;
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	le_set_scan_enable_complete(hdev, cp->enable);
}

static void hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
	if (!cp)
		return;

	le_set_scan_enable_complete(hdev, cp->enable);
}

static void hci_cc_le_read_num_adv_sets(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_num_supported_adv_sets *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x No of Adv sets %u", hdev->name, rp->status,
	       rp->num_of_sets);

	if (rp->status)
		return;

	hdev->le_num_of_adv_sets = rp->num_of_sets;
}

static void hci_cc_le_read_accept_list_size(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_rp_le_read_accept_list_size *rp = (void *)skb->data;

	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);

	if (rp->status)
		return;

	hdev->le_accept_list_size = rp->size;
}
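
/* hdev->le_accept_list mirrors the controller's LE accept (filter)
 * list; each accept-list command keeps the mirror in sync.
 */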
static void hci_cc_le_clear_accept_list(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	hci_bdaddr_list_clear(&hdev->le_accept_list);
}

static void hci_cc_le_add_to_accept_list(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_add_to_accept_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
	if (!sent)
		return;

	hci_bdaddr_list_add(&hdev->le_accept_list, &sent->bdaddr,
			    sent->bdaddr_type);
}

static void hci_cc_le_del_from_accept_list(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_cp_le_del_from_accept_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST);
	if (!sent)
		return;

	hci_bdaddr_list_del(&hdev->le_accept_list, &sent->bdaddr,
			    sent->bdaddr_type);
}

static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_rp_le_read_supported_states *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->le_states, rp->le_states, 8);
}

static void hci_cc_le_read_def_data_len(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
	hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
}

static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_write_def_data_len *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
	if (!sent)
		return;

	hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
	hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
}

static void hci_cc_le_add_to_resolv_list(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_add_to_resolv_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
	if (!sent)
		return;

	hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
				     sent->bdaddr_type, sent->peer_irk,
				     sent->local_irk);
}

static void hci_cc_le_del_from_resolv_list(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_cp_le_del_from_resolv_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
	if (!sent)
		return;

	hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
				     sent->bdaddr_type);
}
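
/* Likewise, hdev->le_resolv_list mirrors the controller's address
 * resolving list.
 */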
static void hci_cc_le_clear_resolv_list(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	hci_bdaddr_list_clear(&hdev->le_resolv_list);
}

static void hci_cc_le_read_resolv_list_size(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_rp_le_read_resolv_list_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);

	if (rp->status)
		return;

	hdev->le_resolv_list_size = rp->size;
}

static void hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev,
						 struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (*sent)
		hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
	else
		hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);

	hci_dev_unlock(hdev);
}

static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
	hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
	hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
	hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
}

static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_cp_write_le_host_supported *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (sent->le) {
		hdev->features[1][0] |= LMP_HOST_LE;
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	} else {
		hdev->features[1][0] &= ~LMP_HOST_LE;
		hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);
	}

	if (sent->simul)
		hdev->features[1][0] |= LMP_HOST_LE_BREDR;
	else
		hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;

	hci_dev_unlock(hdev);
}

static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_le_set_adv_param *cp;
	u8 status = *((u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
	if (!cp)
		return;

	hci_dev_lock(hdev);
	hdev->adv_addr_type = cp->own_address_type;
	hci_dev_unlock(hdev);
}
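
/* For extended advertising the controller reports the actual TX power
 * in the command response; store it per instance, with handle 0x00
 * mapping to the legacy hdev->adv_tx_power.
 */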
static void hci_cc_set_ext_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_le_set_ext_adv_params *rp = (void *) skb->data;
	struct hci_cp_le_set_ext_adv_params *cp;
	struct adv_info *adv_instance;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
	if (!cp)
		return;

	hci_dev_lock(hdev);
	hdev->adv_addr_type = cp->own_addr_type;
	if (!cp->handle) {
		/* Store in hdev for instance 0 */
		hdev->adv_tx_power = rp->tx_power;
	} else {
		adv_instance = hci_find_adv_instance(hdev, cp->handle);
		if (adv_instance)
			adv_instance->tx_power = rp->tx_power;
	}
	/* Update adv data as tx power is known now */
	hci_req_update_adv_data(hdev, cp->handle);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_rssi *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->rssi = rp->rssi;

	hci_dev_unlock(hdev);
}

static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_read_tx_power *sent;
	struct hci_rp_read_tx_power *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (!conn)
		goto unlock;

	switch (sent->type) {
	case 0x00:
		conn->tx_power = rp->tx_power;
		break;
	case 0x01:
		conn->max_tx_power = rp->tx_power;
		break;
	}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	u8 *mode;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
	if (mode)
		hdev->ssp_debug_mode = *mode;
}
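
/* The hci_cs_* handlers below process HCI Command Status events: the
 * controller has accepted or rejected a command whose result arrives
 * in a later event, so mostly only failures need handling here.
 */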
static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status) {
		hci_conn_check_pending(hdev);
		return;
	}

	set_bit(HCI_INQUIRY, &hdev->flags);
}

static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
					    HCI_ROLE_MASTER);
			if (!conn)
				bt_dev_err(hdev, "no memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_add_sco *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_auth_requested *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_set_conn_encrypt *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}

static int hci_outgoing_auth_needed(struct hci_dev *hdev,
				    struct hci_conn *conn)
{
	if (conn->state != BT_CONFIG || !conn->out)
		return 0;

	if (conn->pending_sec_level == BT_SECURITY_SDP)
		return 0;

	/* Only request authentication for SSP connections or non-SSP
	 * devices with sec_level MEDIUM or HIGH or if MITM protection
	 * is requested.
	 */
	if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
	    conn->pending_sec_level != BT_SECURITY_FIPS &&
	    conn->pending_sec_level != BT_SECURITY_HIGH &&
	    conn->pending_sec_level != BT_SECURITY_MEDIUM)
		return 0;

	return 1;
}

static int hci_resolve_name(struct hci_dev *hdev,
			    struct inquiry_entry *e)
{
	struct hci_cp_remote_name_req cp;

	memset(&cp, 0, sizeof(cp));

	bacpy(&cp.bdaddr, &e->data.bdaddr);
	cp.pscan_rep_mode = e->data.pscan_rep_mode;
	cp.pscan_mode = e->data.pscan_mode;
	cp.clock_offset = e->data.clock_offset;

	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
}
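
/* Pick the next discovered device with an unresolved name and issue a
 * Remote Name Request for it, bounded by name_resolve_timeout so name
 * resolution cannot stall discovery indefinitely.
 */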
static bool hci_resolve_next_name(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	if (list_empty(&discov->resolve))
		return false;

	/* We should stop if we already spent too much time resolving names. */
	if (time_after(jiffies, discov->name_resolve_timeout)) {
		bt_dev_warn_ratelimited(hdev, "Name resolve takes too long.");
		return false;
	}

	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (!e)
		return false;

	if (hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		return true;
	}

	return false;
}

static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	/* Update the mgmt connected state if necessary. Be careful with
	 * conn objects that exist but are not (yet) connected however.
	 * Only those in BT_CONFIG or BT_CONNECTED states can be
	 * considered connected.
	 */
	if (conn &&
	    (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, name, name_len);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in the list of devices whose names
	 * are pending resolution, there is no need to continue resolving
	 * the next name, as that will be done upon receiving another
	 * Remote Name Request Complete event.
	 */
	if (!e)
		return;

	list_del(&e->list);

	e->name_state = name ? NAME_KNOWN : NAME_NOT_KNOWN;
	mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00, e->data.rssi,
			 name, name_len);

	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}

static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* If successful, wait for the name req complete event before
	 * checking for the need to do authentication.
	 */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested auth_cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		auth_cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(auth_cp), &auth_cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
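
/* A failed remote feature read during connection setup aborts the
 * BT_CONFIG stage for that connection.
 */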
static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_read_remote_features *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_read_remote_ext_features *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_setup_sync_conn *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_enhanced_setup_sync_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_enhanced_setup_sync_conn *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

	bt_dev_dbg(hdev, "handle 0x%4.4x", handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_sniff_mode *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, status);
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_exit_sniff_mode *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, status);
	}

	hci_dev_unlock(hdev);
}
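/* Disconnect is normally finalized from HCI_EV_DISCONN_COMPLETE; this
 * status handler only has to act when the command failed or when the
 * controller is suspended and the completion event will not arrive.
 */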
static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_disconnect *cp;
	struct hci_conn_params *params;
	struct hci_conn *conn;
	bool mgmt_conn;

	/* Wait for HCI_EV_DISCONN_COMPLETE if status 0x00 and not suspended,
	 * otherwise cleanup the connection immediately.
	 */
	if (!status && !hdev->suspended)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (!conn)
		goto unlock;

	if (status) {
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, status);

		if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
			hdev->cur_adv_instance = conn->adv_instance;
			hci_enable_advertising(hdev);
		}

		goto done;
	}

	mgmt_conn = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);

	if (conn->type == ACL_LINK) {
		if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
			hci_remove_link_key(hdev, &conn->dst);
	}

	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_LINK_LOSS:
			if (cp->reason != HCI_ERROR_CONNECTION_TIMEOUT)
				break;
			fallthrough;

		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			list_del_init(&params->action);
			list_add(&params->action, &hdev->pend_le_conns);
			break;

		default:
			break;
		}
	}

	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
				 cp->reason, mgmt_conn);

	hci_disconn_cfm(conn, cp->reason);

done:
	/* If the disconnection failed for any reason, the upper layer
	 * does not retry to disconnect in current implementation.
	 * Hence, we need to do some basic cleanup here and re-enable
	 * advertising if necessary.
	 */
	hci_conn_del(conn);
unlock:
	hci_dev_unlock(hdev);
}

static u8 ev_bdaddr_type(struct hci_dev *hdev, u8 type, bool *resolved)
{
	/* When using controller based address resolution, then the new
	 * address types 0x02 and 0x03 are used. These types need to be
	 * converted back into either public address or random address type.
	 */
	switch (type) {
	case ADDR_LE_DEV_PUBLIC_RESOLVED:
		if (resolved)
			*resolved = true;
		return ADDR_LE_DEV_PUBLIC;
	case ADDR_LE_DEV_RANDOM_RESOLVED:
		if (resolved)
			*resolved = true;
		return ADDR_LE_DEV_RANDOM;
	}

	if (resolved)
		*resolved = false;
	return type;
}
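/* Common bookkeeping for LE Create Connection and its extended variant:
 * record the initiator/responder address pair needed by SMP and arm the
 * LE connection timeout when not connecting via the accept list.
 */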
static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
			      u8 peer_addr_type, u8 own_address_type,
			      u8 filter_policy)
{
	struct hci_conn *conn;

	conn = hci_conn_hash_lookup_le(hdev, peer_addr,
				       peer_addr_type);
	if (!conn)
		return;

	own_address_type = ev_bdaddr_type(hdev, own_address_type, NULL);

	/* Store the initiator and responder address information which
	 * is needed for SMP. These values will not change during the
	 * lifetime of the connection.
	 */
	conn->init_addr_type = own_address_type;
	if (own_address_type == ADDR_LE_DEV_RANDOM)
		bacpy(&conn->init_addr, &hdev->random_addr);
	else
		bacpy(&conn->init_addr, &hdev->bdaddr);

	conn->resp_addr_type = peer_addr_type;
	bacpy(&conn->resp_addr, peer_addr);

	/* We don't want the connection attempt to stick around
	 * indefinitely since LE doesn't have a page timeout concept
	 * like BR/EDR. Set a timer for any connection that doesn't use
	 * the accept list for connecting.
	 */
	if (filter_policy == HCI_LE_USE_PEER_ADDR)
		queue_delayed_work(conn->hdev->workqueue,
				   &conn->le_conn_timeout,
				   conn->conn_timeout);
}

static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_create_conn *cp;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* All connection failure handling is taken care of by the
	 * hci_le_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */
	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
			  cp->own_address_type, cp->filter_policy);

	hci_dev_unlock(hdev);
}

static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_ext_create_conn *cp;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* All connection failure handling is taken care of by the
	 * hci_le_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */
	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
			  cp->own_addr_type, cp->filter_policy);

	hci_dev_unlock(hdev);
}

static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_read_remote_features *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_start_enc *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (!conn)
		goto unlock;

	if (conn->state != BT_CONNECTED)
		goto unlock;

	hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
	hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}

static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_switch_role *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);

	hci_dev_unlock(hdev);
}
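/* Inquiry Complete: wake any waiter on HCI_INQUIRY and either move on to
 * name resolution of the discovered devices or mark discovery as stopped,
 * taking a possibly still-running simultaneous LE scan into account.
 */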
static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	hci_conn_check_pending(hdev);

	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		return;

	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	hci_dev_lock(hdev);

	if (discov->state != DISCOVERY_FINDING)
		goto unlock;

	if (list_empty(&discov->resolve)) {
		/* When BR/EDR inquiry is active and no LE scanning is in
		 * progress, then change discovery state to indicate completion.
		 *
		 * When running LE scanning and BR/EDR inquiry simultaneously
		 * and the LE scan already finished, then change the discovery
		 * state to indicate completion.
		 */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (e && hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
		discov->name_resolve_timeout = jiffies + NAME_RESOLVE_DURATION;
	} else {
		/* When BR/EDR inquiry is active and no LE scanning is in
		 * progress, then change discovery state to indicate completion.
		 *
		 * When running LE scanning and BR/EDR inquiry simultaneously
		 * and the LE scan already finished, then change the discovery
		 * state to indicate completion.
		 */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct inquiry_data data;
	struct inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp = *((__u8 *) skb->data);

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
		return;

	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
		return;

	hci_dev_lock(hdev);

	for (; num_rsp; num_rsp--, info++) {
		u32 flags;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode = info->pscan_rep_mode;
		data.pscan_period_mode = info->pscan_period_mode;
		data.pscan_mode = info->pscan_mode;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset = info->clock_offset;
		data.rssi = HCI_RSSI_INVALID;
		data.ssp_mode = 0x00;

		flags = hci_inquiry_cache_update(hdev, &data, false);

		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, HCI_RSSI_INVALID,
				  flags, NULL, 0, NULL, 0);
	}

	hci_dev_unlock(hdev);
}
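/* Connection Complete: bind the new handle to its hci_conn, kick off
 * remote feature discovery for ACL links and notify the upper layers.
 * A missing hci_conn can be legitimate for auto-connected or SCO links
 * and is resolved before the status is processed.
 */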
static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* Connection may not exist if auto-connected. Check the bredr
		 * allowlist to see if this device is allowed to auto connect.
		 * If link is an ACL type, create a connection class
		 * automatically.
		 *
		 * Auto-connect will only occur if the event filter is
		 * programmed with a given address. Right now, event filter is
		 * only used during suspend.
		 */
		if (ev->link_type == ACL_LINK &&
		    hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
						      &ev->bdaddr,
						      BDADDR_BREDR)) {
			conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
					    HCI_ROLE_SLAVE);
			if (!conn) {
				bt_dev_err(hdev, "no memory for new conn");
				goto unlock;
			}
		} else {
			if (ev->link_type != SCO_LINK)
				goto unlock;

			conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK,
						       &ev->bdaddr);
			if (!conn)
				goto unlock;

			conn->type = SCO_LINK;
		}
	}

	if (!ev->status) {
		conn->handle = __le16_to_cpu(ev->handle);

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);

		if (test_bit(HCI_AUTH, &hdev->flags))
			set_bit(HCI_CONN_AUTH, &conn->flags);

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);

			hci_req_update_scan(hdev);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}
	} else {
		conn->state = BT_CLOSED;
		if (conn->type == ACL_LINK)
			mgmt_connect_failed(hdev, &conn->dst, conn->type,
					    conn->dst_type, ev->status);
	}

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

	if (ev->status) {
		hci_connect_cfm(conn, ev->status);
		hci_conn_del(conn);
	} else if (ev->link_type == SCO_LINK) {
		switch (conn->setting & SCO_AIRMODE_MASK) {
		case SCO_AIRMODE_CVSD:
			if (hdev->notify)
				hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
			break;
		}

		hci_connect_cfm(conn, ev->status);
	}

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}

static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct hci_cp_reject_conn_req cp;

	bacpy(&cp.bdaddr, bdaddr);
	cp.reason = HCI_ERROR_REJ_BAD_ADDR;
	hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
}
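/* Incoming Connection Request: consult the protocol layers and the
 * reject/accept lists before accepting; the role and packet type used in
 * the accept command are derived from the local controller capabilities.
 */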
static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;
	struct inquiry_entry *ie;
	struct hci_conn *conn;
	__u8 flags = 0;

	BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
	       ev->link_type);

	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
				      &flags);

	if (!(mask & HCI_LM_ACCEPT)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	if (hci_bdaddr_list_lookup(&hdev->reject_list, &ev->bdaddr,
				   BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* Require HCI_CONNECTABLE or an accept list entry to accept the
	 * connection. These features are only touched through mgmt so
	 * only do the checks if HCI_MGMT is set.
	 */
	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
	    !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
	    !hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr,
					       BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* Connection accepted */

	hci_dev_lock(hdev);

	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		memcpy(ie->data.dev_class, ev->dev_class, 3);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
				       &ev->bdaddr);
	if (!conn) {
		conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
				    HCI_ROLE_SLAVE);
		if (!conn) {
			bt_dev_err(hdev, "no memory for new connection");
			hci_dev_unlock(hdev);
			return;
		}
	}

	memcpy(conn->dev_class, ev->dev_class, 3);

	hci_dev_unlock(hdev);

	if (ev->link_type == ACL_LINK ||
	    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
		struct hci_cp_accept_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);

		if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
			cp.role = 0x00; /* Become central */
		else
			cp.role = 0x01; /* Remain peripheral */

		hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
	} else if (!(flags & HCI_PROTO_DEFER)) {
		struct hci_cp_accept_sync_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.pkt_type = cpu_to_le16(conn->pkt_type);

		cp.tx_bandwidth = cpu_to_le32(0x00001f40);
		cp.rx_bandwidth = cpu_to_le32(0x00001f40);
		cp.max_latency = cpu_to_le16(0xffff);
		cp.content_format = cpu_to_le16(hdev->voice_setting);
		cp.retrans_effort = 0xff;

		hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
			     &cp);
	} else {
		conn->state = BT_CONNECT2;
		hci_connect_cfm(conn, 0);
	}
}

static u8 hci_to_mgmt_reason(u8 err)
{
	switch (err) {
	case HCI_ERROR_CONNECTION_TIMEOUT:
		return MGMT_DEV_DISCONN_TIMEOUT;
	case HCI_ERROR_REMOTE_USER_TERM:
	case HCI_ERROR_REMOTE_LOW_RESOURCES:
	case HCI_ERROR_REMOTE_POWER_OFF:
		return MGMT_DEV_DISCONN_REMOTE;
	case HCI_ERROR_LOCAL_HOST_TERM:
		return MGMT_DEV_DISCONN_LOCAL_HOST;
	default:
		return MGMT_DEV_DISCONN_UNKNOWN;
	}
}
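/* Disconnection Complete: tear down the hci_conn, re-queue auto-connect
 * parameters where configured, and re-enable advertising when an LE
 * peripheral link went away, since the controller stops advertising once
 * a connection is created (see the spec quote below).
 */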
static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = (void *) skb->data;
	u8 reason;
	struct hci_conn_params *params;
	struct hci_conn *conn;
	bool mgmt_connected;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->status) {
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, ev->status);
		goto unlock;
	}

	conn->state = BT_CLOSED;

	mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);

	if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
		reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
	else
		reason = hci_to_mgmt_reason(ev->reason);

	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
				 reason, mgmt_connected);

	if (conn->type == ACL_LINK) {
		if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
			hci_remove_link_key(hdev, &conn->dst);

		hci_req_update_scan(hdev);
	}

	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_LINK_LOSS:
			if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
				break;
			fallthrough;

		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			list_del_init(&params->action);
			list_add(&params->action, &hdev->pend_le_conns);
			hci_update_passive_scan(hdev);
			break;

		default:
			break;
		}
	}

	hci_disconn_cfm(conn, ev->reason);

	/* Re-enable advertising if necessary, since it might
	 * have been disabled by the connection. From the
	 * HCI_LE_Set_Advertise_Enable command description in
	 * the core specification (v4.0):
	 * "The Controller shall continue advertising until the Host
	 * issues an LE_Set_Advertise_Enable command with
	 * Advertising_Enable set to 0x00 (Advertising is disabled)
	 * or until a connection is created or until the Advertising
	 * is timed out due to Directed Advertising."
	 */
	if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
		hdev->cur_adv_instance = conn->adv_instance;
		hci_enable_advertising(hdev);
	}

	hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}

static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		if (!hci_conn_ssp_enabled(conn) &&
		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
			bt_dev_info(hdev, "re-auth of legacy device is not possible.");
		} else {
			set_bit(HCI_CONN_AUTH, &conn->flags);
			conn->sec_level = conn->pending_sec_level;
		}
	} else {
		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		mgmt_auth_failed(conn, ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
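/* Remote Name Request Complete: feed the result into pending mgmt name
 * resolution and, for outgoing connections still in BT_CONFIG, start
 * authentication now that the name exchange is out of the way.
 */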
static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto check_auth;

	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}

static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status,
				       u16 opcode, struct sk_buff *skb)
{
	const struct hci_rp_read_enc_key_size *rp;
	struct hci_conn *conn;
	u16 handle;

	BT_DBG("%s status 0x%02x", hdev->name, status);

	if (!skb || skb->len < sizeof(*rp)) {
		bt_dev_err(hdev, "invalid read key size response");
		return;
	}

	rp = (void *)skb->data;
	handle = le16_to_cpu(rp->handle);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn)
		goto unlock;

	/* While unexpected, the read_enc_key_size command may fail. The most
	 * secure approach is to then assume the key size is 0 to force a
	 * disconnection.
	 */
	if (rp->status) {
		bt_dev_err(hdev, "failed to read key size for handle %u",
			   handle);
		conn->enc_key_size = 0;
	} else {
		conn->enc_key_size = rp->key_size;
	}

	hci_encrypt_cfm(conn, 0);

unlock:
	hci_dev_unlock(hdev);
}
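/* Encryption Change: update the AUTH/ENCRYPT/FIPS/AES-CCM flags, verify
 * that the link still satisfies its security requirements, then read the
 * encryption key size (or assume the 16 octet default) before notifying
 * the upper layers via hci_encrypt_cfm().
 */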
static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (ev->encrypt) {
			/* Encryption implies authentication */
			set_bit(HCI_CONN_AUTH, &conn->flags);
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
			conn->sec_level = conn->pending_sec_level;

			/* P-256 authentication key implies FIPS */
			if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
				set_bit(HCI_CONN_FIPS, &conn->flags);

			if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
			    conn->type == LE_LINK)
				set_bit(HCI_CONN_AES_CCM, &conn->flags);
		} else {
			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}
	}

	/* We should disregard the current RPA and generate a new one
	 * whenever the encryption procedure fails.
	 */
	if (ev->status && conn->type == LE_LINK) {
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
	}

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* Check link security requirements are met */
	if (!hci_conn_check_link_mode(conn))
		ev->status = HCI_ERROR_AUTH_FAILURE;

	if (ev->status && conn->state == BT_CONNECTED) {
		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		/* Notify upper layers so they can cleanup before
		 * disconnecting.
		 */
		hci_encrypt_cfm(conn, ev->status);
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	/* Try reading the encryption key size for encrypted ACL links */
	if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
		struct hci_cp_read_enc_key_size cp;
		struct hci_request req;

		/* Only send HCI_Read_Encryption_Key_Size if the
		 * controller really supports it. If it doesn't, assume
		 * the default size (16).
		 */
		if (!(hdev->commands[20] & 0x10)) {
			conn->enc_key_size = HCI_LINK_KEY_SIZE;
			goto notify;
		}

		hci_req_init(&req, hdev);

		cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp);

		if (hci_req_run_skb(&req, read_enc_key_size_complete)) {
			bt_dev_err(hdev, "sending read key size failed");
			conn->enc_key_size = HCI_LINK_KEY_SIZE;
			goto notify;
		}

		goto unlock;
	}
	/* Set the default Authenticated Payload Timeout after
	 * an LE Link is established. As per Core Spec v5.0, Vol 2, Part B
	 * Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be
	 * sent when the link is active and Encryption is enabled. The conn
	 * type can be either LE or ACL, the controller must support LMP
	 * Ping, and AES-CCM encryption must be in use as well.
	 */
	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
	    test_bit(HCI_CONN_AES_CCM, &conn->flags) &&
	    ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) ||
	     (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) {
		struct hci_cp_write_auth_payload_to cp;

		cp.handle = cpu_to_le16(conn->handle);
		cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
		hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,
			     sizeof(cp), &cp);
	}

notify:
	hci_encrypt_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);
}

static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		if (!ev->status)
			set_bit(HCI_CONN_SECURE, &conn->flags);

		clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);

		hci_key_change_cfm(conn, ev->status);
	}

	hci_dev_unlock(hdev);
}

static void hci_remote_features_evt(struct hci_dev *hdev,
				    struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		memcpy(conn->features[0], ev->features, 8);

	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status && lmp_ext_feat_capable(hdev) &&
	    lmp_ext_feat_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
			     sizeof(cp), &cp);
		goto unlock;
	}

	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, NULL, 0);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}

static inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev, u8 ncmd)
{
	cancel_delayed_work(&hdev->cmd_timer);

	if (!test_bit(HCI_RESET, &hdev->flags)) {
		if (ncmd) {
			cancel_delayed_work(&hdev->ncmd_timer);
			atomic_set(&hdev->cmd_cnt, 1);
		} else {
			schedule_delayed_work(&hdev->ncmd_timer,
					      HCI_NCMD_TIMEOUT);
		}
	}
}
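/* Command Complete: dispatch to the per-opcode hci_cc_* handler, refresh
 * the command credit accounting via handle_cmd_cnt_and_timer() and
 * complete any hci_request waiting on this opcode.
 */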
static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
				 u16 *opcode, u8 *status,
				 hci_req_complete_t *req_complete,
				 hci_req_complete_skb_t *req_complete_skb)
{
	struct hci_ev_cmd_complete *ev = (void *) skb->data;

	*opcode = __le16_to_cpu(ev->opcode);
	*status = skb->data[sizeof(*ev)];

	skb_pull(skb, sizeof(*ev));

	switch (*opcode) {
	case HCI_OP_INQUIRY_CANCEL:
		hci_cc_inquiry_cancel(hdev, skb, status);
		break;

	case HCI_OP_PERIODIC_INQ:
		hci_cc_periodic_inq(hdev, skb);
		break;

	case HCI_OP_EXIT_PERIODIC_INQ:
		hci_cc_exit_periodic_inq(hdev, skb);
		break;

	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
		hci_cc_remote_name_req_cancel(hdev, skb);
		break;

	case HCI_OP_ROLE_DISCOVERY:
		hci_cc_role_discovery(hdev, skb);
		break;

	case HCI_OP_READ_LINK_POLICY:
		hci_cc_read_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_LINK_POLICY:
		hci_cc_write_link_policy(hdev, skb);
		break;

	case HCI_OP_READ_DEF_LINK_POLICY:
		hci_cc_read_def_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_DEF_LINK_POLICY:
		hci_cc_write_def_link_policy(hdev, skb);
		break;

	case HCI_OP_RESET:
		hci_cc_reset(hdev, skb);
		break;

	case HCI_OP_READ_STORED_LINK_KEY:
		hci_cc_read_stored_link_key(hdev, skb);
		break;

	case HCI_OP_DELETE_STORED_LINK_KEY:
		hci_cc_delete_stored_link_key(hdev, skb);
		break;

	case HCI_OP_WRITE_LOCAL_NAME:
		hci_cc_write_local_name(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_NAME:
		hci_cc_read_local_name(hdev, skb);
		break;

	case HCI_OP_WRITE_AUTH_ENABLE:
		hci_cc_write_auth_enable(hdev, skb);
		break;

	case HCI_OP_WRITE_ENCRYPT_MODE:
		hci_cc_write_encrypt_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SCAN_ENABLE:
		hci_cc_write_scan_enable(hdev, skb);
		break;

	case HCI_OP_SET_EVENT_FLT:
		hci_cc_set_event_filter(hdev, skb);
		break;

	case HCI_OP_READ_CLASS_OF_DEV:
		hci_cc_read_class_of_dev(hdev, skb);
		break;

	case HCI_OP_WRITE_CLASS_OF_DEV:
		hci_cc_write_class_of_dev(hdev, skb);
		break;

	case HCI_OP_READ_VOICE_SETTING:
		hci_cc_read_voice_setting(hdev, skb);
		break;

	case HCI_OP_WRITE_VOICE_SETTING:
		hci_cc_write_voice_setting(hdev, skb);
		break;

	case HCI_OP_READ_NUM_SUPPORTED_IAC:
		hci_cc_read_num_supported_iac(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_MODE:
		hci_cc_write_ssp_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SC_SUPPORT:
		hci_cc_write_sc_support(hdev, skb);
		break;

	case HCI_OP_READ_AUTH_PAYLOAD_TO:
		hci_cc_read_auth_payload_timeout(hdev, skb);
		break;

	case HCI_OP_WRITE_AUTH_PAYLOAD_TO:
		hci_cc_write_auth_payload_timeout(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_VERSION:
		hci_cc_read_local_version(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_COMMANDS:
		hci_cc_read_local_commands(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_FEATURES:
		hci_cc_read_local_features(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_EXT_FEATURES:
		hci_cc_read_local_ext_features(hdev, skb);
		break;

	case HCI_OP_READ_BUFFER_SIZE:
		hci_cc_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_READ_BD_ADDR:
		hci_cc_read_bd_addr(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_PAIRING_OPTS:
		hci_cc_read_local_pairing_opts(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
		hci_cc_read_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
		hci_cc_write_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_TYPE:
		hci_cc_read_page_scan_type(hdev, skb);
		break;
	case HCI_OP_WRITE_PAGE_SCAN_TYPE:
		hci_cc_write_page_scan_type(hdev, skb);
		break;

	case HCI_OP_READ_DATA_BLOCK_SIZE:
		hci_cc_read_data_block_size(hdev, skb);
		break;

	case HCI_OP_READ_FLOW_CONTROL_MODE:
		hci_cc_read_flow_control_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_INFO:
		hci_cc_read_local_amp_info(hdev, skb);
		break;

	case HCI_OP_READ_CLOCK:
		hci_cc_read_clock(hdev, skb);
		break;

	case HCI_OP_READ_INQ_RSP_TX_POWER:
		hci_cc_read_inq_rsp_tx_power(hdev, skb);
		break;

	case HCI_OP_READ_DEF_ERR_DATA_REPORTING:
		hci_cc_read_def_err_data_reporting(hdev, skb);
		break;

	case HCI_OP_WRITE_DEF_ERR_DATA_REPORTING:
		hci_cc_write_def_err_data_reporting(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_REPLY:
		hci_cc_pin_code_reply(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_NEG_REPLY:
		hci_cc_pin_code_neg_reply(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_DATA:
		hci_cc_read_local_oob_data(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
		hci_cc_read_local_oob_ext_data(hdev, skb);
		break;

	case HCI_OP_LE_READ_BUFFER_SIZE:
		hci_cc_le_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_LE_READ_LOCAL_FEATURES:
		hci_cc_le_read_local_features(hdev, skb);
		break;

	case HCI_OP_LE_READ_ADV_TX_POWER:
		hci_cc_le_read_adv_tx_power(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_REPLY:
		hci_cc_user_confirm_reply(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_NEG_REPLY:
		hci_cc_user_confirm_neg_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_REPLY:
		hci_cc_user_passkey_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_NEG_REPLY:
		hci_cc_user_passkey_neg_reply(hdev, skb);
		break;

	case HCI_OP_LE_SET_RANDOM_ADDR:
		hci_cc_le_set_random_addr(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_ENABLE:
		hci_cc_le_set_adv_enable(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_PARAM:
		hci_cc_le_set_scan_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_ENABLE:
		hci_cc_le_set_scan_enable(hdev, skb);
		break;

	case HCI_OP_LE_READ_ACCEPT_LIST_SIZE:
		hci_cc_le_read_accept_list_size(hdev, skb);
		break;

	case HCI_OP_LE_CLEAR_ACCEPT_LIST:
		hci_cc_le_clear_accept_list(hdev, skb);
		break;

	case HCI_OP_LE_ADD_TO_ACCEPT_LIST:
		hci_cc_le_add_to_accept_list(hdev, skb);
		break;

	case HCI_OP_LE_DEL_FROM_ACCEPT_LIST:
		hci_cc_le_del_from_accept_list(hdev, skb);
		break;

	case HCI_OP_LE_READ_SUPPORTED_STATES:
		hci_cc_le_read_supported_states(hdev, skb);
		break;

	case HCI_OP_LE_READ_DEF_DATA_LEN:
		hci_cc_le_read_def_data_len(hdev, skb);
		break;

	case HCI_OP_LE_WRITE_DEF_DATA_LEN:
		hci_cc_le_write_def_data_len(hdev, skb);
		break;

	case HCI_OP_LE_ADD_TO_RESOLV_LIST:
		hci_cc_le_add_to_resolv_list(hdev, skb);
		break;

	case HCI_OP_LE_DEL_FROM_RESOLV_LIST:
		hci_cc_le_del_from_resolv_list(hdev, skb);
		break;

	case HCI_OP_LE_CLEAR_RESOLV_LIST:
		hci_cc_le_clear_resolv_list(hdev, skb);
		break;

	case HCI_OP_LE_READ_RESOLV_LIST_SIZE:
		hci_cc_le_read_resolv_list_size(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADDR_RESOLV_ENABLE:
		hci_cc_le_set_addr_resolution_enable(hdev, skb);
		break;
	case HCI_OP_LE_READ_MAX_DATA_LEN:
		hci_cc_le_read_max_data_len(hdev, skb);
		break;

	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
		hci_cc_write_le_host_supported(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_PARAM:
		hci_cc_set_adv_param(hdev, skb);
		break;

	case HCI_OP_READ_RSSI:
		hci_cc_read_rssi(hdev, skb);
		break;

	case HCI_OP_READ_TX_POWER:
		hci_cc_read_tx_power(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_DEBUG_MODE:
		hci_cc_write_ssp_debug_mode(hdev, skb);
		break;

	case HCI_OP_LE_SET_EXT_SCAN_PARAMS:
		hci_cc_le_set_ext_scan_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_EXT_SCAN_ENABLE:
		hci_cc_le_set_ext_scan_enable(hdev, skb);
		break;

	case HCI_OP_LE_SET_DEFAULT_PHY:
		hci_cc_le_set_default_phy(hdev, skb);
		break;

	case HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS:
		hci_cc_le_read_num_adv_sets(hdev, skb);
		break;

	case HCI_OP_LE_SET_EXT_ADV_PARAMS:
		hci_cc_set_ext_adv_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_EXT_ADV_ENABLE:
		hci_cc_le_set_ext_adv_enable(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_SET_RAND_ADDR:
		hci_cc_le_set_adv_set_random_addr(hdev, skb);
		break;

	case HCI_OP_LE_REMOVE_ADV_SET:
		hci_cc_le_remove_adv_set(hdev, skb);
		break;

	case HCI_OP_LE_CLEAR_ADV_SETS:
		hci_cc_le_clear_adv_sets(hdev, skb);
		break;

	case HCI_OP_LE_READ_TRANSMIT_POWER:
		hci_cc_le_read_transmit_power(hdev, skb);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
		break;
	}

	handle_cmd_cnt_and_timer(hdev, ev->ncmd);

	hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
			     req_complete_skb);

	if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
		bt_dev_err(hdev,
			   "unexpected event for opcode 0x%4.4x", *opcode);
		return;
	}

	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
		queue_work(hdev->workqueue, &hdev->cmd_work);
}
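/* Command Status: the counterpart of hci_cmd_complete_evt for commands
 * that only report acceptance; a non-zero status here is the failure path
 * handled by the hci_cs_* functions above.
 */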
static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
			       u16 *opcode, u8 *status,
			       hci_req_complete_t *req_complete,
			       hci_req_complete_skb_t *req_complete_skb)
{
	struct hci_ev_cmd_status *ev = (void *) skb->data;

	skb_pull(skb, sizeof(*ev));

	*opcode = __le16_to_cpu(ev->opcode);
	*status = ev->status;

	switch (*opcode) {
	case HCI_OP_INQUIRY:
		hci_cs_inquiry(hdev, ev->status);
		break;

	case HCI_OP_CREATE_CONN:
		hci_cs_create_conn(hdev, ev->status);
		break;

	case HCI_OP_DISCONNECT:
		hci_cs_disconnect(hdev, ev->status);
		break;

	case HCI_OP_ADD_SCO:
		hci_cs_add_sco(hdev, ev->status);
		break;

	case HCI_OP_AUTH_REQUESTED:
		hci_cs_auth_requested(hdev, ev->status);
		break;

	case HCI_OP_SET_CONN_ENCRYPT:
		hci_cs_set_conn_encrypt(hdev, ev->status);
		break;

	case HCI_OP_REMOTE_NAME_REQ:
		hci_cs_remote_name_req(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_FEATURES:
		hci_cs_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_EXT_FEATURES:
		hci_cs_read_remote_ext_features(hdev, ev->status);
		break;

	case HCI_OP_SETUP_SYNC_CONN:
		hci_cs_setup_sync_conn(hdev, ev->status);
		break;

	case HCI_OP_ENHANCED_SETUP_SYNC_CONN:
		hci_cs_enhanced_setup_sync_conn(hdev, ev->status);
		break;

	case HCI_OP_SNIFF_MODE:
		hci_cs_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_EXIT_SNIFF_MODE:
		hci_cs_exit_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_SWITCH_ROLE:
		hci_cs_switch_role(hdev, ev->status);
		break;

	case HCI_OP_LE_CREATE_CONN:
		hci_cs_le_create_conn(hdev, ev->status);
		break;

	case HCI_OP_LE_READ_REMOTE_FEATURES:
		hci_cs_le_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_LE_START_ENC:
		hci_cs_le_start_enc(hdev, ev->status);
		break;

	case HCI_OP_LE_EXT_CREATE_CONN:
		hci_cs_le_ext_create_conn(hdev, ev->status);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
		break;
	}

	handle_cmd_cnt_and_timer(hdev, ev->ncmd);

	/* Indicate request completion if the command failed. Also, if
	 * we're not waiting for a special event and we get a success
	 * command status we should try to flag the request as completed
	 * (since for this kind of command there will not be a command
	 * complete event).
	 */
	if (ev->status ||
	    (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->hci.req_event))
		hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
				     req_complete_skb);

	if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
		bt_dev_err(hdev,
			   "unexpected event for opcode 0x%4.4x", *opcode);
		return;
	}

	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
		queue_work(hdev->workqueue, &hdev->cmd_work);
}

static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_hardware_error *ev = (void *) skb->data;

	hdev->hw_error_code = ev->code;

	queue_work(hdev->req_workqueue, &hdev->error_reset);
}

static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_role_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		if (!ev->status)
			conn->role = ev->role;

		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);

		hci_role_switch_cfm(conn, ev->status, ev->role);
	}

	hci_dev_unlock(hdev);
}
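/* Number of Completed Packets: return per-connection transmit credits to
 * the appropriate ACL/LE/SCO pool, capped at the controller-reported
 * maximum, and kick the TX work queue.
 */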
static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
	int i;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
		bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	if (skb->len < sizeof(*ev) ||
	    skb->len < struct_size(ev, handles, ev->num_hndl)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_pkts_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16 handle, count;

		handle = __le16_to_cpu(info->handle);
		count = __le16_to_cpu(info->count);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= count;

		switch (conn->type) {
		case ACL_LINK:
			hdev->acl_cnt += count;
			if (hdev->acl_cnt > hdev->acl_pkts)
				hdev->acl_cnt = hdev->acl_pkts;
			break;

		case LE_LINK:
			if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		case SCO_LINK:
			hdev->sco_cnt += count;
			if (hdev->sco_cnt > hdev->sco_pkts)
				hdev->sco_cnt = hdev->sco_pkts;
			break;

		default:
			bt_dev_err(hdev, "unknown type %d conn %p",
				   conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}

static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
						 __u16 handle)
{
	struct hci_chan *chan;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		return hci_conn_hash_lookup_handle(hdev, handle);
	case HCI_AMP:
		chan = hci_chan_lookup_handle(hdev, handle);
		if (chan)
			return chan->conn;
		break;
	default:
		bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
		break;
	}

	return NULL;
}

static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
	int i;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
		bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	if (skb->len < sizeof(*ev) ||
	    skb->len < struct_size(ev, handles, ev->num_hndl)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
	       ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_blocks_info *info = &ev->handles[i];
		struct hci_conn *conn = NULL;
		__u16 handle, block_count;

		handle = __le16_to_cpu(info->handle);
		block_count = __le16_to_cpu(info->blocks);

		conn = __hci_conn_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= block_count;

		switch (conn->type) {
		case ACL_LINK:
		case AMP_LINK:
			hdev->block_cnt += block_count;
			if (hdev->block_cnt > hdev->num_blocks)
				hdev->block_cnt = hdev->num_blocks;
			break;

		default:
			bt_dev_err(hdev, "unknown type %d conn %p",
				   conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}

static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_mode_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		conn->mode = ev->mode;

		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
					&conn->flags)) {
			if (conn->mode == HCI_CM_ACTIVE)
				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
			else
				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
		}

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, ev->status);
	}

	hci_dev_unlock(hdev);
}
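/* PIN Code Request: reply negatively right away when the device is not
 * bondable and we did not initiate authentication, otherwise hand the
 * request to user space through mgmt.
 */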
static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pin_code_req *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	if (conn->state == BT_CONNECTED) {
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
		hci_conn_drop(conn);
	}

	if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
	    !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
	} else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
		u8 secure;

		if (conn->pending_sec_level == BT_SECURITY_HIGH)
			secure = 1;
		else
			secure = 0;

		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
	}

unlock:
	hci_dev_unlock(hdev);
}

static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
{
	if (key_type == HCI_LK_CHANGED_COMBINATION)
		return;

	conn->pin_length = pin_len;
	conn->key_type = key_type;

	switch (key_type) {
	case HCI_LK_LOCAL_UNIT:
	case HCI_LK_REMOTE_UNIT:
	case HCI_LK_DEBUG_COMBINATION:
		return;
	case HCI_LK_COMBINATION:
		if (pin_len == 16)
			conn->pending_sec_level = BT_SECURITY_HIGH;
		else
			conn->pending_sec_level = BT_SECURITY_MEDIUM;
		break;
	case HCI_LK_UNAUTH_COMBINATION_P192:
	case HCI_LK_UNAUTH_COMBINATION_P256:
		conn->pending_sec_level = BT_SECURITY_MEDIUM;
		break;
	case HCI_LK_AUTH_COMBINATION_P192:
		conn->pending_sec_level = BT_SECURITY_HIGH;
		break;
	case HCI_LK_AUTH_COMBINATION_P256:
		conn->pending_sec_level = BT_SECURITY_FIPS;
		break;
	}
}

static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = (void *) skb->data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	BT_DBG("%s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		BT_DBG("%s link key not found for %pMR", hdev->name,
		       &ev->bdaddr);
		goto not_found;
	}

	BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
	       &ev->bdaddr);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);

		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
			BT_DBG("%s ignoring unauthenticated key", hdev->name);
			goto not_found;
		}

		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
		    (conn->pending_sec_level == BT_SECURITY_HIGH ||
		     conn->pending_sec_level == BT_SECURITY_FIPS)) {
			BT_DBG("%s ignoring key unauthenticated for high security",
			       hdev->name);
			goto not_found;
		}

		conn_set_key(conn, key->type, key->pin_len);
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}
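/* Link Key Notification: store the new key, derive the resulting security
 * level via conn_set_key() and decide whether the key should persist or
 * be flushed when the connection goes down.
 */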
static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = (void *) skb->data;
	struct hci_conn *conn;
	struct link_key *key;
	bool persistent;
	u8 pin_len = 0;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	hci_conn_hold(conn);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(conn);

	set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
	conn_set_key(conn, ev->key_type, conn->pin_length);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
			       ev->key_type, pin_len, &persistent);
	if (!key)
		goto unlock;

	/* Update connection information since adding the key will have
	 * fixed up the type in the case of changed combination keys.
	 */
	if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
		conn_set_key(conn, key->type, key->pin_len);

	mgmt_new_link_key(hdev, key, persistent);

	/* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
	 * is set. If it's not set simply remove the key from the kernel
	 * list (we've still notified user space about it but with
	 * store_hint being 0).
	 */
	if (key->type == HCI_LK_DEBUG_COMBINATION &&
	    !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
		goto unlock;
	}

	if (persistent)
		clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
	else
		set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);

unlock:
	hci_dev_unlock(hdev);
}

static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_clock_offset *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn && !ev->status) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie) {
			ie->data.clock_offset = ev->clock_offset;
			ie->timestamp = jiffies;
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pkt_type_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn && !ev->status)
		conn->pkt_type = __le16_to_cpu(ev->pkt_type);

	hci_dev_unlock(hdev);
}

static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
	struct inquiry_entry *ie;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie) {
		ie->data.pscan_rep_mode = ev->pscan_rep_mode;
		ie->timestamp = jiffies;
	}

	hci_dev_unlock(hdev);
}
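/* Inquiry Result with RSSI arrives in two wire formats, with and without
 * a pscan_mode field; the per-response length is used below to tell the
 * two apart before updating the inquiry cache.
 */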
!= sizeof(struct inquiry_info_with_rssi)) { 4414 struct inquiry_info_with_rssi_and_pscan_mode *info; 4415 info = (void *) (skb->data + 1); 4416 4417 if (skb->len < num_rsp * sizeof(*info) + 1) 4418 goto unlock; 4419 4420 for (; num_rsp; num_rsp--, info++) { 4421 u32 flags; 4422 4423 bacpy(&data.bdaddr, &info->bdaddr); 4424 data.pscan_rep_mode = info->pscan_rep_mode; 4425 data.pscan_period_mode = info->pscan_period_mode; 4426 data.pscan_mode = info->pscan_mode; 4427 memcpy(data.dev_class, info->dev_class, 3); 4428 data.clock_offset = info->clock_offset; 4429 data.rssi = info->rssi; 4430 data.ssp_mode = 0x00; 4431 4432 flags = hci_inquiry_cache_update(hdev, &data, false); 4433 4434 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 4435 info->dev_class, info->rssi, 4436 flags, NULL, 0, NULL, 0); 4437 } 4438 } else { 4439 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1); 4440 4441 if (skb->len < num_rsp * sizeof(*info) + 1) 4442 goto unlock; 4443 4444 for (; num_rsp; num_rsp--, info++) { 4445 u32 flags; 4446 4447 bacpy(&data.bdaddr, &info->bdaddr); 4448 data.pscan_rep_mode = info->pscan_rep_mode; 4449 data.pscan_period_mode = info->pscan_period_mode; 4450 data.pscan_mode = 0x00; 4451 memcpy(data.dev_class, info->dev_class, 3); 4452 data.clock_offset = info->clock_offset; 4453 data.rssi = info->rssi; 4454 data.ssp_mode = 0x00; 4455 4456 flags = hci_inquiry_cache_update(hdev, &data, false); 4457 4458 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 4459 info->dev_class, info->rssi, 4460 flags, NULL, 0, NULL, 0); 4461 } 4462 } 4463 4464 unlock: 4465 hci_dev_unlock(hdev); 4466 } 4467 4468 static void hci_remote_ext_features_evt(struct hci_dev *hdev, 4469 struct sk_buff *skb) 4470 { 4471 struct hci_ev_remote_ext_features *ev = (void *) skb->data; 4472 struct hci_conn *conn; 4473 4474 BT_DBG("%s", hdev->name); 4475 4476 hci_dev_lock(hdev); 4477 4478 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 4479 if (!conn) 4480 goto unlock; 4481 4482 if (ev->page < HCI_MAX_PAGES) 4483 memcpy(conn->features[ev->page], ev->features, 8); 4484 4485 if (!ev->status && ev->page == 0x01) { 4486 struct inquiry_entry *ie; 4487 4488 ie = hci_inquiry_cache_lookup(hdev, &conn->dst); 4489 if (ie) 4490 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP); 4491 4492 if (ev->features[0] & LMP_HOST_SSP) { 4493 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags); 4494 } else { 4495 /* It is mandatory by the Bluetooth specification that 4496 * Extended Inquiry Results are only used when Secure 4497 * Simple Pairing is enabled, but some devices violate 4498 * this. 
4499 * 4500 * To make these devices work, the internal SSP 4501 * enabled flag needs to be cleared if the remote host 4502 * features do not indicate SSP support */ 4503 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags); 4504 } 4505 4506 if (ev->features[0] & LMP_HOST_SC) 4507 set_bit(HCI_CONN_SC_ENABLED, &conn->flags); 4508 } 4509 4510 if (conn->state != BT_CONFIG) 4511 goto unlock; 4512 4513 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) { 4514 struct hci_cp_remote_name_req cp; 4515 memset(&cp, 0, sizeof(cp)); 4516 bacpy(&cp.bdaddr, &conn->dst); 4517 cp.pscan_rep_mode = 0x02; 4518 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp); 4519 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) 4520 mgmt_device_connected(hdev, conn, NULL, 0); 4521 4522 if (!hci_outgoing_auth_needed(hdev, conn)) { 4523 conn->state = BT_CONNECTED; 4524 hci_connect_cfm(conn, ev->status); 4525 hci_conn_drop(conn); 4526 } 4527 4528 unlock: 4529 hci_dev_unlock(hdev); 4530 } 4531 4532 static void hci_sync_conn_complete_evt(struct hci_dev *hdev, 4533 struct sk_buff *skb) 4534 { 4535 struct hci_ev_sync_conn_complete *ev = (void *) skb->data; 4536 struct hci_conn *conn; 4537 4538 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 4539 4540 hci_dev_lock(hdev); 4541 4542 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); 4543 if (!conn) { 4544 if (ev->link_type == ESCO_LINK) 4545 goto unlock; 4546 4547 /* When the link type in the event indicates SCO connection 4548 * and lookup of the connection object fails, then check 4549 * if an eSCO connection object exists. 4550 * 4551 * The core limits the synchronous connections to either 4552 * SCO or eSCO. The eSCO connection is preferred and tried 4553 * to be setup first and until successfully established, 4554 * the link type will be hinted as eSCO. 4555 */ 4556 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr); 4557 if (!conn) 4558 goto unlock; 4559 } 4560 4561 switch (ev->status) { 4562 case 0x00: 4563 /* The synchronous connection complete event should only be 4564 * sent once per new connection. Receiving a successful 4565 * complete event when the connection status is already 4566 * BT_CONNECTED means that the device is misbehaving and sent 4567 * multiple complete event packets for the same new connection. 4568 * 4569 * Registering the device more than once can corrupt kernel 4570 * memory, hence upon detecting this invalid event, we report 4571 * an error and ignore the packet. 
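*
* Example of the misbehaving sequence this check guards against:
*   Synchronous Connect Complete (status 0x00) for handle X
*   Synchronous Connect Complete (status 0x00) for handle X again
* Without the state check below, the second event would register
* the same hci_conn with debugfs and sysfs twice.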
4572 */ 4573 if (conn->state == BT_CONNECTED) { 4574 bt_dev_err(hdev, "Ignoring connect complete event for existing connection"); 4575 goto unlock; 4576 } 4577 4578 conn->handle = __le16_to_cpu(ev->handle); 4579 conn->state = BT_CONNECTED; 4580 conn->type = ev->link_type; 4581 4582 hci_debugfs_create_conn(conn); 4583 hci_conn_add_sysfs(conn); 4584 break; 4585 4586 case 0x10: /* Connection Accept Timeout */ 4587 case 0x0d: /* Connection Rejected due to Limited Resources */ 4588 case 0x11: /* Unsupported Feature or Parameter Value */ 4589 case 0x1c: /* SCO interval rejected */ 4590 case 0x1a: /* Unsupported Remote Feature */ 4591 case 0x1e: /* Invalid LMP Parameters */ 4592 case 0x1f: /* Unspecified error */ 4593 case 0x20: /* Unsupported LMP Parameter value */ 4594 if (conn->out) { 4595 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) | 4596 (hdev->esco_type & EDR_ESCO_MASK); 4597 if (hci_setup_sync(conn, conn->link->handle)) 4598 goto unlock; 4599 } 4600 fallthrough; 4601 4602 default: 4603 conn->state = BT_CLOSED; 4604 break; 4605 } 4606 4607 bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode); 4608 /* Notify only for SCO over the HCI transport data path, indicated by 4609 * data_path == 0; a non-zero value means a non-HCI transport data path. 4610 */ 4611 if (conn->codec.data_path == 0 && hdev->notify) { 4612 switch (ev->air_mode) { 4613 case 0x02: 4614 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD); 4615 break; 4616 case 0x03: 4617 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP); 4618 break; 4619 } 4620 } 4621 4622 hci_connect_cfm(conn, ev->status); 4623 if (ev->status) 4624 hci_conn_del(conn); 4625 4626 unlock: 4627 hci_dev_unlock(hdev); 4628 } 4629 4630 static inline size_t eir_get_length(u8 *eir, size_t eir_len) 4631 { 4632 size_t parsed = 0; 4633 4634 while (parsed < eir_len) { 4635 u8 field_len = eir[0]; 4636 4637 if (field_len == 0) 4638 return parsed; 4639 4640 parsed += field_len + 1; 4641 eir += field_len + 1; 4642 } 4643 4644 return eir_len; 4645 } 4646 4647 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, 4648 struct sk_buff *skb) 4649 { 4650 struct inquiry_data data; 4651 struct extended_inquiry_info *info = (void *) (skb->data + 1); 4652 int num_rsp = *((__u8 *) skb->data); 4653 size_t eir_len; 4654 4655 BT_DBG("%s num_rsp %d", hdev->name, num_rsp); 4656 4657 if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1) 4658 return; 4659 4660 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) 4661 return; 4662 4663 hci_dev_lock(hdev); 4664 4665 for (; num_rsp; num_rsp--, info++) { 4666 u32 flags; 4667 bool name_known; 4668 4669 bacpy(&data.bdaddr, &info->bdaddr); 4670 data.pscan_rep_mode = info->pscan_rep_mode; 4671 data.pscan_period_mode = info->pscan_period_mode; 4672 data.pscan_mode = 0x00; 4673 memcpy(data.dev_class, info->dev_class, 3); 4674 data.clock_offset = info->clock_offset; 4675 data.rssi = info->rssi; 4676 data.ssp_mode = 0x01; 4677 4678 if (hci_dev_test_flag(hdev, HCI_MGMT)) 4679 name_known = eir_get_data(info->data, 4680 sizeof(info->data), 4681 EIR_NAME_COMPLETE, NULL); 4682 else 4683 name_known = true; 4684 4685 flags = hci_inquiry_cache_update(hdev, &data, name_known); 4686 4687 eir_len = eir_get_length(info->data, sizeof(info->data)); 4688 4689 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 4690 info->dev_class, info->rssi, 4691 flags, info->data, eir_len, NULL, 0); 4692 } 4693 4694 hci_dev_unlock(hdev); 4695 } 4696 4697 static void hci_key_refresh_complete_evt(struct hci_dev *hdev, 4698 struct sk_buff *skb) 4699 { 4700 struct
hci_ev_key_refresh_complete *ev = (void *) skb->data; 4701 struct hci_conn *conn; 4702 4703 BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status, 4704 __le16_to_cpu(ev->handle)); 4705 4706 hci_dev_lock(hdev); 4707 4708 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 4709 if (!conn) 4710 goto unlock; 4711 4712 /* For BR/EDR the necessary steps are taken through the 4713 * auth_complete event. 4714 */ 4715 if (conn->type != LE_LINK) 4716 goto unlock; 4717 4718 if (!ev->status) 4719 conn->sec_level = conn->pending_sec_level; 4720 4721 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); 4722 4723 if (ev->status && conn->state == BT_CONNECTED) { 4724 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE); 4725 hci_conn_drop(conn); 4726 goto unlock; 4727 } 4728 4729 if (conn->state == BT_CONFIG) { 4730 if (!ev->status) 4731 conn->state = BT_CONNECTED; 4732 4733 hci_connect_cfm(conn, ev->status); 4734 hci_conn_drop(conn); 4735 } else { 4736 hci_auth_cfm(conn, ev->status); 4737 4738 hci_conn_hold(conn); 4739 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 4740 hci_conn_drop(conn); 4741 } 4742 4743 unlock: 4744 hci_dev_unlock(hdev); 4745 } 4746 4747 static u8 hci_get_auth_req(struct hci_conn *conn) 4748 { 4749 /* If the remote requests no-bonding, follow that lead */ 4750 if (conn->remote_auth == HCI_AT_NO_BONDING || 4751 conn->remote_auth == HCI_AT_NO_BONDING_MITM) 4752 return conn->remote_auth | (conn->auth_type & 0x01); 4753 4754 /* If both remote and local have enough IO capabilities, require 4755 * MITM protection 4756 */ 4757 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT && 4758 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT) 4759 return conn->remote_auth | 0x01; 4760 4761 /* No MITM protection possible so ignore remote requirement */ 4762 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01); 4763 } 4764 4765 static u8 bredr_oob_data_present(struct hci_conn *conn) 4766 { 4767 struct hci_dev *hdev = conn->hdev; 4768 struct oob_data *data; 4769 4770 data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR); 4771 if (!data) 4772 return 0x00; 4773 4774 if (bredr_sc_enabled(hdev)) { 4775 /* When Secure Connections is enabled, then just 4776 * return the present value stored with the OOB 4777 * data. The stored value contains the right present 4778 * information. However it can only be trusted when 4779 * not in Secure Connection Only mode. 4780 */ 4781 if (!hci_dev_test_flag(hdev, HCI_SC_ONLY)) 4782 return data->present; 4783 4784 /* When Secure Connections Only mode is enabled, then 4785 * the P-256 values are required. If they are not 4786 * available, then do not declare that OOB data is 4787 * present. 4788 */ 4789 if (!memcmp(data->rand256, ZERO_KEY, 16) || 4790 !memcmp(data->hash256, ZERO_KEY, 16)) 4791 return 0x00; 4792 4793 return 0x02; 4794 } 4795 4796 /* When Secure Connections is not enabled or actually 4797 * not supported by the hardware, then check whether the 4798 * P-192 data values are present.
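*
* The return value feeds the OOB data present parameter of the
* IO Capability Reply: 0x00 = no OOB data, 0x01 = P-192 values
* present, 0x02 = P-256 values present (and, when SC is enabled
* but SC-only mode is off, the stored 'present' value is
* returned as-is above).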
4799 */ 4800 if (!memcmp(data->rand192, ZERO_KEY, 16) || 4801 !memcmp(data->hash192, ZERO_KEY, 16)) 4802 return 0x00; 4803 4804 return 0x01; 4805 } 4806 4807 static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb) 4808 { 4809 struct hci_ev_io_capa_request *ev = (void *) skb->data; 4810 struct hci_conn *conn; 4811 4812 BT_DBG("%s", hdev->name); 4813 4814 hci_dev_lock(hdev); 4815 4816 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 4817 if (!conn) 4818 goto unlock; 4819 4820 hci_conn_hold(conn); 4821 4822 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 4823 goto unlock; 4824 4825 /* Allow pairing if we're pairable, if we're the initiators of the 4826 * pairing, or if the remote is not requesting bonding. 4827 */ 4828 if (hci_dev_test_flag(hdev, HCI_BONDABLE) || 4829 test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) || 4830 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) { 4831 struct hci_cp_io_capability_reply cp; 4832 4833 bacpy(&cp.bdaddr, &ev->bdaddr); 4834 /* Change the IO capability from KeyboardDisplay to DisplayYesNo, 4835 * as KeyboardDisplay is not supported by the BT spec. */ 4836 cp.capability = (conn->io_capability == 0x04) ? 4837 HCI_IO_DISPLAY_YESNO : conn->io_capability; 4838 4839 /* If we are initiators, there is no remote information yet */ 4840 if (conn->remote_auth == 0xff) { 4841 /* Request MITM protection if our IO caps allow it, 4842 * except for the no-bonding case. 4843 */ 4844 if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT && 4845 conn->auth_type != HCI_AT_NO_BONDING) 4846 conn->auth_type |= 0x01; 4847 } else { 4848 conn->auth_type = hci_get_auth_req(conn); 4849 } 4850 4851 /* If we're not bondable, force one of the non-bondable 4852 * authentication requirement values. 4853 */ 4854 if (!hci_dev_test_flag(hdev, HCI_BONDABLE)) 4855 conn->auth_type &= HCI_AT_NO_BONDING_MITM; 4856 4857 cp.authentication = conn->auth_type; 4858 cp.oob_data = bredr_oob_data_present(conn); 4859 4860 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY, 4861 sizeof(cp), &cp); 4862 } else { 4863 struct hci_cp_io_capability_neg_reply cp; 4864 4865 bacpy(&cp.bdaddr, &ev->bdaddr); 4866 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED; 4867 4868 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY, 4869 sizeof(cp), &cp); 4870 } 4871 4872 unlock: 4873 hci_dev_unlock(hdev); 4874 } 4875 4876 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb) 4877 { 4878 struct hci_ev_io_capa_reply *ev = (void *) skb->data; 4879 struct hci_conn *conn; 4880 4881 BT_DBG("%s", hdev->name); 4882 4883 hci_dev_lock(hdev); 4884 4885 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 4886 if (!conn) 4887 goto unlock; 4888 4889 conn->remote_cap = ev->capability; 4890 conn->remote_auth = ev->authentication; 4891 4892 unlock: 4893 hci_dev_unlock(hdev); 4894 } 4895 4896 static void hci_user_confirm_request_evt(struct hci_dev *hdev, 4897 struct sk_buff *skb) 4898 { 4899 struct hci_ev_user_confirm_req *ev = (void *) skb->data; 4900 int loc_mitm, rem_mitm, confirm_hint = 0; 4901 struct hci_conn *conn; 4902 4903 BT_DBG("%s", hdev->name); 4904 4905 hci_dev_lock(hdev); 4906 4907 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 4908 goto unlock; 4909 4910 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 4911 if (!conn) 4912 goto unlock; 4913 4914 loc_mitm = (conn->auth_type & 0x01); 4915 rem_mitm = (conn->remote_auth & 0x01); 4916 4917 /* If we require MITM but the remote device can't provide that 4918 * (it has NoInputNoOutput) then reject the confirmation 4919 * request.
We check the security level here since it doesn't 4920 * necessarily match conn->auth_type. 4921 */ 4922 if (conn->pending_sec_level > BT_SECURITY_MEDIUM && 4923 conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) { 4924 BT_DBG("Rejecting request: remote device can't provide MITM"); 4925 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY, 4926 sizeof(ev->bdaddr), &ev->bdaddr); 4927 goto unlock; 4928 } 4929 4930 /* If no side requires MITM protection, auto-accept */ 4931 if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) && 4932 (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) { 4933 4934 /* If we're not the initiators, request authorization to 4935 * proceed from user space (mgmt_user_confirm with 4936 * confirm_hint set to 1). The exception is if neither 4937 * side had MITM or if the local IO capability is 4938 * NoInputNoOutput, in which case we do auto-accept 4939 */ 4940 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && 4941 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT && 4942 (loc_mitm || rem_mitm)) { 4943 BT_DBG("Confirming auto-accept as acceptor"); 4944 confirm_hint = 1; 4945 goto confirm; 4946 } 4947 4948 /* If a link key already exists on the local host, leave the 4949 * decision to user space since the remote device could be 4950 * legitimate or malicious. 4951 */ 4952 if (hci_find_link_key(hdev, &ev->bdaddr)) { 4953 bt_dev_dbg(hdev, "Local host already has link key"); 4954 confirm_hint = 1; 4955 goto confirm; 4956 } 4957 4958 BT_DBG("Auto-accept of user confirmation with %ums delay", 4959 hdev->auto_accept_delay); 4960 4961 if (hdev->auto_accept_delay > 0) { 4962 int delay = msecs_to_jiffies(hdev->auto_accept_delay); 4963 queue_delayed_work(conn->hdev->workqueue, 4964 &conn->auto_accept_work, delay); 4965 goto unlock; 4966 } 4967 4968 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, 4969 sizeof(ev->bdaddr), &ev->bdaddr); 4970 goto unlock; 4971 } 4972 4973 confirm: 4974 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, 4975 le32_to_cpu(ev->passkey), confirm_hint); 4976 4977 unlock: 4978 hci_dev_unlock(hdev); 4979 } 4980 4981 static void hci_user_passkey_request_evt(struct hci_dev *hdev, 4982 struct sk_buff *skb) 4983 { 4984 struct hci_ev_user_passkey_req *ev = (void *) skb->data; 4985 4986 BT_DBG("%s", hdev->name); 4987 4988 if (hci_dev_test_flag(hdev, HCI_MGMT)) 4989 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0); 4990 } 4991 4992 static void hci_user_passkey_notify_evt(struct hci_dev *hdev, 4993 struct sk_buff *skb) 4994 { 4995 struct hci_ev_user_passkey_notify *ev = (void *) skb->data; 4996 struct hci_conn *conn; 4997 4998 BT_DBG("%s", hdev->name); 4999 5000 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 5001 if (!conn) 5002 return; 5003 5004 conn->passkey_notify = __le32_to_cpu(ev->passkey); 5005 conn->passkey_entered = 0; 5006 5007 if (hci_dev_test_flag(hdev, HCI_MGMT)) 5008 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type, 5009 conn->dst_type, conn->passkey_notify, 5010 conn->passkey_entered); 5011 } 5012 5013 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb) 5014 { 5015 struct hci_ev_keypress_notify *ev = (void *) skb->data; 5016 struct hci_conn *conn; 5017 5018 BT_DBG("%s", hdev->name); 5019 5020 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 5021 if (!conn) 5022 return; 5023 5024 switch (ev->type) { 5025 case HCI_KEYPRESS_STARTED: 5026 conn->passkey_entered = 0; 5027 return; 5028 5029 case HCI_KEYPRESS_ENTERED: 5030 conn->passkey_entered++; 5031 break; 5032 5033 case
HCI_KEYPRESS_ERASED: 5034 conn->passkey_entered--; 5035 break; 5036 5037 case HCI_KEYPRESS_CLEARED: 5038 conn->passkey_entered = 0; 5039 break; 5040 5041 case HCI_KEYPRESS_COMPLETED: 5042 return; 5043 } 5044 5045 if (hci_dev_test_flag(hdev, HCI_MGMT)) 5046 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type, 5047 conn->dst_type, conn->passkey_notify, 5048 conn->passkey_entered); 5049 } 5050 5051 static void hci_simple_pair_complete_evt(struct hci_dev *hdev, 5052 struct sk_buff *skb) 5053 { 5054 struct hci_ev_simple_pair_complete *ev = (void *) skb->data; 5055 struct hci_conn *conn; 5056 5057 BT_DBG("%s", hdev->name); 5058 5059 hci_dev_lock(hdev); 5060 5061 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 5062 if (!conn) 5063 goto unlock; 5064 5065 /* Reset the authentication requirement to unknown */ 5066 conn->remote_auth = 0xff; 5067 5068 /* To avoid duplicate auth_failed events to user space we check 5069 * the HCI_CONN_AUTH_PEND flag which will be set if we 5070 * initiated the authentication. A traditional auth_complete 5071 * event is always produced for the initiator and is also mapped 5072 * to the mgmt_auth_failed event */ 5073 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status) 5074 mgmt_auth_failed(conn, ev->status); 5075 5076 hci_conn_drop(conn); 5077 5078 unlock: 5079 hci_dev_unlock(hdev); 5080 } 5081 5082 static void hci_remote_host_features_evt(struct hci_dev *hdev, 5083 struct sk_buff *skb) 5084 { 5085 struct hci_ev_remote_host_features *ev = (void *) skb->data; 5086 struct inquiry_entry *ie; 5087 struct hci_conn *conn; 5088 5089 BT_DBG("%s", hdev->name); 5090 5091 hci_dev_lock(hdev); 5092 5093 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 5094 if (conn) 5095 memcpy(conn->features[1], ev->features, 8); 5096 5097 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr); 5098 if (ie) 5099 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP); 5100 5101 hci_dev_unlock(hdev); 5102 } 5103 5104 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev, 5105 struct sk_buff *skb) 5106 { 5107 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data; 5108 struct oob_data *data; 5109 5110 BT_DBG("%s", hdev->name); 5111 5112 hci_dev_lock(hdev); 5113 5114 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 5115 goto unlock; 5116 5117 data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR); 5118 if (!data) { 5119 struct hci_cp_remote_oob_data_neg_reply cp; 5120 5121 bacpy(&cp.bdaddr, &ev->bdaddr); 5122 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, 5123 sizeof(cp), &cp); 5124 goto unlock; 5125 } 5126 5127 if (bredr_sc_enabled(hdev)) { 5128 struct hci_cp_remote_oob_ext_data_reply cp; 5129 5130 bacpy(&cp.bdaddr, &ev->bdaddr); 5131 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) { 5132 memset(cp.hash192, 0, sizeof(cp.hash192)); 5133 memset(cp.rand192, 0, sizeof(cp.rand192)); 5134 } else { 5135 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192)); 5136 memcpy(cp.rand192, data->rand192, sizeof(cp.rand192)); 5137 } 5138 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256)); 5139 memcpy(cp.rand256, data->rand256, sizeof(cp.rand256)); 5140 5141 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY, 5142 sizeof(cp), &cp); 5143 } else { 5144 struct hci_cp_remote_oob_data_reply cp; 5145 5146 bacpy(&cp.bdaddr, &ev->bdaddr); 5147 memcpy(cp.hash, data->hash192, sizeof(cp.hash)); 5148 memcpy(cp.rand, data->rand192, sizeof(cp.rand)); 5149 5150 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, 5151 sizeof(cp), &cp); 5152 } 5153 5154 unlock: 5155 hci_dev_unlock(hdev);
5156 } 5157 5158 #if IS_ENABLED(CONFIG_BT_HS) 5159 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb) 5160 { 5161 struct hci_ev_channel_selected *ev = (void *)skb->data; 5162 struct hci_conn *hcon; 5163 5164 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle); 5165 5166 skb_pull(skb, sizeof(*ev)); 5167 5168 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle); 5169 if (!hcon) 5170 return; 5171 5172 amp_read_loc_assoc_final_data(hdev, hcon); 5173 } 5174 5175 static void hci_phy_link_complete_evt(struct hci_dev *hdev, 5176 struct sk_buff *skb) 5177 { 5178 struct hci_ev_phy_link_complete *ev = (void *) skb->data; 5179 struct hci_conn *hcon, *bredr_hcon; 5180 5181 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle, 5182 ev->status); 5183 5184 hci_dev_lock(hdev); 5185 5186 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle); 5187 if (!hcon) 5188 goto unlock; 5189 5190 if (!hcon->amp_mgr) 5191 goto unlock; 5192 5193 if (ev->status) { 5194 hci_conn_del(hcon); 5195 goto unlock; 5196 } 5197 5198 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon; 5199 5200 hcon->state = BT_CONNECTED; 5201 bacpy(&hcon->dst, &bredr_hcon->dst); 5202 5203 hci_conn_hold(hcon); 5204 hcon->disc_timeout = HCI_DISCONN_TIMEOUT; 5205 hci_conn_drop(hcon); 5206 5207 hci_debugfs_create_conn(hcon); 5208 hci_conn_add_sysfs(hcon); 5209 5210 amp_physical_cfm(bredr_hcon, hcon); 5211 5212 unlock: 5213 hci_dev_unlock(hdev); 5214 } 5215 5216 static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 5217 { 5218 struct hci_ev_logical_link_complete *ev = (void *) skb->data; 5219 struct hci_conn *hcon; 5220 struct hci_chan *hchan; 5221 struct amp_mgr *mgr; 5222 5223 BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x", 5224 hdev->name, le16_to_cpu(ev->handle), ev->phy_handle, 5225 ev->status); 5226 5227 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle); 5228 if (!hcon) 5229 return; 5230 5231 /* Create AMP hchan */ 5232 hchan = hci_chan_create(hcon); 5233 if (!hchan) 5234 return; 5235 5236 hchan->handle = le16_to_cpu(ev->handle); 5237 hchan->amp = true; 5238 5239 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan); 5240 5241 mgr = hcon->amp_mgr; 5242 if (mgr && mgr->bredr_chan) { 5243 struct l2cap_chan *bredr_chan = mgr->bredr_chan; 5244 5245 l2cap_chan_lock(bredr_chan); 5246 5247 bredr_chan->conn->mtu = hdev->block_mtu; 5248 l2cap_logical_cfm(bredr_chan, hchan, 0); 5249 hci_conn_hold(hcon); 5250 5251 l2cap_chan_unlock(bredr_chan); 5252 } 5253 } 5254 5255 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev, 5256 struct sk_buff *skb) 5257 { 5258 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data; 5259 struct hci_chan *hchan; 5260 5261 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name, 5262 le16_to_cpu(ev->handle), ev->status); 5263 5264 if (ev->status) 5265 return; 5266 5267 hci_dev_lock(hdev); 5268 5269 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle)); 5270 if (!hchan || !hchan->amp) 5271 goto unlock; 5272 5273 amp_destroy_logical_link(hchan, ev->reason); 5274 5275 unlock: 5276 hci_dev_unlock(hdev); 5277 } 5278 5279 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev, 5280 struct sk_buff *skb) 5281 { 5282 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data; 5283 struct hci_conn *hcon; 5284 5285 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 5286 5287 if (ev->status) 5288 return; 5289 5290 hci_dev_lock(hdev); 5291 5292 hcon = 
hci_conn_hash_lookup_handle(hdev, ev->phy_handle); 5293 if (hcon) { 5294 hcon->state = BT_CLOSED; 5295 hci_conn_del(hcon); 5296 } 5297 5298 hci_dev_unlock(hdev); 5299 } 5300 #endif 5301 5302 static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr, 5303 u8 bdaddr_type, bdaddr_t *local_rpa) 5304 { 5305 if (conn->out) { 5306 conn->dst_type = bdaddr_type; 5307 conn->resp_addr_type = bdaddr_type; 5308 bacpy(&conn->resp_addr, bdaddr); 5309 5310 /* Check if the controller has set a Local RPA, then it must be 5311 * used instead of hdev->rpa. 5312 */ 5313 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) { 5314 conn->init_addr_type = ADDR_LE_DEV_RANDOM; 5315 bacpy(&conn->init_addr, local_rpa); 5316 } else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) { 5317 conn->init_addr_type = ADDR_LE_DEV_RANDOM; 5318 bacpy(&conn->init_addr, &conn->hdev->rpa); 5319 } else { 5320 hci_copy_identity_address(conn->hdev, &conn->init_addr, 5321 &conn->init_addr_type); 5322 } 5323 } else { 5324 conn->resp_addr_type = conn->hdev->adv_addr_type; 5325 /* Check if the controller has set a Local RPA, then it must be 5326 * used instead of hdev->rpa. 5327 */ 5328 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) { 5329 conn->resp_addr_type = ADDR_LE_DEV_RANDOM; 5330 bacpy(&conn->resp_addr, local_rpa); 5331 } else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) { 5332 /* In case of ext adv, resp_addr will be updated in 5333 * Adv Terminated event. 5334 */ 5335 if (!ext_adv_capable(conn->hdev)) 5336 bacpy(&conn->resp_addr, 5337 &conn->hdev->random_addr); 5338 } else { 5339 bacpy(&conn->resp_addr, &conn->hdev->bdaddr); 5340 } 5341 5342 conn->init_addr_type = bdaddr_type; 5343 bacpy(&conn->init_addr, bdaddr); 5344 5345 /* For incoming connections, set the default minimum 5346 * and maximum connection interval. They will be used 5347 * to check if the parameters are in range and if not 5348 * trigger the connection update procedure. 5349 */ 5350 conn->le_conn_min_interval = conn->hdev->le_conn_min_interval; 5351 conn->le_conn_max_interval = conn->hdev->le_conn_max_interval; 5352 } 5353 } 5354 5355 static void le_conn_complete_evt(struct hci_dev *hdev, u8 status, 5356 bdaddr_t *bdaddr, u8 bdaddr_type, 5357 bdaddr_t *local_rpa, u8 role, u16 handle, 5358 u16 interval, u16 latency, 5359 u16 supervision_timeout) 5360 { 5361 struct hci_conn_params *params; 5362 struct hci_conn *conn; 5363 struct smp_irk *irk; 5364 u8 addr_type; 5365 5366 hci_dev_lock(hdev); 5367 5368 /* All controllers implicitly stop advertising in the event of a 5369 * connection, so ensure that the state bit is cleared. 5370 */ 5371 hci_dev_clear_flag(hdev, HCI_LE_ADV); 5372 5373 conn = hci_lookup_le_connect(hdev); 5374 if (!conn) { 5375 conn = hci_conn_add(hdev, LE_LINK, bdaddr, role); 5376 if (!conn) { 5377 bt_dev_err(hdev, "no memory for new connection"); 5378 goto unlock; 5379 } 5380 5381 conn->dst_type = bdaddr_type; 5382 5383 /* If we didn't have a hci_conn object previously 5384 * but we're in central role this must be something 5385 * initiated using an accept list. Since accept list based 5386 * connections are not "first class citizens" we don't 5387 * have full tracking of them. Therefore, we go ahead 5388 * with a "best effort" approach of determining the 5389 * initiator address based on the HCI_PRIVACY flag.
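*
* This mirrors le_conn_update_addr() above: with HCI_PRIVACY set
* our own RPA (hdev->rpa) is assumed to have been used as the
* initiator address, otherwise the public/static identity
* address is used.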
5390 */ 5391 if (conn->out) { 5392 conn->resp_addr_type = bdaddr_type; 5393 bacpy(&conn->resp_addr, bdaddr); 5394 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) { 5395 conn->init_addr_type = ADDR_LE_DEV_RANDOM; 5396 bacpy(&conn->init_addr, &hdev->rpa); 5397 } else { 5398 hci_copy_identity_address(hdev, 5399 &conn->init_addr, 5400 &conn->init_addr_type); 5401 } 5402 } 5403 } else { 5404 cancel_delayed_work(&conn->le_conn_timeout); 5405 } 5406 5407 le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa); 5408 5409 /* Lookup the identity address from the stored connection 5410 * address and address type. 5411 * 5412 * When establishing connections to an identity address, the 5413 * connection procedure will store the resolvable random 5414 * address first. Now if it can be converted back into the 5415 * identity address, start using the identity address from 5416 * now on. 5417 */ 5418 irk = hci_get_irk(hdev, &conn->dst, conn->dst_type); 5419 if (irk) { 5420 bacpy(&conn->dst, &irk->bdaddr); 5421 conn->dst_type = irk->addr_type; 5422 } 5423 5424 conn->dst_type = ev_bdaddr_type(hdev, conn->dst_type, NULL); 5425 5426 if (status) { 5427 hci_le_conn_failed(conn, status); 5428 goto unlock; 5429 } 5430 5431 if (conn->dst_type == ADDR_LE_DEV_PUBLIC) 5432 addr_type = BDADDR_LE_PUBLIC; 5433 else 5434 addr_type = BDADDR_LE_RANDOM; 5435 5436 /* Drop the connection if the device is blocked */ 5437 if (hci_bdaddr_list_lookup(&hdev->reject_list, &conn->dst, addr_type)) { 5438 hci_conn_drop(conn); 5439 goto unlock; 5440 } 5441 5442 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) 5443 mgmt_device_connected(hdev, conn, NULL, 0); 5444 5445 conn->sec_level = BT_SECURITY_LOW; 5446 conn->handle = handle; 5447 conn->state = BT_CONFIG; 5448 5449 /* Store current advertising instance as connection advertising instance 5450 * when software rotation is in use so it can be re-enabled when 5451 * disconnected. 5452 */ 5453 if (!ext_adv_capable(hdev)) 5454 conn->adv_instance = hdev->cur_adv_instance; 5455 5456 conn->le_conn_interval = interval; 5457 conn->le_conn_latency = latency; 5458 conn->le_supv_timeout = supervision_timeout; 5459 5460 hci_debugfs_create_conn(conn); 5461 hci_conn_add_sysfs(conn); 5462 5463 /* The remote features procedure is defined for central 5464 * role only. So only in case of an initiated connection 5465 * do we request the remote features. 5466 * 5467 * If the local controller supports peripheral-initiated features 5468 * exchange, then requesting the remote features in peripheral 5469 * role is possible. Otherwise just transition into the 5470 * connected state without requesting the remote features.
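*
* Concretely: conn->out (central role) always triggers the
* HCI_OP_LE_READ_REMOTE_FEATURES command below, while in
* peripheral role the command is only sent when the local LE
* features advertise peripheral-initiated features exchange.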
5471 */ 5472 if (conn->out || 5473 (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) { 5474 struct hci_cp_le_read_remote_features cp; 5475 5476 cp.handle = __cpu_to_le16(conn->handle); 5477 5478 hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES, 5479 sizeof(cp), &cp); 5480 5481 hci_conn_hold(conn); 5482 } else { 5483 conn->state = BT_CONNECTED; 5484 hci_connect_cfm(conn, status); 5485 } 5486 5487 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst, 5488 conn->dst_type); 5489 if (params) { 5490 list_del_init(&params->action); 5491 if (params->conn) { 5492 hci_conn_drop(params->conn); 5493 hci_conn_put(params->conn); 5494 params->conn = NULL; 5495 } 5496 } 5497 5498 unlock: 5499 hci_update_passive_scan(hdev); 5500 hci_dev_unlock(hdev); 5501 } 5502 5503 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 5504 { 5505 struct hci_ev_le_conn_complete *ev = (void *) skb->data; 5506 5507 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 5508 5509 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type, 5510 NULL, ev->role, le16_to_cpu(ev->handle), 5511 le16_to_cpu(ev->interval), 5512 le16_to_cpu(ev->latency), 5513 le16_to_cpu(ev->supervision_timeout)); 5514 } 5515 5516 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev, 5517 struct sk_buff *skb) 5518 { 5519 struct hci_ev_le_enh_conn_complete *ev = (void *) skb->data; 5520 5521 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 5522 5523 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type, 5524 &ev->local_rpa, ev->role, le16_to_cpu(ev->handle), 5525 le16_to_cpu(ev->interval), 5526 le16_to_cpu(ev->latency), 5527 le16_to_cpu(ev->supervision_timeout)); 5528 } 5529 5530 static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb) 5531 { 5532 struct hci_evt_le_ext_adv_set_term *ev = (void *) skb->data; 5533 struct hci_conn *conn; 5534 struct adv_info *adv, *n; 5535 5536 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 5537 5538 adv = hci_find_adv_instance(hdev, ev->handle); 5539 5540 /* The Bluetooth Core 5.3 specification clearly states that this event 5541 * shall not be sent when the Host disables the advertising set. So in 5542 * case of HCI_ERROR_CANCELLED_BY_HOST, just ignore the event. 5543 * 5544 * When the Host disables an advertising set, all cleanup is done via 5545 * its command callback and does not need to be duplicated here. 5546 */ 5547 if (ev->status == HCI_ERROR_CANCELLED_BY_HOST) { 5548 bt_dev_warn_ratelimited(hdev, "Unexpected advertising set terminated event"); 5549 return; 5550 } 5551 5552 if (ev->status) { 5553 if (!adv) 5554 return; 5555 5556 /* Remove advertising as it has been terminated */ 5557 hci_remove_adv_instance(hdev, ev->handle); 5558 mgmt_advertising_removed(NULL, hdev, ev->handle); 5559 5560 list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) { 5561 if (adv->enabled) 5562 return; 5563 } 5564 5565 /* We are no longer advertising, clear HCI_LE_ADV */ 5566 hci_dev_clear_flag(hdev, HCI_LE_ADV); 5567 return; 5568 } 5569 5570 if (adv) 5571 adv->enabled = false; 5572 5573 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle)); 5574 if (conn) { 5575 /* Store handle in the connection so the correct advertising 5576 * instance can be re-enabled when disconnected.
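*
* Note that handle 0x00 denotes the default instance whose
* address is hdev->random_addr; any other instance carries its
* own per-instance random address, as handled below.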
5577 */ 5578 conn->adv_instance = ev->handle; 5579 5580 if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM || 5581 bacmp(&conn->resp_addr, BDADDR_ANY)) 5582 return; 5583 5584 if (!ev->handle) { 5585 bacpy(&conn->resp_addr, &hdev->random_addr); 5586 return; 5587 } 5588 5589 if (adv) 5590 bacpy(&conn->resp_addr, &adv->random_addr); 5591 } 5592 } 5593 5594 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev, 5595 struct sk_buff *skb) 5596 { 5597 struct hci_ev_le_conn_update_complete *ev = (void *) skb->data; 5598 struct hci_conn *conn; 5599 5600 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 5601 5602 if (ev->status) 5603 return; 5604 5605 hci_dev_lock(hdev); 5606 5607 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 5608 if (conn) { 5609 conn->le_conn_interval = le16_to_cpu(ev->interval); 5610 conn->le_conn_latency = le16_to_cpu(ev->latency); 5611 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout); 5612 } 5613 5614 hci_dev_unlock(hdev); 5615 } 5616 5617 /* This function requires the caller holds hdev->lock */ 5618 static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev, 5619 bdaddr_t *addr, 5620 u8 addr_type, bool addr_resolved, 5621 u8 adv_type, bdaddr_t *direct_rpa) 5622 { 5623 struct hci_conn *conn; 5624 struct hci_conn_params *params; 5625 5626 /* If the event is not connectable don't proceed further */ 5627 if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND) 5628 return NULL; 5629 5630 /* Ignore if the device is blocked or hdev is suspended */ 5631 if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type) || 5632 hdev->suspended) 5633 return NULL; 5634 5635 /* Most controllers will fail if we try to create new connections 5636 * while we have an existing one in peripheral role. 5637 */ 5638 if (hdev->conn_hash.le_num_peripheral > 0 && 5639 (!test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) || 5640 !(hdev->le_states[3] & 0x10))) 5641 return NULL; 5642 5643 /* If we're not connectable only connect devices that we have in 5644 * our pend_le_conns list. 5645 */ 5646 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr, 5647 addr_type); 5648 if (!params) 5649 return NULL; 5650 5651 if (!params->explicit_connect) { 5652 switch (params->auto_connect) { 5653 case HCI_AUTO_CONN_DIRECT: 5654 /* Only devices advertising with ADV_DIRECT_IND 5655 * trigger a connection attempt. This allows incoming 5656 * connections from peripheral devices. 5657 */ 5658 if (adv_type != LE_ADV_DIRECT_IND) 5659 return NULL; 5660 break; 5661 case HCI_AUTO_CONN_ALWAYS: 5662 /* Devices advertising with ADV_IND or ADV_DIRECT_IND 5663 * trigger a connection attempt. This means that 5664 * incoming connections from peripheral devices are 5665 * accepted and also outgoing connections to peripheral 5666 * devices are established when found. 5667 */ 5668 break; 5669 default: 5670 return NULL; 5671 } 5672 } 5673 5674 conn = hci_connect_le(hdev, addr, addr_type, addr_resolved, 5675 BT_SECURITY_LOW, hdev->def_le_autoconnect_timeout, 5676 HCI_ROLE_MASTER, direct_rpa); 5677 if (!IS_ERR(conn)) { 5678 /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned 5679 * by the higher layer that tried to connect; if not, then 5680 * store the pointer since we don't really have any 5681 * other owner of the object besides the params that 5682 * triggered it. This way we can abort the connection if 5683 * the parameters get removed and keep the reference 5684 * count consistent once the connection is established.
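*
* In other words, params->conn holds the only long-lived
* reference for an auto-connection; it is dropped again either
* in the connection complete handler or when the parameters
* entry is removed.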
5685 */ 5686 5687 if (!params->explicit_connect) 5688 params->conn = hci_conn_get(conn); 5689 5690 return conn; 5691 } 5692 5693 switch (PTR_ERR(conn)) { 5694 case -EBUSY: 5695 /* If hci_connect() returns -EBUSY it means there is already 5696 * an LE connection attempt going on. Since controllers don't 5697 * support more than one connection attempt at a time, we 5698 * don't consider this an error case. 5699 */ 5700 break; 5701 default: 5702 BT_DBG("Failed to connect: err %ld", PTR_ERR(conn)); 5703 return NULL; 5704 } 5705 5706 return NULL; 5707 } 5708 5709 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, 5710 u8 bdaddr_type, bdaddr_t *direct_addr, 5711 u8 direct_addr_type, s8 rssi, u8 *data, u8 len, 5712 bool ext_adv) 5713 { 5714 struct discovery_state *d = &hdev->discovery; 5715 struct smp_irk *irk; 5716 struct hci_conn *conn; 5717 bool match, bdaddr_resolved; 5718 u32 flags; 5719 u8 *ptr; 5720 5721 switch (type) { 5722 case LE_ADV_IND: 5723 case LE_ADV_DIRECT_IND: 5724 case LE_ADV_SCAN_IND: 5725 case LE_ADV_NONCONN_IND: 5726 case LE_ADV_SCAN_RSP: 5727 break; 5728 default: 5729 bt_dev_err_ratelimited(hdev, "unknown advertising packet " 5730 "type: 0x%02x", type); 5731 return; 5732 } 5733 5734 if (!ext_adv && len > HCI_MAX_AD_LENGTH) { 5735 bt_dev_err_ratelimited(hdev, "legacy adv larger than 31 bytes"); 5736 return; 5737 } 5738 5739 /* Find the end of the data in case the report contains padded zero 5740 * bytes at the end causing an invalid length value. 5741 * 5742 * When data is NULL, len is 0 so there is no need for extra ptr 5743 * check as 'ptr < data + 0' is already false in that case. 5744 */ 5745 for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) { 5746 if (ptr + 1 + *ptr > data + len) 5747 break; 5748 } 5749 5750 /* Adjust for actual length. This handles the case when remote 5751 * device is advertising with incorrect data length. 5752 */ 5753 len = ptr - data; 5754 5755 /* If the direct address is present, then this report is from 5756 * an LE Direct Advertising Report event. In that case it is 5757 * important to see if the address is matching the local 5758 * controller address. 5759 */ 5760 if (direct_addr) { 5761 direct_addr_type = ev_bdaddr_type(hdev, direct_addr_type, 5762 &bdaddr_resolved); 5763 5764 /* Only resolvable random addresses are valid for this 5765 * kind of report and others can be ignored. 5766 */ 5767 if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type)) 5768 return; 5769 5770 /* If the controller is not using resolvable random 5771 * addresses, then this report can be ignored. 5772 */ 5773 if (!hci_dev_test_flag(hdev, HCI_PRIVACY)) 5774 return; 5775 5776 /* If the local IRK of the controller does not match 5777 * with the resolvable random address provided, then 5778 * this report can be ignored. 5779 */ 5780 if (!smp_irk_matches(hdev, hdev->irk, direct_addr)) 5781 return; 5782 } 5783 5784 /* Check if we need to convert to identity address */ 5785 irk = hci_get_irk(hdev, bdaddr, bdaddr_type); 5786 if (irk) { 5787 bdaddr = &irk->bdaddr; 5788 bdaddr_type = irk->addr_type; 5789 } 5790 5791 bdaddr_type = ev_bdaddr_type(hdev, bdaddr_type, &bdaddr_resolved); 5792 5793 /* Check if we have been requested to connect to this device. 5794 * 5795 * direct_addr is set only for directed advertising reports (it is NULL 5796 * for advertising reports) and is already verified to be RPA above.
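*
* check_pending_le_conn() only returns a connection for
* connectable report types (ADV_IND or ADV_DIRECT_IND) and only
* when a matching entry exists in the pend_le_conns list.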
5797 */ 5798 conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, bdaddr_resolved, 5799 type, direct_addr); 5800 if (!ext_adv && conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) { 5801 /* Store report for later inclusion by 5802 * mgmt_device_connected 5803 */ 5804 memcpy(conn->le_adv_data, data, len); 5805 conn->le_adv_data_len = len; 5806 } 5807 5808 /* Passive scanning shouldn't trigger any device found events, 5809 * except for devices marked as CONN_REPORT for which we do send 5810 * device found events, or when advertisement monitoring was requested. 5811 */ 5812 if (hdev->le_scan_type == LE_SCAN_PASSIVE) { 5813 if (type == LE_ADV_DIRECT_IND) 5814 return; 5815 5816 if (!hci_pend_le_action_lookup(&hdev->pend_le_reports, 5817 bdaddr, bdaddr_type) && 5818 idr_is_empty(&hdev->adv_monitors_idr)) 5819 return; 5820 5821 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND) 5822 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE; 5823 else 5824 flags = 0; 5825 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL, 5826 rssi, flags, data, len, NULL, 0); 5827 return; 5828 } 5829 5830 /* When receiving non-connectable or scannable undirected 5831 * advertising reports, the remote device is not connectable, 5832 * so clearly indicate this in the device found event. 5833 * 5834 * When receiving a scan response, there is no way to 5835 * know if the remote device is connectable or not. However 5836 * since scan responses are merged with a previously seen 5837 * advertising report, the flags field from that report 5838 * will be used. 5839 * 5840 * In the really unlikely case that a controller gets confused 5841 * and just sends a scan response event, it is marked as 5842 * not connectable as well. 5843 */ 5844 5845 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND || 5846 type == LE_ADV_SCAN_RSP) 5847 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE; 5848 else 5849 flags = 0; 5850 5851 /* If there's nothing pending either store the data from this 5852 * event or send an immediate device found event if the data 5853 * should not be stored for later. 5854 */ 5855 if (!ext_adv && !has_pending_adv_report(hdev)) { 5856 /* If the report will trigger a SCAN_REQ store it for 5857 * later merging. 5858 */ 5859 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) { 5860 store_pending_adv_report(hdev, bdaddr, bdaddr_type, 5861 rssi, flags, data, len); 5862 return; 5863 } 5864 5865 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL, 5866 rssi, flags, data, len, NULL, 0); 5867 return; 5868 } 5869 5870 /* Check if the pending report is for the same device as the new one */ 5871 match = (!bacmp(bdaddr, &d->last_adv_addr) && 5872 bdaddr_type == d->last_adv_addr_type); 5873 5874 /* If the pending data doesn't match this report or this isn't a 5875 * scan response (e.g. we got a duplicate ADV_IND) then force 5876 * sending of the pending data. 5877 */ 5878 if (type != LE_ADV_SCAN_RSP || !match) { 5879 /* Send out whatever is in the cache, but skip duplicates */ 5880 if (!match) 5881 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK, 5882 d->last_adv_addr_type, NULL, 5883 d->last_adv_rssi, d->last_adv_flags, 5884 d->last_adv_data, 5885 d->last_adv_data_len, NULL, 0); 5886 5887 /* If the new report will trigger a SCAN_REQ store it for 5888 * later merging.
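*
* Only ADV_IND and ADV_SCAN_IND are scannable, so only those two
* report types are worth caching for a later SCAN_RSP merge.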
5889 */ 5890 if (!ext_adv && (type == LE_ADV_IND || 5891 type == LE_ADV_SCAN_IND)) { 5892 store_pending_adv_report(hdev, bdaddr, bdaddr_type, 5893 rssi, flags, data, len); 5894 return; 5895 } 5896 5897 /* The advertising reports cannot be merged, so clear 5898 * the pending report and send out a device found event. 5899 */ 5900 clear_pending_adv_report(hdev); 5901 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL, 5902 rssi, flags, data, len, NULL, 0); 5903 return; 5904 } 5905 5906 /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and 5907 * the new event is a SCAN_RSP. We can therefore proceed with 5908 * sending a merged device found event. 5909 */ 5910 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK, 5911 d->last_adv_addr_type, NULL, rssi, d->last_adv_flags, 5912 d->last_adv_data, d->last_adv_data_len, data, len); 5913 clear_pending_adv_report(hdev); 5914 } 5915 5916 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb) 5917 { 5918 u8 num_reports = skb->data[0]; 5919 void *ptr = &skb->data[1]; 5920 5921 hci_dev_lock(hdev); 5922 5923 while (num_reports--) { 5924 struct hci_ev_le_advertising_info *ev = ptr; 5925 s8 rssi; 5926 5927 if (ptr > (void *)skb_tail_pointer(skb) - sizeof(*ev)) { 5928 bt_dev_err(hdev, "Malicious advertising data."); 5929 break; 5930 } 5931 5932 if (ev->length <= HCI_MAX_AD_LENGTH && 5933 ev->data + ev->length <= skb_tail_pointer(skb)) { 5934 rssi = ev->data[ev->length]; 5935 process_adv_report(hdev, ev->evt_type, &ev->bdaddr, 5936 ev->bdaddr_type, NULL, 0, rssi, 5937 ev->data, ev->length, false); 5938 } else { 5939 bt_dev_err(hdev, "Dropping invalid advertising data"); 5940 } 5941 5942 ptr += sizeof(*ev) + ev->length + 1; 5943 } 5944 5945 hci_dev_unlock(hdev); 5946 } 5947 5948 static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type) 5949 { 5950 if (evt_type & LE_EXT_ADV_LEGACY_PDU) { 5951 switch (evt_type) { 5952 case LE_LEGACY_ADV_IND: 5953 return LE_ADV_IND; 5954 case LE_LEGACY_ADV_DIRECT_IND: 5955 return LE_ADV_DIRECT_IND; 5956 case LE_LEGACY_ADV_SCAN_IND: 5957 return LE_ADV_SCAN_IND; 5958 case LE_LEGACY_NONCONN_IND: 5959 return LE_ADV_NONCONN_IND; 5960 case LE_LEGACY_SCAN_RSP_ADV: 5961 case LE_LEGACY_SCAN_RSP_ADV_SCAN: 5962 return LE_ADV_SCAN_RSP; 5963 } 5964 5965 goto invalid; 5966 } 5967 5968 if (evt_type & LE_EXT_ADV_CONN_IND) { 5969 if (evt_type & LE_EXT_ADV_DIRECT_IND) 5970 return LE_ADV_DIRECT_IND; 5971 5972 return LE_ADV_IND; 5973 } 5974 5975 if (evt_type & LE_EXT_ADV_SCAN_RSP) 5976 return LE_ADV_SCAN_RSP; 5977 5978 if (evt_type & LE_EXT_ADV_SCAN_IND) 5979 return LE_ADV_SCAN_IND; 5980 5981 if (evt_type == LE_EXT_ADV_NON_CONN_IND || 5982 evt_type & LE_EXT_ADV_DIRECT_IND) 5983 return LE_ADV_NONCONN_IND; 5984 5985 invalid: 5986 bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x", 5987 evt_type); 5988 5989 return LE_ADV_INVALID; 5990 } 5991 5992 static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb) 5993 { 5994 u8 num_reports = skb->data[0]; 5995 void *ptr = &skb->data[1]; 5996 5997 hci_dev_lock(hdev); 5998 5999 while (num_reports--) { 6000 struct hci_ev_le_ext_adv_report *ev = ptr; 6001 u8 legacy_evt_type; 6002 u16 evt_type; 6003 6004 evt_type = __le16_to_cpu(ev->evt_type); 6005 legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type); 6006 if (legacy_evt_type != LE_ADV_INVALID) { 6007 process_adv_report(hdev, legacy_evt_type, &ev->bdaddr, 6008 ev->bdaddr_type, NULL, 0, ev->rssi, 6009 ev->data, ev->length, 6010 !(evt_type & LE_EXT_ADV_LEGACY_PDU)); 6011 } 
6012 6013 ptr += sizeof(*ev) + ev->length; 6014 } 6015 6016 hci_dev_unlock(hdev); 6017 } 6018 6019 static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev, 6020 struct sk_buff *skb) 6021 { 6022 struct hci_ev_le_remote_feat_complete *ev = (void *)skb->data; 6023 struct hci_conn *conn; 6024 6025 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 6026 6027 hci_dev_lock(hdev); 6028 6029 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 6030 if (conn) { 6031 if (!ev->status) 6032 memcpy(conn->features[0], ev->features, 8); 6033 6034 if (conn->state == BT_CONFIG) { 6035 __u8 status; 6036 6037 /* If the local controller supports peripheral-initiated 6038 * features exchange, but the remote controller does 6039 * not, then it is possible that the error code 0x1a 6040 * for unsupported remote feature gets returned. 6041 * 6042 * In this specific case, allow the connection to 6043 * transition into connected state and mark it as 6044 * successful. 6045 */ 6046 if (!conn->out && ev->status == 0x1a && 6047 (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) 6048 status = 0x00; 6049 else 6050 status = ev->status; 6051 6052 conn->state = BT_CONNECTED; 6053 hci_connect_cfm(conn, status); 6054 hci_conn_drop(conn); 6055 } 6056 } 6057 6058 hci_dev_unlock(hdev); 6059 } 6060 6061 static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb) 6062 { 6063 struct hci_ev_le_ltk_req *ev = (void *) skb->data; 6064 struct hci_cp_le_ltk_reply cp; 6065 struct hci_cp_le_ltk_neg_reply neg; 6066 struct hci_conn *conn; 6067 struct smp_ltk *ltk; 6068 6069 BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle)); 6070 6071 hci_dev_lock(hdev); 6072 6073 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 6074 if (conn == NULL) 6075 goto not_found; 6076 6077 ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role); 6078 if (!ltk) 6079 goto not_found; 6080 6081 if (smp_ltk_is_sc(ltk)) { 6082 /* With SC both EDiv and Rand are set to zero */ 6083 if (ev->ediv || ev->rand) 6084 goto not_found; 6085 } else { 6086 /* For non-SC keys check that EDiv and Rand match */ 6087 if (ev->ediv != ltk->ediv || ev->rand != ltk->rand) 6088 goto not_found; 6089 } 6090 6091 memcpy(cp.ltk, ltk->val, ltk->enc_size); 6092 memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size); 6093 cp.handle = cpu_to_le16(conn->handle); 6094 6095 conn->pending_sec_level = smp_ltk_sec_level(ltk); 6096 6097 conn->enc_key_size = ltk->enc_size; 6098 6099 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp); 6100 6101 /* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a 6102 * temporary key used to encrypt a connection following 6103 * pairing. It is used during the Encrypted Session Setup to 6104 * distribute the keys. Later, security can be re-established 6105 * using a distributed LTK. 
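*
* This is why an STK is deleted below right after its first use,
* while a distributed LTK stays on the list for future
* encryption requests.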
6106 */ 6107 if (ltk->type == SMP_STK) { 6108 set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags); 6109 list_del_rcu(&ltk->list); 6110 kfree_rcu(ltk, rcu); 6111 } else { 6112 clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags); 6113 } 6114 6115 hci_dev_unlock(hdev); 6116 6117 return; 6118 6119 not_found: 6120 neg.handle = ev->handle; 6121 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg); 6122 hci_dev_unlock(hdev); 6123 } 6124 6125 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle, 6126 u8 reason) 6127 { 6128 struct hci_cp_le_conn_param_req_neg_reply cp; 6129 6130 cp.handle = cpu_to_le16(handle); 6131 cp.reason = reason; 6132 6133 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp), 6134 &cp); 6135 } 6136 6137 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev, 6138 struct sk_buff *skb) 6139 { 6140 struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data; 6141 struct hci_cp_le_conn_param_req_reply cp; 6142 struct hci_conn *hcon; 6143 u16 handle, min, max, latency, timeout; 6144 6145 handle = le16_to_cpu(ev->handle); 6146 min = le16_to_cpu(ev->interval_min); 6147 max = le16_to_cpu(ev->interval_max); 6148 latency = le16_to_cpu(ev->latency); 6149 timeout = le16_to_cpu(ev->timeout); 6150 6151 hcon = hci_conn_hash_lookup_handle(hdev, handle); 6152 if (!hcon || hcon->state != BT_CONNECTED) 6153 return send_conn_param_neg_reply(hdev, handle, 6154 HCI_ERROR_UNKNOWN_CONN_ID); 6155 6156 if (hci_check_conn_params(min, max, latency, timeout)) 6157 return send_conn_param_neg_reply(hdev, handle, 6158 HCI_ERROR_INVALID_LL_PARAMS); 6159 6160 if (hcon->role == HCI_ROLE_MASTER) { 6161 struct hci_conn_params *params; 6162 u8 store_hint; 6163 6164 hci_dev_lock(hdev); 6165 6166 params = hci_conn_params_lookup(hdev, &hcon->dst, 6167 hcon->dst_type); 6168 if (params) { 6169 params->conn_min_interval = min; 6170 params->conn_max_interval = max; 6171 params->conn_latency = latency; 6172 params->supervision_timeout = timeout; 6173 store_hint = 0x01; 6174 } else { 6175 store_hint = 0x00; 6176 } 6177 6178 hci_dev_unlock(hdev); 6179 6180 mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type, 6181 store_hint, min, max, latency, timeout); 6182 } 6183 6184 cp.handle = ev->handle; 6185 cp.interval_min = ev->interval_min; 6186 cp.interval_max = ev->interval_max; 6187 cp.latency = ev->latency; 6188 cp.timeout = ev->timeout; 6189 cp.min_ce_len = 0; 6190 cp.max_ce_len = 0; 6191 6192 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp); 6193 } 6194 6195 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, 6196 struct sk_buff *skb) 6197 { 6198 u8 num_reports = skb->data[0]; 6199 struct hci_ev_le_direct_adv_info *ev = (void *)&skb->data[1]; 6200 6201 if (!num_reports || skb->len < num_reports * sizeof(*ev) + 1) 6202 return; 6203 6204 hci_dev_lock(hdev); 6205 6206 for (; num_reports; num_reports--, ev++) 6207 process_adv_report(hdev, ev->evt_type, &ev->bdaddr, 6208 ev->bdaddr_type, &ev->direct_addr, 6209 ev->direct_addr_type, ev->rssi, NULL, 0, 6210 false); 6211 6212 hci_dev_unlock(hdev); 6213 } 6214 6215 static void hci_le_phy_update_evt(struct hci_dev *hdev, struct sk_buff *skb) 6216 { 6217 struct hci_ev_le_phy_update_complete *ev = (void *) skb->data; 6218 struct hci_conn *conn; 6219 6220 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 6221 6222 if (ev->status) 6223 return; 6224 6225 hci_dev_lock(hdev); 6226 6227 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 6228 if (!conn) 6229 goto unlock; 6230 6231 conn->le_tx_phy =
ev->tx_phy; 6232 conn->le_rx_phy = ev->rx_phy; 6233 6234 unlock: 6235 hci_dev_unlock(hdev); 6236 } 6237 6238 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb) 6239 { 6240 struct hci_ev_le_meta *le_ev = (void *) skb->data; 6241 6242 skb_pull(skb, sizeof(*le_ev)); 6243 6244 switch (le_ev->subevent) { 6245 case HCI_EV_LE_CONN_COMPLETE: 6246 hci_le_conn_complete_evt(hdev, skb); 6247 break; 6248 6249 case HCI_EV_LE_CONN_UPDATE_COMPLETE: 6250 hci_le_conn_update_complete_evt(hdev, skb); 6251 break; 6252 6253 case HCI_EV_LE_ADVERTISING_REPORT: 6254 hci_le_adv_report_evt(hdev, skb); 6255 break; 6256 6257 case HCI_EV_LE_REMOTE_FEAT_COMPLETE: 6258 hci_le_remote_feat_complete_evt(hdev, skb); 6259 break; 6260 6261 case HCI_EV_LE_LTK_REQ: 6262 hci_le_ltk_request_evt(hdev, skb); 6263 break; 6264 6265 case HCI_EV_LE_REMOTE_CONN_PARAM_REQ: 6266 hci_le_remote_conn_param_req_evt(hdev, skb); 6267 break; 6268 6269 case HCI_EV_LE_DIRECT_ADV_REPORT: 6270 hci_le_direct_adv_report_evt(hdev, skb); 6271 break; 6272 6273 case HCI_EV_LE_PHY_UPDATE_COMPLETE: 6274 hci_le_phy_update_evt(hdev, skb); 6275 break; 6276 6277 case HCI_EV_LE_EXT_ADV_REPORT: 6278 hci_le_ext_adv_report_evt(hdev, skb); 6279 break; 6280 6281 case HCI_EV_LE_ENHANCED_CONN_COMPLETE: 6282 hci_le_enh_conn_complete_evt(hdev, skb); 6283 break; 6284 6285 case HCI_EV_LE_EXT_ADV_SET_TERM: 6286 hci_le_ext_adv_term_evt(hdev, skb); 6287 break; 6288 6289 default: 6290 break; 6291 } 6292 } 6293 6294 static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode, 6295 u8 event, struct sk_buff *skb) 6296 { 6297 struct hci_ev_cmd_complete *ev; 6298 struct hci_event_hdr *hdr; 6299 6300 if (!skb) 6301 return false; 6302 6303 if (skb->len < sizeof(*hdr)) { 6304 bt_dev_err(hdev, "too short HCI event"); 6305 return false; 6306 } 6307 6308 hdr = (void *) skb->data; 6309 skb_pull(skb, HCI_EVENT_HDR_SIZE); 6310 6311 if (event) { 6312 if (hdr->evt != event) 6313 return false; 6314 return true; 6315 } 6316 6317 /* Check if request ended in Command Status - no way to retrieve 6318 * any extra parameters in this case. 6319 */ 6320 if (hdr->evt == HCI_EV_CMD_STATUS) 6321 return false; 6322 6323 if (hdr->evt != HCI_EV_CMD_COMPLETE) { 6324 bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)", 6325 hdr->evt); 6326 return false; 6327 } 6328 6329 if (skb->len < sizeof(*ev)) { 6330 bt_dev_err(hdev, "too short cmd_complete event"); 6331 return false; 6332 } 6333 6334 ev = (void *) skb->data; 6335 skb_pull(skb, sizeof(*ev)); 6336 6337 if (opcode != __le16_to_cpu(ev->opcode)) { 6338 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode, 6339 __le16_to_cpu(ev->opcode)); 6340 return false; 6341 } 6342 6343 return true; 6344 } 6345 6346 static void hci_store_wake_reason(struct hci_dev *hdev, u8 event, 6347 struct sk_buff *skb) 6348 { 6349 struct hci_ev_le_advertising_info *adv; 6350 struct hci_ev_le_direct_adv_info *direct_adv; 6351 struct hci_ev_le_ext_adv_report *ext_adv; 6352 const struct hci_ev_conn_complete *conn_complete = (void *)skb->data; 6353 const struct hci_ev_conn_request *conn_request = (void *)skb->data; 6354 6355 hci_dev_lock(hdev); 6356 6357 /* If we are currently suspended and this is the first BT event seen, 6358 * save the wake reason associated with the event. 6359 */ 6360 if (!hdev->suspended || hdev->wake_reason) 6361 goto unlock; 6362 6363 /* Default to remote wake. Values for wake_reason are documented in the 6364 * Bluez mgmt api docs. 
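*
* Remote wake is the default assumption here;
* MGMT_WAKE_REASON_UNEXPECTED is set below for any event that is
* neither connection related nor an advertising report.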
static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
				  struct sk_buff *skb)
{
	struct hci_ev_le_advertising_info *adv;
	struct hci_ev_le_direct_adv_info *direct_adv;
	struct hci_ev_le_ext_adv_report *ext_adv;
	const struct hci_ev_conn_complete *conn_complete = (void *)skb->data;
	const struct hci_ev_conn_request *conn_request = (void *)skb->data;

	hci_dev_lock(hdev);

	/* If we are currently suspended and this is the first BT event seen,
	 * save the wake reason associated with the event.
	 */
	if (!hdev->suspended || hdev->wake_reason)
		goto unlock;

	/* Default to remote wake. Values for wake_reason are documented in
	 * the BlueZ mgmt API docs.
	 */
	hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE;

	/* Once configured for remote wakeup, we should only wake up for
	 * reconnections. It's useful to see which device is waking us up so
	 * keep track of the bdaddr of the connection event that woke us up.
	 *
	 * Note that hci_ev_conn_request carries the bdaddr first, while
	 * hci_ev_conn_complete has status and handle before it, so each
	 * event must be parsed with its matching structure.
	 */
	if (event == HCI_EV_CONN_REQUEST) {
		bacpy(&hdev->wake_addr, &conn_request->bdaddr);
		hdev->wake_addr_type = BDADDR_BREDR;
	} else if (event == HCI_EV_CONN_COMPLETE) {
		bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
		hdev->wake_addr_type = BDADDR_BREDR;
	} else if (event == HCI_EV_LE_META) {
		struct hci_ev_le_meta *le_ev = (void *)skb->data;
		u8 subevent = le_ev->subevent;
		u8 *ptr = &skb->data[sizeof(*le_ev)];
		u8 num_reports = *ptr;

		if ((subevent == HCI_EV_LE_ADVERTISING_REPORT ||
		     subevent == HCI_EV_LE_DIRECT_ADV_REPORT ||
		     subevent == HCI_EV_LE_EXT_ADV_REPORT) &&
		    num_reports) {
			adv = (void *)(ptr + 1);
			direct_adv = (void *)(ptr + 1);
			ext_adv = (void *)(ptr + 1);

			switch (subevent) {
			case HCI_EV_LE_ADVERTISING_REPORT:
				bacpy(&hdev->wake_addr, &adv->bdaddr);
				hdev->wake_addr_type = adv->bdaddr_type;
				break;
			case HCI_EV_LE_DIRECT_ADV_REPORT:
				bacpy(&hdev->wake_addr, &direct_adv->bdaddr);
				hdev->wake_addr_type = direct_adv->bdaddr_type;
				break;
			case HCI_EV_LE_EXT_ADV_REPORT:
				bacpy(&hdev->wake_addr, &ext_adv->bdaddr);
				hdev->wake_addr_type = ext_adv->bdaddr_type;
				break;
			}
		}
	} else {
		hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED;
	}

unlock:
	hci_dev_unlock(hdev);
}
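/* Main HCI event dispatcher, called from the RX work item for every
 * HCI_EVENT_PKT the driver queues. The skb (and any pristine clone
 * taken for request completion) is always consumed here, so callers
 * must not free it themselves.
 */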
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	hci_req_complete_t req_complete = NULL;
	hci_req_complete_skb_t req_complete_skb = NULL;
	struct sk_buff *orig_skb = NULL;
	u8 status = 0, event = hdr->evt, req_evt = 0;
	u16 opcode = HCI_OP_NOP;

	if (!event) {
		bt_dev_warn(hdev, "Received unexpected HCI Event 00000000");
		goto done;
	}

	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) {
		struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;

		opcode = __le16_to_cpu(cmd_hdr->opcode);
		hci_req_cmd_complete(hdev, opcode, status, &req_complete,
				     &req_complete_skb);
		req_evt = event;
	}

	/* If it looks like we might end up having to call
	 * req_complete_skb, store a pristine copy of the skb since the
	 * various handlers may modify the original one through
	 * skb_pull() calls, etc.
	 */
	if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
	    event == HCI_EV_CMD_COMPLETE)
		orig_skb = skb_clone(skb, GFP_KERNEL);

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* Store wake reason if we're suspended */
	hci_store_wake_reason(hdev, event, skb);

	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb, &opcode, &status,
				     &req_complete, &req_complete_skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb, &opcode, &status, &req_complete,
				   &req_complete_skb);
		break;

	case HCI_EV_HARDWARE_ERROR:
		hci_hardware_error_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_KEY_REFRESH_COMPLETE:
		hci_key_refresh_complete_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_REQUEST:
		hci_user_passkey_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_NOTIFY:
		hci_user_passkey_notify_evt(hdev, skb);
		break;

	case HCI_EV_KEYPRESS_NOTIFY:
		hci_keypress_notify_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;
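	/* AMP (Bluetooth High Speed) events are only handled when the
	 * kernel is built with CONFIG_BT_HS; without it they simply hit
	 * the default case below and are logged.
	 */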
#if IS_ENABLED(CONFIG_BT_HS)
	case HCI_EV_CHANNEL_SELECTED:
		hci_chan_selected_evt(hdev, skb);
		break;

	case HCI_EV_PHY_LINK_COMPLETE:
		hci_phy_link_complete_evt(hdev, skb);
		break;

	case HCI_EV_LOGICAL_LINK_COMPLETE:
		hci_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
		hci_disconn_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
		hci_disconn_phylink_complete_evt(hdev, skb);
		break;
#endif

	case HCI_EV_NUM_COMP_BLOCKS:
		hci_num_comp_blocks_evt(hdev, skb);
		break;

	case HCI_EV_VENDOR:
		msft_vendor_evt(hdev, skb);
		break;

	default:
		BT_DBG("%s event 0x%2.2x", hdev->name, event);
		break;
	}

	if (req_complete) {
		req_complete(hdev, status, opcode);
	} else if (req_complete_skb) {
		if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
			kfree_skb(orig_skb);
			orig_skb = NULL;
		}
		req_complete_skb(hdev, status, opcode, orig_skb);
	}

done:
	kfree_skb(orig_skb);
	kfree_skb(skb);
	hdev->stat.evt_rx++;
}