1 /* 2 BlueZ - Bluetooth protocol stack for Linux 3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved. 4 5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> 6 7 This program is free software; you can redistribute it and/or modify 8 it under the terms of the GNU General Public License version 2 as 9 published by the Free Software Foundation; 10 11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. 14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY 15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES 16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 19 20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, 21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS 22 SOFTWARE IS DISCLAIMED. 23 */ 24 25 /* Bluetooth HCI event handling. */ 26 27 #include <asm/unaligned.h> 28 29 #include <net/bluetooth/bluetooth.h> 30 #include <net/bluetooth/hci_core.h> 31 #include <net/bluetooth/mgmt.h> 32 33 #include "hci_request.h" 34 #include "hci_debugfs.h" 35 #include "a2mp.h" 36 #include "amp.h" 37 #include "smp.h" 38 #include "msft.h" 39 40 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \ 41 "\x00\x00\x00\x00\x00\x00\x00\x00" 42 43 /* Handle HCI Event packets */ 44 45 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb, 46 u8 *new_status) 47 { 48 __u8 status = *((__u8 *) skb->data); 49 50 BT_DBG("%s status 0x%2.2x", hdev->name, status); 51 52 /* It is possible that we receive Inquiry Complete event right 53 * before we receive Inquiry Cancel Command Complete event, in 54 * which case the latter event should have status of Command 55 * Disallowed (0x0c). This should not be treated as error, since 56 * we actually achieve what Inquiry Cancel wants to achieve, 57 * which is to end the last Inquiry session. 58 */ 59 if (status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) { 60 bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command"); 61 status = 0x00; 62 } 63 64 *new_status = status; 65 66 if (status) 67 return; 68 69 clear_bit(HCI_INQUIRY, &hdev->flags); 70 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */ 71 wake_up_bit(&hdev->flags, HCI_INQUIRY); 72 73 hci_dev_lock(hdev); 74 /* Set discovery state to stopped if we're not doing LE active 75 * scanning. 76 */ 77 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) || 78 hdev->le_scan_type != LE_SCAN_ACTIVE) 79 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 80 hci_dev_unlock(hdev); 81 82 hci_conn_check_pending(hdev); 83 } 84 85 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb) 86 { 87 __u8 status = *((__u8 *) skb->data); 88 89 BT_DBG("%s status 0x%2.2x", hdev->name, status); 90 91 if (status) 92 return; 93 94 hci_dev_set_flag(hdev, HCI_PERIODIC_INQ); 95 } 96 97 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb) 98 { 99 __u8 status = *((__u8 *) skb->data); 100 101 BT_DBG("%s status 0x%2.2x", hdev->name, status); 102 103 if (status) 104 return; 105 106 hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ); 107 108 hci_conn_check_pending(hdev); 109 } 110 111 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev, 112 struct sk_buff *skb) 113 { 114 BT_DBG("%s", hdev->name); 115 } 116 117 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb) 118 { 119 struct hci_rp_role_discovery *rp = (void *) skb->data; 120 struct hci_conn *conn; 121 122 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 123 124 if (rp->status) 125 return; 126 127 hci_dev_lock(hdev); 128 129 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); 130 if (conn) 131 conn->role = rp->role; 132 133 hci_dev_unlock(hdev); 134 } 135 136 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb) 137 { 138 struct hci_rp_read_link_policy *rp = (void *) skb->data; 139 struct hci_conn *conn; 140 141 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 142 143 if (rp->status) 144 return; 145 146 hci_dev_lock(hdev); 147 148 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); 149 if (conn) 150 conn->link_policy = __le16_to_cpu(rp->policy); 151 152 hci_dev_unlock(hdev); 153 } 154 155 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb) 156 { 157 struct hci_rp_write_link_policy *rp = (void *) skb->data; 158 struct hci_conn *conn; 159 void *sent; 160 161 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 162 163 if (rp->status) 164 return; 165 166 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY); 167 if (!sent) 168 return; 169 170 hci_dev_lock(hdev); 171 172 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); 173 if (conn) 174 conn->link_policy = get_unaligned_le16(sent + 2); 175 176 hci_dev_unlock(hdev); 177 } 178 179 static void hci_cc_read_def_link_policy(struct hci_dev *hdev, 180 struct sk_buff *skb) 181 { 182 struct hci_rp_read_def_link_policy *rp = (void *) skb->data; 183 184 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 185 186 if (rp->status) 187 return; 188 189 hdev->link_policy = __le16_to_cpu(rp->policy); 190 } 191 192 static void hci_cc_write_def_link_policy(struct hci_dev *hdev, 193 struct sk_buff *skb) 194 { 195 __u8 status = *((__u8 *) skb->data); 196 void *sent; 197 198 BT_DBG("%s status 0x%2.2x", hdev->name, status); 199 200 if (status) 201 return; 202 203 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY); 204 if (!sent) 205 return; 206 207 hdev->link_policy = get_unaligned_le16(sent); 208 } 209 210 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb) 211 { 212 __u8 status = *((__u8 *) skb->data); 213 214 BT_DBG("%s status 0x%2.2x", hdev->name, status); 215 216 clear_bit(HCI_RESET, &hdev->flags); 217 218 if (status) 219 return; 220 221 /* Reset all non-persistent flags */ 222 hci_dev_clear_volatile_flags(hdev); 223 224 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 225 226 hdev->inq_tx_power = HCI_TX_POWER_INVALID; 227 hdev->adv_tx_power = HCI_TX_POWER_INVALID; 228 229 memset(hdev->adv_data, 0, sizeof(hdev->adv_data)); 230 hdev->adv_data_len = 0; 231 232 memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data)); 233 hdev->scan_rsp_data_len = 0; 234 235 hdev->le_scan_type = LE_SCAN_PASSIVE; 236 237 hdev->ssp_debug_mode = 0; 238 239 hci_bdaddr_list_clear(&hdev->le_white_list); 240 hci_bdaddr_list_clear(&hdev->le_resolv_list); 241 } 242 243 static void hci_cc_read_stored_link_key(struct hci_dev *hdev, 244 struct sk_buff *skb) 245 { 246 struct hci_rp_read_stored_link_key *rp = (void *)skb->data; 247 struct hci_cp_read_stored_link_key *sent; 248 249 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 250 251 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY); 252 if (!sent) 253 return; 254 255 if (!rp->status && sent->read_all == 0x01) { 256 hdev->stored_max_keys = rp->max_keys; 257 hdev->stored_num_keys = rp->num_keys; 258 } 259 } 260 261 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev, 262 struct sk_buff *skb) 263 { 264 struct hci_rp_delete_stored_link_key *rp = (void *)skb->data; 265 266 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 267 268 if (rp->status) 269 return; 270 271 if (rp->num_keys <= hdev->stored_num_keys) 272 hdev->stored_num_keys -= rp->num_keys; 273 else 274 hdev->stored_num_keys = 0; 275 } 276 277 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb) 278 { 279 __u8 status = *((__u8 *) skb->data); 280 void *sent; 281 282 BT_DBG("%s status 0x%2.2x", hdev->name, status); 283 284 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME); 285 if (!sent) 286 return; 287 288 hci_dev_lock(hdev); 289 290 if (hci_dev_test_flag(hdev, HCI_MGMT)) 291 mgmt_set_local_name_complete(hdev, sent, status); 292 else if (!status) 293 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH); 294 295 hci_dev_unlock(hdev); 296 } 297 298 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb) 299 { 300 struct hci_rp_read_local_name *rp = (void *) skb->data; 301 302 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 303 304 if (rp->status) 305 return; 306 307 if (hci_dev_test_flag(hdev, HCI_SETUP) || 308 hci_dev_test_flag(hdev, HCI_CONFIG)) 309 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH); 310 } 311 312 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb) 313 { 314 __u8 status = *((__u8 *) skb->data); 315 void *sent; 316 317 BT_DBG("%s status 0x%2.2x", hdev->name, status); 318 319 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE); 320 if (!sent) 321 return; 322 323 hci_dev_lock(hdev); 324 325 if (!status) { 326 __u8 param = *((__u8 *) sent); 327 328 if (param == AUTH_ENABLED) 329 set_bit(HCI_AUTH, &hdev->flags); 330 else 331 clear_bit(HCI_AUTH, &hdev->flags); 332 } 333 334 if (hci_dev_test_flag(hdev, HCI_MGMT)) 335 mgmt_auth_enable_complete(hdev, status); 336 337 hci_dev_unlock(hdev); 338 } 339 340 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb) 341 { 342 __u8 status = *((__u8 *) skb->data); 343 __u8 param; 344 void *sent; 345 346 BT_DBG("%s status 0x%2.2x", hdev->name, status); 347 348 if (status) 349 return; 350 351 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE); 352 if (!sent) 353 return; 354 355 param = *((__u8 *) sent); 356 357 if (param) 358 set_bit(HCI_ENCRYPT, &hdev->flags); 359 else 360 clear_bit(HCI_ENCRYPT, &hdev->flags); 361 } 362 363 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb) 364 { 365 __u8 status = *((__u8 *) skb->data); 366 __u8 param; 367 void *sent; 368 369 BT_DBG("%s status 0x%2.2x", hdev->name, status); 370 371 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE); 372 if (!sent) 373 return; 374 375 param = *((__u8 *) sent); 376 377 hci_dev_lock(hdev); 378 379 if (status) { 380 hdev->discov_timeout = 0; 381 goto done; 382 } 383 384 if (param & SCAN_INQUIRY) 385 set_bit(HCI_ISCAN, &hdev->flags); 386 else 387 clear_bit(HCI_ISCAN, &hdev->flags); 388 389 if (param & SCAN_PAGE) 390 set_bit(HCI_PSCAN, &hdev->flags); 391 else 392 clear_bit(HCI_PSCAN, &hdev->flags); 393 394 done: 395 hci_dev_unlock(hdev); 396 } 397 398 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb) 399 { 400 struct hci_rp_read_class_of_dev *rp = (void *) skb->data; 401 402 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 403 404 if (rp->status) 405 return; 406 407 memcpy(hdev->dev_class, rp->dev_class, 3); 408 409 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name, 410 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]); 411 } 412 413 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb) 414 { 415 __u8 status = *((__u8 *) skb->data); 416 void *sent; 417 418 BT_DBG("%s status 0x%2.2x", hdev->name, status); 419 420 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV); 421 if (!sent) 422 return; 423 424 hci_dev_lock(hdev); 425 426 if (status == 0) 427 memcpy(hdev->dev_class, sent, 3); 428 429 if (hci_dev_test_flag(hdev, HCI_MGMT)) 430 mgmt_set_class_of_dev_complete(hdev, sent, status); 431 432 hci_dev_unlock(hdev); 433 } 434 435 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb) 436 { 437 struct hci_rp_read_voice_setting *rp = (void *) skb->data; 438 __u16 setting; 439 440 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 441 442 if (rp->status) 443 return; 444 445 setting = __le16_to_cpu(rp->voice_setting); 446 447 if (hdev->voice_setting == setting) 448 return; 449 450 hdev->voice_setting = setting; 451 452 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting); 453 454 if (hdev->notify) 455 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING); 456 } 457 458 static void hci_cc_write_voice_setting(struct hci_dev *hdev, 459 struct sk_buff *skb) 460 { 461 __u8 status = *((__u8 *) skb->data); 462 __u16 setting; 463 void *sent; 464 465 BT_DBG("%s status 0x%2.2x", hdev->name, status); 466 467 if (status) 468 return; 469 470 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING); 471 if (!sent) 472 return; 473 474 setting = get_unaligned_le16(sent); 475 476 if (hdev->voice_setting == setting) 477 return; 478 479 hdev->voice_setting = setting; 480 481 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting); 482 483 if (hdev->notify) 484 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING); 485 } 486 487 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev, 488 struct sk_buff *skb) 489 { 490 struct hci_rp_read_num_supported_iac *rp = (void *) skb->data; 491 492 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 493 494 if (rp->status) 495 return; 496 497 hdev->num_iac = rp->num_iac; 498 499 BT_DBG("%s num iac %d", hdev->name, hdev->num_iac); 500 } 501 502 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb) 503 { 504 __u8 status = *((__u8 *) skb->data); 505 struct hci_cp_write_ssp_mode *sent; 506 507 BT_DBG("%s status 0x%2.2x", hdev->name, status); 508 509 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE); 510 if (!sent) 511 return; 512 513 hci_dev_lock(hdev); 514 515 if (!status) { 516 if (sent->mode) 517 hdev->features[1][0] |= LMP_HOST_SSP; 518 else 519 hdev->features[1][0] &= ~LMP_HOST_SSP; 520 } 521 522 if (hci_dev_test_flag(hdev, HCI_MGMT)) 523 mgmt_ssp_enable_complete(hdev, sent->mode, status); 524 else if (!status) { 525 if (sent->mode) 526 hci_dev_set_flag(hdev, HCI_SSP_ENABLED); 527 else 528 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED); 529 } 530 531 hci_dev_unlock(hdev); 532 } 533 534 static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb) 535 { 536 u8 status = *((u8 *) skb->data); 537 struct hci_cp_write_sc_support *sent; 538 539 BT_DBG("%s status 0x%2.2x", hdev->name, status); 540 541 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT); 542 if (!sent) 543 return; 544 545 hci_dev_lock(hdev); 546 547 if (!status) { 548 if (sent->support) 549 hdev->features[1][0] |= LMP_HOST_SC; 550 else 551 hdev->features[1][0] &= ~LMP_HOST_SC; 552 } 553 554 if (!hci_dev_test_flag(hdev, HCI_MGMT) && !status) { 555 if (sent->support) 556 hci_dev_set_flag(hdev, HCI_SC_ENABLED); 557 else 558 hci_dev_clear_flag(hdev, HCI_SC_ENABLED); 559 } 560 561 hci_dev_unlock(hdev); 562 } 563 564 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb) 565 { 566 struct hci_rp_read_local_version *rp = (void *) skb->data; 567 568 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 569 570 if (rp->status) 571 return; 572 573 if (hci_dev_test_flag(hdev, HCI_SETUP) || 574 hci_dev_test_flag(hdev, HCI_CONFIG)) { 575 hdev->hci_ver = rp->hci_ver; 576 hdev->hci_rev = __le16_to_cpu(rp->hci_rev); 577 hdev->lmp_ver = rp->lmp_ver; 578 hdev->manufacturer = __le16_to_cpu(rp->manufacturer); 579 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver); 580 } 581 } 582 583 static void hci_cc_read_local_commands(struct hci_dev *hdev, 584 struct sk_buff *skb) 585 { 586 struct hci_rp_read_local_commands *rp = (void *) skb->data; 587 588 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 589 590 if (rp->status) 591 return; 592 593 if (hci_dev_test_flag(hdev, HCI_SETUP) || 594 hci_dev_test_flag(hdev, HCI_CONFIG)) 595 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands)); 596 } 597 598 static void hci_cc_read_auth_payload_timeout(struct hci_dev *hdev, 599 struct sk_buff *skb) 600 { 601 struct hci_rp_read_auth_payload_to *rp = (void *)skb->data; 602 struct hci_conn *conn; 603 604 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 605 606 if (rp->status) 607 return; 608 609 hci_dev_lock(hdev); 610 611 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); 612 if (conn) 613 conn->auth_payload_timeout = __le16_to_cpu(rp->timeout); 614 615 hci_dev_unlock(hdev); 616 } 617 618 static void hci_cc_write_auth_payload_timeout(struct hci_dev *hdev, 619 struct sk_buff *skb) 620 { 621 struct hci_rp_write_auth_payload_to *rp = (void *)skb->data; 622 struct hci_conn *conn; 623 void *sent; 624 625 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 626 627 if (rp->status) 628 return; 629 630 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO); 631 if (!sent) 632 return; 633 634 hci_dev_lock(hdev); 635 636 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); 637 if (conn) 638 conn->auth_payload_timeout = get_unaligned_le16(sent + 2); 639 640 hci_dev_unlock(hdev); 641 } 642 643 static void hci_cc_read_local_features(struct hci_dev *hdev, 644 struct sk_buff *skb) 645 { 646 struct hci_rp_read_local_features *rp = (void *) skb->data; 647 648 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 649 650 if (rp->status) 651 return; 652 653 memcpy(hdev->features, rp->features, 8); 654 655 /* Adjust default settings according to features 656 * supported by device. */ 657 658 if (hdev->features[0][0] & LMP_3SLOT) 659 hdev->pkt_type |= (HCI_DM3 | HCI_DH3); 660 661 if (hdev->features[0][0] & LMP_5SLOT) 662 hdev->pkt_type |= (HCI_DM5 | HCI_DH5); 663 664 if (hdev->features[0][1] & LMP_HV2) { 665 hdev->pkt_type |= (HCI_HV2); 666 hdev->esco_type |= (ESCO_HV2); 667 } 668 669 if (hdev->features[0][1] & LMP_HV3) { 670 hdev->pkt_type |= (HCI_HV3); 671 hdev->esco_type |= (ESCO_HV3); 672 } 673 674 if (lmp_esco_capable(hdev)) 675 hdev->esco_type |= (ESCO_EV3); 676 677 if (hdev->features[0][4] & LMP_EV4) 678 hdev->esco_type |= (ESCO_EV4); 679 680 if (hdev->features[0][4] & LMP_EV5) 681 hdev->esco_type |= (ESCO_EV5); 682 683 if (hdev->features[0][5] & LMP_EDR_ESCO_2M) 684 hdev->esco_type |= (ESCO_2EV3); 685 686 if (hdev->features[0][5] & LMP_EDR_ESCO_3M) 687 hdev->esco_type |= (ESCO_3EV3); 688 689 if (hdev->features[0][5] & LMP_EDR_3S_ESCO) 690 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5); 691 } 692 693 static void hci_cc_read_local_ext_features(struct hci_dev *hdev, 694 struct sk_buff *skb) 695 { 696 struct hci_rp_read_local_ext_features *rp = (void *) skb->data; 697 698 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 699 700 if (rp->status) 701 return; 702 703 if (hdev->max_page < rp->max_page) 704 hdev->max_page = rp->max_page; 705 706 if (rp->page < HCI_MAX_PAGES) 707 memcpy(hdev->features[rp->page], rp->features, 8); 708 } 709 710 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev, 711 struct sk_buff *skb) 712 { 713 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data; 714 715 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 716 717 if (rp->status) 718 return; 719 720 hdev->flow_ctl_mode = rp->mode; 721 } 722 723 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb) 724 { 725 struct hci_rp_read_buffer_size *rp = (void *) skb->data; 726 727 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 728 729 if (rp->status) 730 return; 731 732 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu); 733 hdev->sco_mtu = rp->sco_mtu; 734 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt); 735 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt); 736 737 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) { 738 hdev->sco_mtu = 64; 739 hdev->sco_pkts = 8; 740 } 741 742 hdev->acl_cnt = hdev->acl_pkts; 743 hdev->sco_cnt = hdev->sco_pkts; 744 745 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu, 746 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts); 747 } 748 749 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb) 750 { 751 struct hci_rp_read_bd_addr *rp = (void *) skb->data; 752 753 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 754 755 if (rp->status) 756 return; 757 758 if (test_bit(HCI_INIT, &hdev->flags)) 759 bacpy(&hdev->bdaddr, &rp->bdaddr); 760 761 if (hci_dev_test_flag(hdev, HCI_SETUP)) 762 bacpy(&hdev->setup_addr, &rp->bdaddr); 763 } 764 765 static void hci_cc_read_local_pairing_opts(struct hci_dev *hdev, 766 struct sk_buff *skb) 767 { 768 struct hci_rp_read_local_pairing_opts *rp = (void *) skb->data; 769 770 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 771 772 if (rp->status) 773 return; 774 775 if (hci_dev_test_flag(hdev, HCI_SETUP) || 776 hci_dev_test_flag(hdev, HCI_CONFIG)) { 777 hdev->pairing_opts = rp->pairing_opts; 778 hdev->max_enc_key_size = rp->max_key_size; 779 } 780 } 781 782 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev, 783 struct sk_buff *skb) 784 { 785 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data; 786 787 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 788 789 if (rp->status) 790 return; 791 792 if (test_bit(HCI_INIT, &hdev->flags)) { 793 hdev->page_scan_interval = __le16_to_cpu(rp->interval); 794 hdev->page_scan_window = __le16_to_cpu(rp->window); 795 } 796 } 797 798 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev, 799 struct sk_buff *skb) 800 { 801 u8 status = *((u8 *) skb->data); 802 struct hci_cp_write_page_scan_activity *sent; 803 804 BT_DBG("%s status 0x%2.2x", hdev->name, status); 805 806 if (status) 807 return; 808 809 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY); 810 if (!sent) 811 return; 812 813 hdev->page_scan_interval = __le16_to_cpu(sent->interval); 814 hdev->page_scan_window = __le16_to_cpu(sent->window); 815 } 816 817 static void hci_cc_read_page_scan_type(struct hci_dev *hdev, 818 struct sk_buff *skb) 819 { 820 struct hci_rp_read_page_scan_type *rp = (void *) skb->data; 821 822 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 823 824 if (rp->status) 825 return; 826 827 if (test_bit(HCI_INIT, &hdev->flags)) 828 hdev->page_scan_type = rp->type; 829 } 830 831 static void hci_cc_write_page_scan_type(struct hci_dev *hdev, 832 struct sk_buff *skb) 833 { 834 u8 status = *((u8 *) skb->data); 835 u8 *type; 836 837 BT_DBG("%s status 0x%2.2x", hdev->name, status); 838 839 if (status) 840 return; 841 842 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE); 843 if (type) 844 hdev->page_scan_type = *type; 845 } 846 847 static void hci_cc_read_data_block_size(struct hci_dev *hdev, 848 struct sk_buff *skb) 849 { 850 struct hci_rp_read_data_block_size *rp = (void *) skb->data; 851 852 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 853 854 if (rp->status) 855 return; 856 857 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len); 858 hdev->block_len = __le16_to_cpu(rp->block_len); 859 hdev->num_blocks = __le16_to_cpu(rp->num_blocks); 860 861 hdev->block_cnt = hdev->num_blocks; 862 863 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu, 864 hdev->block_cnt, hdev->block_len); 865 } 866 867 static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb) 868 { 869 struct hci_rp_read_clock *rp = (void *) skb->data; 870 struct hci_cp_read_clock *cp; 871 struct hci_conn *conn; 872 873 BT_DBG("%s", hdev->name); 874 875 if (skb->len < sizeof(*rp)) 876 return; 877 878 if (rp->status) 879 return; 880 881 hci_dev_lock(hdev); 882 883 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK); 884 if (!cp) 885 goto unlock; 886 887 if (cp->which == 0x00) { 888 hdev->clock = le32_to_cpu(rp->clock); 889 goto unlock; 890 } 891 892 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); 893 if (conn) { 894 conn->clock = le32_to_cpu(rp->clock); 895 conn->clock_accuracy = le16_to_cpu(rp->accuracy); 896 } 897 898 unlock: 899 hci_dev_unlock(hdev); 900 } 901 902 static void hci_cc_read_local_amp_info(struct hci_dev *hdev, 903 struct sk_buff *skb) 904 { 905 struct hci_rp_read_local_amp_info *rp = (void *) skb->data; 906 907 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 908 909 if (rp->status) 910 return; 911 912 hdev->amp_status = rp->amp_status; 913 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw); 914 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw); 915 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency); 916 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu); 917 hdev->amp_type = rp->amp_type; 918 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap); 919 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size); 920 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to); 921 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to); 922 } 923 924 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, 925 struct sk_buff *skb) 926 { 927 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data; 928 929 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 930 931 if (rp->status) 932 return; 933 934 hdev->inq_tx_power = rp->tx_power; 935 } 936 937 static void hci_cc_read_def_err_data_reporting(struct hci_dev *hdev, 938 struct sk_buff *skb) 939 { 940 struct hci_rp_read_def_err_data_reporting *rp = (void *)skb->data; 941 942 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 943 944 if (rp->status) 945 return; 946 947 hdev->err_data_reporting = rp->err_data_reporting; 948 } 949 950 static void hci_cc_write_def_err_data_reporting(struct hci_dev *hdev, 951 struct sk_buff *skb) 952 { 953 __u8 status = *((__u8 *)skb->data); 954 struct hci_cp_write_def_err_data_reporting *cp; 955 956 BT_DBG("%s status 0x%2.2x", hdev->name, status); 957 958 if (status) 959 return; 960 961 cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING); 962 if (!cp) 963 return; 964 965 hdev->err_data_reporting = cp->err_data_reporting; 966 } 967 968 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb) 969 { 970 struct hci_rp_pin_code_reply *rp = (void *) skb->data; 971 struct hci_cp_pin_code_reply *cp; 972 struct hci_conn *conn; 973 974 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 975 976 hci_dev_lock(hdev); 977 978 if (hci_dev_test_flag(hdev, HCI_MGMT)) 979 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status); 980 981 if (rp->status) 982 goto unlock; 983 984 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY); 985 if (!cp) 986 goto unlock; 987 988 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); 989 if (conn) 990 conn->pin_length = cp->pin_len; 991 992 unlock: 993 hci_dev_unlock(hdev); 994 } 995 996 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb) 997 { 998 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data; 999 1000 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 1001 1002 hci_dev_lock(hdev); 1003 1004 if (hci_dev_test_flag(hdev, HCI_MGMT)) 1005 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr, 1006 rp->status); 1007 1008 hci_dev_unlock(hdev); 1009 } 1010 1011 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev, 1012 struct sk_buff *skb) 1013 { 1014 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data; 1015 1016 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 1017 1018 if (rp->status) 1019 return; 1020 1021 hdev->le_mtu = __le16_to_cpu(rp->le_mtu); 1022 hdev->le_pkts = rp->le_max_pkt; 1023 1024 hdev->le_cnt = hdev->le_pkts; 1025 1026 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts); 1027 } 1028 1029 static void hci_cc_le_read_local_features(struct hci_dev *hdev, 1030 struct sk_buff *skb) 1031 { 1032 struct hci_rp_le_read_local_features *rp = (void *) skb->data; 1033 1034 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 1035 1036 if (rp->status) 1037 return; 1038 1039 memcpy(hdev->le_features, rp->features, 8); 1040 } 1041 1042 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev, 1043 struct sk_buff *skb) 1044 { 1045 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data; 1046 1047 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 1048 1049 if (rp->status) 1050 return; 1051 1052 hdev->adv_tx_power = rp->tx_power; 1053 } 1054 1055 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb) 1056 { 1057 struct hci_rp_user_confirm_reply *rp = (void *) skb->data; 1058 1059 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 1060 1061 hci_dev_lock(hdev); 1062 1063 if (hci_dev_test_flag(hdev, HCI_MGMT)) 1064 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0, 1065 rp->status); 1066 1067 hci_dev_unlock(hdev); 1068 } 1069 1070 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev, 1071 struct sk_buff *skb) 1072 { 1073 struct hci_rp_user_confirm_reply *rp = (void *) skb->data; 1074 1075 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 1076 1077 hci_dev_lock(hdev); 1078 1079 if (hci_dev_test_flag(hdev, HCI_MGMT)) 1080 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr, 1081 ACL_LINK, 0, rp->status); 1082 1083 hci_dev_unlock(hdev); 1084 } 1085 1086 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb) 1087 { 1088 struct hci_rp_user_confirm_reply *rp = (void *) skb->data; 1089 1090 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 1091 1092 hci_dev_lock(hdev); 1093 1094 if (hci_dev_test_flag(hdev, HCI_MGMT)) 1095 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 1096 0, rp->status); 1097 1098 hci_dev_unlock(hdev); 1099 } 1100 1101 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev, 1102 struct sk_buff *skb) 1103 { 1104 struct hci_rp_user_confirm_reply *rp = (void *) skb->data; 1105 1106 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 1107 1108 hci_dev_lock(hdev); 1109 1110 if (hci_dev_test_flag(hdev, HCI_MGMT)) 1111 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr, 1112 ACL_LINK, 0, rp->status); 1113 1114 hci_dev_unlock(hdev); 1115 } 1116 1117 static void hci_cc_read_local_oob_data(struct hci_dev *hdev, 1118 struct sk_buff *skb) 1119 { 1120 struct hci_rp_read_local_oob_data *rp = (void *) skb->data; 1121 1122 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 1123 } 1124 1125 static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev, 1126 struct sk_buff *skb) 1127 { 1128 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data; 1129 1130 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 1131 } 1132 1133 static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb) 1134 { 1135 __u8 status = *((__u8 *) skb->data); 1136 bdaddr_t *sent; 1137 1138 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1139 1140 if (status) 1141 return; 1142 1143 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR); 1144 if (!sent) 1145 return; 1146 1147 hci_dev_lock(hdev); 1148 1149 bacpy(&hdev->random_addr, sent); 1150 1151 hci_dev_unlock(hdev); 1152 } 1153 1154 static void hci_cc_le_set_default_phy(struct hci_dev *hdev, struct sk_buff *skb) 1155 { 1156 __u8 status = *((__u8 *) skb->data); 1157 struct hci_cp_le_set_default_phy *cp; 1158 1159 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1160 1161 if (status) 1162 return; 1163 1164 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY); 1165 if (!cp) 1166 return; 1167 1168 hci_dev_lock(hdev); 1169 1170 hdev->le_tx_def_phys = cp->tx_phys; 1171 hdev->le_rx_def_phys = cp->rx_phys; 1172 1173 hci_dev_unlock(hdev); 1174 } 1175 1176 static void hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev, 1177 struct sk_buff *skb) 1178 { 1179 __u8 status = *((__u8 *) skb->data); 1180 struct hci_cp_le_set_adv_set_rand_addr *cp; 1181 struct adv_info *adv_instance; 1182 1183 if (status) 1184 return; 1185 1186 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR); 1187 if (!cp) 1188 return; 1189 1190 hci_dev_lock(hdev); 1191 1192 if (!hdev->cur_adv_instance) { 1193 /* Store in hdev for instance 0 (Set adv and Directed advs) */ 1194 bacpy(&hdev->random_addr, &cp->bdaddr); 1195 } else { 1196 adv_instance = hci_find_adv_instance(hdev, 1197 hdev->cur_adv_instance); 1198 if (adv_instance) 1199 bacpy(&adv_instance->random_addr, &cp->bdaddr); 1200 } 1201 1202 hci_dev_unlock(hdev); 1203 } 1204 1205 static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb) 1206 { 1207 __u8 *sent, status = *((__u8 *) skb->data); 1208 1209 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1210 1211 if (status) 1212 return; 1213 1214 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE); 1215 if (!sent) 1216 return; 1217 1218 hci_dev_lock(hdev); 1219 1220 /* If we're doing connection initiation as peripheral. Set a 1221 * timeout in case something goes wrong. 1222 */ 1223 if (*sent) { 1224 struct hci_conn *conn; 1225 1226 hci_dev_set_flag(hdev, HCI_LE_ADV); 1227 1228 conn = hci_lookup_le_connect(hdev); 1229 if (conn) 1230 queue_delayed_work(hdev->workqueue, 1231 &conn->le_conn_timeout, 1232 conn->conn_timeout); 1233 } else { 1234 hci_dev_clear_flag(hdev, HCI_LE_ADV); 1235 } 1236 1237 hci_dev_unlock(hdev); 1238 } 1239 1240 static void hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev, 1241 struct sk_buff *skb) 1242 { 1243 struct hci_cp_le_set_ext_adv_enable *cp; 1244 __u8 status = *((__u8 *) skb->data); 1245 1246 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1247 1248 if (status) 1249 return; 1250 1251 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE); 1252 if (!cp) 1253 return; 1254 1255 hci_dev_lock(hdev); 1256 1257 if (cp->enable) { 1258 struct hci_conn *conn; 1259 1260 hci_dev_set_flag(hdev, HCI_LE_ADV); 1261 1262 conn = hci_lookup_le_connect(hdev); 1263 if (conn) 1264 queue_delayed_work(hdev->workqueue, 1265 &conn->le_conn_timeout, 1266 conn->conn_timeout); 1267 } else { 1268 hci_dev_clear_flag(hdev, HCI_LE_ADV); 1269 } 1270 1271 hci_dev_unlock(hdev); 1272 } 1273 1274 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb) 1275 { 1276 struct hci_cp_le_set_scan_param *cp; 1277 __u8 status = *((__u8 *) skb->data); 1278 1279 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1280 1281 if (status) 1282 return; 1283 1284 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM); 1285 if (!cp) 1286 return; 1287 1288 hci_dev_lock(hdev); 1289 1290 hdev->le_scan_type = cp->type; 1291 1292 hci_dev_unlock(hdev); 1293 } 1294 1295 static void hci_cc_le_set_ext_scan_param(struct hci_dev *hdev, 1296 struct sk_buff *skb) 1297 { 1298 struct hci_cp_le_set_ext_scan_params *cp; 1299 __u8 status = *((__u8 *) skb->data); 1300 struct hci_cp_le_scan_phy_params *phy_param; 1301 1302 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1303 1304 if (status) 1305 return; 1306 1307 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS); 1308 if (!cp) 1309 return; 1310 1311 phy_param = (void *)cp->data; 1312 1313 hci_dev_lock(hdev); 1314 1315 hdev->le_scan_type = phy_param->type; 1316 1317 hci_dev_unlock(hdev); 1318 } 1319 1320 static bool has_pending_adv_report(struct hci_dev *hdev) 1321 { 1322 struct discovery_state *d = &hdev->discovery; 1323 1324 return bacmp(&d->last_adv_addr, BDADDR_ANY); 1325 } 1326 1327 static void clear_pending_adv_report(struct hci_dev *hdev) 1328 { 1329 struct discovery_state *d = &hdev->discovery; 1330 1331 bacpy(&d->last_adv_addr, BDADDR_ANY); 1332 d->last_adv_data_len = 0; 1333 } 1334 1335 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr, 1336 u8 bdaddr_type, s8 rssi, u32 flags, 1337 u8 *data, u8 len) 1338 { 1339 struct discovery_state *d = &hdev->discovery; 1340 1341 bacpy(&d->last_adv_addr, bdaddr); 1342 d->last_adv_addr_type = bdaddr_type; 1343 d->last_adv_rssi = rssi; 1344 d->last_adv_flags = flags; 1345 memcpy(d->last_adv_data, data, len); 1346 d->last_adv_data_len = len; 1347 } 1348 1349 static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable) 1350 { 1351 hci_dev_lock(hdev); 1352 1353 switch (enable) { 1354 case LE_SCAN_ENABLE: 1355 hci_dev_set_flag(hdev, HCI_LE_SCAN); 1356 if (hdev->le_scan_type == LE_SCAN_ACTIVE) 1357 clear_pending_adv_report(hdev); 1358 break; 1359 1360 case LE_SCAN_DISABLE: 1361 /* We do this here instead of when setting DISCOVERY_STOPPED 1362 * since the latter would potentially require waiting for 1363 * inquiry to stop too. 1364 */ 1365 if (has_pending_adv_report(hdev)) { 1366 struct discovery_state *d = &hdev->discovery; 1367 1368 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK, 1369 d->last_adv_addr_type, NULL, 1370 d->last_adv_rssi, d->last_adv_flags, 1371 d->last_adv_data, 1372 d->last_adv_data_len, NULL, 0); 1373 } 1374 1375 /* Cancel this timer so that we don't try to disable scanning 1376 * when it's already disabled. 1377 */ 1378 cancel_delayed_work(&hdev->le_scan_disable); 1379 1380 hci_dev_clear_flag(hdev, HCI_LE_SCAN); 1381 1382 /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we 1383 * interrupted scanning due to a connect request. Mark 1384 * therefore discovery as stopped. If this was not 1385 * because of a connect request advertising might have 1386 * been disabled because of active scanning, so 1387 * re-enable it again if necessary. 1388 */ 1389 if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED)) 1390 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 1391 else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) && 1392 hdev->discovery.state == DISCOVERY_FINDING) 1393 hci_req_reenable_advertising(hdev); 1394 1395 break; 1396 1397 default: 1398 bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d", 1399 enable); 1400 break; 1401 } 1402 1403 hci_dev_unlock(hdev); 1404 } 1405 1406 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev, 1407 struct sk_buff *skb) 1408 { 1409 struct hci_cp_le_set_scan_enable *cp; 1410 __u8 status = *((__u8 *) skb->data); 1411 1412 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1413 1414 if (status) 1415 return; 1416 1417 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE); 1418 if (!cp) 1419 return; 1420 1421 le_set_scan_enable_complete(hdev, cp->enable); 1422 } 1423 1424 static void hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev, 1425 struct sk_buff *skb) 1426 { 1427 struct hci_cp_le_set_ext_scan_enable *cp; 1428 __u8 status = *((__u8 *) skb->data); 1429 1430 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1431 1432 if (status) 1433 return; 1434 1435 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE); 1436 if (!cp) 1437 return; 1438 1439 le_set_scan_enable_complete(hdev, cp->enable); 1440 } 1441 1442 static void hci_cc_le_read_num_adv_sets(struct hci_dev *hdev, 1443 struct sk_buff *skb) 1444 { 1445 struct hci_rp_le_read_num_supported_adv_sets *rp = (void *) skb->data; 1446 1447 BT_DBG("%s status 0x%2.2x No of Adv sets %u", hdev->name, rp->status, 1448 rp->num_of_sets); 1449 1450 if (rp->status) 1451 return; 1452 1453 hdev->le_num_of_adv_sets = rp->num_of_sets; 1454 } 1455 1456 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev, 1457 struct sk_buff *skb) 1458 { 1459 struct hci_rp_le_read_white_list_size *rp = (void *) skb->data; 1460 1461 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size); 1462 1463 if (rp->status) 1464 return; 1465 1466 hdev->le_white_list_size = rp->size; 1467 } 1468 1469 static void hci_cc_le_clear_white_list(struct hci_dev *hdev, 1470 struct sk_buff *skb) 1471 { 1472 __u8 status = *((__u8 *) skb->data); 1473 1474 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1475 1476 if (status) 1477 return; 1478 1479 hci_bdaddr_list_clear(&hdev->le_white_list); 1480 } 1481 1482 static void hci_cc_le_add_to_white_list(struct hci_dev *hdev, 1483 struct sk_buff *skb) 1484 { 1485 struct hci_cp_le_add_to_white_list *sent; 1486 __u8 status = *((__u8 *) skb->data); 1487 1488 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1489 1490 if (status) 1491 return; 1492 1493 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST); 1494 if (!sent) 1495 return; 1496 1497 hci_bdaddr_list_add(&hdev->le_white_list, &sent->bdaddr, 1498 sent->bdaddr_type); 1499 } 1500 1501 static void hci_cc_le_del_from_white_list(struct hci_dev *hdev, 1502 struct sk_buff *skb) 1503 { 1504 struct hci_cp_le_del_from_white_list *sent; 1505 __u8 status = *((__u8 *) skb->data); 1506 1507 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1508 1509 if (status) 1510 return; 1511 1512 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST); 1513 if (!sent) 1514 return; 1515 1516 hci_bdaddr_list_del(&hdev->le_white_list, &sent->bdaddr, 1517 sent->bdaddr_type); 1518 } 1519 1520 static void hci_cc_le_read_supported_states(struct hci_dev *hdev, 1521 struct sk_buff *skb) 1522 { 1523 struct hci_rp_le_read_supported_states *rp = (void *) skb->data; 1524 1525 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 1526 1527 if (rp->status) 1528 return; 1529 1530 memcpy(hdev->le_states, rp->le_states, 8); 1531 } 1532 1533 static void hci_cc_le_read_def_data_len(struct hci_dev *hdev, 1534 struct sk_buff *skb) 1535 { 1536 struct hci_rp_le_read_def_data_len *rp = (void *) skb->data; 1537 1538 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 1539 1540 if (rp->status) 1541 return; 1542 1543 hdev->le_def_tx_len = le16_to_cpu(rp->tx_len); 1544 hdev->le_def_tx_time = le16_to_cpu(rp->tx_time); 1545 } 1546 1547 static void hci_cc_le_write_def_data_len(struct hci_dev *hdev, 1548 struct sk_buff *skb) 1549 { 1550 struct hci_cp_le_write_def_data_len *sent; 1551 __u8 status = *((__u8 *) skb->data); 1552 1553 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1554 1555 if (status) 1556 return; 1557 1558 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN); 1559 if (!sent) 1560 return; 1561 1562 hdev->le_def_tx_len = le16_to_cpu(sent->tx_len); 1563 hdev->le_def_tx_time = le16_to_cpu(sent->tx_time); 1564 } 1565 1566 static void hci_cc_le_add_to_resolv_list(struct hci_dev *hdev, 1567 struct sk_buff *skb) 1568 { 1569 struct hci_cp_le_add_to_resolv_list *sent; 1570 __u8 status = *((__u8 *) skb->data); 1571 1572 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1573 1574 if (status) 1575 return; 1576 1577 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST); 1578 if (!sent) 1579 return; 1580 1581 hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr, 1582 sent->bdaddr_type, sent->peer_irk, 1583 sent->local_irk); 1584 } 1585 1586 static void hci_cc_le_del_from_resolv_list(struct hci_dev *hdev, 1587 struct sk_buff *skb) 1588 { 1589 struct hci_cp_le_del_from_resolv_list *sent; 1590 __u8 status = *((__u8 *) skb->data); 1591 1592 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1593 1594 if (status) 1595 return; 1596 1597 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST); 1598 if (!sent) 1599 return; 1600 1601 hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr, 1602 sent->bdaddr_type); 1603 } 1604 1605 static void hci_cc_le_clear_resolv_list(struct hci_dev *hdev, 1606 struct sk_buff *skb) 1607 { 1608 __u8 status = *((__u8 *) skb->data); 1609 1610 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1611 1612 if (status) 1613 return; 1614 1615 hci_bdaddr_list_clear(&hdev->le_resolv_list); 1616 } 1617 1618 static void hci_cc_le_read_resolv_list_size(struct hci_dev *hdev, 1619 struct sk_buff *skb) 1620 { 1621 struct hci_rp_le_read_resolv_list_size *rp = (void *) skb->data; 1622 1623 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size); 1624 1625 if (rp->status) 1626 return; 1627 1628 hdev->le_resolv_list_size = rp->size; 1629 } 1630 1631 static void hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev, 1632 struct sk_buff *skb) 1633 { 1634 __u8 *sent, status = *((__u8 *) skb->data); 1635 1636 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1637 1638 if (status) 1639 return; 1640 1641 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE); 1642 if (!sent) 1643 return; 1644 1645 hci_dev_lock(hdev); 1646 1647 if (*sent) 1648 hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION); 1649 else 1650 hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION); 1651 1652 hci_dev_unlock(hdev); 1653 } 1654 1655 static void hci_cc_le_read_max_data_len(struct hci_dev *hdev, 1656 struct sk_buff *skb) 1657 { 1658 struct hci_rp_le_read_max_data_len *rp = (void *) skb->data; 1659 1660 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 1661 1662 if (rp->status) 1663 return; 1664 1665 hdev->le_max_tx_len = le16_to_cpu(rp->tx_len); 1666 hdev->le_max_tx_time = le16_to_cpu(rp->tx_time); 1667 hdev->le_max_rx_len = le16_to_cpu(rp->rx_len); 1668 hdev->le_max_rx_time = le16_to_cpu(rp->rx_time); 1669 } 1670 1671 static void hci_cc_write_le_host_supported(struct hci_dev *hdev, 1672 struct sk_buff *skb) 1673 { 1674 struct hci_cp_write_le_host_supported *sent; 1675 __u8 status = *((__u8 *) skb->data); 1676 1677 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1678 1679 if (status) 1680 return; 1681 1682 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED); 1683 if (!sent) 1684 return; 1685 1686 hci_dev_lock(hdev); 1687 1688 if (sent->le) { 1689 hdev->features[1][0] |= LMP_HOST_LE; 1690 hci_dev_set_flag(hdev, HCI_LE_ENABLED); 1691 } else { 1692 hdev->features[1][0] &= ~LMP_HOST_LE; 1693 hci_dev_clear_flag(hdev, HCI_LE_ENABLED); 1694 hci_dev_clear_flag(hdev, HCI_ADVERTISING); 1695 } 1696 1697 if (sent->simul) 1698 hdev->features[1][0] |= LMP_HOST_LE_BREDR; 1699 else 1700 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR; 1701 1702 hci_dev_unlock(hdev); 1703 } 1704 1705 static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb) 1706 { 1707 struct hci_cp_le_set_adv_param *cp; 1708 u8 status = *((u8 *) skb->data); 1709 1710 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1711 1712 if (status) 1713 return; 1714 1715 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM); 1716 if (!cp) 1717 return; 1718 1719 hci_dev_lock(hdev); 1720 hdev->adv_addr_type = cp->own_address_type; 1721 hci_dev_unlock(hdev); 1722 } 1723 1724 static void hci_cc_set_ext_adv_param(struct hci_dev *hdev, struct sk_buff *skb) 1725 { 1726 struct hci_rp_le_set_ext_adv_params *rp = (void *) skb->data; 1727 struct hci_cp_le_set_ext_adv_params *cp; 1728 struct adv_info *adv_instance; 1729 1730 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 1731 1732 if (rp->status) 1733 return; 1734 1735 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS); 1736 if (!cp) 1737 return; 1738 1739 hci_dev_lock(hdev); 1740 hdev->adv_addr_type = cp->own_addr_type; 1741 if (!hdev->cur_adv_instance) { 1742 /* Store in hdev for instance 0 */ 1743 hdev->adv_tx_power = rp->tx_power; 1744 } else { 1745 adv_instance = hci_find_adv_instance(hdev, 1746 hdev->cur_adv_instance); 1747 if (adv_instance) 1748 adv_instance->tx_power = rp->tx_power; 1749 } 1750 /* Update adv data as tx power is known now */ 1751 hci_req_update_adv_data(hdev, hdev->cur_adv_instance); 1752 hci_dev_unlock(hdev); 1753 } 1754 1755 static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb) 1756 { 1757 struct hci_rp_read_rssi *rp = (void *) skb->data; 1758 struct hci_conn *conn; 1759 1760 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 1761 1762 if (rp->status) 1763 return; 1764 1765 hci_dev_lock(hdev); 1766 1767 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); 1768 if (conn) 1769 conn->rssi = rp->rssi; 1770 1771 hci_dev_unlock(hdev); 1772 } 1773 1774 static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb) 1775 { 1776 struct hci_cp_read_tx_power *sent; 1777 struct hci_rp_read_tx_power *rp = (void *) skb->data; 1778 struct hci_conn *conn; 1779 1780 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 1781 1782 if (rp->status) 1783 return; 1784 1785 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER); 1786 if (!sent) 1787 return; 1788 1789 hci_dev_lock(hdev); 1790 1791 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); 1792 if (!conn) 1793 goto unlock; 1794 1795 switch (sent->type) { 1796 case 0x00: 1797 conn->tx_power = rp->tx_power; 1798 break; 1799 case 0x01: 1800 conn->max_tx_power = rp->tx_power; 1801 break; 1802 } 1803 1804 unlock: 1805 hci_dev_unlock(hdev); 1806 } 1807 1808 static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, struct sk_buff *skb) 1809 { 1810 u8 status = *((u8 *) skb->data); 1811 u8 *mode; 1812 1813 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1814 1815 if (status) 1816 return; 1817 1818 mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE); 1819 if (mode) 1820 hdev->ssp_debug_mode = *mode; 1821 } 1822 1823 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status) 1824 { 1825 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1826 1827 if (status) { 1828 hci_conn_check_pending(hdev); 1829 return; 1830 } 1831 1832 set_bit(HCI_INQUIRY, &hdev->flags); 1833 } 1834 1835 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status) 1836 { 1837 struct hci_cp_create_conn *cp; 1838 struct hci_conn *conn; 1839 1840 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1841 1842 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN); 1843 if (!cp) 1844 return; 1845 1846 hci_dev_lock(hdev); 1847 1848 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); 1849 1850 BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn); 1851 1852 if (status) { 1853 if (conn && conn->state == BT_CONNECT) { 1854 if (status != 0x0c || conn->attempt > 2) { 1855 conn->state = BT_CLOSED; 1856 hci_connect_cfm(conn, status); 1857 hci_conn_del(conn); 1858 } else 1859 conn->state = BT_CONNECT2; 1860 } 1861 } else { 1862 if (!conn) { 1863 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr, 1864 HCI_ROLE_MASTER); 1865 if (!conn) 1866 bt_dev_err(hdev, "no memory for new connection"); 1867 } 1868 } 1869 1870 hci_dev_unlock(hdev); 1871 } 1872 1873 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status) 1874 { 1875 struct hci_cp_add_sco *cp; 1876 struct hci_conn *acl, *sco; 1877 __u16 handle; 1878 1879 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1880 1881 if (!status) 1882 return; 1883 1884 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO); 1885 if (!cp) 1886 return; 1887 1888 handle = __le16_to_cpu(cp->handle); 1889 1890 BT_DBG("%s handle 0x%4.4x", hdev->name, handle); 1891 1892 hci_dev_lock(hdev); 1893 1894 acl = hci_conn_hash_lookup_handle(hdev, handle); 1895 if (acl) { 1896 sco = acl->link; 1897 if (sco) { 1898 sco->state = BT_CLOSED; 1899 1900 hci_connect_cfm(sco, status); 1901 hci_conn_del(sco); 1902 } 1903 } 1904 1905 hci_dev_unlock(hdev); 1906 } 1907 1908 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status) 1909 { 1910 struct hci_cp_auth_requested *cp; 1911 struct hci_conn *conn; 1912 1913 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1914 1915 if (!status) 1916 return; 1917 1918 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED); 1919 if (!cp) 1920 return; 1921 1922 hci_dev_lock(hdev); 1923 1924 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 1925 if (conn) { 1926 if (conn->state == BT_CONFIG) { 1927 hci_connect_cfm(conn, status); 1928 hci_conn_drop(conn); 1929 } 1930 } 1931 1932 hci_dev_unlock(hdev); 1933 } 1934 1935 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status) 1936 { 1937 struct hci_cp_set_conn_encrypt *cp; 1938 struct hci_conn *conn; 1939 1940 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1941 1942 if (!status) 1943 return; 1944 1945 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT); 1946 if (!cp) 1947 return; 1948 1949 hci_dev_lock(hdev); 1950 1951 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 1952 if (conn) { 1953 if (conn->state == BT_CONFIG) { 1954 hci_connect_cfm(conn, status); 1955 hci_conn_drop(conn); 1956 } 1957 } 1958 1959 hci_dev_unlock(hdev); 1960 } 1961 1962 static int hci_outgoing_auth_needed(struct hci_dev *hdev, 1963 struct hci_conn *conn) 1964 { 1965 if (conn->state != BT_CONFIG || !conn->out) 1966 return 0; 1967 1968 if (conn->pending_sec_level == BT_SECURITY_SDP) 1969 return 0; 1970 1971 /* Only request authentication for SSP connections or non-SSP 1972 * devices with sec_level MEDIUM or HIGH or if MITM protection 1973 * is requested. 1974 */ 1975 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) && 1976 conn->pending_sec_level != BT_SECURITY_FIPS && 1977 conn->pending_sec_level != BT_SECURITY_HIGH && 1978 conn->pending_sec_level != BT_SECURITY_MEDIUM) 1979 return 0; 1980 1981 return 1; 1982 } 1983 1984 static int hci_resolve_name(struct hci_dev *hdev, 1985 struct inquiry_entry *e) 1986 { 1987 struct hci_cp_remote_name_req cp; 1988 1989 memset(&cp, 0, sizeof(cp)); 1990 1991 bacpy(&cp.bdaddr, &e->data.bdaddr); 1992 cp.pscan_rep_mode = e->data.pscan_rep_mode; 1993 cp.pscan_mode = e->data.pscan_mode; 1994 cp.clock_offset = e->data.clock_offset; 1995 1996 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp); 1997 } 1998 1999 static bool hci_resolve_next_name(struct hci_dev *hdev) 2000 { 2001 struct discovery_state *discov = &hdev->discovery; 2002 struct inquiry_entry *e; 2003 2004 if (list_empty(&discov->resolve)) 2005 return false; 2006 2007 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED); 2008 if (!e) 2009 return false; 2010 2011 if (hci_resolve_name(hdev, e) == 0) { 2012 e->name_state = NAME_PENDING; 2013 return true; 2014 } 2015 2016 return false; 2017 } 2018 2019 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn, 2020 bdaddr_t *bdaddr, u8 *name, u8 name_len) 2021 { 2022 struct discovery_state *discov = &hdev->discovery; 2023 struct inquiry_entry *e; 2024 2025 /* Update the mgmt connected state if necessary. Be careful with 2026 * conn objects that exist but are not (yet) connected however. 2027 * Only those in BT_CONFIG or BT_CONNECTED states can be 2028 * considered connected. 2029 */ 2030 if (conn && 2031 (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) && 2032 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) 2033 mgmt_device_connected(hdev, conn, 0, name, name_len); 2034 2035 if (discov->state == DISCOVERY_STOPPED) 2036 return; 2037 2038 if (discov->state == DISCOVERY_STOPPING) 2039 goto discov_complete; 2040 2041 if (discov->state != DISCOVERY_RESOLVING) 2042 return; 2043 2044 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING); 2045 /* If the device was not found in a list of found devices names of which 2046 * are pending. there is no need to continue resolving a next name as it 2047 * will be done upon receiving another Remote Name Request Complete 2048 * Event */ 2049 if (!e) 2050 return; 2051 2052 list_del(&e->list); 2053 if (name) { 2054 e->name_state = NAME_KNOWN; 2055 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00, 2056 e->data.rssi, name, name_len); 2057 } else { 2058 e->name_state = NAME_NOT_KNOWN; 2059 } 2060 2061 if (hci_resolve_next_name(hdev)) 2062 return; 2063 2064 discov_complete: 2065 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 2066 } 2067 2068 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status) 2069 { 2070 struct hci_cp_remote_name_req *cp; 2071 struct hci_conn *conn; 2072 2073 BT_DBG("%s status 0x%2.2x", hdev->name, status); 2074 2075 /* If successful wait for the name req complete event before 2076 * checking for the need to do authentication */ 2077 if (!status) 2078 return; 2079 2080 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ); 2081 if (!cp) 2082 return; 2083 2084 hci_dev_lock(hdev); 2085 2086 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); 2087 2088 if (hci_dev_test_flag(hdev, HCI_MGMT)) 2089 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0); 2090 2091 if (!conn) 2092 goto unlock; 2093 2094 if (!hci_outgoing_auth_needed(hdev, conn)) 2095 goto unlock; 2096 2097 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) { 2098 struct hci_cp_auth_requested auth_cp; 2099 2100 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags); 2101 2102 auth_cp.handle = __cpu_to_le16(conn->handle); 2103 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, 2104 sizeof(auth_cp), &auth_cp); 2105 } 2106 2107 unlock: 2108 hci_dev_unlock(hdev); 2109 } 2110 2111 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status) 2112 { 2113 struct hci_cp_read_remote_features *cp; 2114 struct hci_conn *conn; 2115 2116 BT_DBG("%s status 0x%2.2x", hdev->name, status); 2117 2118 if (!status) 2119 return; 2120 2121 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES); 2122 if (!cp) 2123 return; 2124 2125 hci_dev_lock(hdev); 2126 2127 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2128 if (conn) { 2129 if (conn->state == BT_CONFIG) { 2130 hci_connect_cfm(conn, status); 2131 hci_conn_drop(conn); 2132 } 2133 } 2134 2135 hci_dev_unlock(hdev); 2136 } 2137 2138 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status) 2139 { 2140 struct hci_cp_read_remote_ext_features *cp; 2141 struct hci_conn *conn; 2142 2143 BT_DBG("%s status 0x%2.2x", hdev->name, status); 2144 2145 if (!status) 2146 return; 2147 2148 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES); 2149 if (!cp) 2150 return; 2151 2152 hci_dev_lock(hdev); 2153 2154 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2155 if (conn) { 2156 if (conn->state == BT_CONFIG) { 2157 hci_connect_cfm(conn, status); 2158 hci_conn_drop(conn); 2159 } 2160 } 2161 2162 hci_dev_unlock(hdev); 2163 } 2164 2165 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status) 2166 { 2167 struct hci_cp_setup_sync_conn *cp; 2168 struct hci_conn *acl, *sco; 2169 __u16 handle; 2170 2171 BT_DBG("%s status 0x%2.2x", hdev->name, status); 2172 2173 if (!status) 2174 return; 2175 2176 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN); 2177 if (!cp) 2178 return; 2179 2180 handle = __le16_to_cpu(cp->handle); 2181 2182 BT_DBG("%s handle 0x%4.4x", hdev->name, handle); 2183 2184 hci_dev_lock(hdev); 2185 2186 acl = hci_conn_hash_lookup_handle(hdev, handle); 2187 if (acl) { 2188 sco = acl->link; 2189 if (sco) { 2190 sco->state = BT_CLOSED; 2191 2192 hci_connect_cfm(sco, status); 2193 hci_conn_del(sco); 2194 } 2195 } 2196 2197 hci_dev_unlock(hdev); 2198 } 2199 2200 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status) 2201 { 2202 struct hci_cp_sniff_mode *cp; 2203 struct hci_conn *conn; 2204 2205 BT_DBG("%s status 0x%2.2x", hdev->name, status); 2206 2207 if (!status) 2208 return; 2209 2210 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE); 2211 if (!cp) 2212 return; 2213 2214 hci_dev_lock(hdev); 2215 2216 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2217 if (conn) { 2218 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags); 2219 2220 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags)) 2221 hci_sco_setup(conn, status); 2222 } 2223 2224 hci_dev_unlock(hdev); 2225 } 2226 2227 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status) 2228 { 2229 struct hci_cp_exit_sniff_mode *cp; 2230 struct hci_conn *conn; 2231 2232 BT_DBG("%s status 0x%2.2x", hdev->name, status); 2233 2234 if (!status) 2235 return; 2236 2237 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE); 2238 if (!cp) 2239 return; 2240 2241 hci_dev_lock(hdev); 2242 2243 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2244 if (conn) { 2245 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags); 2246 2247 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags)) 2248 hci_sco_setup(conn, status); 2249 } 2250 2251 hci_dev_unlock(hdev); 2252 } 2253 2254 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status) 2255 { 2256 struct hci_cp_disconnect *cp; 2257 struct hci_conn *conn; 2258 2259 if (!status) 2260 return; 2261 2262 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT); 2263 if (!cp) 2264 return; 2265 2266 hci_dev_lock(hdev); 2267 2268 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2269 if (conn) { 2270 u8 type = conn->type; 2271 2272 mgmt_disconnect_failed(hdev, &conn->dst, conn->type, 2273 conn->dst_type, status); 2274 2275 /* If the disconnection failed for any reason, the upper layer 2276 * does not retry to disconnect in current implementation. 2277 * Hence, we need to do some basic cleanup here and re-enable 2278 * advertising if necessary. 2279 */ 2280 hci_conn_del(conn); 2281 if (type == LE_LINK) 2282 hci_req_reenable_advertising(hdev); 2283 } 2284 2285 hci_dev_unlock(hdev); 2286 } 2287 2288 static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr, 2289 u8 peer_addr_type, u8 own_address_type, 2290 u8 filter_policy) 2291 { 2292 struct hci_conn *conn; 2293 2294 conn = hci_conn_hash_lookup_le(hdev, peer_addr, 2295 peer_addr_type); 2296 if (!conn) 2297 return; 2298 2299 /* When using controller based address resolution, then the new 2300 * address types 0x02 and 0x03 are used. These types need to be 2301 * converted back into either public address or random address type 2302 */ 2303 if (use_ll_privacy(hdev) && 2304 hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) { 2305 switch (own_address_type) { 2306 case ADDR_LE_DEV_PUBLIC_RESOLVED: 2307 own_address_type = ADDR_LE_DEV_PUBLIC; 2308 break; 2309 case ADDR_LE_DEV_RANDOM_RESOLVED: 2310 own_address_type = ADDR_LE_DEV_RANDOM; 2311 break; 2312 } 2313 } 2314 2315 /* Store the initiator and responder address information which 2316 * is needed for SMP. These values will not change during the 2317 * lifetime of the connection. 2318 */ 2319 conn->init_addr_type = own_address_type; 2320 if (own_address_type == ADDR_LE_DEV_RANDOM) 2321 bacpy(&conn->init_addr, &hdev->random_addr); 2322 else 2323 bacpy(&conn->init_addr, &hdev->bdaddr); 2324 2325 conn->resp_addr_type = peer_addr_type; 2326 bacpy(&conn->resp_addr, peer_addr); 2327 2328 /* We don't want the connection attempt to stick around 2329 * indefinitely since LE doesn't have a page timeout concept 2330 * like BR/EDR. Set a timer for any connection that doesn't use 2331 * the white list for connecting. 2332 */ 2333 if (filter_policy == HCI_LE_USE_PEER_ADDR) 2334 queue_delayed_work(conn->hdev->workqueue, 2335 &conn->le_conn_timeout, 2336 conn->conn_timeout); 2337 } 2338 2339 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status) 2340 { 2341 struct hci_cp_le_create_conn *cp; 2342 2343 BT_DBG("%s status 0x%2.2x", hdev->name, status); 2344 2345 /* All connection failure handling is taken care of by the 2346 * hci_le_conn_failed function which is triggered by the HCI 2347 * request completion callbacks used for connecting. 2348 */ 2349 if (status) 2350 return; 2351 2352 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN); 2353 if (!cp) 2354 return; 2355 2356 hci_dev_lock(hdev); 2357 2358 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type, 2359 cp->own_address_type, cp->filter_policy); 2360 2361 hci_dev_unlock(hdev); 2362 } 2363 2364 static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status) 2365 { 2366 struct hci_cp_le_ext_create_conn *cp; 2367 2368 BT_DBG("%s status 0x%2.2x", hdev->name, status); 2369 2370 /* All connection failure handling is taken care of by the 2371 * hci_le_conn_failed function which is triggered by the HCI 2372 * request completion callbacks used for connecting. 2373 */ 2374 if (status) 2375 return; 2376 2377 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN); 2378 if (!cp) 2379 return; 2380 2381 hci_dev_lock(hdev); 2382 2383 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type, 2384 cp->own_addr_type, cp->filter_policy); 2385 2386 hci_dev_unlock(hdev); 2387 } 2388 2389 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status) 2390 { 2391 struct hci_cp_le_read_remote_features *cp; 2392 struct hci_conn *conn; 2393 2394 BT_DBG("%s status 0x%2.2x", hdev->name, status); 2395 2396 if (!status) 2397 return; 2398 2399 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES); 2400 if (!cp) 2401 return; 2402 2403 hci_dev_lock(hdev); 2404 2405 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2406 if (conn) { 2407 if (conn->state == BT_CONFIG) { 2408 hci_connect_cfm(conn, status); 2409 hci_conn_drop(conn); 2410 } 2411 } 2412 2413 hci_dev_unlock(hdev); 2414 } 2415 2416 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status) 2417 { 2418 struct hci_cp_le_start_enc *cp; 2419 struct hci_conn *conn; 2420 2421 BT_DBG("%s status 0x%2.2x", hdev->name, status); 2422 2423 if (!status) 2424 return; 2425 2426 hci_dev_lock(hdev); 2427 2428 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC); 2429 if (!cp) 2430 goto unlock; 2431 2432 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2433 if (!conn) 2434 goto unlock; 2435 2436 if (conn->state != BT_CONNECTED) 2437 goto unlock; 2438 2439 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE); 2440 hci_conn_drop(conn); 2441 2442 unlock: 2443 hci_dev_unlock(hdev); 2444 } 2445 2446 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status) 2447 { 2448 struct hci_cp_switch_role *cp; 2449 struct hci_conn *conn; 2450 2451 BT_DBG("%s status 0x%2.2x", hdev->name, status); 2452 2453 if (!status) 2454 return; 2455 2456 cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE); 2457 if (!cp) 2458 return; 2459 2460 hci_dev_lock(hdev); 2461 2462 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); 2463 if (conn) 2464 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags); 2465 2466 hci_dev_unlock(hdev); 2467 } 2468 2469 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 2470 { 2471 __u8 status = *((__u8 *) skb->data); 2472 struct discovery_state *discov = &hdev->discovery; 2473 struct inquiry_entry *e; 2474 2475 BT_DBG("%s status 0x%2.2x", hdev->name, status); 2476 2477 hci_conn_check_pending(hdev); 2478 2479 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags)) 2480 return; 2481 2482 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */ 2483 wake_up_bit(&hdev->flags, HCI_INQUIRY); 2484 2485 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 2486 return; 2487 2488 hci_dev_lock(hdev); 2489 2490 if (discov->state != DISCOVERY_FINDING) 2491 goto unlock; 2492 2493 if (list_empty(&discov->resolve)) { 2494 /* When BR/EDR inquiry is active and no LE scanning is in 2495 * progress, then change discovery state to indicate completion. 2496 * 2497 * When running LE scanning and BR/EDR inquiry simultaneously 2498 * and the LE scan already finished, then change the discovery 2499 * state to indicate completion. 2500 */ 2501 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) || 2502 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) 2503 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 2504 goto unlock; 2505 } 2506 2507 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED); 2508 if (e && hci_resolve_name(hdev, e) == 0) { 2509 e->name_state = NAME_PENDING; 2510 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING); 2511 } else { 2512 /* When BR/EDR inquiry is active and no LE scanning is in 2513 * progress, then change discovery state to indicate completion. 2514 * 2515 * When running LE scanning and BR/EDR inquiry simultaneously 2516 * and the LE scan already finished, then change the discovery 2517 * state to indicate completion. 2518 */ 2519 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) || 2520 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) 2521 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 2522 } 2523 2524 unlock: 2525 hci_dev_unlock(hdev); 2526 } 2527 2528 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb) 2529 { 2530 struct inquiry_data data; 2531 struct inquiry_info *info = (void *) (skb->data + 1); 2532 int num_rsp = *((__u8 *) skb->data); 2533 2534 BT_DBG("%s num_rsp %d", hdev->name, num_rsp); 2535 2536 if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1) 2537 return; 2538 2539 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) 2540 return; 2541 2542 hci_dev_lock(hdev); 2543 2544 for (; num_rsp; num_rsp--, info++) { 2545 u32 flags; 2546 2547 bacpy(&data.bdaddr, &info->bdaddr); 2548 data.pscan_rep_mode = info->pscan_rep_mode; 2549 data.pscan_period_mode = info->pscan_period_mode; 2550 data.pscan_mode = info->pscan_mode; 2551 memcpy(data.dev_class, info->dev_class, 3); 2552 data.clock_offset = info->clock_offset; 2553 data.rssi = HCI_RSSI_INVALID; 2554 data.ssp_mode = 0x00; 2555 2556 flags = hci_inquiry_cache_update(hdev, &data, false); 2557 2558 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 2559 info->dev_class, HCI_RSSI_INVALID, 2560 flags, NULL, 0, NULL, 0); 2561 } 2562 2563 hci_dev_unlock(hdev); 2564 } 2565 2566 static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 2567 { 2568 struct hci_ev_conn_complete *ev = (void *) skb->data; 2569 struct inquiry_entry *ie; 2570 struct hci_conn *conn; 2571 2572 BT_DBG("%s", hdev->name); 2573 2574 hci_dev_lock(hdev); 2575 2576 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); 2577 if (!conn) { 2578 /* Connection may not exist if auto-connected. Check the inquiry 2579 * cache to see if we've already discovered this bdaddr before. 2580 * If found and link is an ACL type, create a connection class 2581 * automatically. 2582 */ 2583 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr); 2584 if (ie && ev->link_type == ACL_LINK) { 2585 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr, 2586 HCI_ROLE_SLAVE); 2587 if (!conn) { 2588 bt_dev_err(hdev, "no memory for new conn"); 2589 goto unlock; 2590 } 2591 } else { 2592 if (ev->link_type != SCO_LINK) 2593 goto unlock; 2594 2595 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, 2596 &ev->bdaddr); 2597 if (!conn) 2598 goto unlock; 2599 2600 conn->type = SCO_LINK; 2601 } 2602 } 2603 2604 if (!ev->status) { 2605 conn->handle = __le16_to_cpu(ev->handle); 2606 2607 if (conn->type == ACL_LINK) { 2608 conn->state = BT_CONFIG; 2609 hci_conn_hold(conn); 2610 2611 if (!conn->out && !hci_conn_ssp_enabled(conn) && 2612 !hci_find_link_key(hdev, &ev->bdaddr)) 2613 conn->disc_timeout = HCI_PAIRING_TIMEOUT; 2614 else 2615 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 2616 } else 2617 conn->state = BT_CONNECTED; 2618 2619 hci_debugfs_create_conn(conn); 2620 hci_conn_add_sysfs(conn); 2621 2622 if (test_bit(HCI_AUTH, &hdev->flags)) 2623 set_bit(HCI_CONN_AUTH, &conn->flags); 2624 2625 if (test_bit(HCI_ENCRYPT, &hdev->flags)) 2626 set_bit(HCI_CONN_ENCRYPT, &conn->flags); 2627 2628 /* Get remote features */ 2629 if (conn->type == ACL_LINK) { 2630 struct hci_cp_read_remote_features cp; 2631 cp.handle = ev->handle; 2632 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES, 2633 sizeof(cp), &cp); 2634 2635 hci_req_update_scan(hdev); 2636 } 2637 2638 /* Set packet type for incoming connection */ 2639 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) { 2640 struct hci_cp_change_conn_ptype cp; 2641 cp.handle = ev->handle; 2642 cp.pkt_type = cpu_to_le16(conn->pkt_type); 2643 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp), 2644 &cp); 2645 } 2646 } else { 2647 conn->state = BT_CLOSED; 2648 if (conn->type == ACL_LINK) 2649 mgmt_connect_failed(hdev, &conn->dst, conn->type, 2650 conn->dst_type, ev->status); 2651 } 2652 2653 if (conn->type == ACL_LINK) 2654 hci_sco_setup(conn, ev->status); 2655 2656 if (ev->status) { 2657 hci_connect_cfm(conn, ev->status); 2658 hci_conn_del(conn); 2659 } else if (ev->link_type == SCO_LINK) { 2660 switch (conn->setting & SCO_AIRMODE_MASK) { 2661 case SCO_AIRMODE_CVSD: 2662 if (hdev->notify) 2663 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD); 2664 break; 2665 } 2666 2667 hci_connect_cfm(conn, ev->status); 2668 } 2669 2670 unlock: 2671 hci_dev_unlock(hdev); 2672 2673 hci_conn_check_pending(hdev); 2674 } 2675 2676 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr) 2677 { 2678 struct hci_cp_reject_conn_req cp; 2679 2680 bacpy(&cp.bdaddr, bdaddr); 2681 cp.reason = HCI_ERROR_REJ_BAD_ADDR; 2682 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp); 2683 } 2684 2685 static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb) 2686 { 2687 struct hci_ev_conn_request *ev = (void *) skb->data; 2688 int mask = hdev->link_mode; 2689 struct inquiry_entry *ie; 2690 struct hci_conn *conn; 2691 __u8 flags = 0; 2692 2693 BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr, 2694 ev->link_type); 2695 2696 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type, 2697 &flags); 2698 2699 if (!(mask & HCI_LM_ACCEPT)) { 2700 hci_reject_conn(hdev, &ev->bdaddr); 2701 return; 2702 } 2703 2704 if (hci_bdaddr_list_lookup(&hdev->blacklist, &ev->bdaddr, 2705 BDADDR_BREDR)) { 2706 hci_reject_conn(hdev, &ev->bdaddr); 2707 return; 2708 } 2709 2710 /* Require HCI_CONNECTABLE or a whitelist entry to accept the 2711 * connection. These features are only touched through mgmt so 2712 * only do the checks if HCI_MGMT is set. 2713 */ 2714 if (hci_dev_test_flag(hdev, HCI_MGMT) && 2715 !hci_dev_test_flag(hdev, HCI_CONNECTABLE) && 2716 !hci_bdaddr_list_lookup_with_flags(&hdev->whitelist, &ev->bdaddr, 2717 BDADDR_BREDR)) { 2718 hci_reject_conn(hdev, &ev->bdaddr); 2719 return; 2720 } 2721 2722 /* Connection accepted */ 2723 2724 hci_dev_lock(hdev); 2725 2726 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr); 2727 if (ie) 2728 memcpy(ie->data.dev_class, ev->dev_class, 3); 2729 2730 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, 2731 &ev->bdaddr); 2732 if (!conn) { 2733 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr, 2734 HCI_ROLE_SLAVE); 2735 if (!conn) { 2736 bt_dev_err(hdev, "no memory for new connection"); 2737 hci_dev_unlock(hdev); 2738 return; 2739 } 2740 } 2741 2742 memcpy(conn->dev_class, ev->dev_class, 3); 2743 2744 hci_dev_unlock(hdev); 2745 2746 if (ev->link_type == ACL_LINK || 2747 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) { 2748 struct hci_cp_accept_conn_req cp; 2749 conn->state = BT_CONNECT; 2750 2751 bacpy(&cp.bdaddr, &ev->bdaddr); 2752 2753 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER)) 2754 cp.role = 0x00; /* Become master */ 2755 else 2756 cp.role = 0x01; /* Remain slave */ 2757 2758 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp); 2759 } else if (!(flags & HCI_PROTO_DEFER)) { 2760 struct hci_cp_accept_sync_conn_req cp; 2761 conn->state = BT_CONNECT; 2762 2763 bacpy(&cp.bdaddr, &ev->bdaddr); 2764 cp.pkt_type = cpu_to_le16(conn->pkt_type); 2765 2766 cp.tx_bandwidth = cpu_to_le32(0x00001f40); 2767 cp.rx_bandwidth = cpu_to_le32(0x00001f40); 2768 cp.max_latency = cpu_to_le16(0xffff); 2769 cp.content_format = cpu_to_le16(hdev->voice_setting); 2770 cp.retrans_effort = 0xff; 2771 2772 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp), 2773 &cp); 2774 } else { 2775 conn->state = BT_CONNECT2; 2776 hci_connect_cfm(conn, 0); 2777 } 2778 } 2779 2780 static u8 hci_to_mgmt_reason(u8 err) 2781 { 2782 switch (err) { 2783 case HCI_ERROR_CONNECTION_TIMEOUT: 2784 return MGMT_DEV_DISCONN_TIMEOUT; 2785 case HCI_ERROR_REMOTE_USER_TERM: 2786 case HCI_ERROR_REMOTE_LOW_RESOURCES: 2787 case HCI_ERROR_REMOTE_POWER_OFF: 2788 return MGMT_DEV_DISCONN_REMOTE; 2789 case HCI_ERROR_LOCAL_HOST_TERM: 2790 return MGMT_DEV_DISCONN_LOCAL_HOST; 2791 default: 2792 return MGMT_DEV_DISCONN_UNKNOWN; 2793 } 2794 } 2795 2796 static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 2797 { 2798 struct hci_ev_disconn_complete *ev = (void *) skb->data; 2799 u8 reason; 2800 struct hci_conn_params *params; 2801 struct hci_conn *conn; 2802 bool mgmt_connected; 2803 u8 type; 2804 2805 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 2806 2807 hci_dev_lock(hdev); 2808 2809 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 2810 if (!conn) 2811 goto unlock; 2812 2813 if (ev->status) { 2814 mgmt_disconnect_failed(hdev, &conn->dst, conn->type, 2815 conn->dst_type, ev->status); 2816 goto unlock; 2817 } 2818 2819 conn->state = BT_CLOSED; 2820 2821 mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags); 2822 2823 if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags)) 2824 reason = MGMT_DEV_DISCONN_AUTH_FAILURE; 2825 else 2826 reason = hci_to_mgmt_reason(ev->reason); 2827 2828 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type, 2829 reason, mgmt_connected); 2830 2831 if (conn->type == ACL_LINK) { 2832 if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags)) 2833 hci_remove_link_key(hdev, &conn->dst); 2834 2835 hci_req_update_scan(hdev); 2836 } 2837 2838 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type); 2839 if (params) { 2840 switch (params->auto_connect) { 2841 case HCI_AUTO_CONN_LINK_LOSS: 2842 if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT) 2843 break; 2844 fallthrough; 2845 2846 case HCI_AUTO_CONN_DIRECT: 2847 case HCI_AUTO_CONN_ALWAYS: 2848 list_del_init(¶ms->action); 2849 list_add(¶ms->action, &hdev->pend_le_conns); 2850 hci_update_background_scan(hdev); 2851 break; 2852 2853 default: 2854 break; 2855 } 2856 } 2857 2858 type = conn->type; 2859 2860 hci_disconn_cfm(conn, ev->reason); 2861 hci_conn_del(conn); 2862 2863 /* The suspend notifier is waiting for all devices to disconnect so 2864 * clear the bit from pending tasks and inform the wait queue. 2865 */ 2866 if (list_empty(&hdev->conn_hash.list) && 2867 test_and_clear_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks)) { 2868 wake_up(&hdev->suspend_wait_q); 2869 } 2870 2871 /* Re-enable advertising if necessary, since it might 2872 * have been disabled by the connection. From the 2873 * HCI_LE_Set_Advertise_Enable command description in 2874 * the core specification (v4.0): 2875 * "The Controller shall continue advertising until the Host 2876 * issues an LE_Set_Advertise_Enable command with 2877 * Advertising_Enable set to 0x00 (Advertising is disabled) 2878 * or until a connection is created or until the Advertising 2879 * is timed out due to Directed Advertising." 2880 */ 2881 if (type == LE_LINK) 2882 hci_req_reenable_advertising(hdev); 2883 2884 unlock: 2885 hci_dev_unlock(hdev); 2886 } 2887 2888 static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 2889 { 2890 struct hci_ev_auth_complete *ev = (void *) skb->data; 2891 struct hci_conn *conn; 2892 2893 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 2894 2895 hci_dev_lock(hdev); 2896 2897 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 2898 if (!conn) 2899 goto unlock; 2900 2901 if (!ev->status) { 2902 clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags); 2903 2904 if (!hci_conn_ssp_enabled(conn) && 2905 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) { 2906 bt_dev_info(hdev, "re-auth of legacy device is not possible."); 2907 } else { 2908 set_bit(HCI_CONN_AUTH, &conn->flags); 2909 conn->sec_level = conn->pending_sec_level; 2910 } 2911 } else { 2912 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING) 2913 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags); 2914 2915 mgmt_auth_failed(conn, ev->status); 2916 } 2917 2918 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags); 2919 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags); 2920 2921 if (conn->state == BT_CONFIG) { 2922 if (!ev->status && hci_conn_ssp_enabled(conn)) { 2923 struct hci_cp_set_conn_encrypt cp; 2924 cp.handle = ev->handle; 2925 cp.encrypt = 0x01; 2926 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), 2927 &cp); 2928 } else { 2929 conn->state = BT_CONNECTED; 2930 hci_connect_cfm(conn, ev->status); 2931 hci_conn_drop(conn); 2932 } 2933 } else { 2934 hci_auth_cfm(conn, ev->status); 2935 2936 hci_conn_hold(conn); 2937 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 2938 hci_conn_drop(conn); 2939 } 2940 2941 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) { 2942 if (!ev->status) { 2943 struct hci_cp_set_conn_encrypt cp; 2944 cp.handle = ev->handle; 2945 cp.encrypt = 0x01; 2946 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), 2947 &cp); 2948 } else { 2949 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); 2950 hci_encrypt_cfm(conn, ev->status); 2951 } 2952 } 2953 2954 unlock: 2955 hci_dev_unlock(hdev); 2956 } 2957 2958 static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb) 2959 { 2960 struct hci_ev_remote_name *ev = (void *) skb->data; 2961 struct hci_conn *conn; 2962 2963 BT_DBG("%s", hdev->name); 2964 2965 hci_conn_check_pending(hdev); 2966 2967 hci_dev_lock(hdev); 2968 2969 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 2970 2971 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 2972 goto check_auth; 2973 2974 if (ev->status == 0) 2975 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name, 2976 strnlen(ev->name, HCI_MAX_NAME_LENGTH)); 2977 else 2978 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0); 2979 2980 check_auth: 2981 if (!conn) 2982 goto unlock; 2983 2984 if (!hci_outgoing_auth_needed(hdev, conn)) 2985 goto unlock; 2986 2987 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) { 2988 struct hci_cp_auth_requested cp; 2989 2990 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags); 2991 2992 cp.handle = __cpu_to_le16(conn->handle); 2993 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp); 2994 } 2995 2996 unlock: 2997 hci_dev_unlock(hdev); 2998 } 2999 3000 static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status, 3001 u16 opcode, struct sk_buff *skb) 3002 { 3003 const struct hci_rp_read_enc_key_size *rp; 3004 struct hci_conn *conn; 3005 u16 handle; 3006 3007 BT_DBG("%s status 0x%02x", hdev->name, status); 3008 3009 if (!skb || skb->len < sizeof(*rp)) { 3010 bt_dev_err(hdev, "invalid read key size response"); 3011 return; 3012 } 3013 3014 rp = (void *)skb->data; 3015 handle = le16_to_cpu(rp->handle); 3016 3017 hci_dev_lock(hdev); 3018 3019 conn = hci_conn_hash_lookup_handle(hdev, handle); 3020 if (!conn) 3021 goto unlock; 3022 3023 /* While unexpected, the read_enc_key_size command may fail. The most 3024 * secure approach is to then assume the key size is 0 to force a 3025 * disconnection. 3026 */ 3027 if (rp->status) { 3028 bt_dev_err(hdev, "failed to read key size for handle %u", 3029 handle); 3030 conn->enc_key_size = 0; 3031 } else { 3032 conn->enc_key_size = rp->key_size; 3033 } 3034 3035 hci_encrypt_cfm(conn, 0); 3036 3037 unlock: 3038 hci_dev_unlock(hdev); 3039 } 3040 3041 static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb) 3042 { 3043 struct hci_ev_encrypt_change *ev = (void *) skb->data; 3044 struct hci_conn *conn; 3045 3046 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 3047 3048 hci_dev_lock(hdev); 3049 3050 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 3051 if (!conn) 3052 goto unlock; 3053 3054 if (!ev->status) { 3055 if (ev->encrypt) { 3056 /* Encryption implies authentication */ 3057 set_bit(HCI_CONN_AUTH, &conn->flags); 3058 set_bit(HCI_CONN_ENCRYPT, &conn->flags); 3059 conn->sec_level = conn->pending_sec_level; 3060 3061 /* P-256 authentication key implies FIPS */ 3062 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256) 3063 set_bit(HCI_CONN_FIPS, &conn->flags); 3064 3065 if ((conn->type == ACL_LINK && ev->encrypt == 0x02) || 3066 conn->type == LE_LINK) 3067 set_bit(HCI_CONN_AES_CCM, &conn->flags); 3068 } else { 3069 clear_bit(HCI_CONN_ENCRYPT, &conn->flags); 3070 clear_bit(HCI_CONN_AES_CCM, &conn->flags); 3071 } 3072 } 3073 3074 /* We should disregard the current RPA and generate a new one 3075 * whenever the encryption procedure fails. 3076 */ 3077 if (ev->status && conn->type == LE_LINK) { 3078 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED); 3079 hci_adv_instances_set_rpa_expired(hdev, true); 3080 } 3081 3082 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); 3083 3084 /* Check link security requirements are met */ 3085 if (!hci_conn_check_link_mode(conn)) 3086 ev->status = HCI_ERROR_AUTH_FAILURE; 3087 3088 if (ev->status && conn->state == BT_CONNECTED) { 3089 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING) 3090 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags); 3091 3092 /* Notify upper layers so they can cleanup before 3093 * disconnecting. 3094 */ 3095 hci_encrypt_cfm(conn, ev->status); 3096 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE); 3097 hci_conn_drop(conn); 3098 goto unlock; 3099 } 3100 3101 /* Try reading the encryption key size for encrypted ACL links */ 3102 if (!ev->status && ev->encrypt && conn->type == ACL_LINK) { 3103 struct hci_cp_read_enc_key_size cp; 3104 struct hci_request req; 3105 3106 /* Only send HCI_Read_Encryption_Key_Size if the 3107 * controller really supports it. If it doesn't, assume 3108 * the default size (16). 3109 */ 3110 if (!(hdev->commands[20] & 0x10)) { 3111 conn->enc_key_size = HCI_LINK_KEY_SIZE; 3112 goto notify; 3113 } 3114 3115 hci_req_init(&req, hdev); 3116 3117 cp.handle = cpu_to_le16(conn->handle); 3118 hci_req_add(&req, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp); 3119 3120 if (hci_req_run_skb(&req, read_enc_key_size_complete)) { 3121 bt_dev_err(hdev, "sending read key size failed"); 3122 conn->enc_key_size = HCI_LINK_KEY_SIZE; 3123 goto notify; 3124 } 3125 3126 goto unlock; 3127 } 3128 3129 /* Set the default Authenticated Payload Timeout after 3130 * an LE Link is established. As per Core Spec v5.0, Vol 2, Part B 3131 * Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be 3132 * sent when the link is active and Encryption is enabled, the conn 3133 * type can be either LE or ACL and controller must support LMP Ping. 3134 * Ensure for AES-CCM encryption as well. 3135 */ 3136 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) && 3137 test_bit(HCI_CONN_AES_CCM, &conn->flags) && 3138 ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) || 3139 (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) { 3140 struct hci_cp_write_auth_payload_to cp; 3141 3142 cp.handle = cpu_to_le16(conn->handle); 3143 cp.timeout = cpu_to_le16(hdev->auth_payload_timeout); 3144 hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO, 3145 sizeof(cp), &cp); 3146 } 3147 3148 notify: 3149 hci_encrypt_cfm(conn, ev->status); 3150 3151 unlock: 3152 hci_dev_unlock(hdev); 3153 } 3154 3155 static void hci_change_link_key_complete_evt(struct hci_dev *hdev, 3156 struct sk_buff *skb) 3157 { 3158 struct hci_ev_change_link_key_complete *ev = (void *) skb->data; 3159 struct hci_conn *conn; 3160 3161 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 3162 3163 hci_dev_lock(hdev); 3164 3165 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 3166 if (conn) { 3167 if (!ev->status) 3168 set_bit(HCI_CONN_SECURE, &conn->flags); 3169 3170 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags); 3171 3172 hci_key_change_cfm(conn, ev->status); 3173 } 3174 3175 hci_dev_unlock(hdev); 3176 } 3177 3178 static void hci_remote_features_evt(struct hci_dev *hdev, 3179 struct sk_buff *skb) 3180 { 3181 struct hci_ev_remote_features *ev = (void *) skb->data; 3182 struct hci_conn *conn; 3183 3184 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 3185 3186 hci_dev_lock(hdev); 3187 3188 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 3189 if (!conn) 3190 goto unlock; 3191 3192 if (!ev->status) 3193 memcpy(conn->features[0], ev->features, 8); 3194 3195 if (conn->state != BT_CONFIG) 3196 goto unlock; 3197 3198 if (!ev->status && lmp_ext_feat_capable(hdev) && 3199 lmp_ext_feat_capable(conn)) { 3200 struct hci_cp_read_remote_ext_features cp; 3201 cp.handle = ev->handle; 3202 cp.page = 0x01; 3203 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES, 3204 sizeof(cp), &cp); 3205 goto unlock; 3206 } 3207 3208 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) { 3209 struct hci_cp_remote_name_req cp; 3210 memset(&cp, 0, sizeof(cp)); 3211 bacpy(&cp.bdaddr, &conn->dst); 3212 cp.pscan_rep_mode = 0x02; 3213 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp); 3214 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) 3215 mgmt_device_connected(hdev, conn, 0, NULL, 0); 3216 3217 if (!hci_outgoing_auth_needed(hdev, conn)) { 3218 conn->state = BT_CONNECTED; 3219 hci_connect_cfm(conn, ev->status); 3220 hci_conn_drop(conn); 3221 } 3222 3223 unlock: 3224 hci_dev_unlock(hdev); 3225 } 3226 3227 static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb, 3228 u16 *opcode, u8 *status, 3229 hci_req_complete_t *req_complete, 3230 hci_req_complete_skb_t *req_complete_skb) 3231 { 3232 struct hci_ev_cmd_complete *ev = (void *) skb->data; 3233 3234 *opcode = __le16_to_cpu(ev->opcode); 3235 *status = skb->data[sizeof(*ev)]; 3236 3237 skb_pull(skb, sizeof(*ev)); 3238 3239 switch (*opcode) { 3240 case HCI_OP_INQUIRY_CANCEL: 3241 hci_cc_inquiry_cancel(hdev, skb, status); 3242 break; 3243 3244 case HCI_OP_PERIODIC_INQ: 3245 hci_cc_periodic_inq(hdev, skb); 3246 break; 3247 3248 case HCI_OP_EXIT_PERIODIC_INQ: 3249 hci_cc_exit_periodic_inq(hdev, skb); 3250 break; 3251 3252 case HCI_OP_REMOTE_NAME_REQ_CANCEL: 3253 hci_cc_remote_name_req_cancel(hdev, skb); 3254 break; 3255 3256 case HCI_OP_ROLE_DISCOVERY: 3257 hci_cc_role_discovery(hdev, skb); 3258 break; 3259 3260 case HCI_OP_READ_LINK_POLICY: 3261 hci_cc_read_link_policy(hdev, skb); 3262 break; 3263 3264 case HCI_OP_WRITE_LINK_POLICY: 3265 hci_cc_write_link_policy(hdev, skb); 3266 break; 3267 3268 case HCI_OP_READ_DEF_LINK_POLICY: 3269 hci_cc_read_def_link_policy(hdev, skb); 3270 break; 3271 3272 case HCI_OP_WRITE_DEF_LINK_POLICY: 3273 hci_cc_write_def_link_policy(hdev, skb); 3274 break; 3275 3276 case HCI_OP_RESET: 3277 hci_cc_reset(hdev, skb); 3278 break; 3279 3280 case HCI_OP_READ_STORED_LINK_KEY: 3281 hci_cc_read_stored_link_key(hdev, skb); 3282 break; 3283 3284 case HCI_OP_DELETE_STORED_LINK_KEY: 3285 hci_cc_delete_stored_link_key(hdev, skb); 3286 break; 3287 3288 case HCI_OP_WRITE_LOCAL_NAME: 3289 hci_cc_write_local_name(hdev, skb); 3290 break; 3291 3292 case HCI_OP_READ_LOCAL_NAME: 3293 hci_cc_read_local_name(hdev, skb); 3294 break; 3295 3296 case HCI_OP_WRITE_AUTH_ENABLE: 3297 hci_cc_write_auth_enable(hdev, skb); 3298 break; 3299 3300 case HCI_OP_WRITE_ENCRYPT_MODE: 3301 hci_cc_write_encrypt_mode(hdev, skb); 3302 break; 3303 3304 case HCI_OP_WRITE_SCAN_ENABLE: 3305 hci_cc_write_scan_enable(hdev, skb); 3306 break; 3307 3308 case HCI_OP_READ_CLASS_OF_DEV: 3309 hci_cc_read_class_of_dev(hdev, skb); 3310 break; 3311 3312 case HCI_OP_WRITE_CLASS_OF_DEV: 3313 hci_cc_write_class_of_dev(hdev, skb); 3314 break; 3315 3316 case HCI_OP_READ_VOICE_SETTING: 3317 hci_cc_read_voice_setting(hdev, skb); 3318 break; 3319 3320 case HCI_OP_WRITE_VOICE_SETTING: 3321 hci_cc_write_voice_setting(hdev, skb); 3322 break; 3323 3324 case HCI_OP_READ_NUM_SUPPORTED_IAC: 3325 hci_cc_read_num_supported_iac(hdev, skb); 3326 break; 3327 3328 case HCI_OP_WRITE_SSP_MODE: 3329 hci_cc_write_ssp_mode(hdev, skb); 3330 break; 3331 3332 case HCI_OP_WRITE_SC_SUPPORT: 3333 hci_cc_write_sc_support(hdev, skb); 3334 break; 3335 3336 case HCI_OP_READ_AUTH_PAYLOAD_TO: 3337 hci_cc_read_auth_payload_timeout(hdev, skb); 3338 break; 3339 3340 case HCI_OP_WRITE_AUTH_PAYLOAD_TO: 3341 hci_cc_write_auth_payload_timeout(hdev, skb); 3342 break; 3343 3344 case HCI_OP_READ_LOCAL_VERSION: 3345 hci_cc_read_local_version(hdev, skb); 3346 break; 3347 3348 case HCI_OP_READ_LOCAL_COMMANDS: 3349 hci_cc_read_local_commands(hdev, skb); 3350 break; 3351 3352 case HCI_OP_READ_LOCAL_FEATURES: 3353 hci_cc_read_local_features(hdev, skb); 3354 break; 3355 3356 case HCI_OP_READ_LOCAL_EXT_FEATURES: 3357 hci_cc_read_local_ext_features(hdev, skb); 3358 break; 3359 3360 case HCI_OP_READ_BUFFER_SIZE: 3361 hci_cc_read_buffer_size(hdev, skb); 3362 break; 3363 3364 case HCI_OP_READ_BD_ADDR: 3365 hci_cc_read_bd_addr(hdev, skb); 3366 break; 3367 3368 case HCI_OP_READ_LOCAL_PAIRING_OPTS: 3369 hci_cc_read_local_pairing_opts(hdev, skb); 3370 break; 3371 3372 case HCI_OP_READ_PAGE_SCAN_ACTIVITY: 3373 hci_cc_read_page_scan_activity(hdev, skb); 3374 break; 3375 3376 case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY: 3377 hci_cc_write_page_scan_activity(hdev, skb); 3378 break; 3379 3380 case HCI_OP_READ_PAGE_SCAN_TYPE: 3381 hci_cc_read_page_scan_type(hdev, skb); 3382 break; 3383 3384 case HCI_OP_WRITE_PAGE_SCAN_TYPE: 3385 hci_cc_write_page_scan_type(hdev, skb); 3386 break; 3387 3388 case HCI_OP_READ_DATA_BLOCK_SIZE: 3389 hci_cc_read_data_block_size(hdev, skb); 3390 break; 3391 3392 case HCI_OP_READ_FLOW_CONTROL_MODE: 3393 hci_cc_read_flow_control_mode(hdev, skb); 3394 break; 3395 3396 case HCI_OP_READ_LOCAL_AMP_INFO: 3397 hci_cc_read_local_amp_info(hdev, skb); 3398 break; 3399 3400 case HCI_OP_READ_CLOCK: 3401 hci_cc_read_clock(hdev, skb); 3402 break; 3403 3404 case HCI_OP_READ_INQ_RSP_TX_POWER: 3405 hci_cc_read_inq_rsp_tx_power(hdev, skb); 3406 break; 3407 3408 case HCI_OP_READ_DEF_ERR_DATA_REPORTING: 3409 hci_cc_read_def_err_data_reporting(hdev, skb); 3410 break; 3411 3412 case HCI_OP_WRITE_DEF_ERR_DATA_REPORTING: 3413 hci_cc_write_def_err_data_reporting(hdev, skb); 3414 break; 3415 3416 case HCI_OP_PIN_CODE_REPLY: 3417 hci_cc_pin_code_reply(hdev, skb); 3418 break; 3419 3420 case HCI_OP_PIN_CODE_NEG_REPLY: 3421 hci_cc_pin_code_neg_reply(hdev, skb); 3422 break; 3423 3424 case HCI_OP_READ_LOCAL_OOB_DATA: 3425 hci_cc_read_local_oob_data(hdev, skb); 3426 break; 3427 3428 case HCI_OP_READ_LOCAL_OOB_EXT_DATA: 3429 hci_cc_read_local_oob_ext_data(hdev, skb); 3430 break; 3431 3432 case HCI_OP_LE_READ_BUFFER_SIZE: 3433 hci_cc_le_read_buffer_size(hdev, skb); 3434 break; 3435 3436 case HCI_OP_LE_READ_LOCAL_FEATURES: 3437 hci_cc_le_read_local_features(hdev, skb); 3438 break; 3439 3440 case HCI_OP_LE_READ_ADV_TX_POWER: 3441 hci_cc_le_read_adv_tx_power(hdev, skb); 3442 break; 3443 3444 case HCI_OP_USER_CONFIRM_REPLY: 3445 hci_cc_user_confirm_reply(hdev, skb); 3446 break; 3447 3448 case HCI_OP_USER_CONFIRM_NEG_REPLY: 3449 hci_cc_user_confirm_neg_reply(hdev, skb); 3450 break; 3451 3452 case HCI_OP_USER_PASSKEY_REPLY: 3453 hci_cc_user_passkey_reply(hdev, skb); 3454 break; 3455 3456 case HCI_OP_USER_PASSKEY_NEG_REPLY: 3457 hci_cc_user_passkey_neg_reply(hdev, skb); 3458 break; 3459 3460 case HCI_OP_LE_SET_RANDOM_ADDR: 3461 hci_cc_le_set_random_addr(hdev, skb); 3462 break; 3463 3464 case HCI_OP_LE_SET_ADV_ENABLE: 3465 hci_cc_le_set_adv_enable(hdev, skb); 3466 break; 3467 3468 case HCI_OP_LE_SET_SCAN_PARAM: 3469 hci_cc_le_set_scan_param(hdev, skb); 3470 break; 3471 3472 case HCI_OP_LE_SET_SCAN_ENABLE: 3473 hci_cc_le_set_scan_enable(hdev, skb); 3474 break; 3475 3476 case HCI_OP_LE_READ_WHITE_LIST_SIZE: 3477 hci_cc_le_read_white_list_size(hdev, skb); 3478 break; 3479 3480 case HCI_OP_LE_CLEAR_WHITE_LIST: 3481 hci_cc_le_clear_white_list(hdev, skb); 3482 break; 3483 3484 case HCI_OP_LE_ADD_TO_WHITE_LIST: 3485 hci_cc_le_add_to_white_list(hdev, skb); 3486 break; 3487 3488 case HCI_OP_LE_DEL_FROM_WHITE_LIST: 3489 hci_cc_le_del_from_white_list(hdev, skb); 3490 break; 3491 3492 case HCI_OP_LE_READ_SUPPORTED_STATES: 3493 hci_cc_le_read_supported_states(hdev, skb); 3494 break; 3495 3496 case HCI_OP_LE_READ_DEF_DATA_LEN: 3497 hci_cc_le_read_def_data_len(hdev, skb); 3498 break; 3499 3500 case HCI_OP_LE_WRITE_DEF_DATA_LEN: 3501 hci_cc_le_write_def_data_len(hdev, skb); 3502 break; 3503 3504 case HCI_OP_LE_ADD_TO_RESOLV_LIST: 3505 hci_cc_le_add_to_resolv_list(hdev, skb); 3506 break; 3507 3508 case HCI_OP_LE_DEL_FROM_RESOLV_LIST: 3509 hci_cc_le_del_from_resolv_list(hdev, skb); 3510 break; 3511 3512 case HCI_OP_LE_CLEAR_RESOLV_LIST: 3513 hci_cc_le_clear_resolv_list(hdev, skb); 3514 break; 3515 3516 case HCI_OP_LE_READ_RESOLV_LIST_SIZE: 3517 hci_cc_le_read_resolv_list_size(hdev, skb); 3518 break; 3519 3520 case HCI_OP_LE_SET_ADDR_RESOLV_ENABLE: 3521 hci_cc_le_set_addr_resolution_enable(hdev, skb); 3522 break; 3523 3524 case HCI_OP_LE_READ_MAX_DATA_LEN: 3525 hci_cc_le_read_max_data_len(hdev, skb); 3526 break; 3527 3528 case HCI_OP_WRITE_LE_HOST_SUPPORTED: 3529 hci_cc_write_le_host_supported(hdev, skb); 3530 break; 3531 3532 case HCI_OP_LE_SET_ADV_PARAM: 3533 hci_cc_set_adv_param(hdev, skb); 3534 break; 3535 3536 case HCI_OP_READ_RSSI: 3537 hci_cc_read_rssi(hdev, skb); 3538 break; 3539 3540 case HCI_OP_READ_TX_POWER: 3541 hci_cc_read_tx_power(hdev, skb); 3542 break; 3543 3544 case HCI_OP_WRITE_SSP_DEBUG_MODE: 3545 hci_cc_write_ssp_debug_mode(hdev, skb); 3546 break; 3547 3548 case HCI_OP_LE_SET_EXT_SCAN_PARAMS: 3549 hci_cc_le_set_ext_scan_param(hdev, skb); 3550 break; 3551 3552 case HCI_OP_LE_SET_EXT_SCAN_ENABLE: 3553 hci_cc_le_set_ext_scan_enable(hdev, skb); 3554 break; 3555 3556 case HCI_OP_LE_SET_DEFAULT_PHY: 3557 hci_cc_le_set_default_phy(hdev, skb); 3558 break; 3559 3560 case HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS: 3561 hci_cc_le_read_num_adv_sets(hdev, skb); 3562 break; 3563 3564 case HCI_OP_LE_SET_EXT_ADV_PARAMS: 3565 hci_cc_set_ext_adv_param(hdev, skb); 3566 break; 3567 3568 case HCI_OP_LE_SET_EXT_ADV_ENABLE: 3569 hci_cc_le_set_ext_adv_enable(hdev, skb); 3570 break; 3571 3572 case HCI_OP_LE_SET_ADV_SET_RAND_ADDR: 3573 hci_cc_le_set_adv_set_random_addr(hdev, skb); 3574 break; 3575 3576 default: 3577 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode); 3578 break; 3579 } 3580 3581 if (*opcode != HCI_OP_NOP) 3582 cancel_delayed_work(&hdev->cmd_timer); 3583 3584 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) 3585 atomic_set(&hdev->cmd_cnt, 1); 3586 3587 hci_req_cmd_complete(hdev, *opcode, *status, req_complete, 3588 req_complete_skb); 3589 3590 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) { 3591 bt_dev_err(hdev, 3592 "unexpected event for opcode 0x%4.4x", *opcode); 3593 return; 3594 } 3595 3596 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q)) 3597 queue_work(hdev->workqueue, &hdev->cmd_work); 3598 } 3599 3600 static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb, 3601 u16 *opcode, u8 *status, 3602 hci_req_complete_t *req_complete, 3603 hci_req_complete_skb_t *req_complete_skb) 3604 { 3605 struct hci_ev_cmd_status *ev = (void *) skb->data; 3606 3607 skb_pull(skb, sizeof(*ev)); 3608 3609 *opcode = __le16_to_cpu(ev->opcode); 3610 *status = ev->status; 3611 3612 switch (*opcode) { 3613 case HCI_OP_INQUIRY: 3614 hci_cs_inquiry(hdev, ev->status); 3615 break; 3616 3617 case HCI_OP_CREATE_CONN: 3618 hci_cs_create_conn(hdev, ev->status); 3619 break; 3620 3621 case HCI_OP_DISCONNECT: 3622 hci_cs_disconnect(hdev, ev->status); 3623 break; 3624 3625 case HCI_OP_ADD_SCO: 3626 hci_cs_add_sco(hdev, ev->status); 3627 break; 3628 3629 case HCI_OP_AUTH_REQUESTED: 3630 hci_cs_auth_requested(hdev, ev->status); 3631 break; 3632 3633 case HCI_OP_SET_CONN_ENCRYPT: 3634 hci_cs_set_conn_encrypt(hdev, ev->status); 3635 break; 3636 3637 case HCI_OP_REMOTE_NAME_REQ: 3638 hci_cs_remote_name_req(hdev, ev->status); 3639 break; 3640 3641 case HCI_OP_READ_REMOTE_FEATURES: 3642 hci_cs_read_remote_features(hdev, ev->status); 3643 break; 3644 3645 case HCI_OP_READ_REMOTE_EXT_FEATURES: 3646 hci_cs_read_remote_ext_features(hdev, ev->status); 3647 break; 3648 3649 case HCI_OP_SETUP_SYNC_CONN: 3650 hci_cs_setup_sync_conn(hdev, ev->status); 3651 break; 3652 3653 case HCI_OP_SNIFF_MODE: 3654 hci_cs_sniff_mode(hdev, ev->status); 3655 break; 3656 3657 case HCI_OP_EXIT_SNIFF_MODE: 3658 hci_cs_exit_sniff_mode(hdev, ev->status); 3659 break; 3660 3661 case HCI_OP_SWITCH_ROLE: 3662 hci_cs_switch_role(hdev, ev->status); 3663 break; 3664 3665 case HCI_OP_LE_CREATE_CONN: 3666 hci_cs_le_create_conn(hdev, ev->status); 3667 break; 3668 3669 case HCI_OP_LE_READ_REMOTE_FEATURES: 3670 hci_cs_le_read_remote_features(hdev, ev->status); 3671 break; 3672 3673 case HCI_OP_LE_START_ENC: 3674 hci_cs_le_start_enc(hdev, ev->status); 3675 break; 3676 3677 case HCI_OP_LE_EXT_CREATE_CONN: 3678 hci_cs_le_ext_create_conn(hdev, ev->status); 3679 break; 3680 3681 default: 3682 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode); 3683 break; 3684 } 3685 3686 if (*opcode != HCI_OP_NOP) 3687 cancel_delayed_work(&hdev->cmd_timer); 3688 3689 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) 3690 atomic_set(&hdev->cmd_cnt, 1); 3691 3692 /* Indicate request completion if the command failed. Also, if 3693 * we're not waiting for a special event and we get a success 3694 * command status we should try to flag the request as completed 3695 * (since for this kind of commands there will not be a command 3696 * complete event). 3697 */ 3698 if (ev->status || 3699 (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->hci.req_event)) 3700 hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete, 3701 req_complete_skb); 3702 3703 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) { 3704 bt_dev_err(hdev, 3705 "unexpected event for opcode 0x%4.4x", *opcode); 3706 return; 3707 } 3708 3709 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q)) 3710 queue_work(hdev->workqueue, &hdev->cmd_work); 3711 } 3712 3713 static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb) 3714 { 3715 struct hci_ev_hardware_error *ev = (void *) skb->data; 3716 3717 hdev->hw_error_code = ev->code; 3718 3719 queue_work(hdev->req_workqueue, &hdev->error_reset); 3720 } 3721 3722 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb) 3723 { 3724 struct hci_ev_role_change *ev = (void *) skb->data; 3725 struct hci_conn *conn; 3726 3727 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 3728 3729 hci_dev_lock(hdev); 3730 3731 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 3732 if (conn) { 3733 if (!ev->status) 3734 conn->role = ev->role; 3735 3736 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags); 3737 3738 hci_role_switch_cfm(conn, ev->status, ev->role); 3739 } 3740 3741 hci_dev_unlock(hdev); 3742 } 3743 3744 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb) 3745 { 3746 struct hci_ev_num_comp_pkts *ev = (void *) skb->data; 3747 int i; 3748 3749 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) { 3750 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode); 3751 return; 3752 } 3753 3754 if (skb->len < sizeof(*ev) || 3755 skb->len < struct_size(ev, handles, ev->num_hndl)) { 3756 BT_DBG("%s bad parameters", hdev->name); 3757 return; 3758 } 3759 3760 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl); 3761 3762 for (i = 0; i < ev->num_hndl; i++) { 3763 struct hci_comp_pkts_info *info = &ev->handles[i]; 3764 struct hci_conn *conn; 3765 __u16 handle, count; 3766 3767 handle = __le16_to_cpu(info->handle); 3768 count = __le16_to_cpu(info->count); 3769 3770 conn = hci_conn_hash_lookup_handle(hdev, handle); 3771 if (!conn) 3772 continue; 3773 3774 conn->sent -= count; 3775 3776 switch (conn->type) { 3777 case ACL_LINK: 3778 hdev->acl_cnt += count; 3779 if (hdev->acl_cnt > hdev->acl_pkts) 3780 hdev->acl_cnt = hdev->acl_pkts; 3781 break; 3782 3783 case LE_LINK: 3784 if (hdev->le_pkts) { 3785 hdev->le_cnt += count; 3786 if (hdev->le_cnt > hdev->le_pkts) 3787 hdev->le_cnt = hdev->le_pkts; 3788 } else { 3789 hdev->acl_cnt += count; 3790 if (hdev->acl_cnt > hdev->acl_pkts) 3791 hdev->acl_cnt = hdev->acl_pkts; 3792 } 3793 break; 3794 3795 case SCO_LINK: 3796 hdev->sco_cnt += count; 3797 if (hdev->sco_cnt > hdev->sco_pkts) 3798 hdev->sco_cnt = hdev->sco_pkts; 3799 break; 3800 3801 default: 3802 bt_dev_err(hdev, "unknown type %d conn %p", 3803 conn->type, conn); 3804 break; 3805 } 3806 } 3807 3808 queue_work(hdev->workqueue, &hdev->tx_work); 3809 } 3810 3811 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev, 3812 __u16 handle) 3813 { 3814 struct hci_chan *chan; 3815 3816 switch (hdev->dev_type) { 3817 case HCI_PRIMARY: 3818 return hci_conn_hash_lookup_handle(hdev, handle); 3819 case HCI_AMP: 3820 chan = hci_chan_lookup_handle(hdev, handle); 3821 if (chan) 3822 return chan->conn; 3823 break; 3824 default: 3825 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type); 3826 break; 3827 } 3828 3829 return NULL; 3830 } 3831 3832 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb) 3833 { 3834 struct hci_ev_num_comp_blocks *ev = (void *) skb->data; 3835 int i; 3836 3837 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) { 3838 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode); 3839 return; 3840 } 3841 3842 if (skb->len < sizeof(*ev) || 3843 skb->len < struct_size(ev, handles, ev->num_hndl)) { 3844 BT_DBG("%s bad parameters", hdev->name); 3845 return; 3846 } 3847 3848 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks, 3849 ev->num_hndl); 3850 3851 for (i = 0; i < ev->num_hndl; i++) { 3852 struct hci_comp_blocks_info *info = &ev->handles[i]; 3853 struct hci_conn *conn = NULL; 3854 __u16 handle, block_count; 3855 3856 handle = __le16_to_cpu(info->handle); 3857 block_count = __le16_to_cpu(info->blocks); 3858 3859 conn = __hci_conn_lookup_handle(hdev, handle); 3860 if (!conn) 3861 continue; 3862 3863 conn->sent -= block_count; 3864 3865 switch (conn->type) { 3866 case ACL_LINK: 3867 case AMP_LINK: 3868 hdev->block_cnt += block_count; 3869 if (hdev->block_cnt > hdev->num_blocks) 3870 hdev->block_cnt = hdev->num_blocks; 3871 break; 3872 3873 default: 3874 bt_dev_err(hdev, "unknown type %d conn %p", 3875 conn->type, conn); 3876 break; 3877 } 3878 } 3879 3880 queue_work(hdev->workqueue, &hdev->tx_work); 3881 } 3882 3883 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb) 3884 { 3885 struct hci_ev_mode_change *ev = (void *) skb->data; 3886 struct hci_conn *conn; 3887 3888 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 3889 3890 hci_dev_lock(hdev); 3891 3892 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 3893 if (conn) { 3894 conn->mode = ev->mode; 3895 3896 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, 3897 &conn->flags)) { 3898 if (conn->mode == HCI_CM_ACTIVE) 3899 set_bit(HCI_CONN_POWER_SAVE, &conn->flags); 3900 else 3901 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags); 3902 } 3903 3904 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags)) 3905 hci_sco_setup(conn, ev->status); 3906 } 3907 3908 hci_dev_unlock(hdev); 3909 } 3910 3911 static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb) 3912 { 3913 struct hci_ev_pin_code_req *ev = (void *) skb->data; 3914 struct hci_conn *conn; 3915 3916 BT_DBG("%s", hdev->name); 3917 3918 hci_dev_lock(hdev); 3919 3920 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 3921 if (!conn) 3922 goto unlock; 3923 3924 if (conn->state == BT_CONNECTED) { 3925 hci_conn_hold(conn); 3926 conn->disc_timeout = HCI_PAIRING_TIMEOUT; 3927 hci_conn_drop(conn); 3928 } 3929 3930 if (!hci_dev_test_flag(hdev, HCI_BONDABLE) && 3931 !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) { 3932 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY, 3933 sizeof(ev->bdaddr), &ev->bdaddr); 3934 } else if (hci_dev_test_flag(hdev, HCI_MGMT)) { 3935 u8 secure; 3936 3937 if (conn->pending_sec_level == BT_SECURITY_HIGH) 3938 secure = 1; 3939 else 3940 secure = 0; 3941 3942 mgmt_pin_code_request(hdev, &ev->bdaddr, secure); 3943 } 3944 3945 unlock: 3946 hci_dev_unlock(hdev); 3947 } 3948 3949 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len) 3950 { 3951 if (key_type == HCI_LK_CHANGED_COMBINATION) 3952 return; 3953 3954 conn->pin_length = pin_len; 3955 conn->key_type = key_type; 3956 3957 switch (key_type) { 3958 case HCI_LK_LOCAL_UNIT: 3959 case HCI_LK_REMOTE_UNIT: 3960 case HCI_LK_DEBUG_COMBINATION: 3961 return; 3962 case HCI_LK_COMBINATION: 3963 if (pin_len == 16) 3964 conn->pending_sec_level = BT_SECURITY_HIGH; 3965 else 3966 conn->pending_sec_level = BT_SECURITY_MEDIUM; 3967 break; 3968 case HCI_LK_UNAUTH_COMBINATION_P192: 3969 case HCI_LK_UNAUTH_COMBINATION_P256: 3970 conn->pending_sec_level = BT_SECURITY_MEDIUM; 3971 break; 3972 case HCI_LK_AUTH_COMBINATION_P192: 3973 conn->pending_sec_level = BT_SECURITY_HIGH; 3974 break; 3975 case HCI_LK_AUTH_COMBINATION_P256: 3976 conn->pending_sec_level = BT_SECURITY_FIPS; 3977 break; 3978 } 3979 } 3980 3981 static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb) 3982 { 3983 struct hci_ev_link_key_req *ev = (void *) skb->data; 3984 struct hci_cp_link_key_reply cp; 3985 struct hci_conn *conn; 3986 struct link_key *key; 3987 3988 BT_DBG("%s", hdev->name); 3989 3990 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 3991 return; 3992 3993 hci_dev_lock(hdev); 3994 3995 key = hci_find_link_key(hdev, &ev->bdaddr); 3996 if (!key) { 3997 BT_DBG("%s link key not found for %pMR", hdev->name, 3998 &ev->bdaddr); 3999 goto not_found; 4000 } 4001 4002 BT_DBG("%s found key type %u for %pMR", hdev->name, key->type, 4003 &ev->bdaddr); 4004 4005 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 4006 if (conn) { 4007 clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags); 4008 4009 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 || 4010 key->type == HCI_LK_UNAUTH_COMBINATION_P256) && 4011 conn->auth_type != 0xff && (conn->auth_type & 0x01)) { 4012 BT_DBG("%s ignoring unauthenticated key", hdev->name); 4013 goto not_found; 4014 } 4015 4016 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 && 4017 (conn->pending_sec_level == BT_SECURITY_HIGH || 4018 conn->pending_sec_level == BT_SECURITY_FIPS)) { 4019 BT_DBG("%s ignoring key unauthenticated for high security", 4020 hdev->name); 4021 goto not_found; 4022 } 4023 4024 conn_set_key(conn, key->type, key->pin_len); 4025 } 4026 4027 bacpy(&cp.bdaddr, &ev->bdaddr); 4028 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE); 4029 4030 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp); 4031 4032 hci_dev_unlock(hdev); 4033 4034 return; 4035 4036 not_found: 4037 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr); 4038 hci_dev_unlock(hdev); 4039 } 4040 4041 static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb) 4042 { 4043 struct hci_ev_link_key_notify *ev = (void *) skb->data; 4044 struct hci_conn *conn; 4045 struct link_key *key; 4046 bool persistent; 4047 u8 pin_len = 0; 4048 4049 BT_DBG("%s", hdev->name); 4050 4051 hci_dev_lock(hdev); 4052 4053 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 4054 if (!conn) 4055 goto unlock; 4056 4057 hci_conn_hold(conn); 4058 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 4059 hci_conn_drop(conn); 4060 4061 set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags); 4062 conn_set_key(conn, ev->key_type, conn->pin_length); 4063 4064 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 4065 goto unlock; 4066 4067 key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key, 4068 ev->key_type, pin_len, &persistent); 4069 if (!key) 4070 goto unlock; 4071 4072 /* Update connection information since adding the key will have 4073 * fixed up the type in the case of changed combination keys. 4074 */ 4075 if (ev->key_type == HCI_LK_CHANGED_COMBINATION) 4076 conn_set_key(conn, key->type, key->pin_len); 4077 4078 mgmt_new_link_key(hdev, key, persistent); 4079 4080 /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag 4081 * is set. If it's not set simply remove the key from the kernel 4082 * list (we've still notified user space about it but with 4083 * store_hint being 0). 4084 */ 4085 if (key->type == HCI_LK_DEBUG_COMBINATION && 4086 !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) { 4087 list_del_rcu(&key->list); 4088 kfree_rcu(key, rcu); 4089 goto unlock; 4090 } 4091 4092 if (persistent) 4093 clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags); 4094 else 4095 set_bit(HCI_CONN_FLUSH_KEY, &conn->flags); 4096 4097 unlock: 4098 hci_dev_unlock(hdev); 4099 } 4100 4101 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb) 4102 { 4103 struct hci_ev_clock_offset *ev = (void *) skb->data; 4104 struct hci_conn *conn; 4105 4106 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 4107 4108 hci_dev_lock(hdev); 4109 4110 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 4111 if (conn && !ev->status) { 4112 struct inquiry_entry *ie; 4113 4114 ie = hci_inquiry_cache_lookup(hdev, &conn->dst); 4115 if (ie) { 4116 ie->data.clock_offset = ev->clock_offset; 4117 ie->timestamp = jiffies; 4118 } 4119 } 4120 4121 hci_dev_unlock(hdev); 4122 } 4123 4124 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb) 4125 { 4126 struct hci_ev_pkt_type_change *ev = (void *) skb->data; 4127 struct hci_conn *conn; 4128 4129 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 4130 4131 hci_dev_lock(hdev); 4132 4133 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 4134 if (conn && !ev->status) 4135 conn->pkt_type = __le16_to_cpu(ev->pkt_type); 4136 4137 hci_dev_unlock(hdev); 4138 } 4139 4140 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb) 4141 { 4142 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data; 4143 struct inquiry_entry *ie; 4144 4145 BT_DBG("%s", hdev->name); 4146 4147 hci_dev_lock(hdev); 4148 4149 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr); 4150 if (ie) { 4151 ie->data.pscan_rep_mode = ev->pscan_rep_mode; 4152 ie->timestamp = jiffies; 4153 } 4154 4155 hci_dev_unlock(hdev); 4156 } 4157 4158 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, 4159 struct sk_buff *skb) 4160 { 4161 struct inquiry_data data; 4162 int num_rsp = *((__u8 *) skb->data); 4163 4164 BT_DBG("%s num_rsp %d", hdev->name, num_rsp); 4165 4166 if (!num_rsp) 4167 return; 4168 4169 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) 4170 return; 4171 4172 hci_dev_lock(hdev); 4173 4174 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) { 4175 struct inquiry_info_with_rssi_and_pscan_mode *info; 4176 info = (void *) (skb->data + 1); 4177 4178 if (skb->len < num_rsp * sizeof(*info) + 1) 4179 goto unlock; 4180 4181 for (; num_rsp; num_rsp--, info++) { 4182 u32 flags; 4183 4184 bacpy(&data.bdaddr, &info->bdaddr); 4185 data.pscan_rep_mode = info->pscan_rep_mode; 4186 data.pscan_period_mode = info->pscan_period_mode; 4187 data.pscan_mode = info->pscan_mode; 4188 memcpy(data.dev_class, info->dev_class, 3); 4189 data.clock_offset = info->clock_offset; 4190 data.rssi = info->rssi; 4191 data.ssp_mode = 0x00; 4192 4193 flags = hci_inquiry_cache_update(hdev, &data, false); 4194 4195 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 4196 info->dev_class, info->rssi, 4197 flags, NULL, 0, NULL, 0); 4198 } 4199 } else { 4200 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1); 4201 4202 if (skb->len < num_rsp * sizeof(*info) + 1) 4203 goto unlock; 4204 4205 for (; num_rsp; num_rsp--, info++) { 4206 u32 flags; 4207 4208 bacpy(&data.bdaddr, &info->bdaddr); 4209 data.pscan_rep_mode = info->pscan_rep_mode; 4210 data.pscan_period_mode = info->pscan_period_mode; 4211 data.pscan_mode = 0x00; 4212 memcpy(data.dev_class, info->dev_class, 3); 4213 data.clock_offset = info->clock_offset; 4214 data.rssi = info->rssi; 4215 data.ssp_mode = 0x00; 4216 4217 flags = hci_inquiry_cache_update(hdev, &data, false); 4218 4219 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 4220 info->dev_class, info->rssi, 4221 flags, NULL, 0, NULL, 0); 4222 } 4223 } 4224 4225 unlock: 4226 hci_dev_unlock(hdev); 4227 } 4228 4229 static void hci_remote_ext_features_evt(struct hci_dev *hdev, 4230 struct sk_buff *skb) 4231 { 4232 struct hci_ev_remote_ext_features *ev = (void *) skb->data; 4233 struct hci_conn *conn; 4234 4235 BT_DBG("%s", hdev->name); 4236 4237 hci_dev_lock(hdev); 4238 4239 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 4240 if (!conn) 4241 goto unlock; 4242 4243 if (ev->page < HCI_MAX_PAGES) 4244 memcpy(conn->features[ev->page], ev->features, 8); 4245 4246 if (!ev->status && ev->page == 0x01) { 4247 struct inquiry_entry *ie; 4248 4249 ie = hci_inquiry_cache_lookup(hdev, &conn->dst); 4250 if (ie) 4251 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP); 4252 4253 if (ev->features[0] & LMP_HOST_SSP) { 4254 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags); 4255 } else { 4256 /* It is mandatory by the Bluetooth specification that 4257 * Extended Inquiry Results are only used when Secure 4258 * Simple Pairing is enabled, but some devices violate 4259 * this. 4260 * 4261 * To make these devices work, the internal SSP 4262 * enabled flag needs to be cleared if the remote host 4263 * features do not indicate SSP support */ 4264 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags); 4265 } 4266 4267 if (ev->features[0] & LMP_HOST_SC) 4268 set_bit(HCI_CONN_SC_ENABLED, &conn->flags); 4269 } 4270 4271 if (conn->state != BT_CONFIG) 4272 goto unlock; 4273 4274 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) { 4275 struct hci_cp_remote_name_req cp; 4276 memset(&cp, 0, sizeof(cp)); 4277 bacpy(&cp.bdaddr, &conn->dst); 4278 cp.pscan_rep_mode = 0x02; 4279 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp); 4280 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) 4281 mgmt_device_connected(hdev, conn, 0, NULL, 0); 4282 4283 if (!hci_outgoing_auth_needed(hdev, conn)) { 4284 conn->state = BT_CONNECTED; 4285 hci_connect_cfm(conn, ev->status); 4286 hci_conn_drop(conn); 4287 } 4288 4289 unlock: 4290 hci_dev_unlock(hdev); 4291 } 4292 4293 static void hci_sync_conn_complete_evt(struct hci_dev *hdev, 4294 struct sk_buff *skb) 4295 { 4296 struct hci_ev_sync_conn_complete *ev = (void *) skb->data; 4297 struct hci_conn *conn; 4298 4299 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 4300 4301 hci_dev_lock(hdev); 4302 4303 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); 4304 if (!conn) { 4305 if (ev->link_type == ESCO_LINK) 4306 goto unlock; 4307 4308 /* When the link type in the event indicates SCO connection 4309 * and lookup of the connection object fails, then check 4310 * if an eSCO connection object exists. 4311 * 4312 * The core limits the synchronous connections to either 4313 * SCO or eSCO. The eSCO connection is preferred and tried 4314 * to be setup first and until successfully established, 4315 * the link type will be hinted as eSCO. 4316 */ 4317 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr); 4318 if (!conn) 4319 goto unlock; 4320 } 4321 4322 switch (ev->status) { 4323 case 0x00: 4324 conn->handle = __le16_to_cpu(ev->handle); 4325 conn->state = BT_CONNECTED; 4326 conn->type = ev->link_type; 4327 4328 hci_debugfs_create_conn(conn); 4329 hci_conn_add_sysfs(conn); 4330 break; 4331 4332 case 0x10: /* Connection Accept Timeout */ 4333 case 0x0d: /* Connection Rejected due to Limited Resources */ 4334 case 0x11: /* Unsupported Feature or Parameter Value */ 4335 case 0x1c: /* SCO interval rejected */ 4336 case 0x1a: /* Unsupported Remote Feature */ 4337 case 0x1e: /* Invalid LMP Parameters */ 4338 case 0x1f: /* Unspecified error */ 4339 case 0x20: /* Unsupported LMP Parameter value */ 4340 if (conn->out) { 4341 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) | 4342 (hdev->esco_type & EDR_ESCO_MASK); 4343 if (hci_setup_sync(conn, conn->link->handle)) 4344 goto unlock; 4345 } 4346 fallthrough; 4347 4348 default: 4349 conn->state = BT_CLOSED; 4350 break; 4351 } 4352 4353 bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode); 4354 4355 switch (conn->setting & SCO_AIRMODE_MASK) { 4356 case SCO_AIRMODE_CVSD: 4357 if (hdev->notify) 4358 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD); 4359 break; 4360 case SCO_AIRMODE_TRANSP: 4361 if (hdev->notify) 4362 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP); 4363 break; 4364 } 4365 4366 hci_connect_cfm(conn, ev->status); 4367 if (ev->status) 4368 hci_conn_del(conn); 4369 4370 unlock: 4371 hci_dev_unlock(hdev); 4372 } 4373 4374 static inline size_t eir_get_length(u8 *eir, size_t eir_len) 4375 { 4376 size_t parsed = 0; 4377 4378 while (parsed < eir_len) { 4379 u8 field_len = eir[0]; 4380 4381 if (field_len == 0) 4382 return parsed; 4383 4384 parsed += field_len + 1; 4385 eir += field_len + 1; 4386 } 4387 4388 return eir_len; 4389 } 4390 4391 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, 4392 struct sk_buff *skb) 4393 { 4394 struct inquiry_data data; 4395 struct extended_inquiry_info *info = (void *) (skb->data + 1); 4396 int num_rsp = *((__u8 *) skb->data); 4397 size_t eir_len; 4398 4399 BT_DBG("%s num_rsp %d", hdev->name, num_rsp); 4400 4401 if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1) 4402 return; 4403 4404 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) 4405 return; 4406 4407 hci_dev_lock(hdev); 4408 4409 for (; num_rsp; num_rsp--, info++) { 4410 u32 flags; 4411 bool name_known; 4412 4413 bacpy(&data.bdaddr, &info->bdaddr); 4414 data.pscan_rep_mode = info->pscan_rep_mode; 4415 data.pscan_period_mode = info->pscan_period_mode; 4416 data.pscan_mode = 0x00; 4417 memcpy(data.dev_class, info->dev_class, 3); 4418 data.clock_offset = info->clock_offset; 4419 data.rssi = info->rssi; 4420 data.ssp_mode = 0x01; 4421 4422 if (hci_dev_test_flag(hdev, HCI_MGMT)) 4423 name_known = eir_get_data(info->data, 4424 sizeof(info->data), 4425 EIR_NAME_COMPLETE, NULL); 4426 else 4427 name_known = true; 4428 4429 flags = hci_inquiry_cache_update(hdev, &data, name_known); 4430 4431 eir_len = eir_get_length(info->data, sizeof(info->data)); 4432 4433 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 4434 info->dev_class, info->rssi, 4435 flags, info->data, eir_len, NULL, 0); 4436 } 4437 4438 hci_dev_unlock(hdev); 4439 } 4440 4441 static void hci_key_refresh_complete_evt(struct hci_dev *hdev, 4442 struct sk_buff *skb) 4443 { 4444 struct hci_ev_key_refresh_complete *ev = (void *) skb->data; 4445 struct hci_conn *conn; 4446 4447 BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status, 4448 __le16_to_cpu(ev->handle)); 4449 4450 hci_dev_lock(hdev); 4451 4452 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 4453 if (!conn) 4454 goto unlock; 4455 4456 /* For BR/EDR the necessary steps are taken through the 4457 * auth_complete event. 4458 */ 4459 if (conn->type != LE_LINK) 4460 goto unlock; 4461 4462 if (!ev->status) 4463 conn->sec_level = conn->pending_sec_level; 4464 4465 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); 4466 4467 if (ev->status && conn->state == BT_CONNECTED) { 4468 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE); 4469 hci_conn_drop(conn); 4470 goto unlock; 4471 } 4472 4473 if (conn->state == BT_CONFIG) { 4474 if (!ev->status) 4475 conn->state = BT_CONNECTED; 4476 4477 hci_connect_cfm(conn, ev->status); 4478 hci_conn_drop(conn); 4479 } else { 4480 hci_auth_cfm(conn, ev->status); 4481 4482 hci_conn_hold(conn); 4483 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 4484 hci_conn_drop(conn); 4485 } 4486 4487 unlock: 4488 hci_dev_unlock(hdev); 4489 } 4490 4491 static u8 hci_get_auth_req(struct hci_conn *conn) 4492 { 4493 /* If remote requests no-bonding follow that lead */ 4494 if (conn->remote_auth == HCI_AT_NO_BONDING || 4495 conn->remote_auth == HCI_AT_NO_BONDING_MITM) 4496 return conn->remote_auth | (conn->auth_type & 0x01); 4497 4498 /* If both remote and local have enough IO capabilities, require 4499 * MITM protection 4500 */ 4501 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT && 4502 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT) 4503 return conn->remote_auth | 0x01; 4504 4505 /* No MITM protection possible so ignore remote requirement */ 4506 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01); 4507 } 4508 4509 static u8 bredr_oob_data_present(struct hci_conn *conn) 4510 { 4511 struct hci_dev *hdev = conn->hdev; 4512 struct oob_data *data; 4513 4514 data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR); 4515 if (!data) 4516 return 0x00; 4517 4518 if (bredr_sc_enabled(hdev)) { 4519 /* When Secure Connections is enabled, then just 4520 * return the present value stored with the OOB 4521 * data. The stored value contains the right present 4522 * information. However it can only be trusted when 4523 * not in Secure Connection Only mode. 4524 */ 4525 if (!hci_dev_test_flag(hdev, HCI_SC_ONLY)) 4526 return data->present; 4527 4528 /* When Secure Connections Only mode is enabled, then 4529 * the P-256 values are required. If they are not 4530 * available, then do not declare that OOB data is 4531 * present. 4532 */ 4533 if (!memcmp(data->rand256, ZERO_KEY, 16) || 4534 !memcmp(data->hash256, ZERO_KEY, 16)) 4535 return 0x00; 4536 4537 return 0x02; 4538 } 4539 4540 /* When Secure Connections is not enabled or actually 4541 * not supported by the hardware, then check that if 4542 * P-192 data values are present. 4543 */ 4544 if (!memcmp(data->rand192, ZERO_KEY, 16) || 4545 !memcmp(data->hash192, ZERO_KEY, 16)) 4546 return 0x00; 4547 4548 return 0x01; 4549 } 4550 4551 static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb) 4552 { 4553 struct hci_ev_io_capa_request *ev = (void *) skb->data; 4554 struct hci_conn *conn; 4555 4556 BT_DBG("%s", hdev->name); 4557 4558 hci_dev_lock(hdev); 4559 4560 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 4561 if (!conn) 4562 goto unlock; 4563 4564 hci_conn_hold(conn); 4565 4566 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 4567 goto unlock; 4568 4569 /* Allow pairing if we're pairable, the initiators of the 4570 * pairing or if the remote is not requesting bonding. 4571 */ 4572 if (hci_dev_test_flag(hdev, HCI_BONDABLE) || 4573 test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) || 4574 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) { 4575 struct hci_cp_io_capability_reply cp; 4576 4577 bacpy(&cp.bdaddr, &ev->bdaddr); 4578 /* Change the IO capability from KeyboardDisplay 4579 * to DisplayYesNo as it is not supported by BT spec. */ 4580 cp.capability = (conn->io_capability == 0x04) ? 4581 HCI_IO_DISPLAY_YESNO : conn->io_capability; 4582 4583 /* If we are initiators, there is no remote information yet */ 4584 if (conn->remote_auth == 0xff) { 4585 /* Request MITM protection if our IO caps allow it 4586 * except for the no-bonding case. 4587 */ 4588 if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT && 4589 conn->auth_type != HCI_AT_NO_BONDING) 4590 conn->auth_type |= 0x01; 4591 } else { 4592 conn->auth_type = hci_get_auth_req(conn); 4593 } 4594 4595 /* If we're not bondable, force one of the non-bondable 4596 * authentication requirement values. 4597 */ 4598 if (!hci_dev_test_flag(hdev, HCI_BONDABLE)) 4599 conn->auth_type &= HCI_AT_NO_BONDING_MITM; 4600 4601 cp.authentication = conn->auth_type; 4602 cp.oob_data = bredr_oob_data_present(conn); 4603 4604 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY, 4605 sizeof(cp), &cp); 4606 } else { 4607 struct hci_cp_io_capability_neg_reply cp; 4608 4609 bacpy(&cp.bdaddr, &ev->bdaddr); 4610 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED; 4611 4612 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY, 4613 sizeof(cp), &cp); 4614 } 4615 4616 unlock: 4617 hci_dev_unlock(hdev); 4618 } 4619 4620 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb) 4621 { 4622 struct hci_ev_io_capa_reply *ev = (void *) skb->data; 4623 struct hci_conn *conn; 4624 4625 BT_DBG("%s", hdev->name); 4626 4627 hci_dev_lock(hdev); 4628 4629 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 4630 if (!conn) 4631 goto unlock; 4632 4633 conn->remote_cap = ev->capability; 4634 conn->remote_auth = ev->authentication; 4635 4636 unlock: 4637 hci_dev_unlock(hdev); 4638 } 4639 4640 static void hci_user_confirm_request_evt(struct hci_dev *hdev, 4641 struct sk_buff *skb) 4642 { 4643 struct hci_ev_user_confirm_req *ev = (void *) skb->data; 4644 int loc_mitm, rem_mitm, confirm_hint = 0; 4645 struct hci_conn *conn; 4646 4647 BT_DBG("%s", hdev->name); 4648 4649 hci_dev_lock(hdev); 4650 4651 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 4652 goto unlock; 4653 4654 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 4655 if (!conn) 4656 goto unlock; 4657 4658 loc_mitm = (conn->auth_type & 0x01); 4659 rem_mitm = (conn->remote_auth & 0x01); 4660 4661 /* If we require MITM but the remote device can't provide that 4662 * (it has NoInputNoOutput) then reject the confirmation 4663 * request. We check the security level here since it doesn't 4664 * necessarily match conn->auth_type. 4665 */ 4666 if (conn->pending_sec_level > BT_SECURITY_MEDIUM && 4667 conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) { 4668 BT_DBG("Rejecting request: remote device can't provide MITM"); 4669 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY, 4670 sizeof(ev->bdaddr), &ev->bdaddr); 4671 goto unlock; 4672 } 4673 4674 /* If no side requires MITM protection; auto-accept */ 4675 if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) && 4676 (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) { 4677 4678 /* If we're not the initiators request authorization to 4679 * proceed from user space (mgmt_user_confirm with 4680 * confirm_hint set to 1). The exception is if neither 4681 * side had MITM or if the local IO capability is 4682 * NoInputNoOutput, in which case we do auto-accept 4683 */ 4684 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && 4685 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT && 4686 (loc_mitm || rem_mitm)) { 4687 BT_DBG("Confirming auto-accept as acceptor"); 4688 confirm_hint = 1; 4689 goto confirm; 4690 } 4691 4692 /* If there already exists link key in local host, leave the 4693 * decision to user space since the remote device could be 4694 * legitimate or malicious. 4695 */ 4696 if (hci_find_link_key(hdev, &ev->bdaddr)) { 4697 bt_dev_dbg(hdev, "Local host already has link key"); 4698 confirm_hint = 1; 4699 goto confirm; 4700 } 4701 4702 BT_DBG("Auto-accept of user confirmation with %ums delay", 4703 hdev->auto_accept_delay); 4704 4705 if (hdev->auto_accept_delay > 0) { 4706 int delay = msecs_to_jiffies(hdev->auto_accept_delay); 4707 queue_delayed_work(conn->hdev->workqueue, 4708 &conn->auto_accept_work, delay); 4709 goto unlock; 4710 } 4711 4712 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, 4713 sizeof(ev->bdaddr), &ev->bdaddr); 4714 goto unlock; 4715 } 4716 4717 confirm: 4718 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, 4719 le32_to_cpu(ev->passkey), confirm_hint); 4720 4721 unlock: 4722 hci_dev_unlock(hdev); 4723 } 4724 4725 static void hci_user_passkey_request_evt(struct hci_dev *hdev, 4726 struct sk_buff *skb) 4727 { 4728 struct hci_ev_user_passkey_req *ev = (void *) skb->data; 4729 4730 BT_DBG("%s", hdev->name); 4731 4732 if (hci_dev_test_flag(hdev, HCI_MGMT)) 4733 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0); 4734 } 4735 4736 static void hci_user_passkey_notify_evt(struct hci_dev *hdev, 4737 struct sk_buff *skb) 4738 { 4739 struct hci_ev_user_passkey_notify *ev = (void *) skb->data; 4740 struct hci_conn *conn; 4741 4742 BT_DBG("%s", hdev->name); 4743 4744 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 4745 if (!conn) 4746 return; 4747 4748 conn->passkey_notify = __le32_to_cpu(ev->passkey); 4749 conn->passkey_entered = 0; 4750 4751 if (hci_dev_test_flag(hdev, HCI_MGMT)) 4752 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type, 4753 conn->dst_type, conn->passkey_notify, 4754 conn->passkey_entered); 4755 } 4756 4757 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb) 4758 { 4759 struct hci_ev_keypress_notify *ev = (void *) skb->data; 4760 struct hci_conn *conn; 4761 4762 BT_DBG("%s", hdev->name); 4763 4764 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 4765 if (!conn) 4766 return; 4767 4768 switch (ev->type) { 4769 case HCI_KEYPRESS_STARTED: 4770 conn->passkey_entered = 0; 4771 return; 4772 4773 case HCI_KEYPRESS_ENTERED: 4774 conn->passkey_entered++; 4775 break; 4776 4777 case HCI_KEYPRESS_ERASED: 4778 conn->passkey_entered--; 4779 break; 4780 4781 case HCI_KEYPRESS_CLEARED: 4782 conn->passkey_entered = 0; 4783 break; 4784 4785 case HCI_KEYPRESS_COMPLETED: 4786 return; 4787 } 4788 4789 if (hci_dev_test_flag(hdev, HCI_MGMT)) 4790 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type, 4791 conn->dst_type, conn->passkey_notify, 4792 conn->passkey_entered); 4793 } 4794 4795 static void hci_simple_pair_complete_evt(struct hci_dev *hdev, 4796 struct sk_buff *skb) 4797 { 4798 struct hci_ev_simple_pair_complete *ev = (void *) skb->data; 4799 struct hci_conn *conn; 4800 4801 BT_DBG("%s", hdev->name); 4802 4803 hci_dev_lock(hdev); 4804 4805 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 4806 if (!conn) 4807 goto unlock; 4808 4809 /* Reset the authentication requirement to unknown */ 4810 conn->remote_auth = 0xff; 4811 4812 /* To avoid duplicate auth_failed events to user space we check 4813 * the HCI_CONN_AUTH_PEND flag which will be set if we 4814 * initiated the authentication. A traditional auth_complete 4815 * event gets always produced as initiator and is also mapped to 4816 * the mgmt_auth_failed event */ 4817 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status) 4818 mgmt_auth_failed(conn, ev->status); 4819 4820 hci_conn_drop(conn); 4821 4822 unlock: 4823 hci_dev_unlock(hdev); 4824 } 4825 4826 static void hci_remote_host_features_evt(struct hci_dev *hdev, 4827 struct sk_buff *skb) 4828 { 4829 struct hci_ev_remote_host_features *ev = (void *) skb->data; 4830 struct inquiry_entry *ie; 4831 struct hci_conn *conn; 4832 4833 BT_DBG("%s", hdev->name); 4834 4835 hci_dev_lock(hdev); 4836 4837 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 4838 if (conn) 4839 memcpy(conn->features[1], ev->features, 8); 4840 4841 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr); 4842 if (ie) 4843 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP); 4844 4845 hci_dev_unlock(hdev); 4846 } 4847 4848 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev, 4849 struct sk_buff *skb) 4850 { 4851 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data; 4852 struct oob_data *data; 4853 4854 BT_DBG("%s", hdev->name); 4855 4856 hci_dev_lock(hdev); 4857 4858 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 4859 goto unlock; 4860 4861 data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR); 4862 if (!data) { 4863 struct hci_cp_remote_oob_data_neg_reply cp; 4864 4865 bacpy(&cp.bdaddr, &ev->bdaddr); 4866 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, 4867 sizeof(cp), &cp); 4868 goto unlock; 4869 } 4870 4871 if (bredr_sc_enabled(hdev)) { 4872 struct hci_cp_remote_oob_ext_data_reply cp; 4873 4874 bacpy(&cp.bdaddr, &ev->bdaddr); 4875 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) { 4876 memset(cp.hash192, 0, sizeof(cp.hash192)); 4877 memset(cp.rand192, 0, sizeof(cp.rand192)); 4878 } else { 4879 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192)); 4880 memcpy(cp.rand192, data->rand192, sizeof(cp.rand192)); 4881 } 4882 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256)); 4883 memcpy(cp.rand256, data->rand256, sizeof(cp.rand256)); 4884 4885 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY, 4886 sizeof(cp), &cp); 4887 } else { 4888 struct hci_cp_remote_oob_data_reply cp; 4889 4890 bacpy(&cp.bdaddr, &ev->bdaddr); 4891 memcpy(cp.hash, data->hash192, sizeof(cp.hash)); 4892 memcpy(cp.rand, data->rand192, sizeof(cp.rand)); 4893 4894 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, 4895 sizeof(cp), &cp); 4896 } 4897 4898 unlock: 4899 hci_dev_unlock(hdev); 4900 } 4901 4902 #if IS_ENABLED(CONFIG_BT_HS) 4903 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb) 4904 { 4905 struct hci_ev_channel_selected *ev = (void *)skb->data; 4906 struct hci_conn *hcon; 4907 4908 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle); 4909 4910 skb_pull(skb, sizeof(*ev)); 4911 4912 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle); 4913 if (!hcon) 4914 return; 4915 4916 amp_read_loc_assoc_final_data(hdev, hcon); 4917 } 4918 4919 static void hci_phy_link_complete_evt(struct hci_dev *hdev, 4920 struct sk_buff *skb) 4921 { 4922 struct hci_ev_phy_link_complete *ev = (void *) skb->data; 4923 struct hci_conn *hcon, *bredr_hcon; 4924 4925 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle, 4926 ev->status); 4927 4928 hci_dev_lock(hdev); 4929 4930 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle); 4931 if (!hcon) { 4932 hci_dev_unlock(hdev); 4933 return; 4934 } 4935 4936 if (ev->status) { 4937 hci_conn_del(hcon); 4938 hci_dev_unlock(hdev); 4939 return; 4940 } 4941 4942 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon; 4943 4944 hcon->state = BT_CONNECTED; 4945 bacpy(&hcon->dst, &bredr_hcon->dst); 4946 4947 hci_conn_hold(hcon); 4948 hcon->disc_timeout = HCI_DISCONN_TIMEOUT; 4949 hci_conn_drop(hcon); 4950 4951 hci_debugfs_create_conn(hcon); 4952 hci_conn_add_sysfs(hcon); 4953 4954 amp_physical_cfm(bredr_hcon, hcon); 4955 4956 hci_dev_unlock(hdev); 4957 } 4958 4959 static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 4960 { 4961 struct hci_ev_logical_link_complete *ev = (void *) skb->data; 4962 struct hci_conn *hcon; 4963 struct hci_chan *hchan; 4964 struct amp_mgr *mgr; 4965 4966 BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x", 4967 hdev->name, le16_to_cpu(ev->handle), ev->phy_handle, 4968 ev->status); 4969 4970 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle); 4971 if (!hcon) 4972 return; 4973 4974 /* Create AMP hchan */ 4975 hchan = hci_chan_create(hcon); 4976 if (!hchan) 4977 return; 4978 4979 hchan->handle = le16_to_cpu(ev->handle); 4980 4981 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan); 4982 4983 mgr = hcon->amp_mgr; 4984 if (mgr && mgr->bredr_chan) { 4985 struct l2cap_chan *bredr_chan = mgr->bredr_chan; 4986 4987 l2cap_chan_lock(bredr_chan); 4988 4989 bredr_chan->conn->mtu = hdev->block_mtu; 4990 l2cap_logical_cfm(bredr_chan, hchan, 0); 4991 hci_conn_hold(hcon); 4992 4993 l2cap_chan_unlock(bredr_chan); 4994 } 4995 } 4996 4997 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev, 4998 struct sk_buff *skb) 4999 { 5000 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data; 5001 struct hci_chan *hchan; 5002 5003 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name, 5004 le16_to_cpu(ev->handle), ev->status); 5005 5006 if (ev->status) 5007 return; 5008 5009 hci_dev_lock(hdev); 5010 5011 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle)); 5012 if (!hchan) 5013 goto unlock; 5014 5015 amp_destroy_logical_link(hchan, ev->reason); 5016 5017 unlock: 5018 hci_dev_unlock(hdev); 5019 } 5020 5021 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev, 5022 struct sk_buff *skb) 5023 { 5024 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data; 5025 struct hci_conn *hcon; 5026 5027 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 5028 5029 if (ev->status) 5030 return; 5031 5032 hci_dev_lock(hdev); 5033 5034 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle); 5035 if (hcon) { 5036 hcon->state = BT_CLOSED; 5037 hci_conn_del(hcon); 5038 } 5039 5040 hci_dev_unlock(hdev); 5041 } 5042 #endif 5043 5044 static void le_conn_complete_evt(struct hci_dev *hdev, u8 status, 5045 bdaddr_t *bdaddr, u8 bdaddr_type, u8 role, u16 handle, 5046 u16 interval, u16 latency, u16 supervision_timeout) 5047 { 5048 struct hci_conn_params *params; 5049 struct hci_conn *conn; 5050 struct smp_irk *irk; 5051 u8 addr_type; 5052 5053 hci_dev_lock(hdev); 5054 5055 /* All controllers implicitly stop advertising in the event of a 5056 * connection, so ensure that the state bit is cleared. 5057 */ 5058 hci_dev_clear_flag(hdev, HCI_LE_ADV); 5059 5060 conn = hci_lookup_le_connect(hdev); 5061 if (!conn) { 5062 conn = hci_conn_add(hdev, LE_LINK, bdaddr, role); 5063 if (!conn) { 5064 bt_dev_err(hdev, "no memory for new connection"); 5065 goto unlock; 5066 } 5067 5068 conn->dst_type = bdaddr_type; 5069 5070 /* If we didn't have a hci_conn object previously 5071 * but we're in master role this must be something 5072 * initiated using a white list. Since white list based 5073 * connections are not "first class citizens" we don't 5074 * have full tracking of them. Therefore, we go ahead 5075 * with a "best effort" approach of determining the 5076 * initiator address based on the HCI_PRIVACY flag. 5077 */ 5078 if (conn->out) { 5079 conn->resp_addr_type = bdaddr_type; 5080 bacpy(&conn->resp_addr, bdaddr); 5081 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) { 5082 conn->init_addr_type = ADDR_LE_DEV_RANDOM; 5083 bacpy(&conn->init_addr, &hdev->rpa); 5084 } else { 5085 hci_copy_identity_address(hdev, 5086 &conn->init_addr, 5087 &conn->init_addr_type); 5088 } 5089 } 5090 } else { 5091 cancel_delayed_work(&conn->le_conn_timeout); 5092 } 5093 5094 if (!conn->out) { 5095 /* Set the responder (our side) address type based on 5096 * the advertising address type. 5097 */ 5098 conn->resp_addr_type = hdev->adv_addr_type; 5099 if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) { 5100 /* In case of ext adv, resp_addr will be updated in 5101 * Adv Terminated event. 5102 */ 5103 if (!ext_adv_capable(hdev)) 5104 bacpy(&conn->resp_addr, &hdev->random_addr); 5105 } else { 5106 bacpy(&conn->resp_addr, &hdev->bdaddr); 5107 } 5108 5109 conn->init_addr_type = bdaddr_type; 5110 bacpy(&conn->init_addr, bdaddr); 5111 5112 /* For incoming connections, set the default minimum 5113 * and maximum connection interval. They will be used 5114 * to check if the parameters are in range and if not 5115 * trigger the connection update procedure. 5116 */ 5117 conn->le_conn_min_interval = hdev->le_conn_min_interval; 5118 conn->le_conn_max_interval = hdev->le_conn_max_interval; 5119 } 5120 5121 /* Lookup the identity address from the stored connection 5122 * address and address type. 5123 * 5124 * When establishing connections to an identity address, the 5125 * connection procedure will store the resolvable random 5126 * address first. Now if it can be converted back into the 5127 * identity address, start using the identity address from 5128 * now on. 5129 */ 5130 irk = hci_get_irk(hdev, &conn->dst, conn->dst_type); 5131 if (irk) { 5132 bacpy(&conn->dst, &irk->bdaddr); 5133 conn->dst_type = irk->addr_type; 5134 } 5135 5136 if (status) { 5137 hci_le_conn_failed(conn, status); 5138 goto unlock; 5139 } 5140 5141 if (conn->dst_type == ADDR_LE_DEV_PUBLIC) 5142 addr_type = BDADDR_LE_PUBLIC; 5143 else 5144 addr_type = BDADDR_LE_RANDOM; 5145 5146 /* Drop the connection if the device is blocked */ 5147 if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) { 5148 hci_conn_drop(conn); 5149 goto unlock; 5150 } 5151 5152 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) 5153 mgmt_device_connected(hdev, conn, 0, NULL, 0); 5154 5155 conn->sec_level = BT_SECURITY_LOW; 5156 conn->handle = handle; 5157 conn->state = BT_CONFIG; 5158 5159 conn->le_conn_interval = interval; 5160 conn->le_conn_latency = latency; 5161 conn->le_supv_timeout = supervision_timeout; 5162 5163 hci_debugfs_create_conn(conn); 5164 hci_conn_add_sysfs(conn); 5165 5166 /* The remote features procedure is defined for master 5167 * role only. So only in case of an initiated connection 5168 * request the remote features. 5169 * 5170 * If the local controller supports slave-initiated features 5171 * exchange, then requesting the remote features in slave 5172 * role is possible. Otherwise just transition into the 5173 * connected state without requesting the remote features. 5174 */ 5175 if (conn->out || 5176 (hdev->le_features[0] & HCI_LE_SLAVE_FEATURES)) { 5177 struct hci_cp_le_read_remote_features cp; 5178 5179 cp.handle = __cpu_to_le16(conn->handle); 5180 5181 hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES, 5182 sizeof(cp), &cp); 5183 5184 hci_conn_hold(conn); 5185 } else { 5186 conn->state = BT_CONNECTED; 5187 hci_connect_cfm(conn, status); 5188 } 5189 5190 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst, 5191 conn->dst_type); 5192 if (params) { 5193 list_del_init(¶ms->action); 5194 if (params->conn) { 5195 hci_conn_drop(params->conn); 5196 hci_conn_put(params->conn); 5197 params->conn = NULL; 5198 } 5199 } 5200 5201 unlock: 5202 hci_update_background_scan(hdev); 5203 hci_dev_unlock(hdev); 5204 } 5205 5206 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 5207 { 5208 struct hci_ev_le_conn_complete *ev = (void *) skb->data; 5209 5210 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 5211 5212 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type, 5213 ev->role, le16_to_cpu(ev->handle), 5214 le16_to_cpu(ev->interval), 5215 le16_to_cpu(ev->latency), 5216 le16_to_cpu(ev->supervision_timeout)); 5217 } 5218 5219 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev, 5220 struct sk_buff *skb) 5221 { 5222 struct hci_ev_le_enh_conn_complete *ev = (void *) skb->data; 5223 5224 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 5225 5226 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type, 5227 ev->role, le16_to_cpu(ev->handle), 5228 le16_to_cpu(ev->interval), 5229 le16_to_cpu(ev->latency), 5230 le16_to_cpu(ev->supervision_timeout)); 5231 5232 if (use_ll_privacy(hdev) && 5233 hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) 5234 hci_req_disable_address_resolution(hdev); 5235 } 5236 5237 static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb) 5238 { 5239 struct hci_evt_le_ext_adv_set_term *ev = (void *) skb->data; 5240 struct hci_conn *conn; 5241 5242 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 5243 5244 if (ev->status) 5245 return; 5246 5247 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle)); 5248 if (conn) { 5249 struct adv_info *adv_instance; 5250 5251 if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM) 5252 return; 5253 5254 if (!hdev->cur_adv_instance) { 5255 bacpy(&conn->resp_addr, &hdev->random_addr); 5256 return; 5257 } 5258 5259 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance); 5260 if (adv_instance) 5261 bacpy(&conn->resp_addr, &adv_instance->random_addr); 5262 } 5263 } 5264 5265 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev, 5266 struct sk_buff *skb) 5267 { 5268 struct hci_ev_le_conn_update_complete *ev = (void *) skb->data; 5269 struct hci_conn *conn; 5270 5271 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 5272 5273 if (ev->status) 5274 return; 5275 5276 hci_dev_lock(hdev); 5277 5278 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 5279 if (conn) { 5280 conn->le_conn_interval = le16_to_cpu(ev->interval); 5281 conn->le_conn_latency = le16_to_cpu(ev->latency); 5282 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout); 5283 } 5284 5285 hci_dev_unlock(hdev); 5286 } 5287 5288 /* This function requires the caller holds hdev->lock */ 5289 static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev, 5290 bdaddr_t *addr, 5291 u8 addr_type, u8 adv_type, 5292 bdaddr_t *direct_rpa) 5293 { 5294 struct hci_conn *conn; 5295 struct hci_conn_params *params; 5296 5297 /* If the event is not connectable don't proceed further */ 5298 if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND) 5299 return NULL; 5300 5301 /* Ignore if the device is blocked */ 5302 if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type)) 5303 return NULL; 5304 5305 /* Most controller will fail if we try to create new connections 5306 * while we have an existing one in slave role. 5307 */ 5308 if (hdev->conn_hash.le_num_slave > 0 && 5309 (!test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) || 5310 !(hdev->le_states[3] & 0x10))) 5311 return NULL; 5312 5313 /* If we're not connectable only connect devices that we have in 5314 * our pend_le_conns list. 5315 */ 5316 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr, 5317 addr_type); 5318 if (!params) 5319 return NULL; 5320 5321 if (!params->explicit_connect) { 5322 switch (params->auto_connect) { 5323 case HCI_AUTO_CONN_DIRECT: 5324 /* Only devices advertising with ADV_DIRECT_IND are 5325 * triggering a connection attempt. This is allowing 5326 * incoming connections from slave devices. 5327 */ 5328 if (adv_type != LE_ADV_DIRECT_IND) 5329 return NULL; 5330 break; 5331 case HCI_AUTO_CONN_ALWAYS: 5332 /* Devices advertising with ADV_IND or ADV_DIRECT_IND 5333 * are triggering a connection attempt. This means 5334 * that incoming connections from slave device are 5335 * accepted and also outgoing connections to slave 5336 * devices are established when found. 5337 */ 5338 break; 5339 default: 5340 return NULL; 5341 } 5342 } 5343 5344 conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW, 5345 hdev->def_le_autoconnect_timeout, HCI_ROLE_MASTER, 5346 direct_rpa); 5347 if (!IS_ERR(conn)) { 5348 /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned 5349 * by higher layer that tried to connect, if no then 5350 * store the pointer since we don't really have any 5351 * other owner of the object besides the params that 5352 * triggered it. This way we can abort the connection if 5353 * the parameters get removed and keep the reference 5354 * count consistent once the connection is established. 5355 */ 5356 5357 if (!params->explicit_connect) 5358 params->conn = hci_conn_get(conn); 5359 5360 return conn; 5361 } 5362 5363 switch (PTR_ERR(conn)) { 5364 case -EBUSY: 5365 /* If hci_connect() returns -EBUSY it means there is already 5366 * an LE connection attempt going on. Since controllers don't 5367 * support more than one connection attempt at the time, we 5368 * don't consider this an error case. 5369 */ 5370 break; 5371 default: 5372 BT_DBG("Failed to connect: err %ld", PTR_ERR(conn)); 5373 return NULL; 5374 } 5375 5376 return NULL; 5377 } 5378 5379 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, 5380 u8 bdaddr_type, bdaddr_t *direct_addr, 5381 u8 direct_addr_type, s8 rssi, u8 *data, u8 len) 5382 { 5383 struct discovery_state *d = &hdev->discovery; 5384 struct smp_irk *irk; 5385 struct hci_conn *conn; 5386 bool match; 5387 u32 flags; 5388 u8 *ptr, real_len; 5389 5390 switch (type) { 5391 case LE_ADV_IND: 5392 case LE_ADV_DIRECT_IND: 5393 case LE_ADV_SCAN_IND: 5394 case LE_ADV_NONCONN_IND: 5395 case LE_ADV_SCAN_RSP: 5396 break; 5397 default: 5398 bt_dev_err_ratelimited(hdev, "unknown advertising packet " 5399 "type: 0x%02x", type); 5400 return; 5401 } 5402 5403 /* Find the end of the data in case the report contains padded zero 5404 * bytes at the end causing an invalid length value. 5405 * 5406 * When data is NULL, len is 0 so there is no need for extra ptr 5407 * check as 'ptr < data + 0' is already false in such case. 5408 */ 5409 for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) { 5410 if (ptr + 1 + *ptr > data + len) 5411 break; 5412 } 5413 5414 real_len = ptr - data; 5415 5416 /* Adjust for actual length */ 5417 if (len != real_len) { 5418 bt_dev_err_ratelimited(hdev, "advertising data len corrected %u -> %u", 5419 len, real_len); 5420 len = real_len; 5421 } 5422 5423 /* If the direct address is present, then this report is from 5424 * a LE Direct Advertising Report event. In that case it is 5425 * important to see if the address is matching the local 5426 * controller address. 5427 */ 5428 if (direct_addr) { 5429 /* Only resolvable random addresses are valid for these 5430 * kind of reports and others can be ignored. 5431 */ 5432 if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type)) 5433 return; 5434 5435 /* If the controller is not using resolvable random 5436 * addresses, then this report can be ignored. 5437 */ 5438 if (!hci_dev_test_flag(hdev, HCI_PRIVACY)) 5439 return; 5440 5441 /* If the local IRK of the controller does not match 5442 * with the resolvable random address provided, then 5443 * this report can be ignored. 5444 */ 5445 if (!smp_irk_matches(hdev, hdev->irk, direct_addr)) 5446 return; 5447 } 5448 5449 /* Check if we need to convert to identity address */ 5450 irk = hci_get_irk(hdev, bdaddr, bdaddr_type); 5451 if (irk) { 5452 bdaddr = &irk->bdaddr; 5453 bdaddr_type = irk->addr_type; 5454 } 5455 5456 /* Check if we have been requested to connect to this device. 5457 * 5458 * direct_addr is set only for directed advertising reports (it is NULL 5459 * for advertising reports) and is already verified to be RPA above. 5460 */ 5461 conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type, 5462 direct_addr); 5463 if (conn && type == LE_ADV_IND) { 5464 /* Store report for later inclusion by 5465 * mgmt_device_connected 5466 */ 5467 memcpy(conn->le_adv_data, data, len); 5468 conn->le_adv_data_len = len; 5469 } 5470 5471 /* Passive scanning shouldn't trigger any device found events, 5472 * except for devices marked as CONN_REPORT for which we do send 5473 * device found events, or advertisement monitoring requested. 5474 */ 5475 if (hdev->le_scan_type == LE_SCAN_PASSIVE) { 5476 if (type == LE_ADV_DIRECT_IND) 5477 return; 5478 5479 if (!hci_pend_le_action_lookup(&hdev->pend_le_reports, 5480 bdaddr, bdaddr_type) && 5481 idr_is_empty(&hdev->adv_monitors_idr)) 5482 return; 5483 5484 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND) 5485 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE; 5486 else 5487 flags = 0; 5488 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL, 5489 rssi, flags, data, len, NULL, 0); 5490 return; 5491 } 5492 5493 /* When receiving non-connectable or scannable undirected 5494 * advertising reports, this means that the remote device is 5495 * not connectable and then clearly indicate this in the 5496 * device found event. 5497 * 5498 * When receiving a scan response, then there is no way to 5499 * know if the remote device is connectable or not. However 5500 * since scan responses are merged with a previously seen 5501 * advertising report, the flags field from that report 5502 * will be used. 5503 * 5504 * In the really unlikely case that a controller get confused 5505 * and just sends a scan response event, then it is marked as 5506 * not connectable as well. 5507 */ 5508 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND || 5509 type == LE_ADV_SCAN_RSP) 5510 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE; 5511 else 5512 flags = 0; 5513 5514 /* If there's nothing pending either store the data from this 5515 * event or send an immediate device found event if the data 5516 * should not be stored for later. 5517 */ 5518 if (!has_pending_adv_report(hdev)) { 5519 /* If the report will trigger a SCAN_REQ store it for 5520 * later merging. 5521 */ 5522 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) { 5523 store_pending_adv_report(hdev, bdaddr, bdaddr_type, 5524 rssi, flags, data, len); 5525 return; 5526 } 5527 5528 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL, 5529 rssi, flags, data, len, NULL, 0); 5530 return; 5531 } 5532 5533 /* Check if the pending report is for the same device as the new one */ 5534 match = (!bacmp(bdaddr, &d->last_adv_addr) && 5535 bdaddr_type == d->last_adv_addr_type); 5536 5537 /* If the pending data doesn't match this report or this isn't a 5538 * scan response (e.g. we got a duplicate ADV_IND) then force 5539 * sending of the pending data. 5540 */ 5541 if (type != LE_ADV_SCAN_RSP || !match) { 5542 /* Send out whatever is in the cache, but skip duplicates */ 5543 if (!match) 5544 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK, 5545 d->last_adv_addr_type, NULL, 5546 d->last_adv_rssi, d->last_adv_flags, 5547 d->last_adv_data, 5548 d->last_adv_data_len, NULL, 0); 5549 5550 /* If the new report will trigger a SCAN_REQ store it for 5551 * later merging. 5552 */ 5553 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) { 5554 store_pending_adv_report(hdev, bdaddr, bdaddr_type, 5555 rssi, flags, data, len); 5556 return; 5557 } 5558 5559 /* The advertising reports cannot be merged, so clear 5560 * the pending report and send out a device found event. 5561 */ 5562 clear_pending_adv_report(hdev); 5563 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL, 5564 rssi, flags, data, len, NULL, 0); 5565 return; 5566 } 5567 5568 /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and 5569 * the new event is a SCAN_RSP. We can therefore proceed with 5570 * sending a merged device found event. 5571 */ 5572 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK, 5573 d->last_adv_addr_type, NULL, rssi, d->last_adv_flags, 5574 d->last_adv_data, d->last_adv_data_len, data, len); 5575 clear_pending_adv_report(hdev); 5576 } 5577 5578 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb) 5579 { 5580 u8 num_reports = skb->data[0]; 5581 void *ptr = &skb->data[1]; 5582 5583 hci_dev_lock(hdev); 5584 5585 while (num_reports--) { 5586 struct hci_ev_le_advertising_info *ev = ptr; 5587 s8 rssi; 5588 5589 if (ev->length <= HCI_MAX_AD_LENGTH) { 5590 rssi = ev->data[ev->length]; 5591 process_adv_report(hdev, ev->evt_type, &ev->bdaddr, 5592 ev->bdaddr_type, NULL, 0, rssi, 5593 ev->data, ev->length); 5594 } else { 5595 bt_dev_err(hdev, "Dropping invalid advertising data"); 5596 } 5597 5598 ptr += sizeof(*ev) + ev->length + 1; 5599 } 5600 5601 hci_dev_unlock(hdev); 5602 } 5603 5604 static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type) 5605 { 5606 if (evt_type & LE_EXT_ADV_LEGACY_PDU) { 5607 switch (evt_type) { 5608 case LE_LEGACY_ADV_IND: 5609 return LE_ADV_IND; 5610 case LE_LEGACY_ADV_DIRECT_IND: 5611 return LE_ADV_DIRECT_IND; 5612 case LE_LEGACY_ADV_SCAN_IND: 5613 return LE_ADV_SCAN_IND; 5614 case LE_LEGACY_NONCONN_IND: 5615 return LE_ADV_NONCONN_IND; 5616 case LE_LEGACY_SCAN_RSP_ADV: 5617 case LE_LEGACY_SCAN_RSP_ADV_SCAN: 5618 return LE_ADV_SCAN_RSP; 5619 } 5620 5621 goto invalid; 5622 } 5623 5624 if (evt_type & LE_EXT_ADV_CONN_IND) { 5625 if (evt_type & LE_EXT_ADV_DIRECT_IND) 5626 return LE_ADV_DIRECT_IND; 5627 5628 return LE_ADV_IND; 5629 } 5630 5631 if (evt_type & LE_EXT_ADV_SCAN_RSP) 5632 return LE_ADV_SCAN_RSP; 5633 5634 if (evt_type & LE_EXT_ADV_SCAN_IND) 5635 return LE_ADV_SCAN_IND; 5636 5637 if (evt_type == LE_EXT_ADV_NON_CONN_IND || 5638 evt_type & LE_EXT_ADV_DIRECT_IND) 5639 return LE_ADV_NONCONN_IND; 5640 5641 invalid: 5642 bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x", 5643 evt_type); 5644 5645 return LE_ADV_INVALID; 5646 } 5647 5648 static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb) 5649 { 5650 u8 num_reports = skb->data[0]; 5651 void *ptr = &skb->data[1]; 5652 5653 hci_dev_lock(hdev); 5654 5655 while (num_reports--) { 5656 struct hci_ev_le_ext_adv_report *ev = ptr; 5657 u8 legacy_evt_type; 5658 u16 evt_type; 5659 5660 evt_type = __le16_to_cpu(ev->evt_type); 5661 legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type); 5662 if (legacy_evt_type != LE_ADV_INVALID) { 5663 process_adv_report(hdev, legacy_evt_type, &ev->bdaddr, 5664 ev->bdaddr_type, NULL, 0, ev->rssi, 5665 ev->data, ev->length); 5666 } 5667 5668 ptr += sizeof(*ev) + ev->length; 5669 } 5670 5671 hci_dev_unlock(hdev); 5672 } 5673 5674 static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev, 5675 struct sk_buff *skb) 5676 { 5677 struct hci_ev_le_remote_feat_complete *ev = (void *)skb->data; 5678 struct hci_conn *conn; 5679 5680 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 5681 5682 hci_dev_lock(hdev); 5683 5684 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 5685 if (conn) { 5686 if (!ev->status) 5687 memcpy(conn->features[0], ev->features, 8); 5688 5689 if (conn->state == BT_CONFIG) { 5690 __u8 status; 5691 5692 /* If the local controller supports slave-initiated 5693 * features exchange, but the remote controller does 5694 * not, then it is possible that the error code 0x1a 5695 * for unsupported remote feature gets returned. 5696 * 5697 * In this specific case, allow the connection to 5698 * transition into connected state and mark it as 5699 * successful. 5700 */ 5701 if ((hdev->le_features[0] & HCI_LE_SLAVE_FEATURES) && 5702 !conn->out && ev->status == 0x1a) 5703 status = 0x00; 5704 else 5705 status = ev->status; 5706 5707 conn->state = BT_CONNECTED; 5708 hci_connect_cfm(conn, status); 5709 hci_conn_drop(conn); 5710 } 5711 } 5712 5713 hci_dev_unlock(hdev); 5714 } 5715 5716 static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb) 5717 { 5718 struct hci_ev_le_ltk_req *ev = (void *) skb->data; 5719 struct hci_cp_le_ltk_reply cp; 5720 struct hci_cp_le_ltk_neg_reply neg; 5721 struct hci_conn *conn; 5722 struct smp_ltk *ltk; 5723 5724 BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle)); 5725 5726 hci_dev_lock(hdev); 5727 5728 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 5729 if (conn == NULL) 5730 goto not_found; 5731 5732 ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role); 5733 if (!ltk) 5734 goto not_found; 5735 5736 if (smp_ltk_is_sc(ltk)) { 5737 /* With SC both EDiv and Rand are set to zero */ 5738 if (ev->ediv || ev->rand) 5739 goto not_found; 5740 } else { 5741 /* For non-SC keys check that EDiv and Rand match */ 5742 if (ev->ediv != ltk->ediv || ev->rand != ltk->rand) 5743 goto not_found; 5744 } 5745 5746 memcpy(cp.ltk, ltk->val, ltk->enc_size); 5747 memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size); 5748 cp.handle = cpu_to_le16(conn->handle); 5749 5750 conn->pending_sec_level = smp_ltk_sec_level(ltk); 5751 5752 conn->enc_key_size = ltk->enc_size; 5753 5754 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp); 5755 5756 /* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a 5757 * temporary key used to encrypt a connection following 5758 * pairing. It is used during the Encrypted Session Setup to 5759 * distribute the keys. Later, security can be re-established 5760 * using a distributed LTK. 5761 */ 5762 if (ltk->type == SMP_STK) { 5763 set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags); 5764 list_del_rcu(<k->list); 5765 kfree_rcu(ltk, rcu); 5766 } else { 5767 clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags); 5768 } 5769 5770 hci_dev_unlock(hdev); 5771 5772 return; 5773 5774 not_found: 5775 neg.handle = ev->handle; 5776 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg); 5777 hci_dev_unlock(hdev); 5778 } 5779 5780 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle, 5781 u8 reason) 5782 { 5783 struct hci_cp_le_conn_param_req_neg_reply cp; 5784 5785 cp.handle = cpu_to_le16(handle); 5786 cp.reason = reason; 5787 5788 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp), 5789 &cp); 5790 } 5791 5792 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev, 5793 struct sk_buff *skb) 5794 { 5795 struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data; 5796 struct hci_cp_le_conn_param_req_reply cp; 5797 struct hci_conn *hcon; 5798 u16 handle, min, max, latency, timeout; 5799 5800 handle = le16_to_cpu(ev->handle); 5801 min = le16_to_cpu(ev->interval_min); 5802 max = le16_to_cpu(ev->interval_max); 5803 latency = le16_to_cpu(ev->latency); 5804 timeout = le16_to_cpu(ev->timeout); 5805 5806 hcon = hci_conn_hash_lookup_handle(hdev, handle); 5807 if (!hcon || hcon->state != BT_CONNECTED) 5808 return send_conn_param_neg_reply(hdev, handle, 5809 HCI_ERROR_UNKNOWN_CONN_ID); 5810 5811 if (hci_check_conn_params(min, max, latency, timeout)) 5812 return send_conn_param_neg_reply(hdev, handle, 5813 HCI_ERROR_INVALID_LL_PARAMS); 5814 5815 if (hcon->role == HCI_ROLE_MASTER) { 5816 struct hci_conn_params *params; 5817 u8 store_hint; 5818 5819 hci_dev_lock(hdev); 5820 5821 params = hci_conn_params_lookup(hdev, &hcon->dst, 5822 hcon->dst_type); 5823 if (params) { 5824 params->conn_min_interval = min; 5825 params->conn_max_interval = max; 5826 params->conn_latency = latency; 5827 params->supervision_timeout = timeout; 5828 store_hint = 0x01; 5829 } else{ 5830 store_hint = 0x00; 5831 } 5832 5833 hci_dev_unlock(hdev); 5834 5835 mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type, 5836 store_hint, min, max, latency, timeout); 5837 } 5838 5839 cp.handle = ev->handle; 5840 cp.interval_min = ev->interval_min; 5841 cp.interval_max = ev->interval_max; 5842 cp.latency = ev->latency; 5843 cp.timeout = ev->timeout; 5844 cp.min_ce_len = 0; 5845 cp.max_ce_len = 0; 5846 5847 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp); 5848 } 5849 5850 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, 5851 struct sk_buff *skb) 5852 { 5853 u8 num_reports = skb->data[0]; 5854 void *ptr = &skb->data[1]; 5855 5856 hci_dev_lock(hdev); 5857 5858 while (num_reports--) { 5859 struct hci_ev_le_direct_adv_info *ev = ptr; 5860 5861 process_adv_report(hdev, ev->evt_type, &ev->bdaddr, 5862 ev->bdaddr_type, &ev->direct_addr, 5863 ev->direct_addr_type, ev->rssi, NULL, 0); 5864 5865 ptr += sizeof(*ev); 5866 } 5867 5868 hci_dev_unlock(hdev); 5869 } 5870 5871 static void hci_le_phy_update_evt(struct hci_dev *hdev, struct sk_buff *skb) 5872 { 5873 struct hci_ev_le_phy_update_complete *ev = (void *) skb->data; 5874 struct hci_conn *conn; 5875 5876 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 5877 5878 if (!ev->status) 5879 return; 5880 5881 hci_dev_lock(hdev); 5882 5883 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 5884 if (!conn) 5885 goto unlock; 5886 5887 conn->le_tx_phy = ev->tx_phy; 5888 conn->le_rx_phy = ev->rx_phy; 5889 5890 unlock: 5891 hci_dev_unlock(hdev); 5892 } 5893 5894 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb) 5895 { 5896 struct hci_ev_le_meta *le_ev = (void *) skb->data; 5897 5898 skb_pull(skb, sizeof(*le_ev)); 5899 5900 switch (le_ev->subevent) { 5901 case HCI_EV_LE_CONN_COMPLETE: 5902 hci_le_conn_complete_evt(hdev, skb); 5903 break; 5904 5905 case HCI_EV_LE_CONN_UPDATE_COMPLETE: 5906 hci_le_conn_update_complete_evt(hdev, skb); 5907 break; 5908 5909 case HCI_EV_LE_ADVERTISING_REPORT: 5910 hci_le_adv_report_evt(hdev, skb); 5911 break; 5912 5913 case HCI_EV_LE_REMOTE_FEAT_COMPLETE: 5914 hci_le_remote_feat_complete_evt(hdev, skb); 5915 break; 5916 5917 case HCI_EV_LE_LTK_REQ: 5918 hci_le_ltk_request_evt(hdev, skb); 5919 break; 5920 5921 case HCI_EV_LE_REMOTE_CONN_PARAM_REQ: 5922 hci_le_remote_conn_param_req_evt(hdev, skb); 5923 break; 5924 5925 case HCI_EV_LE_DIRECT_ADV_REPORT: 5926 hci_le_direct_adv_report_evt(hdev, skb); 5927 break; 5928 5929 case HCI_EV_LE_PHY_UPDATE_COMPLETE: 5930 hci_le_phy_update_evt(hdev, skb); 5931 break; 5932 5933 case HCI_EV_LE_EXT_ADV_REPORT: 5934 hci_le_ext_adv_report_evt(hdev, skb); 5935 break; 5936 5937 case HCI_EV_LE_ENHANCED_CONN_COMPLETE: 5938 hci_le_enh_conn_complete_evt(hdev, skb); 5939 break; 5940 5941 case HCI_EV_LE_EXT_ADV_SET_TERM: 5942 hci_le_ext_adv_term_evt(hdev, skb); 5943 break; 5944 5945 default: 5946 break; 5947 } 5948 } 5949 5950 static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode, 5951 u8 event, struct sk_buff *skb) 5952 { 5953 struct hci_ev_cmd_complete *ev; 5954 struct hci_event_hdr *hdr; 5955 5956 if (!skb) 5957 return false; 5958 5959 if (skb->len < sizeof(*hdr)) { 5960 bt_dev_err(hdev, "too short HCI event"); 5961 return false; 5962 } 5963 5964 hdr = (void *) skb->data; 5965 skb_pull(skb, HCI_EVENT_HDR_SIZE); 5966 5967 if (event) { 5968 if (hdr->evt != event) 5969 return false; 5970 return true; 5971 } 5972 5973 /* Check if request ended in Command Status - no way to retreive 5974 * any extra parameters in this case. 5975 */ 5976 if (hdr->evt == HCI_EV_CMD_STATUS) 5977 return false; 5978 5979 if (hdr->evt != HCI_EV_CMD_COMPLETE) { 5980 bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)", 5981 hdr->evt); 5982 return false; 5983 } 5984 5985 if (skb->len < sizeof(*ev)) { 5986 bt_dev_err(hdev, "too short cmd_complete event"); 5987 return false; 5988 } 5989 5990 ev = (void *) skb->data; 5991 skb_pull(skb, sizeof(*ev)); 5992 5993 if (opcode != __le16_to_cpu(ev->opcode)) { 5994 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode, 5995 __le16_to_cpu(ev->opcode)); 5996 return false; 5997 } 5998 5999 return true; 6000 } 6001 6002 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) 6003 { 6004 struct hci_event_hdr *hdr = (void *) skb->data; 6005 hci_req_complete_t req_complete = NULL; 6006 hci_req_complete_skb_t req_complete_skb = NULL; 6007 struct sk_buff *orig_skb = NULL; 6008 u8 status = 0, event = hdr->evt, req_evt = 0; 6009 u16 opcode = HCI_OP_NOP; 6010 6011 if (!event) { 6012 bt_dev_warn(hdev, "Received unexpected HCI Event 00000000"); 6013 goto done; 6014 } 6015 6016 if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) { 6017 struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data; 6018 opcode = __le16_to_cpu(cmd_hdr->opcode); 6019 hci_req_cmd_complete(hdev, opcode, status, &req_complete, 6020 &req_complete_skb); 6021 req_evt = event; 6022 } 6023 6024 /* If it looks like we might end up having to call 6025 * req_complete_skb, store a pristine copy of the skb since the 6026 * various handlers may modify the original one through 6027 * skb_pull() calls, etc. 6028 */ 6029 if (req_complete_skb || event == HCI_EV_CMD_STATUS || 6030 event == HCI_EV_CMD_COMPLETE) 6031 orig_skb = skb_clone(skb, GFP_KERNEL); 6032 6033 skb_pull(skb, HCI_EVENT_HDR_SIZE); 6034 6035 switch (event) { 6036 case HCI_EV_INQUIRY_COMPLETE: 6037 hci_inquiry_complete_evt(hdev, skb); 6038 break; 6039 6040 case HCI_EV_INQUIRY_RESULT: 6041 hci_inquiry_result_evt(hdev, skb); 6042 break; 6043 6044 case HCI_EV_CONN_COMPLETE: 6045 hci_conn_complete_evt(hdev, skb); 6046 break; 6047 6048 case HCI_EV_CONN_REQUEST: 6049 hci_conn_request_evt(hdev, skb); 6050 break; 6051 6052 case HCI_EV_DISCONN_COMPLETE: 6053 hci_disconn_complete_evt(hdev, skb); 6054 break; 6055 6056 case HCI_EV_AUTH_COMPLETE: 6057 hci_auth_complete_evt(hdev, skb); 6058 break; 6059 6060 case HCI_EV_REMOTE_NAME: 6061 hci_remote_name_evt(hdev, skb); 6062 break; 6063 6064 case HCI_EV_ENCRYPT_CHANGE: 6065 hci_encrypt_change_evt(hdev, skb); 6066 break; 6067 6068 case HCI_EV_CHANGE_LINK_KEY_COMPLETE: 6069 hci_change_link_key_complete_evt(hdev, skb); 6070 break; 6071 6072 case HCI_EV_REMOTE_FEATURES: 6073 hci_remote_features_evt(hdev, skb); 6074 break; 6075 6076 case HCI_EV_CMD_COMPLETE: 6077 hci_cmd_complete_evt(hdev, skb, &opcode, &status, 6078 &req_complete, &req_complete_skb); 6079 break; 6080 6081 case HCI_EV_CMD_STATUS: 6082 hci_cmd_status_evt(hdev, skb, &opcode, &status, &req_complete, 6083 &req_complete_skb); 6084 break; 6085 6086 case HCI_EV_HARDWARE_ERROR: 6087 hci_hardware_error_evt(hdev, skb); 6088 break; 6089 6090 case HCI_EV_ROLE_CHANGE: 6091 hci_role_change_evt(hdev, skb); 6092 break; 6093 6094 case HCI_EV_NUM_COMP_PKTS: 6095 hci_num_comp_pkts_evt(hdev, skb); 6096 break; 6097 6098 case HCI_EV_MODE_CHANGE: 6099 hci_mode_change_evt(hdev, skb); 6100 break; 6101 6102 case HCI_EV_PIN_CODE_REQ: 6103 hci_pin_code_request_evt(hdev, skb); 6104 break; 6105 6106 case HCI_EV_LINK_KEY_REQ: 6107 hci_link_key_request_evt(hdev, skb); 6108 break; 6109 6110 case HCI_EV_LINK_KEY_NOTIFY: 6111 hci_link_key_notify_evt(hdev, skb); 6112 break; 6113 6114 case HCI_EV_CLOCK_OFFSET: 6115 hci_clock_offset_evt(hdev, skb); 6116 break; 6117 6118 case HCI_EV_PKT_TYPE_CHANGE: 6119 hci_pkt_type_change_evt(hdev, skb); 6120 break; 6121 6122 case HCI_EV_PSCAN_REP_MODE: 6123 hci_pscan_rep_mode_evt(hdev, skb); 6124 break; 6125 6126 case HCI_EV_INQUIRY_RESULT_WITH_RSSI: 6127 hci_inquiry_result_with_rssi_evt(hdev, skb); 6128 break; 6129 6130 case HCI_EV_REMOTE_EXT_FEATURES: 6131 hci_remote_ext_features_evt(hdev, skb); 6132 break; 6133 6134 case HCI_EV_SYNC_CONN_COMPLETE: 6135 hci_sync_conn_complete_evt(hdev, skb); 6136 break; 6137 6138 case HCI_EV_EXTENDED_INQUIRY_RESULT: 6139 hci_extended_inquiry_result_evt(hdev, skb); 6140 break; 6141 6142 case HCI_EV_KEY_REFRESH_COMPLETE: 6143 hci_key_refresh_complete_evt(hdev, skb); 6144 break; 6145 6146 case HCI_EV_IO_CAPA_REQUEST: 6147 hci_io_capa_request_evt(hdev, skb); 6148 break; 6149 6150 case HCI_EV_IO_CAPA_REPLY: 6151 hci_io_capa_reply_evt(hdev, skb); 6152 break; 6153 6154 case HCI_EV_USER_CONFIRM_REQUEST: 6155 hci_user_confirm_request_evt(hdev, skb); 6156 break; 6157 6158 case HCI_EV_USER_PASSKEY_REQUEST: 6159 hci_user_passkey_request_evt(hdev, skb); 6160 break; 6161 6162 case HCI_EV_USER_PASSKEY_NOTIFY: 6163 hci_user_passkey_notify_evt(hdev, skb); 6164 break; 6165 6166 case HCI_EV_KEYPRESS_NOTIFY: 6167 hci_keypress_notify_evt(hdev, skb); 6168 break; 6169 6170 case HCI_EV_SIMPLE_PAIR_COMPLETE: 6171 hci_simple_pair_complete_evt(hdev, skb); 6172 break; 6173 6174 case HCI_EV_REMOTE_HOST_FEATURES: 6175 hci_remote_host_features_evt(hdev, skb); 6176 break; 6177 6178 case HCI_EV_LE_META: 6179 hci_le_meta_evt(hdev, skb); 6180 break; 6181 6182 case HCI_EV_REMOTE_OOB_DATA_REQUEST: 6183 hci_remote_oob_data_request_evt(hdev, skb); 6184 break; 6185 6186 #if IS_ENABLED(CONFIG_BT_HS) 6187 case HCI_EV_CHANNEL_SELECTED: 6188 hci_chan_selected_evt(hdev, skb); 6189 break; 6190 6191 case HCI_EV_PHY_LINK_COMPLETE: 6192 hci_phy_link_complete_evt(hdev, skb); 6193 break; 6194 6195 case HCI_EV_LOGICAL_LINK_COMPLETE: 6196 hci_loglink_complete_evt(hdev, skb); 6197 break; 6198 6199 case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE: 6200 hci_disconn_loglink_complete_evt(hdev, skb); 6201 break; 6202 6203 case HCI_EV_DISCONN_PHY_LINK_COMPLETE: 6204 hci_disconn_phylink_complete_evt(hdev, skb); 6205 break; 6206 #endif 6207 6208 case HCI_EV_NUM_COMP_BLOCKS: 6209 hci_num_comp_blocks_evt(hdev, skb); 6210 break; 6211 6212 case HCI_EV_VENDOR: 6213 msft_vendor_evt(hdev, skb); 6214 break; 6215 6216 default: 6217 BT_DBG("%s event 0x%2.2x", hdev->name, event); 6218 break; 6219 } 6220 6221 if (req_complete) { 6222 req_complete(hdev, status, opcode); 6223 } else if (req_complete_skb) { 6224 if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) { 6225 kfree_skb(orig_skb); 6226 orig_skb = NULL; 6227 } 6228 req_complete_skb(hdev, status, opcode, orig_skb); 6229 } 6230 6231 done: 6232 kfree_skb(orig_skb); 6233 kfree_skb(skb); 6234 hdev->stat.evt_rx++; 6235 } 6236