/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI event handling. */

#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "a2mp.h"
#include "amp.h"
#include "smp.h"

/* 128-bit all-zero value (two 8-byte halves concatenated). */
#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"

/* Handle HCI Event packets */

/* Command Complete: Inquiry Cancel.
 * On success clear HCI_INQUIRY, wake any waiters on that bit and mark
 * discovery as stopped, then kick any pending connection attempts.
 */
static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* First byte of a Command Complete payload is the status code. */
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	clear_bit(HCI_INQUIRY, &hdev->flags);
	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}

/* Command Complete: Periodic Inquiry Mode — record that it is active. */
static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
}

/* Command Complete: Exit Periodic Inquiry Mode — clear the flag and
 * retry any connections that were deferred while inquiring.
 */
static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);

	hci_conn_check_pending(hdev);
}

/* Command Complete: Remote Name Request Cancel — nothing to update. */
static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}

/* Command Complete: Role Discovery — cache the connection's role. */
static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_role_discovery *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->role = rp->role;

	hci_dev_unlock(hdev);
}

/* Command Complete: Read Link Policy — cache per-connection policy. */
static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = __le16_to_cpu(rp->policy);

	hci_dev_unlock(hdev);
}

/* Command Complete: Write Link Policy.
 * The response carries no policy value, so recover it from the command
 * we sent (policy sits 2 bytes into the command parameters, after the
 * connection handle).
 */
static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}

/* Command Complete: Read Default Link Policy — cache device default. */
static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_def_link_policy *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->link_policy = __le16_to_cpu(rp->policy);
}

/* Command Complete: Write Default Link Policy.
 * Value is not echoed back; read it from the sent command parameters.
 */
static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
	if (!sent)
		return;

	hdev->link_policy = get_unaligned_le16(sent);
}

/* Command Complete: Reset — drop volatile state back to power-on
 * defaults regardless of what was configured before.
 */
static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *)
skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* HCI_RESET is cleared even on failure so a retry is possible. */
	clear_bit(HCI_RESET, &hdev->flags);

	if (status)
		return;

	/* Reset all non-persistent flags */
	hci_dev_clear_volatile_flags(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;

	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
	hdev->scan_rsp_data_len = 0;

	hdev->le_scan_type = LE_SCAN_PASSIVE;

	hdev->ssp_debug_mode = 0;

	hci_bdaddr_list_clear(&hdev->le_white_list);
}

/* Command Complete: Read Stored Link Key.
 * Only meaningful when we asked to read all keys (read_all == 0x01).
 * NOTE(review): the Bluetooth spec defines Max_Num_Keys and Num_Keys_Read
 * as 2 octets; these are stored without le16_to_cpu() here — verify the
 * hci_rp_read_stored_link_key field widths (upstream later switched them
 * to __le16 with explicit conversion).
 */
static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
	struct hci_cp_read_stored_link_key *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
	if (!sent)
		return;

	if (!rp->status && sent->read_all == 0x01) {
		hdev->stored_max_keys = rp->max_keys;
		hdev->stored_num_keys = rp->num_keys;
	}
}

/* Command Complete: Delete Stored Link Key — decrement the cached
 * count, clamping at zero.
 * NOTE(review): Num_Keys_Deleted is 2 octets per spec; see endianness
 * note on hci_cc_read_stored_link_key above.
 */
static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (rp->num_keys <= hdev->stored_num_keys)
		hdev->stored_num_keys -= rp->num_keys;
	else
		hdev->stored_num_keys = 0;
}

/* Command Complete: Write Local Name.
 * With the management interface active, completion is reported there;
 * otherwise the name from the sent command is cached on success.
 */
static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev,
HCI_MGMT))
		mgmt_set_local_name_complete(hdev, sent, status);
	else if (!status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);
}

/* Command Complete: Read Local Name — only cached during initial
 * setup/config, afterwards userspace owns the name.
 */
static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_name *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG))
		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
}

/* Command Complete: Write Authentication Enable — mirror the new mode
 * into HCI_AUTH and notify mgmt if it is in use.
 */
static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	/* mgmt is told about the outcome even on failure. */
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_auth_enable_complete(hdev, status);

	hci_dev_unlock(hdev);
}

/* Command Complete: Write Encryption Mode — mirror into HCI_ENCRYPT. */
static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u8 param;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
	if (!sent)
		return;

	param = *((__u8 *) sent);

	if (param)
		set_bit(HCI_ENCRYPT, &hdev->flags);
	else
		clear_bit(HCI_ENCRYPT, &hdev->flags);
}

/* Command Complete: Write Scan Enable — mirror inquiry/page scan bits
 * into HCI_ISCAN/HCI_PSCAN; on failure reset the discoverable timeout.
 */
static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u8 param;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev,
HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return;

	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	if (status) {
		hdev->discov_timeout = 0;
		goto done;
	}

	if (param & SCAN_INQUIRY)
		set_bit(HCI_ISCAN, &hdev->flags);
	else
		clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_PAGE)
		set_bit(HCI_PSCAN, &hdev->flags);
	else
		clear_bit(HCI_PSCAN, &hdev->flags);

done:
	hci_dev_unlock(hdev);
}

/* Command Complete: Read Class of Device — cache the 3-byte CoD. */
static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_class_of_dev *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->dev_class, rp->dev_class, 3);

	BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
	       hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
}

/* Command Complete: Write Class of Device — cache on success and
 * report to mgmt when active.
 */
static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (status == 0)
		memcpy(hdev->dev_class, sent, 3);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_class_of_dev_complete(hdev, sent, status);

	hci_dev_unlock(hdev);
}

/* Command Complete: Read Voice Setting — cache and notify the driver
 * only when the value actually changed.
 */
static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_voice_setting *rp = (void *) skb->data;
	__u16 setting;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	setting = __le16_to_cpu(rp->voice_setting);

	if (hdev->voice_setting == setting)
		return;

	hdev->voice_setting = setting;

	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
}

/* Command Complete: Write Voice Setting.
 * Value is not echoed; recover it from the sent command and notify the
 * driver only on change.
 */
static void hci_cc_write_voice_setting(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u16 setting;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
	if (!sent)
		return;

	setting = get_unaligned_le16(sent);

	if (hdev->voice_setting == setting)
		return;

	hdev->voice_setting = setting;

	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
}

/* Command Complete: Read Number of Supported IAC — cache the count. */
static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->num_iac = rp->num_iac;

	BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
}

/* Command Complete: Write Simple Pairing Mode.
 * Mirror the host SSP feature bit; with mgmt active report there,
 * otherwise track the mode in HCI_SSP_ENABLED.
 */
static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_write_ssp_mode *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		if (sent->mode)
			hdev->features[1][0] |= LMP_HOST_SSP;
		else
			hdev->features[1][0] &= ~LMP_HOST_SSP;
	}

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_ssp_enable_complete(hdev, sent->mode, status);
	else if (!status) {
		if (sent->mode)
			hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
	}

	hci_dev_unlock(hdev);
}

/* Command Complete: Write Secure Connections Host Support — mirror the
 * host SC feature bit and, without mgmt, the HCI_SC_ENABLED flag.
 */
static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	struct hci_cp_write_sc_support *sent;

	BT_DBG("%s status
0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		if (sent->support)
			hdev->features[1][0] |= LMP_HOST_SC;
		else
			hdev->features[1][0] &= ~LMP_HOST_SC;
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT) && !status) {
		if (sent->support)
			hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
	}

	hci_dev_unlock(hdev);
}

/* Command Complete: Read Local Version Information — cached only while
 * the controller is being set up or reconfigured.
 */
static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_version *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hdev->hci_ver = rp->hci_ver;
		hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
		hdev->lmp_ver = rp->lmp_ver;
		hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
	}
}

/* Command Complete: Read Local Supported Commands — cached only during
 * setup/config, like the version info above.
 */
static void hci_cc_read_local_commands(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_commands *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG))
		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
}

/* Command Complete: Read Local Supported Features.
 * Cache page 0 of the feature mask and derive the usable ACL/SCO/eSCO
 * packet types from the advertised LMP features.
 */
static void hci_cc_read_local_features(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device.
	 */

	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
}

/* Command Complete: Read Local Extended Features — cache the requested
 * feature page and track the highest page the controller reports.
 */
static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hdev->max_page < rp->max_page)
		hdev->max_page = rp->max_page;

	/* Guard against an out-of-range page index from the controller. */
	if (rp->page < HCI_MAX_PAGES)
		memcpy(hdev->features[rp->page], rp->features, 8);
}

/* Command Complete: Read Flow Control Mode — cache the mode. */
static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->flow_ctl_mode = rp->mode;
}

/* Command Complete: Read Buffer Size — record ACL/SCO MTUs and packet
 * counts; quirky controllers get fixed SCO values.
 */
static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	/* Some controllers report bogus SCO sizes; override with sane ones. */
	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
		hdev->sco_mtu = 64;
		hdev->sco_pkts = 8;
	}

	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
}

/* Command Complete: Read BD_ADDR — cache the public address during init
 * and remember the factory address while in setup.
 */
static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_bd_addr *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_INIT, &hdev->flags))
		bacpy(&hdev->bdaddr, &rp->bdaddr);

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		bacpy(&hdev->setup_addr, &rp->bdaddr);
}

/* Command Complete: Read Page Scan Activity — cached only during init. */
static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_INIT, &hdev->flags)) {
		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
		hdev->page_scan_window = __le16_to_cpu(rp->window);
	}
}

/* Command Complete: Write Page Scan Activity — mirror the values we
 * sent into the cached interval/window.
 */
static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	struct hci_cp_write_page_scan_activity *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
	if (!sent)
		return;

	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
	hdev->page_scan_window = __le16_to_cpu(sent->window);
}

/* Command Complete: Read Page Scan Type — cached only during init. */
static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_type *rp = (void
*) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->page_scan_type = rp->type;
}

/* Command Complete: Write Page Scan Type — cache the value we sent. */
static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	u8 *type;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
	if (type)
		hdev->page_scan_type = *type;
}

/* Command Complete: Read Data Block Size (block-based flow control). */
static void hci_cc_read_data_block_size(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_data_block_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
	hdev->block_len = __le16_to_cpu(rp->block_len);
	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);

	hdev->block_cnt = hdev->num_blocks;

	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
	       hdev->block_cnt, hdev->block_len);
}

/* Command Complete: Read Clock.
 * which == 0x00 means local clock (stored on hdev), otherwise it is a
 * piconet clock stored on the matching connection.
 */
static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_clock *rp = (void *) skb->data;
	struct hci_cp_read_clock *cp;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	/* Response is variable-trust; reject short packets up front. */
	if (skb->len < sizeof(*rp))
		return;

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!cp)
		goto unlock;

	if (cp->which == 0x00) {
		hdev->clock = le32_to_cpu(rp->clock);
		goto unlock;
	}

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		conn->clock = le32_to_cpu(rp->clock);
		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
	}

unlock:
	hci_dev_unlock(hdev);
}

/* Command Complete: Read Local AMP Info — cache AMP controller
 * capabilities and always answer the pending A2MP Get Info request.
 */
static void hci_cc_read_local_amp_info(struct hci_dev
*hdev, 819 struct sk_buff *skb) 820 { 821 struct hci_rp_read_local_amp_info *rp = (void *) skb->data; 822 823 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 824 825 if (rp->status) 826 goto a2mp_rsp; 827 828 hdev->amp_status = rp->amp_status; 829 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw); 830 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw); 831 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency); 832 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu); 833 hdev->amp_type = rp->amp_type; 834 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap); 835 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size); 836 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to); 837 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to); 838 839 a2mp_rsp: 840 a2mp_send_getinfo_rsp(hdev); 841 } 842 843 static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev, 844 struct sk_buff *skb) 845 { 846 struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data; 847 struct amp_assoc *assoc = &hdev->loc_assoc; 848 size_t rem_len, frag_len; 849 850 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 851 852 if (rp->status) 853 goto a2mp_rsp; 854 855 frag_len = skb->len - sizeof(*rp); 856 rem_len = __le16_to_cpu(rp->rem_len); 857 858 if (rem_len > frag_len) { 859 BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len); 860 861 memcpy(assoc->data + assoc->offset, rp->frag, frag_len); 862 assoc->offset += frag_len; 863 864 /* Read other fragments */ 865 amp_read_loc_assoc_frag(hdev, rp->phy_handle); 866 867 return; 868 } 869 870 memcpy(assoc->data + assoc->offset, rp->frag, rem_len); 871 assoc->len = assoc->offset + rem_len; 872 assoc->offset = 0; 873 874 a2mp_rsp: 875 /* Send A2MP Rsp when all fragments are received */ 876 a2mp_send_getampassoc_rsp(hdev, rp->status); 877 a2mp_send_create_phy_link_req(hdev, rp->status); 878 } 879 880 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, 881 struct sk_buff *skb) 882 { 883 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) 
skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->inq_tx_power = rp->tx_power;
}

/* Command Complete: PIN Code Request Reply.
 * Report to mgmt, then on success cache the PIN length on the matching
 * ACL connection (needed for link-key type decisions later).
 */
static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
}

/* Command Complete: PIN Code Request Negative Reply — mgmt report only. */
static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
						 rp->status);

	hci_dev_unlock(hdev);
}

/* Command Complete: LE Read Buffer Size — record LE ACL MTU and packet
 * count and initialise the available-packet counter.
 */
static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
	hdev->le_pkts = rp->le_max_pkt;

	hdev->le_cnt = hdev->le_pkts;

	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
}

/* Command Complete: LE Read Local Supported Features — cache the mask. */
static void hci_cc_le_read_local_features(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_le_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status
)
		return;

	memcpy(hdev->le_features, rp->features, 8);
}

/* Command Complete: LE Read Advertising Channel TX Power — cache it. */
static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->adv_tx_power = rp->tx_power;
}

/* Command Complete: User Confirmation Request Reply — mgmt report. */
static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
						 rp->status);

	hci_dev_unlock(hdev);
}

/* Command Complete: User Confirmation Negative Reply — mgmt report. */
static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}

/* Command Complete: User Passkey Request Reply — mgmt report.
 * (Response layout matches the confirm reply, hence the shared struct.)
 */
static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
						 0, rp->status);

	hci_dev_unlock(hdev);
}

/* Command Complete: User Passkey Negative Reply — mgmt report. */
static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}

/* Command Complete: Read Local OOB Data — debug trace only here;
 * consumers handle the payload elsewhere.
 */
static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
}

/* Command Complete: Read Local OOB Extended Data — debug trace only. */
static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
}

/* Command Complete: LE Set Random Address — cache the address we sent. */
static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	bdaddr_t *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	bacpy(&hdev->random_addr, sent);

	hci_dev_unlock(hdev);
}

/* Command Complete: LE Set Advertise Enable — track HCI_LE_ADV and arm
 * the connection timeout when advertising for an incoming connection.
 */
static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	/* If we're doing connection initiation as peripheral. Set a
	 * timeout in case something goes wrong.
1096 */ 1097 if (*sent) { 1098 struct hci_conn *conn; 1099 1100 hci_dev_set_flag(hdev, HCI_LE_ADV); 1101 1102 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT); 1103 if (conn) 1104 queue_delayed_work(hdev->workqueue, 1105 &conn->le_conn_timeout, 1106 conn->conn_timeout); 1107 } else { 1108 hci_dev_clear_flag(hdev, HCI_LE_ADV); 1109 } 1110 1111 hci_dev_unlock(hdev); 1112 } 1113 1114 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb) 1115 { 1116 struct hci_cp_le_set_scan_param *cp; 1117 __u8 status = *((__u8 *) skb->data); 1118 1119 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1120 1121 if (status) 1122 return; 1123 1124 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM); 1125 if (!cp) 1126 return; 1127 1128 hci_dev_lock(hdev); 1129 1130 hdev->le_scan_type = cp->type; 1131 1132 hci_dev_unlock(hdev); 1133 } 1134 1135 static bool has_pending_adv_report(struct hci_dev *hdev) 1136 { 1137 struct discovery_state *d = &hdev->discovery; 1138 1139 return bacmp(&d->last_adv_addr, BDADDR_ANY); 1140 } 1141 1142 static void clear_pending_adv_report(struct hci_dev *hdev) 1143 { 1144 struct discovery_state *d = &hdev->discovery; 1145 1146 bacpy(&d->last_adv_addr, BDADDR_ANY); 1147 d->last_adv_data_len = 0; 1148 } 1149 1150 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr, 1151 u8 bdaddr_type, s8 rssi, u32 flags, 1152 u8 *data, u8 len) 1153 { 1154 struct discovery_state *d = &hdev->discovery; 1155 1156 bacpy(&d->last_adv_addr, bdaddr); 1157 d->last_adv_addr_type = bdaddr_type; 1158 d->last_adv_rssi = rssi; 1159 d->last_adv_flags = flags; 1160 memcpy(d->last_adv_data, data, len); 1161 d->last_adv_data_len = len; 1162 } 1163 1164 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev, 1165 struct sk_buff *skb) 1166 { 1167 struct hci_cp_le_set_scan_enable *cp; 1168 __u8 status = *((__u8 *) skb->data); 1169 1170 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1171 1172 if (status) 1173 return; 1174 1175 cp = 
hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	switch (cp->enable) {
	case LE_SCAN_ENABLE:
		hci_dev_set_flag(hdev, HCI_LE_SCAN);
		/* Active scans build reports out of ADV + SCAN_RSP pairs,
		 * so start from a clean slate.
		 */
		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
			clear_pending_adv_report(hdev);
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		hci_dev_clear_flag(hdev, HCI_LE_SCAN);

		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request. Mark
		 * therefore discovery as stopped. If this was not
		 * because of a connect request advertising might have
		 * been disabled because of active scanning, so
		 * re-enable it again if necessary.
		 */
		if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
			 hdev->discovery.state == DISCOVERY_FINDING)
			mgmt_reenable_advertising(hdev);

		break;

	default:
		BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
		break;
	}

	hci_dev_unlock(hdev);
}

/* Command Complete: LE Read White List Size — cache the capacity. */
static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);

	if (rp->status)
		return;

	hdev->le_white_list_size = rp->size;
}

/* Command Complete: LE Clear White List — drop our shadow copy too. */
static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	hci_bdaddr_list_clear(&hdev->le_white_list);
}

/* Command Complete: LE Add Device To White List — keep the shadow list
 * in sync with what the controller accepted.
 */
static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_cp_le_add_to_white_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
	if (!sent)
		return;

	hci_bdaddr_list_add(&hdev->le_white_list, &sent->bdaddr,
			    sent->bdaddr_type);
}

/* Command Complete: LE Remove Device From White List — mirror removal. */
static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_cp_le_del_from_white_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
	if (!sent)
		return;

hci_bdaddr_list_del(&hdev->le_white_list, &sent->bdaddr, 1294 sent->bdaddr_type); 1295 } 1296 1297 static void hci_cc_le_read_supported_states(struct hci_dev *hdev, 1298 struct sk_buff *skb) 1299 { 1300 struct hci_rp_le_read_supported_states *rp = (void *) skb->data; 1301 1302 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 1303 1304 if (rp->status) 1305 return; 1306 1307 memcpy(hdev->le_states, rp->le_states, 8); 1308 } 1309 1310 static void hci_cc_le_read_def_data_len(struct hci_dev *hdev, 1311 struct sk_buff *skb) 1312 { 1313 struct hci_rp_le_read_def_data_len *rp = (void *) skb->data; 1314 1315 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 1316 1317 if (rp->status) 1318 return; 1319 1320 hdev->le_def_tx_len = le16_to_cpu(rp->tx_len); 1321 hdev->le_def_tx_time = le16_to_cpu(rp->tx_time); 1322 } 1323 1324 static void hci_cc_le_write_def_data_len(struct hci_dev *hdev, 1325 struct sk_buff *skb) 1326 { 1327 struct hci_cp_le_write_def_data_len *sent; 1328 __u8 status = *((__u8 *) skb->data); 1329 1330 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1331 1332 if (status) 1333 return; 1334 1335 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN); 1336 if (!sent) 1337 return; 1338 1339 hdev->le_def_tx_len = le16_to_cpu(sent->tx_len); 1340 hdev->le_def_tx_time = le16_to_cpu(sent->tx_time); 1341 } 1342 1343 static void hci_cc_le_read_max_data_len(struct hci_dev *hdev, 1344 struct sk_buff *skb) 1345 { 1346 struct hci_rp_le_read_max_data_len *rp = (void *) skb->data; 1347 1348 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 1349 1350 if (rp->status) 1351 return; 1352 1353 hdev->le_max_tx_len = le16_to_cpu(rp->tx_len); 1354 hdev->le_max_tx_time = le16_to_cpu(rp->tx_time); 1355 hdev->le_max_rx_len = le16_to_cpu(rp->rx_len); 1356 hdev->le_max_rx_time = le16_to_cpu(rp->rx_time); 1357 } 1358 1359 static void hci_cc_write_le_host_supported(struct hci_dev *hdev, 1360 struct sk_buff *skb) 1361 { 1362 struct hci_cp_write_le_host_supported *sent; 1363 __u8 
status = *((__u8 *) skb->data); 1364 1365 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1366 1367 if (status) 1368 return; 1369 1370 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED); 1371 if (!sent) 1372 return; 1373 1374 hci_dev_lock(hdev); 1375 1376 if (sent->le) { 1377 hdev->features[1][0] |= LMP_HOST_LE; 1378 hci_dev_set_flag(hdev, HCI_LE_ENABLED); 1379 } else { 1380 hdev->features[1][0] &= ~LMP_HOST_LE; 1381 hci_dev_clear_flag(hdev, HCI_LE_ENABLED); 1382 hci_dev_clear_flag(hdev, HCI_ADVERTISING); 1383 } 1384 1385 if (sent->simul) 1386 hdev->features[1][0] |= LMP_HOST_LE_BREDR; 1387 else 1388 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR; 1389 1390 hci_dev_unlock(hdev); 1391 } 1392 1393 static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb) 1394 { 1395 struct hci_cp_le_set_adv_param *cp; 1396 u8 status = *((u8 *) skb->data); 1397 1398 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1399 1400 if (status) 1401 return; 1402 1403 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM); 1404 if (!cp) 1405 return; 1406 1407 hci_dev_lock(hdev); 1408 hdev->adv_addr_type = cp->own_address_type; 1409 hci_dev_unlock(hdev); 1410 } 1411 1412 static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev, 1413 struct sk_buff *skb) 1414 { 1415 struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data; 1416 1417 BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x", 1418 hdev->name, rp->status, rp->phy_handle); 1419 1420 if (rp->status) 1421 return; 1422 1423 amp_write_rem_assoc_continue(hdev, rp->phy_handle); 1424 } 1425 1426 static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb) 1427 { 1428 struct hci_rp_read_rssi *rp = (void *) skb->data; 1429 struct hci_conn *conn; 1430 1431 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 1432 1433 if (rp->status) 1434 return; 1435 1436 hci_dev_lock(hdev); 1437 1438 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); 1439 if (conn) 1440 conn->rssi = rp->rssi; 1441 1442 
hci_dev_unlock(hdev); 1443 } 1444 1445 static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb) 1446 { 1447 struct hci_cp_read_tx_power *sent; 1448 struct hci_rp_read_tx_power *rp = (void *) skb->data; 1449 struct hci_conn *conn; 1450 1451 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 1452 1453 if (rp->status) 1454 return; 1455 1456 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER); 1457 if (!sent) 1458 return; 1459 1460 hci_dev_lock(hdev); 1461 1462 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); 1463 if (!conn) 1464 goto unlock; 1465 1466 switch (sent->type) { 1467 case 0x00: 1468 conn->tx_power = rp->tx_power; 1469 break; 1470 case 0x01: 1471 conn->max_tx_power = rp->tx_power; 1472 break; 1473 } 1474 1475 unlock: 1476 hci_dev_unlock(hdev); 1477 } 1478 1479 static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, struct sk_buff *skb) 1480 { 1481 u8 status = *((u8 *) skb->data); 1482 u8 *mode; 1483 1484 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1485 1486 if (status) 1487 return; 1488 1489 mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE); 1490 if (mode) 1491 hdev->ssp_debug_mode = *mode; 1492 } 1493 1494 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status) 1495 { 1496 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1497 1498 if (status) { 1499 hci_conn_check_pending(hdev); 1500 return; 1501 } 1502 1503 set_bit(HCI_INQUIRY, &hdev->flags); 1504 } 1505 1506 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status) 1507 { 1508 struct hci_cp_create_conn *cp; 1509 struct hci_conn *conn; 1510 1511 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1512 1513 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN); 1514 if (!cp) 1515 return; 1516 1517 hci_dev_lock(hdev); 1518 1519 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); 1520 1521 BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn); 1522 1523 if (status) { 1524 if (conn && conn->state == BT_CONNECT) { 1525 if 
(status != 0x0c || conn->attempt > 2) { 1526 conn->state = BT_CLOSED; 1527 hci_connect_cfm(conn, status); 1528 hci_conn_del(conn); 1529 } else 1530 conn->state = BT_CONNECT2; 1531 } 1532 } else { 1533 if (!conn) { 1534 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr, 1535 HCI_ROLE_MASTER); 1536 if (!conn) 1537 BT_ERR("No memory for new connection"); 1538 } 1539 } 1540 1541 hci_dev_unlock(hdev); 1542 } 1543 1544 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status) 1545 { 1546 struct hci_cp_add_sco *cp; 1547 struct hci_conn *acl, *sco; 1548 __u16 handle; 1549 1550 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1551 1552 if (!status) 1553 return; 1554 1555 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO); 1556 if (!cp) 1557 return; 1558 1559 handle = __le16_to_cpu(cp->handle); 1560 1561 BT_DBG("%s handle 0x%4.4x", hdev->name, handle); 1562 1563 hci_dev_lock(hdev); 1564 1565 acl = hci_conn_hash_lookup_handle(hdev, handle); 1566 if (acl) { 1567 sco = acl->link; 1568 if (sco) { 1569 sco->state = BT_CLOSED; 1570 1571 hci_connect_cfm(sco, status); 1572 hci_conn_del(sco); 1573 } 1574 } 1575 1576 hci_dev_unlock(hdev); 1577 } 1578 1579 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status) 1580 { 1581 struct hci_cp_auth_requested *cp; 1582 struct hci_conn *conn; 1583 1584 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1585 1586 if (!status) 1587 return; 1588 1589 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED); 1590 if (!cp) 1591 return; 1592 1593 hci_dev_lock(hdev); 1594 1595 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 1596 if (conn) { 1597 if (conn->state == BT_CONFIG) { 1598 hci_connect_cfm(conn, status); 1599 hci_conn_drop(conn); 1600 } 1601 } 1602 1603 hci_dev_unlock(hdev); 1604 } 1605 1606 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status) 1607 { 1608 struct hci_cp_set_conn_encrypt *cp; 1609 struct hci_conn *conn; 1610 1611 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1612 1613 if (!status) 1614 
return; 1615 1616 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT); 1617 if (!cp) 1618 return; 1619 1620 hci_dev_lock(hdev); 1621 1622 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 1623 if (conn) { 1624 if (conn->state == BT_CONFIG) { 1625 hci_connect_cfm(conn, status); 1626 hci_conn_drop(conn); 1627 } 1628 } 1629 1630 hci_dev_unlock(hdev); 1631 } 1632 1633 static int hci_outgoing_auth_needed(struct hci_dev *hdev, 1634 struct hci_conn *conn) 1635 { 1636 if (conn->state != BT_CONFIG || !conn->out) 1637 return 0; 1638 1639 if (conn->pending_sec_level == BT_SECURITY_SDP) 1640 return 0; 1641 1642 /* Only request authentication for SSP connections or non-SSP 1643 * devices with sec_level MEDIUM or HIGH or if MITM protection 1644 * is requested. 1645 */ 1646 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) && 1647 conn->pending_sec_level != BT_SECURITY_FIPS && 1648 conn->pending_sec_level != BT_SECURITY_HIGH && 1649 conn->pending_sec_level != BT_SECURITY_MEDIUM) 1650 return 0; 1651 1652 return 1; 1653 } 1654 1655 static int hci_resolve_name(struct hci_dev *hdev, 1656 struct inquiry_entry *e) 1657 { 1658 struct hci_cp_remote_name_req cp; 1659 1660 memset(&cp, 0, sizeof(cp)); 1661 1662 bacpy(&cp.bdaddr, &e->data.bdaddr); 1663 cp.pscan_rep_mode = e->data.pscan_rep_mode; 1664 cp.pscan_mode = e->data.pscan_mode; 1665 cp.clock_offset = e->data.clock_offset; 1666 1667 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp); 1668 } 1669 1670 static bool hci_resolve_next_name(struct hci_dev *hdev) 1671 { 1672 struct discovery_state *discov = &hdev->discovery; 1673 struct inquiry_entry *e; 1674 1675 if (list_empty(&discov->resolve)) 1676 return false; 1677 1678 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED); 1679 if (!e) 1680 return false; 1681 1682 if (hci_resolve_name(hdev, e) == 0) { 1683 e->name_state = NAME_PENDING; 1684 return true; 1685 } 1686 1687 return false; 1688 } 1689 1690 static void 
hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn, 1691 bdaddr_t *bdaddr, u8 *name, u8 name_len) 1692 { 1693 struct discovery_state *discov = &hdev->discovery; 1694 struct inquiry_entry *e; 1695 1696 /* Update the mgmt connected state if necessary. Be careful with 1697 * conn objects that exist but are not (yet) connected however. 1698 * Only those in BT_CONFIG or BT_CONNECTED states can be 1699 * considered connected. 1700 */ 1701 if (conn && 1702 (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) && 1703 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) 1704 mgmt_device_connected(hdev, conn, 0, name, name_len); 1705 1706 if (discov->state == DISCOVERY_STOPPED) 1707 return; 1708 1709 if (discov->state == DISCOVERY_STOPPING) 1710 goto discov_complete; 1711 1712 if (discov->state != DISCOVERY_RESOLVING) 1713 return; 1714 1715 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING); 1716 /* If the device was not found in a list of found devices names of which 1717 * are pending. 
there is no need to continue resolving a next name as it 1718 * will be done upon receiving another Remote Name Request Complete 1719 * Event */ 1720 if (!e) 1721 return; 1722 1723 list_del(&e->list); 1724 if (name) { 1725 e->name_state = NAME_KNOWN; 1726 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00, 1727 e->data.rssi, name, name_len); 1728 } else { 1729 e->name_state = NAME_NOT_KNOWN; 1730 } 1731 1732 if (hci_resolve_next_name(hdev)) 1733 return; 1734 1735 discov_complete: 1736 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 1737 } 1738 1739 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status) 1740 { 1741 struct hci_cp_remote_name_req *cp; 1742 struct hci_conn *conn; 1743 1744 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1745 1746 /* If successful wait for the name req complete event before 1747 * checking for the need to do authentication */ 1748 if (!status) 1749 return; 1750 1751 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ); 1752 if (!cp) 1753 return; 1754 1755 hci_dev_lock(hdev); 1756 1757 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); 1758 1759 if (hci_dev_test_flag(hdev, HCI_MGMT)) 1760 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0); 1761 1762 if (!conn) 1763 goto unlock; 1764 1765 if (!hci_outgoing_auth_needed(hdev, conn)) 1766 goto unlock; 1767 1768 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) { 1769 struct hci_cp_auth_requested auth_cp; 1770 1771 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags); 1772 1773 auth_cp.handle = __cpu_to_le16(conn->handle); 1774 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, 1775 sizeof(auth_cp), &auth_cp); 1776 } 1777 1778 unlock: 1779 hci_dev_unlock(hdev); 1780 } 1781 1782 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status) 1783 { 1784 struct hci_cp_read_remote_features *cp; 1785 struct hci_conn *conn; 1786 1787 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1788 1789 if (!status) 1790 return; 1791 1792 cp = hci_sent_cmd_data(hdev, 
HCI_OP_READ_REMOTE_FEATURES); 1793 if (!cp) 1794 return; 1795 1796 hci_dev_lock(hdev); 1797 1798 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 1799 if (conn) { 1800 if (conn->state == BT_CONFIG) { 1801 hci_connect_cfm(conn, status); 1802 hci_conn_drop(conn); 1803 } 1804 } 1805 1806 hci_dev_unlock(hdev); 1807 } 1808 1809 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status) 1810 { 1811 struct hci_cp_read_remote_ext_features *cp; 1812 struct hci_conn *conn; 1813 1814 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1815 1816 if (!status) 1817 return; 1818 1819 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES); 1820 if (!cp) 1821 return; 1822 1823 hci_dev_lock(hdev); 1824 1825 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 1826 if (conn) { 1827 if (conn->state == BT_CONFIG) { 1828 hci_connect_cfm(conn, status); 1829 hci_conn_drop(conn); 1830 } 1831 } 1832 1833 hci_dev_unlock(hdev); 1834 } 1835 1836 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status) 1837 { 1838 struct hci_cp_setup_sync_conn *cp; 1839 struct hci_conn *acl, *sco; 1840 __u16 handle; 1841 1842 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1843 1844 if (!status) 1845 return; 1846 1847 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN); 1848 if (!cp) 1849 return; 1850 1851 handle = __le16_to_cpu(cp->handle); 1852 1853 BT_DBG("%s handle 0x%4.4x", hdev->name, handle); 1854 1855 hci_dev_lock(hdev); 1856 1857 acl = hci_conn_hash_lookup_handle(hdev, handle); 1858 if (acl) { 1859 sco = acl->link; 1860 if (sco) { 1861 sco->state = BT_CLOSED; 1862 1863 hci_connect_cfm(sco, status); 1864 hci_conn_del(sco); 1865 } 1866 } 1867 1868 hci_dev_unlock(hdev); 1869 } 1870 1871 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status) 1872 { 1873 struct hci_cp_sniff_mode *cp; 1874 struct hci_conn *conn; 1875 1876 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1877 1878 if (!status) 1879 return; 1880 1881 cp = 
hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE); 1882 if (!cp) 1883 return; 1884 1885 hci_dev_lock(hdev); 1886 1887 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 1888 if (conn) { 1889 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags); 1890 1891 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags)) 1892 hci_sco_setup(conn, status); 1893 } 1894 1895 hci_dev_unlock(hdev); 1896 } 1897 1898 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status) 1899 { 1900 struct hci_cp_exit_sniff_mode *cp; 1901 struct hci_conn *conn; 1902 1903 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1904 1905 if (!status) 1906 return; 1907 1908 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE); 1909 if (!cp) 1910 return; 1911 1912 hci_dev_lock(hdev); 1913 1914 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 1915 if (conn) { 1916 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags); 1917 1918 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags)) 1919 hci_sco_setup(conn, status); 1920 } 1921 1922 hci_dev_unlock(hdev); 1923 } 1924 1925 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status) 1926 { 1927 struct hci_cp_disconnect *cp; 1928 struct hci_conn *conn; 1929 1930 if (!status) 1931 return; 1932 1933 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT); 1934 if (!cp) 1935 return; 1936 1937 hci_dev_lock(hdev); 1938 1939 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 1940 if (conn) 1941 mgmt_disconnect_failed(hdev, &conn->dst, conn->type, 1942 conn->dst_type, status); 1943 1944 hci_dev_unlock(hdev); 1945 } 1946 1947 static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status) 1948 { 1949 struct hci_cp_create_phy_link *cp; 1950 1951 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1952 1953 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK); 1954 if (!cp) 1955 return; 1956 1957 hci_dev_lock(hdev); 1958 1959 if (status) { 1960 struct hci_conn *hcon; 1961 1962 hcon = 
hci_conn_hash_lookup_handle(hdev, cp->phy_handle); 1963 if (hcon) 1964 hci_conn_del(hcon); 1965 } else { 1966 amp_write_remote_assoc(hdev, cp->phy_handle); 1967 } 1968 1969 hci_dev_unlock(hdev); 1970 } 1971 1972 static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status) 1973 { 1974 struct hci_cp_accept_phy_link *cp; 1975 1976 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1977 1978 if (status) 1979 return; 1980 1981 cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK); 1982 if (!cp) 1983 return; 1984 1985 amp_write_remote_assoc(hdev, cp->phy_handle); 1986 } 1987 1988 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status) 1989 { 1990 struct hci_cp_le_create_conn *cp; 1991 struct hci_conn *conn; 1992 1993 BT_DBG("%s status 0x%2.2x", hdev->name, status); 1994 1995 /* All connection failure handling is taken care of by the 1996 * hci_le_conn_failed function which is triggered by the HCI 1997 * request completion callbacks used for connecting. 1998 */ 1999 if (status) 2000 return; 2001 2002 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN); 2003 if (!cp) 2004 return; 2005 2006 hci_dev_lock(hdev); 2007 2008 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr); 2009 if (!conn) 2010 goto unlock; 2011 2012 /* Store the initiator and responder address information which 2013 * is needed for SMP. These values will not change during the 2014 * lifetime of the connection. 2015 */ 2016 conn->init_addr_type = cp->own_address_type; 2017 if (cp->own_address_type == ADDR_LE_DEV_RANDOM) 2018 bacpy(&conn->init_addr, &hdev->random_addr); 2019 else 2020 bacpy(&conn->init_addr, &hdev->bdaddr); 2021 2022 conn->resp_addr_type = cp->peer_addr_type; 2023 bacpy(&conn->resp_addr, &cp->peer_addr); 2024 2025 /* We don't want the connection attempt to stick around 2026 * indefinitely since LE doesn't have a page timeout concept 2027 * like BR/EDR. Set a timer for any connection that doesn't use 2028 * the white list for connecting. 
2029 */ 2030 if (cp->filter_policy == HCI_LE_USE_PEER_ADDR) 2031 queue_delayed_work(conn->hdev->workqueue, 2032 &conn->le_conn_timeout, 2033 conn->conn_timeout); 2034 2035 unlock: 2036 hci_dev_unlock(hdev); 2037 } 2038 2039 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status) 2040 { 2041 struct hci_cp_le_read_remote_features *cp; 2042 struct hci_conn *conn; 2043 2044 BT_DBG("%s status 0x%2.2x", hdev->name, status); 2045 2046 if (!status) 2047 return; 2048 2049 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES); 2050 if (!cp) 2051 return; 2052 2053 hci_dev_lock(hdev); 2054 2055 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2056 if (conn) { 2057 if (conn->state == BT_CONFIG) { 2058 hci_connect_cfm(conn, status); 2059 hci_conn_drop(conn); 2060 } 2061 } 2062 2063 hci_dev_unlock(hdev); 2064 } 2065 2066 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status) 2067 { 2068 struct hci_cp_le_start_enc *cp; 2069 struct hci_conn *conn; 2070 2071 BT_DBG("%s status 0x%2.2x", hdev->name, status); 2072 2073 if (!status) 2074 return; 2075 2076 hci_dev_lock(hdev); 2077 2078 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC); 2079 if (!cp) 2080 goto unlock; 2081 2082 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 2083 if (!conn) 2084 goto unlock; 2085 2086 if (conn->state != BT_CONNECTED) 2087 goto unlock; 2088 2089 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE); 2090 hci_conn_drop(conn); 2091 2092 unlock: 2093 hci_dev_unlock(hdev); 2094 } 2095 2096 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status) 2097 { 2098 struct hci_cp_switch_role *cp; 2099 struct hci_conn *conn; 2100 2101 BT_DBG("%s status 0x%2.2x", hdev->name, status); 2102 2103 if (!status) 2104 return; 2105 2106 cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE); 2107 if (!cp) 2108 return; 2109 2110 hci_dev_lock(hdev); 2111 2112 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); 2113 if (conn) 2114 
clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags); 2115 2116 hci_dev_unlock(hdev); 2117 } 2118 2119 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 2120 { 2121 __u8 status = *((__u8 *) skb->data); 2122 struct discovery_state *discov = &hdev->discovery; 2123 struct inquiry_entry *e; 2124 2125 BT_DBG("%s status 0x%2.2x", hdev->name, status); 2126 2127 hci_conn_check_pending(hdev); 2128 2129 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags)) 2130 return; 2131 2132 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */ 2133 wake_up_bit(&hdev->flags, HCI_INQUIRY); 2134 2135 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 2136 return; 2137 2138 hci_dev_lock(hdev); 2139 2140 if (discov->state != DISCOVERY_FINDING) 2141 goto unlock; 2142 2143 if (list_empty(&discov->resolve)) { 2144 /* When BR/EDR inquiry is active and no LE scanning is in 2145 * progress, then change discovery state to indicate completion. 2146 * 2147 * When running LE scanning and BR/EDR inquiry simultaneously 2148 * and the LE scan already finished, then change the discovery 2149 * state to indicate completion. 2150 */ 2151 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) || 2152 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) 2153 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 2154 goto unlock; 2155 } 2156 2157 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED); 2158 if (e && hci_resolve_name(hdev, e) == 0) { 2159 e->name_state = NAME_PENDING; 2160 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING); 2161 } else { 2162 /* When BR/EDR inquiry is active and no LE scanning is in 2163 * progress, then change discovery state to indicate completion. 2164 * 2165 * When running LE scanning and BR/EDR inquiry simultaneously 2166 * and the LE scan already finished, then change the discovery 2167 * state to indicate completion. 
2168 */ 2169 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) || 2170 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) 2171 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 2172 } 2173 2174 unlock: 2175 hci_dev_unlock(hdev); 2176 } 2177 2178 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb) 2179 { 2180 struct inquiry_data data; 2181 struct inquiry_info *info = (void *) (skb->data + 1); 2182 int num_rsp = *((__u8 *) skb->data); 2183 2184 BT_DBG("%s num_rsp %d", hdev->name, num_rsp); 2185 2186 if (!num_rsp) 2187 return; 2188 2189 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) 2190 return; 2191 2192 hci_dev_lock(hdev); 2193 2194 for (; num_rsp; num_rsp--, info++) { 2195 u32 flags; 2196 2197 bacpy(&data.bdaddr, &info->bdaddr); 2198 data.pscan_rep_mode = info->pscan_rep_mode; 2199 data.pscan_period_mode = info->pscan_period_mode; 2200 data.pscan_mode = info->pscan_mode; 2201 memcpy(data.dev_class, info->dev_class, 3); 2202 data.clock_offset = info->clock_offset; 2203 data.rssi = HCI_RSSI_INVALID; 2204 data.ssp_mode = 0x00; 2205 2206 flags = hci_inquiry_cache_update(hdev, &data, false); 2207 2208 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 2209 info->dev_class, HCI_RSSI_INVALID, 2210 flags, NULL, 0, NULL, 0); 2211 } 2212 2213 hci_dev_unlock(hdev); 2214 } 2215 2216 static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 2217 { 2218 struct hci_ev_conn_complete *ev = (void *) skb->data; 2219 struct hci_conn *conn; 2220 2221 BT_DBG("%s", hdev->name); 2222 2223 hci_dev_lock(hdev); 2224 2225 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); 2226 if (!conn) { 2227 if (ev->link_type != SCO_LINK) 2228 goto unlock; 2229 2230 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr); 2231 if (!conn) 2232 goto unlock; 2233 2234 conn->type = SCO_LINK; 2235 } 2236 2237 if (!ev->status) { 2238 conn->handle = __le16_to_cpu(ev->handle); 2239 2240 if (conn->type == ACL_LINK) { 2241 conn->state = BT_CONFIG; 
2242 hci_conn_hold(conn); 2243 2244 if (!conn->out && !hci_conn_ssp_enabled(conn) && 2245 !hci_find_link_key(hdev, &ev->bdaddr)) 2246 conn->disc_timeout = HCI_PAIRING_TIMEOUT; 2247 else 2248 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 2249 } else 2250 conn->state = BT_CONNECTED; 2251 2252 hci_debugfs_create_conn(conn); 2253 hci_conn_add_sysfs(conn); 2254 2255 if (test_bit(HCI_AUTH, &hdev->flags)) 2256 set_bit(HCI_CONN_AUTH, &conn->flags); 2257 2258 if (test_bit(HCI_ENCRYPT, &hdev->flags)) 2259 set_bit(HCI_CONN_ENCRYPT, &conn->flags); 2260 2261 /* Get remote features */ 2262 if (conn->type == ACL_LINK) { 2263 struct hci_cp_read_remote_features cp; 2264 cp.handle = ev->handle; 2265 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES, 2266 sizeof(cp), &cp); 2267 2268 hci_update_page_scan(hdev); 2269 } 2270 2271 /* Set packet type for incoming connection */ 2272 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) { 2273 struct hci_cp_change_conn_ptype cp; 2274 cp.handle = ev->handle; 2275 cp.pkt_type = cpu_to_le16(conn->pkt_type); 2276 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp), 2277 &cp); 2278 } 2279 } else { 2280 conn->state = BT_CLOSED; 2281 if (conn->type == ACL_LINK) 2282 mgmt_connect_failed(hdev, &conn->dst, conn->type, 2283 conn->dst_type, ev->status); 2284 } 2285 2286 if (conn->type == ACL_LINK) 2287 hci_sco_setup(conn, ev->status); 2288 2289 if (ev->status) { 2290 hci_connect_cfm(conn, ev->status); 2291 hci_conn_del(conn); 2292 } else if (ev->link_type != ACL_LINK) 2293 hci_connect_cfm(conn, ev->status); 2294 2295 unlock: 2296 hci_dev_unlock(hdev); 2297 2298 hci_conn_check_pending(hdev); 2299 } 2300 2301 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr) 2302 { 2303 struct hci_cp_reject_conn_req cp; 2304 2305 bacpy(&cp.bdaddr, bdaddr); 2306 cp.reason = HCI_ERROR_REJ_BAD_ADDR; 2307 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp); 2308 } 2309 2310 static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb) 2311 
{ 2312 struct hci_ev_conn_request *ev = (void *) skb->data; 2313 int mask = hdev->link_mode; 2314 struct inquiry_entry *ie; 2315 struct hci_conn *conn; 2316 __u8 flags = 0; 2317 2318 BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr, 2319 ev->link_type); 2320 2321 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type, 2322 &flags); 2323 2324 if (!(mask & HCI_LM_ACCEPT)) { 2325 hci_reject_conn(hdev, &ev->bdaddr); 2326 return; 2327 } 2328 2329 if (hci_bdaddr_list_lookup(&hdev->blacklist, &ev->bdaddr, 2330 BDADDR_BREDR)) { 2331 hci_reject_conn(hdev, &ev->bdaddr); 2332 return; 2333 } 2334 2335 /* Require HCI_CONNECTABLE or a whitelist entry to accept the 2336 * connection. These features are only touched through mgmt so 2337 * only do the checks if HCI_MGMT is set. 2338 */ 2339 if (hci_dev_test_flag(hdev, HCI_MGMT) && 2340 !hci_dev_test_flag(hdev, HCI_CONNECTABLE) && 2341 !hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr, 2342 BDADDR_BREDR)) { 2343 hci_reject_conn(hdev, &ev->bdaddr); 2344 return; 2345 } 2346 2347 /* Connection accepted */ 2348 2349 hci_dev_lock(hdev); 2350 2351 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr); 2352 if (ie) 2353 memcpy(ie->data.dev_class, ev->dev_class, 3); 2354 2355 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, 2356 &ev->bdaddr); 2357 if (!conn) { 2358 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr, 2359 HCI_ROLE_SLAVE); 2360 if (!conn) { 2361 BT_ERR("No memory for new connection"); 2362 hci_dev_unlock(hdev); 2363 return; 2364 } 2365 } 2366 2367 memcpy(conn->dev_class, ev->dev_class, 3); 2368 2369 hci_dev_unlock(hdev); 2370 2371 if (ev->link_type == ACL_LINK || 2372 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) { 2373 struct hci_cp_accept_conn_req cp; 2374 conn->state = BT_CONNECT; 2375 2376 bacpy(&cp.bdaddr, &ev->bdaddr); 2377 2378 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER)) 2379 cp.role = 0x00; /* Become master */ 2380 else 2381 cp.role = 0x01; /* Remain slave */ 2382 2383 
hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp); 2384 } else if (!(flags & HCI_PROTO_DEFER)) { 2385 struct hci_cp_accept_sync_conn_req cp; 2386 conn->state = BT_CONNECT; 2387 2388 bacpy(&cp.bdaddr, &ev->bdaddr); 2389 cp.pkt_type = cpu_to_le16(conn->pkt_type); 2390 2391 cp.tx_bandwidth = cpu_to_le32(0x00001f40); 2392 cp.rx_bandwidth = cpu_to_le32(0x00001f40); 2393 cp.max_latency = cpu_to_le16(0xffff); 2394 cp.content_format = cpu_to_le16(hdev->voice_setting); 2395 cp.retrans_effort = 0xff; 2396 2397 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp), 2398 &cp); 2399 } else { 2400 conn->state = BT_CONNECT2; 2401 hci_connect_cfm(conn, 0); 2402 } 2403 } 2404 2405 static u8 hci_to_mgmt_reason(u8 err) 2406 { 2407 switch (err) { 2408 case HCI_ERROR_CONNECTION_TIMEOUT: 2409 return MGMT_DEV_DISCONN_TIMEOUT; 2410 case HCI_ERROR_REMOTE_USER_TERM: 2411 case HCI_ERROR_REMOTE_LOW_RESOURCES: 2412 case HCI_ERROR_REMOTE_POWER_OFF: 2413 return MGMT_DEV_DISCONN_REMOTE; 2414 case HCI_ERROR_LOCAL_HOST_TERM: 2415 return MGMT_DEV_DISCONN_LOCAL_HOST; 2416 default: 2417 return MGMT_DEV_DISCONN_UNKNOWN; 2418 } 2419 } 2420 2421 static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 2422 { 2423 struct hci_ev_disconn_complete *ev = (void *) skb->data; 2424 u8 reason = hci_to_mgmt_reason(ev->reason); 2425 struct hci_conn_params *params; 2426 struct hci_conn *conn; 2427 bool mgmt_connected; 2428 u8 type; 2429 2430 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 2431 2432 hci_dev_lock(hdev); 2433 2434 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 2435 if (!conn) 2436 goto unlock; 2437 2438 if (ev->status) { 2439 mgmt_disconnect_failed(hdev, &conn->dst, conn->type, 2440 conn->dst_type, ev->status); 2441 goto unlock; 2442 } 2443 2444 conn->state = BT_CLOSED; 2445 2446 mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags); 2447 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type, 2448 
reason, mgmt_connected); 2449 2450 if (conn->type == ACL_LINK) { 2451 if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags)) 2452 hci_remove_link_key(hdev, &conn->dst); 2453 2454 hci_update_page_scan(hdev); 2455 } 2456 2457 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type); 2458 if (params) { 2459 switch (params->auto_connect) { 2460 case HCI_AUTO_CONN_LINK_LOSS: 2461 if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT) 2462 break; 2463 /* Fall through */ 2464 2465 case HCI_AUTO_CONN_DIRECT: 2466 case HCI_AUTO_CONN_ALWAYS: 2467 list_del_init(¶ms->action); 2468 list_add(¶ms->action, &hdev->pend_le_conns); 2469 hci_update_background_scan(hdev); 2470 break; 2471 2472 default: 2473 break; 2474 } 2475 } 2476 2477 type = conn->type; 2478 2479 hci_disconn_cfm(conn, ev->reason); 2480 hci_conn_del(conn); 2481 2482 /* Re-enable advertising if necessary, since it might 2483 * have been disabled by the connection. From the 2484 * HCI_LE_Set_Advertise_Enable command description in 2485 * the core specification (v4.0): 2486 * "The Controller shall continue advertising until the Host 2487 * issues an LE_Set_Advertise_Enable command with 2488 * Advertising_Enable set to 0x00 (Advertising is disabled) 2489 * or until a connection is created or until the Advertising 2490 * is timed out due to Directed Advertising." 
2491 */ 2492 if (type == LE_LINK) 2493 mgmt_reenable_advertising(hdev); 2494 2495 unlock: 2496 hci_dev_unlock(hdev); 2497 } 2498 2499 static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 2500 { 2501 struct hci_ev_auth_complete *ev = (void *) skb->data; 2502 struct hci_conn *conn; 2503 2504 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 2505 2506 hci_dev_lock(hdev); 2507 2508 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 2509 if (!conn) 2510 goto unlock; 2511 2512 if (!ev->status) { 2513 if (!hci_conn_ssp_enabled(conn) && 2514 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) { 2515 BT_INFO("re-auth of legacy device is not possible."); 2516 } else { 2517 set_bit(HCI_CONN_AUTH, &conn->flags); 2518 conn->sec_level = conn->pending_sec_level; 2519 } 2520 } else { 2521 mgmt_auth_failed(conn, ev->status); 2522 } 2523 2524 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags); 2525 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags); 2526 2527 if (conn->state == BT_CONFIG) { 2528 if (!ev->status && hci_conn_ssp_enabled(conn)) { 2529 struct hci_cp_set_conn_encrypt cp; 2530 cp.handle = ev->handle; 2531 cp.encrypt = 0x01; 2532 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), 2533 &cp); 2534 } else { 2535 conn->state = BT_CONNECTED; 2536 hci_connect_cfm(conn, ev->status); 2537 hci_conn_drop(conn); 2538 } 2539 } else { 2540 hci_auth_cfm(conn, ev->status); 2541 2542 hci_conn_hold(conn); 2543 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 2544 hci_conn_drop(conn); 2545 } 2546 2547 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) { 2548 if (!ev->status) { 2549 struct hci_cp_set_conn_encrypt cp; 2550 cp.handle = ev->handle; 2551 cp.encrypt = 0x01; 2552 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), 2553 &cp); 2554 } else { 2555 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); 2556 hci_encrypt_cfm(conn, ev->status, 0x00); 2557 } 2558 } 2559 2560 unlock: 2561 hci_dev_unlock(hdev); 2562 } 2563 2564 static void hci_remote_name_evt(struct 
hci_dev *hdev, struct sk_buff *skb) 2565 { 2566 struct hci_ev_remote_name *ev = (void *) skb->data; 2567 struct hci_conn *conn; 2568 2569 BT_DBG("%s", hdev->name); 2570 2571 hci_conn_check_pending(hdev); 2572 2573 hci_dev_lock(hdev); 2574 2575 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 2576 2577 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 2578 goto check_auth; 2579 2580 if (ev->status == 0) 2581 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name, 2582 strnlen(ev->name, HCI_MAX_NAME_LENGTH)); 2583 else 2584 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0); 2585 2586 check_auth: 2587 if (!conn) 2588 goto unlock; 2589 2590 if (!hci_outgoing_auth_needed(hdev, conn)) 2591 goto unlock; 2592 2593 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) { 2594 struct hci_cp_auth_requested cp; 2595 2596 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags); 2597 2598 cp.handle = __cpu_to_le16(conn->handle); 2599 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp); 2600 } 2601 2602 unlock: 2603 hci_dev_unlock(hdev); 2604 } 2605 2606 static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status, 2607 u16 opcode, struct sk_buff *skb) 2608 { 2609 const struct hci_rp_read_enc_key_size *rp; 2610 struct hci_conn *conn; 2611 u16 handle; 2612 2613 BT_DBG("%s status 0x%02x", hdev->name, status); 2614 2615 if (!skb || skb->len < sizeof(*rp)) { 2616 BT_ERR("%s invalid HCI Read Encryption Key Size response", 2617 hdev->name); 2618 return; 2619 } 2620 2621 rp = (void *)skb->data; 2622 handle = le16_to_cpu(rp->handle); 2623 2624 hci_dev_lock(hdev); 2625 2626 conn = hci_conn_hash_lookup_handle(hdev, handle); 2627 if (!conn) 2628 goto unlock; 2629 2630 /* If we fail to read the encryption key size, assume maximum 2631 * (which is the same we do also when this HCI command isn't 2632 * supported. 
2633 */ 2634 if (rp->status) { 2635 BT_ERR("%s failed to read key size for handle %u", hdev->name, 2636 handle); 2637 conn->enc_key_size = HCI_LINK_KEY_SIZE; 2638 } else { 2639 conn->enc_key_size = rp->key_size; 2640 } 2641 2642 if (conn->state == BT_CONFIG) { 2643 conn->state = BT_CONNECTED; 2644 hci_connect_cfm(conn, 0); 2645 hci_conn_drop(conn); 2646 } else { 2647 u8 encrypt; 2648 2649 if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags)) 2650 encrypt = 0x00; 2651 else if (test_bit(HCI_CONN_AES_CCM, &conn->flags)) 2652 encrypt = 0x02; 2653 else 2654 encrypt = 0x01; 2655 2656 hci_encrypt_cfm(conn, 0, encrypt); 2657 } 2658 2659 unlock: 2660 hci_dev_unlock(hdev); 2661 } 2662 2663 static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb) 2664 { 2665 struct hci_ev_encrypt_change *ev = (void *) skb->data; 2666 struct hci_conn *conn; 2667 2668 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 2669 2670 hci_dev_lock(hdev); 2671 2672 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 2673 if (!conn) 2674 goto unlock; 2675 2676 if (!ev->status) { 2677 if (ev->encrypt) { 2678 /* Encryption implies authentication */ 2679 set_bit(HCI_CONN_AUTH, &conn->flags); 2680 set_bit(HCI_CONN_ENCRYPT, &conn->flags); 2681 conn->sec_level = conn->pending_sec_level; 2682 2683 /* P-256 authentication key implies FIPS */ 2684 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256) 2685 set_bit(HCI_CONN_FIPS, &conn->flags); 2686 2687 if ((conn->type == ACL_LINK && ev->encrypt == 0x02) || 2688 conn->type == LE_LINK) 2689 set_bit(HCI_CONN_AES_CCM, &conn->flags); 2690 } else { 2691 clear_bit(HCI_CONN_ENCRYPT, &conn->flags); 2692 clear_bit(HCI_CONN_AES_CCM, &conn->flags); 2693 } 2694 } 2695 2696 /* We should disregard the current RPA and generate a new one 2697 * whenever the encryption procedure fails. 
2698 */ 2699 if (ev->status && conn->type == LE_LINK) 2700 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED); 2701 2702 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); 2703 2704 if (ev->status && conn->state == BT_CONNECTED) { 2705 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE); 2706 hci_conn_drop(conn); 2707 goto unlock; 2708 } 2709 2710 /* In Secure Connections Only mode, do not allow any connections 2711 * that are not encrypted with AES-CCM using a P-256 authenticated 2712 * combination key. 2713 */ 2714 if (hci_dev_test_flag(hdev, HCI_SC_ONLY) && 2715 (!test_bit(HCI_CONN_AES_CCM, &conn->flags) || 2716 conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) { 2717 hci_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE); 2718 hci_conn_drop(conn); 2719 goto unlock; 2720 } 2721 2722 /* Try reading the encryption key size for encrypted ACL links */ 2723 if (!ev->status && ev->encrypt && conn->type == ACL_LINK) { 2724 struct hci_cp_read_enc_key_size cp; 2725 struct hci_request req; 2726 2727 /* Only send HCI_Read_Encryption_Key_Size if the 2728 * controller really supports it. If it doesn't, assume 2729 * the default size (16). 
2730 */ 2731 if (!(hdev->commands[20] & 0x10)) { 2732 conn->enc_key_size = HCI_LINK_KEY_SIZE; 2733 goto notify; 2734 } 2735 2736 hci_req_init(&req, hdev); 2737 2738 cp.handle = cpu_to_le16(conn->handle); 2739 hci_req_add(&req, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp); 2740 2741 if (hci_req_run_skb(&req, read_enc_key_size_complete)) { 2742 BT_ERR("Sending HCI Read Encryption Key Size failed"); 2743 conn->enc_key_size = HCI_LINK_KEY_SIZE; 2744 goto notify; 2745 } 2746 2747 goto unlock; 2748 } 2749 2750 notify: 2751 if (conn->state == BT_CONFIG) { 2752 if (!ev->status) 2753 conn->state = BT_CONNECTED; 2754 2755 hci_connect_cfm(conn, ev->status); 2756 hci_conn_drop(conn); 2757 } else 2758 hci_encrypt_cfm(conn, ev->status, ev->encrypt); 2759 2760 unlock: 2761 hci_dev_unlock(hdev); 2762 } 2763 2764 static void hci_change_link_key_complete_evt(struct hci_dev *hdev, 2765 struct sk_buff *skb) 2766 { 2767 struct hci_ev_change_link_key_complete *ev = (void *) skb->data; 2768 struct hci_conn *conn; 2769 2770 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 2771 2772 hci_dev_lock(hdev); 2773 2774 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 2775 if (conn) { 2776 if (!ev->status) 2777 set_bit(HCI_CONN_SECURE, &conn->flags); 2778 2779 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags); 2780 2781 hci_key_change_cfm(conn, ev->status); 2782 } 2783 2784 hci_dev_unlock(hdev); 2785 } 2786 2787 static void hci_remote_features_evt(struct hci_dev *hdev, 2788 struct sk_buff *skb) 2789 { 2790 struct hci_ev_remote_features *ev = (void *) skb->data; 2791 struct hci_conn *conn; 2792 2793 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 2794 2795 hci_dev_lock(hdev); 2796 2797 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 2798 if (!conn) 2799 goto unlock; 2800 2801 if (!ev->status) 2802 memcpy(conn->features[0], ev->features, 8); 2803 2804 if (conn->state != BT_CONFIG) 2805 goto unlock; 2806 2807 if (!ev->status && lmp_ext_feat_capable(hdev) && 
2808 lmp_ext_feat_capable(conn)) { 2809 struct hci_cp_read_remote_ext_features cp; 2810 cp.handle = ev->handle; 2811 cp.page = 0x01; 2812 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES, 2813 sizeof(cp), &cp); 2814 goto unlock; 2815 } 2816 2817 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) { 2818 struct hci_cp_remote_name_req cp; 2819 memset(&cp, 0, sizeof(cp)); 2820 bacpy(&cp.bdaddr, &conn->dst); 2821 cp.pscan_rep_mode = 0x02; 2822 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp); 2823 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) 2824 mgmt_device_connected(hdev, conn, 0, NULL, 0); 2825 2826 if (!hci_outgoing_auth_needed(hdev, conn)) { 2827 conn->state = BT_CONNECTED; 2828 hci_connect_cfm(conn, ev->status); 2829 hci_conn_drop(conn); 2830 } 2831 2832 unlock: 2833 hci_dev_unlock(hdev); 2834 } 2835 2836 static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb, 2837 u16 *opcode, u8 *status, 2838 hci_req_complete_t *req_complete, 2839 hci_req_complete_skb_t *req_complete_skb) 2840 { 2841 struct hci_ev_cmd_complete *ev = (void *) skb->data; 2842 2843 *opcode = __le16_to_cpu(ev->opcode); 2844 *status = skb->data[sizeof(*ev)]; 2845 2846 skb_pull(skb, sizeof(*ev)); 2847 2848 switch (*opcode) { 2849 case HCI_OP_INQUIRY_CANCEL: 2850 hci_cc_inquiry_cancel(hdev, skb); 2851 break; 2852 2853 case HCI_OP_PERIODIC_INQ: 2854 hci_cc_periodic_inq(hdev, skb); 2855 break; 2856 2857 case HCI_OP_EXIT_PERIODIC_INQ: 2858 hci_cc_exit_periodic_inq(hdev, skb); 2859 break; 2860 2861 case HCI_OP_REMOTE_NAME_REQ_CANCEL: 2862 hci_cc_remote_name_req_cancel(hdev, skb); 2863 break; 2864 2865 case HCI_OP_ROLE_DISCOVERY: 2866 hci_cc_role_discovery(hdev, skb); 2867 break; 2868 2869 case HCI_OP_READ_LINK_POLICY: 2870 hci_cc_read_link_policy(hdev, skb); 2871 break; 2872 2873 case HCI_OP_WRITE_LINK_POLICY: 2874 hci_cc_write_link_policy(hdev, skb); 2875 break; 2876 2877 case HCI_OP_READ_DEF_LINK_POLICY: 2878 
hci_cc_read_def_link_policy(hdev, skb); 2879 break; 2880 2881 case HCI_OP_WRITE_DEF_LINK_POLICY: 2882 hci_cc_write_def_link_policy(hdev, skb); 2883 break; 2884 2885 case HCI_OP_RESET: 2886 hci_cc_reset(hdev, skb); 2887 break; 2888 2889 case HCI_OP_READ_STORED_LINK_KEY: 2890 hci_cc_read_stored_link_key(hdev, skb); 2891 break; 2892 2893 case HCI_OP_DELETE_STORED_LINK_KEY: 2894 hci_cc_delete_stored_link_key(hdev, skb); 2895 break; 2896 2897 case HCI_OP_WRITE_LOCAL_NAME: 2898 hci_cc_write_local_name(hdev, skb); 2899 break; 2900 2901 case HCI_OP_READ_LOCAL_NAME: 2902 hci_cc_read_local_name(hdev, skb); 2903 break; 2904 2905 case HCI_OP_WRITE_AUTH_ENABLE: 2906 hci_cc_write_auth_enable(hdev, skb); 2907 break; 2908 2909 case HCI_OP_WRITE_ENCRYPT_MODE: 2910 hci_cc_write_encrypt_mode(hdev, skb); 2911 break; 2912 2913 case HCI_OP_WRITE_SCAN_ENABLE: 2914 hci_cc_write_scan_enable(hdev, skb); 2915 break; 2916 2917 case HCI_OP_READ_CLASS_OF_DEV: 2918 hci_cc_read_class_of_dev(hdev, skb); 2919 break; 2920 2921 case HCI_OP_WRITE_CLASS_OF_DEV: 2922 hci_cc_write_class_of_dev(hdev, skb); 2923 break; 2924 2925 case HCI_OP_READ_VOICE_SETTING: 2926 hci_cc_read_voice_setting(hdev, skb); 2927 break; 2928 2929 case HCI_OP_WRITE_VOICE_SETTING: 2930 hci_cc_write_voice_setting(hdev, skb); 2931 break; 2932 2933 case HCI_OP_READ_NUM_SUPPORTED_IAC: 2934 hci_cc_read_num_supported_iac(hdev, skb); 2935 break; 2936 2937 case HCI_OP_WRITE_SSP_MODE: 2938 hci_cc_write_ssp_mode(hdev, skb); 2939 break; 2940 2941 case HCI_OP_WRITE_SC_SUPPORT: 2942 hci_cc_write_sc_support(hdev, skb); 2943 break; 2944 2945 case HCI_OP_READ_LOCAL_VERSION: 2946 hci_cc_read_local_version(hdev, skb); 2947 break; 2948 2949 case HCI_OP_READ_LOCAL_COMMANDS: 2950 hci_cc_read_local_commands(hdev, skb); 2951 break; 2952 2953 case HCI_OP_READ_LOCAL_FEATURES: 2954 hci_cc_read_local_features(hdev, skb); 2955 break; 2956 2957 case HCI_OP_READ_LOCAL_EXT_FEATURES: 2958 hci_cc_read_local_ext_features(hdev, skb); 2959 break; 2960 2961 case 
HCI_OP_READ_BUFFER_SIZE: 2962 hci_cc_read_buffer_size(hdev, skb); 2963 break; 2964 2965 case HCI_OP_READ_BD_ADDR: 2966 hci_cc_read_bd_addr(hdev, skb); 2967 break; 2968 2969 case HCI_OP_READ_PAGE_SCAN_ACTIVITY: 2970 hci_cc_read_page_scan_activity(hdev, skb); 2971 break; 2972 2973 case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY: 2974 hci_cc_write_page_scan_activity(hdev, skb); 2975 break; 2976 2977 case HCI_OP_READ_PAGE_SCAN_TYPE: 2978 hci_cc_read_page_scan_type(hdev, skb); 2979 break; 2980 2981 case HCI_OP_WRITE_PAGE_SCAN_TYPE: 2982 hci_cc_write_page_scan_type(hdev, skb); 2983 break; 2984 2985 case HCI_OP_READ_DATA_BLOCK_SIZE: 2986 hci_cc_read_data_block_size(hdev, skb); 2987 break; 2988 2989 case HCI_OP_READ_FLOW_CONTROL_MODE: 2990 hci_cc_read_flow_control_mode(hdev, skb); 2991 break; 2992 2993 case HCI_OP_READ_LOCAL_AMP_INFO: 2994 hci_cc_read_local_amp_info(hdev, skb); 2995 break; 2996 2997 case HCI_OP_READ_CLOCK: 2998 hci_cc_read_clock(hdev, skb); 2999 break; 3000 3001 case HCI_OP_READ_LOCAL_AMP_ASSOC: 3002 hci_cc_read_local_amp_assoc(hdev, skb); 3003 break; 3004 3005 case HCI_OP_READ_INQ_RSP_TX_POWER: 3006 hci_cc_read_inq_rsp_tx_power(hdev, skb); 3007 break; 3008 3009 case HCI_OP_PIN_CODE_REPLY: 3010 hci_cc_pin_code_reply(hdev, skb); 3011 break; 3012 3013 case HCI_OP_PIN_CODE_NEG_REPLY: 3014 hci_cc_pin_code_neg_reply(hdev, skb); 3015 break; 3016 3017 case HCI_OP_READ_LOCAL_OOB_DATA: 3018 hci_cc_read_local_oob_data(hdev, skb); 3019 break; 3020 3021 case HCI_OP_READ_LOCAL_OOB_EXT_DATA: 3022 hci_cc_read_local_oob_ext_data(hdev, skb); 3023 break; 3024 3025 case HCI_OP_LE_READ_BUFFER_SIZE: 3026 hci_cc_le_read_buffer_size(hdev, skb); 3027 break; 3028 3029 case HCI_OP_LE_READ_LOCAL_FEATURES: 3030 hci_cc_le_read_local_features(hdev, skb); 3031 break; 3032 3033 case HCI_OP_LE_READ_ADV_TX_POWER: 3034 hci_cc_le_read_adv_tx_power(hdev, skb); 3035 break; 3036 3037 case HCI_OP_USER_CONFIRM_REPLY: 3038 hci_cc_user_confirm_reply(hdev, skb); 3039 break; 3040 3041 case 
HCI_OP_USER_CONFIRM_NEG_REPLY: 3042 hci_cc_user_confirm_neg_reply(hdev, skb); 3043 break; 3044 3045 case HCI_OP_USER_PASSKEY_REPLY: 3046 hci_cc_user_passkey_reply(hdev, skb); 3047 break; 3048 3049 case HCI_OP_USER_PASSKEY_NEG_REPLY: 3050 hci_cc_user_passkey_neg_reply(hdev, skb); 3051 break; 3052 3053 case HCI_OP_LE_SET_RANDOM_ADDR: 3054 hci_cc_le_set_random_addr(hdev, skb); 3055 break; 3056 3057 case HCI_OP_LE_SET_ADV_ENABLE: 3058 hci_cc_le_set_adv_enable(hdev, skb); 3059 break; 3060 3061 case HCI_OP_LE_SET_SCAN_PARAM: 3062 hci_cc_le_set_scan_param(hdev, skb); 3063 break; 3064 3065 case HCI_OP_LE_SET_SCAN_ENABLE: 3066 hci_cc_le_set_scan_enable(hdev, skb); 3067 break; 3068 3069 case HCI_OP_LE_READ_WHITE_LIST_SIZE: 3070 hci_cc_le_read_white_list_size(hdev, skb); 3071 break; 3072 3073 case HCI_OP_LE_CLEAR_WHITE_LIST: 3074 hci_cc_le_clear_white_list(hdev, skb); 3075 break; 3076 3077 case HCI_OP_LE_ADD_TO_WHITE_LIST: 3078 hci_cc_le_add_to_white_list(hdev, skb); 3079 break; 3080 3081 case HCI_OP_LE_DEL_FROM_WHITE_LIST: 3082 hci_cc_le_del_from_white_list(hdev, skb); 3083 break; 3084 3085 case HCI_OP_LE_READ_SUPPORTED_STATES: 3086 hci_cc_le_read_supported_states(hdev, skb); 3087 break; 3088 3089 case HCI_OP_LE_READ_DEF_DATA_LEN: 3090 hci_cc_le_read_def_data_len(hdev, skb); 3091 break; 3092 3093 case HCI_OP_LE_WRITE_DEF_DATA_LEN: 3094 hci_cc_le_write_def_data_len(hdev, skb); 3095 break; 3096 3097 case HCI_OP_LE_READ_MAX_DATA_LEN: 3098 hci_cc_le_read_max_data_len(hdev, skb); 3099 break; 3100 3101 case HCI_OP_WRITE_LE_HOST_SUPPORTED: 3102 hci_cc_write_le_host_supported(hdev, skb); 3103 break; 3104 3105 case HCI_OP_LE_SET_ADV_PARAM: 3106 hci_cc_set_adv_param(hdev, skb); 3107 break; 3108 3109 case HCI_OP_WRITE_REMOTE_AMP_ASSOC: 3110 hci_cc_write_remote_amp_assoc(hdev, skb); 3111 break; 3112 3113 case HCI_OP_READ_RSSI: 3114 hci_cc_read_rssi(hdev, skb); 3115 break; 3116 3117 case HCI_OP_READ_TX_POWER: 3118 hci_cc_read_tx_power(hdev, skb); 3119 break; 3120 3121 case 
HCI_OP_WRITE_SSP_DEBUG_MODE: 3122 hci_cc_write_ssp_debug_mode(hdev, skb); 3123 break; 3124 3125 default: 3126 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode); 3127 break; 3128 } 3129 3130 if (*opcode != HCI_OP_NOP) 3131 cancel_delayed_work(&hdev->cmd_timer); 3132 3133 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) 3134 atomic_set(&hdev->cmd_cnt, 1); 3135 3136 hci_req_cmd_complete(hdev, *opcode, *status, req_complete, 3137 req_complete_skb); 3138 3139 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q)) 3140 queue_work(hdev->workqueue, &hdev->cmd_work); 3141 } 3142 3143 static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb, 3144 u16 *opcode, u8 *status, 3145 hci_req_complete_t *req_complete, 3146 hci_req_complete_skb_t *req_complete_skb) 3147 { 3148 struct hci_ev_cmd_status *ev = (void *) skb->data; 3149 3150 skb_pull(skb, sizeof(*ev)); 3151 3152 *opcode = __le16_to_cpu(ev->opcode); 3153 *status = ev->status; 3154 3155 switch (*opcode) { 3156 case HCI_OP_INQUIRY: 3157 hci_cs_inquiry(hdev, ev->status); 3158 break; 3159 3160 case HCI_OP_CREATE_CONN: 3161 hci_cs_create_conn(hdev, ev->status); 3162 break; 3163 3164 case HCI_OP_DISCONNECT: 3165 hci_cs_disconnect(hdev, ev->status); 3166 break; 3167 3168 case HCI_OP_ADD_SCO: 3169 hci_cs_add_sco(hdev, ev->status); 3170 break; 3171 3172 case HCI_OP_AUTH_REQUESTED: 3173 hci_cs_auth_requested(hdev, ev->status); 3174 break; 3175 3176 case HCI_OP_SET_CONN_ENCRYPT: 3177 hci_cs_set_conn_encrypt(hdev, ev->status); 3178 break; 3179 3180 case HCI_OP_REMOTE_NAME_REQ: 3181 hci_cs_remote_name_req(hdev, ev->status); 3182 break; 3183 3184 case HCI_OP_READ_REMOTE_FEATURES: 3185 hci_cs_read_remote_features(hdev, ev->status); 3186 break; 3187 3188 case HCI_OP_READ_REMOTE_EXT_FEATURES: 3189 hci_cs_read_remote_ext_features(hdev, ev->status); 3190 break; 3191 3192 case HCI_OP_SETUP_SYNC_CONN: 3193 hci_cs_setup_sync_conn(hdev, ev->status); 3194 break; 3195 3196 case HCI_OP_CREATE_PHY_LINK: 3197 
hci_cs_create_phylink(hdev, ev->status); 3198 break; 3199 3200 case HCI_OP_ACCEPT_PHY_LINK: 3201 hci_cs_accept_phylink(hdev, ev->status); 3202 break; 3203 3204 case HCI_OP_SNIFF_MODE: 3205 hci_cs_sniff_mode(hdev, ev->status); 3206 break; 3207 3208 case HCI_OP_EXIT_SNIFF_MODE: 3209 hci_cs_exit_sniff_mode(hdev, ev->status); 3210 break; 3211 3212 case HCI_OP_SWITCH_ROLE: 3213 hci_cs_switch_role(hdev, ev->status); 3214 break; 3215 3216 case HCI_OP_LE_CREATE_CONN: 3217 hci_cs_le_create_conn(hdev, ev->status); 3218 break; 3219 3220 case HCI_OP_LE_READ_REMOTE_FEATURES: 3221 hci_cs_le_read_remote_features(hdev, ev->status); 3222 break; 3223 3224 case HCI_OP_LE_START_ENC: 3225 hci_cs_le_start_enc(hdev, ev->status); 3226 break; 3227 3228 default: 3229 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode); 3230 break; 3231 } 3232 3233 if (*opcode != HCI_OP_NOP) 3234 cancel_delayed_work(&hdev->cmd_timer); 3235 3236 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) 3237 atomic_set(&hdev->cmd_cnt, 1); 3238 3239 /* Indicate request completion if the command failed. Also, if 3240 * we're not waiting for a special event and we get a success 3241 * command status we should try to flag the request as completed 3242 * (since for this kind of commands there will not be a command 3243 * complete event). 
3244 */ 3245 if (ev->status || 3246 (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event)) 3247 hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete, 3248 req_complete_skb); 3249 3250 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q)) 3251 queue_work(hdev->workqueue, &hdev->cmd_work); 3252 } 3253 3254 static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb) 3255 { 3256 struct hci_ev_hardware_error *ev = (void *) skb->data; 3257 3258 hdev->hw_error_code = ev->code; 3259 3260 queue_work(hdev->req_workqueue, &hdev->error_reset); 3261 } 3262 3263 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb) 3264 { 3265 struct hci_ev_role_change *ev = (void *) skb->data; 3266 struct hci_conn *conn; 3267 3268 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 3269 3270 hci_dev_lock(hdev); 3271 3272 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 3273 if (conn) { 3274 if (!ev->status) 3275 conn->role = ev->role; 3276 3277 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags); 3278 3279 hci_role_switch_cfm(conn, ev->status, ev->role); 3280 } 3281 3282 hci_dev_unlock(hdev); 3283 } 3284 3285 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb) 3286 { 3287 struct hci_ev_num_comp_pkts *ev = (void *) skb->data; 3288 int i; 3289 3290 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) { 3291 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode); 3292 return; 3293 } 3294 3295 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) + 3296 ev->num_hndl * sizeof(struct hci_comp_pkts_info)) { 3297 BT_DBG("%s bad parameters", hdev->name); 3298 return; 3299 } 3300 3301 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl); 3302 3303 for (i = 0; i < ev->num_hndl; i++) { 3304 struct hci_comp_pkts_info *info = &ev->handles[i]; 3305 struct hci_conn *conn; 3306 __u16 handle, count; 3307 3308 handle = __le16_to_cpu(info->handle); 3309 count = __le16_to_cpu(info->count); 3310 3311 conn = 
hci_conn_hash_lookup_handle(hdev, handle); 3312 if (!conn) 3313 continue; 3314 3315 conn->sent -= count; 3316 3317 switch (conn->type) { 3318 case ACL_LINK: 3319 hdev->acl_cnt += count; 3320 if (hdev->acl_cnt > hdev->acl_pkts) 3321 hdev->acl_cnt = hdev->acl_pkts; 3322 break; 3323 3324 case LE_LINK: 3325 if (hdev->le_pkts) { 3326 hdev->le_cnt += count; 3327 if (hdev->le_cnt > hdev->le_pkts) 3328 hdev->le_cnt = hdev->le_pkts; 3329 } else { 3330 hdev->acl_cnt += count; 3331 if (hdev->acl_cnt > hdev->acl_pkts) 3332 hdev->acl_cnt = hdev->acl_pkts; 3333 } 3334 break; 3335 3336 case SCO_LINK: 3337 hdev->sco_cnt += count; 3338 if (hdev->sco_cnt > hdev->sco_pkts) 3339 hdev->sco_cnt = hdev->sco_pkts; 3340 break; 3341 3342 default: 3343 BT_ERR("Unknown type %d conn %p", conn->type, conn); 3344 break; 3345 } 3346 } 3347 3348 queue_work(hdev->workqueue, &hdev->tx_work); 3349 } 3350 3351 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev, 3352 __u16 handle) 3353 { 3354 struct hci_chan *chan; 3355 3356 switch (hdev->dev_type) { 3357 case HCI_BREDR: 3358 return hci_conn_hash_lookup_handle(hdev, handle); 3359 case HCI_AMP: 3360 chan = hci_chan_lookup_handle(hdev, handle); 3361 if (chan) 3362 return chan->conn; 3363 break; 3364 default: 3365 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type); 3366 break; 3367 } 3368 3369 return NULL; 3370 } 3371 3372 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb) 3373 { 3374 struct hci_ev_num_comp_blocks *ev = (void *) skb->data; 3375 int i; 3376 3377 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) { 3378 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode); 3379 return; 3380 } 3381 3382 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) + 3383 ev->num_hndl * sizeof(struct hci_comp_blocks_info)) { 3384 BT_DBG("%s bad parameters", hdev->name); 3385 return; 3386 } 3387 3388 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks, 3389 ev->num_hndl); 3390 3391 for (i = 
0; i < ev->num_hndl; i++) { 3392 struct hci_comp_blocks_info *info = &ev->handles[i]; 3393 struct hci_conn *conn = NULL; 3394 __u16 handle, block_count; 3395 3396 handle = __le16_to_cpu(info->handle); 3397 block_count = __le16_to_cpu(info->blocks); 3398 3399 conn = __hci_conn_lookup_handle(hdev, handle); 3400 if (!conn) 3401 continue; 3402 3403 conn->sent -= block_count; 3404 3405 switch (conn->type) { 3406 case ACL_LINK: 3407 case AMP_LINK: 3408 hdev->block_cnt += block_count; 3409 if (hdev->block_cnt > hdev->num_blocks) 3410 hdev->block_cnt = hdev->num_blocks; 3411 break; 3412 3413 default: 3414 BT_ERR("Unknown type %d conn %p", conn->type, conn); 3415 break; 3416 } 3417 } 3418 3419 queue_work(hdev->workqueue, &hdev->tx_work); 3420 } 3421 3422 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb) 3423 { 3424 struct hci_ev_mode_change *ev = (void *) skb->data; 3425 struct hci_conn *conn; 3426 3427 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 3428 3429 hci_dev_lock(hdev); 3430 3431 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 3432 if (conn) { 3433 conn->mode = ev->mode; 3434 3435 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, 3436 &conn->flags)) { 3437 if (conn->mode == HCI_CM_ACTIVE) 3438 set_bit(HCI_CONN_POWER_SAVE, &conn->flags); 3439 else 3440 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags); 3441 } 3442 3443 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags)) 3444 hci_sco_setup(conn, ev->status); 3445 } 3446 3447 hci_dev_unlock(hdev); 3448 } 3449 3450 static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb) 3451 { 3452 struct hci_ev_pin_code_req *ev = (void *) skb->data; 3453 struct hci_conn *conn; 3454 3455 BT_DBG("%s", hdev->name); 3456 3457 hci_dev_lock(hdev); 3458 3459 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 3460 if (!conn) 3461 goto unlock; 3462 3463 if (conn->state == BT_CONNECTED) { 3464 hci_conn_hold(conn); 3465 conn->disc_timeout = 
HCI_PAIRING_TIMEOUT; 3466 hci_conn_drop(conn); 3467 } 3468 3469 if (!hci_dev_test_flag(hdev, HCI_BONDABLE) && 3470 !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) { 3471 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY, 3472 sizeof(ev->bdaddr), &ev->bdaddr); 3473 } else if (hci_dev_test_flag(hdev, HCI_MGMT)) { 3474 u8 secure; 3475 3476 if (conn->pending_sec_level == BT_SECURITY_HIGH) 3477 secure = 1; 3478 else 3479 secure = 0; 3480 3481 mgmt_pin_code_request(hdev, &ev->bdaddr, secure); 3482 } 3483 3484 unlock: 3485 hci_dev_unlock(hdev); 3486 } 3487 3488 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len) 3489 { 3490 if (key_type == HCI_LK_CHANGED_COMBINATION) 3491 return; 3492 3493 conn->pin_length = pin_len; 3494 conn->key_type = key_type; 3495 3496 switch (key_type) { 3497 case HCI_LK_LOCAL_UNIT: 3498 case HCI_LK_REMOTE_UNIT: 3499 case HCI_LK_DEBUG_COMBINATION: 3500 return; 3501 case HCI_LK_COMBINATION: 3502 if (pin_len == 16) 3503 conn->pending_sec_level = BT_SECURITY_HIGH; 3504 else 3505 conn->pending_sec_level = BT_SECURITY_MEDIUM; 3506 break; 3507 case HCI_LK_UNAUTH_COMBINATION_P192: 3508 case HCI_LK_UNAUTH_COMBINATION_P256: 3509 conn->pending_sec_level = BT_SECURITY_MEDIUM; 3510 break; 3511 case HCI_LK_AUTH_COMBINATION_P192: 3512 conn->pending_sec_level = BT_SECURITY_HIGH; 3513 break; 3514 case HCI_LK_AUTH_COMBINATION_P256: 3515 conn->pending_sec_level = BT_SECURITY_FIPS; 3516 break; 3517 } 3518 } 3519 3520 static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb) 3521 { 3522 struct hci_ev_link_key_req *ev = (void *) skb->data; 3523 struct hci_cp_link_key_reply cp; 3524 struct hci_conn *conn; 3525 struct link_key *key; 3526 3527 BT_DBG("%s", hdev->name); 3528 3529 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 3530 return; 3531 3532 hci_dev_lock(hdev); 3533 3534 key = hci_find_link_key(hdev, &ev->bdaddr); 3535 if (!key) { 3536 BT_DBG("%s link key not found for %pMR", hdev->name, 3537 &ev->bdaddr); 3538 goto not_found; 3539 } 
3540 3541 BT_DBG("%s found key type %u for %pMR", hdev->name, key->type, 3542 &ev->bdaddr); 3543 3544 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 3545 if (conn) { 3546 clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags); 3547 3548 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 || 3549 key->type == HCI_LK_UNAUTH_COMBINATION_P256) && 3550 conn->auth_type != 0xff && (conn->auth_type & 0x01)) { 3551 BT_DBG("%s ignoring unauthenticated key", hdev->name); 3552 goto not_found; 3553 } 3554 3555 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 && 3556 (conn->pending_sec_level == BT_SECURITY_HIGH || 3557 conn->pending_sec_level == BT_SECURITY_FIPS)) { 3558 BT_DBG("%s ignoring key unauthenticated for high security", 3559 hdev->name); 3560 goto not_found; 3561 } 3562 3563 conn_set_key(conn, key->type, key->pin_len); 3564 } 3565 3566 bacpy(&cp.bdaddr, &ev->bdaddr); 3567 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE); 3568 3569 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp); 3570 3571 hci_dev_unlock(hdev); 3572 3573 return; 3574 3575 not_found: 3576 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr); 3577 hci_dev_unlock(hdev); 3578 } 3579 3580 static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb) 3581 { 3582 struct hci_ev_link_key_notify *ev = (void *) skb->data; 3583 struct hci_conn *conn; 3584 struct link_key *key; 3585 bool persistent; 3586 u8 pin_len = 0; 3587 3588 BT_DBG("%s", hdev->name); 3589 3590 hci_dev_lock(hdev); 3591 3592 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 3593 if (!conn) 3594 goto unlock; 3595 3596 hci_conn_hold(conn); 3597 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 3598 hci_conn_drop(conn); 3599 3600 set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags); 3601 conn_set_key(conn, ev->key_type, conn->pin_length); 3602 3603 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 3604 goto unlock; 3605 3606 key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key, 3607 ev->key_type, pin_len, 
&persistent); 3608 if (!key) 3609 goto unlock; 3610 3611 /* Update connection information since adding the key will have 3612 * fixed up the type in the case of changed combination keys. 3613 */ 3614 if (ev->key_type == HCI_LK_CHANGED_COMBINATION) 3615 conn_set_key(conn, key->type, key->pin_len); 3616 3617 mgmt_new_link_key(hdev, key, persistent); 3618 3619 /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag 3620 * is set. If it's not set simply remove the key from the kernel 3621 * list (we've still notified user space about it but with 3622 * store_hint being 0). 3623 */ 3624 if (key->type == HCI_LK_DEBUG_COMBINATION && 3625 !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) { 3626 list_del_rcu(&key->list); 3627 kfree_rcu(key, rcu); 3628 goto unlock; 3629 } 3630 3631 if (persistent) 3632 clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags); 3633 else 3634 set_bit(HCI_CONN_FLUSH_KEY, &conn->flags); 3635 3636 unlock: 3637 hci_dev_unlock(hdev); 3638 } 3639 3640 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb) 3641 { 3642 struct hci_ev_clock_offset *ev = (void *) skb->data; 3643 struct hci_conn *conn; 3644 3645 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 3646 3647 hci_dev_lock(hdev); 3648 3649 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 3650 if (conn && !ev->status) { 3651 struct inquiry_entry *ie; 3652 3653 ie = hci_inquiry_cache_lookup(hdev, &conn->dst); 3654 if (ie) { 3655 ie->data.clock_offset = ev->clock_offset; 3656 ie->timestamp = jiffies; 3657 } 3658 } 3659 3660 hci_dev_unlock(hdev); 3661 } 3662 3663 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb) 3664 { 3665 struct hci_ev_pkt_type_change *ev = (void *) skb->data; 3666 struct hci_conn *conn; 3667 3668 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 3669 3670 hci_dev_lock(hdev); 3671 3672 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 3673 if (conn && !ev->status) 3674 conn->pkt_type = 
__le16_to_cpu(ev->pkt_type); 3675 3676 hci_dev_unlock(hdev); 3677 } 3678 3679 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb) 3680 { 3681 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data; 3682 struct inquiry_entry *ie; 3683 3684 BT_DBG("%s", hdev->name); 3685 3686 hci_dev_lock(hdev); 3687 3688 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr); 3689 if (ie) { 3690 ie->data.pscan_rep_mode = ev->pscan_rep_mode; 3691 ie->timestamp = jiffies; 3692 } 3693 3694 hci_dev_unlock(hdev); 3695 } 3696 3697 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, 3698 struct sk_buff *skb) 3699 { 3700 struct inquiry_data data; 3701 int num_rsp = *((__u8 *) skb->data); 3702 3703 BT_DBG("%s num_rsp %d", hdev->name, num_rsp); 3704 3705 if (!num_rsp) 3706 return; 3707 3708 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) 3709 return; 3710 3711 hci_dev_lock(hdev); 3712 3713 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) { 3714 struct inquiry_info_with_rssi_and_pscan_mode *info; 3715 info = (void *) (skb->data + 1); 3716 3717 for (; num_rsp; num_rsp--, info++) { 3718 u32 flags; 3719 3720 bacpy(&data.bdaddr, &info->bdaddr); 3721 data.pscan_rep_mode = info->pscan_rep_mode; 3722 data.pscan_period_mode = info->pscan_period_mode; 3723 data.pscan_mode = info->pscan_mode; 3724 memcpy(data.dev_class, info->dev_class, 3); 3725 data.clock_offset = info->clock_offset; 3726 data.rssi = info->rssi; 3727 data.ssp_mode = 0x00; 3728 3729 flags = hci_inquiry_cache_update(hdev, &data, false); 3730 3731 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 3732 info->dev_class, info->rssi, 3733 flags, NULL, 0, NULL, 0); 3734 } 3735 } else { 3736 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1); 3737 3738 for (; num_rsp; num_rsp--, info++) { 3739 u32 flags; 3740 3741 bacpy(&data.bdaddr, &info->bdaddr); 3742 data.pscan_rep_mode = info->pscan_rep_mode; 3743 data.pscan_period_mode = info->pscan_period_mode; 3744 data.pscan_mode = 
0x00; 3745 memcpy(data.dev_class, info->dev_class, 3); 3746 data.clock_offset = info->clock_offset; 3747 data.rssi = info->rssi; 3748 data.ssp_mode = 0x00; 3749 3750 flags = hci_inquiry_cache_update(hdev, &data, false); 3751 3752 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 3753 info->dev_class, info->rssi, 3754 flags, NULL, 0, NULL, 0); 3755 } 3756 } 3757 3758 hci_dev_unlock(hdev); 3759 } 3760 3761 static void hci_remote_ext_features_evt(struct hci_dev *hdev, 3762 struct sk_buff *skb) 3763 { 3764 struct hci_ev_remote_ext_features *ev = (void *) skb->data; 3765 struct hci_conn *conn; 3766 3767 BT_DBG("%s", hdev->name); 3768 3769 hci_dev_lock(hdev); 3770 3771 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 3772 if (!conn) 3773 goto unlock; 3774 3775 if (ev->page < HCI_MAX_PAGES) 3776 memcpy(conn->features[ev->page], ev->features, 8); 3777 3778 if (!ev->status && ev->page == 0x01) { 3779 struct inquiry_entry *ie; 3780 3781 ie = hci_inquiry_cache_lookup(hdev, &conn->dst); 3782 if (ie) 3783 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP); 3784 3785 if (ev->features[0] & LMP_HOST_SSP) { 3786 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags); 3787 } else { 3788 /* It is mandatory by the Bluetooth specification that 3789 * Extended Inquiry Results are only used when Secure 3790 * Simple Pairing is enabled, but some devices violate 3791 * this. 
			 *
			 * To make these devices work, the internal SSP
			 * enabled flag needs to be cleared if the remote host
			 * features do not indicate SSP support */
			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		}

		if (ev->features[0] & LMP_HOST_SC)
			set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
	}

	/* Below only applies while the connection is still being set up */
	if (conn->state != BT_CONFIG)
		goto unlock;

	/* Kick off the remote name request (or report the connection to
	 * mgmt) now that the extended features are known.
	 */
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	/* If no outgoing authentication is needed the connection setup
	 * is complete at this point.
	 */
	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}

/* Handle Synchronous Connection Complete event for SCO/eSCO links.
 * On certain failure codes for an outgoing eSCO attempt, retry setup
 * with a downgraded packet type before giving up.
 */
static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		/* An eSCO attempt may complete as a plain SCO link;
		 * look the connection up under ESCO and retag it.
		 */
		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	switch (ev->status) {
	case 0x00:
		conn->handle = __le16_to_cpu(ev->handle);
		conn->state = BT_CONNECTED;

		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);
		break;

	case 0x10:	/* Connection Accept Timeout */
	case 0x0d:	/* Connection Rejected due to Limited Resources */
	case 0x11:	/* Unsupported Feature or Parameter Value */
	case 0x1c:	/* SCO interval rejected */
	case 0x1a:	/* Unsupported Remote Feature */
	case 0x1f:	/* Unspecified error */
	case 0x20:	/* Unsupported LMP Parameter value */
		if (conn->out) {
			/* Retry with a less demanding packet type before
			 * declaring the connection closed.
			 */
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
			if (hci_setup_sync(conn, conn->link->handle))
				goto unlock;
		}
		/* fall through */

	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_connect_cfm(conn, ev->status);
	if (ev->status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}

/* Return the number of significant bytes in an EIR blob: the length up
 * to (but excluding) the first zero-length field, or eir_len if the
 * blob is fully populated.
 */
static inline size_t eir_get_length(u8 *eir, size_t eir_len)
{
	size_t parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];

		if (field_len == 0)
			return parsed;

		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return eir_len;
}

/* Handle Extended Inquiry Result event: update the inquiry cache and
 * forward each response, including its EIR data, to mgmt.
 */
static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct inquiry_data data;
	struct extended_inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp = *((__u8 *) skb->data);
	size_t eir_len;

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
		return;

	hci_dev_lock(hdev);

	for (; num_rsp; num_rsp--, info++) {
		u32 flags;
		bool name_known;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode = info->pscan_rep_mode;
		data.pscan_period_mode = info->pscan_period_mode;
		data.pscan_mode = 0x00;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset = info->clock_offset;
		data.rssi = info->rssi;
		data.ssp_mode = 0x01;

		/* With mgmt enabled the name is only "known" if the EIR
		 * data already carries a complete name field.
		 */
		if (hci_dev_test_flag(hdev, HCI_MGMT))
			name_known = eir_has_data_type(info->data,
						       sizeof(info->data),
						       EIR_NAME_COMPLETE);
		else
			name_known = true;

		flags = hci_inquiry_cache_update(hdev, &data, name_known);

		eir_len = eir_get_length(info->data, sizeof(info->data));

		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, info->rssi,
				  flags, info->data, eir_len, NULL, 0);
	}

	hci_dev_unlock(hdev);
}

/* Handle Encryption Key Refresh Complete event. Only LE links are
 * processed here; BR/EDR takes the equivalent steps from the
 * auth_complete event instead.
 */
static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
	       __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* For BR/EDR the necessary steps are taken through the
	 * auth_complete event.
	 */
	if (conn->type != LE_LINK)
		goto unlock;

	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* A failed refresh on an established link means the encryption
	 * could not be maintained; tear the link down.
	 */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}

/* Compute the authentication requirement to send in an IO Capability
 * Reply, combining the remote's stated requirement with our own
 * bonding/MITM preferences.
 */
static u8 hci_get_auth_req(struct hci_conn *conn)
{
	/* If remote requests no-bonding follow that lead */
	if (conn->remote_auth == HCI_AT_NO_BONDING ||
	    conn->remote_auth == HCI_AT_NO_BONDING_MITM)
		return conn->remote_auth | (conn->auth_type & 0x01);

	/* If both remote and local have enough IO capabilities, require
	 * MITM protection
	 */
	if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
	    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
		return conn->remote_auth | 0x01;

	/* No MITM protection possible so ignore remote requirement */
	return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
}

/* Report which OOB data (P-192 and/or P-256) is available for this
 * connection's peer: 0x00 none, 0x01 P-192, 0x02 P-256.
 */
static u8 bredr_oob_data_present(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
	if (!data)
		return 0x00;

	if (bredr_sc_enabled(hdev)) {
		/* When Secure Connections is enabled, then just
		 * return the present value stored with the OOB
		 * data. The stored value contains the right present
		 * information. However it can only be trusted when
		 * not in Secure Connection Only mode.
		 */
		if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
			return data->present;

		/* When Secure Connections Only mode is enabled, then
		 * the P-256 values are required. If they are not
		 * available, then do not declare that OOB data is
		 * present.
		 */
		if (!memcmp(data->rand256, ZERO_KEY, 16) ||
		    !memcmp(data->hash256, ZERO_KEY, 16))
			return 0x00;

		return 0x02;
	}

	/* When Secure Connections is not enabled or actually
	 * not supported by the hardware, then check that if
	 * P-192 data values are present.
	 */
	if (!memcmp(data->rand192, ZERO_KEY, 16) ||
	    !memcmp(data->hash192, ZERO_KEY, 16))
		return 0x00;

	return 0x01;
}

/* Handle IO Capability Request event: reply with our IO capability and
 * authentication requirements if pairing is allowed, otherwise send a
 * negative reply.
 */
static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	hci_conn_hold(conn);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	/* Allow pairing if we're pairable, the initiators of the
	 * pairing or if the remote is not requesting bonding.
	 */
	if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
	    test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
				HCI_IO_DISPLAY_YESNO : conn->io_capability;

		/* If we are initiators, there is no remote information yet */
		if (conn->remote_auth == 0xff) {
			/* Request MITM protection if our IO caps allow it
			 * except for the no-bonding case.
			 */
			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
			    conn->auth_type != HCI_AT_NO_BONDING)
				conn->auth_type |= 0x01;
		} else {
			conn->auth_type = hci_get_auth_req(conn);
		}

		/* If we're not bondable, force one of the non-bondable
		 * authentication requirement values.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
			conn->auth_type &= HCI_AT_NO_BONDING_MITM;

		cp.authentication = conn->auth_type;
		cp.oob_data = bredr_oob_data_present(conn);

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}

/* Handle IO Capability Response event: record the remote's IO
 * capability and authentication requirement on the connection.
 */
static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_reply *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	conn->remote_cap = ev->capability;
	conn->remote_auth = ev->authentication;

unlock:
	hci_dev_unlock(hdev);
}

/* Handle User Confirmation Request event: auto-accept, reject, or
 * forward the numeric-comparison confirmation to user space depending
 * on the MITM requirements and IO capabilities of both sides.
 */
static void hci_user_confirm_request_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. We check the security level here since it doesn't
	 * necessarily match conn->auth_type.
	 */
	if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
	    conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). The exception is if neither
		 * side had MITM or if the local IO capability is
		 * NoInputNoOutput, in which case we do auto-accept
		 */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
		    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
		    (loc_mitm || rem_mitm)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		/* Optionally delay the auto-accept; the delayed work sends
		 * the confirm reply later.
		 */
		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			queue_delayed_work(conn->hdev->workqueue,
					   &conn->auto_accept_work, delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
				  le32_to_cpu(ev->passkey), confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}

/* Handle User Passkey Request event by forwarding it to user space
 * via mgmt (only meaningful when mgmt is in use).
 */
static void hci_user_passkey_request_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_user_passkey_req *ev = (void *) skb->data;

	BT_DBG("%s", hdev->name);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
}

/* Handle User Passkey Notification event: record the passkey to be
 * displayed and reset the entered-digit counter, then notify mgmt.
 */
static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		return;

	conn->passkey_notify = __le32_to_cpu(ev->passkey);
	conn->passkey_entered = 0;

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
					 conn->dst_type, conn->passkey_notify,
					 conn->passkey_entered);
}

/* Handle Keypress Notification event: track how many passkey digits
 * the remote side has entered and relay progress to mgmt. STARTED and
 * COMPLETED generate no mgmt notification.
 */
static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_keypress_notify *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		return;

	switch (ev->type) {
	case HCI_KEYPRESS_STARTED:
		conn->passkey_entered = 0;
		return;

	case HCI_KEYPRESS_ENTERED:
		conn->passkey_entered++;
		break;

	case HCI_KEYPRESS_ERASED:
		conn->passkey_entered--;
		break;

	case HCI_KEYPRESS_CLEARED:
		conn->passkey_entered = 0;
		break;

	case HCI_KEYPRESS_COMPLETED:
		return;
	}

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
					 conn->dst_type, conn->passkey_notify,
					 conn->passkey_entered);
}

/* Handle Simple Pairing Complete event: reset the cached remote
 * authentication requirement and report pairing failures to mgmt.
 */
static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Reset the authentication requirement to unknown */
	conn->remote_auth = 0xff;

	/* To avoid duplicate auth_failed events to user space we check
	 * the HCI_CONN_AUTH_PEND flag which will be set if we
	 * initiated the authentication. A traditional auth_complete
	 * event gets always produced as initiator and is also mapped to
	 * the mgmt_auth_failed event */
	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
		mgmt_auth_failed(conn, ev->status);

	hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}

/* Handle Remote Host Supported Features Notification event: cache the
 * host feature page on the connection (page 1) and track the remote's
 * SSP support in the inquiry cache.
 */
static void hci_remote_host_features_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_remote_host_features *ev = (void *) skb->data;
	struct inquiry_entry *ie;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn)
		memcpy(conn->features[1], ev->features, 8);

	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

	hci_dev_unlock(hdev);
}

/* Handle Remote OOB Data Request event: answer with the stored OOB
 * hash/randomizer values (P-192 and, when Secure Connections is
 * enabled, P-256), or a negative reply if none are stored.
 */
static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
	struct oob_data *data;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
	if (!data) {
		struct hci_cp_remote_oob_data_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
			     sizeof(cp), &cp);
		goto unlock;
	}

	if (bredr_sc_enabled(hdev)) {
		struct hci_cp_remote_oob_ext_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* In SC-only mode the P-192 values must not be used */
		if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
			memset(cp.hash192, 0, sizeof(cp.hash192));
			memset(cp.rand192, 0, sizeof(cp.rand192));
		}
		else {
			memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
			memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
		}
		memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
		memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_remote_oob_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		memcpy(cp.hash, data->hash192, sizeof(cp.hash));
		memcpy(cp.rand, data->rand192, sizeof(cp.rand));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}

/* Handle AMP Physical Link Complete event: mark the physical link as
 * connected and confirm it to the AMP manager, inheriting the peer
 * address from the controlling BR/EDR connection.
 */
static void hci_phy_link_complete_evt(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_ev_phy_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon, *bredr_hcon;

	BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
	       ev->status);

	hci_dev_lock(hdev);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon) {
		hci_dev_unlock(hdev);
		return;
	}

	if (ev->status) {
		hci_conn_del(hcon);
		hci_dev_unlock(hdev);
		return;
	}

	bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;

	hcon->state = BT_CONNECTED;
	bacpy(&hcon->dst, &bredr_hcon->dst);

	hci_conn_hold(hcon);
	hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(hcon);

	hci_debugfs_create_conn(hcon);
	hci_conn_add_sysfs(hcon);

	amp_physical_cfm(bredr_hcon, hcon);

	hci_dev_unlock(hdev);
}

/* Handle AMP Logical Link Complete event: create the HCI channel for
 * the new logical link and confirm it to the associated L2CAP channel.
 * NOTE(review): unlike most handlers here this one does not take
 * hdev->lock; presumably the chan/conn lifetime is protected elsewhere
 * — confirm against the locking rules in hci_conn.c.
 */
static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_logical_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon;
	struct hci_chan *hchan;
	struct amp_mgr *mgr;

	BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
	       hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
	       ev->status);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	/* Create AMP hchan */
	hchan = hci_chan_create(hcon);
	if (!hchan)
		return;

	hchan->handle = le16_to_cpu(ev->handle);

	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);

	mgr = hcon->amp_mgr;
	if (mgr && mgr->bredr_chan) {
		struct l2cap_chan *bredr_chan = mgr->bredr_chan;

		l2cap_chan_lock(bredr_chan);

		bredr_chan->conn->mtu = hdev->block_mtu;
		l2cap_logical_cfm(bredr_chan, hchan, 0);
		hci_conn_hold(hcon);

		l2cap_chan_unlock(bredr_chan);
	}
}

/* Handle AMP Disconnection Logical Link Complete event: tear down the
 * corresponding HCI channel.
 */
static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
	struct hci_chan *hchan;

	BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
	       le16_to_cpu(ev->handle), ev->status);

	if (ev->status)
		return;

	hci_dev_lock(hdev);

	hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
	if (!hchan)
		goto unlock;

	amp_destroy_logical_link(hchan, ev->reason);

unlock:
	hci_dev_unlock(hdev);
}

/* Handle AMP Disconnection Physical Link Complete event: close and
 * delete the physical-link connection object.
 */
static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	if (ev->status)
		return;

	hci_dev_lock(hdev);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (hcon) {
		hcon->state = BT_CLOSED;
		hci_conn_del(hcon);
	}

	hci_dev_unlock(hdev);
}

/* Handle LE Connection Complete event: finish setting up the hci_conn
 * (addresses, connection parameters), resolve the peer identity via its
 * IRK, and kick off the remote features exchange where applicable.
 */
static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_conn_complete *ev = (void
			*) skb->data;
	struct hci_conn_params *params;
	struct hci_conn *conn;
	struct smp_irk *irk;
	u8 addr_type;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	/* All controllers implicitly stop advertising in the event of a
	 * connection, so ensure that the state bit is cleared.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (!conn) {
		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr, ev->role);
		if (!conn) {
			BT_ERR("No memory for new connection");
			goto unlock;
		}

		conn->dst_type = ev->bdaddr_type;

		/* If we didn't have a hci_conn object previously
		 * but we're in master role this must be something
		 * initiated using a white list. Since white list based
		 * connections are not "first class citizens" we don't
		 * have full tracking of them. Therefore, we go ahead
		 * with a "best effort" approach of determining the
		 * initiator address based on the HCI_PRIVACY flag.
		 */
		if (conn->out) {
			conn->resp_addr_type = ev->bdaddr_type;
			bacpy(&conn->resp_addr, &ev->bdaddr);
			if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
				bacpy(&conn->init_addr, &hdev->rpa);
			} else {
				hci_copy_identity_address(hdev,
							  &conn->init_addr,
							  &conn->init_addr_type);
			}
		}
	} else {
		/* A tracked connection attempt completed; stop its timeout */
		cancel_delayed_work(&conn->le_conn_timeout);
	}

	if (!conn->out) {
		/* Set the responder (our side) address type based on
		 * the advertising address type.
		 */
		conn->resp_addr_type = hdev->adv_addr_type;
		if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM)
			bacpy(&conn->resp_addr, &hdev->random_addr);
		else
			bacpy(&conn->resp_addr, &hdev->bdaddr);

		conn->init_addr_type = ev->bdaddr_type;
		bacpy(&conn->init_addr, &ev->bdaddr);

		/* For incoming connections, set the default minimum
		 * and maximum connection interval. They will be used
		 * to check if the parameters are in range and if not
		 * trigger the connection update procedure.
		 */
		conn->le_conn_min_interval = hdev->le_conn_min_interval;
		conn->le_conn_max_interval = hdev->le_conn_max_interval;
	}

	/* Lookup the identity address from the stored connection
	 * address and address type.
	 *
	 * When establishing connections to an identity address, the
	 * connection procedure will store the resolvable random
	 * address first. Now if it can be converted back into the
	 * identity address, start using the identity address from
	 * now on.
	 */
	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
	if (irk) {
		bacpy(&conn->dst, &irk->bdaddr);
		conn->dst_type = irk->addr_type;
	}

	if (ev->status) {
		hci_le_conn_failed(conn, ev->status);
		goto unlock;
	}

	if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
		addr_type = BDADDR_LE_PUBLIC;
	else
		addr_type = BDADDR_LE_RANDOM;

	/* Drop the connection if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
		hci_conn_drop(conn);
		goto unlock;
	}

	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = __le16_to_cpu(ev->handle);
	conn->state = BT_CONFIG;

	conn->le_conn_interval = le16_to_cpu(ev->interval);
	conn->le_conn_latency = le16_to_cpu(ev->latency);
	conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);

	hci_debugfs_create_conn(conn);
	hci_conn_add_sysfs(conn);

	if (!ev->status) {
		/* The remote features procedure is defined for master
		 * role only. So only in case of an initiated connection
		 * request the remote features.
		 *
		 * If the local controller supports slave-initiated features
		 * exchange, then requesting the remote features in slave
		 * role is possible. Otherwise just transition into the
		 * connected state without requesting the remote features.
		 */
		if (conn->out ||
		    (hdev->le_features[0] & HCI_LE_SLAVE_FEATURES)) {
			struct hci_cp_le_read_remote_features cp;

			cp.handle = __cpu_to_le16(conn->handle);

			hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);

			hci_conn_hold(conn);
		} else {
			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, ev->status);
		}
	} else {
		hci_connect_cfm(conn, ev->status);
	}

	/* A pending auto-connection for this peer has now fired; drop
	 * the reference it was holding.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
					   conn->dst_type);
	if (params) {
		list_del_init(&params->action);
		if (params->conn) {
			hci_conn_drop(params->conn);
			hci_conn_put(params->conn);
			params->conn = NULL;
		}
	}

unlock:
	hci_update_background_scan(hdev);
	hci_dev_unlock(hdev);
}

/* Handle LE Connection Update Complete event: record the negotiated
 * interval, latency and supervision timeout on the connection.
 */
static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	if (ev->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		conn->le_conn_interval = le16_to_cpu(ev->interval);
		conn->le_conn_latency = le16_to_cpu(ev->latency);
		conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
	}

	hci_dev_unlock(hdev);
}

/* This function requires the caller holds hdev->lock */
static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
					      bdaddr_t *addr,
					      u8 addr_type, u8 adv_type)
{
	struct hci_conn *conn;
	struct hci_conn_params *params;

	/* If the event is not connectable don't proceed further */
	if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
		return NULL;

	/* Ignore if the device is blocked */
	if
(hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type))
		return NULL;

	/* Most controller will fail if we try to create new connections
	 * while we have an existing one in slave role.
	 */
	if (hdev->conn_hash.le_num_slave > 0)
		return NULL;

	/* If we're not connectable only connect devices that we have in
	 * our pend_le_conns list.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns,
					   addr, addr_type);
	if (!params)
		return NULL;

	switch (params->auto_connect) {
	case HCI_AUTO_CONN_DIRECT:
		/* Only devices advertising with ADV_DIRECT_IND are
		 * triggering a connection attempt. This is allowing
		 * incoming connections from slave devices.
		 */
		if (adv_type != LE_ADV_DIRECT_IND)
			return NULL;
		break;
	case HCI_AUTO_CONN_ALWAYS:
		/* Devices advertising with ADV_IND or ADV_DIRECT_IND
		 * are triggering a connection attempt. This means
		 * that incoming connections from slave device are
		 * accepted and also outgoing connections to slave
		 * devices are established when found.
		 */
		break;
	default:
		return NULL;
	}

	conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
			      HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER);
	if (!IS_ERR(conn)) {
		/* Store the pointer since we don't really have any
		 * other owner of the object besides the params that
		 * triggered it. This way we can abort the connection if
		 * the parameters get removed and keep the reference
		 * count consistent once the connection is established.
		 */
		params->conn = hci_conn_get(conn);
		return conn;
	}

	switch (PTR_ERR(conn)) {
	case -EBUSY:
		/* If hci_connect() returns -EBUSY it means there is already
		 * an LE connection attempt going on. Since controllers don't
		 * support more than one connection attempt at the time, we
		 * don't consider this an error case.
		 */
		break;
	default:
		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
		return NULL;
	}

	return NULL;
}

/* Process one LE advertising (or scan response) report: resolve the
 * peer identity, trigger any pending auto-connection, and emit or
 * merge mgmt device_found events. direct_addr is non-NULL only for
 * LE Direct Advertising Reports.
 */
static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
			       u8 bdaddr_type, bdaddr_t *direct_addr,
			       u8 direct_addr_type, s8 rssi, u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;
	struct smp_irk *irk;
	struct hci_conn *conn;
	bool match;
	u32 flags;

	/* If the direct address is present, then this report is from
	 * a LE Direct Advertising Report event. In that case it is
	 * important to see if the address is matching the local
	 * controller address.
	 */
	if (direct_addr) {
		/* Only resolvable random addresses are valid for these
		 * kind of reports and others can be ignored.
		 */
		if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
			return;

		/* If the controller is not using resolvable random
		 * addresses, then this report can be ignored.
		 */
		if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
			return;

		/* If the local IRK of the controller does not match
		 * with the resolvable random address provided, then
		 * this report can be ignored.
		 */
		if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
			return;
	}

	/* Check if we need to convert to identity address */
	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		bdaddr_type = irk->addr_type;
	}

	/* Check if we have been requested to connect to this device */
	conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type);
	if (conn && type == LE_ADV_IND) {
		/* Store report for later inclusion by
		 * mgmt_device_connected
		 */
		memcpy(conn->le_adv_data, data, len);
		conn->le_adv_data_len = len;
	}

	/* Passive scanning shouldn't trigger any device found events,
	 * except for devices marked as CONN_REPORT for which we do send
	 * device found events.
	 */
	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
		if (type == LE_ADV_DIRECT_IND)
			return;

		if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
					       bdaddr, bdaddr_type))
			return;

		if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
			flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
		else
			flags = 0;
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* When receiving non-connectable or scannable undirected
	 * advertising reports, this means that the remote device is
	 * not connectable and then clearly indicate this in the
	 * device found event.
	 *
	 * When receiving a scan response, then there is no way to
	 * know if the remote device is connectable or not. However
	 * since scan responses are merged with a previously seen
	 * advertising report, the flags field from that report
	 * will be used.
	 *
	 * In the really unlikely case that a controller get confused
	 * and just sends a scan response event, then it is marked as
	 * not connectable as well.
	 */
	if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
	    type == LE_ADV_SCAN_RSP)
		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
	else
		flags = 0;

	/* If there's nothing pending either store the data from this
	 * event or send an immediate device found event if the data
	 * should not be stored for later.
	 */
	if (!has_pending_adv_report(hdev)) {
		/* If the report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* Check if the pending report is for the same device as the new one */
	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
		 bdaddr_type == d->last_adv_addr_type);

	/* If the pending data doesn't match this report or this isn't a
	 * scan response (e.g. we got a duplicate ADV_IND) then force
	 * sending of the pending data.
	 */
	if (type != LE_ADV_SCAN_RSP || !match) {
		/* Send out whatever is in the cache, but skip duplicates */
		if (!match)
			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);

		/* If the new report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		/* The advertising reports cannot be merged, so clear
		 * the pending report and send out a device found event.
4933 */ 4934 clear_pending_adv_report(hdev); 4935 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL, 4936 rssi, flags, data, len, NULL, 0); 4937 return; 4938 } 4939 4940 /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and 4941 * the new event is a SCAN_RSP. We can therefore proceed with 4942 * sending a merged device found event. 4943 */ 4944 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK, 4945 d->last_adv_addr_type, NULL, rssi, d->last_adv_flags, 4946 d->last_adv_data, d->last_adv_data_len, data, len); 4947 clear_pending_adv_report(hdev); 4948 } 4949 4950 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb) 4951 { 4952 u8 num_reports = skb->data[0]; 4953 void *ptr = &skb->data[1]; 4954 4955 hci_dev_lock(hdev); 4956 4957 while (num_reports--) { 4958 struct hci_ev_le_advertising_info *ev = ptr; 4959 s8 rssi; 4960 4961 rssi = ev->data[ev->length]; 4962 process_adv_report(hdev, ev->evt_type, &ev->bdaddr, 4963 ev->bdaddr_type, NULL, 0, rssi, 4964 ev->data, ev->length); 4965 4966 ptr += sizeof(*ev) + ev->length + 1; 4967 } 4968 4969 hci_dev_unlock(hdev); 4970 } 4971 4972 static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev, 4973 struct sk_buff *skb) 4974 { 4975 struct hci_ev_le_remote_feat_complete *ev = (void *)skb->data; 4976 struct hci_conn *conn; 4977 4978 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 4979 4980 hci_dev_lock(hdev); 4981 4982 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 4983 if (conn) { 4984 if (!ev->status) 4985 memcpy(conn->features[0], ev->features, 8); 4986 4987 if (conn->state == BT_CONFIG) { 4988 __u8 status; 4989 4990 /* If the local controller supports slave-initiated 4991 * features exchange, but the remote controller does 4992 * not, then it is possible that the error code 0x1a 4993 * for unsupported remote feature gets returned. 
4994 * 4995 * In this specific case, allow the connection to 4996 * transition into connected state and mark it as 4997 * successful. 4998 */ 4999 if ((hdev->le_features[0] & HCI_LE_SLAVE_FEATURES) && 5000 !conn->out && ev->status == 0x1a) 5001 status = 0x00; 5002 else 5003 status = ev->status; 5004 5005 conn->state = BT_CONNECTED; 5006 hci_connect_cfm(conn, status); 5007 hci_conn_drop(conn); 5008 } 5009 } 5010 5011 hci_dev_unlock(hdev); 5012 } 5013 5014 static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb) 5015 { 5016 struct hci_ev_le_ltk_req *ev = (void *) skb->data; 5017 struct hci_cp_le_ltk_reply cp; 5018 struct hci_cp_le_ltk_neg_reply neg; 5019 struct hci_conn *conn; 5020 struct smp_ltk *ltk; 5021 5022 BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle)); 5023 5024 hci_dev_lock(hdev); 5025 5026 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 5027 if (conn == NULL) 5028 goto not_found; 5029 5030 ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role); 5031 if (!ltk) 5032 goto not_found; 5033 5034 if (smp_ltk_is_sc(ltk)) { 5035 /* With SC both EDiv and Rand are set to zero */ 5036 if (ev->ediv || ev->rand) 5037 goto not_found; 5038 } else { 5039 /* For non-SC keys check that EDiv and Rand match */ 5040 if (ev->ediv != ltk->ediv || ev->rand != ltk->rand) 5041 goto not_found; 5042 } 5043 5044 memcpy(cp.ltk, ltk->val, ltk->enc_size); 5045 memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size); 5046 cp.handle = cpu_to_le16(conn->handle); 5047 5048 conn->pending_sec_level = smp_ltk_sec_level(ltk); 5049 5050 conn->enc_key_size = ltk->enc_size; 5051 5052 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp); 5053 5054 /* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a 5055 * temporary key used to encrypt a connection following 5056 * pairing. It is used during the Encrypted Session Setup to 5057 * distribute the keys. 
Later, security can be re-established 5058 * using a distributed LTK. 5059 */ 5060 if (ltk->type == SMP_STK) { 5061 set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags); 5062 list_del_rcu(<k->list); 5063 kfree_rcu(ltk, rcu); 5064 } else { 5065 clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags); 5066 } 5067 5068 hci_dev_unlock(hdev); 5069 5070 return; 5071 5072 not_found: 5073 neg.handle = ev->handle; 5074 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg); 5075 hci_dev_unlock(hdev); 5076 } 5077 5078 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle, 5079 u8 reason) 5080 { 5081 struct hci_cp_le_conn_param_req_neg_reply cp; 5082 5083 cp.handle = cpu_to_le16(handle); 5084 cp.reason = reason; 5085 5086 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp), 5087 &cp); 5088 } 5089 5090 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev, 5091 struct sk_buff *skb) 5092 { 5093 struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data; 5094 struct hci_cp_le_conn_param_req_reply cp; 5095 struct hci_conn *hcon; 5096 u16 handle, min, max, latency, timeout; 5097 5098 handle = le16_to_cpu(ev->handle); 5099 min = le16_to_cpu(ev->interval_min); 5100 max = le16_to_cpu(ev->interval_max); 5101 latency = le16_to_cpu(ev->latency); 5102 timeout = le16_to_cpu(ev->timeout); 5103 5104 hcon = hci_conn_hash_lookup_handle(hdev, handle); 5105 if (!hcon || hcon->state != BT_CONNECTED) 5106 return send_conn_param_neg_reply(hdev, handle, 5107 HCI_ERROR_UNKNOWN_CONN_ID); 5108 5109 if (hci_check_conn_params(min, max, latency, timeout)) 5110 return send_conn_param_neg_reply(hdev, handle, 5111 HCI_ERROR_INVALID_LL_PARAMS); 5112 5113 if (hcon->role == HCI_ROLE_MASTER) { 5114 struct hci_conn_params *params; 5115 u8 store_hint; 5116 5117 hci_dev_lock(hdev); 5118 5119 params = hci_conn_params_lookup(hdev, &hcon->dst, 5120 hcon->dst_type); 5121 if (params) { 5122 params->conn_min_interval = min; 5123 params->conn_max_interval = max; 5124 
params->conn_latency = latency; 5125 params->supervision_timeout = timeout; 5126 store_hint = 0x01; 5127 } else{ 5128 store_hint = 0x00; 5129 } 5130 5131 hci_dev_unlock(hdev); 5132 5133 mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type, 5134 store_hint, min, max, latency, timeout); 5135 } 5136 5137 cp.handle = ev->handle; 5138 cp.interval_min = ev->interval_min; 5139 cp.interval_max = ev->interval_max; 5140 cp.latency = ev->latency; 5141 cp.timeout = ev->timeout; 5142 cp.min_ce_len = 0; 5143 cp.max_ce_len = 0; 5144 5145 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp); 5146 } 5147 5148 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, 5149 struct sk_buff *skb) 5150 { 5151 u8 num_reports = skb->data[0]; 5152 void *ptr = &skb->data[1]; 5153 5154 hci_dev_lock(hdev); 5155 5156 while (num_reports--) { 5157 struct hci_ev_le_direct_adv_info *ev = ptr; 5158 5159 process_adv_report(hdev, ev->evt_type, &ev->bdaddr, 5160 ev->bdaddr_type, &ev->direct_addr, 5161 ev->direct_addr_type, ev->rssi, NULL, 0); 5162 5163 ptr += sizeof(*ev); 5164 } 5165 5166 hci_dev_unlock(hdev); 5167 } 5168 5169 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb) 5170 { 5171 struct hci_ev_le_meta *le_ev = (void *) skb->data; 5172 5173 skb_pull(skb, sizeof(*le_ev)); 5174 5175 switch (le_ev->subevent) { 5176 case HCI_EV_LE_CONN_COMPLETE: 5177 hci_le_conn_complete_evt(hdev, skb); 5178 break; 5179 5180 case HCI_EV_LE_CONN_UPDATE_COMPLETE: 5181 hci_le_conn_update_complete_evt(hdev, skb); 5182 break; 5183 5184 case HCI_EV_LE_ADVERTISING_REPORT: 5185 hci_le_adv_report_evt(hdev, skb); 5186 break; 5187 5188 case HCI_EV_LE_REMOTE_FEAT_COMPLETE: 5189 hci_le_remote_feat_complete_evt(hdev, skb); 5190 break; 5191 5192 case HCI_EV_LE_LTK_REQ: 5193 hci_le_ltk_request_evt(hdev, skb); 5194 break; 5195 5196 case HCI_EV_LE_REMOTE_CONN_PARAM_REQ: 5197 hci_le_remote_conn_param_req_evt(hdev, skb); 5198 break; 5199 5200 case HCI_EV_LE_DIRECT_ADV_REPORT: 5201 
hci_le_direct_adv_report_evt(hdev, skb); 5202 break; 5203 5204 default: 5205 break; 5206 } 5207 } 5208 5209 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb) 5210 { 5211 struct hci_ev_channel_selected *ev = (void *) skb->data; 5212 struct hci_conn *hcon; 5213 5214 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle); 5215 5216 skb_pull(skb, sizeof(*ev)); 5217 5218 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle); 5219 if (!hcon) 5220 return; 5221 5222 amp_read_loc_assoc_final_data(hdev, hcon); 5223 } 5224 5225 static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode, 5226 u8 event, struct sk_buff *skb) 5227 { 5228 struct hci_ev_cmd_complete *ev; 5229 struct hci_event_hdr *hdr; 5230 5231 if (!skb) 5232 return false; 5233 5234 if (skb->len < sizeof(*hdr)) { 5235 BT_ERR("Too short HCI event"); 5236 return false; 5237 } 5238 5239 hdr = (void *) skb->data; 5240 skb_pull(skb, HCI_EVENT_HDR_SIZE); 5241 5242 if (event) { 5243 if (hdr->evt != event) 5244 return false; 5245 return true; 5246 } 5247 5248 if (hdr->evt != HCI_EV_CMD_COMPLETE) { 5249 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt); 5250 return false; 5251 } 5252 5253 if (skb->len < sizeof(*ev)) { 5254 BT_ERR("Too short cmd_complete event"); 5255 return false; 5256 } 5257 5258 ev = (void *) skb->data; 5259 skb_pull(skb, sizeof(*ev)); 5260 5261 if (opcode != __le16_to_cpu(ev->opcode)) { 5262 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode, 5263 __le16_to_cpu(ev->opcode)); 5264 return false; 5265 } 5266 5267 return true; 5268 } 5269 5270 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) 5271 { 5272 struct hci_event_hdr *hdr = (void *) skb->data; 5273 hci_req_complete_t req_complete = NULL; 5274 hci_req_complete_skb_t req_complete_skb = NULL; 5275 struct sk_buff *orig_skb = NULL; 5276 u8 status = 0, event = hdr->evt, req_evt = 0; 5277 u16 opcode = HCI_OP_NOP; 5278 5279 if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == 
event) { 5280 struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data; 5281 opcode = __le16_to_cpu(cmd_hdr->opcode); 5282 hci_req_cmd_complete(hdev, opcode, status, &req_complete, 5283 &req_complete_skb); 5284 req_evt = event; 5285 } 5286 5287 /* If it looks like we might end up having to call 5288 * req_complete_skb, store a pristine copy of the skb since the 5289 * various handlers may modify the original one through 5290 * skb_pull() calls, etc. 5291 */ 5292 if (req_complete_skb || event == HCI_EV_CMD_STATUS || 5293 event == HCI_EV_CMD_COMPLETE) 5294 orig_skb = skb_clone(skb, GFP_KERNEL); 5295 5296 skb_pull(skb, HCI_EVENT_HDR_SIZE); 5297 5298 switch (event) { 5299 case HCI_EV_INQUIRY_COMPLETE: 5300 hci_inquiry_complete_evt(hdev, skb); 5301 break; 5302 5303 case HCI_EV_INQUIRY_RESULT: 5304 hci_inquiry_result_evt(hdev, skb); 5305 break; 5306 5307 case HCI_EV_CONN_COMPLETE: 5308 hci_conn_complete_evt(hdev, skb); 5309 break; 5310 5311 case HCI_EV_CONN_REQUEST: 5312 hci_conn_request_evt(hdev, skb); 5313 break; 5314 5315 case HCI_EV_DISCONN_COMPLETE: 5316 hci_disconn_complete_evt(hdev, skb); 5317 break; 5318 5319 case HCI_EV_AUTH_COMPLETE: 5320 hci_auth_complete_evt(hdev, skb); 5321 break; 5322 5323 case HCI_EV_REMOTE_NAME: 5324 hci_remote_name_evt(hdev, skb); 5325 break; 5326 5327 case HCI_EV_ENCRYPT_CHANGE: 5328 hci_encrypt_change_evt(hdev, skb); 5329 break; 5330 5331 case HCI_EV_CHANGE_LINK_KEY_COMPLETE: 5332 hci_change_link_key_complete_evt(hdev, skb); 5333 break; 5334 5335 case HCI_EV_REMOTE_FEATURES: 5336 hci_remote_features_evt(hdev, skb); 5337 break; 5338 5339 case HCI_EV_CMD_COMPLETE: 5340 hci_cmd_complete_evt(hdev, skb, &opcode, &status, 5341 &req_complete, &req_complete_skb); 5342 break; 5343 5344 case HCI_EV_CMD_STATUS: 5345 hci_cmd_status_evt(hdev, skb, &opcode, &status, &req_complete, 5346 &req_complete_skb); 5347 break; 5348 5349 case HCI_EV_HARDWARE_ERROR: 5350 hci_hardware_error_evt(hdev, skb); 5351 break; 5352 5353 case HCI_EV_ROLE_CHANGE: 
5354 hci_role_change_evt(hdev, skb); 5355 break; 5356 5357 case HCI_EV_NUM_COMP_PKTS: 5358 hci_num_comp_pkts_evt(hdev, skb); 5359 break; 5360 5361 case HCI_EV_MODE_CHANGE: 5362 hci_mode_change_evt(hdev, skb); 5363 break; 5364 5365 case HCI_EV_PIN_CODE_REQ: 5366 hci_pin_code_request_evt(hdev, skb); 5367 break; 5368 5369 case HCI_EV_LINK_KEY_REQ: 5370 hci_link_key_request_evt(hdev, skb); 5371 break; 5372 5373 case HCI_EV_LINK_KEY_NOTIFY: 5374 hci_link_key_notify_evt(hdev, skb); 5375 break; 5376 5377 case HCI_EV_CLOCK_OFFSET: 5378 hci_clock_offset_evt(hdev, skb); 5379 break; 5380 5381 case HCI_EV_PKT_TYPE_CHANGE: 5382 hci_pkt_type_change_evt(hdev, skb); 5383 break; 5384 5385 case HCI_EV_PSCAN_REP_MODE: 5386 hci_pscan_rep_mode_evt(hdev, skb); 5387 break; 5388 5389 case HCI_EV_INQUIRY_RESULT_WITH_RSSI: 5390 hci_inquiry_result_with_rssi_evt(hdev, skb); 5391 break; 5392 5393 case HCI_EV_REMOTE_EXT_FEATURES: 5394 hci_remote_ext_features_evt(hdev, skb); 5395 break; 5396 5397 case HCI_EV_SYNC_CONN_COMPLETE: 5398 hci_sync_conn_complete_evt(hdev, skb); 5399 break; 5400 5401 case HCI_EV_EXTENDED_INQUIRY_RESULT: 5402 hci_extended_inquiry_result_evt(hdev, skb); 5403 break; 5404 5405 case HCI_EV_KEY_REFRESH_COMPLETE: 5406 hci_key_refresh_complete_evt(hdev, skb); 5407 break; 5408 5409 case HCI_EV_IO_CAPA_REQUEST: 5410 hci_io_capa_request_evt(hdev, skb); 5411 break; 5412 5413 case HCI_EV_IO_CAPA_REPLY: 5414 hci_io_capa_reply_evt(hdev, skb); 5415 break; 5416 5417 case HCI_EV_USER_CONFIRM_REQUEST: 5418 hci_user_confirm_request_evt(hdev, skb); 5419 break; 5420 5421 case HCI_EV_USER_PASSKEY_REQUEST: 5422 hci_user_passkey_request_evt(hdev, skb); 5423 break; 5424 5425 case HCI_EV_USER_PASSKEY_NOTIFY: 5426 hci_user_passkey_notify_evt(hdev, skb); 5427 break; 5428 5429 case HCI_EV_KEYPRESS_NOTIFY: 5430 hci_keypress_notify_evt(hdev, skb); 5431 break; 5432 5433 case HCI_EV_SIMPLE_PAIR_COMPLETE: 5434 hci_simple_pair_complete_evt(hdev, skb); 5435 break; 5436 5437 case 
HCI_EV_REMOTE_HOST_FEATURES: 5438 hci_remote_host_features_evt(hdev, skb); 5439 break; 5440 5441 case HCI_EV_LE_META: 5442 hci_le_meta_evt(hdev, skb); 5443 break; 5444 5445 case HCI_EV_CHANNEL_SELECTED: 5446 hci_chan_selected_evt(hdev, skb); 5447 break; 5448 5449 case HCI_EV_REMOTE_OOB_DATA_REQUEST: 5450 hci_remote_oob_data_request_evt(hdev, skb); 5451 break; 5452 5453 case HCI_EV_PHY_LINK_COMPLETE: 5454 hci_phy_link_complete_evt(hdev, skb); 5455 break; 5456 5457 case HCI_EV_LOGICAL_LINK_COMPLETE: 5458 hci_loglink_complete_evt(hdev, skb); 5459 break; 5460 5461 case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE: 5462 hci_disconn_loglink_complete_evt(hdev, skb); 5463 break; 5464 5465 case HCI_EV_DISCONN_PHY_LINK_COMPLETE: 5466 hci_disconn_phylink_complete_evt(hdev, skb); 5467 break; 5468 5469 case HCI_EV_NUM_COMP_BLOCKS: 5470 hci_num_comp_blocks_evt(hdev, skb); 5471 break; 5472 5473 default: 5474 BT_DBG("%s event 0x%2.2x", hdev->name, event); 5475 break; 5476 } 5477 5478 if (req_complete) { 5479 req_complete(hdev, status, opcode); 5480 } else if (req_complete_skb) { 5481 if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) { 5482 kfree_skb(orig_skb); 5483 orig_skb = NULL; 5484 } 5485 req_complete_skb(hdev, status, opcode, orig_skb); 5486 } 5487 5488 kfree_skb(orig_skb); 5489 kfree_skb(skb); 5490 hdev->stat.evt_rx++; 5491 } 5492