/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI event handling. */

#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>
#include <net/bluetooth/a2mp.h>
#include <net/bluetooth/amp.h>

/* Handle HCI Event packets */

static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	clear_bit(HCI_INQUIRY, &hdev->flags);
	smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	hci_conn_check_pending(hdev);
}

static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
}

static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);

	hci_conn_check_pending(hdev);
}

static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}

static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_role_discovery *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		if (rp->role)
			conn->link_mode &= ~HCI_LM_MASTER;
		else
			conn->link_mode |= HCI_LM_MASTER;
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = __le16_to_cpu(rp->policy);

	hci_dev_unlock(hdev);
}

static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_def_link_policy *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->link_policy = __le16_to_cpu(rp->policy);
}

static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
	if (!sent)
		return;

	if (!status)
		hdev->link_policy = get_unaligned_le16(sent);
}

static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	clear_bit(HCI_RESET, &hdev->flags);

	/* Reset all non-persistent flags */
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	hdev->discovery.state = DISCOVERY_STOPPED;
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;
}

static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_set_local_name_complete(hdev, sent, status);
	else if (!status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_name *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_SETUP, &hdev->dev_flags))
		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
}

static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return;

	if (!status) {
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_auth_enable_complete(hdev, status);
}

static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

BT_DBG("%s status 0x%2.2x", hdev->name, status); 263 264 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE); 265 if (!sent) 266 return; 267 268 if (!status) { 269 __u8 param = *((__u8 *) sent); 270 271 if (param) 272 set_bit(HCI_ENCRYPT, &hdev->flags); 273 else 274 clear_bit(HCI_ENCRYPT, &hdev->flags); 275 } 276 } 277 278 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb) 279 { 280 __u8 param, status = *((__u8 *) skb->data); 281 int old_pscan, old_iscan; 282 void *sent; 283 284 BT_DBG("%s status 0x%2.2x", hdev->name, status); 285 286 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE); 287 if (!sent) 288 return; 289 290 param = *((__u8 *) sent); 291 292 hci_dev_lock(hdev); 293 294 if (status) { 295 mgmt_write_scan_failed(hdev, param, status); 296 hdev->discov_timeout = 0; 297 goto done; 298 } 299 300 old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags); 301 old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags); 302 303 if (param & SCAN_INQUIRY) { 304 set_bit(HCI_ISCAN, &hdev->flags); 305 if (!old_iscan) 306 mgmt_discoverable(hdev, 1); 307 if (hdev->discov_timeout > 0) { 308 int to = msecs_to_jiffies(hdev->discov_timeout * 1000); 309 queue_delayed_work(hdev->workqueue, &hdev->discov_off, 310 to); 311 } 312 } else if (old_iscan) 313 mgmt_discoverable(hdev, 0); 314 315 if (param & SCAN_PAGE) { 316 set_bit(HCI_PSCAN, &hdev->flags); 317 if (!old_pscan) 318 mgmt_connectable(hdev, 1); 319 } else if (old_pscan) 320 mgmt_connectable(hdev, 0); 321 322 done: 323 hci_dev_unlock(hdev); 324 } 325 326 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb) 327 { 328 struct hci_rp_read_class_of_dev *rp = (void *) skb->data; 329 330 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 331 332 if (rp->status) 333 return; 334 335 memcpy(hdev->dev_class, rp->dev_class, 3); 336 337 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name, 338 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]); 339 } 340 341 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb) 342 { 343 __u8 status = *((__u8 *) skb->data); 344 void *sent; 345 346 BT_DBG("%s status 0x%2.2x", hdev->name, status); 347 348 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV); 349 if (!sent) 350 return; 351 352 hci_dev_lock(hdev); 353 354 if (status == 0) 355 memcpy(hdev->dev_class, sent, 3); 356 357 if (test_bit(HCI_MGMT, &hdev->dev_flags)) 358 mgmt_set_class_of_dev_complete(hdev, sent, status); 359 360 hci_dev_unlock(hdev); 361 } 362 363 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb) 364 { 365 struct hci_rp_read_voice_setting *rp = (void *) skb->data; 366 __u16 setting; 367 368 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); 369 370 if (rp->status) 371 return; 372 373 setting = __le16_to_cpu(rp->voice_setting); 374 375 if (hdev->voice_setting == setting) 376 return; 377 378 hdev->voice_setting = setting; 379 380 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting); 381 382 if (hdev->notify) 383 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING); 384 } 385 386 static void hci_cc_write_voice_setting(struct hci_dev *hdev, 387 struct sk_buff *skb) 388 { 389 __u8 status = *((__u8 *) skb->data); 390 __u16 setting; 391 void *sent; 392 393 BT_DBG("%s status 0x%2.2x", hdev->name, status); 394 395 if (status) 396 return; 397 398 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING); 399 if (!sent) 400 return; 401 402 setting = get_unaligned_le16(sent); 403 404 if (hdev->voice_setting == setting) 405 return; 406 407 
	hdev->voice_setting = setting;

	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
}

static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_write_ssp_mode *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return;

	if (!status) {
		if (sent->mode)
			hdev->features[1][0] |= LMP_HOST_SSP;
		else
			hdev->features[1][0] &= ~LMP_HOST_SSP;
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_ssp_enable_complete(hdev, sent->mode, status);
	else if (!status) {
		if (sent->mode)
			set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		else
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	}
}

static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_version *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->hci_ver = rp->hci_ver;
	hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
	hdev->lmp_ver = rp->lmp_ver;
	hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
	hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);

	BT_DBG("%s manufacturer 0x%4.4x hci ver %d:%d", hdev->name,
	       hdev->manufacturer, hdev->hci_ver, hdev->hci_rev);
}

static void hci_cc_read_local_commands(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_commands *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (!rp->status)
		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
}

static void hci_cc_read_local_features(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);

	BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
	       hdev->features[0][0], hdev->features[0][1],
	       hdev->features[0][2], hdev->features[0][3],
	       hdev->features[0][4], hdev->features[0][5],
	       hdev->features[0][6], hdev->features[0][7]);
}

static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->max_page = rp->max_page;

	if (rp->page < HCI_MAX_PAGES)
		memcpy(hdev->features[rp->page], rp->features, 8);
}

static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (!rp->status)
		hdev->flow_ctl_mode = rp->mode;
}

static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
		hdev->sco_mtu = 64;
		hdev->sco_pkts = 8;
	}

	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
}

static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_bd_addr *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (!rp->status)
		bacpy(&hdev->bdaddr, &rp->bdaddr);
}

static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (test_bit(HCI_INIT, &hdev->flags) && !rp->status) {
		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
		hdev->page_scan_window = __le16_to_cpu(rp->window);
	}
}

static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	struct hci_cp_write_page_scan_activity *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
	if (!sent)
		return;

	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
	hdev->page_scan_window = __le16_to_cpu(sent->window);
}

static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_type *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (test_bit(HCI_INIT, &hdev->flags) && !rp->status)
		hdev->page_scan_type = rp->type;
}

static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	u8 *type;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
	if (type)
		hdev->page_scan_type = *type;
}

static void hci_cc_read_data_block_size(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_data_block_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
	hdev->block_len = __le16_to_cpu(rp->block_len);
	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);

	hdev->block_cnt = hdev->num_blocks;

	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
	       hdev->block_cnt, hdev->block_len);
}

static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		goto a2mp_rsp;

	hdev->amp_status = rp->amp_status;
	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
	hdev->amp_type = rp->amp_type;
	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);

a2mp_rsp:
	a2mp_send_getinfo_rsp(hdev);
}

static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
	struct amp_assoc *assoc = &hdev->loc_assoc;
	size_t rem_len, frag_len;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		goto a2mp_rsp;

	frag_len = skb->len - sizeof(*rp);
	rem_len = __le16_to_cpu(rp->rem_len);

	if (rem_len > frag_len) {
		BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);

		memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
		assoc->offset += frag_len;

		/* Read other fragments */
		amp_read_loc_assoc_frag(hdev, rp->phy_handle);

		return;
	}

	memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
	assoc->len = assoc->offset + rem_len;
	assoc->offset = 0;

a2mp_rsp:
	/* Send A2MP Rsp when all fragments are received */
	a2mp_send_getampassoc_rsp(hdev, rp->status);
	a2mp_send_create_phy_link_req(hdev, rp->status);
}

static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (!rp->status)
		hdev->inq_tx_power = rp->tx_power;
}

static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
}

static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
						 rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
	hdev->le_pkts = rp->le_max_pkt;

	hdev->le_cnt = hdev->le_pkts;

	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
}

static void hci_cc_le_read_local_features(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_le_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (!rp->status)
		memcpy(hdev->le_features, rp->features, 8);
}

static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (!rp->status)
		hdev->adv_tx_power = rp->tx_power;
}

static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
						 rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
						 0, rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);
	mgmt_read_local_oob_data_reply_complete(hdev, rp->hash,
						rp->randomizer, rp->status);
	hci_dev_unlock(hdev);
}

static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		if (*sent)
			set_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags);
		else
			clear_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags);
	}

	if (!test_bit(HCI_INIT, &hdev->flags)) {
		struct hci_request req;

		hci_req_init(&req, hdev);
		hci_update_ad(&req);
		hci_req_run(&req, NULL);
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	if (status)
		return;

	switch (cp->enable) {
	case LE_SCAN_ENABLE:
		set_bit(HCI_LE_SCAN, &hdev->dev_flags);
		break;

	case LE_SCAN_DISABLE:
		clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
		break;

	default:
		BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
		break;
	}
}

static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);

	if (!rp->status)
		hdev->le_white_list_size = rp->size;
}

static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_rp_le_read_supported_states *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (!rp->status)
		memcpy(hdev->le_states, rp->le_states, 8);
}

static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_cp_write_le_host_supported *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
	if (!sent)
		return;

	if (!status) {
		if (sent->le)
			hdev->features[1][0] |= LMP_HOST_LE;
		else
			hdev->features[1][0] &= ~LMP_HOST_LE;

		if (sent->simul)
			hdev->features[1][0] |= LMP_HOST_LE_BREDR;
		else
			hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
	    !test_bit(HCI_INIT, &hdev->flags))
		mgmt_le_enable_complete(hdev, sent->le, status);
}

static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
	       hdev->name, rp->status, rp->phy_handle);

	if (rp->status)
		return;

	amp_write_rem_assoc_continue(hdev, rp->phy_handle);
}

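/* Command Status handlers: the hci_cs_* functions below are dispatched
 * from hci_cmd_status_evt() and process the status byte of commands
 * that report a Command Status event rather than a Command Complete
 * event (connection setup, authentication, sniff mode and similar
 * asynchronous operations).
 */
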
static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status) {
		hci_conn_check_pending(hdev);
		return;
	}

	set_bit(HCI_INQUIRY, &hdev->flags);
}

static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_proto_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
			if (conn) {
				conn->out = true;
				conn->link_mode |= HCI_LM_MASTER;
			} else
				BT_ERR("No memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_add_sco *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_proto_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_auth_requested *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_proto_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_set_conn_encrypt *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_proto_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}

static int hci_outgoing_auth_needed(struct hci_dev *hdev,
				    struct hci_conn *conn)
{
	if (conn->state != BT_CONFIG || !conn->out)
		return 0;

	if (conn->pending_sec_level == BT_SECURITY_SDP)
		return 0;

	/* Only request authentication for SSP connections or non-SSP
	 * devices with sec_level HIGH or if MITM protection is requested */
	if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
	    conn->pending_sec_level != BT_SECURITY_HIGH)
		return 0;

	return 1;
}

static int hci_resolve_name(struct hci_dev *hdev,
			    struct inquiry_entry *e)
{
	struct hci_cp_remote_name_req cp;

	memset(&cp, 0, sizeof(cp));

	bacpy(&cp.bdaddr, &e->data.bdaddr);
	cp.pscan_rep_mode = e->data.pscan_rep_mode;
	cp.pscan_mode = e->data.pscan_mode;
	cp.clock_offset = e->data.clock_offset;

	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
}

static bool hci_resolve_next_name(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	if (list_empty(&discov->resolve))
		return false;

	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (!e)
		return false;

	if (hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		return true;
	}

	return false;
}

static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
				      name_len, conn->dev_class);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in the list of devices whose names are
	 * pending resolution, there is no need to continue resolving a next
	 * name as it will be done upon receiving another Remote Name Request
	 * Complete Event */
	if (!e)
		return;

	list_del(&e->list);
	if (name) {
		e->name_state = NAME_KNOWN;
		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
				 e->data.rssi, name, name_len);
	} else {
		e->name_state = NAME_NOT_KNOWN;
	}

	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}

static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* If successful wait for the name req complete event before
	 * checking for the need to do authentication */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;
		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_read_remote_features *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_proto_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_read_remote_ext_features *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_proto_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_setup_sync_conn *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_proto_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_sniff_mode *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, status);
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_exit_sniff_mode *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, status);
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_disconnect *cp;
	struct hci_conn *conn;

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn)
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, status);

	hci_dev_unlock(hdev);
}

static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status) {
		hci_dev_lock(hdev);

		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
		if (!conn) {
			hci_dev_unlock(hdev);
			return;
		}

		BT_DBG("%s bdaddr %pMR conn %p", hdev->name, &conn->dst, conn);

		conn->state = BT_CLOSED;
		mgmt_connect_failed(hdev, &conn->dst, conn->type,
				    conn->dst_type, status);
		hci_proto_connect_cfm(conn, status);
		hci_conn_del(conn);

		hci_dev_unlock(hdev);
	}
}

static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_create_phy_link *cp;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	if (status) {
		struct hci_conn *hcon;

		hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
		if (hcon)
			hci_conn_del(hcon);
	} else {
		amp_write_remote_assoc(hdev, cp->phy_handle);
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_accept_phy_link *cp;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
	if (!cp)
		return;

	amp_write_remote_assoc(hdev, cp->phy_handle);
}

static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	hci_conn_check_pending(hdev);

	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		return;

	smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	if (discov->state != DISCOVERY_FINDING)
		goto unlock;

	if (list_empty(&discov->resolve)) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (e && hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
	} else {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct inquiry_data data;
	struct inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp = *((__u8 *) skb->data);

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	for (; num_rsp; num_rsp--, info++) {
		bool name_known, ssp;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode = info->pscan_rep_mode;
		data.pscan_period_mode = info->pscan_period_mode;
		data.pscan_mode = info->pscan_mode;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset = info->clock_offset;
		data.rssi = 0x00;
		data.ssp_mode = 0x00;

		name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, 0, !name_known, ssp, NULL,
				  0);
	}

	hci_dev_unlock(hdev);
}

static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type != SCO_LINK)
			goto unlock;

		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	if (!ev->status) {
		conn->handle = __le16_to_cpu(ev->handle);

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_conn_add_sysfs(conn);

		if (test_bit(HCI_AUTH, &hdev->flags))
			conn->link_mode |= HCI_LM_AUTH;

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			conn->link_mode |= HCI_LM_ENCRYPT;

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}
	} else {
		conn->state = BT_CLOSED;
		if (conn->type == ACL_LINK)
			mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
					    conn->dst_type, ev->status);
	}

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

	if (ev->status) {
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_del(conn);
	} else if (ev->link_type != ACL_LINK)
		hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}

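/* Incoming connection request: the registered protocols are consulted
 * via hci_proto_connect_ind() and the blacklist is checked before the
 * request is accepted. ACL requests are accepted right away; SCO/eSCO
 * requests are either accepted synchronously or deferred to the
 * protocol layer when HCI_PROTO_DEFER is set.
 */
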
static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;
	__u8 flags = 0;

	BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
	       ev->link_type);

	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
				      &flags);

	if ((mask & HCI_LM_ACCEPT) &&
	    !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
		/* Connection accepted */
		struct inquiry_entry *ie;
		struct hci_conn *conn;

		hci_dev_lock(hdev);

		ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
		if (ie)
			memcpy(ie->data.dev_class, ev->dev_class, 3);

		conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
					       &ev->bdaddr);
		if (!conn) {
			conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
			if (!conn) {
				BT_ERR("No memory for new connection");
				hci_dev_unlock(hdev);
				return;
			}
		}

		memcpy(conn->dev_class, ev->dev_class, 3);

		hci_dev_unlock(hdev);

		if (ev->link_type == ACL_LINK ||
		    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
			struct hci_cp_accept_conn_req cp;
			conn->state = BT_CONNECT;

			bacpy(&cp.bdaddr, &ev->bdaddr);

			if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
				cp.role = 0x00; /* Become master */
			else
				cp.role = 0x01; /* Remain slave */

			hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp),
				     &cp);
		} else if (!(flags & HCI_PROTO_DEFER)) {
			struct hci_cp_accept_sync_conn_req cp;
			conn->state = BT_CONNECT;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			cp.pkt_type = cpu_to_le16(conn->pkt_type);

			cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40);
			cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40);
			cp.max_latency = __constant_cpu_to_le16(0xffff);
			cp.content_format = cpu_to_le16(hdev->voice_setting);
			cp.retrans_effort = 0xff;

			hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
				     sizeof(cp), &cp);
		} else {
			conn->state = BT_CONNECT2;
			hci_proto_connect_cfm(conn, 0);
		}
	} else {
		/* Connection rejected */
		struct hci_cp_reject_conn_req cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_REJ_BAD_ADDR;
		hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
	}
}

static u8 hci_to_mgmt_reason(u8 err)
{
	switch (err) {
	case HCI_ERROR_CONNECTION_TIMEOUT:
		return MGMT_DEV_DISCONN_TIMEOUT;
	case HCI_ERROR_REMOTE_USER_TERM:
	case HCI_ERROR_REMOTE_LOW_RESOURCES:
	case HCI_ERROR_REMOTE_POWER_OFF:
		return MGMT_DEV_DISCONN_REMOTE;
	case HCI_ERROR_LOCAL_HOST_TERM:
		return MGMT_DEV_DISCONN_LOCAL_HOST;
	default:
		return MGMT_DEV_DISCONN_UNKNOWN;
	}
}

static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->status == 0)
		conn->state = BT_CLOSED;

	if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) &&
	    (conn->type == ACL_LINK || conn->type == LE_LINK)) {
		if (ev->status) {
			mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
					       conn->dst_type, ev->status);
		} else {
			u8 reason = hci_to_mgmt_reason(ev->reason);

			mgmt_device_disconnected(hdev, &conn->dst, conn->type,
						 conn->dst_type, reason);
		}
	}

	if (ev->status == 0) {
		if (conn->type == ACL_LINK && conn->flush_key)
			hci_remove_link_key(hdev, &conn->dst);
		hci_proto_disconn_cfm(conn, ev->reason);
		hci_conn_del(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (!hci_conn_ssp_enabled(conn) &&
		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
			BT_INFO("re-auth of legacy device is not possible.");
		} else {
			conn->link_mode |= HCI_LM_AUTH;
			conn->sec_level = conn->pending_sec_level;
		}
	} else {
		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
				 ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_proto_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status, 0x00);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto check_auth;

	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;
		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		if (!ev->status) {
			if (ev->encrypt) {
				/* Encryption implies authentication */
				conn->link_mode |= HCI_LM_AUTH;
				conn->link_mode |= HCI_LM_ENCRYPT;
				conn->sec_level = conn->pending_sec_level;
			} else
				conn->link_mode &= ~HCI_LM_ENCRYPT;
		}

		clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

		if (ev->status && conn->state == BT_CONNECTED) {
			hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
			hci_conn_drop(conn);
			goto unlock;
		}

		if (conn->state == BT_CONFIG) {
			if (!ev->status)
				conn->state = BT_CONNECTED;

			hci_proto_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		} else
			hci_encrypt_cfm(conn, ev->status, ev->encrypt);
	}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		if (!ev->status)
			conn->link_mode |= HCI_LM_SECURE;

		clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);

		hci_key_change_cfm(conn, ev->status);
	}

	hci_dev_unlock(hdev);
}

static void hci_remote_features_evt(struct hci_dev *hdev,
				    struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		memcpy(conn->features[0], ev->features, 8);

	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
			     sizeof(cp), &cp);
		goto unlock;
	}

	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0,
				      conn->dev_class);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}

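/* Command Complete event: dispatch to the matching hci_cc_* handler
 * based on the opcode of the completed command, stop the command timer
 * and, if the controller reports free command slots (ncmd), restart the
 * command queue.
 */
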
static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev = (void *) skb->data;
	u8 status = skb->data[sizeof(*ev)];
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY_CANCEL:
		hci_cc_inquiry_cancel(hdev, skb);
		break;

	case HCI_OP_PERIODIC_INQ:
		hci_cc_periodic_inq(hdev, skb);
		break;

	case HCI_OP_EXIT_PERIODIC_INQ:
		hci_cc_exit_periodic_inq(hdev, skb);
		break;

	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
		hci_cc_remote_name_req_cancel(hdev, skb);
		break;

	case HCI_OP_ROLE_DISCOVERY:
		hci_cc_role_discovery(hdev, skb);
		break;

	case HCI_OP_READ_LINK_POLICY:
		hci_cc_read_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_LINK_POLICY:
		hci_cc_write_link_policy(hdev, skb);
		break;

	case HCI_OP_READ_DEF_LINK_POLICY:
		hci_cc_read_def_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_DEF_LINK_POLICY:
		hci_cc_write_def_link_policy(hdev, skb);
		break;

	case HCI_OP_RESET:
		hci_cc_reset(hdev, skb);
		break;

	case HCI_OP_WRITE_LOCAL_NAME:
		hci_cc_write_local_name(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_NAME:
		hci_cc_read_local_name(hdev, skb);
		break;

	case HCI_OP_WRITE_AUTH_ENABLE:
		hci_cc_write_auth_enable(hdev, skb);
		break;

	case HCI_OP_WRITE_ENCRYPT_MODE:
		hci_cc_write_encrypt_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SCAN_ENABLE:
		hci_cc_write_scan_enable(hdev, skb);
		break;

	case HCI_OP_READ_CLASS_OF_DEV:
		hci_cc_read_class_of_dev(hdev, skb);
		break;

	case HCI_OP_WRITE_CLASS_OF_DEV:
		hci_cc_write_class_of_dev(hdev, skb);
		break;

	case HCI_OP_READ_VOICE_SETTING:
		hci_cc_read_voice_setting(hdev, skb);
		break;

	case HCI_OP_WRITE_VOICE_SETTING:
		hci_cc_write_voice_setting(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_MODE:
		hci_cc_write_ssp_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_VERSION:
		hci_cc_read_local_version(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_COMMANDS:
		hci_cc_read_local_commands(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_FEATURES:
		hci_cc_read_local_features(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_EXT_FEATURES:
		hci_cc_read_local_ext_features(hdev, skb);
		break;

	case HCI_OP_READ_BUFFER_SIZE:
		hci_cc_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_READ_BD_ADDR:
		hci_cc_read_bd_addr(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
		hci_cc_read_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
		hci_cc_write_page_scan_activity(hdev, skb);
		break;

HCI_OP_READ_PAGE_SCAN_TYPE: 2179 hci_cc_read_page_scan_type(hdev, skb); 2180 break; 2181 2182 case HCI_OP_WRITE_PAGE_SCAN_TYPE: 2183 hci_cc_write_page_scan_type(hdev, skb); 2184 break; 2185 2186 case HCI_OP_READ_DATA_BLOCK_SIZE: 2187 hci_cc_read_data_block_size(hdev, skb); 2188 break; 2189 2190 case HCI_OP_READ_FLOW_CONTROL_MODE: 2191 hci_cc_read_flow_control_mode(hdev, skb); 2192 break; 2193 2194 case HCI_OP_READ_LOCAL_AMP_INFO: 2195 hci_cc_read_local_amp_info(hdev, skb); 2196 break; 2197 2198 case HCI_OP_READ_LOCAL_AMP_ASSOC: 2199 hci_cc_read_local_amp_assoc(hdev, skb); 2200 break; 2201 2202 case HCI_OP_READ_INQ_RSP_TX_POWER: 2203 hci_cc_read_inq_rsp_tx_power(hdev, skb); 2204 break; 2205 2206 case HCI_OP_PIN_CODE_REPLY: 2207 hci_cc_pin_code_reply(hdev, skb); 2208 break; 2209 2210 case HCI_OP_PIN_CODE_NEG_REPLY: 2211 hci_cc_pin_code_neg_reply(hdev, skb); 2212 break; 2213 2214 case HCI_OP_READ_LOCAL_OOB_DATA: 2215 hci_cc_read_local_oob_data_reply(hdev, skb); 2216 break; 2217 2218 case HCI_OP_LE_READ_BUFFER_SIZE: 2219 hci_cc_le_read_buffer_size(hdev, skb); 2220 break; 2221 2222 case HCI_OP_LE_READ_LOCAL_FEATURES: 2223 hci_cc_le_read_local_features(hdev, skb); 2224 break; 2225 2226 case HCI_OP_LE_READ_ADV_TX_POWER: 2227 hci_cc_le_read_adv_tx_power(hdev, skb); 2228 break; 2229 2230 case HCI_OP_USER_CONFIRM_REPLY: 2231 hci_cc_user_confirm_reply(hdev, skb); 2232 break; 2233 2234 case HCI_OP_USER_CONFIRM_NEG_REPLY: 2235 hci_cc_user_confirm_neg_reply(hdev, skb); 2236 break; 2237 2238 case HCI_OP_USER_PASSKEY_REPLY: 2239 hci_cc_user_passkey_reply(hdev, skb); 2240 break; 2241 2242 case HCI_OP_USER_PASSKEY_NEG_REPLY: 2243 hci_cc_user_passkey_neg_reply(hdev, skb); 2244 break; 2245 2246 case HCI_OP_LE_SET_ADV_ENABLE: 2247 hci_cc_le_set_adv_enable(hdev, skb); 2248 break; 2249 2250 case HCI_OP_LE_SET_SCAN_ENABLE: 2251 hci_cc_le_set_scan_enable(hdev, skb); 2252 break; 2253 2254 case HCI_OP_LE_READ_WHITE_LIST_SIZE: 2255 hci_cc_le_read_white_list_size(hdev, skb); 2256 break; 2257 2258 case HCI_OP_LE_READ_SUPPORTED_STATES: 2259 hci_cc_le_read_supported_states(hdev, skb); 2260 break; 2261 2262 case HCI_OP_WRITE_LE_HOST_SUPPORTED: 2263 hci_cc_write_le_host_supported(hdev, skb); 2264 break; 2265 2266 case HCI_OP_WRITE_REMOTE_AMP_ASSOC: 2267 hci_cc_write_remote_amp_assoc(hdev, skb); 2268 break; 2269 2270 default: 2271 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode); 2272 break; 2273 } 2274 2275 if (opcode != HCI_OP_NOP) 2276 del_timer(&hdev->cmd_timer); 2277 2278 hci_req_cmd_complete(hdev, opcode, status); 2279 2280 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) { 2281 atomic_set(&hdev->cmd_cnt, 1); 2282 if (!skb_queue_empty(&hdev->cmd_q)) 2283 queue_work(hdev->workqueue, &hdev->cmd_work); 2284 } 2285 } 2286 2287 static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb) 2288 { 2289 struct hci_ev_cmd_status *ev = (void *) skb->data; 2290 __u16 opcode; 2291 2292 skb_pull(skb, sizeof(*ev)); 2293 2294 opcode = __le16_to_cpu(ev->opcode); 2295 2296 switch (opcode) { 2297 case HCI_OP_INQUIRY: 2298 hci_cs_inquiry(hdev, ev->status); 2299 break; 2300 2301 case HCI_OP_CREATE_CONN: 2302 hci_cs_create_conn(hdev, ev->status); 2303 break; 2304 2305 case HCI_OP_ADD_SCO: 2306 hci_cs_add_sco(hdev, ev->status); 2307 break; 2308 2309 case HCI_OP_AUTH_REQUESTED: 2310 hci_cs_auth_requested(hdev, ev->status); 2311 break; 2312 2313 case HCI_OP_SET_CONN_ENCRYPT: 2314 hci_cs_set_conn_encrypt(hdev, ev->status); 2315 break; 2316 2317 case HCI_OP_REMOTE_NAME_REQ: 2318 hci_cs_remote_name_req(hdev, ev->status); 2319 
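		/* The command status above only acknowledges the request; the
		 * resolved name arrives later in a Remote Name Request
		 * Complete event and is handled by hci_remote_name_evt().
		 */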
		break;

	case HCI_OP_READ_REMOTE_FEATURES:
		hci_cs_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_EXT_FEATURES:
		hci_cs_read_remote_ext_features(hdev, ev->status);
		break;

	case HCI_OP_SETUP_SYNC_CONN:
		hci_cs_setup_sync_conn(hdev, ev->status);
		break;

	case HCI_OP_SNIFF_MODE:
		hci_cs_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_EXIT_SNIFF_MODE:
		hci_cs_exit_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_DISCONNECT:
		hci_cs_disconnect(hdev, ev->status);
		break;

	case HCI_OP_LE_CREATE_CONN:
		hci_cs_le_create_conn(hdev, ev->status);
		break;

	case HCI_OP_CREATE_PHY_LINK:
		hci_cs_create_phylink(hdev, ev->status);
		break;

	case HCI_OP_ACCEPT_PHY_LINK:
		hci_cs_accept_phylink(hdev, ev->status);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	if (opcode != HCI_OP_NOP)
		del_timer(&hdev->cmd_timer);

	if (ev->status ||
	    (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
		hci_req_cmd_complete(hdev, opcode, ev->status);

	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}

static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_role_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		if (!ev->status) {
			if (ev->role)
				conn->link_mode &= ~HCI_LM_MASTER;
			else
				conn->link_mode |= HCI_LM_MASTER;
		}

		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);

		hci_role_switch_cfm(conn, ev->status, ev->role);
	}

	hci_dev_unlock(hdev);
}

static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
	int i;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_pkts_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16 handle, count;

		handle = __le16_to_cpu(info->handle);
		count = __le16_to_cpu(info->count);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= count;

		switch (conn->type) {
		case ACL_LINK:
			hdev->acl_cnt += count;
			if (hdev->acl_cnt > hdev->acl_pkts)
				hdev->acl_cnt = hdev->acl_pkts;
			break;

		case LE_LINK:
			if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		case SCO_LINK:
			hdev->sco_cnt += count;
			if (hdev->sco_cnt > hdev->sco_pkts)
				hdev->sco_cnt = hdev->sco_pkts;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}

static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
						 __u16 handle)
{
	struct hci_chan *chan;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		return hci_conn_hash_lookup_handle(hdev, handle);
	case HCI_AMP:
		chan = hci_chan_lookup_handle(hdev, handle);
		if (chan)
			return chan->conn;
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		break;
	}

	return NULL;
}

static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
	int i;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
	       ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_blocks_info *info = &ev->handles[i];
		struct hci_conn *conn = NULL;
		__u16 handle, block_count;

		handle = __le16_to_cpu(info->handle);
		block_count = __le16_to_cpu(info->blocks);

		conn = __hci_conn_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= block_count;

		switch (conn->type) {
		case ACL_LINK:
		case AMP_LINK:
			hdev->block_cnt += block_count;
			if (hdev->block_cnt > hdev->num_blocks)
				hdev->block_cnt = hdev->num_blocks;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}

static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_mode_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		conn->mode = ev->mode;
		conn->interval = __le16_to_cpu(ev->interval);

		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
					&conn->flags)) {
			if (conn->mode == HCI_CM_ACTIVE)
				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
			else
				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
		}

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, ev->status);
	}

	hci_dev_unlock(hdev);
}

static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pin_code_req *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	if (conn->state == BT_CONNECTED) {
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
		hci_conn_drop(conn);
	}

	if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
	else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
		u8 secure;

		if (conn->pending_sec_level == BT_SECURITY_HIGH)
			secure = 1;
		else
			secure = 0;

		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
	}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = (void *) skb->data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		BT_DBG("%s link key not found for %pMR", hdev->name,
		       &ev->bdaddr);
		goto not_found;
	}

	BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
	       &ev->bdaddr);

	if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
	    key->type == HCI_LK_DEBUG_COMBINATION) {
		BT_DBG("%s ignoring debug key", hdev->name);
		goto not_found;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		if (key->type == HCI_LK_UNAUTH_COMBINATION &&
		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
			BT_DBG("%s ignoring unauthenticated key", hdev->name);
			goto not_found;
		}

		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
		    conn->pending_sec_level == BT_SECURITY_HIGH) {
			BT_DBG("%s ignoring key unauthenticated for high security",
			       hdev->name);
			goto not_found;
		}

		conn->key_type = key->type;
		conn->pin_length = key->pin_len;
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}

static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = (void *) skb->data;
	struct hci_conn *conn;
	u8 pin_len = 0;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		pin_len = conn->pin_length;

		if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
			conn->key_type = ev->key_type;

		hci_conn_drop(conn);
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
				 ev->key_type, pin_len);

	hci_dev_unlock(hdev);
}

static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_clock_offset *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn && !ev->status) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie) {
			ie->data.clock_offset = ev->clock_offset;
			ie->timestamp = jiffies;
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pkt_type_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn && !ev->status)
		conn->pkt_type = __le16_to_cpu(ev->pkt_type);

	hci_dev_unlock(hdev);
}

static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
	struct inquiry_entry *ie;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie) {
		ie->data.pscan_rep_mode = ev->pscan_rep_mode;
		ie->timestamp = jiffies;
	}

	hci_dev_unlock(hdev);
}

static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct inquiry_data data;
	int num_rsp = *((__u8 *) skb->data);
	bool name_known, ssp;

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
		struct inquiry_info_with_rssi_and_pscan_mode *info;
		info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = info->pscan_mode;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;

			name_known = hci_inquiry_cache_update(hdev, &data,
							      false, &ssp);
			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  !name_known, ssp, NULL, 0);
		}
	} else {
		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = 0x00;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;
			name_known = hci_inquiry_cache_update(hdev, &data,
							      false, &ssp);
			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  !name_known, ssp, NULL, 0);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_remote_ext_features_evt(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->page < HCI_MAX_PAGES)
		memcpy(conn->features[ev->page], ev->features, 8);

	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

		if (ev->features[0] & LMP_HOST_SSP) {
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
} else { 2841 /* It is mandatory by the Bluetooth specification that 2842 * Extended Inquiry Results are only used when Secure 2843 * Simple Pairing is enabled, but some devices violate 2844 * this. 2845 * 2846 * To make these devices work, the internal SSP 2847 * enabled flag needs to be cleared if the remote host 2848 * features do not indicate SSP support */ 2849 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags); 2850 } 2851 } 2852 2853 if (conn->state != BT_CONFIG) 2854 goto unlock; 2855 2856 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) { 2857 struct hci_cp_remote_name_req cp; 2858 memset(&cp, 0, sizeof(cp)); 2859 bacpy(&cp.bdaddr, &conn->dst); 2860 cp.pscan_rep_mode = 0x02; 2861 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp); 2862 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) 2863 mgmt_device_connected(hdev, &conn->dst, conn->type, 2864 conn->dst_type, 0, NULL, 0, 2865 conn->dev_class); 2866 2867 if (!hci_outgoing_auth_needed(hdev, conn)) { 2868 conn->state = BT_CONNECTED; 2869 hci_proto_connect_cfm(conn, ev->status); 2870 hci_conn_drop(conn); 2871 } 2872 2873 unlock: 2874 hci_dev_unlock(hdev); 2875 } 2876 2877 static void hci_sync_conn_complete_evt(struct hci_dev *hdev, 2878 struct sk_buff *skb) 2879 { 2880 struct hci_ev_sync_conn_complete *ev = (void *) skb->data; 2881 struct hci_conn *conn; 2882 2883 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 2884 2885 hci_dev_lock(hdev); 2886 2887 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); 2888 if (!conn) { 2889 if (ev->link_type == ESCO_LINK) 2890 goto unlock; 2891 2892 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr); 2893 if (!conn) 2894 goto unlock; 2895 2896 conn->type = SCO_LINK; 2897 } 2898 2899 switch (ev->status) { 2900 case 0x00: 2901 conn->handle = __le16_to_cpu(ev->handle); 2902 conn->state = BT_CONNECTED; 2903 2904 hci_conn_add_sysfs(conn); 2905 break; 2906 2907 case 0x0d: /* Connection Rejected due to Limited Resources */ 2908 case 0x11: /* Unsupported Feature or Parameter Value */ 2909 case 0x1c: /* SCO interval rejected */ 2910 case 0x1a: /* Unsupported Remote Feature */ 2911 case 0x1f: /* Unspecified error */ 2912 if (conn->out) { 2913 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) | 2914 (hdev->esco_type & EDR_ESCO_MASK); 2915 if (hci_setup_sync(conn, conn->link->handle)) 2916 goto unlock; 2917 } 2918 /* fall through */ 2919 2920 default: 2921 conn->state = BT_CLOSED; 2922 break; 2923 } 2924 2925 hci_proto_connect_cfm(conn, ev->status); 2926 if (ev->status) 2927 hci_conn_del(conn); 2928 2929 unlock: 2930 hci_dev_unlock(hdev); 2931 } 2932 2933 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, 2934 struct sk_buff *skb) 2935 { 2936 struct inquiry_data data; 2937 struct extended_inquiry_info *info = (void *) (skb->data + 1); 2938 int num_rsp = *((__u8 *) skb->data); 2939 size_t eir_len; 2940 2941 BT_DBG("%s num_rsp %d", hdev->name, num_rsp); 2942 2943 if (!num_rsp) 2944 return; 2945 2946 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) 2947 return; 2948 2949 hci_dev_lock(hdev); 2950 2951 for (; num_rsp; num_rsp--, info++) { 2952 bool name_known, ssp; 2953 2954 bacpy(&data.bdaddr, &info->bdaddr); 2955 data.pscan_rep_mode = info->pscan_rep_mode; 2956 data.pscan_period_mode = info->pscan_period_mode; 2957 data.pscan_mode = 0x00; 2958 memcpy(data.dev_class, info->dev_class, 3); 2959 data.clock_offset = info->clock_offset; 2960 data.rssi = info->rssi; 2961 data.ssp_mode = 0x01; 2962 2963 if (test_bit(HCI_MGMT, 
&hdev->dev_flags)) 2964 name_known = eir_has_data_type(info->data, 2965 sizeof(info->data), 2966 EIR_NAME_COMPLETE); 2967 else 2968 name_known = true; 2969 2970 name_known = hci_inquiry_cache_update(hdev, &data, name_known, 2971 &ssp); 2972 eir_len = eir_get_length(info->data, sizeof(info->data)); 2973 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, 2974 info->dev_class, info->rssi, !name_known, 2975 ssp, info->data, eir_len); 2976 } 2977 2978 hci_dev_unlock(hdev); 2979 } 2980 2981 static void hci_key_refresh_complete_evt(struct hci_dev *hdev, 2982 struct sk_buff *skb) 2983 { 2984 struct hci_ev_key_refresh_complete *ev = (void *) skb->data; 2985 struct hci_conn *conn; 2986 2987 BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status, 2988 __le16_to_cpu(ev->handle)); 2989 2990 hci_dev_lock(hdev); 2991 2992 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 2993 if (!conn) 2994 goto unlock; 2995 2996 if (!ev->status) 2997 conn->sec_level = conn->pending_sec_level; 2998 2999 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); 3000 3001 if (ev->status && conn->state == BT_CONNECTED) { 3002 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE); 3003 hci_conn_drop(conn); 3004 goto unlock; 3005 } 3006 3007 if (conn->state == BT_CONFIG) { 3008 if (!ev->status) 3009 conn->state = BT_CONNECTED; 3010 3011 hci_proto_connect_cfm(conn, ev->status); 3012 hci_conn_drop(conn); 3013 } else { 3014 hci_auth_cfm(conn, ev->status); 3015 3016 hci_conn_hold(conn); 3017 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 3018 hci_conn_drop(conn); 3019 } 3020 3021 unlock: 3022 hci_dev_unlock(hdev); 3023 } 3024 3025 static u8 hci_get_auth_req(struct hci_conn *conn) 3026 { 3027 /* If remote requests dedicated bonding follow that lead */ 3028 if (conn->remote_auth == HCI_AT_DEDICATED_BONDING || 3029 conn->remote_auth == HCI_AT_DEDICATED_BONDING_MITM) { 3030 /* If both remote and local IO capabilities allow MITM 3031 * protection then require it, otherwise don't */ 3032 if (conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT || 3033 conn->io_capability == HCI_IO_NO_INPUT_OUTPUT) 3034 return HCI_AT_DEDICATED_BONDING; 3035 else 3036 return HCI_AT_DEDICATED_BONDING_MITM; 3037 } 3038 3039 /* If remote requests no-bonding follow that lead */ 3040 if (conn->remote_auth == HCI_AT_NO_BONDING || 3041 conn->remote_auth == HCI_AT_NO_BONDING_MITM) 3042 return conn->remote_auth | (conn->auth_type & 0x01); 3043 3044 return conn->auth_type; 3045 } 3046 3047 static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb) 3048 { 3049 struct hci_ev_io_capa_request *ev = (void *) skb->data; 3050 struct hci_conn *conn; 3051 3052 BT_DBG("%s", hdev->name); 3053 3054 hci_dev_lock(hdev); 3055 3056 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 3057 if (!conn) 3058 goto unlock; 3059 3060 hci_conn_hold(conn); 3061 3062 if (!test_bit(HCI_MGMT, &hdev->dev_flags)) 3063 goto unlock; 3064 3065 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) || 3066 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) { 3067 struct hci_cp_io_capability_reply cp; 3068 3069 bacpy(&cp.bdaddr, &ev->bdaddr); 3070 /* Change the IO capability from KeyboardDisplay 3071 * to DisplayYesNo as it is not supported by BT spec. */ 3072 cp.capability = (conn->io_capability == 0x04) ? 
3073 HCI_IO_DISPLAY_YESNO : conn->io_capability; 3074 conn->auth_type = hci_get_auth_req(conn); 3075 cp.authentication = conn->auth_type; 3076 3077 if (hci_find_remote_oob_data(hdev, &conn->dst) && 3078 (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags))) 3079 cp.oob_data = 0x01; 3080 else 3081 cp.oob_data = 0x00; 3082 3083 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY, 3084 sizeof(cp), &cp); 3085 } else { 3086 struct hci_cp_io_capability_neg_reply cp; 3087 3088 bacpy(&cp.bdaddr, &ev->bdaddr); 3089 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED; 3090 3091 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY, 3092 sizeof(cp), &cp); 3093 } 3094 3095 unlock: 3096 hci_dev_unlock(hdev); 3097 } 3098 3099 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb) 3100 { 3101 struct hci_ev_io_capa_reply *ev = (void *) skb->data; 3102 struct hci_conn *conn; 3103 3104 BT_DBG("%s", hdev->name); 3105 3106 hci_dev_lock(hdev); 3107 3108 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 3109 if (!conn) 3110 goto unlock; 3111 3112 conn->remote_cap = ev->capability; 3113 conn->remote_auth = ev->authentication; 3114 if (ev->oob_data) 3115 set_bit(HCI_CONN_REMOTE_OOB, &conn->flags); 3116 3117 unlock: 3118 hci_dev_unlock(hdev); 3119 } 3120 3121 static void hci_user_confirm_request_evt(struct hci_dev *hdev, 3122 struct sk_buff *skb) 3123 { 3124 struct hci_ev_user_confirm_req *ev = (void *) skb->data; 3125 int loc_mitm, rem_mitm, confirm_hint = 0; 3126 struct hci_conn *conn; 3127 3128 BT_DBG("%s", hdev->name); 3129 3130 hci_dev_lock(hdev); 3131 3132 if (!test_bit(HCI_MGMT, &hdev->dev_flags)) 3133 goto unlock; 3134 3135 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 3136 if (!conn) 3137 goto unlock; 3138 3139 loc_mitm = (conn->auth_type & 0x01); 3140 rem_mitm = (conn->remote_auth & 0x01); 3141 3142 /* If we require MITM but the remote device can't provide that 3143 * (it has NoInputNoOutput) then reject the confirmation 3144 * request. The only exception is when we're dedicated bonding 3145 * initiators (connect_cfm_cb set) since then we always have the MITM 3146 * bit set. */ 3147 if (!conn->connect_cfm_cb && loc_mitm && 3148 conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) { 3149 BT_DBG("Rejecting request: remote device can't provide MITM"); 3150 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY, 3151 sizeof(ev->bdaddr), &ev->bdaddr); 3152 goto unlock; 3153 } 3154 3155 /* If no side requires MITM protection; auto-accept */ 3156 if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) && 3157 (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) { 3158 3159 /* If we're not the initiators request authorization to 3160 * proceed from user space (mgmt_user_confirm with 3161 * confirm_hint set to 1). 
*/ 3162 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) { 3163 BT_DBG("Confirming auto-accept as acceptor"); 3164 confirm_hint = 1; 3165 goto confirm; 3166 } 3167 3168 BT_DBG("Auto-accept of user confirmation with %ums delay", 3169 hdev->auto_accept_delay); 3170 3171 if (hdev->auto_accept_delay > 0) { 3172 int delay = msecs_to_jiffies(hdev->auto_accept_delay); 3173 mod_timer(&conn->auto_accept_timer, jiffies + delay); 3174 goto unlock; 3175 } 3176 3177 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, 3178 sizeof(ev->bdaddr), &ev->bdaddr); 3179 goto unlock; 3180 } 3181 3182 confirm: 3183 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, ev->passkey, 3184 confirm_hint); 3185 3186 unlock: 3187 hci_dev_unlock(hdev); 3188 } 3189 3190 static void hci_user_passkey_request_evt(struct hci_dev *hdev, 3191 struct sk_buff *skb) 3192 { 3193 struct hci_ev_user_passkey_req *ev = (void *) skb->data; 3194 3195 BT_DBG("%s", hdev->name); 3196 3197 if (test_bit(HCI_MGMT, &hdev->dev_flags)) 3198 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0); 3199 } 3200 3201 static void hci_user_passkey_notify_evt(struct hci_dev *hdev, 3202 struct sk_buff *skb) 3203 { 3204 struct hci_ev_user_passkey_notify *ev = (void *) skb->data; 3205 struct hci_conn *conn; 3206 3207 BT_DBG("%s", hdev->name); 3208 3209 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 3210 if (!conn) 3211 return; 3212 3213 conn->passkey_notify = __le32_to_cpu(ev->passkey); 3214 conn->passkey_entered = 0; 3215 3216 if (test_bit(HCI_MGMT, &hdev->dev_flags)) 3217 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type, 3218 conn->dst_type, conn->passkey_notify, 3219 conn->passkey_entered); 3220 } 3221 3222 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb) 3223 { 3224 struct hci_ev_keypress_notify *ev = (void *) skb->data; 3225 struct hci_conn *conn; 3226 3227 BT_DBG("%s", hdev->name); 3228 3229 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 3230 if (!conn) 3231 return; 3232 3233 switch (ev->type) { 3234 case HCI_KEYPRESS_STARTED: 3235 conn->passkey_entered = 0; 3236 return; 3237 3238 case HCI_KEYPRESS_ENTERED: 3239 conn->passkey_entered++; 3240 break; 3241 3242 case HCI_KEYPRESS_ERASED: 3243 conn->passkey_entered--; 3244 break; 3245 3246 case HCI_KEYPRESS_CLEARED: 3247 conn->passkey_entered = 0; 3248 break; 3249 3250 case HCI_KEYPRESS_COMPLETED: 3251 return; 3252 } 3253 3254 if (test_bit(HCI_MGMT, &hdev->dev_flags)) 3255 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type, 3256 conn->dst_type, conn->passkey_notify, 3257 conn->passkey_entered); 3258 } 3259 3260 static void hci_simple_pair_complete_evt(struct hci_dev *hdev, 3261 struct sk_buff *skb) 3262 { 3263 struct hci_ev_simple_pair_complete *ev = (void *) skb->data; 3264 struct hci_conn *conn; 3265 3266 BT_DBG("%s", hdev->name); 3267 3268 hci_dev_lock(hdev); 3269 3270 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 3271 if (!conn) 3272 goto unlock; 3273 3274 /* To avoid duplicate auth_failed events to user space we check 3275 * the HCI_CONN_AUTH_PEND flag which will be set if we 3276 * initiated the authentication. 
A traditional auth_complete 3277 * event gets always produced as initiator and is also mapped to 3278 * the mgmt_auth_failed event */ 3279 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status) 3280 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type, 3281 ev->status); 3282 3283 hci_conn_drop(conn); 3284 3285 unlock: 3286 hci_dev_unlock(hdev); 3287 } 3288 3289 static void hci_remote_host_features_evt(struct hci_dev *hdev, 3290 struct sk_buff *skb) 3291 { 3292 struct hci_ev_remote_host_features *ev = (void *) skb->data; 3293 struct inquiry_entry *ie; 3294 struct hci_conn *conn; 3295 3296 BT_DBG("%s", hdev->name); 3297 3298 hci_dev_lock(hdev); 3299 3300 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 3301 if (conn) 3302 memcpy(conn->features[1], ev->features, 8); 3303 3304 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr); 3305 if (ie) 3306 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP); 3307 3308 hci_dev_unlock(hdev); 3309 } 3310 3311 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev, 3312 struct sk_buff *skb) 3313 { 3314 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data; 3315 struct oob_data *data; 3316 3317 BT_DBG("%s", hdev->name); 3318 3319 hci_dev_lock(hdev); 3320 3321 if (!test_bit(HCI_MGMT, &hdev->dev_flags)) 3322 goto unlock; 3323 3324 data = hci_find_remote_oob_data(hdev, &ev->bdaddr); 3325 if (data) { 3326 struct hci_cp_remote_oob_data_reply cp; 3327 3328 bacpy(&cp.bdaddr, &ev->bdaddr); 3329 memcpy(cp.hash, data->hash, sizeof(cp.hash)); 3330 memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer)); 3331 3332 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp), 3333 &cp); 3334 } else { 3335 struct hci_cp_remote_oob_data_neg_reply cp; 3336 3337 bacpy(&cp.bdaddr, &ev->bdaddr); 3338 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp), 3339 &cp); 3340 } 3341 3342 unlock: 3343 hci_dev_unlock(hdev); 3344 } 3345 3346 static void hci_phy_link_complete_evt(struct hci_dev *hdev, 3347 struct sk_buff *skb) 3348 { 3349 struct hci_ev_phy_link_complete *ev = (void *) skb->data; 3350 struct hci_conn *hcon, *bredr_hcon; 3351 3352 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle, 3353 ev->status); 3354 3355 hci_dev_lock(hdev); 3356 3357 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle); 3358 if (!hcon) { 3359 hci_dev_unlock(hdev); 3360 return; 3361 } 3362 3363 if (ev->status) { 3364 hci_conn_del(hcon); 3365 hci_dev_unlock(hdev); 3366 return; 3367 } 3368 3369 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon; 3370 3371 hcon->state = BT_CONNECTED; 3372 bacpy(&hcon->dst, &bredr_hcon->dst); 3373 3374 hci_conn_hold(hcon); 3375 hcon->disc_timeout = HCI_DISCONN_TIMEOUT; 3376 hci_conn_drop(hcon); 3377 3378 hci_conn_add_sysfs(hcon); 3379 3380 amp_physical_cfm(bredr_hcon, hcon); 3381 3382 hci_dev_unlock(hdev); 3383 } 3384 3385 static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 3386 { 3387 struct hci_ev_logical_link_complete *ev = (void *) skb->data; 3388 struct hci_conn *hcon; 3389 struct hci_chan *hchan; 3390 struct amp_mgr *mgr; 3391 3392 BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x", 3393 hdev->name, le16_to_cpu(ev->handle), ev->phy_handle, 3394 ev->status); 3395 3396 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle); 3397 if (!hcon) 3398 return; 3399 3400 /* Create AMP hchan */ 3401 hchan = hci_chan_create(hcon); 3402 if (!hchan) 3403 return; 3404 3405 hchan->handle = le16_to_cpu(ev->handle); 3406 3407 BT_DBG("hcon %p mgr %p hchan %p", 
hcon, hcon->amp_mgr, hchan); 3408 3409 mgr = hcon->amp_mgr; 3410 if (mgr && mgr->bredr_chan) { 3411 struct l2cap_chan *bredr_chan = mgr->bredr_chan; 3412 3413 l2cap_chan_lock(bredr_chan); 3414 3415 bredr_chan->conn->mtu = hdev->block_mtu; 3416 l2cap_logical_cfm(bredr_chan, hchan, 0); 3417 hci_conn_hold(hcon); 3418 3419 l2cap_chan_unlock(bredr_chan); 3420 } 3421 } 3422 3423 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev, 3424 struct sk_buff *skb) 3425 { 3426 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data; 3427 struct hci_chan *hchan; 3428 3429 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name, 3430 le16_to_cpu(ev->handle), ev->status); 3431 3432 if (ev->status) 3433 return; 3434 3435 hci_dev_lock(hdev); 3436 3437 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle)); 3438 if (!hchan) 3439 goto unlock; 3440 3441 amp_destroy_logical_link(hchan, ev->reason); 3442 3443 unlock: 3444 hci_dev_unlock(hdev); 3445 } 3446 3447 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev, 3448 struct sk_buff *skb) 3449 { 3450 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data; 3451 struct hci_conn *hcon; 3452 3453 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 3454 3455 if (ev->status) 3456 return; 3457 3458 hci_dev_lock(hdev); 3459 3460 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle); 3461 if (hcon) { 3462 hcon->state = BT_CLOSED; 3463 hci_conn_del(hcon); 3464 } 3465 3466 hci_dev_unlock(hdev); 3467 } 3468 3469 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 3470 { 3471 struct hci_ev_le_conn_complete *ev = (void *) skb->data; 3472 struct hci_conn *conn; 3473 3474 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); 3475 3476 hci_dev_lock(hdev); 3477 3478 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT); 3479 if (!conn) { 3480 conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr); 3481 if (!conn) { 3482 BT_ERR("No memory for new connection"); 3483 goto unlock; 3484 } 3485 3486 conn->dst_type = ev->bdaddr_type; 3487 3488 if (ev->role == LE_CONN_ROLE_MASTER) { 3489 conn->out = true; 3490 conn->link_mode |= HCI_LM_MASTER; 3491 } 3492 } 3493 3494 if (ev->status) { 3495 mgmt_connect_failed(hdev, &conn->dst, conn->type, 3496 conn->dst_type, ev->status); 3497 hci_proto_connect_cfm(conn, ev->status); 3498 conn->state = BT_CLOSED; 3499 hci_conn_del(conn); 3500 goto unlock; 3501 } 3502 3503 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) 3504 mgmt_device_connected(hdev, &ev->bdaddr, conn->type, 3505 conn->dst_type, 0, NULL, 0, NULL); 3506 3507 conn->sec_level = BT_SECURITY_LOW; 3508 conn->handle = __le16_to_cpu(ev->handle); 3509 conn->state = BT_CONNECTED; 3510 3511 hci_conn_add_sysfs(conn); 3512 3513 hci_proto_connect_cfm(conn, ev->status); 3514 3515 unlock: 3516 hci_dev_unlock(hdev); 3517 } 3518 3519 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb) 3520 { 3521 u8 num_reports = skb->data[0]; 3522 void *ptr = &skb->data[1]; 3523 s8 rssi; 3524 3525 while (num_reports--) { 3526 struct hci_ev_le_advertising_info *ev = ptr; 3527 3528 rssi = ev->data[ev->length]; 3529 mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type, 3530 NULL, rssi, 0, 1, ev->data, ev->length); 3531 3532 ptr += sizeof(*ev) + ev->length + 1; 3533 } 3534 } 3535 3536 static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb) 3537 { 3538 struct hci_ev_le_ltk_req *ev = (void *) skb->data; 3539 struct hci_cp_le_ltk_reply cp; 3540 struct 
hci_cp_le_ltk_neg_reply neg; 3541 struct hci_conn *conn; 3542 struct smp_ltk *ltk; 3543 3544 BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle)); 3545 3546 hci_dev_lock(hdev); 3547 3548 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 3549 if (conn == NULL) 3550 goto not_found; 3551 3552 ltk = hci_find_ltk(hdev, ev->ediv, ev->random); 3553 if (ltk == NULL) 3554 goto not_found; 3555 3556 memcpy(cp.ltk, ltk->val, sizeof(ltk->val)); 3557 cp.handle = cpu_to_le16(conn->handle); 3558 3559 if (ltk->authenticated) 3560 conn->pending_sec_level = BT_SECURITY_HIGH; 3561 else 3562 conn->pending_sec_level = BT_SECURITY_MEDIUM; 3563 3564 conn->enc_key_size = ltk->enc_size; 3565 3566 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp); 3567 3568 if (ltk->type & HCI_SMP_STK) { 3569 list_del(<k->list); 3570 kfree(ltk); 3571 } 3572 3573 hci_dev_unlock(hdev); 3574 3575 return; 3576 3577 not_found: 3578 neg.handle = ev->handle; 3579 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg); 3580 hci_dev_unlock(hdev); 3581 } 3582 3583 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb) 3584 { 3585 struct hci_ev_le_meta *le_ev = (void *) skb->data; 3586 3587 skb_pull(skb, sizeof(*le_ev)); 3588 3589 switch (le_ev->subevent) { 3590 case HCI_EV_LE_CONN_COMPLETE: 3591 hci_le_conn_complete_evt(hdev, skb); 3592 break; 3593 3594 case HCI_EV_LE_ADVERTISING_REPORT: 3595 hci_le_adv_report_evt(hdev, skb); 3596 break; 3597 3598 case HCI_EV_LE_LTK_REQ: 3599 hci_le_ltk_request_evt(hdev, skb); 3600 break; 3601 3602 default: 3603 break; 3604 } 3605 } 3606 3607 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb) 3608 { 3609 struct hci_ev_channel_selected *ev = (void *) skb->data; 3610 struct hci_conn *hcon; 3611 3612 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle); 3613 3614 skb_pull(skb, sizeof(*ev)); 3615 3616 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle); 3617 if (!hcon) 3618 return; 3619 3620 amp_read_loc_assoc_final_data(hdev, hcon); 3621 } 3622 3623 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) 3624 { 3625 struct hci_event_hdr *hdr = (void *) skb->data; 3626 __u8 event = hdr->evt; 3627 3628 hci_dev_lock(hdev); 3629 3630 /* Received events are (currently) only needed when a request is 3631 * ongoing so avoid unnecessary memory allocation. 
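	 * (The clone kept in hdev->recv_evt is consumed once the pending
	 * request completes.)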
	 */
	if (hdev->req_status == HCI_REQ_PEND) {
		kfree_skb(hdev->recv_evt);
		hdev->recv_evt = skb_clone(skb, GFP_KERNEL);
	}

	hci_dev_unlock(hdev);

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
		struct hci_command_hdr *hdr = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(hdr->opcode);

		hci_req_cmd_complete(hdev, opcode, 0);
	}

	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_KEY_REFRESH_COMPLETE:
		hci_key_refresh_complete_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_REQUEST:
		hci_user_passkey_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_NOTIFY:
		hci_user_passkey_notify_evt(hdev, skb);
		break;

	case HCI_EV_KEYPRESS_NOTIFY:
		hci_keypress_notify_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_CHANNEL_SELECTED:
		hci_chan_selected_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

	case HCI_EV_PHY_LINK_COMPLETE:
		hci_phy_link_complete_evt(hdev, skb);
		break;

	case HCI_EV_LOGICAL_LINK_COMPLETE:
		hci_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
		hci_disconn_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
		hci_disconn_phylink_complete_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_BLOCKS:
		hci_num_comp_blocks_evt(hdev, skb);
		break;

	default:
		BT_DBG("%s event 0x%2.2x", hdev->name, event);
		break;
	}

	kfree_skb(skb);
	hdev->stat.evt_rx++;
}
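
/* Note: hci_event_packet() is not called directly by transport drivers.
 * Drivers hand received packets to the core via hci_recv_frame(), and the
 * core's RX work dispatches HCI_EVENT_PKT packets to hci_event_packet()
 * above. A minimal, illustrative driver-side sequence (assumed API of this
 * kernel generation, not taken from this file) looks roughly like:
 *
 *	struct sk_buff *skb = bt_skb_alloc(count, GFP_ATOMIC);
 *
 *	memcpy(skb_put(skb, count), data, count);
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	skb->dev = (void *) hdev;
 *	hci_recv_frame(skb);
 */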