1 /* 2 BlueZ - Bluetooth protocol stack for Linux 3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved. 4 5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> 6 7 This program is free software; you can redistribute it and/or modify 8 it under the terms of the GNU General Public License version 2 as 9 published by the Free Software Foundation; 10 11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. 14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY 15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES 16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 19 20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, 21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS 22 SOFTWARE IS DISCLAIMED. 23 */ 24 25 /* Bluetooth HCI event handling. */ 26 27 #include <linux/module.h> 28 29 #include <linux/types.h> 30 #include <linux/errno.h> 31 #include <linux/kernel.h> 32 #include <linux/slab.h> 33 #include <linux/poll.h> 34 #include <linux/fcntl.h> 35 #include <linux/init.h> 36 #include <linux/skbuff.h> 37 #include <linux/interrupt.h> 38 #include <linux/notifier.h> 39 #include <net/sock.h> 40 41 #include <asm/system.h> 42 #include <linux/uaccess.h> 43 #include <asm/unaligned.h> 44 45 #include <net/bluetooth/bluetooth.h> 46 #include <net/bluetooth/hci_core.h> 47 48 /* Handle HCI Event packets */ 49 50 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb) 51 { 52 __u8 status = *((__u8 *) skb->data); 53 54 BT_DBG("%s status 0x%x", hdev->name, status); 55 56 if (status) 57 return; 58 59 if (test_bit(HCI_MGMT, &hdev->flags) && 60 test_and_clear_bit(HCI_INQUIRY, &hdev->flags)) 61 mgmt_discovering(hdev->id, 0); 62 63 hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status); 64 65 hci_conn_check_pending(hdev); 66 } 67 68 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb) 69 { 70 __u8 status = *((__u8 *) skb->data); 71 72 BT_DBG("%s status 0x%x", hdev->name, status); 73 74 if (status) 75 return; 76 77 if (test_bit(HCI_MGMT, &hdev->flags) && 78 test_and_clear_bit(HCI_INQUIRY, &hdev->flags)) 79 mgmt_discovering(hdev->id, 0); 80 81 hci_conn_check_pending(hdev); 82 } 83 84 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev, struct sk_buff *skb) 85 { 86 BT_DBG("%s", hdev->name); 87 } 88 89 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb) 90 { 91 struct hci_rp_role_discovery *rp = (void *) skb->data; 92 struct hci_conn *conn; 93 94 BT_DBG("%s status 0x%x", hdev->name, rp->status); 95 96 if (rp->status) 97 return; 98 99 hci_dev_lock(hdev); 100 101 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); 102 if (conn) { 103 if (rp->role) 104 conn->link_mode &= ~HCI_LM_MASTER; 105 else 106 conn->link_mode |= HCI_LM_MASTER; 107 } 108 109 hci_dev_unlock(hdev); 110 } 111 112 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb) 113 { 114 struct hci_rp_read_link_policy *rp = (void *) skb->data; 115 struct hci_conn *conn; 116 117 BT_DBG("%s status 0x%x", hdev->name, rp->status); 118 119 if (rp->status) 120 return; 121 122 hci_dev_lock(hdev); 123 124 conn = 
hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); 125 if (conn) 126 conn->link_policy = __le16_to_cpu(rp->policy); 127 128 hci_dev_unlock(hdev); 129 } 130 131 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb) 132 { 133 struct hci_rp_write_link_policy *rp = (void *) skb->data; 134 struct hci_conn *conn; 135 void *sent; 136 137 BT_DBG("%s status 0x%x", hdev->name, rp->status); 138 139 if (rp->status) 140 return; 141 142 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY); 143 if (!sent) 144 return; 145 146 hci_dev_lock(hdev); 147 148 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); 149 if (conn) 150 conn->link_policy = get_unaligned_le16(sent + 2); 151 152 hci_dev_unlock(hdev); 153 } 154 155 static void hci_cc_read_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb) 156 { 157 struct hci_rp_read_def_link_policy *rp = (void *) skb->data; 158 159 BT_DBG("%s status 0x%x", hdev->name, rp->status); 160 161 if (rp->status) 162 return; 163 164 hdev->link_policy = __le16_to_cpu(rp->policy); 165 } 166 167 static void hci_cc_write_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb) 168 { 169 __u8 status = *((__u8 *) skb->data); 170 void *sent; 171 172 BT_DBG("%s status 0x%x", hdev->name, status); 173 174 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY); 175 if (!sent) 176 return; 177 178 if (!status) 179 hdev->link_policy = get_unaligned_le16(sent); 180 181 hci_req_complete(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, status); 182 } 183 184 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb) 185 { 186 __u8 status = *((__u8 *) skb->data); 187 188 BT_DBG("%s status 0x%x", hdev->name, status); 189 190 clear_bit(HCI_RESET, &hdev->flags); 191 192 hci_req_complete(hdev, HCI_OP_RESET, status); 193 } 194 195 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb) 196 { 197 __u8 status = *((__u8 *) skb->data); 198 void *sent; 199 200 BT_DBG("%s status 0x%x", hdev->name, status); 201 202 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME); 203 if (!sent) 204 return; 205 206 if (test_bit(HCI_MGMT, &hdev->flags)) 207 mgmt_set_local_name_complete(hdev->id, sent, status); 208 209 if (status) 210 return; 211 212 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH); 213 } 214 215 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb) 216 { 217 struct hci_rp_read_local_name *rp = (void *) skb->data; 218 219 BT_DBG("%s status 0x%x", hdev->name, rp->status); 220 221 if (rp->status) 222 return; 223 224 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH); 225 } 226 227 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb) 228 { 229 __u8 status = *((__u8 *) skb->data); 230 void *sent; 231 232 BT_DBG("%s status 0x%x", hdev->name, status); 233 234 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE); 235 if (!sent) 236 return; 237 238 if (!status) { 239 __u8 param = *((__u8 *) sent); 240 241 if (param == AUTH_ENABLED) 242 set_bit(HCI_AUTH, &hdev->flags); 243 else 244 clear_bit(HCI_AUTH, &hdev->flags); 245 } 246 247 hci_req_complete(hdev, HCI_OP_WRITE_AUTH_ENABLE, status); 248 } 249 250 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb) 251 { 252 __u8 status = *((__u8 *) skb->data); 253 void *sent; 254 255 BT_DBG("%s status 0x%x", hdev->name, status); 256 257 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE); 258 if (!sent) 259 return; 260 261 if (!status) { 262 __u8 param = *((__u8 *) sent); 263 264 if (param) 
265 set_bit(HCI_ENCRYPT, &hdev->flags); 266 else 267 clear_bit(HCI_ENCRYPT, &hdev->flags); 268 } 269 270 hci_req_complete(hdev, HCI_OP_WRITE_ENCRYPT_MODE, status); 271 } 272 273 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb) 274 { 275 __u8 status = *((__u8 *) skb->data); 276 void *sent; 277 278 BT_DBG("%s status 0x%x", hdev->name, status); 279 280 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE); 281 if (!sent) 282 return; 283 284 if (!status) { 285 __u8 param = *((__u8 *) sent); 286 int old_pscan, old_iscan; 287 288 old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags); 289 old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags); 290 291 if (param & SCAN_INQUIRY) { 292 set_bit(HCI_ISCAN, &hdev->flags); 293 if (!old_iscan) 294 mgmt_discoverable(hdev->id, 1); 295 } else if (old_iscan) 296 mgmt_discoverable(hdev->id, 0); 297 298 if (param & SCAN_PAGE) { 299 set_bit(HCI_PSCAN, &hdev->flags); 300 if (!old_pscan) 301 mgmt_connectable(hdev->id, 1); 302 } else if (old_pscan) 303 mgmt_connectable(hdev->id, 0); 304 } 305 306 hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status); 307 } 308 309 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb) 310 { 311 struct hci_rp_read_class_of_dev *rp = (void *) skb->data; 312 313 BT_DBG("%s status 0x%x", hdev->name, rp->status); 314 315 if (rp->status) 316 return; 317 318 memcpy(hdev->dev_class, rp->dev_class, 3); 319 320 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name, 321 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]); 322 } 323 324 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb) 325 { 326 __u8 status = *((__u8 *) skb->data); 327 void *sent; 328 329 BT_DBG("%s status 0x%x", hdev->name, status); 330 331 if (status) 332 return; 333 334 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV); 335 if (!sent) 336 return; 337 338 memcpy(hdev->dev_class, sent, 3); 339 } 340 341 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb) 342 { 343 struct hci_rp_read_voice_setting *rp = (void *) skb->data; 344 __u16 setting; 345 346 BT_DBG("%s status 0x%x", hdev->name, rp->status); 347 348 if (rp->status) 349 return; 350 351 setting = __le16_to_cpu(rp->voice_setting); 352 353 if (hdev->voice_setting == setting) 354 return; 355 356 hdev->voice_setting = setting; 357 358 BT_DBG("%s voice setting 0x%04x", hdev->name, setting); 359 360 if (hdev->notify) { 361 tasklet_disable(&hdev->tx_task); 362 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING); 363 tasklet_enable(&hdev->tx_task); 364 } 365 } 366 367 static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb) 368 { 369 __u8 status = *((__u8 *) skb->data); 370 __u16 setting; 371 void *sent; 372 373 BT_DBG("%s status 0x%x", hdev->name, status); 374 375 if (status) 376 return; 377 378 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING); 379 if (!sent) 380 return; 381 382 setting = get_unaligned_le16(sent); 383 384 if (hdev->voice_setting == setting) 385 return; 386 387 hdev->voice_setting = setting; 388 389 BT_DBG("%s voice setting 0x%04x", hdev->name, setting); 390 391 if (hdev->notify) { 392 tasklet_disable(&hdev->tx_task); 393 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING); 394 tasklet_enable(&hdev->tx_task); 395 } 396 } 397 398 static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb) 399 { 400 __u8 status = *((__u8 *) skb->data); 401 402 BT_DBG("%s status 0x%x", hdev->name, status); 403 404 hci_req_complete(hdev, HCI_OP_HOST_BUFFER_SIZE, 
status); 405 } 406 407 static void hci_cc_read_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb) 408 { 409 struct hci_rp_read_ssp_mode *rp = (void *) skb->data; 410 411 BT_DBG("%s status 0x%x", hdev->name, rp->status); 412 413 if (rp->status) 414 return; 415 416 hdev->ssp_mode = rp->mode; 417 } 418 419 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb) 420 { 421 __u8 status = *((__u8 *) skb->data); 422 void *sent; 423 424 BT_DBG("%s status 0x%x", hdev->name, status); 425 426 if (status) 427 return; 428 429 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE); 430 if (!sent) 431 return; 432 433 hdev->ssp_mode = *((__u8 *) sent); 434 } 435 436 static u8 hci_get_inquiry_mode(struct hci_dev *hdev) 437 { 438 if (hdev->features[6] & LMP_EXT_INQ) 439 return 2; 440 441 if (hdev->features[3] & LMP_RSSI_INQ) 442 return 1; 443 444 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 && 445 hdev->lmp_subver == 0x0757) 446 return 1; 447 448 if (hdev->manufacturer == 15) { 449 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963) 450 return 1; 451 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963) 452 return 1; 453 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965) 454 return 1; 455 } 456 457 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 && 458 hdev->lmp_subver == 0x1805) 459 return 1; 460 461 return 0; 462 } 463 464 static void hci_setup_inquiry_mode(struct hci_dev *hdev) 465 { 466 u8 mode; 467 468 mode = hci_get_inquiry_mode(hdev); 469 470 hci_send_cmd(hdev, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode); 471 } 472 473 static void hci_setup_event_mask(struct hci_dev *hdev) 474 { 475 /* The second byte is 0xff instead of 0x9f (two reserved bits 476 * disabled) since a Broadcom 1.2 dongle doesn't respond to the 477 * command otherwise */ 478 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 }; 479 480 /* CSR 1.1 dongles does not accept any bitfield so don't try to set 481 * any event mask for pre 1.2 devices */ 482 if (hdev->lmp_ver <= 1) 483 return; 484 485 events[4] |= 0x01; /* Flow Specification Complete */ 486 events[4] |= 0x02; /* Inquiry Result with RSSI */ 487 events[4] |= 0x04; /* Read Remote Extended Features Complete */ 488 events[5] |= 0x08; /* Synchronous Connection Complete */ 489 events[5] |= 0x10; /* Synchronous Connection Changed */ 490 491 if (hdev->features[3] & LMP_RSSI_INQ) 492 events[4] |= 0x04; /* Inquiry Result with RSSI */ 493 494 if (hdev->features[5] & LMP_SNIFF_SUBR) 495 events[5] |= 0x20; /* Sniff Subrating */ 496 497 if (hdev->features[5] & LMP_PAUSE_ENC) 498 events[5] |= 0x80; /* Encryption Key Refresh Complete */ 499 500 if (hdev->features[6] & LMP_EXT_INQ) 501 events[5] |= 0x40; /* Extended Inquiry Result */ 502 503 if (hdev->features[6] & LMP_NO_FLUSH) 504 events[7] |= 0x01; /* Enhanced Flush Complete */ 505 506 if (hdev->features[7] & LMP_LSTO) 507 events[6] |= 0x80; /* Link Supervision Timeout Changed */ 508 509 if (hdev->features[6] & LMP_SIMPLE_PAIR) { 510 events[6] |= 0x01; /* IO Capability Request */ 511 events[6] |= 0x02; /* IO Capability Response */ 512 events[6] |= 0x04; /* User Confirmation Request */ 513 events[6] |= 0x08; /* User Passkey Request */ 514 events[6] |= 0x10; /* Remote OOB Data Request */ 515 events[6] |= 0x20; /* Simple Pairing Complete */ 516 events[7] |= 0x04; /* User Passkey Notification */ 517 events[7] |= 0x08; /* Keypress Notification */ 518 events[7] |= 0x10; /* Remote Host Supported 519 * Features Notification */ 520 } 521 522 if (hdev->features[4] & LMP_LE) 523 events[7] |= 0x20; 
/* LE Meta-Event */ 524 525 hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events); 526 } 527 528 static void hci_setup(struct hci_dev *hdev) 529 { 530 hci_setup_event_mask(hdev); 531 532 if (hdev->lmp_ver > 1) 533 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL); 534 535 if (hdev->features[6] & LMP_SIMPLE_PAIR) { 536 u8 mode = 0x01; 537 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode); 538 } 539 540 if (hdev->features[3] & LMP_RSSI_INQ) 541 hci_setup_inquiry_mode(hdev); 542 543 if (hdev->features[7] & LMP_INQ_TX_PWR) 544 hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL); 545 546 if (hdev->features[7] & LMP_EXTFEATURES) { 547 struct hci_cp_read_local_ext_features cp; 548 549 cp.page = 0x01; 550 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, 551 sizeof(cp), &cp); 552 } 553 } 554 555 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb) 556 { 557 struct hci_rp_read_local_version *rp = (void *) skb->data; 558 559 BT_DBG("%s status 0x%x", hdev->name, rp->status); 560 561 if (rp->status) 562 return; 563 564 hdev->hci_ver = rp->hci_ver; 565 hdev->hci_rev = __le16_to_cpu(rp->hci_rev); 566 hdev->lmp_ver = rp->lmp_ver; 567 hdev->manufacturer = __le16_to_cpu(rp->manufacturer); 568 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver); 569 570 BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name, 571 hdev->manufacturer, 572 hdev->hci_ver, hdev->hci_rev); 573 574 if (test_bit(HCI_INIT, &hdev->flags)) 575 hci_setup(hdev); 576 } 577 578 static void hci_setup_link_policy(struct hci_dev *hdev) 579 { 580 u16 link_policy = 0; 581 582 if (hdev->features[0] & LMP_RSWITCH) 583 link_policy |= HCI_LP_RSWITCH; 584 if (hdev->features[0] & LMP_HOLD) 585 link_policy |= HCI_LP_HOLD; 586 if (hdev->features[0] & LMP_SNIFF) 587 link_policy |= HCI_LP_SNIFF; 588 if (hdev->features[1] & LMP_PARK) 589 link_policy |= HCI_LP_PARK; 590 591 link_policy = cpu_to_le16(link_policy); 592 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 593 sizeof(link_policy), &link_policy); 594 } 595 596 static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb) 597 { 598 struct hci_rp_read_local_commands *rp = (void *) skb->data; 599 600 BT_DBG("%s status 0x%x", hdev->name, rp->status); 601 602 if (rp->status) 603 goto done; 604 605 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands)); 606 607 if (test_bit(HCI_INIT, &hdev->flags) && (hdev->commands[5] & 0x10)) 608 hci_setup_link_policy(hdev); 609 610 done: 611 hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status); 612 } 613 614 static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb) 615 { 616 struct hci_rp_read_local_features *rp = (void *) skb->data; 617 618 BT_DBG("%s status 0x%x", hdev->name, rp->status); 619 620 if (rp->status) 621 return; 622 623 memcpy(hdev->features, rp->features, 8); 624 625 /* Adjust default settings according to features 626 * supported by device. 
*/ 627 628 if (hdev->features[0] & LMP_3SLOT) 629 hdev->pkt_type |= (HCI_DM3 | HCI_DH3); 630 631 if (hdev->features[0] & LMP_5SLOT) 632 hdev->pkt_type |= (HCI_DM5 | HCI_DH5); 633 634 if (hdev->features[1] & LMP_HV2) { 635 hdev->pkt_type |= (HCI_HV2); 636 hdev->esco_type |= (ESCO_HV2); 637 } 638 639 if (hdev->features[1] & LMP_HV3) { 640 hdev->pkt_type |= (HCI_HV3); 641 hdev->esco_type |= (ESCO_HV3); 642 } 643 644 if (hdev->features[3] & LMP_ESCO) 645 hdev->esco_type |= (ESCO_EV3); 646 647 if (hdev->features[4] & LMP_EV4) 648 hdev->esco_type |= (ESCO_EV4); 649 650 if (hdev->features[4] & LMP_EV5) 651 hdev->esco_type |= (ESCO_EV5); 652 653 if (hdev->features[5] & LMP_EDR_ESCO_2M) 654 hdev->esco_type |= (ESCO_2EV3); 655 656 if (hdev->features[5] & LMP_EDR_ESCO_3M) 657 hdev->esco_type |= (ESCO_3EV3); 658 659 if (hdev->features[5] & LMP_EDR_3S_ESCO) 660 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5); 661 662 BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name, 663 hdev->features[0], hdev->features[1], 664 hdev->features[2], hdev->features[3], 665 hdev->features[4], hdev->features[5], 666 hdev->features[6], hdev->features[7]); 667 } 668 669 static void hci_cc_read_local_ext_features(struct hci_dev *hdev, 670 struct sk_buff *skb) 671 { 672 struct hci_rp_read_local_ext_features *rp = (void *) skb->data; 673 674 BT_DBG("%s status 0x%x", hdev->name, rp->status); 675 676 if (rp->status) 677 return; 678 679 memcpy(hdev->extfeatures, rp->features, 8); 680 681 hci_req_complete(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, rp->status); 682 } 683 684 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb) 685 { 686 struct hci_rp_read_buffer_size *rp = (void *) skb->data; 687 688 BT_DBG("%s status 0x%x", hdev->name, rp->status); 689 690 if (rp->status) 691 return; 692 693 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu); 694 hdev->sco_mtu = rp->sco_mtu; 695 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt); 696 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt); 697 698 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) { 699 hdev->sco_mtu = 64; 700 hdev->sco_pkts = 8; 701 } 702 703 hdev->acl_cnt = hdev->acl_pkts; 704 hdev->sco_cnt = hdev->sco_pkts; 705 706 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, 707 hdev->acl_mtu, hdev->acl_pkts, 708 hdev->sco_mtu, hdev->sco_pkts); 709 } 710 711 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb) 712 { 713 struct hci_rp_read_bd_addr *rp = (void *) skb->data; 714 715 BT_DBG("%s status 0x%x", hdev->name, rp->status); 716 717 if (!rp->status) 718 bacpy(&hdev->bdaddr, &rp->bdaddr); 719 720 hci_req_complete(hdev, HCI_OP_READ_BD_ADDR, rp->status); 721 } 722 723 static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb) 724 { 725 __u8 status = *((__u8 *) skb->data); 726 727 BT_DBG("%s status 0x%x", hdev->name, status); 728 729 hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status); 730 } 731 732 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev, 733 struct sk_buff *skb) 734 { 735 __u8 status = *((__u8 *) skb->data); 736 737 BT_DBG("%s status 0x%x", hdev->name, status); 738 739 hci_req_complete(hdev, HCI_OP_DELETE_STORED_LINK_KEY, status); 740 } 741 742 static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb) 743 { 744 __u8 status = *((__u8 *) skb->data); 745 746 BT_DBG("%s status 0x%x", hdev->name, status); 747 748 hci_req_complete(hdev, HCI_OP_SET_EVENT_MASK, status); 749 } 750 751 static void hci_cc_write_inquiry_mode(struct hci_dev *hdev, 752 struct sk_buff *skb) 
753 { 754 __u8 status = *((__u8 *) skb->data); 755 756 BT_DBG("%s status 0x%x", hdev->name, status); 757 758 hci_req_complete(hdev, HCI_OP_WRITE_INQUIRY_MODE, status); 759 } 760 761 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, 762 struct sk_buff *skb) 763 { 764 __u8 status = *((__u8 *) skb->data); 765 766 BT_DBG("%s status 0x%x", hdev->name, status); 767 768 hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, status); 769 } 770 771 static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb) 772 { 773 __u8 status = *((__u8 *) skb->data); 774 775 BT_DBG("%s status 0x%x", hdev->name, status); 776 777 hci_req_complete(hdev, HCI_OP_SET_EVENT_FLT, status); 778 } 779 780 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb) 781 { 782 struct hci_rp_pin_code_reply *rp = (void *) skb->data; 783 struct hci_cp_pin_code_reply *cp; 784 struct hci_conn *conn; 785 786 BT_DBG("%s status 0x%x", hdev->name, rp->status); 787 788 if (test_bit(HCI_MGMT, &hdev->flags)) 789 mgmt_pin_code_reply_complete(hdev->id, &rp->bdaddr, rp->status); 790 791 if (rp->status != 0) 792 return; 793 794 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY); 795 if (!cp) 796 return; 797 798 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); 799 if (conn) 800 conn->pin_length = cp->pin_len; 801 } 802 803 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb) 804 { 805 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data; 806 807 BT_DBG("%s status 0x%x", hdev->name, rp->status); 808 809 if (test_bit(HCI_MGMT, &hdev->flags)) 810 mgmt_pin_code_neg_reply_complete(hdev->id, &rp->bdaddr, 811 rp->status); 812 } 813 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev, 814 struct sk_buff *skb) 815 { 816 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data; 817 818 BT_DBG("%s status 0x%x", hdev->name, rp->status); 819 820 if (rp->status) 821 return; 822 823 hdev->le_mtu = __le16_to_cpu(rp->le_mtu); 824 hdev->le_pkts = rp->le_max_pkt; 825 826 hdev->le_cnt = hdev->le_pkts; 827 828 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts); 829 830 hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status); 831 } 832 833 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb) 834 { 835 struct hci_rp_user_confirm_reply *rp = (void *) skb->data; 836 837 BT_DBG("%s status 0x%x", hdev->name, rp->status); 838 839 if (test_bit(HCI_MGMT, &hdev->flags)) 840 mgmt_user_confirm_reply_complete(hdev->id, &rp->bdaddr, 841 rp->status); 842 } 843 844 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev, 845 struct sk_buff *skb) 846 { 847 struct hci_rp_user_confirm_reply *rp = (void *) skb->data; 848 849 BT_DBG("%s status 0x%x", hdev->name, rp->status); 850 851 if (test_bit(HCI_MGMT, &hdev->flags)) 852 mgmt_user_confirm_neg_reply_complete(hdev->id, &rp->bdaddr, 853 rp->status); 854 } 855 856 static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev, 857 struct sk_buff *skb) 858 { 859 struct hci_rp_read_local_oob_data *rp = (void *) skb->data; 860 861 BT_DBG("%s status 0x%x", hdev->name, rp->status); 862 863 mgmt_read_local_oob_data_reply_complete(hdev->id, rp->hash, 864 rp->randomizer, rp->status); 865 } 866 867 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev, 868 struct sk_buff *skb) 869 { 870 struct hci_cp_le_set_scan_enable *cp; 871 __u8 status = *((__u8 *) skb->data); 872 873 BT_DBG("%s status 0x%x", hdev->name, status); 874 875 if (status) 876 return; 877 878 cp = 
hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE); 879 if (!cp) 880 return; 881 882 hci_dev_lock(hdev); 883 884 if (cp->enable == 0x01) { 885 del_timer(&hdev->adv_timer); 886 hci_adv_entries_clear(hdev); 887 } else if (cp->enable == 0x00) { 888 mod_timer(&hdev->adv_timer, jiffies + ADV_CLEAR_TIMEOUT); 889 } 890 891 hci_dev_unlock(hdev); 892 } 893 894 static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb) 895 { 896 struct hci_rp_le_ltk_reply *rp = (void *) skb->data; 897 898 BT_DBG("%s status 0x%x", hdev->name, rp->status); 899 900 if (rp->status) 901 return; 902 903 hci_req_complete(hdev, HCI_OP_LE_LTK_REPLY, rp->status); 904 } 905 906 static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb) 907 { 908 struct hci_rp_le_ltk_neg_reply *rp = (void *) skb->data; 909 910 BT_DBG("%s status 0x%x", hdev->name, rp->status); 911 912 if (rp->status) 913 return; 914 915 hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status); 916 } 917 918 static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status) 919 { 920 BT_DBG("%s status 0x%x", hdev->name, status); 921 922 if (status) { 923 hci_req_complete(hdev, HCI_OP_INQUIRY, status); 924 hci_conn_check_pending(hdev); 925 return; 926 } 927 928 if (test_bit(HCI_MGMT, &hdev->flags) && 929 !test_and_set_bit(HCI_INQUIRY, 930 &hdev->flags)) 931 mgmt_discovering(hdev->id, 1); 932 } 933 934 static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status) 935 { 936 struct hci_cp_create_conn *cp; 937 struct hci_conn *conn; 938 939 BT_DBG("%s status 0x%x", hdev->name, status); 940 941 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN); 942 if (!cp) 943 return; 944 945 hci_dev_lock(hdev); 946 947 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); 948 949 BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->bdaddr), conn); 950 951 if (status) { 952 if (conn && conn->state == BT_CONNECT) { 953 if (status != 0x0c || conn->attempt > 2) { 954 conn->state = BT_CLOSED; 955 hci_proto_connect_cfm(conn, status); 956 hci_conn_del(conn); 957 } else 958 conn->state = BT_CONNECT2; 959 } 960 } else { 961 if (!conn) { 962 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr); 963 if (conn) { 964 conn->out = 1; 965 conn->link_mode |= HCI_LM_MASTER; 966 } else 967 BT_ERR("No memory for new connection"); 968 } 969 } 970 971 hci_dev_unlock(hdev); 972 } 973 974 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status) 975 { 976 struct hci_cp_add_sco *cp; 977 struct hci_conn *acl, *sco; 978 __u16 handle; 979 980 BT_DBG("%s status 0x%x", hdev->name, status); 981 982 if (!status) 983 return; 984 985 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO); 986 if (!cp) 987 return; 988 989 handle = __le16_to_cpu(cp->handle); 990 991 BT_DBG("%s handle %d", hdev->name, handle); 992 993 hci_dev_lock(hdev); 994 995 acl = hci_conn_hash_lookup_handle(hdev, handle); 996 if (acl) { 997 sco = acl->link; 998 if (sco) { 999 sco->state = BT_CLOSED; 1000 1001 hci_proto_connect_cfm(sco, status); 1002 hci_conn_del(sco); 1003 } 1004 } 1005 1006 hci_dev_unlock(hdev); 1007 } 1008 1009 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status) 1010 { 1011 struct hci_cp_auth_requested *cp; 1012 struct hci_conn *conn; 1013 1014 BT_DBG("%s status 0x%x", hdev->name, status); 1015 1016 if (!status) 1017 return; 1018 1019 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED); 1020 if (!cp) 1021 return; 1022 1023 hci_dev_lock(hdev); 1024 1025 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 1026 if (conn) { 1027 if (conn->state == 
BT_CONFIG) { 1028 hci_proto_connect_cfm(conn, status); 1029 hci_conn_put(conn); 1030 } 1031 } 1032 1033 hci_dev_unlock(hdev); 1034 } 1035 1036 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status) 1037 { 1038 struct hci_cp_set_conn_encrypt *cp; 1039 struct hci_conn *conn; 1040 1041 BT_DBG("%s status 0x%x", hdev->name, status); 1042 1043 if (!status) 1044 return; 1045 1046 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT); 1047 if (!cp) 1048 return; 1049 1050 hci_dev_lock(hdev); 1051 1052 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 1053 if (conn) { 1054 if (conn->state == BT_CONFIG) { 1055 hci_proto_connect_cfm(conn, status); 1056 hci_conn_put(conn); 1057 } 1058 } 1059 1060 hci_dev_unlock(hdev); 1061 } 1062 1063 static int hci_outgoing_auth_needed(struct hci_dev *hdev, 1064 struct hci_conn *conn) 1065 { 1066 if (conn->state != BT_CONFIG || !conn->out) 1067 return 0; 1068 1069 if (conn->pending_sec_level == BT_SECURITY_SDP) 1070 return 0; 1071 1072 /* Only request authentication for SSP connections or non-SSP 1073 * devices with sec_level HIGH */ 1074 if (!(hdev->ssp_mode > 0 && conn->ssp_mode > 0) && 1075 conn->pending_sec_level != BT_SECURITY_HIGH) 1076 return 0; 1077 1078 return 1; 1079 } 1080 1081 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status) 1082 { 1083 struct hci_cp_remote_name_req *cp; 1084 struct hci_conn *conn; 1085 1086 BT_DBG("%s status 0x%x", hdev->name, status); 1087 1088 /* If successful wait for the name req complete event before 1089 * checking for the need to do authentication */ 1090 if (!status) 1091 return; 1092 1093 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ); 1094 if (!cp) 1095 return; 1096 1097 hci_dev_lock(hdev); 1098 1099 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); 1100 if (!conn) 1101 goto unlock; 1102 1103 if (!hci_outgoing_auth_needed(hdev, conn)) 1104 goto unlock; 1105 1106 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) { 1107 struct hci_cp_auth_requested cp; 1108 cp.handle = __cpu_to_le16(conn->handle); 1109 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp); 1110 } 1111 1112 unlock: 1113 hci_dev_unlock(hdev); 1114 } 1115 1116 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status) 1117 { 1118 struct hci_cp_read_remote_features *cp; 1119 struct hci_conn *conn; 1120 1121 BT_DBG("%s status 0x%x", hdev->name, status); 1122 1123 if (!status) 1124 return; 1125 1126 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES); 1127 if (!cp) 1128 return; 1129 1130 hci_dev_lock(hdev); 1131 1132 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 1133 if (conn) { 1134 if (conn->state == BT_CONFIG) { 1135 hci_proto_connect_cfm(conn, status); 1136 hci_conn_put(conn); 1137 } 1138 } 1139 1140 hci_dev_unlock(hdev); 1141 } 1142 1143 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status) 1144 { 1145 struct hci_cp_read_remote_ext_features *cp; 1146 struct hci_conn *conn; 1147 1148 BT_DBG("%s status 0x%x", hdev->name, status); 1149 1150 if (!status) 1151 return; 1152 1153 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES); 1154 if (!cp) 1155 return; 1156 1157 hci_dev_lock(hdev); 1158 1159 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 1160 if (conn) { 1161 if (conn->state == BT_CONFIG) { 1162 hci_proto_connect_cfm(conn, status); 1163 hci_conn_put(conn); 1164 } 1165 } 1166 1167 hci_dev_unlock(hdev); 1168 } 1169 1170 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 
status) 1171 { 1172 struct hci_cp_setup_sync_conn *cp; 1173 struct hci_conn *acl, *sco; 1174 __u16 handle; 1175 1176 BT_DBG("%s status 0x%x", hdev->name, status); 1177 1178 if (!status) 1179 return; 1180 1181 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN); 1182 if (!cp) 1183 return; 1184 1185 handle = __le16_to_cpu(cp->handle); 1186 1187 BT_DBG("%s handle %d", hdev->name, handle); 1188 1189 hci_dev_lock(hdev); 1190 1191 acl = hci_conn_hash_lookup_handle(hdev, handle); 1192 if (acl) { 1193 sco = acl->link; 1194 if (sco) { 1195 sco->state = BT_CLOSED; 1196 1197 hci_proto_connect_cfm(sco, status); 1198 hci_conn_del(sco); 1199 } 1200 } 1201 1202 hci_dev_unlock(hdev); 1203 } 1204 1205 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status) 1206 { 1207 struct hci_cp_sniff_mode *cp; 1208 struct hci_conn *conn; 1209 1210 BT_DBG("%s status 0x%x", hdev->name, status); 1211 1212 if (!status) 1213 return; 1214 1215 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE); 1216 if (!cp) 1217 return; 1218 1219 hci_dev_lock(hdev); 1220 1221 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 1222 if (conn) { 1223 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend); 1224 1225 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend)) 1226 hci_sco_setup(conn, status); 1227 } 1228 1229 hci_dev_unlock(hdev); 1230 } 1231 1232 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status) 1233 { 1234 struct hci_cp_exit_sniff_mode *cp; 1235 struct hci_conn *conn; 1236 1237 BT_DBG("%s status 0x%x", hdev->name, status); 1238 1239 if (!status) 1240 return; 1241 1242 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE); 1243 if (!cp) 1244 return; 1245 1246 hci_dev_lock(hdev); 1247 1248 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 1249 if (conn) { 1250 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend); 1251 1252 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend)) 1253 hci_sco_setup(conn, status); 1254 } 1255 1256 hci_dev_unlock(hdev); 1257 } 1258 1259 static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status) 1260 { 1261 struct hci_cp_le_create_conn *cp; 1262 struct hci_conn *conn; 1263 1264 BT_DBG("%s status 0x%x", hdev->name, status); 1265 1266 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN); 1267 if (!cp) 1268 return; 1269 1270 hci_dev_lock(hdev); 1271 1272 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr); 1273 1274 BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->peer_addr), 1275 conn); 1276 1277 if (status) { 1278 if (conn && conn->state == BT_CONNECT) { 1279 conn->state = BT_CLOSED; 1280 hci_proto_connect_cfm(conn, status); 1281 hci_conn_del(conn); 1282 } 1283 } else { 1284 if (!conn) { 1285 conn = hci_conn_add(hdev, LE_LINK, &cp->peer_addr); 1286 if (conn) { 1287 conn->dst_type = cp->peer_addr_type; 1288 conn->out = 1; 1289 } else { 1290 BT_ERR("No memory for new connection"); 1291 } 1292 } 1293 } 1294 1295 hci_dev_unlock(hdev); 1296 } 1297 1298 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status) 1299 { 1300 BT_DBG("%s status 0x%x", hdev->name, status); 1301 } 1302 1303 static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 1304 { 1305 __u8 status = *((__u8 *) skb->data); 1306 1307 BT_DBG("%s status %d", hdev->name, status); 1308 1309 if (test_bit(HCI_MGMT, &hdev->flags) && 1310 test_and_clear_bit(HCI_INQUIRY, &hdev->flags)) 1311 mgmt_discovering(hdev->id, 0); 1312 1313 hci_req_complete(hdev, HCI_OP_INQUIRY, status); 1314 1315 hci_conn_check_pending(hdev); 1316 } 1317 
1318 static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb) 1319 { 1320 struct inquiry_data data; 1321 struct inquiry_info *info = (void *) (skb->data + 1); 1322 int num_rsp = *((__u8 *) skb->data); 1323 1324 BT_DBG("%s num_rsp %d", hdev->name, num_rsp); 1325 1326 if (!num_rsp) 1327 return; 1328 1329 hci_dev_lock(hdev); 1330 1331 if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags)) { 1332 1333 if (test_bit(HCI_MGMT, &hdev->flags)) 1334 mgmt_discovering(hdev->id, 1); 1335 } 1336 1337 for (; num_rsp; num_rsp--, info++) { 1338 bacpy(&data.bdaddr, &info->bdaddr); 1339 data.pscan_rep_mode = info->pscan_rep_mode; 1340 data.pscan_period_mode = info->pscan_period_mode; 1341 data.pscan_mode = info->pscan_mode; 1342 memcpy(data.dev_class, info->dev_class, 3); 1343 data.clock_offset = info->clock_offset; 1344 data.rssi = 0x00; 1345 data.ssp_mode = 0x00; 1346 hci_inquiry_cache_update(hdev, &data); 1347 mgmt_device_found(hdev->id, &info->bdaddr, info->dev_class, 0, 1348 NULL); 1349 } 1350 1351 hci_dev_unlock(hdev); 1352 } 1353 1354 static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 1355 { 1356 struct hci_ev_conn_complete *ev = (void *) skb->data; 1357 struct hci_conn *conn; 1358 1359 BT_DBG("%s", hdev->name); 1360 1361 hci_dev_lock(hdev); 1362 1363 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); 1364 if (!conn) { 1365 if (ev->link_type != SCO_LINK) 1366 goto unlock; 1367 1368 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr); 1369 if (!conn) 1370 goto unlock; 1371 1372 conn->type = SCO_LINK; 1373 } 1374 1375 if (!ev->status) { 1376 conn->handle = __le16_to_cpu(ev->handle); 1377 1378 if (conn->type == ACL_LINK) { 1379 conn->state = BT_CONFIG; 1380 hci_conn_hold(conn); 1381 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 1382 mgmt_connected(hdev->id, &ev->bdaddr); 1383 } else 1384 conn->state = BT_CONNECTED; 1385 1386 hci_conn_hold_device(conn); 1387 hci_conn_add_sysfs(conn); 1388 1389 if (test_bit(HCI_AUTH, &hdev->flags)) 1390 conn->link_mode |= HCI_LM_AUTH; 1391 1392 if (test_bit(HCI_ENCRYPT, &hdev->flags)) 1393 conn->link_mode |= HCI_LM_ENCRYPT; 1394 1395 /* Get remote features */ 1396 if (conn->type == ACL_LINK) { 1397 struct hci_cp_read_remote_features cp; 1398 cp.handle = ev->handle; 1399 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES, 1400 sizeof(cp), &cp); 1401 } 1402 1403 /* Set packet type for incoming connection */ 1404 if (!conn->out && hdev->hci_ver < 3) { 1405 struct hci_cp_change_conn_ptype cp; 1406 cp.handle = ev->handle; 1407 cp.pkt_type = cpu_to_le16(conn->pkt_type); 1408 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, 1409 sizeof(cp), &cp); 1410 } 1411 } else { 1412 conn->state = BT_CLOSED; 1413 if (conn->type == ACL_LINK) 1414 mgmt_connect_failed(hdev->id, &ev->bdaddr, ev->status); 1415 } 1416 1417 if (conn->type == ACL_LINK) 1418 hci_sco_setup(conn, ev->status); 1419 1420 if (ev->status) { 1421 hci_proto_connect_cfm(conn, ev->status); 1422 hci_conn_del(conn); 1423 } else if (ev->link_type != ACL_LINK) 1424 hci_proto_connect_cfm(conn, ev->status); 1425 1426 unlock: 1427 hci_dev_unlock(hdev); 1428 1429 hci_conn_check_pending(hdev); 1430 } 1431 1432 static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb) 1433 { 1434 struct hci_ev_conn_request *ev = (void *) skb->data; 1435 int mask = hdev->link_mode; 1436 1437 BT_DBG("%s bdaddr %s type 0x%x", hdev->name, 1438 batostr(&ev->bdaddr), ev->link_type); 1439 1440 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type); 1441 1442 
if ((mask & HCI_LM_ACCEPT) && 1443 !hci_blacklist_lookup(hdev, &ev->bdaddr)) { 1444 /* Connection accepted */ 1445 struct inquiry_entry *ie; 1446 struct hci_conn *conn; 1447 1448 hci_dev_lock(hdev); 1449 1450 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr); 1451 if (ie) 1452 memcpy(ie->data.dev_class, ev->dev_class, 3); 1453 1454 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); 1455 if (!conn) { 1456 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr); 1457 if (!conn) { 1458 BT_ERR("No memory for new connection"); 1459 hci_dev_unlock(hdev); 1460 return; 1461 } 1462 } 1463 1464 memcpy(conn->dev_class, ev->dev_class, 3); 1465 conn->state = BT_CONNECT; 1466 1467 hci_dev_unlock(hdev); 1468 1469 if (ev->link_type == ACL_LINK || !lmp_esco_capable(hdev)) { 1470 struct hci_cp_accept_conn_req cp; 1471 1472 bacpy(&cp.bdaddr, &ev->bdaddr); 1473 1474 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER)) 1475 cp.role = 0x00; /* Become master */ 1476 else 1477 cp.role = 0x01; /* Remain slave */ 1478 1479 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, 1480 sizeof(cp), &cp); 1481 } else { 1482 struct hci_cp_accept_sync_conn_req cp; 1483 1484 bacpy(&cp.bdaddr, &ev->bdaddr); 1485 cp.pkt_type = cpu_to_le16(conn->pkt_type); 1486 1487 cp.tx_bandwidth = cpu_to_le32(0x00001f40); 1488 cp.rx_bandwidth = cpu_to_le32(0x00001f40); 1489 cp.max_latency = cpu_to_le16(0xffff); 1490 cp.content_format = cpu_to_le16(hdev->voice_setting); 1491 cp.retrans_effort = 0xff; 1492 1493 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, 1494 sizeof(cp), &cp); 1495 } 1496 } else { 1497 /* Connection rejected */ 1498 struct hci_cp_reject_conn_req cp; 1499 1500 bacpy(&cp.bdaddr, &ev->bdaddr); 1501 cp.reason = 0x0f; 1502 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp); 1503 } 1504 } 1505 1506 static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 1507 { 1508 struct hci_ev_disconn_complete *ev = (void *) skb->data; 1509 struct hci_conn *conn; 1510 1511 BT_DBG("%s status %d", hdev->name, ev->status); 1512 1513 if (ev->status) { 1514 mgmt_disconnect_failed(hdev->id); 1515 return; 1516 } 1517 1518 hci_dev_lock(hdev); 1519 1520 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 1521 if (!conn) 1522 goto unlock; 1523 1524 conn->state = BT_CLOSED; 1525 1526 if (conn->type == ACL_LINK || conn->type == LE_LINK) 1527 mgmt_disconnected(hdev->id, &conn->dst); 1528 1529 hci_proto_disconn_cfm(conn, ev->reason); 1530 hci_conn_del(conn); 1531 1532 unlock: 1533 hci_dev_unlock(hdev); 1534 } 1535 1536 static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 1537 { 1538 struct hci_ev_auth_complete *ev = (void *) skb->data; 1539 struct hci_conn *conn; 1540 1541 BT_DBG("%s status %d", hdev->name, ev->status); 1542 1543 hci_dev_lock(hdev); 1544 1545 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 1546 if (!conn) 1547 goto unlock; 1548 1549 if (!ev->status) { 1550 if (!(conn->ssp_mode > 0 && hdev->ssp_mode > 0) && 1551 test_bit(HCI_CONN_REAUTH_PEND, &conn->pend)) { 1552 BT_INFO("re-auth of legacy device is not possible."); 1553 } else { 1554 conn->link_mode |= HCI_LM_AUTH; 1555 conn->sec_level = conn->pending_sec_level; 1556 } 1557 } else { 1558 mgmt_auth_failed(hdev->id, &conn->dst, ev->status); 1559 } 1560 1561 clear_bit(HCI_CONN_AUTH_PEND, &conn->pend); 1562 clear_bit(HCI_CONN_REAUTH_PEND, &conn->pend); 1563 1564 if (conn->state == BT_CONFIG) { 1565 if (!ev->status && hdev->ssp_mode > 0 && conn->ssp_mode > 0) { 1566 struct 
hci_cp_set_conn_encrypt cp; 1567 cp.handle = ev->handle; 1568 cp.encrypt = 0x01; 1569 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), 1570 &cp); 1571 } else { 1572 conn->state = BT_CONNECTED; 1573 hci_proto_connect_cfm(conn, ev->status); 1574 hci_conn_put(conn); 1575 } 1576 } else { 1577 hci_auth_cfm(conn, ev->status); 1578 1579 hci_conn_hold(conn); 1580 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 1581 hci_conn_put(conn); 1582 } 1583 1584 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) { 1585 if (!ev->status) { 1586 struct hci_cp_set_conn_encrypt cp; 1587 cp.handle = ev->handle; 1588 cp.encrypt = 0x01; 1589 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), 1590 &cp); 1591 } else { 1592 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend); 1593 hci_encrypt_cfm(conn, ev->status, 0x00); 1594 } 1595 } 1596 1597 unlock: 1598 hci_dev_unlock(hdev); 1599 } 1600 1601 static inline void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb) 1602 { 1603 struct hci_ev_remote_name *ev = (void *) skb->data; 1604 struct hci_conn *conn; 1605 1606 BT_DBG("%s", hdev->name); 1607 1608 hci_conn_check_pending(hdev); 1609 1610 hci_dev_lock(hdev); 1611 1612 if (ev->status == 0 && test_bit(HCI_MGMT, &hdev->flags)) 1613 mgmt_remote_name(hdev->id, &ev->bdaddr, ev->name); 1614 1615 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 1616 if (!conn) 1617 goto unlock; 1618 1619 if (!hci_outgoing_auth_needed(hdev, conn)) 1620 goto unlock; 1621 1622 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) { 1623 struct hci_cp_auth_requested cp; 1624 cp.handle = __cpu_to_le16(conn->handle); 1625 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp); 1626 } 1627 1628 unlock: 1629 hci_dev_unlock(hdev); 1630 } 1631 1632 static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb) 1633 { 1634 struct hci_ev_encrypt_change *ev = (void *) skb->data; 1635 struct hci_conn *conn; 1636 1637 BT_DBG("%s status %d", hdev->name, ev->status); 1638 1639 hci_dev_lock(hdev); 1640 1641 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 1642 if (conn) { 1643 if (!ev->status) { 1644 if (ev->encrypt) { 1645 /* Encryption implies authentication */ 1646 conn->link_mode |= HCI_LM_AUTH; 1647 conn->link_mode |= HCI_LM_ENCRYPT; 1648 conn->sec_level = conn->pending_sec_level; 1649 } else 1650 conn->link_mode &= ~HCI_LM_ENCRYPT; 1651 } 1652 1653 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend); 1654 1655 if (conn->state == BT_CONFIG) { 1656 if (!ev->status) 1657 conn->state = BT_CONNECTED; 1658 1659 hci_proto_connect_cfm(conn, ev->status); 1660 hci_conn_put(conn); 1661 } else 1662 hci_encrypt_cfm(conn, ev->status, ev->encrypt); 1663 } 1664 1665 hci_dev_unlock(hdev); 1666 } 1667 1668 static inline void hci_change_link_key_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 1669 { 1670 struct hci_ev_change_link_key_complete *ev = (void *) skb->data; 1671 struct hci_conn *conn; 1672 1673 BT_DBG("%s status %d", hdev->name, ev->status); 1674 1675 hci_dev_lock(hdev); 1676 1677 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 1678 if (conn) { 1679 if (!ev->status) 1680 conn->link_mode |= HCI_LM_SECURE; 1681 1682 clear_bit(HCI_CONN_AUTH_PEND, &conn->pend); 1683 1684 hci_key_change_cfm(conn, ev->status); 1685 } 1686 1687 hci_dev_unlock(hdev); 1688 } 1689 1690 static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff *skb) 1691 { 1692 struct hci_ev_remote_features *ev = (void *) skb->data; 1693 struct hci_conn *conn; 1694 1695 BT_DBG("%s status %d", 
hdev->name, ev->status); 1696 1697 hci_dev_lock(hdev); 1698 1699 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 1700 if (!conn) 1701 goto unlock; 1702 1703 if (!ev->status) 1704 memcpy(conn->features, ev->features, 8); 1705 1706 if (conn->state != BT_CONFIG) 1707 goto unlock; 1708 1709 if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) { 1710 struct hci_cp_read_remote_ext_features cp; 1711 cp.handle = ev->handle; 1712 cp.page = 0x01; 1713 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES, 1714 sizeof(cp), &cp); 1715 goto unlock; 1716 } 1717 1718 if (!ev->status) { 1719 struct hci_cp_remote_name_req cp; 1720 memset(&cp, 0, sizeof(cp)); 1721 bacpy(&cp.bdaddr, &conn->dst); 1722 cp.pscan_rep_mode = 0x02; 1723 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp); 1724 } 1725 1726 if (!hci_outgoing_auth_needed(hdev, conn)) { 1727 conn->state = BT_CONNECTED; 1728 hci_proto_connect_cfm(conn, ev->status); 1729 hci_conn_put(conn); 1730 } 1731 1732 unlock: 1733 hci_dev_unlock(hdev); 1734 } 1735 1736 static inline void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb) 1737 { 1738 BT_DBG("%s", hdev->name); 1739 } 1740 1741 static inline void hci_qos_setup_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 1742 { 1743 BT_DBG("%s", hdev->name); 1744 } 1745 1746 static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 1747 { 1748 struct hci_ev_cmd_complete *ev = (void *) skb->data; 1749 __u16 opcode; 1750 1751 skb_pull(skb, sizeof(*ev)); 1752 1753 opcode = __le16_to_cpu(ev->opcode); 1754 1755 switch (opcode) { 1756 case HCI_OP_INQUIRY_CANCEL: 1757 hci_cc_inquiry_cancel(hdev, skb); 1758 break; 1759 1760 case HCI_OP_EXIT_PERIODIC_INQ: 1761 hci_cc_exit_periodic_inq(hdev, skb); 1762 break; 1763 1764 case HCI_OP_REMOTE_NAME_REQ_CANCEL: 1765 hci_cc_remote_name_req_cancel(hdev, skb); 1766 break; 1767 1768 case HCI_OP_ROLE_DISCOVERY: 1769 hci_cc_role_discovery(hdev, skb); 1770 break; 1771 1772 case HCI_OP_READ_LINK_POLICY: 1773 hci_cc_read_link_policy(hdev, skb); 1774 break; 1775 1776 case HCI_OP_WRITE_LINK_POLICY: 1777 hci_cc_write_link_policy(hdev, skb); 1778 break; 1779 1780 case HCI_OP_READ_DEF_LINK_POLICY: 1781 hci_cc_read_def_link_policy(hdev, skb); 1782 break; 1783 1784 case HCI_OP_WRITE_DEF_LINK_POLICY: 1785 hci_cc_write_def_link_policy(hdev, skb); 1786 break; 1787 1788 case HCI_OP_RESET: 1789 hci_cc_reset(hdev, skb); 1790 break; 1791 1792 case HCI_OP_WRITE_LOCAL_NAME: 1793 hci_cc_write_local_name(hdev, skb); 1794 break; 1795 1796 case HCI_OP_READ_LOCAL_NAME: 1797 hci_cc_read_local_name(hdev, skb); 1798 break; 1799 1800 case HCI_OP_WRITE_AUTH_ENABLE: 1801 hci_cc_write_auth_enable(hdev, skb); 1802 break; 1803 1804 case HCI_OP_WRITE_ENCRYPT_MODE: 1805 hci_cc_write_encrypt_mode(hdev, skb); 1806 break; 1807 1808 case HCI_OP_WRITE_SCAN_ENABLE: 1809 hci_cc_write_scan_enable(hdev, skb); 1810 break; 1811 1812 case HCI_OP_READ_CLASS_OF_DEV: 1813 hci_cc_read_class_of_dev(hdev, skb); 1814 break; 1815 1816 case HCI_OP_WRITE_CLASS_OF_DEV: 1817 hci_cc_write_class_of_dev(hdev, skb); 1818 break; 1819 1820 case HCI_OP_READ_VOICE_SETTING: 1821 hci_cc_read_voice_setting(hdev, skb); 1822 break; 1823 1824 case HCI_OP_WRITE_VOICE_SETTING: 1825 hci_cc_write_voice_setting(hdev, skb); 1826 break; 1827 1828 case HCI_OP_HOST_BUFFER_SIZE: 1829 hci_cc_host_buffer_size(hdev, skb); 1830 break; 1831 1832 case HCI_OP_READ_SSP_MODE: 1833 hci_cc_read_ssp_mode(hdev, skb); 1834 break; 1835 1836 case HCI_OP_WRITE_SSP_MODE: 1837 
hci_cc_write_ssp_mode(hdev, skb); 1838 break; 1839 1840 case HCI_OP_READ_LOCAL_VERSION: 1841 hci_cc_read_local_version(hdev, skb); 1842 break; 1843 1844 case HCI_OP_READ_LOCAL_COMMANDS: 1845 hci_cc_read_local_commands(hdev, skb); 1846 break; 1847 1848 case HCI_OP_READ_LOCAL_FEATURES: 1849 hci_cc_read_local_features(hdev, skb); 1850 break; 1851 1852 case HCI_OP_READ_LOCAL_EXT_FEATURES: 1853 hci_cc_read_local_ext_features(hdev, skb); 1854 break; 1855 1856 case HCI_OP_READ_BUFFER_SIZE: 1857 hci_cc_read_buffer_size(hdev, skb); 1858 break; 1859 1860 case HCI_OP_READ_BD_ADDR: 1861 hci_cc_read_bd_addr(hdev, skb); 1862 break; 1863 1864 case HCI_OP_WRITE_CA_TIMEOUT: 1865 hci_cc_write_ca_timeout(hdev, skb); 1866 break; 1867 1868 case HCI_OP_DELETE_STORED_LINK_KEY: 1869 hci_cc_delete_stored_link_key(hdev, skb); 1870 break; 1871 1872 case HCI_OP_SET_EVENT_MASK: 1873 hci_cc_set_event_mask(hdev, skb); 1874 break; 1875 1876 case HCI_OP_WRITE_INQUIRY_MODE: 1877 hci_cc_write_inquiry_mode(hdev, skb); 1878 break; 1879 1880 case HCI_OP_READ_INQ_RSP_TX_POWER: 1881 hci_cc_read_inq_rsp_tx_power(hdev, skb); 1882 break; 1883 1884 case HCI_OP_SET_EVENT_FLT: 1885 hci_cc_set_event_flt(hdev, skb); 1886 break; 1887 1888 case HCI_OP_PIN_CODE_REPLY: 1889 hci_cc_pin_code_reply(hdev, skb); 1890 break; 1891 1892 case HCI_OP_PIN_CODE_NEG_REPLY: 1893 hci_cc_pin_code_neg_reply(hdev, skb); 1894 break; 1895 1896 case HCI_OP_READ_LOCAL_OOB_DATA: 1897 hci_cc_read_local_oob_data_reply(hdev, skb); 1898 break; 1899 1900 case HCI_OP_LE_READ_BUFFER_SIZE: 1901 hci_cc_le_read_buffer_size(hdev, skb); 1902 break; 1903 1904 case HCI_OP_USER_CONFIRM_REPLY: 1905 hci_cc_user_confirm_reply(hdev, skb); 1906 break; 1907 1908 case HCI_OP_USER_CONFIRM_NEG_REPLY: 1909 hci_cc_user_confirm_neg_reply(hdev, skb); 1910 break; 1911 1912 case HCI_OP_LE_SET_SCAN_ENABLE: 1913 hci_cc_le_set_scan_enable(hdev, skb); 1914 break; 1915 1916 case HCI_OP_LE_LTK_REPLY: 1917 hci_cc_le_ltk_reply(hdev, skb); 1918 break; 1919 1920 case HCI_OP_LE_LTK_NEG_REPLY: 1921 hci_cc_le_ltk_neg_reply(hdev, skb); 1922 break; 1923 1924 default: 1925 BT_DBG("%s opcode 0x%x", hdev->name, opcode); 1926 break; 1927 } 1928 1929 if (ev->opcode != HCI_OP_NOP) 1930 del_timer(&hdev->cmd_timer); 1931 1932 if (ev->ncmd) { 1933 atomic_set(&hdev->cmd_cnt, 1); 1934 if (!skb_queue_empty(&hdev->cmd_q)) 1935 tasklet_schedule(&hdev->cmd_task); 1936 } 1937 } 1938 1939 static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb) 1940 { 1941 struct hci_ev_cmd_status *ev = (void *) skb->data; 1942 __u16 opcode; 1943 1944 skb_pull(skb, sizeof(*ev)); 1945 1946 opcode = __le16_to_cpu(ev->opcode); 1947 1948 switch (opcode) { 1949 case HCI_OP_INQUIRY: 1950 hci_cs_inquiry(hdev, ev->status); 1951 break; 1952 1953 case HCI_OP_CREATE_CONN: 1954 hci_cs_create_conn(hdev, ev->status); 1955 break; 1956 1957 case HCI_OP_ADD_SCO: 1958 hci_cs_add_sco(hdev, ev->status); 1959 break; 1960 1961 case HCI_OP_AUTH_REQUESTED: 1962 hci_cs_auth_requested(hdev, ev->status); 1963 break; 1964 1965 case HCI_OP_SET_CONN_ENCRYPT: 1966 hci_cs_set_conn_encrypt(hdev, ev->status); 1967 break; 1968 1969 case HCI_OP_REMOTE_NAME_REQ: 1970 hci_cs_remote_name_req(hdev, ev->status); 1971 break; 1972 1973 case HCI_OP_READ_REMOTE_FEATURES: 1974 hci_cs_read_remote_features(hdev, ev->status); 1975 break; 1976 1977 case HCI_OP_READ_REMOTE_EXT_FEATURES: 1978 hci_cs_read_remote_ext_features(hdev, ev->status); 1979 break; 1980 1981 case HCI_OP_SETUP_SYNC_CONN: 1982 hci_cs_setup_sync_conn(hdev, ev->status); 1983 break; 1984 1985 case 
HCI_OP_SNIFF_MODE: 1986 hci_cs_sniff_mode(hdev, ev->status); 1987 break; 1988 1989 case HCI_OP_EXIT_SNIFF_MODE: 1990 hci_cs_exit_sniff_mode(hdev, ev->status); 1991 break; 1992 1993 case HCI_OP_DISCONNECT: 1994 if (ev->status != 0) 1995 mgmt_disconnect_failed(hdev->id); 1996 break; 1997 1998 case HCI_OP_LE_CREATE_CONN: 1999 hci_cs_le_create_conn(hdev, ev->status); 2000 break; 2001 2002 case HCI_OP_LE_START_ENC: 2003 hci_cs_le_start_enc(hdev, ev->status); 2004 break; 2005 2006 default: 2007 BT_DBG("%s opcode 0x%x", hdev->name, opcode); 2008 break; 2009 } 2010 2011 if (ev->opcode != HCI_OP_NOP) 2012 del_timer(&hdev->cmd_timer); 2013 2014 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) { 2015 atomic_set(&hdev->cmd_cnt, 1); 2016 if (!skb_queue_empty(&hdev->cmd_q)) 2017 tasklet_schedule(&hdev->cmd_task); 2018 } 2019 } 2020 2021 static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb) 2022 { 2023 struct hci_ev_role_change *ev = (void *) skb->data; 2024 struct hci_conn *conn; 2025 2026 BT_DBG("%s status %d", hdev->name, ev->status); 2027 2028 hci_dev_lock(hdev); 2029 2030 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 2031 if (conn) { 2032 if (!ev->status) { 2033 if (ev->role) 2034 conn->link_mode &= ~HCI_LM_MASTER; 2035 else 2036 conn->link_mode |= HCI_LM_MASTER; 2037 } 2038 2039 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->pend); 2040 2041 hci_role_switch_cfm(conn, ev->status, ev->role); 2042 } 2043 2044 hci_dev_unlock(hdev); 2045 } 2046 2047 static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb) 2048 { 2049 struct hci_ev_num_comp_pkts *ev = (void *) skb->data; 2050 __le16 *ptr; 2051 int i; 2052 2053 skb_pull(skb, sizeof(*ev)); 2054 2055 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl); 2056 2057 if (skb->len < ev->num_hndl * 4) { 2058 BT_DBG("%s bad parameters", hdev->name); 2059 return; 2060 } 2061 2062 tasklet_disable(&hdev->tx_task); 2063 2064 for (i = 0, ptr = (__le16 *) skb->data; i < ev->num_hndl; i++) { 2065 struct hci_conn *conn; 2066 __u16 handle, count; 2067 2068 handle = get_unaligned_le16(ptr++); 2069 count = get_unaligned_le16(ptr++); 2070 2071 conn = hci_conn_hash_lookup_handle(hdev, handle); 2072 if (conn) { 2073 conn->sent -= count; 2074 2075 if (conn->type == ACL_LINK) { 2076 hdev->acl_cnt += count; 2077 if (hdev->acl_cnt > hdev->acl_pkts) 2078 hdev->acl_cnt = hdev->acl_pkts; 2079 } else if (conn->type == LE_LINK) { 2080 if (hdev->le_pkts) { 2081 hdev->le_cnt += count; 2082 if (hdev->le_cnt > hdev->le_pkts) 2083 hdev->le_cnt = hdev->le_pkts; 2084 } else { 2085 hdev->acl_cnt += count; 2086 if (hdev->acl_cnt > hdev->acl_pkts) 2087 hdev->acl_cnt = hdev->acl_pkts; 2088 } 2089 } else { 2090 hdev->sco_cnt += count; 2091 if (hdev->sco_cnt > hdev->sco_pkts) 2092 hdev->sco_cnt = hdev->sco_pkts; 2093 } 2094 } 2095 } 2096 2097 tasklet_schedule(&hdev->tx_task); 2098 2099 tasklet_enable(&hdev->tx_task); 2100 } 2101 2102 static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb) 2103 { 2104 struct hci_ev_mode_change *ev = (void *) skb->data; 2105 struct hci_conn *conn; 2106 2107 BT_DBG("%s status %d", hdev->name, ev->status); 2108 2109 hci_dev_lock(hdev); 2110 2111 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); 2112 if (conn) { 2113 conn->mode = ev->mode; 2114 conn->interval = __le16_to_cpu(ev->interval); 2115 2116 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) { 2117 if (conn->mode == HCI_CM_ACTIVE) 2118 conn->power_save = 1; 2119 else 2120 conn->power_save = 
0; 2121 } 2122 2123 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend)) 2124 hci_sco_setup(conn, ev->status); 2125 } 2126 2127 hci_dev_unlock(hdev); 2128 } 2129 2130 static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb) 2131 { 2132 struct hci_ev_pin_code_req *ev = (void *) skb->data; 2133 struct hci_conn *conn; 2134 2135 BT_DBG("%s", hdev->name); 2136 2137 hci_dev_lock(hdev); 2138 2139 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 2140 if (conn && conn->state == BT_CONNECTED) { 2141 hci_conn_hold(conn); 2142 conn->disc_timeout = HCI_PAIRING_TIMEOUT; 2143 hci_conn_put(conn); 2144 } 2145 2146 if (!test_bit(HCI_PAIRABLE, &hdev->flags)) 2147 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY, 2148 sizeof(ev->bdaddr), &ev->bdaddr); 2149 else if (test_bit(HCI_MGMT, &hdev->flags)) { 2150 u8 secure; 2151 2152 if (conn->pending_sec_level == BT_SECURITY_HIGH) 2153 secure = 1; 2154 else 2155 secure = 0; 2156 2157 mgmt_pin_code_request(hdev->id, &ev->bdaddr, secure); 2158 } 2159 2160 hci_dev_unlock(hdev); 2161 } 2162 2163 static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb) 2164 { 2165 struct hci_ev_link_key_req *ev = (void *) skb->data; 2166 struct hci_cp_link_key_reply cp; 2167 struct hci_conn *conn; 2168 struct link_key *key; 2169 2170 BT_DBG("%s", hdev->name); 2171 2172 if (!test_bit(HCI_LINK_KEYS, &hdev->flags)) 2173 return; 2174 2175 hci_dev_lock(hdev); 2176 2177 key = hci_find_link_key(hdev, &ev->bdaddr); 2178 if (!key) { 2179 BT_DBG("%s link key not found for %s", hdev->name, 2180 batostr(&ev->bdaddr)); 2181 goto not_found; 2182 } 2183 2184 BT_DBG("%s found key type %u for %s", hdev->name, key->type, 2185 batostr(&ev->bdaddr)); 2186 2187 if (!test_bit(HCI_DEBUG_KEYS, &hdev->flags) && 2188 key->type == HCI_LK_DEBUG_COMBINATION) { 2189 BT_DBG("%s ignoring debug key", hdev->name); 2190 goto not_found; 2191 } 2192 2193 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 2194 if (conn) { 2195 if (key->type == HCI_LK_UNAUTH_COMBINATION && 2196 conn->auth_type != 0xff && 2197 (conn->auth_type & 0x01)) { 2198 BT_DBG("%s ignoring unauthenticated key", hdev->name); 2199 goto not_found; 2200 } 2201 2202 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 && 2203 conn->pending_sec_level == BT_SECURITY_HIGH) { 2204 BT_DBG("%s ignoring key unauthenticated for high \ 2205 security", hdev->name); 2206 goto not_found; 2207 } 2208 2209 conn->key_type = key->type; 2210 conn->pin_length = key->pin_len; 2211 } 2212 2213 bacpy(&cp.bdaddr, &ev->bdaddr); 2214 memcpy(cp.link_key, key->val, 16); 2215 2216 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp); 2217 2218 hci_dev_unlock(hdev); 2219 2220 return; 2221 2222 not_found: 2223 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr); 2224 hci_dev_unlock(hdev); 2225 } 2226 2227 static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb) 2228 { 2229 struct hci_ev_link_key_notify *ev = (void *) skb->data; 2230 struct hci_conn *conn; 2231 u8 pin_len = 0; 2232 2233 BT_DBG("%s", hdev->name); 2234 2235 hci_dev_lock(hdev); 2236 2237 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 2238 if (conn) { 2239 hci_conn_hold(conn); 2240 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 2241 pin_len = conn->pin_length; 2242 2243 if (ev->key_type != HCI_LK_CHANGED_COMBINATION) 2244 conn->key_type = ev->key_type; 2245 2246 hci_conn_put(conn); 2247 } 2248 2249 if (test_bit(HCI_LINK_KEYS, &hdev->flags)) 2250 hci_add_link_key(hdev, conn, 1, 
static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = (void *) skb->data;
	struct hci_conn *conn;
	u8 pin_len = 0;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		pin_len = conn->pin_length;

		if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
			conn->key_type = ev->key_type;

		hci_conn_put(conn);
	}

	if (test_bit(HCI_LINK_KEYS, &hdev->flags))
		hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
						ev->key_type, pin_len);

	hci_dev_unlock(hdev);
}

static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_clock_offset *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn && !ev->status) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie) {
			ie->data.clock_offset = ev->clock_offset;
			ie->timestamp = jiffies;
		}
	}

	hci_dev_unlock(hdev);
}

static inline void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pkt_type_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn && !ev->status)
		conn->pkt_type = __le16_to_cpu(ev->pkt_type);

	hci_dev_unlock(hdev);
}

static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
	struct inquiry_entry *ie;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie) {
		ie->data.pscan_rep_mode = ev->pscan_rep_mode;
		ie->timestamp = jiffies;
	}

	hci_dev_unlock(hdev);
}

static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct inquiry_data data;
	int num_rsp = *((__u8 *) skb->data);

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	hci_dev_lock(hdev);

	if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags)) {
		if (test_bit(HCI_MGMT, &hdev->flags))
			mgmt_discovering(hdev->id, 1);
	}

	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
		struct inquiry_info_with_rssi_and_pscan_mode *info;
		info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = info->pscan_mode;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;
			hci_inquiry_cache_update(hdev, &data);
			mgmt_device_found(hdev->id, &info->bdaddr,
						info->dev_class, info->rssi,
						NULL);
		}
	} else {
		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = 0x00;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;
			hci_inquiry_cache_update(hdev, &data);
			mgmt_device_found(hdev->id, &info->bdaddr,
						info->dev_class, info->rssi,
						NULL);
		}
	}

	hci_dev_unlock(hdev);
}

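/* Remote Extended Features: page 0x01 carries the remote host's Secure
 * Simple Pairing bit, which is mirrored into the inquiry cache and the
 * connection. While the link is still in BT_CONFIG the remote name is
 * requested and, if no outgoing authentication is needed, the connection
 * is promoted to BT_CONNECTED and the upper protocols are notified. */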
static inline void hci_remote_ext_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & 0x01);

		conn->ssp_mode = (ev->features[0] & 0x01);
	}

	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	}

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_put(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}

static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	switch (ev->status) {
	case 0x00:
		conn->handle = __le16_to_cpu(ev->handle);
		conn->state = BT_CONNECTED;

		hci_conn_hold_device(conn);
		hci_conn_add_sysfs(conn);
		break;

	case 0x11:	/* Unsupported Feature or Parameter Value */
	case 0x1c:	/* SCO interval rejected */
	case 0x1a:	/* Unsupported Remote Feature */
	case 0x1f:	/* Unspecified error */
		if (conn->out && conn->attempt < 2) {
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
			hci_setup_sync(conn, conn->link->handle);
			goto unlock;
		}
		/* fall through */

	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_proto_connect_cfm(conn, ev->status);
	if (ev->status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}

static inline void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}

static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_sniff_subrate *ev = (void *) skb->data;

	BT_DBG("%s status %d", hdev->name, ev->status);
}

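/* Extended Inquiry Result: like the RSSI variant above, but each response
 * also carries EIR data, so discovered devices are cached with ssp_mode
 * set and the EIR payload is passed along to mgmt_device_found(). */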
static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct inquiry_data data;
	struct extended_inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp = *((__u8 *) skb->data);

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags)) {
		if (test_bit(HCI_MGMT, &hdev->flags))
			mgmt_discovering(hdev->id, 1);
	}

	hci_dev_lock(hdev);

	for (; num_rsp; num_rsp--, info++) {
		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode = info->pscan_rep_mode;
		data.pscan_period_mode = info->pscan_period_mode;
		data.pscan_mode = 0x00;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset = info->clock_offset;
		data.rssi = info->rssi;
		data.ssp_mode = 0x01;
		hci_inquiry_cache_update(hdev, &data);
		mgmt_device_found(hdev->id, &info->bdaddr, info->dev_class,
						info->rssi, info->data);
	}

	hci_dev_unlock(hdev);
}

static inline u8 hci_get_auth_req(struct hci_conn *conn)
{
	/* If remote requests dedicated bonding follow that lead */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
		/* If both remote and local IO capabilities allow MITM
		 * protection then require it, otherwise don't */
		if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
			return 0x02;
		else
			return 0x03;
	}

	/* If remote requests no-bonding follow that lead */
	if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
		return conn->remote_auth | (conn->auth_type & 0x01);

	return conn->auth_type;
}

static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	hci_conn_hold(conn);

	if (!test_bit(HCI_MGMT, &hdev->flags))
		goto unlock;

	if (test_bit(HCI_PAIRABLE, &hdev->flags) ||
			(conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.capability = conn->io_capability;
		conn->auth_type = hci_get_auth_req(conn);
		cp.authentication = conn->auth_type;

		if ((conn->out == 0x01 || conn->remote_oob == 0x01) &&
				hci_find_remote_oob_data(hdev, &conn->dst))
			cp.oob_data = 0x01;
		else
			cp.oob_data = 0x00;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
							sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = 0x18; /* Pairing not allowed */

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
							sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}

static inline void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_reply *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	conn->remote_cap = ev->capability;
	conn->remote_oob = ev->oob_data;
	conn->remote_auth = ev->authentication;

unlock:
	hci_dev_unlock(hdev);
}

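/* User Confirmation Request (Secure Simple Pairing numeric comparison).
 * The reply depends on the negotiated MITM requirements: the request is
 * rejected when we need MITM protection the remote side cannot provide,
 * auto-accepted (optionally after auto_accept_delay) when neither side
 * needs it, and otherwise forwarded to user space for confirmation. */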
static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_MGMT, &hdev->flags))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. The only exception is when we're dedicated bonding
	 * initiators (connect_cfm_cb set) since then we always have the MITM
	 * bit set. */
	if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
					sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection, auto-accept */
	if ((!loc_mitm || conn->remote_cap == 0x03) &&
			(!rem_mitm || conn->io_capability == 0x03)) {

		/* If we're not the initiator, request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
						hdev->auto_accept_delay);

		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			mod_timer(&conn->auto_accept_timer, jiffies + delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
					sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	mgmt_user_confirm_request(hdev->id, &ev->bdaddr, ev->passkey,
							confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}

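/* Simple Pairing Complete: only used to report pairing failures to the
 * management interface when we did not initiate the authentication;
 * initiator failures are already reported via the Auth Complete path. */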
static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* To avoid duplicate auth_failed events to user space we check
	 * the HCI_CONN_AUTH_PEND flag which will be set if we
	 * initiated the authentication. A traditional auth_complete
	 * event gets always produced as initiator and is also mapped to
	 * the mgmt_auth_failed event */
	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->pend) && ev->status != 0)
		mgmt_auth_failed(hdev->id, &conn->dst, ev->status);

	hci_conn_put(conn);

unlock:
	hci_dev_unlock(hdev);
}

static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_host_features *ev = (void *) skb->data;
	struct inquiry_entry *ie;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		ie->data.ssp_mode = (ev->features[0] & 0x01);

	hci_dev_unlock(hdev);
}

static inline void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
	struct oob_data *data;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_MGMT, &hdev->flags))
		goto unlock;

	data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
	if (data) {
		struct hci_cp_remote_oob_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		memcpy(cp.hash, data->hash, sizeof(cp.hash));
		memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
									&cp);
	} else {
		struct hci_cp_remote_oob_data_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
									&cp);
	}

unlock:
	hci_dev_unlock(hdev);
}

static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr);
	if (!conn) {
		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
		if (!conn) {
			BT_ERR("No memory for new connection");
			hci_dev_unlock(hdev);
			return;
		}

		conn->dst_type = ev->bdaddr_type;
	}

	if (ev->status) {
		mgmt_connect_failed(hdev->id, &ev->bdaddr, ev->status);
		hci_proto_connect_cfm(conn, ev->status);
		conn->state = BT_CLOSED;
		hci_conn_del(conn);
		goto unlock;
	}

	mgmt_connected(hdev->id, &ev->bdaddr);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = __le16_to_cpu(ev->handle);
	conn->state = BT_CONNECTED;

	hci_conn_hold_device(conn);
	hci_conn_add_sysfs(conn);

	hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);
}

static inline void hci_le_adv_report_evt(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	struct hci_ev_le_advertising_info *ev;
	u8 num_reports;

	num_reports = skb->data[0];
	ev = (void *) &skb->data[1];

	/* Guard against a malformed event claiming zero reports, which
	 * would otherwise underflow the u8 counter below. */
	if (!num_reports)
		return;

	hci_dev_lock(hdev);

	hci_add_adv_entry(hdev, ev);

	while (--num_reports) {
		ev = (void *) (ev->data + ev->length + 1);
		hci_add_adv_entry(hdev, ev);
	}

	hci_dev_unlock(hdev);
}

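/* LE Long Term Key Request: the controller asks for the LTK used to
 * encrypt an LE connection. The key cached on the connection is returned
 * via HCI_OP_LE_LTK_REPLY; requests for unknown handles are ignored. */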
static inline void hci_le_ltk_request_evt(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_conn *conn;

	BT_DBG("%s handle %d", hdev->name, __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	memset(&cp, 0, sizeof(cp));
	cp.handle = cpu_to_le16(conn->handle);
	memcpy(cp.ltk, conn->ltk, sizeof(conn->ltk));

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

unlock:
	hci_dev_unlock(hdev);
}

static inline void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_meta *le_ev = (void *) skb->data;

	skb_pull(skb, sizeof(*le_ev));

	switch (le_ev->subevent) {
	case HCI_EV_LE_CONN_COMPLETE:
		hci_le_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_ADVERTISING_REPORT:
		hci_le_adv_report_evt(hdev, skb);
		break;

	case HCI_EV_LE_LTK_REQ:
		hci_le_ltk_request_evt(hdev, skb);
		break;

	default:
		break;
	}
}

void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	__u8 event = hdr->evt;

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_VERSION:
		hci_remote_version_evt(hdev, skb);
		break;

	case HCI_EV_QOS_SETUP_COMPLETE:
		hci_qos_setup_complete_evt(hdev, skb);
		break;

	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_CHANGED:
		hci_sync_conn_changed_evt(hdev, skb);
		break;

	case HCI_EV_SNIFF_SUBRATE:
		hci_sniff_subrate_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

	default:
		BT_DBG("%s event 0x%x", hdev->name, event);
		break;
	}

	kfree_skb(skb);
	hdev->stat.evt_rx++;
}

/* Generate internal stack event */
void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	skb->dev = (void *) hdev;
	hci_send_to_sock(hdev, skb, NULL);
	kfree_skb(skb);
}