/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI event handling. */

#include <linux/module.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <net/sock.h>

#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static int enable_le;

/* Handle HCI Event packets */

static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (status)
		return;

	if (test_bit(HCI_MGMT, &hdev->flags) &&
			test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		mgmt_discovering(hdev->id, 0);

	hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);

	hci_conn_check_pending(hdev);
}

static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (status)
		return;

	if (test_bit(HCI_MGMT, &hdev->flags) &&
			test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		mgmt_discovering(hdev->id, 0);

	hci_conn_check_pending(hdev);
}

static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}

static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_role_discovery *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		if (rp->role)
			conn->link_mode &= ~HCI_LM_MASTER;
		else
			conn->link_mode |= HCI_LM_MASTER;
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = __le16_to_cpu(rp->policy);

	hci_dev_unlock(hdev);
}
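/* Several of the Command Complete handlers below share one pattern: the
 * event only carries a status (and sometimes a handle), so the parameter
 * that was written is recovered from the copy of the outgoing command kept
 * by hci_sent_cmd_data().  Rough sketch of that pattern (HCI_OP_FOO is a
 * placeholder opcode, not part of this file):
 *
 *	sent = hci_sent_cmd_data(hdev, HCI_OP_FOO);
 *	if (!sent)
 *		return;				we never sent this command
 *	val = get_unaligned_le16(sent);		parameter as we requested it
 */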
static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_def_link_policy *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->link_policy = __le16_to_cpu(rp->policy);
}

static void hci_cc_write_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
	if (!sent)
		return;

	if (!status)
		hdev->link_policy = get_unaligned_le16(sent);

	hci_req_complete(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, status);
}

static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	clear_bit(HCI_RESET, &hdev->flags);

	hci_req_complete(hdev, HCI_OP_RESET, status);
}

static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return;

	if (test_bit(HCI_MGMT, &hdev->flags))
		mgmt_set_local_name_complete(hdev->id, sent, status);

	if (status)
		return;

	memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
}

static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_name *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
}

static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return;

	if (!status) {
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	hci_req_complete(hdev, HCI_OP_WRITE_AUTH_ENABLE, status);
}

static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
	if (!sent)
		return;

	if (!status) {
		__u8 param = *((__u8 *) sent);

		if (param)
			set_bit(HCI_ENCRYPT, &hdev->flags);
		else
			clear_bit(HCI_ENCRYPT, &hdev->flags);
	}

	hci_req_complete(hdev, HCI_OP_WRITE_ENCRYPT_MODE, status);
}
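/* Write Scan Enable toggles page scan (connectable) and inquiry scan
 * (discoverable).  On success the handler mirrors the new state into the
 * HCI_PSCAN/HCI_ISCAN flags and, only when the state actually changed,
 * notifies the management interface via mgmt_connectable() and
 * mgmt_discoverable().
 */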
static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return;

	if (!status) {
		__u8 param = *((__u8 *) sent);
		int old_pscan, old_iscan;

		old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
		old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);

		if (param & SCAN_INQUIRY) {
			set_bit(HCI_ISCAN, &hdev->flags);
			if (!old_iscan)
				mgmt_discoverable(hdev->id, 1);
		} else if (old_iscan)
			mgmt_discoverable(hdev->id, 0);

		if (param & SCAN_PAGE) {
			set_bit(HCI_PSCAN, &hdev->flags);
			if (!old_pscan)
				mgmt_connectable(hdev->id, 1);
		} else if (old_pscan)
			mgmt_connectable(hdev->id, 0);
	}

	hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status);
}

static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_class_of_dev *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->dev_class, rp->dev_class, 3);

	BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
		hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
}

static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
	if (!sent)
		return;

	memcpy(hdev->dev_class, sent, 3);
}

static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_voice_setting *rp = (void *) skb->data;
	__u16 setting;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	setting = __le16_to_cpu(rp->voice_setting);

	if (hdev->voice_setting == setting)
		return;

	hdev->voice_setting = setting;

	BT_DBG("%s voice setting 0x%04x", hdev->name, setting);

	if (hdev->notify) {
		tasklet_disable(&hdev->tx_task);
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
		tasklet_enable(&hdev->tx_task);
	}
}

static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u16 setting;
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
	if (!sent)
		return;

	setting = get_unaligned_le16(sent);

	if (hdev->voice_setting == setting)
		return;

	hdev->voice_setting = setting;

	BT_DBG("%s voice setting 0x%04x", hdev->name, setting);

	if (hdev->notify) {
		tasklet_disable(&hdev->tx_task);
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
		tasklet_enable(&hdev->tx_task);
	}
}

static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	hci_req_complete(hdev, HCI_OP_HOST_BUFFER_SIZE, status);
}
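/* hdev->ssp_mode caches whether Secure Simple Pairing is enabled on the
 * controller; it is consulted later (see hci_outgoing_auth_needed) when
 * deciding whether to request authentication and encryption for a link.
 */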
static void hci_cc_read_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_ssp_mode *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->ssp_mode = rp->mode;
}

static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return;

	hdev->ssp_mode = *((__u8 *) sent);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (hdev->features[6] & LMP_EXT_INQ)
		return 2;

	if (hdev->features[3] & LMP_RSSI_INQ)
		return 1;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
					hdev->lmp_subver == 0x0757)
		return 1;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 1;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 1;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 1;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
					hdev->lmp_subver == 0x1805)
		return 1;

	return 0;
}

static void hci_setup_inquiry_mode(struct hci_dev *hdev)
{
	u8 mode;

	mode = hci_get_inquiry_mode(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_dev *hdev)
{
	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices */
	if (hdev->lmp_ver <= 1)
		return;

	events[4] |= 0x01; /* Flow Specification Complete */
	events[4] |= 0x02; /* Inquiry Result with RSSI */
	events[4] |= 0x04; /* Read Remote Extended Features Complete */
	events[5] |= 0x08; /* Synchronous Connection Complete */
	events[5] |= 0x10; /* Synchronous Connection Changed */

	if (hdev->features[3] & LMP_RSSI_INQ)
		events[4] |= 0x04; /* Inquiry Result with RSSI */

	if (hdev->features[5] & LMP_SNIFF_SUBR)
		events[5] |= 0x20; /* Sniff Subrating */

	if (hdev->features[5] & LMP_PAUSE_ENC)
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (hdev->features[6] & LMP_EXT_INQ)
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (hdev->features[6] & LMP_NO_FLUSH)
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (hdev->features[7] & LMP_LSTO)
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (hdev->features[6] & LMP_SIMPLE_PAIR) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification */
	}

	if (hdev->features[4] & LMP_LE)
		events[7] |= 0x20; /* LE Meta-Event */

	hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
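/* LE host support is only advertised to the controller when the enable_le
 * module parameter is set; cp.simul additionally signals simultaneous
 * LE and BR/EDR support if the controller's feature bits allow it.
 */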
static void hci_set_le_support(struct hci_dev *hdev)
{
	struct hci_cp_write_le_host_supported cp;

	memset(&cp, 0, sizeof(cp));

	if (enable_le) {
		cp.le = 1;
		cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR);
	}

	hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp), &cp);
}

static void hci_setup(struct hci_dev *hdev)
{
	hci_setup_event_mask(hdev);

	if (hdev->lmp_ver > 1)
		hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (hdev->features[6] & LMP_SIMPLE_PAIR) {
		u8 mode = 0x01;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
	}

	if (hdev->features[3] & LMP_RSSI_INQ)
		hci_setup_inquiry_mode(hdev);

	if (hdev->features[7] & LMP_INQ_TX_PWR)
		hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (hdev->features[7] & LMP_EXTFEATURES) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES,
							sizeof(cp), &cp);
	}

	if (hdev->features[4] & LMP_LE)
		hci_set_le_support(hdev);
}

static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_version *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->hci_ver = rp->hci_ver;
	hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
	hdev->lmp_ver = rp->lmp_ver;
	hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
	hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);

	BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name,
					hdev->manufacturer,
					hdev->hci_ver, hdev->hci_rev);

	if (test_bit(HCI_INIT, &hdev->flags))
		hci_setup(hdev);
}

static void hci_setup_link_policy(struct hci_dev *hdev)
{
	u16 link_policy = 0;

	if (hdev->features[0] & LMP_RSWITCH)
		link_policy |= HCI_LP_RSWITCH;
	if (hdev->features[0] & LMP_HOLD)
		link_policy |= HCI_LP_HOLD;
	if (hdev->features[0] & LMP_SNIFF)
		link_policy |= HCI_LP_SNIFF;
	if (hdev->features[1] & LMP_PARK)
		link_policy |= HCI_LP_PARK;

	link_policy = cpu_to_le16(link_policy);
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
					sizeof(link_policy), &link_policy);
}

static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_commands *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		goto done;

	memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));

	if (test_bit(HCI_INIT, &hdev->flags) && (hdev->commands[5] & 0x10))
		hci_setup_link_policy(hdev);

done:
	hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status);
}

static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */
	if (hdev->features[0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	if (hdev->features[1] & LMP_HV2) {
		hdev->pkt_type |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[1] & LMP_HV3) {
		hdev->pkt_type |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	if (hdev->features[3] & LMP_ESCO)
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	if (hdev->features[5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);

	BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
					hdev->features[0], hdev->features[1],
					hdev->features[2], hdev->features[3],
					hdev->features[4], hdev->features[5],
					hdev->features[6], hdev->features[7]);
}

static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
							struct sk_buff *skb)
{
	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->extfeatures, rp->features, 8);

	hci_req_complete(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, rp->status);
}

static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
		hdev->sco_mtu = 64;
		hdev->sco_pkts = 8;
	}

	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name,
					hdev->acl_mtu, hdev->acl_pkts,
					hdev->sco_mtu, hdev->sco_pkts);
}

static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_bd_addr *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (!rp->status)
		bacpy(&hdev->bdaddr, &rp->bdaddr);

	hci_req_complete(hdev, HCI_OP_READ_BD_ADDR, rp->status);
}

static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
}

static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
							struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	hci_req_complete(hdev, HCI_OP_DELETE_STORED_LINK_KEY, status);
}

static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	hci_req_complete(hdev, HCI_OP_SET_EVENT_MASK, status);
}

static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
							struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	hci_req_complete(hdev, HCI_OP_WRITE_INQUIRY_MODE, status);
}

static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
							struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, status);
}

static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	hci_req_complete(hdev, HCI_OP_SET_EVENT_FLT, status);
}

static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (test_bit(HCI_MGMT, &hdev->flags))
		mgmt_pin_code_reply_complete(hdev->id, &rp->bdaddr, rp->status);

	if (rp->status != 0)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		return;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;
}

static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (test_bit(HCI_MGMT, &hdev->flags))
		mgmt_pin_code_neg_reply_complete(hdev->id, &rp->bdaddr,
								rp->status);
}

static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
							struct sk_buff *skb)
{
	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
	hdev->le_pkts = rp->le_max_pkt;

	hdev->le_cnt = hdev->le_pkts;

	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);

	hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status);
}

static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (test_bit(HCI_MGMT, &hdev->flags))
		mgmt_user_confirm_reply_complete(hdev->id, &rp->bdaddr,
								rp->status);
}

static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
							struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (test_bit(HCI_MGMT, &hdev->flags))
		mgmt_user_confirm_neg_reply_complete(hdev->id, &rp->bdaddr,
								rp->status);
}

static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
							struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	mgmt_read_local_oob_data_reply_complete(hdev->id, rp->hash,
						rp->randomizer, rp->status);
}

static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
							struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;
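	/* The scan state is taken from the command we sent, since the event
	 * itself only carries a status.  Enabling a scan flushes the cached
	 * advertising entries; disabling it arms adv_timer so stale entries
	 * get cleared after ADV_CLEAR_TIMEOUT.
	 */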
	hci_dev_lock(hdev);

	if (cp->enable == 0x01) {
		del_timer(&hdev->adv_timer);
		hci_adv_entries_clear(hdev);
	} else if (cp->enable == 0x00) {
		mod_timer(&hdev->adv_timer, jiffies + ADV_CLEAR_TIMEOUT);
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_le_ltk_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_req_complete(hdev, HCI_OP_LE_LTK_REPLY, rp->status);
}

static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_le_ltk_neg_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status);
}

static inline void hci_cc_write_le_host_supported(struct hci_dev *hdev,
							struct sk_buff *skb)
{
	struct hci_cp_read_local_ext_features cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (status)
		return;

	cp.page = 0x01;
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, sizeof(cp), &cp);
}

static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
{
	BT_DBG("%s status 0x%x", hdev->name, status);

	if (status) {
		hci_req_complete(hdev, HCI_OP_INQUIRY, status);
		hci_conn_check_pending(hdev);
		return;
	}

	if (test_bit(HCI_MGMT, &hdev->flags) &&
			!test_and_set_bit(HCI_INQUIRY, &hdev->flags))
		mgmt_discovering(hdev->id, 1);
}

static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->bdaddr), conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_proto_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
			if (conn) {
				conn->out = 1;
				conn->link_mode |= HCI_LM_MASTER;
			} else
				BT_ERR("No memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_add_sco *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle %d", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_proto_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}
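/* Command Status handlers (hci_cs_*) generally only have work to do when
 * the command failed; on success the interesting state changes happen when
 * the corresponding completion event arrives later.  Typical skeleton
 * (sketch only):
 *
 *	if (!status)
 *		return;
 *	cp = hci_sent_cmd_data(hdev, <opcode>);    look up what we asked for
 *	...clean up the affected hci_conn under hci_dev_lock()...
 */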
static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_auth_requested *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_proto_connect_cfm(conn, status);
			hci_conn_put(conn);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_set_conn_encrypt *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_proto_connect_cfm(conn, status);
			hci_conn_put(conn);
		}
	}

	hci_dev_unlock(hdev);
}

static int hci_outgoing_auth_needed(struct hci_dev *hdev,
							struct hci_conn *conn)
{
	if (conn->state != BT_CONFIG || !conn->out)
		return 0;

	if (conn->pending_sec_level == BT_SECURITY_SDP)
		return 0;

	/* Only request authentication for SSP connections or non-SSP
	 * devices with sec_level HIGH */
	if (!(hdev->ssp_mode > 0 && conn->ssp_mode > 0) &&
			conn->pending_sec_level != BT_SECURITY_HIGH)
		return 0;

	return 1;
}

static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, status);

	/* If successful wait for the name req complete event before
	 * checking for the need to do authentication */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
		struct hci_cp_auth_requested cp;
		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_read_remote_features *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_proto_connect_cfm(conn, status);
			hci_conn_put(conn);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_read_remote_ext_features *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev,
				HCI_OP_READ_REMOTE_EXT_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_proto_connect_cfm(conn, status);
			hci_conn_put(conn);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_setup_sync_conn *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle %d", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_proto_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_sniff_mode *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend);

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend))
			hci_sco_setup(conn, status);
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_exit_sniff_mode *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend);

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend))
			hci_sco_setup(conn, status);
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_le_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);

	BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->peer_addr),
		conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			conn->state = BT_CLOSED;
			hci_proto_connect_cfm(conn, status);
			hci_conn_del(conn);
		}
	} else {
		if (!conn) {
			conn = hci_conn_add(hdev, LE_LINK, &cp->peer_addr);
			if (conn) {
				conn->dst_type = cp->peer_addr_type;
				conn->out = 1;
			} else {
				BT_ERR("No memory for new connection");
			}
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%x", hdev->name, status);
}
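/* Inquiry Complete ends a discovery cycle: userspace is told via the
 * management interface that discovery stopped, and connection attempts
 * that were held back while the inquiry was running are resumed.
 */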
static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status %d", hdev->name, status);

	if (test_bit(HCI_MGMT, &hdev->flags) &&
			test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		mgmt_discovering(hdev->id, 0);

	hci_req_complete(hdev, HCI_OP_INQUIRY, status);

	hci_conn_check_pending(hdev);
}

static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct inquiry_data data;
	struct inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp = *((__u8 *) skb->data);

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	hci_dev_lock(hdev);

	if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags)) {

		if (test_bit(HCI_MGMT, &hdev->flags))
			mgmt_discovering(hdev->id, 1);
	}

	for (; num_rsp; num_rsp--, info++) {
		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode = info->pscan_rep_mode;
		data.pscan_period_mode = info->pscan_period_mode;
		data.pscan_mode = info->pscan_mode;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset = info->clock_offset;
		data.rssi = 0x00;
		data.ssp_mode = 0x00;
		hci_inquiry_cache_update(hdev, &data);
		mgmt_device_found(hdev->id, &info->bdaddr, info->dev_class, 0,
									NULL);
	}

	hci_dev_unlock(hdev);
}

static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type != SCO_LINK)
			goto unlock;

		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	if (!ev->status) {
		conn->handle = __le16_to_cpu(ev->handle);

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);
			conn->disc_timeout = HCI_DISCONN_TIMEOUT;
			mgmt_connected(hdev->id, &ev->bdaddr);
		} else
			conn->state = BT_CONNECTED;

		hci_conn_hold_device(conn);
		hci_conn_add_sysfs(conn);

		if (test_bit(HCI_AUTH, &hdev->flags))
			conn->link_mode |= HCI_LM_AUTH;

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			conn->link_mode |= HCI_LM_ENCRYPT;

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
							sizeof(cp), &cp);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < 3) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE,
							sizeof(cp), &cp);
		}
	} else {
		conn->state = BT_CLOSED;
		if (conn->type == ACL_LINK)
			mgmt_connect_failed(hdev->id, &ev->bdaddr, ev->status);
	}

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

	if (ev->status) {
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_del(conn);
	} else if (ev->link_type != ACL_LINK)
		hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
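/* Incoming connection requests are accepted only if the local link mode
 * (possibly extended by hci_proto_connect_ind) allows it and the peer is
 * not blacklisted; otherwise they are rejected with reason 0x0f.  ACL
 * requests may also ask for a role switch to master when the controller
 * supports it.
 */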
static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;

	BT_DBG("%s bdaddr %s type 0x%x", hdev->name,
					batostr(&ev->bdaddr), ev->link_type);

	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type);

	if ((mask & HCI_LM_ACCEPT) &&
			!hci_blacklist_lookup(hdev, &ev->bdaddr)) {
		/* Connection accepted */
		struct inquiry_entry *ie;
		struct hci_conn *conn;

		hci_dev_lock(hdev);

		ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
		if (ie)
			memcpy(ie->data.dev_class, ev->dev_class, 3);

		conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
		if (!conn) {
			conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
			if (!conn) {
				BT_ERR("No memory for new connection");
				hci_dev_unlock(hdev);
				return;
			}
		}

		memcpy(conn->dev_class, ev->dev_class, 3);
		conn->state = BT_CONNECT;

		hci_dev_unlock(hdev);

		if (ev->link_type == ACL_LINK || !lmp_esco_capable(hdev)) {
			struct hci_cp_accept_conn_req cp;

			bacpy(&cp.bdaddr, &ev->bdaddr);

			if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
				cp.role = 0x00; /* Become master */
			else
				cp.role = 0x01; /* Remain slave */

			hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ,
							sizeof(cp), &cp);
		} else {
			struct hci_cp_accept_sync_conn_req cp;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			cp.pkt_type = cpu_to_le16(conn->pkt_type);

			cp.tx_bandwidth = cpu_to_le32(0x00001f40);
			cp.rx_bandwidth = cpu_to_le32(0x00001f40);
			cp.max_latency = cpu_to_le16(0xffff);
			cp.content_format = cpu_to_le16(hdev->voice_setting);
			cp.retrans_effort = 0xff;

			hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
							sizeof(cp), &cp);
		}
	} else {
		/* Connection rejected */
		struct hci_cp_reject_conn_req cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = 0x0f;
		hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
	}
}

static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	if (ev->status) {
		mgmt_disconnect_failed(hdev->id);
		return;
	}

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	conn->state = BT_CLOSED;

	if (conn->type == ACL_LINK || conn->type == LE_LINK)
		mgmt_disconnected(hdev->id, &conn->dst);

	hci_proto_disconn_cfm(conn, ev->reason);
	hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}

static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (!(conn->ssp_mode > 0 && hdev->ssp_mode > 0) &&
				test_bit(HCI_CONN_REAUTH_PEND, &conn->pend)) {
			BT_INFO("re-auth of legacy device is not possible.");
		} else {
			conn->link_mode |= HCI_LM_AUTH;
			conn->sec_level = conn->pending_sec_level;
		}
	} else {
		mgmt_auth_failed(hdev->id, &conn->dst, ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->pend);

	if (conn->state == BT_CONFIG) {
		if (!ev->status && hdev->ssp_mode > 0 && conn->ssp_mode > 0) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
									&cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_proto_connect_cfm(conn, ev->status);
			hci_conn_put(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_put(conn);
	}

	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
									&cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);
			hci_encrypt_cfm(conn, ev->status, 0x00);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}

static inline void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	if (ev->status == 0 && test_bit(HCI_MGMT, &hdev->flags))
		mgmt_remote_name(hdev->id, &ev->bdaddr, ev->name);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
		struct hci_cp_auth_requested cp;
		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}

static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		if (!ev->status) {
			if (ev->encrypt) {
				/* Encryption implies authentication */
				conn->link_mode |= HCI_LM_AUTH;
				conn->link_mode |= HCI_LM_ENCRYPT;
				conn->sec_level = conn->pending_sec_level;
			} else
				conn->link_mode &= ~HCI_LM_ENCRYPT;
		}

		clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);

		if (conn->state == BT_CONFIG) {
			if (!ev->status)
				conn->state = BT_CONNECTED;

			hci_proto_connect_cfm(conn, ev->status);
			hci_conn_put(conn);
		} else
			hci_encrypt_cfm(conn, ev->status, ev->encrypt);
	}

	hci_dev_unlock(hdev);
}

static inline void hci_change_link_key_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		if (!ev->status)
			conn->link_mode |= HCI_LM_SECURE;

		clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);

		hci_key_change_cfm(conn, ev->status);
	}

	hci_dev_unlock(hdev);
}
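/* Remote Features drives the next step of ACL link setup: when both sides
 * support SSP the extended features page is read next, otherwise the
 * remote name is requested, and the connection is only confirmed to the
 * upper layers once no further outgoing authentication is needed.
 */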
static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		memcpy(conn->features, ev->features, 8);

	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
							sizeof(cp), &cp);
		goto unlock;
	}

	if (!ev->status) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	}

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_put(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}

static inline void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}

static inline void hci_qos_setup_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}

static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev = (void *) skb->data;
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY_CANCEL:
		hci_cc_inquiry_cancel(hdev, skb);
		break;

	case HCI_OP_EXIT_PERIODIC_INQ:
		hci_cc_exit_periodic_inq(hdev, skb);
		break;

	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
		hci_cc_remote_name_req_cancel(hdev, skb);
		break;

	case HCI_OP_ROLE_DISCOVERY:
		hci_cc_role_discovery(hdev, skb);
		break;

	case HCI_OP_READ_LINK_POLICY:
		hci_cc_read_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_LINK_POLICY:
		hci_cc_write_link_policy(hdev, skb);
		break;

	case HCI_OP_READ_DEF_LINK_POLICY:
		hci_cc_read_def_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_DEF_LINK_POLICY:
		hci_cc_write_def_link_policy(hdev, skb);
		break;

	case HCI_OP_RESET:
		hci_cc_reset(hdev, skb);
		break;

	case HCI_OP_WRITE_LOCAL_NAME:
		hci_cc_write_local_name(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_NAME:
		hci_cc_read_local_name(hdev, skb);
		break;

	case HCI_OP_WRITE_AUTH_ENABLE:
		hci_cc_write_auth_enable(hdev, skb);
		break;

	case HCI_OP_WRITE_ENCRYPT_MODE:
		hci_cc_write_encrypt_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SCAN_ENABLE:
		hci_cc_write_scan_enable(hdev, skb);
		break;

	case HCI_OP_READ_CLASS_OF_DEV:
		hci_cc_read_class_of_dev(hdev, skb);
		break;

	case HCI_OP_WRITE_CLASS_OF_DEV:
		hci_cc_write_class_of_dev(hdev, skb);
		break;

	case HCI_OP_READ_VOICE_SETTING:
		hci_cc_read_voice_setting(hdev, skb);
		break;

	case HCI_OP_WRITE_VOICE_SETTING:
		hci_cc_write_voice_setting(hdev, skb);
		break;

	case HCI_OP_HOST_BUFFER_SIZE:
		hci_cc_host_buffer_size(hdev, skb);
		break;

	case HCI_OP_READ_SSP_MODE:
		hci_cc_read_ssp_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_MODE:
		hci_cc_write_ssp_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_VERSION:
		hci_cc_read_local_version(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_COMMANDS:
		hci_cc_read_local_commands(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_FEATURES:
		hci_cc_read_local_features(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_EXT_FEATURES:
		hci_cc_read_local_ext_features(hdev, skb);
		break;

	case HCI_OP_READ_BUFFER_SIZE:
		hci_cc_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_READ_BD_ADDR:
		hci_cc_read_bd_addr(hdev, skb);
		break;

	case HCI_OP_WRITE_CA_TIMEOUT:
		hci_cc_write_ca_timeout(hdev, skb);
		break;

	case HCI_OP_DELETE_STORED_LINK_KEY:
		hci_cc_delete_stored_link_key(hdev, skb);
		break;

	case HCI_OP_SET_EVENT_MASK:
		hci_cc_set_event_mask(hdev, skb);
		break;

	case HCI_OP_WRITE_INQUIRY_MODE:
		hci_cc_write_inquiry_mode(hdev, skb);
		break;

	case HCI_OP_READ_INQ_RSP_TX_POWER:
		hci_cc_read_inq_rsp_tx_power(hdev, skb);
		break;

	case HCI_OP_SET_EVENT_FLT:
		hci_cc_set_event_flt(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_REPLY:
		hci_cc_pin_code_reply(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_NEG_REPLY:
		hci_cc_pin_code_neg_reply(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_DATA:
		hci_cc_read_local_oob_data_reply(hdev, skb);
		break;

	case HCI_OP_LE_READ_BUFFER_SIZE:
		hci_cc_le_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_REPLY:
		hci_cc_user_confirm_reply(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_NEG_REPLY:
		hci_cc_user_confirm_neg_reply(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_ENABLE:
		hci_cc_le_set_scan_enable(hdev, skb);
		break;

	case HCI_OP_LE_LTK_REPLY:
		hci_cc_le_ltk_reply(hdev, skb);
		break;

	case HCI_OP_LE_LTK_NEG_REPLY:
		hci_cc_le_ltk_neg_reply(hdev, skb);
		break;

	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
		hci_cc_write_le_host_supported(hdev, skb);
		break;

	default:
		BT_DBG("%s opcode 0x%x", hdev->name, opcode);
		break;
	}

	if (ev->opcode != HCI_OP_NOP)
		del_timer(&hdev->cmd_timer);

	if (ev->ncmd) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			tasklet_schedule(&hdev->cmd_task);
	}
}

static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_status *ev = (void *) skb->data;
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY:
		hci_cs_inquiry(hdev, ev->status);
		break;

	case HCI_OP_CREATE_CONN:
		hci_cs_create_conn(hdev, ev->status);
		break;

	case HCI_OP_ADD_SCO:
		hci_cs_add_sco(hdev, ev->status);
		break;

	case HCI_OP_AUTH_REQUESTED:
		hci_cs_auth_requested(hdev, ev->status);
		break;

	case HCI_OP_SET_CONN_ENCRYPT:
		hci_cs_set_conn_encrypt(hdev, ev->status);
		break;

	case HCI_OP_REMOTE_NAME_REQ:
		hci_cs_remote_name_req(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_FEATURES:
		hci_cs_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_EXT_FEATURES:
		hci_cs_read_remote_ext_features(hdev, ev->status);
		break;

	case HCI_OP_SETUP_SYNC_CONN:
		hci_cs_setup_sync_conn(hdev, ev->status);
		break;

	case HCI_OP_SNIFF_MODE:
		hci_cs_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_EXIT_SNIFF_MODE:
		hci_cs_exit_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_DISCONNECT:
		if (ev->status != 0)
			mgmt_disconnect_failed(hdev->id);
		break;

	case HCI_OP_LE_CREATE_CONN:
		hci_cs_le_create_conn(hdev, ev->status);
		break;

	case HCI_OP_LE_START_ENC:
		hci_cs_le_start_enc(hdev, ev->status);
		break;

	default:
		BT_DBG("%s opcode 0x%x", hdev->name, opcode);
		break;
	}

	if (ev->opcode != HCI_OP_NOP)
		del_timer(&hdev->cmd_timer);

	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			tasklet_schedule(&hdev->cmd_task);
	}
}

static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_role_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		if (!ev->status) {
			if (ev->role)
				conn->link_mode &= ~HCI_LM_MASTER;
			else
				conn->link_mode |= HCI_LM_MASTER;
		}

		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->pend);

		hci_role_switch_cfm(conn, ev->status, ev->role);
	}

	hci_dev_unlock(hdev);
}

static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
	__le16 *ptr;
	int i;

	skb_pull(skb, sizeof(*ev));

	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);

	if (skb->len < ev->num_hndl * 4) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	tasklet_disable(&hdev->tx_task);

	for (i = 0, ptr = (__le16 *) skb->data; i < ev->num_hndl; i++) {
		struct hci_conn *conn;
		__u16 handle, count;

		handle = get_unaligned_le16(ptr++);
		count = get_unaligned_le16(ptr++);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (conn) {
			conn->sent -= count;

			if (conn->type == ACL_LINK) {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			} else if (conn->type == LE_LINK) {
				if (hdev->le_pkts) {
					hdev->le_cnt += count;
					if (hdev->le_cnt > hdev->le_pkts)
						hdev->le_cnt = hdev->le_pkts;
				} else {
					hdev->acl_cnt += count;
					if (hdev->acl_cnt > hdev->acl_pkts)
						hdev->acl_cnt = hdev->acl_pkts;
				}
			} else {
				hdev->sco_cnt += count;
				if (hdev->sco_cnt > hdev->sco_pkts)
					hdev->sco_cnt = hdev->sco_pkts;
			}
		}
	}

	tasklet_schedule(&hdev->tx_task);

	tasklet_enable(&hdev->tx_task);
}
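/* Mode Change reports transitions between active and sniff mode; the
 * power_save flag is only touched for mode changes that the host did not
 * initiate itself (no HCI_CONN_MODE_CHANGE_PEND outstanding).
 */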
static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_mode_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		conn->mode = ev->mode;
		conn->interval = __le16_to_cpu(ev->interval);

		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
			if (conn->mode == HCI_CM_ACTIVE)
				conn->power_save = 1;
			else
				conn->power_save = 0;
		}

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend))
			hci_sco_setup(conn, ev->status);
	}

	hci_dev_unlock(hdev);
}

static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pin_code_req *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn && conn->state == BT_CONNECTED) {
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
		hci_conn_put(conn);
	}

	if (!test_bit(HCI_PAIRABLE, &hdev->flags))
		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
					sizeof(ev->bdaddr), &ev->bdaddr);
	else if (test_bit(HCI_MGMT, &hdev->flags)) {
		u8 secure;

		/* conn may be NULL here; only insist on a secure PIN when
		 * the pending security level is actually known. */
		if (conn && conn->pending_sec_level == BT_SECURITY_HIGH)
			secure = 1;
		else
			secure = 0;

		mgmt_pin_code_request(hdev->id, &ev->bdaddr, secure);
	}

	hci_dev_unlock(hdev);
}

static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = (void *) skb->data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_LINK_KEYS, &hdev->flags))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		BT_DBG("%s link key not found for %s", hdev->name,
							batostr(&ev->bdaddr));
		goto not_found;
	}

	BT_DBG("%s found key type %u for %s", hdev->name, key->type,
							batostr(&ev->bdaddr));

	if (!test_bit(HCI_DEBUG_KEYS, &hdev->flags) &&
				key->type == HCI_LK_DEBUG_COMBINATION) {
		BT_DBG("%s ignoring debug key", hdev->name);
		goto not_found;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		if (key->type == HCI_LK_UNAUTH_COMBINATION &&
				conn->auth_type != 0xff &&
				(conn->auth_type & 0x01)) {
			BT_DBG("%s ignoring unauthenticated key", hdev->name);
			goto not_found;
		}

		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
				conn->pending_sec_level == BT_SECURITY_HIGH) {
			BT_DBG("%s ignoring key unauthenticated for high security",
								hdev->name);
			goto not_found;
		}

		conn->key_type = key->type;
		conn->pin_length = key->pin_len;
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, 16);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}
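/* Link Key Notification: the new key is handed to the key store only when
 * the management core owns link keys (HCI_LINK_KEYS set); the PIN length
 * recorded on the connection is passed along so the key's strength can be
 * judged later.
 */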
static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = (void *) skb->data;
	struct hci_conn *conn;
	u8 pin_len = 0;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		pin_len = conn->pin_length;

		if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
			conn->key_type = ev->key_type;

		hci_conn_put(conn);
	}

	if (test_bit(HCI_LINK_KEYS, &hdev->flags))
		hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
						ev->key_type, pin_len);

	hci_dev_unlock(hdev);
}

static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_clock_offset *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn && !ev->status) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie) {
			ie->data.clock_offset = ev->clock_offset;
			ie->timestamp = jiffies;
		}
	}

	hci_dev_unlock(hdev);
}

static inline void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pkt_type_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn && !ev->status)
		conn->pkt_type = __le16_to_cpu(ev->pkt_type);

	hci_dev_unlock(hdev);
}

static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
	struct inquiry_entry *ie;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie) {
		ie->data.pscan_rep_mode = ev->pscan_rep_mode;
		ie->timestamp = jiffies;
	}

	hci_dev_unlock(hdev);
}
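
/* Inquiry Result with RSSI event: depending on the controller, each
 * response either matches struct inquiry_info_with_rssi or additionally
 * carries a page-scan-mode field. The per-response size check below picks
 * the right layout before the inquiry cache is updated. */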
static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct inquiry_data data;
	int num_rsp = *((__u8 *) skb->data);

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	hci_dev_lock(hdev);

	if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags)) {

		if (test_bit(HCI_MGMT, &hdev->flags))
			mgmt_discovering(hdev->id, 1);
	}

	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
		struct inquiry_info_with_rssi_and_pscan_mode *info;
		info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = info->pscan_mode;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;
			hci_inquiry_cache_update(hdev, &data);
			mgmt_device_found(hdev->id, &info->bdaddr,
						info->dev_class, info->rssi,
						NULL);
		}
	} else {
		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = 0x00;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;
			hci_inquiry_cache_update(hdev, &data);
			mgmt_device_found(hdev->id, &info->bdaddr,
						info->dev_class, info->rssi,
						NULL);
		}
	}

	hci_dev_unlock(hdev);
}

static inline void hci_remote_ext_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & 0x01);

		conn->ssp_mode = (ev->features[0] & 0x01);
	}

	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	}

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_put(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}

static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	switch (ev->status) {
	case 0x00:
		conn->handle = __le16_to_cpu(ev->handle);
		conn->state = BT_CONNECTED;

		hci_conn_hold_device(conn);
		hci_conn_add_sysfs(conn);
		break;

	case 0x11:	/* Unsupported Feature or Parameter Value */
	case 0x1c:	/* SCO interval rejected */
	case 0x1a:	/* Unsupported Remote Feature */
	case 0x1f:	/* Unspecified error */
		if (conn->out && conn->attempt < 2) {
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
			hci_setup_sync(conn, conn->link->handle);
			goto unlock;
		}
		/* fall through */

	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_proto_connect_cfm(conn, ev->status);
	if (ev->status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}

static inline void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}

static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_sniff_subrate *ev = (void *) skb->data;

	BT_DBG("%s status %d", hdev->name, ev->status);
}
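
/* Extended Inquiry Result event: like an RSSI inquiry result, but each
 * response carries an extended inquiry response (EIR) data block. Responses
 * are cached with ssp_mode set to 0x01 and the EIR data is forwarded to the
 * management interface through mgmt_device_found(). */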
static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct inquiry_data data;
	struct extended_inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp = *((__u8 *) skb->data);

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags)) {

		if (test_bit(HCI_MGMT, &hdev->flags))
			mgmt_discovering(hdev->id, 1);
	}

	hci_dev_lock(hdev);

	for (; num_rsp; num_rsp--, info++) {
		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode = info->pscan_rep_mode;
		data.pscan_period_mode = info->pscan_period_mode;
		data.pscan_mode = 0x00;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset = info->clock_offset;
		data.rssi = info->rssi;
		data.ssp_mode = 0x01;
		hci_inquiry_cache_update(hdev, &data);
		mgmt_device_found(hdev->id, &info->bdaddr, info->dev_class,
						info->rssi, info->data);
	}

	hci_dev_unlock(hdev);
}

static inline u8 hci_get_auth_req(struct hci_conn *conn)
{
	/* If remote requests dedicated bonding follow that lead */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
		/* If both remote and local IO capabilities allow MITM
		 * protection then require it, otherwise don't */
		if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
			return 0x02;
		else
			return 0x03;
	}

	/* If remote requests no-bonding follow that lead */
	if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
		return conn->remote_auth | (conn->auth_type & 0x01);

	return conn->auth_type;
}

static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	hci_conn_hold(conn);

	if (!test_bit(HCI_MGMT, &hdev->flags))
		goto unlock;

	if (test_bit(HCI_PAIRABLE, &hdev->flags) ||
			(conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.capability = conn->io_capability;
		conn->auth_type = hci_get_auth_req(conn);
		cp.authentication = conn->auth_type;

		if ((conn->out == 0x01 || conn->remote_oob == 0x01) &&
				hci_find_remote_oob_data(hdev, &conn->dst))
			cp.oob_data = 0x01;
		else
			cp.oob_data = 0x00;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
							sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = 0x18; /* Pairing not allowed */

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
							sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}

static inline void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_reply *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	conn->remote_cap = ev->capability;
	conn->remote_oob = ev->oob_data;
	conn->remote_auth = ev->authentication;

unlock:
	hci_dev_unlock(hdev);
}
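
/* User Confirmation Request event (Secure Simple Pairing numeric
 * comparison). If neither side requires MITM protection the request is
 * auto-accepted, optionally after auto_accept_delay; otherwise the decision
 * is deferred to user space through mgmt_user_confirm_request(). */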
static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
							struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_MGMT, &hdev->flags))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. The only exception is when we're dedicated bonding
	 * initiators (connect_cfm_cb set) since then we always have the MITM
	 * bit set. */
	if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
					sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If neither side requires MITM protection, auto-accept */
	if ((!loc_mitm || conn->remote_cap == 0x03) &&
			(!rem_mitm || conn->io_capability == 0x03)) {

		/* If we're not the initiators, request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
						hdev->auto_accept_delay);

		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			mod_timer(&conn->auto_accept_timer, jiffies + delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
					sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	mgmt_user_confirm_request(hdev->id, &ev->bdaddr, ev->passkey,
							confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
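
/* Simple Pairing Complete event: reports the outcome of a Secure Simple
 * Pairing attempt with the given remote device. */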
static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* To avoid duplicate auth_failed events to user space we check
	 * the HCI_CONN_AUTH_PEND flag which will be set if we
	 * initiated the authentication. An auth_complete event is always
	 * produced when we are the initiator, and that event is already
	 * mapped to the mgmt_auth_failed event. */
	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->pend) && ev->status != 0)
		mgmt_auth_failed(hdev->id, &conn->dst, ev->status);

	hci_conn_put(conn);

unlock:
	hci_dev_unlock(hdev);
}

static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_host_features *ev = (void *) skb->data;
	struct inquiry_entry *ie;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		ie->data.ssp_mode = (ev->features[0] & 0x01);

	hci_dev_unlock(hdev);
}

static inline void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
							struct sk_buff *skb)
{
	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
	struct oob_data *data;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_MGMT, &hdev->flags))
		goto unlock;

	data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
	if (data) {
		struct hci_cp_remote_oob_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		memcpy(cp.hash, data->hash, sizeof(cp.hash));
		memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
									&cp);
	} else {
		struct hci_cp_remote_oob_data_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
									&cp);
	}

unlock:
	hci_dev_unlock(hdev);
}

static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr);
	if (!conn) {
		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
		if (!conn) {
			BT_ERR("No memory for new connection");
			hci_dev_unlock(hdev);
			return;
		}

		conn->dst_type = ev->bdaddr_type;
	}

	if (ev->status) {
		mgmt_connect_failed(hdev->id, &ev->bdaddr, ev->status);
		hci_proto_connect_cfm(conn, ev->status);
		conn->state = BT_CLOSED;
		hci_conn_del(conn);
		goto unlock;
	}

	mgmt_connected(hdev->id, &ev->bdaddr);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = __le16_to_cpu(ev->handle);
	conn->state = BT_CONNECTED;

	hci_conn_hold_device(conn);
	hci_conn_add_sysfs(conn);

	hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);
}

static inline void hci_le_adv_report_evt(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	struct hci_ev_le_advertising_info *ev;
	u8 num_reports;

	num_reports = skb->data[0];
	ev = (void *) &skb->data[1];

	hci_dev_lock(hdev);

	hci_add_adv_entry(hdev, ev);

	while (--num_reports) {
		ev = (void *) (ev->data + ev->length + 1);
		hci_add_adv_entry(hdev, ev);
	}

	hci_dev_unlock(hdev);
}
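
/* LE Long Term Key Request event: the controller asks for the LTK that
 * matches this connection and the ediv/rand values supplied by the remote
 * device. Reply with the stored key if one is found, otherwise send a
 * negative reply. */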
static inline void hci_le_ltk_request_evt(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct link_key *ltk;

	BT_DBG("%s handle %d", hdev->name, __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	ltk = hci_find_ltk(hdev, ev->ediv, ev->random);
	if (ltk == NULL)
		goto not_found;

	memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
	cp.handle = cpu_to_le16(conn->handle);
	conn->pin_length = ltk->pin_len;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}

static inline void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_meta *le_ev = (void *) skb->data;

	skb_pull(skb, sizeof(*le_ev));

	switch (le_ev->subevent) {
	case HCI_EV_LE_CONN_COMPLETE:
		hci_le_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_ADVERTISING_REPORT:
		hci_le_adv_report_evt(hdev, skb);
		break;

	case HCI_EV_LE_LTK_REQ:
		hci_le_ltk_request_evt(hdev, skb);
		break;

	default:
		break;
	}
}
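
/* Main HCI event demultiplexer: strips the event header and dispatches the
 * packet to the matching handler above. Unknown events are only logged.
 * The skb is always consumed here. */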
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	__u8 event = hdr->evt;

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_VERSION:
		hci_remote_version_evt(hdev, skb);
		break;

	case HCI_EV_QOS_SETUP_COMPLETE:
		hci_qos_setup_complete_evt(hdev, skb);
		break;

	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_CHANGED:
		hci_sync_conn_changed_evt(hdev, skb);
		break;

	case HCI_EV_SNIFF_SUBRATE:
		hci_sniff_subrate_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

	default:
		BT_DBG("%s event 0x%x", hdev->name, event);
		break;
	}

	kfree_skb(skb);
	hdev->stat.evt_rx++;
}

/* Generate internal stack event */
void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	skb->dev = (void *) hdev;
	hci_send_to_sock(hdev, skb, NULL);
	kfree_skb(skb);
}

module_param(enable_le, bool, 0444);
MODULE_PARM_DESC(enable_le, "Enable LE support");