/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI event handling. */

#include <linux/module.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <net/sock.h>

#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static int enable_le;

/* Handle HCI Event packets */

static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (status)
		return;

	if (test_and_clear_bit(HCI_INQUIRY, &hdev->flags) &&
			test_bit(HCI_MGMT, &hdev->flags))
		mgmt_discovering(hdev->id, 0);

	hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);

	hci_conn_check_pending(hdev);
}

static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (status)
		return;

	if (test_and_clear_bit(HCI_INQUIRY, &hdev->flags) &&
			test_bit(HCI_MGMT, &hdev->flags))
		mgmt_discovering(hdev->id, 0);

	hci_conn_check_pending(hdev);
}

static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}

static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_role_discovery *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		if (rp->role)
			conn->link_mode &= ~HCI_LM_MASTER;
		else
			conn->link_mode |= HCI_LM_MASTER;
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = __le16_to_cpu(rp->policy);

	hci_dev_unlock(hdev);
}

static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_def_link_policy *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->link_policy = __le16_to_cpu(rp->policy);
}

static void hci_cc_write_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
	if (!sent)
		return;

	if (!status)
		hdev->link_policy = get_unaligned_le16(sent);

	hci_req_complete(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, status);
}

static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	clear_bit(HCI_RESET, &hdev->flags);

	hci_req_complete(hdev, HCI_OP_RESET, status);
}

static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return;

	if (test_bit(HCI_MGMT, &hdev->flags))
		mgmt_set_local_name_complete(hdev->id, sent, status);

	if (status)
		return;

	memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
}

static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_name *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
}

static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return;

	if (!status) {
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	hci_req_complete(hdev, HCI_OP_WRITE_AUTH_ENABLE, status);
}

static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
	if (!sent)
		return;

	if (!status) {
		__u8 param = *((__u8 *) sent);

		if (param)
			set_bit(HCI_ENCRYPT, &hdev->flags);
		else
			clear_bit(HCI_ENCRYPT, &hdev->flags);
	}

	hci_req_complete(hdev, HCI_OP_WRITE_ENCRYPT_MODE, status);
}

static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return;

	if (!status) {
		__u8 param = *((__u8 *) sent);
		int old_pscan, old_iscan;

		old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
		old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);

		if (param & SCAN_INQUIRY) {
			set_bit(HCI_ISCAN, &hdev->flags);
			if (!old_iscan)
				mgmt_discoverable(hdev->id, 1);
		} else if (old_iscan)
			mgmt_discoverable(hdev->id, 0);

		if (param & SCAN_PAGE) {
			set_bit(HCI_PSCAN, &hdev->flags);
			if (!old_pscan)
				mgmt_connectable(hdev->id, 1);
		} else if (old_pscan)
			mgmt_connectable(hdev->id, 0);
	}

	hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status);
}

static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_class_of_dev *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->dev_class, rp->dev_class, 3);

	BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
		hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
}

static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
	if (!sent)
		return;

	memcpy(hdev->dev_class, sent, 3);
}

static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_voice_setting *rp = (void *) skb->data;
	__u16 setting;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	setting = __le16_to_cpu(rp->voice_setting);

	if (hdev->voice_setting == setting)
		return;

	hdev->voice_setting = setting;

	BT_DBG("%s voice setting 0x%04x", hdev->name, setting);

	if (hdev->notify) {
		tasklet_disable(&hdev->tx_task);
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
		tasklet_enable(&hdev->tx_task);
	}
}

static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u16 setting;
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
	if (!sent)
		return;

	setting = get_unaligned_le16(sent);

	if (hdev->voice_setting == setting)
		return;

	hdev->voice_setting = setting;

	BT_DBG("%s voice setting 0x%04x", hdev->name, setting);

	if (hdev->notify) {
		tasklet_disable(&hdev->tx_task);
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
		tasklet_enable(&hdev->tx_task);
	}
}

static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	hci_req_complete(hdev, HCI_OP_HOST_BUFFER_SIZE, status);
}

static void hci_cc_read_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_ssp_mode *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->ssp_mode = rp->mode;
}

static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return;

	hdev->ssp_mode = *((__u8 *) sent);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (hdev->features[6] & LMP_EXT_INQ)
		return 2;

	if (hdev->features[3] & LMP_RSSI_INQ)
		return 1;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
			hdev->lmp_subver == 0x0757)
		return 1;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 1;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 1;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 1;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
			hdev->lmp_subver == 0x1805)
		return 1;

	return 0;
}

static void hci_setup_inquiry_mode(struct hci_dev *hdev)
{
	u8 mode;

	mode = hci_get_inquiry_mode(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_dev *hdev)
{
	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices */
	if (hdev->lmp_ver <= 1)
		return;

	events[4] |= 0x01; /* Flow Specification Complete */
	events[4] |= 0x02; /* Inquiry Result with RSSI */
	events[4] |= 0x04; /* Read Remote Extended Features Complete */
	events[5] |= 0x08; /* Synchronous Connection Complete */
	events[5] |= 0x10; /* Synchronous Connection Changed */

	if (hdev->features[3] & LMP_RSSI_INQ)
		events[4] |= 0x04; /* Inquiry Result with RSSI */

	if (hdev->features[5] & LMP_SNIFF_SUBR)
		events[5] |= 0x20; /* Sniff Subrating */

	if (hdev->features[5] & LMP_PAUSE_ENC)
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (hdev->features[6] & LMP_EXT_INQ)
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (hdev->features[6] & LMP_NO_FLUSH)
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (hdev->features[7] & LMP_LSTO)
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (hdev->features[6] & LMP_SIMPLE_PAIR) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification */
	}

	if (hdev->features[4] & LMP_LE)
		events[7] |= 0x20; /* LE Meta-Event */

	hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static void hci_set_le_support(struct hci_dev *hdev)
{
	struct hci_cp_write_le_host_supported cp;

	memset(&cp, 0, sizeof(cp));

	if (enable_le) {
		cp.le = 1;
		cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR);
	}

	hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp), &cp);
}

static void hci_setup(struct hci_dev *hdev)
{
	hci_setup_event_mask(hdev);

	if (hdev->lmp_ver > 1)
		hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (hdev->features[6] & LMP_SIMPLE_PAIR) {
		u8 mode = 0x01;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
	}

	if (hdev->features[3] & LMP_RSSI_INQ)
		hci_setup_inquiry_mode(hdev);

	if (hdev->features[7] & LMP_INQ_TX_PWR)
		hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (hdev->features[7] & LMP_EXTFEATURES) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES,
							sizeof(cp), &cp);
	}

	if (hdev->features[4] & LMP_LE)
		hci_set_le_support(hdev);
}

static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_version *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->hci_ver = rp->hci_ver;
	hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
	hdev->lmp_ver = rp->lmp_ver;
	hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
	hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);

	BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name,
					hdev->manufacturer,
					hdev->hci_ver, hdev->hci_rev);

	if (test_bit(HCI_INIT, &hdev->flags))
		hci_setup(hdev);
}

static void hci_setup_link_policy(struct hci_dev *hdev)
{
	u16 link_policy = 0;

	if (hdev->features[0] & LMP_RSWITCH)
		link_policy |= HCI_LP_RSWITCH;
	if (hdev->features[0] & LMP_HOLD)
		link_policy |= HCI_LP_HOLD;
	if (hdev->features[0] & LMP_SNIFF)
		link_policy |= HCI_LP_SNIFF;
	if (hdev->features[1] & LMP_PARK)
		link_policy |= HCI_LP_PARK;

	link_policy = cpu_to_le16(link_policy);
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
					sizeof(link_policy), &link_policy);
}

static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_commands *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		goto done;

	memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));

	if (test_bit(HCI_INIT, &hdev->flags) && (hdev->commands[5] & 0x10))
		hci_setup_link_policy(hdev);

done:
	hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status);
}

static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	if (hdev->features[0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	if (hdev->features[1] & LMP_HV2) {
		hdev->pkt_type |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[1] & LMP_HV3) {
		hdev->pkt_type |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	if (hdev->features[3] & LMP_ESCO)
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	if (hdev->features[5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);

	BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
					hdev->features[0], hdev->features[1],
					hdev->features[2], hdev->features[3],
					hdev->features[4], hdev->features[5],
					hdev->features[6], hdev->features[7]);
}

static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
							struct sk_buff *skb)
{
	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->extfeatures, rp->features, 8);

	hci_req_complete(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, rp->status);
}

static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
		hdev->sco_mtu = 64;
		hdev->sco_pkts = 8;
	}

	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name,
					hdev->acl_mtu, hdev->acl_pkts,
					hdev->sco_mtu, hdev->sco_pkts);
}

static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_bd_addr *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (!rp->status)
		bacpy(&hdev->bdaddr, &rp->bdaddr);

	hci_req_complete(hdev, HCI_OP_READ_BD_ADDR, rp->status);
}

static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
}

static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
							struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	hci_req_complete(hdev, HCI_OP_DELETE_STORED_LINK_KEY, status);
}

static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	hci_req_complete(hdev, HCI_OP_SET_EVENT_MASK, status);
}

static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
							struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	hci_req_complete(hdev, HCI_OP_WRITE_INQUIRY_MODE, status);
}

static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
							struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, status);
}

static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	hci_req_complete(hdev, HCI_OP_SET_EVENT_FLT, status);
}

static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (test_bit(HCI_MGMT, &hdev->flags))
		mgmt_pin_code_reply_complete(hdev->id, &rp->bdaddr, rp->status);

	if (rp->status != 0)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		return;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;
}

static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (test_bit(HCI_MGMT, &hdev->flags))
		mgmt_pin_code_neg_reply_complete(hdev->id, &rp->bdaddr,
								rp->status);
}

static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
							struct sk_buff *skb)
{
	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
	hdev->le_pkts = rp->le_max_pkt;

	hdev->le_cnt = hdev->le_pkts;

	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);

	hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status);
}

static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (test_bit(HCI_MGMT, &hdev->flags))
		mgmt_user_confirm_reply_complete(hdev->id, &rp->bdaddr,
								rp->status);
}

static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
							struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (test_bit(HCI_MGMT, &hdev->flags))
		mgmt_user_confirm_neg_reply_complete(hdev->id, &rp->bdaddr,
								rp->status);
}

static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
							struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	mgmt_read_local_oob_data_reply_complete(hdev->id, rp->hash,
						rp->randomizer, rp->status);
}

static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
							struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	if (cp->enable == 0x01) {
		del_timer(&hdev->adv_timer);
		hci_adv_entries_clear(hdev);
	} else if (cp->enable == 0x00) {
		mod_timer(&hdev->adv_timer, jiffies + ADV_CLEAR_TIMEOUT);
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_le_ltk_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_req_complete(hdev, HCI_OP_LE_LTK_REPLY, rp->status);
}

static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_le_ltk_neg_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status);
}

static inline void hci_cc_write_le_host_supported(struct hci_dev *hdev,
							struct sk_buff *skb)
{
	struct hci_cp_read_local_ext_features cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (status)
		return;

	cp.page = 0x01;
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, sizeof(cp), &cp);
}

static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
{
	BT_DBG("%s status 0x%x", hdev->name, status);

	if (status) {
		hci_req_complete(hdev, HCI_OP_INQUIRY, status);
		hci_conn_check_pending(hdev);
		return;
	}

	if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags) &&
				test_bit(HCI_MGMT, &hdev->flags))
		mgmt_discovering(hdev->id, 1);
}

static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->bdaddr), conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_proto_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
			if (conn) {
				conn->out = 1;
				conn->link_mode |= HCI_LM_MASTER;
			} else
				BT_ERR("No memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_add_sco *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle %d", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_proto_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_auth_requested *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_proto_connect_cfm(conn, status);
			hci_conn_put(conn);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_set_conn_encrypt *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_proto_connect_cfm(conn, status);
			hci_conn_put(conn);
		}
	}

	hci_dev_unlock(hdev);
}

static int hci_outgoing_auth_needed(struct hci_dev *hdev,
							struct hci_conn *conn)
{
	if (conn->state != BT_CONFIG || !conn->out)
		return 0;

	if (conn->pending_sec_level == BT_SECURITY_SDP)
		return 0;

	/* Only request authentication for SSP connections or non-SSP
	 * devices with sec_level HIGH */
	if (!(hdev->ssp_mode > 0 && conn->ssp_mode > 0) &&
				conn->pending_sec_level != BT_SECURITY_HIGH)
		return 0;

	return 1;
}

static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, status);

	/* If successful wait for the name req complete event before
	 * checking for the need to do authentication */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
		struct hci_cp_auth_requested cp;
		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_read_remote_features *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_proto_connect_cfm(conn, status);
			hci_conn_put(conn);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_read_remote_ext_features *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_proto_connect_cfm(conn, status);
			hci_conn_put(conn);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_setup_sync_conn *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle %d", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_proto_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_sniff_mode *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend);

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend))
			hci_sco_setup(conn, status);
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_exit_sniff_mode *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend);

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend))
			hci_sco_setup(conn, status);
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_le_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);

	BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->peer_addr),
		conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			conn->state = BT_CLOSED;
			hci_proto_connect_cfm(conn, status);
			hci_conn_del(conn);
		}
	} else {
		if (!conn) {
			conn = hci_conn_add(hdev, LE_LINK, &cp->peer_addr);
			if (conn) {
				conn->dst_type = cp->peer_addr_type;
				conn->out = 1;
			} else {
				BT_ERR("No memory for new connection");
			}
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%x", hdev->name, status);
}

static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status %d", hdev->name, status);

	if (test_and_clear_bit(HCI_INQUIRY, &hdev->flags) &&
			test_bit(HCI_MGMT, &hdev->flags))
		mgmt_discovering(hdev->id, 0);

	hci_req_complete(hdev, HCI_OP_INQUIRY, status);

	hci_conn_check_pending(hdev);
}

static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct inquiry_data data;
	struct inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp = *((__u8 *) skb->data);

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	hci_dev_lock(hdev);

	if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags)) {

		if (test_bit(HCI_MGMT, &hdev->flags))
			mgmt_discovering(hdev->id, 1);
	}

	for (; num_rsp; num_rsp--, info++) {
		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode = info->pscan_rep_mode;
		data.pscan_period_mode = info->pscan_period_mode;
		data.pscan_mode = info->pscan_mode;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset = info->clock_offset;
		data.rssi = 0x00;
		data.ssp_mode = 0x00;
		hci_inquiry_cache_update(hdev, &data);
		mgmt_device_found(hdev->id, &info->bdaddr, info->dev_class, 0,
									NULL);
	}

	hci_dev_unlock(hdev);
}

static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type != SCO_LINK)
			goto unlock;

		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	if (!ev->status) {
		conn->handle = __le16_to_cpu(ev->handle);

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);
			conn->disc_timeout = HCI_DISCONN_TIMEOUT;
			mgmt_connected(hdev->id, &ev->bdaddr);
		} else
			conn->state = BT_CONNECTED;

		hci_conn_hold_device(conn);
		hci_conn_add_sysfs(conn);

		if (test_bit(HCI_AUTH, &hdev->flags))
			conn->link_mode |= HCI_LM_AUTH;

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			conn->link_mode |= HCI_LM_ENCRYPT;

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
							sizeof(cp), &cp);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < 3) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE,
							sizeof(cp), &cp);
		}
	} else {
		conn->state = BT_CLOSED;
		if (conn->type == ACL_LINK)
			mgmt_connect_failed(hdev->id, &ev->bdaddr, ev->status);
	}

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

	if (ev->status) {
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_del(conn);
	} else if (ev->link_type != ACL_LINK)
		hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}

static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;

	BT_DBG("%s bdaddr %s type 0x%x", hdev->name,
					batostr(&ev->bdaddr), ev->link_type);

	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type);

	if ((mask & HCI_LM_ACCEPT) &&
			!hci_blacklist_lookup(hdev, &ev->bdaddr)) {
		/* Connection accepted */
		struct inquiry_entry *ie;
		struct hci_conn *conn;

		hci_dev_lock(hdev);

		ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
		if (ie)
			memcpy(ie->data.dev_class, ev->dev_class, 3);

		conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
		if (!conn) {
			conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
			if (!conn) {
				BT_ERR("No memory for new connection");
				hci_dev_unlock(hdev);
				return;
			}
		}

		memcpy(conn->dev_class, ev->dev_class, 3);
		conn->state = BT_CONNECT;

		hci_dev_unlock(hdev);

		if (ev->link_type == ACL_LINK || !lmp_esco_capable(hdev)) {
			struct hci_cp_accept_conn_req cp;

			bacpy(&cp.bdaddr, &ev->bdaddr);

			if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
				cp.role = 0x00; /* Become master */
			else
				cp.role = 0x01; /* Remain slave */

			hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ,
							sizeof(cp), &cp);
		} else {
			struct hci_cp_accept_sync_conn_req cp;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			cp.pkt_type = cpu_to_le16(conn->pkt_type);

			cp.tx_bandwidth = cpu_to_le32(0x00001f40);
			cp.rx_bandwidth = cpu_to_le32(0x00001f40);
			cp.max_latency = cpu_to_le16(0xffff);
			cp.content_format = cpu_to_le16(hdev->voice_setting);
			cp.retrans_effort = 0xff;

			hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
							sizeof(cp), &cp);
		}
	} else {
		/* Connection rejected */
		struct hci_cp_reject_conn_req cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = 0x0f;
		hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
	}
}

static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	if (ev->status) {
		mgmt_disconnect_failed(hdev->id);
		return;
	}

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	conn->state = BT_CLOSED;

	if (conn->type == ACL_LINK || conn->type == LE_LINK)
		mgmt_disconnected(hdev->id, &conn->dst);

	hci_proto_disconn_cfm(conn, ev->reason);
	hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}

static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (!(conn->ssp_mode > 0 && hdev->ssp_mode > 0) &&
				test_bit(HCI_CONN_REAUTH_PEND, &conn->pend)) {
			BT_INFO("re-auth of legacy device is not possible.");
		} else {
			conn->link_mode |= HCI_LM_AUTH;
			conn->sec_level = conn->pending_sec_level;
		}
	} else {
		mgmt_auth_failed(hdev->id, &conn->dst, ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->pend);

	if (conn->state == BT_CONFIG) {
		if (!ev->status && hdev->ssp_mode > 0 && conn->ssp_mode > 0) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
									&cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_proto_connect_cfm(conn, ev->status);
			hci_conn_put(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_put(conn);
	}

	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
									&cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);
			hci_encrypt_cfm(conn, ev->status, 0x00);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}

static inline void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	if (ev->status == 0 && test_bit(HCI_MGMT, &hdev->flags))
		mgmt_remote_name(hdev->id, &ev->bdaddr, ev->name);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
		struct hci_cp_auth_requested cp;
		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}

static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		if (!ev->status) {
			if (ev->encrypt) {
				/* Encryption implies authentication */
				conn->link_mode |= HCI_LM_AUTH;
				conn->link_mode |= HCI_LM_ENCRYPT;
				conn->sec_level = conn->pending_sec_level;
			} else
				conn->link_mode &= ~HCI_LM_ENCRYPT;
		}

		clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);

		if (conn->state == BT_CONFIG) {
			if (!ev->status)
				conn->state = BT_CONNECTED;

			hci_proto_connect_cfm(conn, ev->status);
			hci_conn_put(conn);
		} else
			hci_encrypt_cfm(conn, ev->status, ev->encrypt);
	}

	hci_dev_unlock(hdev);
}

static inline void hci_change_link_key_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		if (!ev->status)
			conn->link_mode |= HCI_LM_SECURE;

		clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);

		hci_key_change_cfm(conn, ev->status);
	}

	hci_dev_unlock(hdev);
}

static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		memcpy(conn->features, ev->features, 8);

	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
							sizeof(cp), &cp);
		goto unlock;
	}

	if (!ev->status) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	}

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_put(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}

static inline void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}

static inline void hci_qos_setup_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}

static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev = (void *) skb->data;
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY_CANCEL:
		hci_cc_inquiry_cancel(hdev, skb);
		break;

	case HCI_OP_EXIT_PERIODIC_INQ:
		hci_cc_exit_periodic_inq(hdev, skb);
		break;

	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
		hci_cc_remote_name_req_cancel(hdev, skb);
		break;

	case HCI_OP_ROLE_DISCOVERY:
		hci_cc_role_discovery(hdev, skb);
		break;

	case HCI_OP_READ_LINK_POLICY:
		hci_cc_read_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_LINK_POLICY:
		hci_cc_write_link_policy(hdev, skb);
		break;

	case HCI_OP_READ_DEF_LINK_POLICY:
		hci_cc_read_def_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_DEF_LINK_POLICY:
		hci_cc_write_def_link_policy(hdev, skb);
		break;

	case HCI_OP_RESET:
		hci_cc_reset(hdev, skb);
		break;

	case HCI_OP_WRITE_LOCAL_NAME:
		hci_cc_write_local_name(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_NAME:
		hci_cc_read_local_name(hdev, skb);
		break;

	case HCI_OP_WRITE_AUTH_ENABLE:
		hci_cc_write_auth_enable(hdev, skb);
		break;

	case HCI_OP_WRITE_ENCRYPT_MODE:
		hci_cc_write_encrypt_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SCAN_ENABLE:
		hci_cc_write_scan_enable(hdev, skb);
		break;

	case HCI_OP_READ_CLASS_OF_DEV:
		hci_cc_read_class_of_dev(hdev, skb);
		break;

	case HCI_OP_WRITE_CLASS_OF_DEV:
		hci_cc_write_class_of_dev(hdev, skb);
		break;

	case HCI_OP_READ_VOICE_SETTING:
		hci_cc_read_voice_setting(hdev, skb);
		break;

	case HCI_OP_WRITE_VOICE_SETTING:
		hci_cc_write_voice_setting(hdev, skb);
		break;

	case HCI_OP_HOST_BUFFER_SIZE:
		hci_cc_host_buffer_size(hdev, skb);
		break;

	case HCI_OP_READ_SSP_MODE:
		hci_cc_read_ssp_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_MODE:
		hci_cc_write_ssp_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_VERSION:
		hci_cc_read_local_version(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_COMMANDS:
		hci_cc_read_local_commands(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_FEATURES:
		hci_cc_read_local_features(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_EXT_FEATURES:
		hci_cc_read_local_ext_features(hdev, skb);
		break;

	case HCI_OP_READ_BUFFER_SIZE:
		hci_cc_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_READ_BD_ADDR:
		hci_cc_read_bd_addr(hdev, skb);
		break;

	case HCI_OP_WRITE_CA_TIMEOUT:
		hci_cc_write_ca_timeout(hdev, skb);
		break;

	case HCI_OP_DELETE_STORED_LINK_KEY:
		hci_cc_delete_stored_link_key(hdev, skb);
		break;

	case HCI_OP_SET_EVENT_MASK:
		hci_cc_set_event_mask(hdev, skb);
		break;

	case HCI_OP_WRITE_INQUIRY_MODE:
		hci_cc_write_inquiry_mode(hdev, skb);
		break;

	case HCI_OP_READ_INQ_RSP_TX_POWER:
		hci_cc_read_inq_rsp_tx_power(hdev, skb);
		break;

	case HCI_OP_SET_EVENT_FLT:
		hci_cc_set_event_flt(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_REPLY:
		hci_cc_pin_code_reply(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_NEG_REPLY:
		hci_cc_pin_code_neg_reply(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_DATA:
		hci_cc_read_local_oob_data_reply(hdev, skb);
		break;

	case HCI_OP_LE_READ_BUFFER_SIZE:
		hci_cc_le_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_REPLY:
		hci_cc_user_confirm_reply(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_NEG_REPLY:
		hci_cc_user_confirm_neg_reply(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_ENABLE:
		hci_cc_le_set_scan_enable(hdev, skb);
		break;

	case HCI_OP_LE_LTK_REPLY:
		hci_cc_le_ltk_reply(hdev, skb);
		break;

	case HCI_OP_LE_LTK_NEG_REPLY:
		hci_cc_le_ltk_neg_reply(hdev, skb);
		break;

	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
		hci_cc_write_le_host_supported(hdev, skb);
		break;

	default:
		BT_DBG("%s opcode 0x%x", hdev->name, opcode);
		break;
	}

	if (ev->opcode != HCI_OP_NOP)
		del_timer(&hdev->cmd_timer);

	if (ev->ncmd) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			tasklet_schedule(&hdev->cmd_task);
	}
}

static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_status *ev = (void *) skb->data;
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY:
		hci_cs_inquiry(hdev, ev->status);
		break;

	case HCI_OP_CREATE_CONN:
		hci_cs_create_conn(hdev, ev->status);
		break;

	case HCI_OP_ADD_SCO:
		hci_cs_add_sco(hdev, ev->status);
		break;

	case HCI_OP_AUTH_REQUESTED:
		hci_cs_auth_requested(hdev, ev->status);
		break;

	case HCI_OP_SET_CONN_ENCRYPT:
		hci_cs_set_conn_encrypt(hdev, ev->status);
		break;

	case HCI_OP_REMOTE_NAME_REQ:
		hci_cs_remote_name_req(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_FEATURES:
		hci_cs_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_EXT_FEATURES:
		hci_cs_read_remote_ext_features(hdev, ev->status);
		break;

	case HCI_OP_SETUP_SYNC_CONN:
		hci_cs_setup_sync_conn(hdev, ev->status);
		break;

	case HCI_OP_SNIFF_MODE:
		hci_cs_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_EXIT_SNIFF_MODE:
		hci_cs_exit_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_DISCONNECT:
		if (ev->status != 0)
			mgmt_disconnect_failed(hdev->id);
		break;

	case HCI_OP_LE_CREATE_CONN:
		hci_cs_le_create_conn(hdev, ev->status);
		break;

	case HCI_OP_LE_START_ENC:
		hci_cs_le_start_enc(hdev, ev->status);
		break;

	default:
		BT_DBG("%s opcode 0x%x", hdev->name, opcode);
		break;
	}

	if (ev->opcode != HCI_OP_NOP)
		del_timer(&hdev->cmd_timer);

	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			tasklet_schedule(&hdev->cmd_task);
	}
}

static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_role_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		if (!ev->status) {
			if (ev->role)
				conn->link_mode &= ~HCI_LM_MASTER;
			else
				conn->link_mode |= HCI_LM_MASTER;
		}

		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->pend);

		hci_role_switch_cfm(conn, ev->status, ev->role);
	}

	hci_dev_unlock(hdev);
}

static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
	__le16 *ptr;
	int i;

	skb_pull(skb, sizeof(*ev));

	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);

	if (skb->len < ev->num_hndl * 4) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	tasklet_disable(&hdev->tx_task);

	for (i = 0, ptr = (__le16 *) skb->data; i < ev->num_hndl; i++) {
		struct hci_conn *conn;
		__u16 handle, count;

		handle = get_unaligned_le16(ptr++);
		count = get_unaligned_le16(ptr++);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (conn) {
			conn->sent -= count;

			if (conn->type == ACL_LINK) {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			} else if (conn->type == LE_LINK) {
				if (hdev->le_pkts) {
					hdev->le_cnt += count;
					if (hdev->le_cnt > hdev->le_pkts)
						hdev->le_cnt = hdev->le_pkts;
				} else {
					hdev->acl_cnt += count;
					if (hdev->acl_cnt > hdev->acl_pkts)
						hdev->acl_cnt = hdev->acl_pkts;
				}
			} else {
				hdev->sco_cnt += count;
				if (hdev->sco_cnt > hdev->sco_pkts)
					hdev->sco_cnt = hdev->sco_pkts;
			}
		}
	}

	tasklet_schedule(&hdev->tx_task);

	tasklet_enable(&hdev->tx_task);
}

static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_mode_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		conn->mode = ev->mode;
		conn->interval = __le16_to_cpu(ev->interval);

		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
			if (conn->mode == HCI_CM_ACTIVE)
				conn->power_save = 1;
			else
				conn->power_save = 0;
		}

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend))
			hci_sco_setup(conn, ev->status);
	}

	hci_dev_unlock(hdev);
}

static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pin_code_req *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn && conn->state == BT_CONNECTED) {
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
		hci_conn_put(conn);
	}

	if (!test_bit(HCI_PAIRABLE, &hdev->flags))
		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
					sizeof(ev->bdaddr), &ev->bdaddr);
	else if (test_bit(HCI_MGMT, &hdev->flags)) {
		u8 secure;

		if (conn->pending_sec_level == BT_SECURITY_HIGH)
			secure = 1;
		else
			secure = 0;

		mgmt_pin_code_request(hdev->id, &ev->bdaddr, secure);
	}

	hci_dev_unlock(hdev);
}

static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = (void *) skb->data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_LINK_KEYS, &hdev->flags))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		BT_DBG("%s link key not found for %s", hdev->name,
							batostr(&ev->bdaddr));
		goto not_found;
	}

	BT_DBG("%s found key type %u for %s", hdev->name, key->type,
							batostr(&ev->bdaddr));

	if (!test_bit(HCI_DEBUG_KEYS, &hdev->flags) &&
				key->type == HCI_LK_DEBUG_COMBINATION) {
		BT_DBG("%s ignoring debug key", hdev->name);
		goto not_found;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		if (key->type == HCI_LK_UNAUTH_COMBINATION &&
				conn->auth_type != 0xff &&
				(conn->auth_type & 0x01)) {
			BT_DBG("%s ignoring unauthenticated key", hdev->name);
			goto not_found;
		}

		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
				conn->pending_sec_level == BT_SECURITY_HIGH) {
			BT_DBG("%s ignoring key unauthenticated for high \
					security", hdev->name);
			goto not_found;
		}

		conn->key_type = key->type;
		conn->pin_length = key->pin_len;
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, 16);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}

static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = (void *) skb->data;
	struct hci_conn *conn;
static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = (void *) skb->data;
	struct hci_conn *conn;
	u8 pin_len = 0;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		pin_len = conn->pin_length;

		if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
			conn->key_type = ev->key_type;

		hci_conn_put(conn);
	}

	if (test_bit(HCI_LINK_KEYS, &hdev->flags))
		hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
						ev->key_type, pin_len);

	hci_dev_unlock(hdev);
}

static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_clock_offset *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn && !ev->status) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie) {
			ie->data.clock_offset = ev->clock_offset;
			ie->timestamp = jiffies;
		}
	}

	hci_dev_unlock(hdev);
}

static inline void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pkt_type_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn && !ev->status)
		conn->pkt_type = __le16_to_cpu(ev->pkt_type);

	hci_dev_unlock(hdev);
}

static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
	struct inquiry_entry *ie;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie) {
		ie->data.pscan_rep_mode = ev->pscan_rep_mode;
		ie->timestamp = jiffies;
	}

	hci_dev_unlock(hdev);
}

static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct inquiry_data data;
	int num_rsp = *((__u8 *) skb->data);

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	hci_dev_lock(hdev);

	if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags)) {
		if (test_bit(HCI_MGMT, &hdev->flags))
			mgmt_discovering(hdev->id, 1);
	}

	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
		struct inquiry_info_with_rssi_and_pscan_mode *info;
		info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = info->pscan_mode;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;
			hci_inquiry_cache_update(hdev, &data);
			mgmt_device_found(hdev->id, &info->bdaddr,
						info->dev_class, info->rssi,
						NULL);
		}
	} else {
		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = 0x00;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;
			hci_inquiry_cache_update(hdev, &data);
			mgmt_device_found(hdev->id, &info->bdaddr,
						info->dev_class, info->rssi,
						NULL);
		}
	}

	hci_dev_unlock(hdev);
}

static inline void hci_remote_ext_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & 0x01);

		conn->ssp_mode = (ev->features[0] & 0x01);
	}

	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	}

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_put(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}

static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	switch (ev->status) {
	case 0x00:
		conn->handle = __le16_to_cpu(ev->handle);
		conn->state = BT_CONNECTED;

		hci_conn_hold_device(conn);
		hci_conn_add_sysfs(conn);
		break;

	case 0x11:	/* Unsupported Feature or Parameter Value */
	case 0x1c:	/* SCO interval rejected */
	case 0x1a:	/* Unsupported Remote Feature */
	case 0x1f:	/* Unspecified error */
		if (conn->out && conn->attempt < 2) {
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
			hci_setup_sync(conn, conn->link->handle);
			goto unlock;
		}
		/* fall through */

	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_proto_connect_cfm(conn, ev->status);
	if (ev->status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}

static inline void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}

static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_sniff_subrate *ev = (void *) skb->data;

	BT_DBG("%s status %d", hdev->name, ev->status);
}
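/* Extended Inquiry Result: like the RSSI variant above, but each response
 * carries an EIR data block.  Entries are added to the inquiry cache with
 * ssp_mode set to 0x01 and the raw EIR data is forwarded to the management
 * interface through mgmt_device_found(). */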
static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct inquiry_data data;
	struct extended_inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp = *((__u8 *) skb->data);

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags)) {
		if (test_bit(HCI_MGMT, &hdev->flags))
			mgmt_discovering(hdev->id, 1);
	}

	hci_dev_lock(hdev);

	for (; num_rsp; num_rsp--, info++) {
		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode = info->pscan_rep_mode;
		data.pscan_period_mode = info->pscan_period_mode;
		data.pscan_mode = 0x00;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset = info->clock_offset;
		data.rssi = info->rssi;
		data.ssp_mode = 0x01;
		hci_inquiry_cache_update(hdev, &data);
		mgmt_device_found(hdev->id, &info->bdaddr, info->dev_class,
						info->rssi, info->data);
	}

	hci_dev_unlock(hdev);
}

static inline u8 hci_get_auth_req(struct hci_conn *conn)
{
	/* If remote requests dedicated bonding follow that lead */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
		/* If both remote and local IO capabilities allow MITM
		 * protection then require it, otherwise don't */
		if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
			return 0x02;
		else
			return 0x03;
	}

	/* If remote requests no-bonding follow that lead */
	if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
		return conn->remote_auth | (conn->auth_type & 0x01);

	return conn->auth_type;
}

static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	hci_conn_hold(conn);

	if (!test_bit(HCI_MGMT, &hdev->flags))
		goto unlock;

	if (test_bit(HCI_PAIRABLE, &hdev->flags) ||
			(conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.capability = conn->io_capability;
		conn->auth_type = hci_get_auth_req(conn);
		cp.authentication = conn->auth_type;

		if ((conn->out == 0x01 || conn->remote_oob == 0x01) &&
				hci_find_remote_oob_data(hdev, &conn->dst))
			cp.oob_data = 0x01;
		else
			cp.oob_data = 0x00;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
						sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = 0x18; /* Pairing not allowed */

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
						sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}

static inline void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_reply *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	conn->remote_cap = ev->capability;
	conn->remote_oob = ev->oob_data;
	conn->remote_auth = ev->authentication;

unlock:
	hci_dev_unlock(hdev);
}
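/* User Confirmation Request (Secure Simple Pairing numeric comparison).
 * The handler below either rejects the request (we need MITM protection
 * but the remote side is NoInputNoOutput), auto-accepts it when neither
 * side needs MITM (possibly after hdev->auto_accept_delay milliseconds),
 * or forwards it to user space via mgmt_user_confirm_request(). */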
static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_MGMT, &hdev->flags))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. The only exception is when we're dedicated bonding
	 * initiators (connect_cfm_cb set) since then we always have the MITM
	 * bit set. */
	if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
					sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If neither side requires MITM protection, auto-accept */
	if ((!loc_mitm || conn->remote_cap == 0x03) &&
			(!rem_mitm || conn->io_capability == 0x03)) {

		/* If we're not the initiator, request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
						hdev->auto_accept_delay);

		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			mod_timer(&conn->auto_accept_timer, jiffies + delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
					sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	mgmt_user_confirm_request(hdev->id, &ev->bdaddr, ev->passkey,
							confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}

static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* To avoid duplicate auth_failed events to user space we check
	 * the HCI_CONN_AUTH_PEND flag which will be set if we
	 * initiated the authentication.
	 * A traditional auth_complete event is always produced as the
	 * initiator and is also mapped to the mgmt_auth_failed event */
	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->pend) && ev->status != 0)
		mgmt_auth_failed(hdev->id, &conn->dst, ev->status);

	hci_conn_put(conn);

unlock:
	hci_dev_unlock(hdev);
}

static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_host_features *ev = (void *) skb->data;
	struct inquiry_entry *ie;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		ie->data.ssp_mode = (ev->features[0] & 0x01);

	hci_dev_unlock(hdev);
}

static inline void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
	struct oob_data *data;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_MGMT, &hdev->flags))
		goto unlock;

	data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
	if (data) {
		struct hci_cp_remote_oob_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		memcpy(cp.hash, data->hash, sizeof(cp.hash));
		memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
									&cp);
	} else {
		struct hci_cp_remote_oob_data_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
									&cp);
	}

unlock:
	hci_dev_unlock(hdev);
}

static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr);
	if (!conn) {
		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
		if (!conn) {
			BT_ERR("No memory for new connection");
			hci_dev_unlock(hdev);
			return;
		}

		conn->dst_type = ev->bdaddr_type;
	}

	if (ev->status) {
		mgmt_connect_failed(hdev->id, &ev->bdaddr, ev->status);
		hci_proto_connect_cfm(conn, ev->status);
		conn->state = BT_CLOSED;
		hci_conn_del(conn);
		goto unlock;
	}

	mgmt_connected(hdev->id, &ev->bdaddr);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = __le16_to_cpu(ev->handle);
	conn->state = BT_CONNECTED;

	hci_conn_hold_device(conn);
	hci_conn_add_sysfs(conn);

	hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);
}

static inline void hci_le_adv_report_evt(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	struct hci_ev_le_advertising_info *ev;
	u8 num_reports;

	num_reports = skb->data[0];
	ev = (void *) &skb->data[1];

	hci_dev_lock(hdev);

	hci_add_adv_entry(hdev, ev);

	while (--num_reports) {
		ev = (void *) (ev->data + ev->length + 1);
		hci_add_adv_entry(hdev, ev);
	}

	hci_dev_unlock(hdev);
}
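/* LE Long Term Key Request: the controller asks for the LTK matching the
 * EDIV/Rand values of an LE connection that is being encrypted.  If a
 * stored key is found it is returned with HCI_OP_LE_LTK_REPLY, otherwise
 * the request is rejected with HCI_OP_LE_LTK_NEG_REPLY. */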
static inline void hci_le_ltk_request_evt(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct link_key *ltk;

	BT_DBG("%s handle %d", hdev->name, __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	ltk = hci_find_ltk(hdev, ev->ediv, ev->random);
	if (ltk == NULL)
		goto not_found;

	memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
	cp.handle = cpu_to_le16(conn->handle);
	conn->pin_length = ltk->pin_len;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}

static inline void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_meta *le_ev = (void *) skb->data;

	skb_pull(skb, sizeof(*le_ev));

	switch (le_ev->subevent) {
	case HCI_EV_LE_CONN_COMPLETE:
		hci_le_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_ADVERTISING_REPORT:
		hci_le_adv_report_evt(hdev, skb);
		break;

	case HCI_EV_LE_LTK_REQ:
		hci_le_ltk_request_evt(hdev, skb);
		break;

	default:
		break;
	}
}

void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	__u8 event = hdr->evt;

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_VERSION:
		hci_remote_version_evt(hdev, skb);
		break;

	case HCI_EV_QOS_SETUP_COMPLETE:
		hci_qos_setup_complete_evt(hdev, skb);
		break;

	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_CHANGED:
		hci_sync_conn_changed_evt(hdev, skb);
		break;

	case HCI_EV_SNIFF_SUBRATE:
		hci_sniff_subrate_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

	default:
		BT_DBG("%s event 0x%x", hdev->name, event);
		break;
	}

	kfree_skb(skb);
	hdev->stat.evt_rx++;
}

/* Generate internal stack event */
void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	skb->dev = (void *) hdev;
	hci_send_to_sock(hdev, skb, NULL);
	kfree_skb(skb);
}

module_param(enable_le, bool, 0444);
MODULE_PARM_DESC(enable_le, "Enable LE support");
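/* Usage note: enable_le is read-only at runtime (0444), so it can only be
 * set when the module is loaded, e.g. (assuming the usual configuration
 * where this code is part of the bluetooth module):
 *
 *	modprobe bluetooth enable_le=1
 *
 * or bluetooth.enable_le=1 on the kernel command line when built in. */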