/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010 Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI Management interface */

#include <linux/module.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>
#include <net/bluetooth/smp.h>

bool enable_hs;

#define MGMT_VERSION	1
#define MGMT_REVISION	3

static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_PAIRABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
};

static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
};

/*
 * These LE scan and inquiry parameters were chosen according to LE General
 * Discovery Procedure specification.
 */
#define LE_SCAN_WIN			0x12
#define LE_SCAN_INT			0x12
#define LE_SCAN_TIMEOUT_LE_ONLY		msecs_to_jiffies(10240)
#define LE_SCAN_TIMEOUT_BREDR_LE	msecs_to_jiffies(5120)

#define INQUIRY_LEN_BREDR	0x08	/* TGAP(100) */
#define INQUIRY_LEN_BREDR_LE	0x04	/* TGAP(100)/2 */

#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

#define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
				!test_bit(HCI_AUTO_OFF, &hdev->dev_flags))

struct pending_cmd {
	struct list_head list;
	u16 opcode;
	int index;
	void *param;
	struct sock *sk;
	void *user_data;
};

/* HCI to MGMT error code conversion table */
static u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_NOT_PAIRED,		/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};

bool mgmt_valid_hdev(struct hci_dev *hdev)
{
	return hdev->dev_type == HCI_BREDR;
}

static u8 mgmt_status(u8 hci_status)
{
	if (hci_status < ARRAY_SIZE(mgmt_status_table))
		return mgmt_status_table[hci_status];

	return MGMT_STATUS_FAILED;
}

static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;
	struct mgmt_ev_cmd_status *ev;
	int err;

	BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);

	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));

	hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_STATUS);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(sizeof(*ev));

	ev = (void *) skb_put(skb, sizeof(*ev));
	ev->status = status;
	ev->opcode = cpu_to_le16(cmd);

	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0)
		kfree_skb(skb);

	return err;
}

static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
			void *rp, size_t rp_len)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;
	struct mgmt_ev_cmd_complete *ev;
	int err;

	BT_DBG("sock %p", sk);

	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));

	hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_COMPLETE);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);

	ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
	ev->opcode = cpu_to_le16(cmd);
	ev->status = status;

	if (rp)
		memcpy(ev->data, rp, rp_len);

	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0)
		kfree_skb(skb);

	return err;
}

static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	BT_DBG("sock %p", sk);

	rp.version = MGMT_VERSION;
	rp.revision = __constant_cpu_to_le16(MGMT_REVISION);

	return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
			    sizeof(rp));
}

static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	const u16 num_commands = ARRAY_SIZE(mgmt_commands);
	const u16 num_events = ARRAY_SIZE(mgmt_events);
	__le16 *opcode;
	size_t rp_size;
	int i, err;

	BT_DBG("sock %p", sk);

	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = __constant_cpu_to_le16(num_commands);
	rp->num_events = __constant_cpu_to_le16(num_events);

	for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
		put_unaligned_le16(mgmt_commands[i], opcode);

	for (i = 0; i < num_events; i++, opcode++)
		put_unaligned_le16(mgmt_events[i], opcode);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
			   rp_size);
	kfree(rp);

	return err;
}

static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (!mgmt_valid_hdev(d))
			continue;

		count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (test_bit(HCI_SETUP, &d->dev_flags))
			continue;

		if (!mgmt_valid_hdev(d))
			continue;

		rp->index[count++] = cpu_to_le16(d->id);
		BT_DBG("Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
			   rp_len);

	kfree(rp);

	return err;
}

static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_PAIRABLE;

	if (lmp_ssp_capable(hdev))
		settings |= MGMT_SETTING_SSP;

	if (lmp_bredr_capable(hdev)) {
		settings |= MGMT_SETTING_CONNECTABLE;
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_DISCOVERABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;
	}

	if (enable_hs)
		settings |= MGMT_SETTING_HS;

	if (lmp_le_capable(hdev))
		settings |= MGMT_SETTING_LE;

	return settings;
}

static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_PAIRABLE;

	if (lmp_bredr_capable(hdev))
		settings |= MGMT_SETTING_BREDR;

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_LE;

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_SSP;

	if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_HS;

	return settings;
}

#define PNP_INFO_SVCLASS_ID	0x1200

static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}

static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}

static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}

static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}

static void update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return;

	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

static u8 get_service_classes(struct hci_dev *hdev)
{
	struct bt_uuid *uuid;
	u8 val = 0;

	list_for_each_entry(uuid, &hdev->uuids, list)
		val |= uuid->svc_hint;

	return val;
}

static void update_class(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 cod[3];

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))
		return;

	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return;

	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}

static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	update_eir(&req);
	update_class(&req);

	hci_dev_unlock(hdev);

	hci_req_run(&req, NULL);
}

static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
}

static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
			    sizeof(rp));
}

static void mgmt_pending_free(struct pending_cmd *cmd)
{
	sock_put(cmd->sk);
	kfree(cmd->param);
	kfree(cmd);
}

static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
					    struct hci_dev *hdev, void *data,
					    u16 len)
{
	struct pending_cmd *cmd;

	cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return NULL;

	cmd->opcode = opcode;
	cmd->index = hdev->id;

	cmd->param = kmalloc(len, GFP_KERNEL);
	if (!cmd->param) {
		kfree(cmd);
		return NULL;
	}

	if (data)
		memcpy(cmd->param, data, len);

	cmd->sk = sk;
	sock_hold(sk);

	list_add(&cmd->list, &hdev->mgmt_pending);

	return cmd;
}

static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
				 void (*cb)(struct pending_cmd *cmd,
					    void *data),
				 void *data)
{
	struct pending_cmd *cmd, *tmp;

	list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
		if (opcode > 0 && cmd->opcode != opcode)
			continue;

		cb(cmd, data);
	}
}

static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
{
	struct pending_cmd *cmd;

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		if (cmd->opcode == opcode)
			return cmd;
	}

	return NULL;
}

static void mgmt_pending_remove(struct pending_cmd *cmd)
{
	list_del(&cmd->list);
	mgmt_pending_free(cmd);
}

static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return cmd_complete(sk, hdev->id, opcode, 0, &settings,
			    sizeof(settings));
}

static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->power_off);

		if (cp->val) {
			mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
					 data, len);
			err = mgmt_powered(hdev, 1);
			goto failed;
		}
	}

	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val)
		queue_work(hdev->req_workqueue, &hdev->power_on);
	else
		queue_work(hdev->req_workqueue, &hdev->power_off.work);

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
		      struct sock *skip_sk)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;

	skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));
	hdr->opcode = cpu_to_le16(event);
	if (hdev)
		hdr->index = cpu_to_le16(hdev->id);
	else
		hdr->index = __constant_cpu_to_le16(MGMT_INDEX_NONE);
	hdr->len = cpu_to_le16(data_len);

	if (data)
		memcpy(skb_put(skb, data_len), data, data_len);

	/* Time stamp */
	__net_timestamp(skb);

	hci_send_to_control(skb, skip_sk);
	kfree_skb(skb);

	return 0;
}

static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev;

	ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
}

static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct pending_cmd *cmd;
	u16 timeout;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);
	if (!cp->val && timeout > 0)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
			change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
		if (hdev->discov_timeout > 0) {
			cancel_delayed_work(&hdev->discov_off);
			hdev->discov_timeout = 0;
		}

		if (cp->val && timeout > 0) {
			hdev->discov_timeout = timeout;
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
				msecs_to_jiffies(hdev->discov_timeout * 1000));
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	scan = SCAN_PAGE;

	if (cp->val)
		scan |= SCAN_INQUIRY;
	else
		cancel_delayed_work(&hdev->discov_off);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	if (err < 0)
		mgmt_pending_remove(cmd);

	if (cp->val)
		hdev->discov_timeout = timeout;

failed:
	hci_dev_unlock(hdev);
	return err;
}

static void write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = __constant_cpu_to_le16(0x0100);
	} else {
		type = PAGE_SCAN_TYPE_STANDARD;	/* default */

		/* default 1.28 sec page scan */
		acp.interval = __constant_cpu_to_le16(0x0800);
	}

	acp.window = __constant_cpu_to_le16(0x0012);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}

static void set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
			changed = true;

		if (cp->val) {
			set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
		} else {
			clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == test_bit(HCI_PSCAN, &hdev->flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		scan = SCAN_PAGE;
	} else {
		scan = 0;

		if (test_bit(HCI_ISCAN, &hdev->flags) &&
		    hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);
	}

	hci_req_init(&req, hdev);

	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);

	/* If we're going from non-connectable to connectable or
	 * vice-versa when fast connectable is enabled ensure that fast
	 * connectable gets disabled. write_fast_connectable won't do
	 * anything if the page scan parameters are already what they
	 * should be.
	 */
	if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
		write_fast_connectable(&req, false);

	err = hci_req_run(&req, set_connectable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);
	else
		clear_bit(HCI_PAIRABLE, &hdev->dev_flags);

	err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
	if (err < 0)
		goto failed;

	err = new_settings(hdev, sk);

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != test_bit(HCI_LINK_SECURITY,
					  &hdev->dev_flags)) {
			change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	val = !!cp->val;

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (val != test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			change_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;

	BT_DBG("request for %s", hdev->name);

	if (!enable_hs)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_INVALID_PARAMS);

	if (cp->val)
		set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	else
		clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);

	return send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
}

static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct pending_cmd *cmd;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_INVALID_PARAMS);

	/* LE-only devices do not allow toggling LE on/off */
	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = lmp_le_br_capable(hdev);
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
			   &hci_cp);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

/* This is a helper function to test for pending mgmt commands that can
 * cause CoD or EIR HCI commands. We can only allow one such pending
 * mgmt command at a time since otherwise we cannot easily track what
 * the current values are, will be, and based on that calculate if a new
 * HCI command needs to be sent and if yes with what value.
 */
static bool pending_eir_or_class(struct hci_dev *hdev)
{
	struct pending_cmd *cmd;

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		switch (cmd->opcode) {
		case MGMT_OP_ADD_UUID:
		case MGMT_OP_REMOVE_UUID:
		case MGMT_OP_SET_DEV_CLASS:
		case MGMT_OP_SET_POWERED:
			return true;
		}
	}

	return false;
}

static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};

static u8 get_uuid_size(const u8 *uuid)
{
	u32 val;

	if (memcmp(uuid, bluetooth_base_uuid, 12))
		return 128;

	val = get_unaligned_le32(&uuid[12]);
	if (val > 0xffff)
		return 32;

	return 16;
}

static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
{
	struct pending_cmd *cmd;

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(mgmt_op, hdev);
	if (!cmd)
		goto unlock;

	cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
		     hdev->dev_class, 3);

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

static void add_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}

static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
				   hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}

static bool enable_service_cache(struct hci_dev *hdev)
{
	if (!hdev_is_powered(hdev))
		return false;

	if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
				   CACHE_TIMEOUT);
		return true;
	}

	return false;
}

static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}

static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		err = hci_uuids_clear(hdev);

		if (enable_service_cache(hdev)) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
					   0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static void set_class_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}

static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		update_eir(&req);
	}

	update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	u16 key_count, expected_len;
	int i;

	key_count = __le16_to_cpu(cp->key_count);

	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_link_key_info);
	if (expected_len != len) {
		BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);

	BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
	       key_count);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR)
			return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	set_bit(HCI_LINK_KEYS, &hdev->dev_flags);

	if (cp->debug_keys)
		set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
	else
		clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len);
	}

	cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}

static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
			   u8 addr_type, struct sock *skip_sk)
{
	struct mgmt_ev_device_unpaired ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;

	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
			  skip_sk);
}

static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_cp_disconnect dc;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
	else
		err = hci_remove_ltk(hdev, &cp->addr.bdaddr);

	if (err < 0) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				   MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
		goto unlock;
	}

	if (cp->disconnect) {
		if (cp->addr.type == BDADDR_BREDR)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
						       &cp->addr.bdaddr);
	} else {
		conn = NULL;
	}

	if (!conn) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
				   &rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = 0x13; /* Remote User Terminated Connection */
	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct hci_cp_disconnect dc;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = HCI_ERROR_REMOTE_USER_TERM;

	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
{
	switch (link_type) {
	case LE_LINK:
		switch (addr_type) {
		case ADDR_LE_DEV_PUBLIC:
			return BDADDR_LE_PUBLIC;

		default:
			/* Fallback to LE Random address type */
			return BDADDR_LE_RANDOM;
		}

	default:
		/* Fallback to BR/EDR type */
		return BDADDR_BREDR;
	}
}

static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *c;
	size_t rp_len;
	int err;
	u16 i;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			i++;
	}

	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
	rp = kmalloc(rp_len, GFP_KERNEL);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
		bacpy(&rp->addr[i].bdaddr, &c->dst);
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			continue;
		i++;
	}

	rp->conn_count = cpu_to_le16(i);

	/* Recalculate length in case of filtered SCO connections, etc */
	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));

	err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
			   rp_len);

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}

static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		BT_ERR("PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					 MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_cp_set_io_capability *cp = data;

	BT_DBG("");

	hci_dev_lock(hdev);

	hdev->io_capability = cp->io_capability;

	BT_DBG("%s IO capability set to 0x%02x", hdev->name,
	       hdev->io_capability);

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
			    0);
}

static struct pending_cmd *find_pairing(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct pending_cmd *cmd;

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
			continue;

		if (cmd->user_data != conn)
			continue;

		return cmd;
	}

	return NULL;
}

static void pairing_complete(struct pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
		     &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	mgmt_pending_remove(cmd);
}

static void pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status %u", status);

	cmd = find_pairing(conn);
	if (!cmd)
		BT_DBG("Unable to find a pending command");
	else
		pairing_complete(cmd, mgmt_status(status));
}

static void le_connect_complete_cb(struct hci_conn *conn, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status %u", status);

	if (!status)
		return;

	cmd = find_pairing(conn);
	if (!cmd)
		BT_DBG("Unable to find a pending command");
	else
		pairing_complete(cmd, mgmt_status(status));
}

static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	if (cp->io_cap == 0x03)
		auth_type = HCI_AT_DEDICATED_BONDING;
	else
		auth_type = HCI_AT_DEDICATED_BONDING_MITM;

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr,
				   cp->addr.type, sec_level, auth_type);
	else
		conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr,
				   cp->addr.type, sec_level, auth_type);

	if (IS_ERR(conn)) {
		int status;

		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   status, &rp,
				   sizeof(rp));
		goto unlock;
	}

	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	/* For LE, just connecting isn't a proof that the pairing finished */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	if (cp->io_cap == 0x03)
		auth_type = HCI_AT_DEDICATED_BONDING;
	else
		auth_type = HCI_AT_DEDICATED_BONDING_MITM;

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr,
				   cp->addr.type, sec_level, auth_type);
	else
		conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr,
				   cp->addr.type, sec_level, auth_type);

	if (IS_ERR(conn)) {
		int status;

		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   status, &rp, sizeof(rp));
		goto unlock;
	}

	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR)
		conn->connect_cfm_cb = pairing_complete_cb;
	else
		conn->connect_cfm_cb = le_connect_complete_cb;

	conn->security_cfm_cb = pairing_complete_cb;
	conn->disconn_cfm_cb = pairing_complete_cb;
	conn->io_capability = cp->io_cap;
	cmd->user_data = conn;

	if (conn->state == BT_CONNECTED &&
	    hci_conn_security(conn, sec_level, auth_type))
		pairing_complete(cmd, 0);

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	pairing_complete(cmd, MGMT_STATUS_CANCELLED);

	err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
			   addr, sizeof(*addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_POWERED, addr,
				   sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);

	if (!conn) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_CONNECTED, addr,
				   sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		/* Continue with pairing via SMP */
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);

		if (!err)
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_SUCCESS, addr,
					   sizeof(*addr));
		else
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_FAILED, addr,
					   sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}

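/* The PIN code, user confirm and user passkey reply handlers below are
 * thin wrappers around user_pairing_resp().
 */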
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_PIN_CODE_NEG_REPLY,
				 HCI_OP_PIN_CODE_NEG_REPLY, 0);
}

static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	BT_DBG("");

	if (len != sizeof(*cp))
		return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				  MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}

static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}

static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}

static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}

static void update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

static void set_name_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_cp_set_local_name *cp;
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (status)
		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
			   mgmt_status(status));
	else
		cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
			     cp, sizeof(*cp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

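/* Handle MGMT_OP_SET_LOCAL_NAME: store the new name and, if the
 * controller is powered, push it out via HCI together with updated
 * EIR and advertising data.
 */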
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		if (err < 0)
			goto failed;

		err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
				 sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		update_name(&req);
		update_eir(&req);
	}

	if (lmp_le_capable(hdev))
		hci_update_ad(&req);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct pending_cmd *cmd;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_add_remote_oob_data *cp = data;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr, cp->hash,
				      cp->randomizer);
	if (err < 0)
		status = MGMT_STATUS_FAILED;
	else
		status = MGMT_STATUS_SUCCESS;

	err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, status,
			   &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);
	return err;
}

static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_remove_remote_oob_data *cp = data;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
	if (err < 0)
		status = MGMT_STATUS_INVALID_PARAMS;
	else
		status = MGMT_STATUS_SUCCESS;

	err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
			   status, &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);
	return err;
}

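/* Kick off the BR/EDR inquiry phase of an interleaved (BR/EDR + LE)
 * discovery.
 */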
int mgmt_interleaved_discovery(struct hci_dev *hdev)
{
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	err = hci_do_inquiry(hdev, INQUIRY_LEN_BREDR_LE);
	if (err < 0)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	hci_dev_unlock(hdev);

	return err;
}

static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (hdev->discovery.state != DISCOVERY_STOPPED) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hdev->discovery.type = cp->type;

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		if (!lmp_bredr_capable(hdev)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_NOT_SUPPORTED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		err = hci_do_inquiry(hdev, INQUIRY_LEN_BREDR);
		break;

	case DISCOV_TYPE_LE:
		if (!lmp_host_le_capable(hdev)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_NOT_SUPPORTED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		err = hci_le_scan(hdev, LE_SCAN_ACTIVE, LE_SCAN_INT,
				  LE_SCAN_WIN, LE_SCAN_TIMEOUT_LE_ONLY);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		if (!lmp_host_le_capable(hdev) || !lmp_bredr_capable(hdev)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_NOT_SUPPORTED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		err = hci_le_scan(hdev, LE_SCAN_ACTIVE, LE_SCAN_INT,
				  LE_SCAN_WIN, LE_SCAN_TIMEOUT_BREDR_LE);
		break;

	default:
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_INVALID_PARAMS);
		mgmt_pending_remove(cmd);
		goto failed;
	}

	if (err < 0)
		mgmt_pending_remove(cmd);
	else
		hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}

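/* Stop an ongoing discovery. Depending on the current state this
 * cancels the inquiry or LE scan, or aborts a pending remote name
 * request.
 */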
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct pending_cmd *cmd;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_REJECTED, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	if (hdev->discovery.type != mgmt_cp->type) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	switch (hdev->discovery.state) {
	case DISCOVERY_FINDING:
		if (test_bit(HCI_INQUIRY, &hdev->flags))
			err = hci_cancel_inquiry(hdev);
		else
			err = hci_cancel_le_scan(hdev);

		break;

	case DISCOVERY_RESOLVING:
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e) {
			mgmt_pending_remove(cmd);
			err = cmd_complete(sk, hdev->id,
					   MGMT_OP_STOP_DISCOVERY, 0,
					   &mgmt_cp->type,
					   sizeof(mgmt_cp->type));
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
			goto unlock;
		}

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		err = hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ_CANCEL,
				   sizeof(cp), &cp);

		break;

	default:
		BT_DBG("unknown discovery state %u", hdev->discovery.state);
		err = -EFAULT;
	}

	if (err < 0)
		mgmt_pending_remove(cmd);
	else
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *e;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
				 MGMT_STATUS_FAILED);
		goto failed;
	}

	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!e) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
				 MGMT_STATUS_INVALID_PARAMS);
		goto failed;
	}

	if (cp->name_known) {
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
	} else {
		e->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, e);
	}

	err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
			   sizeof(cp->addr));

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_block_device *cp = data;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
	if (err < 0)
		status = MGMT_STATUS_FAILED;
	else
		status = MGMT_STATUS_SUCCESS;

	err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
			   &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}

static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_unblock_device *cp = data;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
	if (err < 0)
		status = MGMT_STATUS_INVALID_PARAMS;
	else
		status = MGMT_STATUS_SUCCESS;

	err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
			   &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}

static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_device_id *cp = data;
	struct hci_request req;
	int err;
	__u16 source;

	BT_DBG("%s", hdev->name);

	source = __le16_to_cpu(cp->source);

	if (source > 0x0002)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->devid_source = source;
	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
	hdev->devid_product = __le16_to_cpu(cp->product);
	hdev->devid_version = __le16_to_cpu(cp->version);

	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);

	hci_req_init(&req, hdev);
	update_eir(&req);
	hci_req_run(&req, NULL);

	hci_dev_unlock(hdev);

	return err;
}

static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
			   mgmt_status(status));
	} else {
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
		else
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}

static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	if (!lmp_bredr_capable(hdev) || hdev->hci_ver < BLUETOOTH_VER_1_2)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	if (!hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_POWERED);

	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}

static bool ltk_is_valid(struct mgmt_ltk_info *key)
{
	if (key->authenticated != 0x00 && key->authenticated != 0x01)
		return false;
	if (key->master != 0x00 && key->master != 0x01)
		return false;
	if (!bdaddr_type_is_le(key->addr.type))
		return false;
	return true;
}

static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	u16 key_count, expected_len;
	int i, err;

	key_count = __le16_to_cpu(cp->key_count);

	expected_len = sizeof(*cp) + key_count * sizeof(struct mgmt_ltk_info);
	if (expected_len != len) {
		BT_ERR("load_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s key_count %u", hdev->name, key_count);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return cmd_status(sk, hdev->id,
					  MGMT_OP_LOAD_LONG_TERM_KEYS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type;

		if (key->master)
			type = HCI_SMP_LTK;
		else
			type = HCI_SMP_LTK_SLAVE;

		hci_add_ltk(hdev, &key->addr.bdaddr,
			    bdaddr_to_le(key->addr.type),
			    type, 0, key->authenticated, key->val,
			    key->enc_size, key->ediv, key->rand);
	}

	err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}

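/* Management command dispatch table, indexed by opcode. For variable
 * length commands data_len is the minimum accepted parameter size.
 */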
static const struct mgmt_handler {
	int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
		     u16 data_len);
	bool var_len;
	size_t data_len;
} mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version, false, MGMT_READ_VERSION_SIZE },
	{ read_commands, false, MGMT_READ_COMMANDS_SIZE },
	{ read_index_list, false, MGMT_READ_INDEX_LIST_SIZE },
	{ read_controller_info, false, MGMT_READ_INFO_SIZE },
	{ set_powered, false, MGMT_SETTING_SIZE },
	{ set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable, false, MGMT_SETTING_SIZE },
	{ set_fast_connectable, false, MGMT_SETTING_SIZE },
	{ set_pairable, false, MGMT_SETTING_SIZE },
	{ set_link_security, false, MGMT_SETTING_SIZE },
	{ set_ssp, false, MGMT_SETTING_SIZE },
	{ set_hs, false, MGMT_SETTING_SIZE },
	{ set_le, false, MGMT_SETTING_SIZE },
	{ set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid, false, MGMT_ADD_UUID_SIZE },
	{ remove_uuid, false, MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE },
	{ load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE },
	{ disconnect, false, MGMT_DISCONNECT_SIZE },
	{ get_connections, false, MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device, false, MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data, false, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
	{ remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery, false, MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
	{ block_device, false, MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
};

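/* Entry point for raw management commands from an HCI control socket:
 * validate the header, resolve the target controller and dispatch to
 * the handler table above.
 */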
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct mgmt_handler *handler;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
	    mgmt_handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	if ((hdev && opcode < MGMT_OP_READ_INFO) ||
	    (!hdev && opcode >= MGMT_OP_READ_INFO)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	handler = &mgmt_handlers[opcode];

	if ((handler->var_len && len < handler->data_len) ||
	    (!handler->var_len && len != handler->data_len)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev)
		mgmt_init_hdev(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}

static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
{
	u8 *status = data;

	cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}

int mgmt_index_added(struct hci_dev *hdev)
{
	if (!mgmt_valid_hdev(hdev))
		return -ENOTSUPP;

	return mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
}

int mgmt_index_removed(struct hci_dev *hdev)
{
	u8 status = MGMT_STATUS_INVALID_INDEX;

	if (!mgmt_valid_hdev(hdev))
		return -ENOTSUPP;

	mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);

	return mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
}

struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};

static void settings_rsp(struct pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}

static void set_bredr_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 scan = 0;

	/* Ensure that fast connectable is disabled. This function will
	 * not do anything if the page scan parameters are already what
	 * they should be.
	 */
	write_fast_connectable(req, false);

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		scan |= SCAN_PAGE;
	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
		scan |= SCAN_INQUIRY;

	if (scan)
		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void powered_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	hci_dev_unlock(hdev);

	if (match.sk)
		sock_put(match.sk);
}

static int powered_update_hci(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 link_sec;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 ssp = 1;

		hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
	}

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 1;
		cp.simul = lmp_le_br_capable(hdev);

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		set_bredr_scan(&req);
		update_class(&req);
		update_name(&req);
		update_eir(&req);
	}

	return hci_req_run(&req, powered_complete);
}

int mgmt_powered(struct hci_dev *hdev, u8 powered)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
	u8 zero_cod[] = { 0, 0, 0 };
	int err;

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return 0;

	if (powered) {
		if (powered_update_hci(hdev) == 0)
			return 0;

		mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
				     &match);
		goto new_settings;
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
	mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);

	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
			   zero_cod, sizeof(zero_cod), NULL);

new_settings:
	err = new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	return err;
}

int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed = false;
	int err = 0;

	if (discoverable) {
		if (!test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
			changed = true;
	} else {
		if (test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
			changed = true;
	}

	mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev, settings_rsp,
			     &match);

	if (changed)
		err = new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	return err;
}

int mgmt_connectable(struct hci_dev *hdev, u8 connectable)
{
	struct pending_cmd *cmd;
	bool changed = false;
	int err = 0;

	if (connectable) {
		if (!test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags))
			changed = true;
	} else {
		if (test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags))
			changed = true;
	}

	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);

	if (changed)
		err = new_settings(hdev, cmd ? cmd->sk : NULL);

	return err;
}

int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
{
	u8 mgmt_err = mgmt_status(status);

	if (scan & SCAN_PAGE)
		mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
				     cmd_status_rsp, &mgmt_err);

	if (scan & SCAN_INQUIRY)
		mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
				     cmd_status_rsp, &mgmt_err);

	return 0;
}

int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		      bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = BDADDR_BREDR;
	ev.key.type = key->type;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
	ev.key.pin_len = key->pin_len;

	return mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}

int mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.authenticated = key->authenticated;
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;

	if (key->type == HCI_SMP_LTK)
		ev.key.master = 1;

	memcpy(ev.key.rand, key->rand, sizeof(key->rand));
	memcpy(ev.key.val, key->val, sizeof(key->val));

	return mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev),
			  NULL);
}

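/* Emit the Device Connected event, appending the remote name and class
 * of device to the event's EIR data when they are known.
 */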
int mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			  u8 addr_type, u32 flags, u8 *name, u8 name_len,
			  u8 *dev_class)
{
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);

	ev->flags = __cpu_to_le32(flags);

	if (name_len > 0)
		eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
					  name, name_len);

	if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
		eir_len = eir_append_data(ev->eir, eir_len,
					  EIR_CLASS_OF_DEV, dev_class, 3);

	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
			  sizeof(*ev) + eir_len, NULL);
}

static void disconnect_rsp(struct pending_cmd *cmd, void *data)
{
	struct mgmt_cp_disconnect *cp = cmd->param;
	struct sock **sk = data;
	struct mgmt_rp_disconnect rp;

	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
		     sizeof(rp));

	*sk = cmd->sk;
	sock_hold(*sk);

	mgmt_pending_remove(cmd);
}

static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;
	struct mgmt_rp_unpair_device rp;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}

int mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u8 reason)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;
	int err;

	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	err = mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev),
			 sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	return err;
}

int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			   u8 link_type, u8 addr_type, u8 status)
{
	struct mgmt_rp_disconnect rp;
	struct pending_cmd *cmd;
	int err;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return -ENOENT;

	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = link_to_bdaddr(link_type, addr_type);

	err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
			   mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);

	return err;
}

int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	return mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}

int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = BDADDR_BREDR;
	ev.secure = secure;

	return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				 u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_rp_pin_code_reply rp;
	int err;

	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
	if (!cmd)
		return -ENOENT;

	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = BDADDR_BREDR;

	err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
			   mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);

	return err;
}

int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_rp_pin_code_reply rp;
	int err;

	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
	if (!cmd)
		return -ENOENT;

	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = BDADDR_BREDR;

	err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
			   mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);

	return err;
}

int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, __le32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	BT_DBG("%s", hdev->name);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.confirm_hint = confirm_hint;
	ev.value = value;

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev;

	BT_DBG("%s", hdev->name);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct pending_cmd *cmd;
	struct mgmt_rp_user_confirm_reply rp;
	int err;

	cmd = mgmt_pending_find(opcode, hdev);
	if (!cmd)
		return -ENOENT;

	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = link_to_bdaddr(link_type, addr_type);
	err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
			   &rp, sizeof(rp));

	mgmt_pending_remove(cmd);

	return err;
}

int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}

int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}

int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}

int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}

int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev;

	BT_DBG("%s", hdev->name);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.passkey = __cpu_to_le32(passkey);
	ev.entered = entered;

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}

int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		     u8 addr_type, u8 status)
{
	struct mgmt_ev_auth_failed ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	return mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
}

int mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed = false;
	int err = 0;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return 0;
	}

	if (test_bit(HCI_AUTH, &hdev->flags)) {
		if (!test_and_set_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
			changed = true;
	} else {
		if (test_and_clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
			changed = true;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		err = new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	return err;
}

static void clear_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!lmp_ext_inq_capable(hdev))
		return;

	memset(hdev->eir, 0, sizeof(hdev->eir));

	memset(&cp, 0, sizeof(cp));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

int mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;
	int err = 0;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
						 &hdev->dev_flags))
			err = new_settings(hdev, NULL);

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);

		return err;
	}

	if (enable) {
		if (!test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
			changed = true;
	} else {
		if (test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
			changed = true;
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		err = new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_req_init(&req, hdev);

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		update_eir(&req);
	else
		clear_eir(&req);

	hci_req_run(&req, NULL);

	return err;
}

static void sk_lookup(struct pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}
}

int mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				   u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
	int err = 0;

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status)
		err = mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
				 3, NULL);

	if (match.sk)
		sock_put(match.sk);

	return err;
}

int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct pending_cmd *cmd;

	if (status)
		return 0;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
			return 0;
	}

	return mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			  cmd ? cmd->sk : NULL);
}

int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
					    u8 *randomizer, u8 status)
{
	struct pending_cmd *cmd;
	int err;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return -ENOENT;

	if (status) {
		err = cmd_status(cmd->sk, hdev->id,
				 MGMT_OP_READ_LOCAL_OOB_DATA,
				 mgmt_status(status));
	} else {
		struct mgmt_rp_read_local_oob_data rp;

		memcpy(rp.hash, hash, sizeof(rp.hash));
		memcpy(rp.randomizer, randomizer, sizeof(rp.randomizer));

		err = cmd_complete(cmd->sk, hdev->id,
				   MGMT_OP_READ_LOCAL_OOB_DATA, 0, &rp,
				   sizeof(rp));
	}

	mgmt_pending_remove(cmd);

	return err;
}

int mgmt_le_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed = false;
	int err = 0;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		if (enable && test_and_clear_bit(HCI_LE_ENABLED,
						 &hdev->dev_flags))
			err = new_settings(hdev, NULL);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);

		return err;
	}

	if (enable) {
		if (!test_and_set_bit(HCI_LE_ENABLED, &hdev->dev_flags))
			changed = true;
	} else {
		if (test_and_clear_bit(HCI_LE_ENABLED, &hdev->dev_flags))
			changed = true;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	if (changed)
		err = new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	return err;
}

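/* Report a device found during discovery. The EIR data is copied as-is
 * and a class of device field is appended if one is not already
 * present.
 */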
int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name,
		      u8 ssp, u8 *eir, u16 eir_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *) buf;
	size_t ev_size;

	/* Leave 5 bytes for a potential CoD field */
	if (sizeof(*ev) + eir_len + 5 > sizeof(buf))
		return -EINVAL;

	memset(buf, 0, sizeof(buf));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	if (cfm_name)
		ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
	if (!ssp)
		ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);

	if (eir_len > 0)
		memcpy(ev->eir, eir, eir_len);

	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	ev->eir_len = cpu_to_le16(eir_len);
	ev_size = sizeof(*ev) + eir_len;

	return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}

int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		     u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	struct mgmt_ev_device_found *ev;
	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
	u16 eir_len;

	ev = (struct mgmt_ev_device_found *) buf;

	memset(buf, 0, sizeof(buf));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
				  name_len);

	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev,
			  sizeof(*ev) + eir_len, NULL);
}

int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	u8 type;
	int err;

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
	if (!cmd)
		return -ENOENT;

	type = hdev->discovery.type;

	err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
			   &type, sizeof(type));
	mgmt_pending_remove(cmd);

	return err;
}

int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
	if (!cmd)
		return -ENOENT;

	err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
			   &hdev->discovery.type,
			   sizeof(hdev->discovery.type));
	mgmt_pending_remove(cmd);

	return err;
}

int mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev;
	struct pending_cmd *cmd;

	BT_DBG("%s discovering %u", hdev->name, discovering);

	if (discovering)
		cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
	else
		cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);

	if (cmd != NULL) {
		u8 type = hdev->discovery.type;

		cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
			     sizeof(type));
		mgmt_pending_remove(cmd);
	}

	memset(&ev, 0, sizeof(ev));
	ev.type = hdev->discovery.type;
	ev.discovering = discovering;

	return mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}

int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct pending_cmd *cmd;
	struct mgmt_ev_device_blocked ev;

	cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = type;

	return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
			  cmd ? cmd->sk : NULL);
}

int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct pending_cmd *cmd;
	struct mgmt_ev_device_unblocked ev;

	cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = type;

	return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
			  cmd ? cmd->sk : NULL);
}

module_param(enable_hs, bool, 0644);
MODULE_PARM_DESC(enable_hs, "Enable High Speed support");