// SPDX-License-Identifier: GPL-2.0
/*
 * BlueZ - Bluetooth protocol stack for Linux
 *
 * Copyright (C) 2021 Intel Corporation
 * Copyright 2023 NXP
 */

#include <linux/property.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_codec.h"
#include "hci_debugfs.h"
#include "smp.h"
#include "eir.h"
#include "msft.h"
#include "aosp.h"
#include "leds.h"

static void hci_cmd_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	bt_dev_dbg(hdev, "result 0x%2.2x", result);

	if (hdev->req_status != HCI_REQ_PEND)
		return;

	hdev->req_result = result;
	hdev->req_status = HCI_REQ_DONE;

	/* Free the request command so it is not used as the response */
	kfree_skb(hdev->req_skb);
	hdev->req_skb = NULL;

	if (skb) {
		struct sock *sk = hci_skb_sk(skb);

		/* Drop sk reference if set */
		if (sk)
			sock_put(sk);

		hdev->req_rsp = skb_get(skb);
	}

	wake_up_interruptible(&hdev->req_wait_q);
}

static struct sk_buff *hci_cmd_sync_alloc(struct hci_dev *hdev, u16 opcode,
					  u32 plen, const void *param,
					  struct sock *sk)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		skb_put_data(skb, param, plen);

	bt_dev_dbg(hdev, "skb len %d", skb->len);

	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
	hci_skb_opcode(skb) = opcode;

	/* Grab a reference if the command needs to be associated with a sock
	 * (e.g. likely the mgmt socket that initiated the command).
	 */
	if (sk) {
		hci_skb_sk(skb) = sk;
		sock_hold(sk);
	}

	return skb;
}

static void hci_cmd_sync_add(struct hci_request *req, u16 opcode, u32 plen,
			     const void *param, u8 event, struct sock *sk)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, sk);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	hci_skb_event(skb) = event;

	skb_queue_tail(&req->cmd_q, skb);
}
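
/* Splice the built request onto hdev->cmd_q and kick cmd_work. The tail
 * command of the request carries the completion callback, so
 * hci_cmd_sync_complete() runs once the controller has answered the last
 * command of the request.
 */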
static int hci_cmd_sync_run(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->hci.req_complete_skb = hci_cmd_sync_complete;
	bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

/* This function requires the caller holds hdev->req_lock. */
struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout,
				  struct sock *sk)
{
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	bt_dev_dbg(hdev, "Opcode 0x%4.4x", opcode);

	hci_req_init(&req, hdev);

	hci_cmd_sync_add(&req, opcode, plen, param, event, sk);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_cmd_sync_run(&req);
	if (err < 0)
		return ERR_PTR(err);

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
					       hdev->req_status != HCI_REQ_PEND,
					       timeout);

	if (err == -ERESTARTSYS)
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = 0;
	hdev->req_result = 0;
	skb = hdev->req_rsp;
	hdev->req_rsp = NULL;

	bt_dev_dbg(hdev, "end: err %d", err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_sk);

/* This function requires the caller holds hdev->req_lock. */
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_sk(hdev, opcode, plen, param, 0, timeout, NULL);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Send HCI command and wait for command complete event */
struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			     const void *param, u32 timeout)
{
	struct sk_buff *skb;

	if (!test_bit(HCI_UP, &hdev->flags))
		return ERR_PTR(-ENETDOWN);

	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);

	hci_req_sync_lock(hdev);
	skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
	hci_req_sync_unlock(hdev);

	return skb;
}
EXPORT_SYMBOL(hci_cmd_sync);
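
/* Typical usage (illustrative sketch, not part of this file): callers send a
 * command and consume the response skb, e.g.:
 *
 *	skb = hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			   HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	rp = (struct hci_rp_read_local_version *)skb->data;
 *	...
 *	kfree_skb(skb);
 *
 * The response skb must always be freed by the caller with kfree_skb().
 */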

/* This function requires the caller holds hdev->req_lock. */
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	return __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout,
				 NULL);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

/* This function requires the caller holds hdev->req_lock. */
int __hci_cmd_sync_status_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
			     const void *param, u8 event, u32 timeout,
			     struct sock *sk)
{
	struct sk_buff *skb;
	u8 status;

	skb = __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout, sk);
	if (IS_ERR(skb)) {
		if (!event)
			bt_dev_err(hdev, "Opcode 0x%4.4x failed: %ld", opcode,
				   PTR_ERR(skb));
		return PTR_ERR(skb);
	}

	/* If the command returns a status event, skb will be set to NULL as
	 * there are no parameters; in case of failure IS_ERR(skb) would have
	 * been true and the actual error can be found with PTR_ERR(skb).
	 */
	if (!skb)
		return 0;

	status = skb->data[0];

	kfree_skb(skb);

	return status;
}
EXPORT_SYMBOL(__hci_cmd_sync_status_sk);

int __hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen,
			  const void *param, u32 timeout)
{
	return __hci_cmd_sync_status_sk(hdev, opcode, plen, param, 0, timeout,
					NULL);
}
EXPORT_SYMBOL(__hci_cmd_sync_status);
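
/* Return convention of the *_status() helpers: a negative errno indicates a
 * stack or controller failure (HCI status codes are mapped via
 * bt_to_errno()), 0 means success, and a positive value is the first
 * response parameter (the status byte) of a Command Complete event.
 */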

static void hci_cmd_sync_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_sync_work);

	bt_dev_dbg(hdev, "");

	/* Dequeue all entries and run them */
	while (1) {
		struct hci_cmd_sync_work_entry *entry;

		mutex_lock(&hdev->cmd_sync_work_lock);
		entry = list_first_entry_or_null(&hdev->cmd_sync_work_list,
						 struct hci_cmd_sync_work_entry,
						 list);
		if (entry)
			list_del(&entry->list);
		mutex_unlock(&hdev->cmd_sync_work_lock);

		if (!entry)
			break;

		bt_dev_dbg(hdev, "entry %p", entry);

		if (entry->func) {
			int err;

			hci_req_sync_lock(hdev);
			err = entry->func(hdev, entry->data);
			if (entry->destroy)
				entry->destroy(hdev, entry->data, err);
			hci_req_sync_unlock(hdev);
		}

		kfree(entry);
	}
}

static void hci_cmd_sync_cancel_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_sync_cancel_work);

	cancel_delayed_work_sync(&hdev->cmd_timer);
	cancel_delayed_work_sync(&hdev->ncmd_timer);
	atomic_set(&hdev->cmd_cnt, 1);

	wake_up_interruptible(&hdev->req_wait_q);
}

static int hci_scan_disable_sync(struct hci_dev *hdev);
static int scan_disable_sync(struct hci_dev *hdev, void *data)
{
	return hci_scan_disable_sync(hdev);
}

static int hci_inquiry_sync(struct hci_dev *hdev, u8 length);
static int interleaved_inquiry_sync(struct hci_dev *hdev, void *data)
{
	return hci_inquiry_sync(hdev, DISCOV_INTERLEAVED_INQUIRY_LEN);
}

static void le_scan_disable(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	int status;

	bt_dev_dbg(hdev, "");
	hci_dev_lock(hdev);

	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		goto _return;

	cancel_delayed_work(&hdev->le_scan_restart);

	status = hci_cmd_sync_queue(hdev, scan_disable_sync, NULL, NULL);
	if (status) {
		bt_dev_err(hdev, "failed to disable LE scan: %d", status);
		goto _return;
	}

	hdev->discovery.scan_start = 0;

	/* If we were running an LE only scan, change the discovery state. If
	 * we were running both LE and BR/EDR inquiry simultaneously, and
	 * BR/EDR inquiry is already finished, stop discovery; otherwise
	 * BR/EDR inquiry will stop discovery when finished. If we are going
	 * to resolve the remote device name, do not change the discovery
	 * state.
	 */

	if (hdev->discovery.type == DISCOV_TYPE_LE)
		goto discov_stopped;

	if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
		goto _return;

	if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
		if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
		    hdev->discovery.state != DISCOVERY_RESOLVING)
			goto discov_stopped;

		goto _return;
	}

	status = hci_cmd_sync_queue(hdev, interleaved_inquiry_sync, NULL, NULL);
	if (status) {
		bt_dev_err(hdev, "inquiry failed: status %d", status);
		goto discov_stopped;
	}

	goto _return;

discov_stopped:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

_return:
	hci_dev_unlock(hdev);
}

static int hci_le_set_scan_enable_sync(struct hci_dev *hdev, u8 val,
				       u8 filter_dup);
static int hci_le_scan_restart_sync(struct hci_dev *hdev)
{
	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return 0;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return 0;
	}

	hci_le_set_scan_enable_sync(hdev, LE_SCAN_DISABLE, 0x00);
	return hci_le_set_scan_enable_sync(hdev, LE_SCAN_ENABLE,
					   LE_SCAN_FILTER_DUP_ENABLE);
}
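
/* Restart the LE scan by toggling scan enable. This matters for
 * HCI_QUIRK_STRICT_DUPLICATE_FILTER controllers, whose duplicate filter is
 * only flushed on a scan restart; without the restart, a device that changes
 * its advertising data would not be reported again.
 */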
static void le_scan_restart(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_restart.work);
	unsigned long timeout, duration, scan_start, now;
	int status;

	bt_dev_dbg(hdev, "");

	status = hci_le_scan_restart_sync(hdev);
	if (status) {
		bt_dev_err(hdev, "failed to restart LE scan: status %d",
			   status);
		return;
	}

	hci_dev_lock(hdev);

	if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
	    !hdev->discovery.scan_start)
		goto unlock;

	/* When the scan was started, hdev->le_scan_disable was queued to run
	 * after duration from scan_start. During the scan restart this job
	 * has been canceled, and we need to queue it again with the proper
	 * timeout, to make sure the scan does not run indefinitely.
	 */
	duration = hdev->discovery.scan_duration;
	scan_start = hdev->discovery.scan_start;
	now = jiffies;
	if (now - scan_start <= duration) {
		int elapsed;

		if (now >= scan_start)
			elapsed = now - scan_start;
		else
			elapsed = ULONG_MAX - scan_start + now;

		timeout = duration - elapsed;
	} else {
		timeout = 0;
	}

	queue_delayed_work(hdev->req_workqueue,
			   &hdev->le_scan_disable, timeout);

unlock:
	hci_dev_unlock(hdev);
}

static int reenable_adv_sync(struct hci_dev *hdev, void *data)
{
	bt_dev_dbg(hdev, "");

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	    list_empty(&hdev->adv_instances))
		return 0;

	if (hdev->cur_adv_instance) {
		return hci_schedule_adv_instance_sync(hdev,
						      hdev->cur_adv_instance,
						      true);
	} else {
		if (ext_adv_capable(hdev)) {
			hci_start_ext_adv_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
			hci_enable_advertising_sync(hdev);
		}
	}

	return 0;
}

static void reenable_adv(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    reenable_adv_work);
	int status;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	status = hci_cmd_sync_queue(hdev, reenable_adv_sync, NULL, NULL);
	if (status)
		bt_dev_err(hdev, "failed to reenable ADV: %d", status);

	hci_dev_unlock(hdev);
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

/* For a single instance:
 * - force == true: The instance will be removed even when its remaining
 *   lifetime is not zero.
 * - force == false: the instance will be deactivated but kept stored unless
 *   the remaining lifetime is zero.
 *
 * For instance == 0x00:
 * - force == true: All instances will be removed regardless of their timeout
 *   setting.
 * - force == false: Only instances that have a timeout will be removed.
 */
int hci_clear_adv_instance_sync(struct hci_dev *hdev, struct sock *sk,
				u8 instance, bool force)
{
	struct adv_info *adv_instance, *n, *next_instance = NULL;
	int err;
	u8 rem_inst;

	/* Cancel any timeout concerning the removed instance(s). */
	if (!instance || hdev->cur_adv_instance == instance)
		cancel_adv_timeout(hdev);

	/* Get the next instance to advertise BEFORE we remove
	 * the current one. This can be the same instance again
	 * if there is only one instance.
	 */
	if (instance && hdev->cur_adv_instance == instance)
		next_instance = hci_get_next_instance(hdev, instance);

	if (instance == 0x00) {
		list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
					 list) {
			if (!(force || adv_instance->timeout))
				continue;

			rem_inst = adv_instance->instance;
			err = hci_remove_adv_instance(hdev, rem_inst);
			if (!err)
				mgmt_advertising_removed(sk, hdev, rem_inst);
		}
	} else {
		adv_instance = hci_find_adv_instance(hdev, instance);

		if (force || (adv_instance && adv_instance->timeout &&
			      !adv_instance->remaining_time)) {
			/* Don't advertise a removed instance. */
			if (next_instance &&
			    next_instance->instance == instance)
				next_instance = NULL;

			err = hci_remove_adv_instance(hdev, instance);
			if (!err)
				mgmt_advertising_removed(sk, hdev, instance);
		}
	}

	if (!hdev_is_powered(hdev) || hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return 0;

	if (next_instance && !ext_adv_capable(hdev))
		return hci_schedule_adv_instance_sync(hdev,
						      next_instance->instance,
						      false);

	return 0;
}
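
/* Illustrative examples of the force semantics above (not part of the
 * driver): hci_clear_adv_instance_sync(hdev, NULL, 0x00, true) drops every
 * instance, while hci_clear_adv_instance_sync(hdev, NULL, 0x01, false) only
 * removes instance 1 once its remaining lifetime has reached zero; otherwise
 * the instance is merely deactivated and kept stored.
 */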

static int adv_timeout_expire_sync(struct hci_dev *hdev, void *data)
{
	u8 instance = *(u8 *)data;

	kfree(data);

	hci_clear_adv_instance_sync(hdev, NULL, instance, false);

	if (list_empty(&hdev->adv_instances))
		return hci_disable_advertising_sync(hdev);

	return 0;
}

static void adv_timeout_expire(struct work_struct *work)
{
	u8 *inst_ptr;
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    adv_instance_expire.work);

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	hdev->adv_instance_timeout = 0;

	if (hdev->cur_adv_instance == 0x00)
		goto unlock;

	inst_ptr = kmalloc(1, GFP_KERNEL);
	if (!inst_ptr)
		goto unlock;

	*inst_ptr = hdev->cur_adv_instance;
	hci_cmd_sync_queue(hdev, adv_timeout_expire_sync, inst_ptr, NULL);

unlock:
	hci_dev_unlock(hdev);
}

void hci_cmd_sync_init(struct hci_dev *hdev)
{
	INIT_WORK(&hdev->cmd_sync_work, hci_cmd_sync_work);
	INIT_LIST_HEAD(&hdev->cmd_sync_work_list);
	mutex_init(&hdev->cmd_sync_work_lock);
	mutex_init(&hdev->unregister_lock);

	INIT_WORK(&hdev->cmd_sync_cancel_work, hci_cmd_sync_cancel_work);
	INIT_WORK(&hdev->reenable_adv_work, reenable_adv);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable);
	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart);
	INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
}

void hci_cmd_sync_clear(struct hci_dev *hdev)
{
	struct hci_cmd_sync_work_entry *entry, *tmp;

	cancel_work_sync(&hdev->cmd_sync_work);
	cancel_work_sync(&hdev->reenable_adv_work);

	mutex_lock(&hdev->cmd_sync_work_lock);
	list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list) {
		if (entry->destroy)
			entry->destroy(hdev, entry->data, -ECANCELED);

		list_del(&entry->list);
		kfree(entry);
	}
	mutex_unlock(&hdev->cmd_sync_work_lock);
}

void hci_cmd_sync_cancel(struct hci_dev *hdev, int err)
{
	bt_dev_dbg(hdev, "err 0x%2.2x", err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;

		queue_work(hdev->workqueue, &hdev->cmd_sync_cancel_work);
	}
}
EXPORT_SYMBOL(hci_cmd_sync_cancel);

/* Cancel ongoing command request synchronously:
 *
 * - Set result and mark status to HCI_REQ_CANCELED
 * - Wakeup command sync thread
 */
void hci_cmd_sync_cancel_sync(struct hci_dev *hdev, int err)
{
	bt_dev_dbg(hdev, "err 0x%2.2x", err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;

		wake_up_interruptible(&hdev->req_wait_q);
	}
}
EXPORT_SYMBOL(hci_cmd_sync_cancel_sync);

/* Submit HCI command to be run as cmd_sync_work:
 *
 * - hdev must _not_ be unregistered
 */
int hci_cmd_sync_submit(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
			void *data, hci_cmd_sync_work_destroy_t destroy)
{
	struct hci_cmd_sync_work_entry *entry;
	int err = 0;

	mutex_lock(&hdev->unregister_lock);
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		err = -ENODEV;
		goto unlock;
	}

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		err = -ENOMEM;
		goto unlock;
	}
	entry->func = func;
	entry->data = data;
	entry->destroy = destroy;

	mutex_lock(&hdev->cmd_sync_work_lock);
	list_add_tail(&entry->list, &hdev->cmd_sync_work_list);
	mutex_unlock(&hdev->cmd_sync_work_lock);

	queue_work(hdev->req_workqueue, &hdev->cmd_sync_work);

unlock:
	mutex_unlock(&hdev->unregister_lock);
	return err;
}
EXPORT_SYMBOL(hci_cmd_sync_submit);

/* Queue HCI command:
 *
 * - hdev must be running
 */
int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
		       void *data, hci_cmd_sync_work_destroy_t destroy)
{
	/* Only queue the command if hdev is running, which means it has been
	 * opened and is either in the init phase or already up.
	 */
	if (!test_bit(HCI_RUNNING, &hdev->flags))
		return -ENETDOWN;

	return hci_cmd_sync_submit(hdev, func, data, destroy);
}
EXPORT_SYMBOL(hci_cmd_sync_queue);
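
/* Callers typically pass heap-allocated context and free it either in the
 * callback itself (as adv_timeout_expire_sync() does above) or in the
 * destroy hook, which also runs with err == -ECANCELED when the pending list
 * is flushed. A minimal sketch with hypothetical callback names:
 *
 *	static int set_name_sync(struct hci_dev *hdev, void *data)
 *	{
 *		// Runs with hdev->req_lock held; may use
 *		// __hci_cmd_sync_status() and friends.
 *		return 0;
 *	}
 *
 *	err = hci_cmd_sync_queue(hdev, set_name_sync, ctx, free_ctx);
 */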

int hci_update_eir_sync(struct hci_dev *hdev)
{
	struct hci_cp_write_eir cp;

	bt_dev_dbg(hdev, "");

	if (!hdev_is_powered(hdev))
		return 0;

	if (!lmp_ext_inq_capable(hdev))
		return 0;

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return 0;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return 0;

	memset(&cp, 0, sizeof(cp));

	eir_create(hdev, cp.data);

	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return 0;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp,
				     HCI_CMD_TIMEOUT);
}

static u8 get_service_classes(struct hci_dev *hdev)
{
	struct bt_uuid *uuid;
	u8 val = 0;

	list_for_each_entry(uuid, &hdev->uuids, list)
		val |= uuid->svc_hint;

	return val;
}

int hci_update_class_sync(struct hci_dev *hdev)
{
	u8 cod[3];

	bt_dev_dbg(hdev, "");

	if (!hdev_is_powered(hdev))
		return 0;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return 0;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return 0;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
		cod[1] |= 0x20;

	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CLASS_OF_DEV,
				     sizeof(cod), cod, HCI_CMD_TIMEOUT);
}

static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
{
	/* If there is no connection we are OK to advertise. */
	if (hci_conn_num(hdev, LE_LINK) == 0)
		return true;

	/* Check le_states if there is any connection in peripheral role. */
	if (hdev->conn_hash.le_num_peripheral > 0) {
		/* Peripheral connection state and non connectable mode
		 * bit 20.
		 */
		if (!connectable && !(hdev->le_states[2] & 0x10))
			return false;

		/* Peripheral connection state and connectable mode bit 38
		 * and scannable bit 21.
		 */
		if (connectable && (!(hdev->le_states[4] & 0x40) ||
				    !(hdev->le_states[2] & 0x20)))
			return false;
	}

	/* Check le_states if there is any connection in central role. */
	if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_peripheral) {
		/* Central connection state and non connectable mode bit 18. */
		if (!connectable && !(hdev->le_states[2] & 0x02))
			return false;

		/* Central connection state and connectable mode bit 35 and
		 * scannable bit 19.
		 */
		if (connectable && (!(hdev->le_states[4] & 0x08) ||
				    !(hdev->le_states[2] & 0x08)))
			return false;
	}

	return true;
}

static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
{
	/* If privacy is not enabled don't use RPA */
	if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
		return false;

	/* If basic privacy mode is enabled use RPA */
	if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
		return true;

	/* If limited privacy mode is enabled don't use RPA if we're
	 * both discoverable and bondable.
	 */
	if ((flags & MGMT_ADV_FLAG_DISCOV) &&
	    hci_dev_test_flag(hdev, HCI_BONDABLE))
		return false;

	/* We're neither bondable nor discoverable in the limited
	 * privacy mode, therefore use RPA.
	 */
	return true;
}

static int hci_set_random_addr_sync(struct hci_dev *hdev, bdaddr_t *rpa)
{
	/* If we're advertising or initiating an LE connection we can't
	 * go ahead and change the random address at this time. This is
	 * because the eventual initiator address used for the
	 * subsequently created connection will be undefined (some
	 * controllers use the new address and others the one we had
	 * when the operation started).
	 *
	 * In this kind of scenario skip the update and let the random
	 * address be updated at the next cycle.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
	    hci_lookup_le_connect(hdev)) {
		bt_dev_dbg(hdev, "Deferring random address update");
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		return 0;
	}

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_RANDOM_ADDR,
				     6, rpa, HCI_CMD_TIMEOUT);
}
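
/* Pick the own address type for the next air operation. In order of
 * precedence the function below selects: a resolvable private address when
 * privacy is in use, a freshly generated non-resolvable private address when
 * only unlinkability is required, the configured static random address, and
 * finally the public address.
 */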
int hci_update_random_address_sync(struct hci_dev *hdev, bool require_privacy,
				   bool rpa, u8 *own_addr_type)
{
	int err;

	/* If privacy is enabled use a resolvable private address. If
	 * the current RPA has expired or there is something else than
	 * the current RPA in use, then generate a new one.
	 */
	if (rpa) {
		/* If the controller supports LL Privacy use own address type
		 * 0x03 (resolvable private address, resolved by controller).
		 */
		if (use_ll_privacy(hdev))
			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
		else
			*own_addr_type = ADDR_LE_DEV_RANDOM;

		/* Check if RPA is valid */
		if (rpa_valid(hdev))
			return 0;

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			bt_dev_err(hdev, "failed to generate new RPA");
			return err;
		}

		err = hci_set_random_addr_sync(hdev, &hdev->rpa);
		if (err)
			return err;

		return 0;
	}

	/* In case of required privacy without a resolvable private address,
	 * use a non-resolvable private address. This is useful for active
	 * scanning and non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from six random bytes with the two most significant
			 * bits cleared.
			 */
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;

			/* The non-resolvable private address shall not be
			 * equal to the public address.
			 */
			if (bacmp(&hdev->bdaddr, &nrpa))
				break;
		}

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		return hci_set_random_addr_sync(hdev, &nrpa);
	}

	/* If forcing static address is in use or there is no public
	 * address use the static address as random address (but skip
	 * the HCI command if the current random address is already the
	 * static one).
	 *
	 * In case BR/EDR has been disabled on a dual-mode controller
	 * and a static address has been configured, then use that
	 * address instead of the public BR/EDR address.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		*own_addr_type = ADDR_LE_DEV_RANDOM;
		if (bacmp(&hdev->static_addr, &hdev->random_addr))
			return hci_set_random_addr_sync(hdev,
							&hdev->static_addr);
		return 0;
	}

	/* Neither privacy nor static address is being used so use a
	 * public address.
	 */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}

static int hci_disable_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance)
{
	struct hci_cp_le_set_ext_adv_enable *cp;
	struct hci_cp_ext_adv_set *set;
	u8 data[sizeof(*cp) + sizeof(*set) * 1];
	u8 size;

	/* If the request specifies an instance that doesn't exist, fail */
	if (instance > 0) {
		struct adv_info *adv;

		adv = hci_find_adv_instance(hdev, instance);
		if (!adv)
			return -EINVAL;

		/* If not enabled there is nothing to do */
		if (!adv->enabled)
			return 0;
	}

	memset(data, 0, sizeof(data));

	cp = (void *)data;
	set = (void *)cp->data;

	/* Instance 0x00 indicates all advertising instances will be disabled */
	cp->num_of_sets = !!instance;
	cp->enable = 0x00;

	set->handle = instance;

	size = sizeof(*cp) + sizeof(*set) * cp->num_of_sets;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE,
				     size, data, HCI_CMD_TIMEOUT);
}
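
/* Note that instance 0x00 maps to Number_of_Sets == 0 in the command above,
 * which per the HCI specification disables all advertising sets at once;
 * e.g. hci_disable_ext_adv_instance_sync(hdev, 0x00) is how the code later
 * in this file tears everything down before HCI_OP_LE_CLEAR_ADV_SETS.
 */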

static int hci_set_adv_set_random_addr_sync(struct hci_dev *hdev, u8 instance,
					    bdaddr_t *random_addr)
{
	struct hci_cp_le_set_adv_set_rand_addr cp;
	int err;

	if (!instance) {
		/* Instance 0x00 doesn't have an adv_info; instead it uses
		 * hdev->random_addr to track its address, so whenever it
		 * needs to be updated this also sets the random address,
		 * since hdev->random_addr is shared with the scan state
		 * machine.
		 */
		err = hci_set_random_addr_sync(hdev, random_addr);
		if (err)
			return err;
	}

	memset(&cp, 0, sizeof(cp));

	cp.handle = instance;
	bacpy(&cp.bdaddr, random_addr);

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}
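
/* Building the extended advertising parameters below proceeds in stages:
 * disable the set if it is already active (updating an active set returns
 * Command Disallowed), pick the own address, derive evt_properties from the
 * connectable/scannable flags, select the PHYs, and finally program the
 * set's random address if one is required.
 */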
int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance)
{
	struct hci_cp_le_set_ext_adv_params cp;
	bool connectable;
	u32 flags;
	bdaddr_t random_addr;
	u8 own_addr_type;
	int err;
	struct adv_info *adv;
	bool secondary_adv;

	if (instance > 0) {
		adv = hci_find_adv_instance(hdev, instance);
		if (!adv)
			return -EINVAL;
	} else {
		adv = NULL;
	}

	/* Updating parameters of an active instance will return a
	 * Command Disallowed error, so we must first disable the
	 * instance if it is active.
	 */
	if (adv && !adv->pending) {
		err = hci_disable_ext_adv_instance_sync(hdev, instance);
		if (err)
			return err;
	}

	flags = hci_adv_instance_flags(hdev, instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      mgmt_get_connectable(hdev);

	if (!is_advertising_allowed(hdev, connectable))
		return -EPERM;

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	err = hci_get_random_address(hdev, !connectable,
				     adv_use_rpa(hdev, flags), adv,
				     &own_addr_type, &random_addr);
	if (err < 0)
		return err;

	memset(&cp, 0, sizeof(cp));

	if (adv) {
		hci_cpu_to_le24(adv->min_interval, cp.min_interval);
		hci_cpu_to_le24(adv->max_interval, cp.max_interval);
		cp.tx_power = adv->tx_power;
	} else {
		hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
		hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
		cp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
	}

	secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);

	if (connectable) {
		if (secondary_adv)
			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
		else
			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
	} else if (hci_adv_instance_is_scannable(hdev, instance) ||
		   (flags & MGMT_ADV_PARAM_SCAN_RSP)) {
		if (secondary_adv)
			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
		else
			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
	} else {
		if (secondary_adv)
			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
		else
			cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
	}

	/* If Own_Address_Type equals 0x02 or 0x03, the Peer_Address parameter
	 * contains the peer's Identity Address and the Peer_Address_Type
	 * parameter contains the peer's Identity Type (i.e., 0x00 or 0x01).
	 * These parameters are used to locate the corresponding local IRK in
	 * the resolving list; this IRK is used to generate our own address
	 * used in the advertisement.
	 */
	if (own_addr_type == ADDR_LE_DEV_RANDOM_RESOLVED)
		hci_copy_identity_address(hdev, &cp.peer_addr,
					  &cp.peer_addr_type);

	cp.own_addr_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;
	cp.handle = instance;

	if (flags & MGMT_ADV_FLAG_SEC_2M) {
		cp.primary_phy = HCI_ADV_PHY_1M;
		cp.secondary_phy = HCI_ADV_PHY_2M;
	} else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
		cp.primary_phy = HCI_ADV_PHY_CODED;
		cp.secondary_phy = HCI_ADV_PHY_CODED;
	} else {
		/* In all other cases use 1M */
		cp.primary_phy = HCI_ADV_PHY_1M;
		cp.secondary_phy = HCI_ADV_PHY_1M;
	}

	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS,
				    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
	if (err)
		return err;

	if ((own_addr_type == ADDR_LE_DEV_RANDOM ||
	     own_addr_type == ADDR_LE_DEV_RANDOM_RESOLVED) &&
	    bacmp(&random_addr, BDADDR_ANY)) {
		/* Check if the random address needs to be updated */
		if (adv) {
			if (!bacmp(&random_addr, &adv->random_addr))
				return 0;
		} else {
			if (!bacmp(&random_addr, &hdev->random_addr))
				return 0;
		}

		return hci_set_adv_set_random_addr_sync(hdev, instance,
							&random_addr);
	}

	return 0;
}

static int hci_set_ext_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance)
{
	struct {
		struct hci_cp_le_set_ext_scan_rsp_data cp;
		u8 data[HCI_MAX_EXT_AD_LENGTH];
	} pdu;
	u8 len;
	struct adv_info *adv = NULL;
	int err;

	memset(&pdu, 0, sizeof(pdu));

	if (instance) {
		adv = hci_find_adv_instance(hdev, instance);
		if (!adv || !adv->scan_rsp_changed)
			return 0;
	}

	len = eir_create_scan_rsp(hdev, instance, pdu.data);

	pdu.cp.handle = instance;
	pdu.cp.length = len;
	pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
	pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;

	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA,
				    sizeof(pdu.cp) + len, &pdu.cp,
				    HCI_CMD_TIMEOUT);
	if (err)
		return err;

	if (adv) {
		adv->scan_rsp_changed = false;
	} else {
		memcpy(hdev->scan_rsp_data, pdu.data, len);
		hdev->scan_rsp_data_len = len;
	}

	return 0;
}

static int __hci_set_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance)
{
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	memset(&cp, 0, sizeof(cp));

	len = eir_create_scan_rsp(hdev, instance, cp.data);

	if (hdev->scan_rsp_data_len == len &&
	    !memcmp(cp.data, hdev->scan_rsp_data, len))
		return 0;

	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_RSP_DATA,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

int hci_update_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance)
{
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return 0;

	if (ext_adv_capable(hdev))
		return hci_set_ext_scan_rsp_data_sync(hdev, instance);

	return __hci_set_scan_rsp_data_sync(hdev, instance);
}

int hci_enable_ext_advertising_sync(struct hci_dev *hdev, u8 instance)
{
	struct hci_cp_le_set_ext_adv_enable *cp;
	struct hci_cp_ext_adv_set *set;
	u8 data[sizeof(*cp) + sizeof(*set) * 1];
	struct adv_info *adv;

	if (instance > 0) {
		adv = hci_find_adv_instance(hdev, instance);
		if (!adv)
			return -EINVAL;
		/* If already enabled there is nothing to do */
		if (adv->enabled)
			return 0;
	} else {
		adv = NULL;
	}

	cp = (void *)data;
	set = (void *)cp->data;

	memset(cp, 0, sizeof(*cp));

	cp->enable = 0x01;
	cp->num_of_sets = 0x01;

	memset(set, 0, sizeof(*set));

	set->handle = instance;

	/* Set duration per instance since the controller is responsible for
	 * scheduling it.
	 */
	if (adv && adv->timeout) {
		u16 duration = adv->timeout * MSEC_PER_SEC;

		/* Time = N * 10 ms */
		set->duration = cpu_to_le16(duration / 10);
	}

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE,
				     sizeof(*cp) +
				     sizeof(*set) * cp->num_of_sets,
				     data, HCI_CMD_TIMEOUT);
}

int hci_start_ext_adv_sync(struct hci_dev *hdev, u8 instance)
{
	int err;

	err = hci_setup_ext_adv_instance_sync(hdev, instance);
	if (err)
		return err;

	err = hci_set_ext_scan_rsp_data_sync(hdev, instance);
	if (err)
		return err;

	return hci_enable_ext_advertising_sync(hdev, instance);
}
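
/* Worked example of the duration conversion above (illustrative): an
 * instance timeout of 60 seconds becomes 60 * MSEC_PER_SEC = 60000 ms, and
 * with the spec's Time = N * 10 ms granularity the controller is handed
 * N = 6000. hci_start_ext_adv_sync() is the usual bring-up path: program
 * the parameters, push the scan response data, then enable the set.
 */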

static int hci_disable_per_advertising_sync(struct hci_dev *hdev, u8 instance)
{
	struct hci_cp_le_set_per_adv_enable cp;
	struct adv_info *adv = NULL;

	/* If periodic advertising is already disabled there is nothing to do. */
	adv = hci_find_adv_instance(hdev, instance);
	if (!adv || !adv->periodic || !adv->enabled)
		return 0;

	memset(&cp, 0, sizeof(cp));

	cp.enable = 0x00;
	cp.handle = instance;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static int hci_set_per_adv_params_sync(struct hci_dev *hdev, u8 instance,
				       u16 min_interval, u16 max_interval)
{
	struct hci_cp_le_set_per_adv_params cp;

	memset(&cp, 0, sizeof(cp));

	if (!min_interval)
		min_interval = DISCOV_LE_PER_ADV_INT_MIN;

	if (!max_interval)
		max_interval = DISCOV_LE_PER_ADV_INT_MAX;

	cp.handle = instance;
	cp.min_interval = cpu_to_le16(min_interval);
	cp.max_interval = cpu_to_le16(max_interval);
	cp.periodic_properties = 0x0000;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_PARAMS,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static int hci_set_per_adv_data_sync(struct hci_dev *hdev, u8 instance)
{
	struct {
		struct hci_cp_le_set_per_adv_data cp;
		u8 data[HCI_MAX_PER_AD_LENGTH];
	} pdu;
	u8 len;

	memset(&pdu, 0, sizeof(pdu));

	if (instance) {
		struct adv_info *adv = hci_find_adv_instance(hdev, instance);

		if (!adv || !adv->periodic)
			return 0;
	}

	len = eir_create_per_adv_data(hdev, instance, pdu.data);

	pdu.cp.length = len;
	pdu.cp.handle = instance;
	pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_DATA,
				     sizeof(pdu.cp) + len, &pdu,
				     HCI_CMD_TIMEOUT);
}

static int hci_enable_per_advertising_sync(struct hci_dev *hdev, u8 instance)
{
	struct hci_cp_le_set_per_adv_enable cp;
	struct adv_info *adv = NULL;

	/* If periodic advertising is already enabled there is nothing to do. */
	adv = hci_find_adv_instance(hdev, instance);
	if (adv && adv->periodic && adv->enabled)
		return 0;

	memset(&cp, 0, sizeof(cp));

	cp.enable = 0x01;
	cp.handle = instance;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

/* Checks if the periodic advertising data contains a Basic Announcement and,
 * if it does, generates a Broadcast ID and adds a Broadcast Announcement.
 */
static int hci_adv_bcast_announcement(struct hci_dev *hdev,
				      struct adv_info *adv)
{
	u8 bid[3];
	u8 ad[4 + 3];

	/* Skip if adv is NULL, as instance 0x00 is used for general purpose
	 * advertising, so it cannot be used for the likes of Broadcast
	 * Announcement as it can be overwritten at any point.
	 */
	if (!adv)
		return 0;

	/* If the PA data doesn't contain a Basic Audio Announcement there is
	 * nothing to do.
	 */
	if (!eir_get_service_data(adv->per_adv_data, adv->per_adv_data_len,
				  0x1851, NULL))
		return 0;

	/* Check if the advertising data already has a Broadcast Announcement,
	 * since the process may want to control the Broadcast ID directly and
	 * in that case the kernel shall not interfere.
	 */
	if (eir_get_service_data(adv->adv_data, adv->adv_data_len, 0x1852,
				 NULL))
		return 0;

	/* Generate Broadcast ID */
	get_random_bytes(bid, sizeof(bid));
	eir_append_service_data(ad, 0, 0x1852, bid, sizeof(bid));
	hci_set_adv_instance_data(hdev, adv->instance, sizeof(ad), ad, 0, NULL);

	return hci_update_adv_data_sync(hdev, adv->instance);
}

int hci_start_per_adv_sync(struct hci_dev *hdev, u8 instance, u8 data_len,
			   u8 *data, u32 flags, u16 min_interval,
			   u16 max_interval, u16 sync_interval)
{
	struct adv_info *adv = NULL;
	int err;
	bool added = false;

	hci_disable_per_advertising_sync(hdev, instance);

	if (instance) {
		adv = hci_find_adv_instance(hdev, instance);
		/* Create an instance if it could not be found */
		if (!adv) {
			adv = hci_add_per_instance(hdev, instance, flags,
						   data_len, data,
						   sync_interval,
						   sync_interval);
			if (IS_ERR(adv))
				return PTR_ERR(adv);
			adv->pending = false;
			added = true;
		}
	}

	/* Start advertising */
	err = hci_start_ext_adv_sync(hdev, instance);
	if (err < 0)
		goto fail;

	err = hci_adv_bcast_announcement(hdev, adv);
	if (err < 0)
		goto fail;

	err = hci_set_per_adv_params_sync(hdev, instance, min_interval,
					  max_interval);
	if (err < 0)
		goto fail;

	err = hci_set_per_adv_data_sync(hdev, instance);
	if (err < 0)
		goto fail;

	err = hci_enable_per_advertising_sync(hdev, instance);
	if (err < 0)
		goto fail;

	return 0;

fail:
	if (added)
		hci_remove_adv_instance(hdev, instance);

	return err;
}

static int hci_start_adv_sync(struct hci_dev *hdev, u8 instance)
{
	int err;

	if (ext_adv_capable(hdev))
		return hci_start_ext_adv_sync(hdev, instance);

	err = hci_update_adv_data_sync(hdev, instance);
	if (err)
		return err;

	err = hci_update_scan_rsp_data_sync(hdev, instance);
	if (err)
		return err;

	return hci_enable_advertising_sync(hdev);
}
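
/* A minimal call sketch for the periodic path (illustrative values): passing
 * min_interval/max_interval as 0 falls back to DISCOV_LE_PER_ADV_INT_MIN/MAX
 * above, e.g.:
 *
 *	err = hci_start_per_adv_sync(hdev, 0x01, data_len, data,
 *				     flags, 0, 0, sync_interval);
 *
 * which sets up the extended set, then the periodic parameters and data, and
 * finally enables periodic advertising, rolling back a newly added instance
 * on failure.
 */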

int hci_enable_advertising_sync(struct hci_dev *hdev)
{
	struct adv_info *adv_instance;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;
	u16 adv_min_interval, adv_max_interval;
	u32 flags;
	u8 status;

	if (ext_adv_capable(hdev))
		return hci_enable_ext_advertising_sync(hdev,
						       hdev->cur_adv_instance);

	flags = hci_adv_instance_flags(hdev, hdev->cur_adv_instance);
	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      mgmt_get_connectable(hdev);

	if (!is_advertising_allowed(hdev, connectable))
		return -EINVAL;

	status = hci_disable_advertising_sync(hdev);
	if (status)
		return status;

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	status = hci_update_random_address_sync(hdev, !connectable,
						adv_use_rpa(hdev, flags),
						&own_addr_type);
	if (status)
		return status;

	memset(&cp, 0, sizeof(cp));

	if (adv_instance) {
		adv_min_interval = adv_instance->min_interval;
		adv_max_interval = adv_instance->max_interval;
	} else {
		adv_min_interval = hdev->le_adv_min_interval;
		adv_max_interval = hdev->le_adv_max_interval;
	}

	if (connectable) {
		cp.type = LE_ADV_IND;
	} else {
		if (hci_adv_instance_is_scannable(hdev, hdev->cur_adv_instance))
			cp.type = LE_ADV_SCAN_IND;
		else
			cp.type = LE_ADV_NONCONN_IND;

		if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
		    hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
			adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
			adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
		}
	}

	cp.min_interval = cpu_to_le16(adv_min_interval);
	cp.max_interval = cpu_to_le16(adv_max_interval);
	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	status = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_PARAM,
				       sizeof(cp), &cp, HCI_CMD_TIMEOUT);
	if (status)
		return status;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE,
				     sizeof(enable), &enable, HCI_CMD_TIMEOUT);
}

static int enable_advertising_sync(struct hci_dev *hdev, void *data)
{
	return hci_enable_advertising_sync(hdev);
}

int hci_enable_advertising(struct hci_dev *hdev)
{
	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	    list_empty(&hdev->adv_instances))
		return 0;

	return hci_cmd_sync_queue(hdev, enable_advertising_sync, NULL, NULL);
}

int hci_remove_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance,
				     struct sock *sk)
{
	int err;

	if (!ext_adv_capable(hdev))
		return 0;

	err = hci_disable_ext_adv_instance_sync(hdev, instance);
	if (err)
		return err;

	/* If the request specifies an instance that doesn't exist, fail */
	if (instance > 0 && !hci_find_adv_instance(hdev, instance))
		return -EINVAL;

	return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_REMOVE_ADV_SET,
					sizeof(instance), &instance, 0,
					HCI_CMD_TIMEOUT, sk);
}

static int remove_ext_adv_sync(struct hci_dev *hdev, void *data)
{
	struct adv_info *adv = data;
	u8 instance = 0;

	if (adv)
		instance = adv->instance;

	return hci_remove_ext_adv_instance_sync(hdev, instance, NULL);
}

int hci_remove_ext_adv_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv = NULL;

	if (instance) {
		adv = hci_find_adv_instance(hdev, instance);
		if (!adv)
			return -EINVAL;
	}

	return hci_cmd_sync_queue(hdev, remove_ext_adv_sync, adv, NULL);
}

int hci_le_terminate_big_sync(struct hci_dev *hdev, u8 handle, u8 reason)
{
	struct hci_cp_le_term_big cp;

	memset(&cp, 0, sizeof(cp));
	cp.handle = handle;
	cp.reason = reason;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_TERM_BIG,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static int hci_set_ext_adv_data_sync(struct hci_dev *hdev, u8 instance)
{
	struct {
		struct hci_cp_le_set_ext_adv_data cp;
		u8 data[HCI_MAX_EXT_AD_LENGTH];
	} pdu;
	u8 len;
	struct adv_info *adv = NULL;
	int err;

	memset(&pdu, 0, sizeof(pdu));

	if (instance) {
		adv = hci_find_adv_instance(hdev, instance);
		if (!adv || !adv->adv_data_changed)
			return 0;
	}

	len = eir_create_adv_data(hdev, instance, pdu.data);

	pdu.cp.length = len;
	pdu.cp.handle = instance;
	pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
	pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;

	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_DATA,
				    sizeof(pdu.cp) + len, &pdu.cp,
				    HCI_CMD_TIMEOUT);
	if (err)
		return err;

	/* Update data if the command succeeded */
	if (adv) {
		adv->adv_data_changed = false;
	} else {
		memcpy(hdev->adv_data, pdu.data, len);
		hdev->adv_data_len = len;
	}

	return 0;
}

static int hci_set_adv_data_sync(struct hci_dev *hdev, u8 instance)
{
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	memset(&cp, 0, sizeof(cp));

	len = eir_create_adv_data(hdev, instance, cp.data);

	/* There's nothing to do if the data hasn't changed */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return 0;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_DATA,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

int hci_update_adv_data_sync(struct hci_dev *hdev, u8 instance)
{
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return 0;

	if (ext_adv_capable(hdev))
		return hci_set_ext_adv_data_sync(hdev, instance);

	return hci_set_adv_data_sync(hdev, instance);
}
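
/* Worked example for the scheduling below (illustrative): an instance
 * created with timeout = 30 s and duration = 10 s advertises for
 * min(duration, remaining_time) = 10 s per round, with remaining_time
 * dropping 30 -> 20 -> 10 -> 0, after which adv_timeout_expire() removes it;
 * an instance with timeout == 0 advertises without time limit.
 */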
int hci_schedule_adv_instance_sync(struct hci_dev *hdev, u8 instance,
				   bool force)
{
	struct adv_info *adv = NULL;
	u16 timeout;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) && !ext_adv_capable(hdev))
		return -EPERM;

	if (hdev->adv_instance_timeout)
		return -EBUSY;

	adv = hci_find_adv_instance(hdev, instance);
	if (!adv)
		return -ENOENT;

	/* A zero timeout means unlimited advertising. As long as there is
	 * only one instance, the duration should be ignored. We still set a
	 * timeout in case further instances are added later on.
	 *
	 * If the remaining lifetime of the instance is more than the duration
	 * then the timeout corresponds to the duration, otherwise it will be
	 * reduced to the remaining instance lifetime.
	 */
	if (adv->timeout == 0 || adv->duration <= adv->remaining_time)
		timeout = adv->duration;
	else
		timeout = adv->remaining_time;

	/* The remaining time is being reduced unless the instance is being
	 * advertised without time limit.
	 */
	if (adv->timeout)
		adv->remaining_time = adv->remaining_time - timeout;

	/* Only use work for scheduling instances with legacy advertising */
	if (!ext_adv_capable(hdev)) {
		hdev->adv_instance_timeout = timeout;
		queue_delayed_work(hdev->req_workqueue,
				   &hdev->adv_instance_expire,
				   msecs_to_jiffies(timeout * 1000));
	}

	/* If we're just re-scheduling the same instance again then do not
	 * execute any HCI commands. This happens when a single instance is
	 * being advertised.
	 */
	if (!force && hdev->cur_adv_instance == instance &&
	    hci_dev_test_flag(hdev, HCI_LE_ADV))
		return 0;

	hdev->cur_adv_instance = instance;

	return hci_start_adv_sync(hdev, instance);
}

static int hci_clear_adv_sets_sync(struct hci_dev *hdev, struct sock *sk)
{
	int err;

	if (!ext_adv_capable(hdev))
		return 0;

	/* Disable instance 0x00 to disable all instances */
	err = hci_disable_ext_adv_instance_sync(hdev, 0x00);
	if (err)
		return err;

	return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CLEAR_ADV_SETS,
					0, NULL, 0, HCI_CMD_TIMEOUT, sk);
}

static int hci_clear_adv_sync(struct hci_dev *hdev, struct sock *sk, bool force)
{
	struct adv_info *adv, *n;
	int err = 0;

	if (ext_adv_capable(hdev))
		/* Remove all existing sets */
		err = hci_clear_adv_sets_sync(hdev, sk);
	if (ext_adv_capable(hdev))
		return err;

	/* This is safe as long as there is no command sent while the lock is
	 * held.
	 */
	hci_dev_lock(hdev);

	/* Cleanup non-ext instances */
	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
		u8 instance = adv->instance;
		int err;

		if (!(force || adv->timeout))
			continue;

		err = hci_remove_adv_instance(hdev, instance);
		if (!err)
			mgmt_advertising_removed(sk, hdev, instance);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int hci_remove_adv_sync(struct hci_dev *hdev, u8 instance,
			       struct sock *sk)
{
	int err = 0;

	/* If we use extended advertising, the instance has to be removed
	 * first.
	 */
	if (ext_adv_capable(hdev))
		err = hci_remove_ext_adv_instance_sync(hdev, instance, sk);
	if (ext_adv_capable(hdev))
		return err;

	/* This is safe as long as there is no command sent while the lock is
	 * held.
	 */
	hci_dev_lock(hdev);

	err = hci_remove_adv_instance(hdev, instance);
	if (!err)
		mgmt_advertising_removed(sk, hdev, instance);

	hci_dev_unlock(hdev);

	return err;
}

/* For a single instance:
 * - force == true: The instance will be removed even when its remaining
 *   lifetime is not zero.
 * - force == false: the instance will be deactivated but kept stored unless
 *   the remaining lifetime is zero.
 *
 * For instance == 0x00:
 * - force == true: All instances will be removed regardless of their timeout
 *   setting.
 * - force == false: Only instances that have a timeout will be removed.
 */
int hci_remove_advertising_sync(struct hci_dev *hdev, struct sock *sk,
				u8 instance, bool force)
{
	struct adv_info *next = NULL;
	int err;

	/* Cancel any timeout concerning the removed instance(s). */
	if (!instance || hdev->cur_adv_instance == instance)
		cancel_adv_timeout(hdev);

	/* Get the next instance to advertise BEFORE we remove
	 * the current one. This can be the same instance again
	 * if there is only one instance.
	 */
	if (hdev->cur_adv_instance == instance)
		next = hci_get_next_instance(hdev, instance);

	if (!instance) {
		err = hci_clear_adv_sync(hdev, sk, force);
		if (err)
			return err;
	} else {
		struct adv_info *adv = hci_find_adv_instance(hdev, instance);

		if (force || (adv && adv->timeout && !adv->remaining_time)) {
			/* Don't advertise a removed instance. */
			if (next && next->instance == instance)
				next = NULL;

			err = hci_remove_adv_sync(hdev, instance, sk);
			if (err)
				return err;
		}
	}

	if (!hdev_is_powered(hdev) || hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return 0;

	if (next && !ext_adv_capable(hdev))
		hci_schedule_adv_instance_sync(hdev, next->instance, false);

	return 0;
}

int hci_read_rssi_sync(struct hci_dev *hdev, __le16 handle)
{
	struct hci_cp_read_rssi cp;

	cp.handle = handle;
	return __hci_cmd_sync_status(hdev, HCI_OP_READ_RSSI,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

int hci_read_clock_sync(struct hci_dev *hdev, struct hci_cp_read_clock *cp)
{
	return __hci_cmd_sync_status(hdev, HCI_OP_READ_CLOCK,
				     sizeof(*cp), cp, HCI_CMD_TIMEOUT);
}

int hci_read_tx_power_sync(struct hci_dev *hdev, __le16 handle, u8 type)
{
	struct hci_cp_read_tx_power cp;

	cp.handle = handle;
	cp.type = type;
	return __hci_cmd_sync_status(hdev, HCI_OP_READ_TX_POWER,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}
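
/* Note that the read helpers above take the connection handle already in
 * little-endian form (__le16), so a caller converts once, e.g.
 * hci_read_rssi_sync(hdev, cpu_to_le16(conn->handle)); the result is
 * delivered through the usual Command Complete path.
 */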

int hci_disable_advertising_sync(struct hci_dev *hdev)
{
	u8 enable = 0x00;
	int err = 0;

	/* If the controller is not advertising we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_ADV))
		return 0;

	if (ext_adv_capable(hdev))
		err = hci_disable_ext_adv_instance_sync(hdev, 0x00);
	if (ext_adv_capable(hdev))
		return err;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE,
				     sizeof(enable), &enable, HCI_CMD_TIMEOUT);
}

static int hci_le_set_ext_scan_enable_sync(struct hci_dev *hdev, u8 val,
					   u8 filter_dup)
{
	struct hci_cp_le_set_ext_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = val;

	if (hci_dev_test_flag(hdev, HCI_MESH))
		cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
	else
		cp.filter_dup = filter_dup;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static int hci_le_set_scan_enable_sync(struct hci_dev *hdev, u8 val,
				       u8 filter_dup)
{
	struct hci_cp_le_set_scan_enable cp;

	if (use_ext_scan(hdev))
		return hci_le_set_ext_scan_enable_sync(hdev, val, filter_dup);

	memset(&cp, 0, sizeof(cp));
	cp.enable = val;

	if (val && hci_dev_test_flag(hdev, HCI_MESH))
		cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
	else
		cp.filter_dup = filter_dup;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_ENABLE,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static int hci_le_set_addr_resolution_enable_sync(struct hci_dev *hdev, u8 val)
{
	if (!use_ll_privacy(hdev))
		return 0;

	/* If the controller is already in the requested resolving state we
	 * are done.
	 */
	if (val == hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE,
				     sizeof(val), &val, HCI_CMD_TIMEOUT);
}
static int hci_le_set_addr_resolution_enable_sync(struct hci_dev *hdev, u8 val)
{
	if (!use_ll_privacy(hdev))
		return 0;

	/* If controller is not/already resolving we are done. */
	if (val == hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE,
				     sizeof(val), &val, HCI_CMD_TIMEOUT);
}

static int hci_scan_disable_sync(struct hci_dev *hdev)
{
	int err;

	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return 0;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return 0;
	}

	err = hci_le_set_scan_enable_sync(hdev, LE_SCAN_DISABLE, 0x00);
	if (err) {
		bt_dev_err(hdev, "Unable to disable scanning: %d", err);
		return err;
	}

	return err;
}

static bool scan_use_rpa(struct hci_dev *hdev)
{
	return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

static void hci_start_interleave_scan(struct hci_dev *hdev)
{
	hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
	queue_delayed_work(hdev->req_workqueue,
			   &hdev->interleave_scan, 0);
}

static bool is_interleave_scanning(struct hci_dev *hdev)
{
	return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
}

static void cancel_interleave_scan(struct hci_dev *hdev)
{
	bt_dev_dbg(hdev, "cancelling interleave scan");

	cancel_delayed_work_sync(&hdev->interleave_scan);

	hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
}

/* Return true if an interleave scan was started as a result of this call;
 * otherwise return false.
 */
static bool hci_update_interleaved_scan_sync(struct hci_dev *hdev)
{
	/* Do interleaved scan only if all of the following are true:
	 * - There is at least one ADV monitor
	 * - At least one pending LE connection or one device to be scanned for
	 * - Monitor offloading is not supported
	 * If so, we should alternate between allowlist scan and one without
	 * any filters to save power.
	 */
	bool use_interleaving = hci_is_adv_monitoring(hdev) &&
				!(list_empty(&hdev->pend_le_conns) &&
				  list_empty(&hdev->pend_le_reports)) &&
				hci_get_adv_monitor_offload_ext(hdev) ==
				    HCI_ADV_MONITOR_EXT_NONE;
	bool is_interleaving = is_interleave_scanning(hdev);

	if (use_interleaving && !is_interleaving) {
		hci_start_interleave_scan(hdev);
		bt_dev_dbg(hdev, "starting interleave scan");
		return true;
	}

	if (!use_interleaving && is_interleaving)
		cancel_interleave_scan(hdev);

	return false;
}

/* Removes a connection from the resolving list if needed. */
static int hci_le_del_resolve_list_sync(struct hci_dev *hdev,
					bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct hci_cp_le_del_from_resolv_list cp;
	struct bdaddr_list_with_irk *entry;

	if (!use_ll_privacy(hdev))
		return 0;

	/* Check if the IRK has been programmed */
	entry = hci_bdaddr_list_lookup_with_irk(&hdev->le_resolv_list, bdaddr,
						bdaddr_type);
	if (!entry)
		return 0;

	cp.bdaddr_type = bdaddr_type;
	bacpy(&cp.bdaddr, bdaddr);

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}
static int hci_le_del_accept_list_sync(struct hci_dev *hdev,
				       bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct hci_cp_le_del_from_accept_list cp;
	int err;

	/* Check if device is on accept list before removing it */
	if (!hci_bdaddr_list_lookup(&hdev->le_accept_list, bdaddr, bdaddr_type))
		return 0;

	cp.bdaddr_type = bdaddr_type;
	bacpy(&cp.bdaddr, bdaddr);

	/* Ignore errors when removing from the resolving list, as it is
	 * likely that the device was never added.
	 */
	hci_le_del_resolve_list_sync(hdev, &cp.bdaddr, cp.bdaddr_type);

	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST,
				    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
	if (err) {
		bt_dev_err(hdev, "Unable to remove from allow list: %d", err);
		return err;
	}

	bt_dev_dbg(hdev, "Remove %pMR (0x%x) from allow list", &cp.bdaddr,
		   cp.bdaddr_type);

	return 0;
}

struct conn_params {
	bdaddr_t addr;
	u8 addr_type;
	hci_conn_flags_t flags;
	u8 privacy_mode;
};

/* Adds a connection to the resolving list if needed.
 * Setting params to NULL programs the local hdev->irk.
 */
static int hci_le_add_resolve_list_sync(struct hci_dev *hdev,
					struct conn_params *params)
{
	struct hci_cp_le_add_to_resolv_list cp;
	struct smp_irk *irk;
	struct bdaddr_list_with_irk *entry;
	struct hci_conn_params *p;

	if (!use_ll_privacy(hdev))
		return 0;

	/* Attempt to program local identity address, type and irk if params is
	 * NULL.
	 */
	if (!params) {
		if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
			return 0;

		hci_copy_identity_address(hdev, &cp.bdaddr, &cp.bdaddr_type);
		memcpy(cp.peer_irk, hdev->irk, 16);
		goto done;
	}

	irk = hci_find_irk_by_addr(hdev, &params->addr, params->addr_type);
	if (!irk)
		return 0;

	/* Check if the IRK has _not_ been programmed yet. */
	entry = hci_bdaddr_list_lookup_with_irk(&hdev->le_resolv_list,
						&params->addr,
						params->addr_type);
	if (entry)
		return 0;

	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);
	memcpy(cp.peer_irk, irk->val, 16);

	/* Default privacy mode is always Network */
	params->privacy_mode = HCI_NETWORK_PRIVACY;

	rcu_read_lock();
	p = hci_pend_le_action_lookup(&hdev->pend_le_conns,
				      &params->addr, params->addr_type);
	if (!p)
		p = hci_pend_le_action_lookup(&hdev->pend_le_reports,
					      &params->addr,
					      params->addr_type);
	if (p)
		WRITE_ONCE(p->privacy_mode, HCI_NETWORK_PRIVACY);
	rcu_read_unlock();

done:
	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		memcpy(cp.local_irk, hdev->irk, 16);
	else
		memset(cp.local_irk, 0, 16);

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}
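/* Example: passing a NULL params programs only the local identity
 * address and IRK, as done from hci_powered_update_adv_sync() below
 * while the resolving list is still empty:
 *
 *	err = hci_le_add_resolve_list_sync(hdev, NULL);
 */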
/* Set Device Privacy Mode. */
static int hci_le_set_privacy_mode_sync(struct hci_dev *hdev,
					struct conn_params *params)
{
	struct hci_cp_le_set_privacy_mode cp;
	struct smp_irk *irk;

	/* If device privacy mode has already been set there is nothing to do */
	if (params->privacy_mode == HCI_DEVICE_PRIVACY)
		return 0;

	/* Check if HCI_CONN_FLAG_DEVICE_PRIVACY has been set as it also
	 * indicates that LL Privacy has been enabled and
	 * HCI_OP_LE_SET_PRIVACY_MODE is supported.
	 */
	if (!(params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY))
		return 0;

	irk = hci_find_irk_by_addr(hdev, &params->addr, params->addr_type);
	if (!irk)
		return 0;

	memset(&cp, 0, sizeof(cp));
	cp.bdaddr_type = irk->addr_type;
	bacpy(&cp.bdaddr, &irk->bdaddr);
	cp.mode = HCI_DEVICE_PRIVACY;

	/* Note: params->privacy_mode is not updated since it is a copy */

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PRIVACY_MODE,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

/* Adds a connection to the allow list if needed. If the device uses an RPA
 * (has an IRK), this also attempts to program the device in the resolving
 * list and to set the privacy mode properly.
 */
static int hci_le_add_accept_list_sync(struct hci_dev *hdev,
				       struct conn_params *params,
				       u8 *num_entries)
{
	struct hci_cp_le_add_to_accept_list cp;
	int err;

	/* During suspend, only wakeable devices can be in acceptlist */
	if (hdev->suspended &&
	    !(params->flags & HCI_CONN_FLAG_REMOTE_WAKEUP)) {
		hci_le_del_accept_list_sync(hdev, &params->addr,
					    params->addr_type);
		return 0;
	}

	/* Select filter policy to accept all advertising */
	if (*num_entries >= hdev->le_accept_list_size)
		return -ENOSPC;

	/* Accept list can not be used with RPAs */
	if (!use_ll_privacy(hdev) &&
	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type))
		return -EINVAL;

	/* Attempt to program the device in the resolving list first, to
	 * avoid having to roll back in case it fails: since the resolving
	 * list is dynamic, it can probably be smaller than the accept list.
	 */
	err = hci_le_add_resolve_list_sync(hdev, params);
	if (err) {
		bt_dev_err(hdev, "Unable to add to resolve list: %d", err);
		return err;
	}

	/* Set Privacy Mode */
	err = hci_le_set_privacy_mode_sync(hdev, params);
	if (err) {
		bt_dev_err(hdev, "Unable to set privacy mode: %d", err);
		return err;
	}

	/* Check if already in accept list */
	if (hci_bdaddr_list_lookup(&hdev->le_accept_list, &params->addr,
				   params->addr_type))
		return 0;

	*num_entries += 1;
	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);

	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST,
				    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
	if (err) {
		bt_dev_err(hdev, "Unable to add to allow list: %d", err);
		/* Rollback the device from the resolving list */
		hci_le_del_resolve_list_sync(hdev, &cp.bdaddr, cp.bdaddr_type);
		return err;
	}

	bt_dev_dbg(hdev, "Add %pMR (0x%x) to allow list", &cp.bdaddr,
		   cp.bdaddr_type);

	return 0;
}
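/* Note on ordering: the resolving list entry is programmed before the
 * accept list entry, so a failure of the (typically smaller) resolving
 * list is detected first; if the accept list command fails afterwards,
 * the resolving list entry is rolled back with
 * hci_le_del_resolve_list_sync(), as seen above.
 */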
/* This function disables/pauses all advertising instances */
static int hci_pause_advertising_sync(struct hci_dev *hdev)
{
	int err;
	int old_state;

	/* If already been paused there is nothing to do. */
	if (hdev->advertising_paused)
		return 0;

	bt_dev_dbg(hdev, "Pausing directed advertising");

	/* Stop directed advertising */
	old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
	if (old_state) {
		/* When discoverable timeout triggers, then just make sure
		 * the limited discoverable flag is cleared. Even in the case
		 * of a timeout triggered from general discoverable, it is
		 * safe to unconditionally clear the flag.
		 */
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hdev->discov_timeout = 0;
	}

	bt_dev_dbg(hdev, "Pausing advertising instances");

	/* Call to disable any advertisements active on the controller.
	 * This will succeed even if no advertisements are configured.
	 */
	err = hci_disable_advertising_sync(hdev);
	if (err)
		return err;

	/* If we are using software rotation, pause the loop */
	if (!ext_adv_capable(hdev))
		cancel_adv_timeout(hdev);

	hdev->advertising_paused = true;
	hdev->advertising_old_state = old_state;

	return 0;
}

/* This function enables all user advertising instances */
static int hci_resume_advertising_sync(struct hci_dev *hdev)
{
	struct adv_info *adv, *tmp;
	int err = 0;

	/* If advertising has not been paused there is nothing to do. */
	if (!hdev->advertising_paused)
		return 0;

	/* Resume directed advertising */
	hdev->advertising_paused = false;
	if (hdev->advertising_old_state) {
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
		hdev->advertising_old_state = 0;
	}

	bt_dev_dbg(hdev, "Resuming advertising instances");

	if (ext_adv_capable(hdev)) {
		/* Call for each tracked instance to be re-enabled */
		list_for_each_entry_safe(adv, tmp, &hdev->adv_instances, list) {
			err = hci_enable_ext_advertising_sync(hdev,
							      adv->instance);
			if (!err)
				continue;

			/* If the instance cannot be resumed remove it */
			hci_remove_ext_adv_instance_sync(hdev, adv->instance,
							 NULL);
		}
	} else {
		/* Schedule for most recent instance to be restarted and begin
		 * the software rotation loop
		 */
		err = hci_schedule_adv_instance_sync(hdev,
						     hdev->cur_adv_instance,
						     true);
	}

	hdev->advertising_paused = false;

	return err;
}

static int hci_pause_addr_resolution(struct hci_dev *hdev)
{
	int err;

	if (!use_ll_privacy(hdev))
		return 0;

	if (!hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
		return 0;

	/* Cannot disable addr resolution if scanning is enabled or
	 * when initiating an LE connection.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
	    hci_lookup_le_connect(hdev)) {
		bt_dev_err(hdev, "Command not allowed when scan/LE connect");
		return -EPERM;
	}

	/* Cannot disable addr resolution if advertising is enabled. */
	err = hci_pause_advertising_sync(hdev);
	if (err) {
		bt_dev_err(hdev, "Pause advertising failed: %d", err);
		return err;
	}

	err = hci_le_set_addr_resolution_enable_sync(hdev, 0x00);
	if (err)
		bt_dev_err(hdev, "Unable to disable Address Resolution: %d",
			   err);

	/* If address resolution was successfully disabled and RPAs are in
	 * use, keep advertising paused and return.
	 */
	if (!err && scan_use_rpa(hdev))
		return 0;

	hci_resume_advertising_sync(hdev);
	return err;
}
struct sk_buff *hci_read_local_oob_data_sync(struct hci_dev *hdev,
					     bool extended, struct sock *sk)
{
	u16 opcode = extended ? HCI_OP_READ_LOCAL_OOB_EXT_DATA :
		HCI_OP_READ_LOCAL_OOB_DATA;

	return __hci_cmd_sync_sk(hdev, opcode, 0, NULL, 0, HCI_CMD_TIMEOUT, sk);
}

static struct conn_params *conn_params_copy(struct list_head *list, size_t *n)
{
	struct hci_conn_params *params;
	struct conn_params *p;
	size_t i;

	rcu_read_lock();

	i = 0;
	list_for_each_entry_rcu(params, list, action)
		++i;
	*n = i;

	rcu_read_unlock();

	p = kvcalloc(*n, sizeof(struct conn_params), GFP_KERNEL);
	if (!p)
		return NULL;

	rcu_read_lock();

	i = 0;
	list_for_each_entry_rcu(params, list, action) {
		/* Racing adds are handled in next scan update */
		if (i >= *n)
			break;

		/* No hdev->lock, but: addr, addr_type are immutable.
		 * privacy_mode is only written by us or in
		 * hci_cc_le_set_privacy_mode that we wait for.
		 * We should be idempotent so MGMT updating flags
		 * while we are processing is OK.
		 */
		bacpy(&p[i].addr, &params->addr);
		p[i].addr_type = params->addr_type;
		p[i].flags = READ_ONCE(params->flags);
		p[i].privacy_mode = READ_ONCE(params->privacy_mode);
		++i;
	}

	rcu_read_unlock();

	*n = i;
	return p;
}
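/* Example: the snapshot returned by conn_params_copy() is iterated
 * without holding hdev->lock and must be released with kvfree():
 *
 *	params = conn_params_copy(&hdev->pend_le_conns, &n);
 *	if (!params)
 *		return -ENOMEM;
 *	for (i = 0; i < n; ++i)
 *		... use params[i] ...
 *	kvfree(params);
 */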
/* Device must not be scanning when updating the accept list.
 *
 * Update is done using the following sequence:
 *
 * use_ll_privacy((Disable Advertising) -> Disable Resolving List) ->
 * Remove Devices From Accept List ->
 * (has IRK && use_ll_privacy(Remove Devices From Resolving List)) ->
 * Add Devices to Accept List ->
 * (has IRK && use_ll_privacy(Add Devices to Resolving List)) ->
 * use_ll_privacy(Enable Resolving List -> (Enable Advertising)) ->
 * Enable Scanning
 *
 * In case of failure, advertising is restored to its original state and the
 * returned filter policy disables use of the accept list, since either the
 * accept list or the resolving list could not be programmed.
 */
static u8 hci_update_accept_list_sync(struct hci_dev *hdev)
{
	struct conn_params *params;
	struct bdaddr_list *b, *t;
	u8 num_entries = 0;
	bool pend_conn, pend_report;
	u8 filter_policy;
	size_t i, n;
	int err;

	/* Pause advertising if resolving list can be used as controllers
	 * cannot accept resolving list modifications while advertising.
	 */
	if (use_ll_privacy(hdev)) {
		err = hci_pause_advertising_sync(hdev);
		if (err) {
			bt_dev_err(hdev, "pause advertising failed: %d", err);
			return 0x00;
		}
	}

	/* Disable address resolution while reprogramming accept list since
	 * devices that do have an IRK will be programmed in the resolving list
	 * when LL Privacy is enabled.
	 */
	err = hci_le_set_addr_resolution_enable_sync(hdev, 0x00);
	if (err) {
		bt_dev_err(hdev, "Unable to disable LL privacy: %d", err);
		goto done;
	}

	/* Go through the current accept list programmed into the
	 * controller one by one and check if that address is connected or is
	 * still in the list of pending connections or list of devices to
	 * report. If not present in either list, then remove it from
	 * the controller.
	 */
	list_for_each_entry_safe(b, t, &hdev->le_accept_list, list) {
		if (hci_conn_hash_lookup_le(hdev, &b->bdaddr, b->bdaddr_type))
			continue;

		/* Pointers not dereferenced, no locks needed */
		pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
						      &b->bdaddr,
						      b->bdaddr_type);
		pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
							&b->bdaddr,
							b->bdaddr_type);

		/* If the device is not likely to connect or report,
		 * remove it from the acceptlist.
		 */
		if (!pend_conn && !pend_report) {
			hci_le_del_accept_list_sync(hdev, &b->bdaddr,
						    b->bdaddr_type);
			continue;
		}

		num_entries++;
	}

	/* Since all no longer valid accept list entries have been
	 * removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If the list of the devices is larger than the list of
	 * available accept list entries in the controller, then
	 * just abort and return a filter policy value to not use the
	 * accept list.
	 *
	 * The list and params may be mutated while we wait for events,
	 * so make a copy and iterate it.
	 */

	params = conn_params_copy(&hdev->pend_le_conns, &n);
	if (!params) {
		err = -ENOMEM;
		goto done;
	}

	for (i = 0; i < n; ++i) {
		err = hci_le_add_accept_list_sync(hdev, &params[i],
						  &num_entries);
		if (err) {
			kvfree(params);
			goto done;
		}
	}

	kvfree(params);

	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * accept list if there is still space. Abort if space runs out.
	 */

	params = conn_params_copy(&hdev->pend_le_reports, &n);
	if (!params) {
		err = -ENOMEM;
		goto done;
	}

	for (i = 0; i < n; ++i) {
		err = hci_le_add_accept_list_sync(hdev, &params[i],
						  &num_entries);
		if (err) {
			kvfree(params);
			goto done;
		}
	}

	kvfree(params);

	/* Use the allowlist unless the following conditions are all true:
	 * - We are not currently suspending
	 * - There are 1 or more ADV monitors registered and it's not offloaded
	 * - Interleaved scanning is not currently using the allowlist
	 */
	if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
	    hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE &&
	    hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
		err = -EINVAL;

done:
	filter_policy = err ? 0x00 : 0x01;

	/* Enable address resolution when LL Privacy is enabled. */
	err = hci_le_set_addr_resolution_enable_sync(hdev, 0x01);
	if (err)
		bt_dev_err(hdev, "Unable to enable LL privacy: %d", err);

	/* Resume advertising if it was paused */
	if (use_ll_privacy(hdev))
		hci_resume_advertising_sync(hdev);

	/* Select filter policy to use accept list */
	return filter_policy;
}
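/* The value returned by hci_update_accept_list_sync() is used directly
 * as the scan filter policy: 0x00 accepts all advertising, 0x01
 * restricts reports to accept list entries, and hci_passive_scan_sync()
 * below may OR in 0x02 to select the extended filter policies that also
 * report directed advertising.
 */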
static int hci_le_set_ext_scan_param_sync(struct hci_dev *hdev, u8 type,
					  u16 interval, u16 window,
					  u8 own_addr_type, u8 filter_policy)
{
	struct hci_cp_le_set_ext_scan_params *cp;
	struct hci_cp_le_scan_phy_params *phy;
	u8 data[sizeof(*cp) + sizeof(*phy) * 2];
	u8 num_phy = 0;

	cp = (void *)data;
	phy = (void *)cp->data;

	memset(data, 0, sizeof(data));

	cp->own_addr_type = own_addr_type;
	cp->filter_policy = filter_policy;

	if (scan_1m(hdev) || scan_2m(hdev)) {
		cp->scanning_phys |= LE_SCAN_PHY_1M;

		phy->type = type;
		phy->interval = cpu_to_le16(interval);
		phy->window = cpu_to_le16(window);

		num_phy++;
		phy++;
	}

	if (scan_coded(hdev)) {
		cp->scanning_phys |= LE_SCAN_PHY_CODED;

		phy->type = type;
		phy->interval = cpu_to_le16(interval);
		phy->window = cpu_to_le16(window);

		num_phy++;
		phy++;
	}

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
				     sizeof(*cp) + sizeof(*phy) * num_phy,
				     data, HCI_CMD_TIMEOUT);
}

static int hci_le_set_scan_param_sync(struct hci_dev *hdev, u8 type,
				      u16 interval, u16 window,
				      u8 own_addr_type, u8 filter_policy)
{
	struct hci_cp_le_set_scan_param cp;

	if (use_ext_scan(hdev))
		return hci_le_set_ext_scan_param_sync(hdev, type, interval,
						      window, own_addr_type,
						      filter_policy);

	memset(&cp, 0, sizeof(cp));
	cp.type = type;
	cp.interval = cpu_to_le16(interval);
	cp.window = cpu_to_le16(window);
	cp.own_address_type = own_addr_type;
	cp.filter_policy = filter_policy;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_PARAM,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static int hci_start_scan_sync(struct hci_dev *hdev, u8 type, u16 interval,
			       u16 window, u8 own_addr_type, u8 filter_policy,
			       u8 filter_dup)
{
	int err;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return 0;
	}

	err = hci_le_set_scan_param_sync(hdev, type, interval, window,
					 own_addr_type, filter_policy);
	if (err)
		return err;

	return hci_le_set_scan_enable_sync(hdev, LE_SCAN_ENABLE, filter_dup);
}
static int hci_passive_scan_sync(struct hci_dev *hdev)
{
	u8 own_addr_type;
	u8 filter_policy;
	u16 window, interval;
	u8 filter_dups = LE_SCAN_FILTER_DUP_ENABLE;
	int err;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return 0;
	}

	err = hci_scan_disable_sync(hdev);
	if (err) {
		bt_dev_err(hdev, "disable scanning failed: %d", err);
		return err;
	}

	/* Set require_privacy to false since no SCAN_REQ are sent
	 * during passive scanning. Not using a non-resolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address_sync(hdev, false, scan_use_rpa(hdev),
					   &own_addr_type))
		return 0;

	if (hdev->enable_advmon_interleave_scan &&
	    hci_update_interleaved_scan_sync(hdev))
		return 0;

	bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);

	/* Adding or removing entries from the accept list must
	 * happen before enabling scanning. The controller does
	 * not allow accept list modification while scanning.
	 */
	filter_policy = hci_update_accept_list_sync(hdev);

	/* When the controller is using random resolvable addresses, and LE
	 * privacy is therefore enabled, controllers with Extended Scanner
	 * Filter Policies support can also handle directed advertising.
	 *
	 * So instead of using filter policies 0x00 (no acceptlist)
	 * and 0x01 (acceptlist enabled) use the new filter policies
	 * 0x02 (no acceptlist) and 0x03 (acceptlist enabled).
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
		filter_policy |= 0x02;

	if (hdev->suspended) {
		window = hdev->le_scan_window_suspend;
		interval = hdev->le_scan_int_suspend;
	} else if (hci_is_le_conn_scanning(hdev)) {
		window = hdev->le_scan_window_connect;
		interval = hdev->le_scan_int_connect;
	} else if (hci_is_adv_monitoring(hdev)) {
		window = hdev->le_scan_window_adv_monitor;
		interval = hdev->le_scan_int_adv_monitor;
	} else {
		window = hdev->le_scan_window;
		interval = hdev->le_scan_interval;
	}

	/* Disable all filtering for Mesh */
	if (hci_dev_test_flag(hdev, HCI_MESH)) {
		filter_policy = 0;
		filter_dups = LE_SCAN_FILTER_DUP_DISABLE;
	}

	bt_dev_dbg(hdev, "LE passive scan with acceptlist = %d", filter_policy);

	return hci_start_scan_sync(hdev, LE_SCAN_PASSIVE, interval, window,
				   own_addr_type, filter_policy, filter_dups);
}
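/* The scan interval and window chosen above are in units of 0.625 ms,
 * as defined by the HCI specification. For example, with the common
 * defaults of interval 0x0060 and window 0x0030 the controller listens
 * for 48 * 0.625 = 30 ms out of every 96 * 0.625 = 60 ms.
 */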
/* This function controls the passive scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections we start the background scanning,
 * otherwise we stop it in the following sequence:
 *
 * If there are devices to scan:
 *
 * Disable Scanning -> Update Accept List ->
 * use_ll_privacy((Disable Advertising) -> Disable Resolving List ->
 * Update Resolving List -> Enable Resolving List -> (Enable Advertising)) ->
 * Enable Scanning
 *
 * Otherwise:
 *
 * Disable Scanning
 */
int hci_update_passive_scan_sync(struct hci_dev *hdev)
{
	int err;

	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return 0;

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return 0;

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return 0;

	/* Reset RSSI and UUID filters when starting background scanning
	 * since these filters are meant for service discovery only.
	 *
	 * The Start Discovery and Start Service Discovery operations
	 * ensure to set proper values for RSSI threshold and UUID
	 * filter list. So it is safe to just reset them here.
	 */
	hci_discovery_filter_clear(hdev);

	bt_dev_dbg(hdev, "ADV monitoring is %s",
		   hci_is_adv_monitoring(hdev) ? "on" : "off");

	if (!hci_dev_test_flag(hdev, HCI_MESH) &&
	    list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports) &&
	    !hci_is_adv_monitoring(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_PA_SYNC)) {
		/* If there are no pending LE connections, no devices to be
		 * scanned for and no ADV monitors, we should stop the
		 * background scanning.
		 */

		bt_dev_dbg(hdev, "stopping background scanning");

		err = hci_scan_disable_sync(hdev);
		if (err)
			bt_dev_err(hdev, "stop background scanning failed: %d",
				   err);
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		if (hci_lookup_le_connect(hdev))
			return 0;

		bt_dev_dbg(hdev, "start background scanning");

		err = hci_passive_scan_sync(hdev);
		if (err)
			bt_dev_err(hdev, "start background scanning failed: %d",
				   err);
	}

	return err;
}

static int update_scan_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_scan_sync(hdev);
}

int hci_update_scan(struct hci_dev *hdev)
{
	return hci_cmd_sync_queue(hdev, update_scan_sync, NULL, NULL);
}

static int update_passive_scan_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}

int hci_update_passive_scan(struct hci_dev *hdev)
{
	/* Only queue if it would have any effect */
	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return 0;

	return hci_cmd_sync_queue(hdev, update_passive_scan_sync, NULL, NULL);
}

int hci_write_sc_support_sync(struct hci_dev *hdev, u8 val)
{
	int err;

	if (!bredr_sc_enabled(hdev) || lmp_host_sc_capable(hdev))
		return 0;

	err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SC_SUPPORT,
				    sizeof(val), &val, HCI_CMD_TIMEOUT);

	if (!err) {
		if (val) {
			hdev->features[1][0] |= LMP_HOST_SC;
			hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		} else {
			hdev->features[1][0] &= ~LMP_HOST_SC;
			hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
		}
	}

	return err;
}

int hci_write_ssp_mode_sync(struct hci_dev *hdev, u8 mode)
{
	int err;

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED) ||
	    lmp_host_ssp_capable(hdev))
		return 0;

	if (!mode && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS)) {
		__hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
				      sizeof(mode), &mode, HCI_CMD_TIMEOUT);
	}

	err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode, HCI_CMD_TIMEOUT);
	if (err)
		return err;

	return hci_write_sc_support_sync(hdev, 0x01);
}
int hci_write_le_host_supported_sync(struct hci_dev *hdev, u8 le, u8 simul)
{
	struct hci_cp_write_le_host_supported cp;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
	    !lmp_bredr_capable(hdev))
		return 0;

	/* Check first if we already have the right host state
	 * (host features set)
	 */
	if (le == lmp_host_le_capable(hdev) &&
	    simul == lmp_host_le_br_capable(hdev))
		return 0;

	memset(&cp, 0, sizeof(cp));

	cp.le = le;
	cp.simul = simul;

	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static int hci_powered_update_adv_sync(struct hci_dev *hdev)
{
	struct adv_info *adv, *tmp;
	int err;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return 0;

	/* If RPA Resolution has not been enabled yet it means the
	 * resolving list is empty and we should attempt to program the
	 * local IRK in order to support using own_addr_type
	 * ADDR_LE_DEV_RANDOM_RESOLVED (0x03).
	 */
	if (!hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) {
		hci_le_add_resolve_list_sync(hdev, NULL);
		hci_le_set_addr_resolution_enable_sync(hdev, 0x01);
	}

	/* Make sure the controller has a good default for
	 * advertising data. This also applies to the case
	 * where BR/EDR was toggled during the AUTO_OFF phase.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances)) {
		if (ext_adv_capable(hdev)) {
			err = hci_setup_ext_adv_instance_sync(hdev, 0x00);
			if (!err)
				hci_update_scan_rsp_data_sync(hdev, 0x00);
		} else {
			err = hci_update_adv_data_sync(hdev, 0x00);
			if (!err)
				hci_update_scan_rsp_data_sync(hdev, 0x00);
		}

		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			hci_enable_advertising_sync(hdev);
	}

	/* Call for each tracked instance to be scheduled */
	list_for_each_entry_safe(adv, tmp, &hdev->adv_instances, list)
		hci_schedule_adv_instance_sync(hdev, adv->instance, true);

	return 0;
}

static int hci_write_auth_enable_sync(struct hci_dev *hdev)
{
	u8 link_sec;

	link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
	if (link_sec == test_bit(HCI_AUTH, &hdev->flags))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_AUTH_ENABLE,
				     sizeof(link_sec), &link_sec,
				     HCI_CMD_TIMEOUT);
}

int hci_write_fast_connectable_sync(struct hci_dev *hdev, bool enable)
{
	struct hci_cp_write_page_scan_activity cp;
	u8 type;
	int err = 0;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return 0;

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return 0;

	memset(&cp, 0, sizeof(cp));

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		cp.interval = cpu_to_le16(0x0100);
	} else {
		type = hdev->def_page_scan_type;
		cp.interval = cpu_to_le16(hdev->def_page_scan_int);
	}

	cp.window = cpu_to_le16(hdev->def_page_scan_window);

	if (__cpu_to_le16(hdev->page_scan_interval) != cp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != cp.window) {
		err = __hci_cmd_sync_status(hdev,
					    HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
					    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
		if (err)
			return err;
	}

	if (hdev->page_scan_type != type)
		err = __hci_cmd_sync_status(hdev,
					    HCI_OP_WRITE_PAGE_SCAN_TYPE,
					    sizeof(type), &type,
					    HCI_CMD_TIMEOUT);

	return err;
}
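/* The fast connectable interval above follows the usual HCI convention
 * of 0.625 ms units: 0x0100 is 256 * 0.625 = 160 ms, matching the
 * comment in hci_write_fast_connectable_sync().
 */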
static bool disconnected_accept_list_entries(struct hci_dev *hdev)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->accept_list, list) {
		struct hci_conn *conn;

		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
		if (!conn)
			return true;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			return true;
	}

	return false;
}

static int hci_write_scan_enable_sync(struct hci_dev *hdev, u8 val)
{
	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE,
				     sizeof(val), &val,
				     HCI_CMD_TIMEOUT);
}

int hci_update_scan_sync(struct hci_dev *hdev)
{
	u8 scan;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return 0;

	if (!hdev_is_powered(hdev))
		return 0;

	if (mgmt_powering_down(hdev))
		return 0;

	if (hdev->scanning_paused)
		return 0;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
	    disconnected_accept_list_entries(hdev))
		scan = SCAN_PAGE;
	else
		scan = SCAN_DISABLED;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		scan |= SCAN_INQUIRY;

	if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
	    test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
		return 0;

	return hci_write_scan_enable_sync(hdev, scan);
}

int hci_update_name_sync(struct hci_dev *hdev)
{
	struct hci_cp_write_local_name cp;

	memset(&cp, 0, sizeof(cp));

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LOCAL_NAME,
				     sizeof(cp), &cp,
				     HCI_CMD_TIMEOUT);
}
/* This function performs the powered update HCI command sequence after the
 * HCI init sequence, which ends up resetting all states; the sequence is as
 * follows:
 *
 * HCI_SSP_ENABLED(Enable SSP)
 * HCI_LE_ENABLED(Enable LE)
 * HCI_LE_ENABLED(use_ll_privacy(Add local IRK to Resolving List) ->
 * Update adv data)
 * Enable Authentication
 * lmp_bredr_capable(Set Fast Connectable -> Set Scan Type -> Set Class ->
 * Set Name -> Set EIR)
 * HCI_FORCE_STATIC_ADDR | BDADDR_ANY && !HCI_BREDR_ENABLED (Set Static Address)
 */
int hci_powered_update_sync(struct hci_dev *hdev)
{
	int err;

	/* Register the available SMP channels (BR/EDR and LE) only when
	 * successfully powering on the controller. This late
	 * registration is required so that LE SMP can clearly decide if
	 * the public address or static address is used.
	 */
	smp_register(hdev);

	err = hci_write_ssp_mode_sync(hdev, 0x01);
	if (err)
		return err;

	err = hci_write_le_host_supported_sync(hdev, 0x01, 0x00);
	if (err)
		return err;

	err = hci_powered_update_adv_sync(hdev);
	if (err)
		return err;

	err = hci_write_auth_enable_sync(hdev);
	if (err)
		return err;

	if (lmp_bredr_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
			hci_write_fast_connectable_sync(hdev, true);
		else
			hci_write_fast_connectable_sync(hdev, false);
		hci_update_scan_sync(hdev);
		hci_update_class_sync(hdev);
		hci_update_name_sync(hdev);
		hci_update_eir_sync(hdev);
	}

	/* If forcing static address is in use or there is no public
	 * address use the static address as random address (but skip
	 * the HCI command if the current random address is already the
	 * static one).
	 *
	 * In case BR/EDR has been disabled on a dual-mode controller
	 * and a static address has been configured, then use that
	 * address instead of the public BR/EDR address.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    (!bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			return hci_set_random_addr_sync(hdev,
							&hdev->static_addr);
	}

	return 0;
}

/**
 * hci_dev_get_bd_addr_from_property - Get the Bluetooth Device Address
 *				       (BD_ADDR) for a HCI device from
 *				       a firmware node property.
 * @hdev:	The HCI device
 *
 * Search the firmware node for 'local-bd-address'.
 *
 * All-zero BD addresses are rejected, because those could be properties
 * that exist in the firmware tables, but were not updated by the firmware. For
 * example, the DTS could define 'local-bd-address' with a zero BD address.
 */
static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev)
{
	struct fwnode_handle *fwnode = dev_fwnode(hdev->dev.parent);
	bdaddr_t ba;
	int ret;

	ret = fwnode_property_read_u8_array(fwnode, "local-bd-address",
					    (u8 *)&ba, sizeof(ba));
	if (ret < 0 || !bacmp(&ba, BDADDR_ANY))
		return;

	bacpy(&hdev->public_addr, &ba);
}

struct hci_init_stage {
	int (*func)(struct hci_dev *hdev);
};

/* Run init stage NULL terminated function table */
static int hci_init_stage_sync(struct hci_dev *hdev,
			       const struct hci_init_stage *stage)
{
	size_t i;

	for (i = 0; stage[i].func; i++) {
		int err;

		err = stage[i].func(hdev);
		if (err)
			return err;
	}

	return 0;
}

/* Read Local Version */
static int hci_read_local_version_sync(struct hci_dev *hdev)
{
	return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_VERSION,
				     0, NULL, HCI_CMD_TIMEOUT);
}

/* Read BD Address */
static int hci_read_bd_addr_sync(struct hci_dev *hdev)
{
	return __hci_cmd_sync_status(hdev, HCI_OP_READ_BD_ADDR,
				     0, NULL, HCI_CMD_TIMEOUT);
}

#define HCI_INIT(_func) \
{ \
	.func = _func, \
}

static const struct hci_init_stage hci_init0[] = {
	/* HCI_OP_READ_LOCAL_VERSION */
	HCI_INIT(hci_read_local_version_sync),
	/* HCI_OP_READ_BD_ADDR */
	HCI_INIT(hci_read_bd_addr_sync),
	{}
};

int hci_reset_sync(struct hci_dev *hdev)
{
	int err;

	set_bit(HCI_RESET, &hdev->flags);

	err = __hci_cmd_sync_status(hdev, HCI_OP_RESET, 0, NULL,
				    HCI_CMD_TIMEOUT);
	if (err)
		return err;

	return 0;
}

static int hci_init0_sync(struct hci_dev *hdev)
{
	int err;

	bt_dev_dbg(hdev, "");

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		err = hci_reset_sync(hdev);
		if (err)
			return err;
	}

	return hci_init_stage_sync(hdev, hci_init0);
}

static int hci_unconf_init_sync(struct hci_dev *hdev)
{
	int err;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return 0;

	err = hci_init0_sync(hdev);
	if (err < 0)
		return err;

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	return 0;
}
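/* Example (hypothetical): additional init sequences follow the same
 * pattern of a NULL-terminated table walked by hci_init_stage_sync():
 *
 *	static const struct hci_init_stage my_init[] = {
 *		HCI_INIT(hci_read_local_version_sync),
 *		HCI_INIT(hci_read_bd_addr_sync),
 *		{}
 *	};
 *
 *	err = hci_init_stage_sync(hdev, my_init);
 */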
/* Read Local Supported Features. */
static int hci_read_local_features_sync(struct hci_dev *hdev)
{
	/* Not all AMP controllers support this command */
	if (hdev->dev_type == HCI_AMP && !(hdev->commands[14] & 0x20))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_FEATURES,
				     0, NULL, HCI_CMD_TIMEOUT);
}

/* BR Controller init stage 1 command sequence */
static const struct hci_init_stage br_init1[] = {
	/* HCI_OP_READ_LOCAL_FEATURES */
	HCI_INIT(hci_read_local_features_sync),
	/* HCI_OP_READ_LOCAL_VERSION */
	HCI_INIT(hci_read_local_version_sync),
	/* HCI_OP_READ_BD_ADDR */
	HCI_INIT(hci_read_bd_addr_sync),
	{}
};

/* Read Local Commands */
static int hci_read_local_cmds_sync(struct hci_dev *hdev)
{
	/* All Bluetooth 1.2 and later controllers should support the
	 * HCI command for reading the local supported commands.
	 *
	 * Unfortunately some controllers indicate Bluetooth 1.2 support,
	 * but do not have support for this command. If that is the case,
	 * the driver can quirk the behavior and skip reading the local
	 * supported commands.
	 */
	if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
	    !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
		return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_COMMANDS,
					     0, NULL, HCI_CMD_TIMEOUT);

	return 0;
}

/* Read Local AMP Info */
static int hci_read_local_amp_info_sync(struct hci_dev *hdev)
{
	return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_AMP_INFO,
				     0, NULL, HCI_CMD_TIMEOUT);
}

/* Read Data Blk size */
static int hci_read_data_block_size_sync(struct hci_dev *hdev)
{
	return __hci_cmd_sync_status(hdev, HCI_OP_READ_DATA_BLOCK_SIZE,
				     0, NULL, HCI_CMD_TIMEOUT);
}

/* Read Flow Control Mode */
static int hci_read_flow_control_mode_sync(struct hci_dev *hdev)
{
	return __hci_cmd_sync_status(hdev, HCI_OP_READ_FLOW_CONTROL_MODE,
				     0, NULL, HCI_CMD_TIMEOUT);
}

/* Read Location Data */
static int hci_read_location_data_sync(struct hci_dev *hdev)
{
	return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCATION_DATA,
				     0, NULL, HCI_CMD_TIMEOUT);
}

/* AMP Controller init stage 1 command sequence */
static const struct hci_init_stage amp_init1[] = {
	/* HCI_OP_READ_LOCAL_VERSION */
	HCI_INIT(hci_read_local_version_sync),
	/* HCI_OP_READ_LOCAL_COMMANDS */
	HCI_INIT(hci_read_local_cmds_sync),
	/* HCI_OP_READ_LOCAL_AMP_INFO */
	HCI_INIT(hci_read_local_amp_info_sync),
	/* HCI_OP_READ_DATA_BLOCK_SIZE */
	HCI_INIT(hci_read_data_block_size_sync),
	/* HCI_OP_READ_FLOW_CONTROL_MODE */
	HCI_INIT(hci_read_flow_control_mode_sync),
	/* HCI_OP_READ_LOCATION_DATA */
	HCI_INIT(hci_read_location_data_sync),
	{}
};
static int hci_init1_sync(struct hci_dev *hdev)
{
	int err;

	bt_dev_dbg(hdev, "");

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		err = hci_reset_sync(hdev);
		if (err)
			return err;
	}

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
		return hci_init_stage_sync(hdev, br_init1);
	case HCI_AMP:
		hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
		return hci_init_stage_sync(hdev, amp_init1);
	default:
		bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type);
		break;
	}

	return 0;
}

/* AMP Controller init stage 2 command sequence */
static const struct hci_init_stage amp_init2[] = {
	/* HCI_OP_READ_LOCAL_FEATURES */
	HCI_INIT(hci_read_local_features_sync),
	{}
};

/* Read Buffer Size (ACL mtu, max pkt, etc.) */
static int hci_read_buffer_size_sync(struct hci_dev *hdev)
{
	return __hci_cmd_sync_status(hdev, HCI_OP_READ_BUFFER_SIZE,
				     0, NULL, HCI_CMD_TIMEOUT);
}

/* Read Class of Device */
static int hci_read_dev_class_sync(struct hci_dev *hdev)
{
	return __hci_cmd_sync_status(hdev, HCI_OP_READ_CLASS_OF_DEV,
				     0, NULL, HCI_CMD_TIMEOUT);
}

/* Read Local Name */
static int hci_read_local_name_sync(struct hci_dev *hdev)
{
	return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_NAME,
				     0, NULL, HCI_CMD_TIMEOUT);
}

/* Read Voice Setting */
static int hci_read_voice_setting_sync(struct hci_dev *hdev)
{
	return __hci_cmd_sync_status(hdev, HCI_OP_READ_VOICE_SETTING,
				     0, NULL, HCI_CMD_TIMEOUT);
}

/* Read Number of Supported IAC */
static int hci_read_num_supported_iac_sync(struct hci_dev *hdev)
{
	return __hci_cmd_sync_status(hdev, HCI_OP_READ_NUM_SUPPORTED_IAC,
				     0, NULL, HCI_CMD_TIMEOUT);
}

/* Read Current IAC LAP */
static int hci_read_current_iac_lap_sync(struct hci_dev *hdev)
{
	return __hci_cmd_sync_status(hdev, HCI_OP_READ_CURRENT_IAC_LAP,
				     0, NULL, HCI_CMD_TIMEOUT);
}

static int hci_set_event_filter_sync(struct hci_dev *hdev, u8 flt_type,
				     u8 cond_type, bdaddr_t *bdaddr,
				     u8 auto_accept)
{
	struct hci_cp_set_event_filter cp;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return 0;

	if (test_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks))
		return 0;

	memset(&cp, 0, sizeof(cp));
	cp.flt_type = flt_type;

	if (flt_type != HCI_FLT_CLEAR_ALL) {
		cp.cond_type = cond_type;
		bacpy(&cp.addr_conn_flt.bdaddr, bdaddr);
		cp.addr_conn_flt.auto_accept = auto_accept;
	}

	return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_FLT,
				     flt_type == HCI_FLT_CLEAR_ALL ?
				     sizeof(cp.flt_type) : sizeof(cp), &cp,
				     HCI_CMD_TIMEOUT);
}
static int hci_clear_event_filter_sync(struct hci_dev *hdev)
{
	if (!hci_dev_test_flag(hdev, HCI_EVENT_FILTER_CONFIGURED))
		return 0;

	/* In theory the state machine should not reach here unless
	 * a hci_set_event_filter_sync() call succeeds, but we do
	 * the check both for parity and as a future reminder.
	 */
	if (test_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks))
		return 0;

	return hci_set_event_filter_sync(hdev, HCI_FLT_CLEAR_ALL, 0x00,
					 BDADDR_ANY, 0x00);
}

/* Connection accept timeout ~20 secs */
static int hci_write_ca_timeout_sync(struct hci_dev *hdev)
{
	__le16 param = cpu_to_le16(0x7d00);

	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CA_TIMEOUT,
				     sizeof(param), &param, HCI_CMD_TIMEOUT);
}

/* BR Controller init stage 2 command sequence */
static const struct hci_init_stage br_init2[] = {
	/* HCI_OP_READ_BUFFER_SIZE */
	HCI_INIT(hci_read_buffer_size_sync),
	/* HCI_OP_READ_CLASS_OF_DEV */
	HCI_INIT(hci_read_dev_class_sync),
	/* HCI_OP_READ_LOCAL_NAME */
	HCI_INIT(hci_read_local_name_sync),
	/* HCI_OP_READ_VOICE_SETTING */
	HCI_INIT(hci_read_voice_setting_sync),
	/* HCI_OP_READ_NUM_SUPPORTED_IAC */
	HCI_INIT(hci_read_num_supported_iac_sync),
	/* HCI_OP_READ_CURRENT_IAC_LAP */
	HCI_INIT(hci_read_current_iac_lap_sync),
	/* HCI_OP_SET_EVENT_FLT */
	HCI_INIT(hci_clear_event_filter_sync),
	/* HCI_OP_WRITE_CA_TIMEOUT */
	HCI_INIT(hci_write_ca_timeout_sync),
	{}
};

static int hci_write_ssp_mode_1_sync(struct hci_dev *hdev)
{
	u8 mode = 0x01;

	if (!lmp_ssp_capable(hdev) || !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return 0;

	/* When SSP is available, then the host features page
	 * should also be available as well. However some
	 * controllers list the max_page as 0 as long as SSP
	 * has not been enabled. To achieve proper debugging
	 * output, force the minimum max_page to 1 at least.
	 */
	hdev->max_page = 0x01;

	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_MODE,
				     sizeof(mode), &mode, HCI_CMD_TIMEOUT);
}

static int hci_write_eir_sync(struct hci_dev *hdev)
{
	struct hci_cp_write_eir cp;

	if (!lmp_ssp_capable(hdev) || hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return 0;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(&cp, 0, sizeof(cp));

	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp,
				     HCI_CMD_TIMEOUT);
}
static int hci_write_inquiry_mode_sync(struct hci_dev *hdev)
{
	u8 mode;

	if (!lmp_inq_rssi_capable(hdev) &&
	    !test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
		return 0;

	/* If Extended Inquiry Result events are supported, then
	 * they are clearly preferred over Inquiry Result with RSSI
	 * events.
	 */
	mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_INQUIRY_MODE,
				     sizeof(mode), &mode, HCI_CMD_TIMEOUT);
}

static int hci_read_inq_rsp_tx_power_sync(struct hci_dev *hdev)
{
	if (!lmp_inq_tx_pwr_capable(hdev))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_READ_INQ_RSP_TX_POWER,
				     0, NULL, HCI_CMD_TIMEOUT);
}

static int hci_read_local_ext_features_sync(struct hci_dev *hdev, u8 page)
{
	struct hci_cp_read_local_ext_features cp;

	if (!lmp_ext_feat_capable(hdev))
		return 0;

	memset(&cp, 0, sizeof(cp));
	cp.page = page;

	return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static int hci_read_local_ext_features_1_sync(struct hci_dev *hdev)
{
	return hci_read_local_ext_features_sync(hdev, 0x01);
}

/* HCI Controller init stage 2 command sequence */
static const struct hci_init_stage hci_init2[] = {
	/* HCI_OP_READ_LOCAL_COMMANDS */
	HCI_INIT(hci_read_local_cmds_sync),
	/* HCI_OP_WRITE_SSP_MODE */
	HCI_INIT(hci_write_ssp_mode_1_sync),
	/* HCI_OP_WRITE_EIR */
	HCI_INIT(hci_write_eir_sync),
	/* HCI_OP_WRITE_INQUIRY_MODE */
	HCI_INIT(hci_write_inquiry_mode_sync),
	/* HCI_OP_READ_INQ_RSP_TX_POWER */
	HCI_INIT(hci_read_inq_rsp_tx_power_sync),
	/* HCI_OP_READ_LOCAL_EXT_FEATURES */
	HCI_INIT(hci_read_local_ext_features_1_sync),
	/* HCI_OP_WRITE_AUTH_ENABLE */
	HCI_INIT(hci_write_auth_enable_sync),
	{}
};

/* Read LE Buffer Size */
static int hci_le_read_buffer_size_sync(struct hci_dev *hdev)
{
	/* Use Read LE Buffer Size V2 if supported */
	if (iso_capable(hdev) && hdev->commands[41] & 0x20)
		return __hci_cmd_sync_status(hdev,
					     HCI_OP_LE_READ_BUFFER_SIZE_V2,
					     0, NULL, HCI_CMD_TIMEOUT);

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_BUFFER_SIZE,
				     0, NULL, HCI_CMD_TIMEOUT);
}

/* Read LE Local Supported Features */
static int hci_le_read_local_features_sync(struct hci_dev *hdev)
{
	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_LOCAL_FEATURES,
				     0, NULL, HCI_CMD_TIMEOUT);
}

/* Read LE Supported States */
static int hci_le_read_supported_states_sync(struct hci_dev *hdev)
{
	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_SUPPORTED_STATES,
				     0, NULL, HCI_CMD_TIMEOUT);
}

/* LE Controller init stage 2 command sequence */
static const struct hci_init_stage le_init2[] = {
	/* HCI_OP_LE_READ_LOCAL_FEATURES */
	HCI_INIT(hci_le_read_local_features_sync),
	/* HCI_OP_LE_READ_BUFFER_SIZE */
	HCI_INIT(hci_le_read_buffer_size_sync),
	/* HCI_OP_LE_READ_SUPPORTED_STATES */
	HCI_INIT(hci_le_read_supported_states_sync),
	{}
};
static int hci_init2_sync(struct hci_dev *hdev)
{
	int err;

	bt_dev_dbg(hdev, "");

	if (hdev->dev_type == HCI_AMP)
		return hci_init_stage_sync(hdev, amp_init2);

	err = hci_init_stage_sync(hdev, hci_init2);
	if (err)
		return err;

	if (lmp_bredr_capable(hdev)) {
		err = hci_init_stage_sync(hdev, br_init2);
		if (err)
			return err;
	} else {
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
	}

	if (lmp_le_capable(hdev)) {
		err = hci_init_stage_sync(hdev, le_init2);
		if (err)
			return err;
		/* LE-only controllers have LE implicitly enabled */
		if (!lmp_bredr_capable(hdev))
			hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	}

	return 0;
}
static int hci_set_event_mask_sync(struct hci_dev *hdev)
{
	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return 0;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */

		/* Don't set Disconnect Complete and mode change when
		 * suspended as that would wake up the host when
		 * disconnecting due to suspend.
		 */
		if (hdev->suspended) {
			events[0] &= 0xef;
			events[2] &= 0xf7;
		}
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */

		/* If the controller supports the Disconnect command, enable
		 * the corresponding event. In addition enable packet flow
		 * control related events.
		 */
		if (hdev->commands[0] & 0x20) {
			/* Don't set Disconnect Complete when suspended as that
			 * would wake up the host when disconnecting due to
			 * suspend.
			 */
			if (!hdev->suspended)
				events[0] |= 0x10; /* Disconnection Complete */
			events[2] |= 0x04; /* Number of Completed Packets */
			events[3] |= 0x02; /* Data Buffer Overflow */
		}

		/* If the controller supports the Read Remote Version
		 * Information command, enable the corresponding event.
		 */
		if (hdev->commands[2] & 0x80)
			events[1] |= 0x08; /* Read Remote Version Information
					    * Complete
					    */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_ext_feat_capable(hdev))
		events[4] |= 0x04; /* Read Remote Extended Features Complete */

	if (lmp_esco_capable(hdev)) {
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	}

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01;	/* IO Capability Request */
		events[6] |= 0x02;	/* IO Capability Response */
		events[6] |= 0x04;	/* User Confirmation Request */
		events[6] |= 0x08;	/* User Passkey Request */
		events[6] |= 0x10;	/* Remote OOB Data Request */
		events[6] |= 0x20;	/* Simple Pairing Complete */
		events[7] |= 0x04;	/* User Passkey Notification */
		events[7] |= 0x08;	/* Keypress Notification */
		events[7] |= 0x10;	/* Remote Host Supported
					 * Features Notification
					 */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_MASK,
				     sizeof(events), events, HCI_CMD_TIMEOUT);
}

static int hci_read_stored_link_key_sync(struct hci_dev *hdev)
{
	struct hci_cp_read_stored_link_key cp;

	if (!(hdev->commands[6] & 0x20) ||
	    test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks))
		return 0;

	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.read_all = 0x01;

	return __hci_cmd_sync_status(hdev, HCI_OP_READ_STORED_LINK_KEY,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static int hci_setup_link_policy_sync(struct hci_dev *hdev)
{
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (!(hdev->commands[5] & 0x10))
		return 0;

	memset(&cp, 0, sizeof(cp));

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);

	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static int hci_read_page_scan_activity_sync(struct hci_dev *hdev)
{
	if (!(hdev->commands[8] & 0x01))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_READ_PAGE_SCAN_ACTIVITY,
				     0, NULL, HCI_CMD_TIMEOUT);
}
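/* The hdev->commands[] checks used throughout these helpers index the
 * Supported Commands bit mask from the Core specification:
 * commands[n] & BIT(m) tests octet n, bit m. For example, the
 * commands[8] & 0x01 check above corresponds to octet 8, bit 0
 * (Read Page Scan Activity).
 */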
	    !(hdev->features[0][6] & LMP_ERR_DATA_REPORTING) ||
	    test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_READ_DEF_ERR_DATA_REPORTING,
				     0, NULL, HCI_CMD_TIMEOUT);
}

static int hci_read_page_scan_type_sync(struct hci_dev *hdev)
{
	/* Some older Broadcom based Bluetooth 1.2 controllers do not
	 * support the Read Page Scan Type command. Check support for
	 * this command in the bit mask of supported commands.
	 */
	if (!(hdev->commands[13] & 0x01))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_READ_PAGE_SCAN_TYPE,
				     0, NULL, HCI_CMD_TIMEOUT);
}

/* Read features beyond page 1 if available */
static int hci_read_local_ext_features_all_sync(struct hci_dev *hdev)
{
	u8 page;
	int err;

	if (!lmp_ext_feat_capable(hdev))
		return 0;

	for (page = 2; page < HCI_MAX_PAGES && page <= hdev->max_page;
	     page++) {
		err = hci_read_local_ext_features_sync(hdev, page);
		if (err)
			return err;
	}

	return 0;
}

/* HCI Controller init stage 3 command sequence */
static const struct hci_init_stage hci_init3[] = {
	/* HCI_OP_SET_EVENT_MASK */
	HCI_INIT(hci_set_event_mask_sync),
	/* HCI_OP_READ_STORED_LINK_KEY */
	HCI_INIT(hci_read_stored_link_key_sync),
	/* HCI_OP_WRITE_DEF_LINK_POLICY */
	HCI_INIT(hci_setup_link_policy_sync),
	/* HCI_OP_READ_PAGE_SCAN_ACTIVITY */
	HCI_INIT(hci_read_page_scan_activity_sync),
	/* HCI_OP_READ_DEF_ERR_DATA_REPORTING */
	HCI_INIT(hci_read_def_err_data_reporting_sync),
	/* HCI_OP_READ_PAGE_SCAN_TYPE */
	HCI_INIT(hci_read_page_scan_type_sync),
	/* HCI_OP_READ_LOCAL_EXT_FEATURES */
	HCI_INIT(hci_read_local_ext_features_all_sync),
	{}
};

static int hci_le_set_event_mask_sync(struct hci_dev *hdev)
{
	u8 events[8];

	if (!lmp_le_capable(hdev))
		return 0;

	memset(events, 0, sizeof(events));

	if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
		events[0] |= 0x10; /* LE Long Term Key Request */

	/* If the controller supports the Connection Parameters Request
	 * Link Layer Procedure, enable the corresponding event.
	 */
	if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
		/* LE Remote Connection Parameter Request */
		events[0] |= 0x20;

	/* If the controller supports the Data Length Extension
	 * feature, enable the corresponding event.
	 */
	if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
		events[0] |= 0x40; /* LE Data Length Change */

	/* If the controller supports the LL Privacy feature or LE Extended
	 * Advertising, enable the corresponding event.
	 */
	if (use_enhanced_conn_complete(hdev))
		events[1] |= 0x02; /* LE Enhanced Connection Complete */

	/* If the controller supports Extended Scanner Filter
	 * Policies, enable the corresponding event.
	 */
	if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
		events[1] |= 0x04; /* LE Direct Advertising Report */

	/* If the controller supports the Channel Selection Algorithm #2
	 * feature, enable the corresponding event.
	 */
	if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
		events[2] |= 0x08; /* LE Channel Selection Algorithm */

	/* If the controller supports the LE Set Scan Enable command,
	 * enable the corresponding advertising report event.
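	 *
	 * (LE Set Scan Enable is octet 26 bit 3 of the supported-commands
	 * mask, hence the commands[26] & 0x08 test below.)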
	 */
	if (hdev->commands[26] & 0x08)
		events[0] |= 0x02; /* LE Advertising Report */

	/* If the controller supports the LE Create Connection
	 * command, enable the corresponding event.
	 */
	if (hdev->commands[26] & 0x10)
		events[0] |= 0x01; /* LE Connection Complete */

	/* If the controller supports the LE Connection Update
	 * command, enable the corresponding event.
	 */
	if (hdev->commands[27] & 0x04)
		events[0] |= 0x04; /* LE Connection Update Complete */

	/* If the controller supports the LE Read Remote Used Features
	 * command, enable the corresponding event.
	 */
	if (hdev->commands[27] & 0x20)
		/* LE Read Remote Used Features Complete */
		events[0] |= 0x08;

	/* If the controller supports the LE Read Local P-256
	 * Public Key command, enable the corresponding event.
	 */
	if (hdev->commands[34] & 0x02)
		/* LE Read Local P-256 Public Key Complete */
		events[0] |= 0x80;

	/* If the controller supports the LE Generate DHKey
	 * command, enable the corresponding event.
	 */
	if (hdev->commands[34] & 0x04)
		events[1] |= 0x01; /* LE Generate DHKey Complete */

	/* If the controller supports the LE Set Default PHY or
	 * LE Set PHY commands, enable the corresponding event.
	 */
	if (hdev->commands[35] & (0x20 | 0x40))
		events[1] |= 0x08; /* LE PHY Update Complete */

	/* If the controller supports the LE Set Extended Scan Parameters
	 * and LE Set Extended Scan Enable commands, enable the
	 * corresponding event.
	 */
	if (use_ext_scan(hdev))
		events[1] |= 0x10; /* LE Extended Advertising Report */

	/* If the controller supports the LE Extended Advertising
	 * commands, enable the corresponding event.
	 */
	if (ext_adv_capable(hdev))
		events[2] |= 0x02; /* LE Advertising Set Terminated */

	if (cis_capable(hdev)) {
		events[3] |= 0x01; /* LE CIS Established */
		if (cis_peripheral_capable(hdev))
			events[3] |= 0x02; /* LE CIS Request */
	}

	if (bis_capable(hdev)) {
		events[1] |= 0x20; /* LE PA Sync Established */
		events[1] |= 0x40; /* LE PA Report */
		events[3] |= 0x04; /* LE Create BIG Complete */
		events[3] |= 0x08; /* LE Terminate BIG Complete */
		events[3] |= 0x10; /* LE BIG Sync Established */
		events[3] |= 0x20; /* LE BIG Sync Lost */
		events[4] |= 0x02; /* LE BIG Info Advertising Report */
	}

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EVENT_MASK,
				     sizeof(events), events, HCI_CMD_TIMEOUT);
}

/* Read LE Advertising Channel TX Power */
static int hci_le_read_adv_tx_power_sync(struct hci_dev *hdev)
{
	if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) {
		/* The HCI spec forbids mixing legacy and extended
		 * advertising commands, and READ_ADV_TX_POWER belongs to
		 * the legacy set. So do not call it if extended advertising
		 * is supported, otherwise the controller will return
		 * COMMAND_DISALLOWED.
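		 *
		 * (COMMAND_DISALLOWED is HCI status code 0x0C.)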
		 */
		return __hci_cmd_sync_status(hdev,
					     HCI_OP_LE_READ_ADV_TX_POWER,
					     0, NULL, HCI_CMD_TIMEOUT);
	}

	return 0;
}

/* Read LE Min/Max Tx Power */
static int hci_le_read_tx_power_sync(struct hci_dev *hdev)
{
	if (!(hdev->commands[38] & 0x80) ||
	    test_bit(HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER, &hdev->quirks))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_TRANSMIT_POWER,
				     0, NULL, HCI_CMD_TIMEOUT);
}

/* Read LE Accept List Size */
static int hci_le_read_accept_list_size_sync(struct hci_dev *hdev)
{
	if (!(hdev->commands[26] & 0x40))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_ACCEPT_LIST_SIZE,
				     0, NULL, HCI_CMD_TIMEOUT);
}

/* Clear LE Accept List */
static int hci_le_clear_accept_list_sync(struct hci_dev *hdev)
{
	if (!(hdev->commands[26] & 0x80))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_CLEAR_ACCEPT_LIST, 0, NULL,
				     HCI_CMD_TIMEOUT);
}

/* Read LE Resolving List Size */
static int hci_le_read_resolv_list_size_sync(struct hci_dev *hdev)
{
	if (!(hdev->commands[34] & 0x40))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_RESOLV_LIST_SIZE,
				     0, NULL, HCI_CMD_TIMEOUT);
}

/* Clear LE Resolving List */
static int hci_le_clear_resolv_list_sync(struct hci_dev *hdev)
{
	if (!(hdev->commands[34] & 0x20))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL,
				     HCI_CMD_TIMEOUT);
}

/* Set RPA timeout */
static int hci_le_set_rpa_timeout_sync(struct hci_dev *hdev)
{
	__le16 timeout = cpu_to_le16(hdev->rpa_timeout);

	if (!(hdev->commands[35] & 0x04) ||
	    test_bit(HCI_QUIRK_BROKEN_SET_RPA_TIMEOUT, &hdev->quirks))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_RPA_TIMEOUT,
				     sizeof(timeout), &timeout,
				     HCI_CMD_TIMEOUT);
}

/* Read LE Maximum Data Length */
static int hci_le_read_max_data_len_sync(struct hci_dev *hdev)
{
	if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL,
				     HCI_CMD_TIMEOUT);
}

/* Read LE Suggested Default Data Length */
static int hci_le_read_def_data_len_sync(struct hci_dev *hdev)
{
	if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL,
				     HCI_CMD_TIMEOUT);
}

/* Read LE Number of Supported Advertising Sets */
static int hci_le_read_num_support_adv_sets_sync(struct hci_dev *hdev)
{
	if (!ext_adv_capable(hdev))
		return 0;

	return __hci_cmd_sync_status(hdev,
				     HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
				     0, NULL, HCI_CMD_TIMEOUT);
}

/* Write LE Host Supported */
static int hci_set_le_support_sync(struct hci_dev *hdev)
{
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return 0;

	memset(&cp, 0, sizeof(cp));

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		cp.le = 0x01;
		cp.simul = 0x00;
	}

	if (cp.le == lmp_host_le_capable(hdev))
		return 0;

	return __hci_cmd_sync_status(hdev,
				     HCI_OP_WRITE_LE_HOST_SUPPORTED,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

/* LE Set Host Feature */
static int hci_le_set_host_feature_sync(struct hci_dev *hdev)
{
	struct hci_cp_le_set_host_feature cp;

	if (!iso_capable(hdev))
		return 0;

	memset(&cp, 0, sizeof(cp));

	/* Isochronous Channels (Host Support) */
	cp.bit_number = 32;
	cp.bit_value = 1;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_HOST_FEATURE,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

/* LE Controller init stage 3 command sequence */
static const struct hci_init_stage le_init3[] = {
	/* HCI_OP_LE_SET_EVENT_MASK */
	HCI_INIT(hci_le_set_event_mask_sync),
	/* HCI_OP_LE_READ_ADV_TX_POWER */
	HCI_INIT(hci_le_read_adv_tx_power_sync),
	/* HCI_OP_LE_READ_TRANSMIT_POWER */
	HCI_INIT(hci_le_read_tx_power_sync),
	/* HCI_OP_LE_READ_ACCEPT_LIST_SIZE */
	HCI_INIT(hci_le_read_accept_list_size_sync),
	/* HCI_OP_LE_CLEAR_ACCEPT_LIST */
	HCI_INIT(hci_le_clear_accept_list_sync),
	/* HCI_OP_LE_READ_RESOLV_LIST_SIZE */
	HCI_INIT(hci_le_read_resolv_list_size_sync),
	/* HCI_OP_LE_CLEAR_RESOLV_LIST */
	HCI_INIT(hci_le_clear_resolv_list_sync),
	/* HCI_OP_LE_SET_RPA_TIMEOUT */
	HCI_INIT(hci_le_set_rpa_timeout_sync),
	/* HCI_OP_LE_READ_MAX_DATA_LEN */
	HCI_INIT(hci_le_read_max_data_len_sync),
	/* HCI_OP_LE_READ_DEF_DATA_LEN */
	HCI_INIT(hci_le_read_def_data_len_sync),
	/* HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS */
	HCI_INIT(hci_le_read_num_support_adv_sets_sync),
	/* HCI_OP_WRITE_LE_HOST_SUPPORTED */
	HCI_INIT(hci_set_le_support_sync),
	/* HCI_OP_LE_SET_HOST_FEATURE */
	HCI_INIT(hci_le_set_host_feature_sync),
	{}
};

static int hci_init3_sync(struct hci_dev *hdev)
{
	int err;

	bt_dev_dbg(hdev, "");

	err = hci_init_stage_sync(hdev, hci_init3);
	if (err)
		return err;

	if (lmp_le_capable(hdev))
		return hci_init_stage_sync(hdev, le_init3);

	return 0;
}

static int hci_delete_stored_link_key_sync(struct hci_dev *hdev)
{
	struct hci_cp_delete_stored_link_key cp;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They clearly indicate its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only send this command if it
	 * is marked as supported. If not supported, assume that the
	 * controller does not have actual support for stored link keys,
	 * which makes this command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
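	 *
	 * (Both stored-link-key commands live in octet 6 of the
	 * supported-commands mask: Read Stored Link Key is tested via
	 * commands[6] & 0x20 further up, Delete Stored Link Key via
	 * commands[6] & 0x80 below.)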
	 */
	if (!(hdev->commands[6] & 0x80) ||
	    test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks))
		return 0;

	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 0x01;

	return __hci_cmd_sync_status(hdev, HCI_OP_DELETE_STORED_LINK_KEY,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static int hci_set_event_mask_page_2_sync(struct hci_dev *hdev)
{
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
	bool changed = false;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (!(hdev->commands[22] & 0x04))
		return 0;

	/* If the Connectionless Peripheral Broadcast central role is
	 * supported, enable all necessary events for it.
	 */
	if (lmp_cpb_central_capable(hdev)) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x08; /* Truncated Page Complete */
		events[2] |= 0x20; /* CPB Channel Map Change */
		changed = true;
	}

	/* If the Connectionless Peripheral Broadcast peripheral role is
	 * supported, enable all necessary events for it.
	 */
	if (lmp_cpb_peripheral_capable(hdev)) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CPB Receive */
		events[2] |= 0x04; /* CPB Timeout */
		events[2] |= 0x10; /* Peripheral Page Response Timeout */
		changed = true;
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
		events[2] |= 0x80;
		changed = true;
	}

	/* Some Broadcom based controllers indicate support for the Set
	 * Event Mask Page 2 command, but then actually do not support it.
	 * Since the default value is all bits set to zero, the command is
	 * only required if the event mask has to be changed. In case no
	 * change to the event mask is needed, skip this command.
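	 *
	 * (An all-zero mask is also the controller's reset default, so
	 * skipping the command when 'changed' is still false is equivalent
	 * to sending it with every bit cleared.)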
	 */
	if (!changed)
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_MASK_PAGE_2,
				     sizeof(events), events, HCI_CMD_TIMEOUT);
}

/* Read local codec list if the HCI command is supported */
static int hci_read_local_codecs_sync(struct hci_dev *hdev)
{
	if (hdev->commands[45] & 0x04)
		hci_read_supported_codecs_v2(hdev);
	else if (hdev->commands[29] & 0x20)
		hci_read_supported_codecs(hdev);

	return 0;
}

/* Read local pairing options if the HCI command is supported */
static int hci_read_local_pairing_opts_sync(struct hci_dev *hdev)
{
	if (!(hdev->commands[41] & 0x08))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_PAIRING_OPTS,
				     0, NULL, HCI_CMD_TIMEOUT);
}

/* Get MWS transport configuration if the HCI command is supported */
static int hci_get_mws_transport_config_sync(struct hci_dev *hdev)
{
	if (!mws_transport_config_capable(hdev))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_GET_MWS_TRANSPORT_CONFIG,
				     0, NULL, HCI_CMD_TIMEOUT);
}

/* Check for Synchronization Train support */
static int hci_read_sync_train_params_sync(struct hci_dev *hdev)
{
	if (!lmp_sync_train_capable(hdev))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_READ_SYNC_TRAIN_PARAMS,
				     0, NULL, HCI_CMD_TIMEOUT);
}

/* Enable Secure Connections if supported and configured */
static int hci_write_sc_support_1_sync(struct hci_dev *hdev)
{
	u8 support = 0x01;

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED) ||
	    !bredr_sc_enabled(hdev))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SC_SUPPORT,
				     sizeof(support), &support,
				     HCI_CMD_TIMEOUT);
}

/* If supported, set erroneous data reporting to match the wideband
 * speech setting value.
 */
static int hci_set_err_data_report_sync(struct hci_dev *hdev)
{
	struct hci_cp_write_def_err_data_reporting cp;
	bool enabled = hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED);

	if (!(hdev->commands[18] & 0x08) ||
	    !(hdev->features[0][6] & LMP_ERR_DATA_REPORTING) ||
	    test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks))
		return 0;

	if (enabled == hdev->err_data_reporting)
		return 0;

	memset(&cp, 0, sizeof(cp));
	cp.err_data_reporting = enabled ?
				ERR_DATA_REPORTING_ENABLED :
				ERR_DATA_REPORTING_DISABLED;

	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static const struct hci_init_stage hci_init4[] = {
	/* HCI_OP_DELETE_STORED_LINK_KEY */
	HCI_INIT(hci_delete_stored_link_key_sync),
	/* HCI_OP_SET_EVENT_MASK_PAGE_2 */
	HCI_INIT(hci_set_event_mask_page_2_sync),
	/* HCI_OP_READ_LOCAL_CODECS */
	HCI_INIT(hci_read_local_codecs_sync),
	/* HCI_OP_READ_LOCAL_PAIRING_OPTS */
	HCI_INIT(hci_read_local_pairing_opts_sync),
	/* HCI_OP_GET_MWS_TRANSPORT_CONFIG */
	HCI_INIT(hci_get_mws_transport_config_sync),
	/* HCI_OP_READ_SYNC_TRAIN_PARAMS */
	HCI_INIT(hci_read_sync_train_params_sync),
	/* HCI_OP_WRITE_SC_SUPPORT */
	HCI_INIT(hci_write_sc_support_1_sync),
	/* HCI_OP_WRITE_DEF_ERR_DATA_REPORTING */
	HCI_INIT(hci_set_err_data_report_sync),
	{}
};

/* Set Suggested Default Data Length to maximum if supported */
static int hci_le_set_write_def_data_len_sync(struct hci_dev *hdev)
{
	struct hci_cp_le_write_def_data_len cp;

	if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT))
		return 0;

	memset(&cp, 0, sizeof(cp));
	cp.tx_len = cpu_to_le16(hdev->le_max_tx_len);
	cp.tx_time = cpu_to_le16(hdev->le_max_tx_time);

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

/* Set Default PHY parameters if the command is supported; this enables
 * all supported PHYs according to the LE Features bits.
 */
static int hci_le_set_default_phy_sync(struct hci_dev *hdev)
{
	struct hci_cp_le_set_default_phy cp;

	if (!(hdev->commands[35] & 0x20)) {
		/* If the command is not supported it means only the 1M PHY
		 * is supported.
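		 *
		 * (Support for the 1M PHY is mandatory for any LE
		 * controller, so falling back to it is always safe.)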
		 */
		hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
		hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
		return 0;
	}

	memset(&cp, 0, sizeof(cp));
	cp.all_phys = 0x00;
	cp.tx_phys = HCI_LE_SET_PHY_1M;
	cp.rx_phys = HCI_LE_SET_PHY_1M;

	/* Enable 2M PHY if supported */
	if (le_2m_capable(hdev)) {
		cp.tx_phys |= HCI_LE_SET_PHY_2M;
		cp.rx_phys |= HCI_LE_SET_PHY_2M;
	}

	/* Enable Coded PHY if supported */
	if (le_coded_capable(hdev)) {
		cp.tx_phys |= HCI_LE_SET_PHY_CODED;
		cp.rx_phys |= HCI_LE_SET_PHY_CODED;
	}

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static const struct hci_init_stage le_init4[] = {
	/* HCI_OP_LE_WRITE_DEF_DATA_LEN */
	HCI_INIT(hci_le_set_write_def_data_len_sync),
	/* HCI_OP_LE_SET_DEFAULT_PHY */
	HCI_INIT(hci_le_set_default_phy_sync),
	{}
};

static int hci_init4_sync(struct hci_dev *hdev)
{
	int err;

	bt_dev_dbg(hdev, "");

	err = hci_init_stage_sync(hdev, hci_init4);
	if (err)
		return err;

	if (lmp_le_capable(hdev))
		return hci_init_stage_sync(hdev, le_init4);

	return 0;
}

static int hci_init_sync(struct hci_dev *hdev)
{
	int err;

	err = hci_init1_sync(hdev);
	if (err < 0)
		return err;

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	err = hci_init2_sync(hdev);
	if (err < 0)
		return err;

	/* HCI_PRIMARY covers single-mode LE, single-mode BR/EDR and
	 * dual-mode BR/EDR/LE controllers. AMP controllers only need
	 * the first two stages of init.
	 */
	if (hdev->dev_type != HCI_PRIMARY)
		return 0;

	err = hci_init3_sync(hdev);
	if (err < 0)
		return err;

	err = hci_init4_sync(hdev);
	if (err < 0)
		return err;

	/* This function is only called when the controller is actually in
	 * configured state. When the controller is marked as unconfigured,
	 * this initialization procedure is not run.
	 *
	 * It means that it is possible that a controller runs through its
	 * setup phase and then discovers missing settings. If that is the
	 * case, then this function will not be called. It will then only
	 * be called during the config phase.
	 *
	 * So only when in the setup phase or config phase, create the
	 * debugfs entries and register the SMP channels.
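	 *
	 * (HCI_SETUP is set while a newly registered controller goes
	 * through its first power-on, HCI_CONFIG while a previously
	 * unconfigured controller is being configured; both are checked
	 * below.)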
	 */
	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG))
		return 0;

	if (hci_dev_test_and_set_flag(hdev, HCI_DEBUGFS_CREATED))
		return 0;

	hci_debugfs_create_common(hdev);

	if (lmp_bredr_capable(hdev))
		hci_debugfs_create_bredr(hdev);

	if (lmp_le_capable(hdev))
		hci_debugfs_create_le(hdev);

	return 0;
}

#define HCI_QUIRK_BROKEN(_quirk, _desc) { HCI_QUIRK_BROKEN_##_quirk, _desc }

static const struct {
	unsigned long quirk;
	const char *desc;
} hci_broken_table[] = {
	HCI_QUIRK_BROKEN(LOCAL_COMMANDS,
			 "HCI Read Local Supported Commands not supported"),
	HCI_QUIRK_BROKEN(STORED_LINK_KEY,
			 "HCI Delete Stored Link Key command is advertised, "
			 "but not supported."),
	HCI_QUIRK_BROKEN(ERR_DATA_REPORTING,
			 "HCI Read Default Erroneous Data Reporting command is "
			 "advertised, but not supported."),
	HCI_QUIRK_BROKEN(READ_TRANSMIT_POWER,
			 "HCI Read Transmit Power Level command is advertised, "
			 "but not supported."),
	HCI_QUIRK_BROKEN(FILTER_CLEAR_ALL,
			 "HCI Set Event Filter command not supported."),
	HCI_QUIRK_BROKEN(ENHANCED_SETUP_SYNC_CONN,
			 "HCI Enhanced Setup Synchronous Connection command is "
			 "advertised, but not supported."),
	HCI_QUIRK_BROKEN(SET_RPA_TIMEOUT,
			 "HCI LE Set Random Private Address Timeout command is "
			 "advertised, but not supported."),
	HCI_QUIRK_BROKEN(LE_CODED,
			 "HCI LE Coded PHY feature bit is set, "
			 "but its usage is not supported.")
};

/* This function handles the hdev setup stage:
 *
 * Calls hdev->setup
 * Sets up the address if HCI_QUIRK_USE_BDADDR_PROPERTY is set.
 */
static int hci_dev_setup_sync(struct hci_dev *hdev)
{
	int ret = 0;
	bool invalid_bdaddr;
	size_t i;

	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks))
		return 0;

	bt_dev_dbg(hdev, "");

	hci_sock_dev_event(hdev, HCI_DEV_SETUP);

	if (hdev->setup)
		ret = hdev->setup(hdev);

	for (i = 0; i < ARRAY_SIZE(hci_broken_table); i++) {
		if (test_bit(hci_broken_table[i].quirk, &hdev->quirks))
			bt_dev_warn(hdev, "%s", hci_broken_table[i].desc);
	}

	/* The transport driver can set the quirk to mark the
	 * BD_ADDR invalid before creating the HCI device or in
	 * its setup callback.
	 */
	invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
			 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
	if (!ret) {
		if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks) &&
		    !bacmp(&hdev->public_addr, BDADDR_ANY))
			hci_dev_get_bd_addr_from_property(hdev);

		if (invalid_bdaddr && bacmp(&hdev->public_addr, BDADDR_ANY) &&
		    hdev->set_bdaddr) {
			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
			if (!ret)
				invalid_bdaddr = false;
		}
	}

	/* The transport driver can set these quirks before
	 * creating the HCI device or in its setup callback.
	 *
	 * For the invalid BD_ADDR quirk it is possible that
	 * it becomes a valid address if the bootloader does
	 * provide it (see above).
	 *
	 * In case any of them is set, the controller has to
	 * start up as unconfigured.
	 */
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    invalid_bdaddr)
		hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

	/* For an unconfigured controller it is required to
	 * read at least the version information provided by
	 * the Read Local Version Information command.
	 *
	 * If the set_bdaddr driver callback is provided, then
	 * also the original Bluetooth public device address
	 * will be read using the Read BD Address command.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return hci_unconf_init_sync(hdev);

	return ret;
}

/* This function handles the hdev init stage:
 *
 * Calls hci_dev_setup_sync to perform the setup stage
 * Calls hci_init_sync to perform the HCI command init sequence
 */
static int hci_dev_init_sync(struct hci_dev *hdev)
{
	int ret;

	bt_dev_dbg(hdev, "");

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	ret = hci_dev_setup_sync(hdev);

	if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* If a public address change is configured, ensure that
		 * the address gets programmed. If the driver does not
		 * support changing the public address, fail the power
		 * on procedure.
		 */
		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
		    hdev->set_bdaddr)
			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
		else
			ret = -EADDRNOTAVAIL;
	}

	if (!ret) {
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			ret = hci_init_sync(hdev);
			if (!ret && hdev->post_init)
				ret = hdev->post_init(hdev);
		}
	}

	/* If the HCI Reset command is clearing all diagnostic settings,
	 * then they need to be reprogrammed after the init procedure
	 * has completed.
	 */
	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
		ret = hdev->set_diag(hdev, true);

	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		msft_do_open(hdev);
		aosp_do_open(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	return ret;
}

int hci_dev_open_sync(struct hci_dev *hdev)
{
	int ret;

	bt_dev_dbg(hdev, "");

	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		ret = -ENODEV;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for a valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hdev->dev_type == HCI_PRIMARY &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	hci_devcd_reset(hdev);

	set_bit(HCI_RUNNING, &hdev->flags);
	hci_sock_dev_event(hdev, HCI_DEV_OPEN);

	ret = hci_dev_init_sync(hdev);
	if (!ret) {
		hci_dev_hold(hdev);
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		set_bit(HCI_UP, &hdev->flags);
		hci_sock_dev_event(hdev, HCI_DEV_UP);
		hci_leds_update_powered(hdev, true);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
		    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hci_dev_test_flag(hdev, HCI_MGMT) &&
		    hdev->dev_type == HCI_PRIMARY) {
			ret = hci_powered_update_sync(hdev);
			mgmt_power_on(hdev, ret);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);

		/* Since hci_rx_work() may schedule new cmd_work, it
		 * should be flushed first to avoid an unexpected call
		 * of hci_cmd_work().
		 */
		flush_work(&hdev->rx_work);
		flush_work(&hdev->cmd_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			cancel_delayed_work_sync(&hdev->cmd_timer);
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		if (hdev->req_skb) {
			kfree_skb(hdev->req_skb);
			hdev->req_skb = NULL;
		}

		clear_bit(HCI_RUNNING, &hdev->flags);
		hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

		hdev->close(hdev);
		hdev->flags &= BIT(HCI_RAW);
	}

done:
	return ret;
}

/* This function requires the caller holds hdev->lock */
static void hci_pend_le_actions_clear(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		hci_pend_le_list_del_init(p);
		if (p->conn) {
			hci_conn_drop(p->conn);
			hci_conn_put(p->conn);
			p->conn = NULL;
		}
	}

	BT_DBG("All LE pending actions cleared");
}

static int hci_dev_shutdown(struct hci_dev *hdev)
{
	int err = 0;
	/* Similar to how we first do setup and then set the exclusive access
	 * bit for userspace, we must first unset userchannel and then clean
	 * up. Otherwise, the kernel can't properly use the hci channel to
	 * clean up the controller (some shutdown routines require sending
	 * additional commands to the controller, for example).
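	 *
	 * (Hence the dance below: HCI_USER_CHANNEL is cleared first, the
	 * vendor shutdown hook runs with kernel access to the channel, and
	 * the flag is restored afterwards so userspace observes no change.)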
	 */
	bool was_userchannel =
		hci_dev_test_and_clear_flag(hdev, HCI_USER_CHANNEL);

	if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
	    test_bit(HCI_UP, &hdev->flags)) {
		/* Execute vendor specific shutdown routine */
		if (hdev->shutdown)
			err = hdev->shutdown(hdev);
	}

	if (was_userchannel)
		hci_dev_set_flag(hdev, HCI_USER_CHANNEL);

	return err;
}

int hci_dev_close_sync(struct hci_dev *hdev)
{
	bool auto_off;
	int err = 0;

	bt_dev_dbg(hdev, "");

	cancel_delayed_work(&hdev->power_off);
	cancel_delayed_work(&hdev->ncmd_timer);
	cancel_delayed_work(&hdev->le_scan_disable);
	cancel_delayed_work(&hdev->le_scan_restart);

	hci_request_cancel_all(hdev);

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work_sync(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	err = hci_dev_shutdown(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		return err;
	}

	hci_leds_update_powered(hdev, false);

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		hdev->discov_timeout = 0;
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		cancel_delayed_work(&hdev->service_cache);

	if (hci_dev_test_flag(hdev, HCI_MGMT)) {
		struct adv_info *adv_instance;

		cancel_delayed_work_sync(&hdev->rpa_expired);

		list_for_each_entry(adv_instance, &hdev->adv_instances, list)
			cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
	}

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
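	 *
	 * (drain_workqueue() also waits for work items that re-queue
	 * themselves, which a plain flush of the workqueue would not.)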
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);

	if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    hci_dev_test_flag(hdev, HCI_MGMT))
		__mgmt_power_off(hdev);

	hci_inquiry_cache_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_conn_hash_flush(hdev);
	/* Prevent data races on hdev->smp_data or hdev->smp_bredr_data */
	smp_unregister(hdev);
	hci_dev_unlock(hdev);

	hci_sock_dev_event(hdev, HCI_DEV_DOWN);

	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		aosp_do_close(hdev);
		msft_do_close(hdev);
	}

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
	    !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		set_bit(HCI_INIT, &hdev->flags);
		hci_reset_sync(hdev);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* Drop last request */
	if (hdev->req_skb) {
		kfree_skb(hdev->req_skb);
		hdev->req_skb = NULL;
	}

	clear_bit(HCI_RUNNING, &hdev->flags);
	hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

	/* After this point our queues are empty and no tasks are scheduled */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags &= BIT(HCI_RAW);
	hci_dev_clear_volatile_flags(hdev);

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);
	hci_codec_list_clear(&hdev->local_codecs);

	hci_dev_put(hdev);
	return err;
}

/* This function performs the power-on HCI command sequence as follows:
 *
 * If the controller is already up (HCI_UP), it performs the
 * hci_powered_update_sync sequence; otherwise it runs hci_dev_open_sync,
 * which follows up with hci_powered_update_sync once the init sequence
 * has completed.
 */
static int hci_power_on_sync(struct hci_dev *hdev)
{
	int err;

	if (test_bit(HCI_UP, &hdev->flags) &&
	    hci_dev_test_flag(hdev, HCI_MGMT) &&
	    hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
		cancel_delayed_work(&hdev->power_off);
		return hci_powered_update_sync(hdev);
	}

	err = hci_dev_open_sync(hdev);
	if (err < 0)
		return err;

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
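	 *
	 * (The conditions re-checked below are rfkill, a controller left
	 * unconfigured, and a BR/EDR controller without any usable public
	 * or static address.)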
	 */
	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
	    (hdev->dev_type == HCI_PRIMARY &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
		hci_dev_close_sync(hdev);
	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send the Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
		/* Once the controller is configured, it is important
		 * to clear the HCI_RAW flag.
		 */
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}

	return 0;
}

static int hci_remote_name_cancel_sync(struct hci_dev *hdev, bdaddr_t *addr)
{
	struct hci_cp_remote_name_req_cancel cp;

	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.bdaddr, addr);

	return __hci_cmd_sync_status(hdev, HCI_OP_REMOTE_NAME_REQ_CANCEL,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

int hci_stop_discovery_sync(struct hci_dev *hdev)
{
	struct discovery_state *d = &hdev->discovery;
	struct inquiry_entry *e;
	int err;

	bt_dev_dbg(hdev, "state %u", hdev->discovery.state);

	if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			err = __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY_CANCEL,
						    0, NULL, HCI_CMD_TIMEOUT);
			if (err)
				return err;
		}

		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			cancel_delayed_work(&hdev->le_scan_disable);
			cancel_delayed_work(&hdev->le_scan_restart);

			err = hci_scan_disable_sync(hdev);
			if (err)
				return err;
		}

	} else {
		err = hci_scan_disable_sync(hdev);
		if (err)
			return err;
	}

	/* Resume advertising if it was paused */
	if (use_ll_privacy(hdev))
		hci_resume_advertising_sync(hdev);

	/* No further actions needed for LE-only discovery */
	if (d->type == DISCOV_TYPE_LE)
		return 0;

	if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			return 0;

		return hci_remote_name_cancel_sync(hdev, &e->data.bdaddr);
	}

	return 0;
}

static int hci_disconnect_phy_link_sync(struct hci_dev *hdev, u16 handle,
					u8 reason)
{
	struct hci_cp_disconn_phy_link cp;

	memset(&cp, 0, sizeof(cp));
	cp.phy_handle = HCI_PHY_HANDLE(handle);
	cp.reason = reason;

	return __hci_cmd_sync_status(hdev, HCI_OP_DISCONN_PHY_LINK,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static int hci_disconnect_sync(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 reason)
{
	struct hci_cp_disconnect cp;

	if (conn->type == AMP_LINK)
		return hci_disconnect_phy_link_sync(hdev, conn->handle, reason);

	memset(&cp, 0, sizeof(cp));
	cp.handle = cpu_to_le16(conn->handle);
	cp.reason = reason;

	/* Wait for HCI_EV_DISCONN_COMPLETE, not HCI_EV_CMD_STATUS, when the
	 * reason is anything but HCI_ERROR_REMOTE_POWER_OFF. This reason is
	 * used when suspending or powering off, where we don't want to wait
	 * for the peer's response.
	 */
	if (reason != HCI_ERROR_REMOTE_POWER_OFF)
		return __hci_cmd_sync_status_sk(hdev, HCI_OP_DISCONNECT,
						sizeof(cp), &cp,
						HCI_EV_DISCONN_COMPLETE,
						HCI_CMD_TIMEOUT, NULL);

	return __hci_cmd_sync_status(hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp,
				     HCI_CMD_TIMEOUT);
}

static int hci_le_connect_cancel_sync(struct hci_dev *hdev,
				      struct hci_conn *conn, u8 reason)
{
	/* Return reason if scanning, since the connection will probably be
	 * cleaned up directly.
	 */
	if (test_bit(HCI_CONN_SCANNING, &conn->flags))
		return reason;

	if (conn->role == HCI_ROLE_SLAVE ||
	    test_and_set_bit(HCI_CONN_CANCEL, &conn->flags))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_CREATE_CONN_CANCEL,
				     0, NULL, HCI_CMD_TIMEOUT);
}

static int hci_connect_cancel_sync(struct hci_dev *hdev, struct hci_conn *conn,
				   u8 reason)
{
	if (conn->type == LE_LINK)
		return hci_le_connect_cancel_sync(hdev, conn, reason);

	if (conn->type == ISO_LINK) {
		/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
		 * page 1857:
		 *
		 * If this command is issued for a CIS on the Central and the
		 * CIS is successfully terminated before being established,
		 * then an HCI_LE_CIS_Established event shall also be sent for
		 * this CIS with the Status Operation Cancelled by Host (0x44).
		 */
		if (test_bit(HCI_CONN_CREATE_CIS, &conn->flags))
			return hci_disconnect_sync(hdev, conn, reason);

		/* A CIS with no Create CIS sent has nothing to cancel */
		if (bacmp(&conn->dst, BDADDR_ANY))
			return HCI_ERROR_LOCAL_HOST_TERM;

		/* There is no way to cancel a BIS without terminating the
		 * BIG, which is done later on connection cleanup.
		 */
		return 0;
	}

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return 0;

	/* Wait for HCI_EV_CONN_COMPLETE, not HCI_EV_CMD_STATUS, when the
	 * reason is anything but HCI_ERROR_REMOTE_POWER_OFF. This reason is
	 * used when suspending or powering off, where we don't want to wait
	 * for the peer's response.
	 */
	if (reason != HCI_ERROR_REMOTE_POWER_OFF)
		return __hci_cmd_sync_status_sk(hdev, HCI_OP_CREATE_CONN_CANCEL,
						6, &conn->dst,
						HCI_EV_CONN_COMPLETE,
						HCI_CMD_TIMEOUT, NULL);

	return __hci_cmd_sync_status(hdev, HCI_OP_CREATE_CONN_CANCEL,
				     6, &conn->dst, HCI_CMD_TIMEOUT);
}

static int hci_reject_sco_sync(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 reason)
{
	struct hci_cp_reject_sync_conn_req cp;

	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.bdaddr, &conn->dst);
	cp.reason = reason;

	/* SCO rejection has its own limited set of
	 * allowed error values (0x0D-0x0F).
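	 *
	 * (0x0D is Connection Rejected due to Limited Resources, 0x0E due
	 * to Security Reasons and 0x0F due to Unacceptable BD_ADDR; any
	 * other reason is coerced to 0x0D below.)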
	 */
	if (reason < 0x0d || reason > 0x0f)
		cp.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;

	return __hci_cmd_sync_status(hdev, HCI_OP_REJECT_SYNC_CONN_REQ,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static int hci_le_reject_cis_sync(struct hci_dev *hdev, struct hci_conn *conn,
				  u8 reason)
{
	struct hci_cp_le_reject_cis cp;

	memset(&cp, 0, sizeof(cp));
	cp.handle = cpu_to_le16(conn->handle);
	cp.reason = reason;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_REJECT_CIS,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static int hci_reject_conn_sync(struct hci_dev *hdev, struct hci_conn *conn,
				u8 reason)
{
	struct hci_cp_reject_conn_req cp;

	if (conn->type == ISO_LINK)
		return hci_le_reject_cis_sync(hdev, conn, reason);

	if (conn->type == SCO_LINK || conn->type == ESCO_LINK)
		return hci_reject_sco_sync(hdev, conn, reason);

	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.bdaddr, &conn->dst);
	cp.reason = reason;

	return __hci_cmd_sync_status(hdev, HCI_OP_REJECT_CONN_REQ,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason)
{
	int err = 0;
	u16 handle = conn->handle;
	bool disconnect = false;
	struct hci_conn *c;

	switch (conn->state) {
	case BT_CONNECTED:
	case BT_CONFIG:
		err = hci_disconnect_sync(hdev, conn, reason);
		break;
	case BT_CONNECT:
		err = hci_connect_cancel_sync(hdev, conn, reason);
		break;
	case BT_CONNECT2:
		err = hci_reject_conn_sync(hdev, conn, reason);
		break;
	case BT_OPEN:
		hci_dev_lock(hdev);

		/* Cleanup bis or pa sync connections */
		if (test_and_clear_bit(HCI_CONN_BIG_SYNC_FAILED, &conn->flags) ||
		    test_and_clear_bit(HCI_CONN_PA_SYNC_FAILED, &conn->flags)) {
			hci_conn_failed(conn, reason);
		} else if (test_bit(HCI_CONN_PA_SYNC, &conn->flags) ||
			   test_bit(HCI_CONN_BIG_SYNC, &conn->flags)) {
			conn->state = BT_CLOSED;
			hci_disconn_cfm(conn, reason);
			hci_conn_del(conn);
		}

		hci_dev_unlock(hdev);
		return 0;
	case BT_BOUND:
		break;
	default:
		disconnect = true;
		break;
	}

	hci_dev_lock(hdev);

	/* Check if the connection has been cleaned up concurrently */
	c = hci_conn_hash_lookup_handle(hdev, handle);
	if (!c || c != conn) {
		err = 0;
		goto unlock;
	}

	/* Clean up the hci_conn object if it cannot be cancelled, as that
	 * likely means the controller and host stack are out of sync or,
	 * in the LE case, it was still scanning, so it can be cleaned up
	 * safely.
	 */
	if (disconnect) {
		conn->state = BT_CLOSED;
		hci_disconn_cfm(conn, reason);
		hci_conn_del(conn);
	} else {
		hci_conn_failed(conn, reason);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int hci_disconnect_all_sync(struct hci_dev *hdev, u8 reason)
{
	struct list_head *head = &hdev->conn_hash.list;
	struct hci_conn *conn;

	rcu_read_lock();
	while ((conn = list_first_or_null_rcu(head, struct hci_conn, list))) {
		/* Make sure the connection is not freed while unlocking */
		conn = hci_conn_get(conn);
		rcu_read_unlock();
		/* Disregard possible errors, since hci_conn_del will have
		 * been called even if an error occurred: hci_conn_failed
		 * would then be invoked, and it calls hci_conn_del
		 * internally.
		 */
		hci_abort_conn_sync(hdev, conn, reason);
		hci_conn_put(conn);
		rcu_read_lock();
	}
	rcu_read_unlock();

	return 0;
}

/* This function performs the power-off HCI command sequence as follows:
 *
 * Clear Advertising
 * Stop Discovery
 * Disconnect all connections
 * hci_dev_close_sync
 */
static int hci_power_off_sync(struct hci_dev *hdev)
{
	int err;

	/* If the controller is already down there is nothing to do */
	if (!test_bit(HCI_UP, &hdev->flags))
		return 0;

	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		err = hci_write_scan_enable_sync(hdev, 0x00);
		if (err)
			return err;
	}

	err = hci_clear_adv_sync(hdev, NULL, false);
	if (err)
		return err;

	err = hci_stop_discovery_sync(hdev);
	if (err)
		return err;

	/* Terminated due to Power Off */
	err = hci_disconnect_all_sync(hdev, HCI_ERROR_REMOTE_POWER_OFF);
	if (err)
		return err;

	return hci_dev_close_sync(hdev);
}

int hci_set_powered_sync(struct hci_dev *hdev, u8 val)
{
	if (val)
		return hci_power_on_sync(hdev);

	return hci_power_off_sync(hdev);
}

static int hci_write_iac_sync(struct hci_dev *hdev)
{
	struct hci_cp_write_current_iac_lap cp;

	if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		return 0;

	memset(&cp, 0, sizeof(cp));

	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
		/* Limited discoverable mode */
		cp.num_iac = min_t(u8, hdev->num_iac, 2);
		cp.iac_lap[0] = 0x00; /* LIAC */
		cp.iac_lap[1] = 0x8b;
		cp.iac_lap[2] = 0x9e;
		cp.iac_lap[3] = 0x33; /* GIAC */
		cp.iac_lap[4] = 0x8b;
		cp.iac_lap[5] = 0x9e;
	} else {
		/* General discoverable mode */
		cp.num_iac = 1;
		cp.iac_lap[0] = 0x33; /* GIAC */
		cp.iac_lap[1] = 0x8b;
		cp.iac_lap[2] = 0x9e;
	}

	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CURRENT_IAC_LAP,
				     (cp.num_iac * 3) + 1, &cp,
				     HCI_CMD_TIMEOUT);
}

int hci_update_discoverable_sync(struct hci_dev *hdev)
{
	int err = 0;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = hci_write_iac_sync(hdev);
		if (err)
			return err;

		err = hci_update_scan_sync(hdev);
		if (err)
			return err;

		err = hci_update_class_sync(hdev);
		if (err)
			return err;
	}

	/* Advertising instances don't use the global discoverable setting, so
	 * only update AD if advertising was enabled using Set Advertising.
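	 *
	 * (HCI_ADVERTISING tracks advertising enabled through the mgmt Set
	 * Advertising setting, as opposed to per-instance advertising added
	 * with Add Advertising.)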
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
		err = hci_update_adv_data_sync(hdev, 0x00);
		if (err)
			return err;

		/* Discoverable mode affects the local advertising
		 * address in limited privacy mode.
		 */
		if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
			if (ext_adv_capable(hdev))
				err = hci_start_ext_adv_sync(hdev, 0x00);
			else
				err = hci_enable_advertising_sync(hdev);
		}
	}

	return err;
}

static int update_discoverable_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_discoverable_sync(hdev);
}

int hci_update_discoverable(struct hci_dev *hdev)
{
	/* Only queue if it would have any effect */
	if (hdev_is_powered(hdev) &&
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	    hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
		return hci_cmd_sync_queue(hdev, update_discoverable_sync, NULL,
					  NULL);

	return 0;
}

int hci_update_connectable_sync(struct hci_dev *hdev)
{
	int err;

	err = hci_update_scan_sync(hdev);
	if (err)
		return err;

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		err = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);

	/* Update the advertising parameters if necessary */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !list_empty(&hdev->adv_instances)) {
		if (ext_adv_capable(hdev))
			err = hci_start_ext_adv_sync(hdev,
						     hdev->cur_adv_instance);
		else
			err = hci_enable_advertising_sync(hdev);

		if (err)
			return err;
	}

	return hci_update_passive_scan_sync(hdev);
}

static int hci_inquiry_sync(struct hci_dev *hdev, u8 length)
{
	const u8 giac[3] = { 0x33, 0x8b, 0x9e };
	const u8 liac[3] = { 0x00, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	bt_dev_dbg(hdev, "");

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return 0;

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_dev_unlock(hdev);

	memset(&cp, 0, sizeof(cp));

	if (hdev->discovery.limited)
		memcpy(&cp.lap, liac, sizeof(cp.lap));
	else
		memcpy(&cp.lap, giac, sizeof(cp.lap));

	cp.length = length;

	return __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static int hci_active_scan_sync(struct hci_dev *hdev, uint16_t interval)
{
	u8 own_addr_type;
	/* Accept list is not used for discovery */
	u8 filter_policy = 0x00;
	/* Default is to enable duplicates filter */
	u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	int err;

	bt_dev_dbg(hdev, "");

	/* If the controller is scanning, passive scanning is running. Thus,
	 * we should temporarily stop it in order to set the discovery
	 * scanning parameters.
	 */
	err = hci_scan_disable_sync(hdev);
	if (err) {
		bt_dev_err(hdev, "Unable to disable scanning: %d", err);
		return err;
	}

	cancel_interleave_scan(hdev);

	/* Pause address resolution for active scan and stop advertising if
	 * privacy is enabled.
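	 *
	 * (hci_pause_addr_resolution() also pauses advertising when LL
	 * privacy is in use, which is why the failure path below resumes
	 * advertising again.)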
	 */
	err = hci_pause_addr_resolution(hdev);
	if (err)
		goto failed;

	/* All active scans will be done with either a resolvable private
	 * address (when the privacy feature has been enabled) or a
	 * non-resolvable private address.
	 */
	err = hci_update_random_address_sync(hdev, true, scan_use_rpa(hdev),
					     &own_addr_type);
	if (err < 0)
		own_addr_type = ADDR_LE_DEV_PUBLIC;

	if (hci_is_adv_monitoring(hdev)) {
		/* The duplicate filter should be disabled when some
		 * advertisement monitor is activated, otherwise AdvMon can
		 * only receive one advertisement for one peer(*) during
		 * active scanning, and might report loss to these peers.
		 *
		 * Note that different controllers have different meanings of
		 * |duplicate|. Some of them consider packets with the same
		 * address as duplicates, and others consider packets with the
		 * same address and the same RSSI as duplicates. Although in
		 * the latter case we don't strictly need to disable the
		 * duplicate filter, active scanning typically runs only for
		 * a short period of time, so the power impact should be
		 * negligible.
		 */
		filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
	}

	err = hci_start_scan_sync(hdev, LE_SCAN_ACTIVE, interval,
				  hdev->le_scan_window_discovery,
				  own_addr_type, filter_policy, filter_dup);
	if (!err)
		return err;

failed:
	/* Resume advertising if it was paused */
	if (use_ll_privacy(hdev))
		hci_resume_advertising_sync(hdev);

	/* Resume passive scanning */
	hci_update_passive_scan_sync(hdev);
	return err;
}

static int hci_start_interleaved_discovery_sync(struct hci_dev *hdev)
{
	int err;

	bt_dev_dbg(hdev, "");

	err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery * 2);
	if (err)
		return err;

	return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN);
}

int hci_start_discovery_sync(struct hci_dev *hdev)
{
	unsigned long timeout;
	int err;

	bt_dev_dbg(hdev, "type %u", hdev->discovery.type);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN);
	case DISCOV_TYPE_INTERLEAVED:
		/* When running simultaneous discovery, the LE scanning time
		 * should occupy the whole discovery time since BR/EDR inquiry
		 * and LE scanning are scheduled by the controller.
		 *
		 * For interleaved discovery, in comparison, BR/EDR inquiry
		 * and LE scanning are done sequentially with separate
		 * timeouts.
		 */
		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
			     &hdev->quirks)) {
			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
			/* During simultaneous discovery, we double the LE
			 * scan interval. We must leave some time for the
			 * controller to do BR/EDR inquiry.
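			 *
			 * (hci_start_interleaved_discovery_sync(), called
			 * below, passes hdev->le_scan_int_discovery * 2 for
			 * exactly this reason.)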
			 */
			err = hci_start_interleaved_discovery_sync(hdev);
			break;
		}

		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery);
		break;
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery);
		break;
	default:
		return -EINVAL;
	}

	if (err)
		return err;

	bt_dev_dbg(hdev, "timeout %u ms", jiffies_to_msecs(timeout));

	/* When service discovery is used and the controller has a
	 * strict duplicate filter, it is important to remember the
	 * start and duration of the scan. This is required for
	 * restarting scanning during the discovery phase.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
	    hdev->discovery.result_filtering) {
		hdev->discovery.scan_start = jiffies;
		hdev->discovery.scan_duration = timeout;
	}

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
			   timeout);
	return 0;
}

static void hci_suspend_monitor_sync(struct hci_dev *hdev)
{
	switch (hci_get_adv_monitor_offload_ext(hdev)) {
	case HCI_ADV_MONITOR_EXT_MSFT:
		msft_suspend_sync(hdev);
		break;
	default:
		return;
	}
}

/* This function disables discovery and marks it as paused */
static int hci_pause_discovery_sync(struct hci_dev *hdev)
{
	int old_state = hdev->discovery.state;
	int err;

	/* If discovery is already stopped/stopping/paused there is nothing
	 * to do.
	 */
	if (old_state == DISCOVERY_STOPPED || old_state == DISCOVERY_STOPPING ||
	    hdev->discovery_paused)
		return 0;

	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
	err = hci_stop_discovery_sync(hdev);
	if (err)
		return err;

	hdev->discovery_paused = true;
	hdev->discovery_old_state = old_state;
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	return 0;
}

static int hci_update_event_filter_sync(struct hci_dev *hdev)
{
	struct bdaddr_list_with_flags *b;
	u8 scan = SCAN_DISABLED;
	bool scanning = test_bit(HCI_PSCAN, &hdev->flags);
	int err;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return 0;

	/* Some fake CSR controllers lock up after setting this type of
	 * filter, so avoid sending the request altogether.
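	 *
	 * (Such controllers are expected to be flagged by their transport
	 * driver with HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, one of the quirks
	 * listed in hci_broken_table above.)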
static int hci_update_event_filter_sync(struct hci_dev *hdev)
{
	struct bdaddr_list_with_flags *b;
	u8 scan = SCAN_DISABLED;
	bool scanning = test_bit(HCI_PSCAN, &hdev->flags);
	int err;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return 0;

	/* Some fake CSR controllers lock up after setting this type of
	 * filter, so avoid sending the request altogether.
	 */
	if (test_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks))
		return 0;

	/* Always clear the event filter when starting */
	hci_clear_event_filter_sync(hdev);

	list_for_each_entry(b, &hdev->accept_list, list) {
		if (!(b->flags & HCI_CONN_FLAG_REMOTE_WAKEUP))
			continue;

		bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);

		err = hci_set_event_filter_sync(hdev, HCI_FLT_CONN_SETUP,
						HCI_CONN_SETUP_ALLOW_BDADDR,
						&b->bdaddr,
						HCI_CONN_SETUP_AUTO_ON);
		if (err)
			bt_dev_dbg(hdev, "Failed to set event filter for %pMR",
				   &b->bdaddr);
		else
			scan = SCAN_PAGE;
	}

	/* Only touch the scan enable setting when it actually changes */
	if (!!scan != scanning)
		hci_write_scan_enable_sync(hdev, scan);

	return 0;
}

/* This function disables scanning (BR/EDR and LE) and marks it as paused */
static int hci_pause_scan_sync(struct hci_dev *hdev)
{
	if (hdev->scanning_paused)
		return 0;

	/* Disable page scan if enabled */
	if (test_bit(HCI_PSCAN, &hdev->flags))
		hci_write_scan_enable_sync(hdev, SCAN_DISABLED);

	hci_scan_disable_sync(hdev);

	hdev->scanning_paused = true;

	return 0;
}
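
/* A minimal sketch (hypothetical helper, not part of the original file):
 * the quiesce sequence at the top of hci_suspend_sync() below, expressed
 * with the pause helpers defined above. The real function performs these
 * steps before disconnecting anything, so that the disconnects cannot cause
 * scanning to be re-enabled.
 */
static void __maybe_unused hci_quiesce_sketch(struct hci_dev *hdev)
{
	hci_pause_discovery_sync(hdev);		/* active scanning/inquiry */
	hci_pause_advertising_sync(hdev);	/* all advertising instances */
	hci_suspend_monitor_sync(hdev);		/* offloaded monitor filters */
	hci_pause_scan_sync(hdev);		/* page scan and LE scan */
}
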
/* This function performs the HCI suspend procedures in the following order:
 *
 * Pause discovery (active scanning/inquiry)
 * Pause Directed Advertising/Advertising
 * Pause Scanning (passive scanning in case discovery was not active)
 * Disconnect all connections
 * Set suspend_status to BT_SUSPEND_DISCONNECT if hdev cannot wake up,
 * otherwise:
 * Update event mask (only set events that are allowed to wake up the host)
 * Update event filter (with devices marked with HCI_CONN_FLAG_REMOTE_WAKEUP)
 * Update passive scanning (lower duty cycle)
 * Set suspend_status to BT_SUSPEND_CONFIGURE_WAKE
 */
int hci_suspend_sync(struct hci_dev *hdev)
{
	int err;

	/* If already marked as suspended there is nothing to do */
	if (hdev->suspended)
		return 0;

	/* Mark device as suspended */
	hdev->suspended = true;

	/* Pause discovery if not already stopped */
	hci_pause_discovery_sync(hdev);

	/* Pause other advertisements */
	hci_pause_advertising_sync(hdev);

	/* Suspend monitor filters */
	hci_suspend_monitor_sync(hdev);

	/* Prevent disconnects from causing scanning to be re-enabled */
	hci_pause_scan_sync(hdev);

	if (hci_conn_count(hdev)) {
		/* Soft disconnect everything (power off) */
		err = hci_disconnect_all_sync(hdev, HCI_ERROR_REMOTE_POWER_OFF);
		if (err) {
			/* Set state to BT_RUNNING so resume doesn't notify */
			hdev->suspend_state = BT_RUNNING;
			hci_resume_sync(hdev);
			return err;
		}

		/* Update the event mask so only the allowed events can wake
		 * up the host.
		 */
		hci_set_event_mask_sync(hdev);
	}

	/* Only configure the accept list if the disconnects succeeded and
	 * wake-up isn't being prevented.
	 */
	if (!hdev->wakeup || !hdev->wakeup(hdev)) {
		hdev->suspend_state = BT_SUSPEND_DISCONNECT;
		return 0;
	}

	/* Unpause to take care of updating scanning params */
	hdev->scanning_paused = false;

	/* Enable event filter for paired devices */
	hci_update_event_filter_sync(hdev);

	/* Update LE passive scan if enabled */
	hci_update_passive_scan_sync(hdev);

	/* Pause scan changes again */
	hdev->scanning_paused = true;

	hdev->suspend_state = BT_SUSPEND_CONFIGURE_WAKE;

	return 0;
}

/* This function resumes discovery */
static int hci_resume_discovery_sync(struct hci_dev *hdev)
{
	int err;

	/* If discovery is not paused there is nothing to do */
	if (!hdev->discovery_paused)
		return 0;

	hdev->discovery_paused = false;

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

	err = hci_start_discovery_sync(hdev);

	hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED :
				DISCOVERY_FINDING);

	return err;
}

static void hci_resume_monitor_sync(struct hci_dev *hdev)
{
	switch (hci_get_adv_monitor_offload_ext(hdev)) {
	case HCI_ADV_MONITOR_EXT_MSFT:
		msft_resume_sync(hdev);
		break;
	default:
		return;
	}
}

/* This function resumes scanning and resets the paused flag */
static int hci_resume_scan_sync(struct hci_dev *hdev)
{
	if (!hdev->scanning_paused)
		return 0;

	hdev->scanning_paused = false;

	hci_update_scan_sync(hdev);

	/* Reset passive scanning to normal */
	hci_update_passive_scan_sync(hdev);

	return 0;
}

/* This function performs the HCI resume procedures in the following order:
 *
 * Restore event mask
 * Clear event filter
 * Update passive scanning (normal duty cycle)
 * Resume Directed Advertising/Advertising
 * Resume discovery (active scanning/inquiry)
 */
int hci_resume_sync(struct hci_dev *hdev)
{
	/* If not marked as suspended there is nothing to do */
	if (!hdev->suspended)
		return 0;

	hdev->suspended = false;

	/* Restore event mask */
	hci_set_event_mask_sync(hdev);

	/* Clear any event filters and restore scan state */
	hci_clear_event_filter_sync(hdev);

	/* Resume scanning */
	hci_resume_scan_sync(hdev);

	/* Resume monitor filters */
	hci_resume_monitor_sync(hdev);

	/* Resume other advertisements */
	hci_resume_advertising_sync(hdev);

	/* Resume discovery */
	hci_resume_discovery_sync(hdev);

	return 0;
}

static bool conn_use_rpa(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	return hci_dev_test_flag(hdev, HCI_PRIVACY);
}
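
/* Note (an inference from the calls above, hedged rather than guaranteed by
 * this file): the event mask is programmed from both directions by the same
 * helper. hci_set_event_mask_sync() is expected to build the reduced
 * wake-up-only mask while hdev->suspended is set, as in hci_suspend_sync()
 * above, and the normal mask once it has been cleared again, as in
 * hci_resume_sync() above.
 */
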
static int hci_le_ext_directed_advertising_sync(struct hci_dev *hdev,
						struct hci_conn *conn)
{
	struct hci_cp_le_set_ext_adv_params cp;
	int err;
	bdaddr_t random_addr;
	u8 own_addr_type;

	err = hci_update_random_address_sync(hdev, false, conn_use_rpa(conn),
					     &own_addr_type);
	if (err)
		return err;

	/* Set require_privacy to false so that the remote device has a
	 * chance of identifying us.
	 */
	err = hci_get_random_address(hdev, false, conn_use_rpa(conn), NULL,
				     &own_addr_type, &random_addr);
	if (err)
		return err;

	memset(&cp, 0, sizeof(cp));

	cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_DIRECT_IND);
	cp.channel_map = hdev->le_adv_channel_map;
	cp.tx_power = HCI_TX_POWER_INVALID;
	cp.primary_phy = HCI_ADV_PHY_1M;
	cp.secondary_phy = HCI_ADV_PHY_1M;
	cp.handle = 0x00; /* Use instance 0 for directed adv */
	cp.own_addr_type = own_addr_type;
	cp.peer_addr_type = conn->dst_type;
	bacpy(&cp.peer_addr, &conn->dst);

	/* As per Core Spec 5.2, Vol 4, Part E, Sec 7.8.53, the advertising
	 * event property LE_LEGACY_ADV_DIRECT_IND does not support
	 * advertising data. When the advertising set already contains some,
	 * the controller shall return the error code 'Invalid HCI Command
	 * Parameters' (0x12), so the advertising set for handle 0x00 has to
	 * be removed first, since instance 0 is used for directed
	 * advertising.
	 */
	err = hci_remove_ext_adv_instance_sync(hdev, cp.handle, NULL);
	if (err)
		return err;

	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS,
				    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
	if (err)
		return err;

	/* Check if the random address needs to be updated */
	if (own_addr_type == ADDR_LE_DEV_RANDOM &&
	    bacmp(&random_addr, BDADDR_ANY) &&
	    bacmp(&random_addr, &hdev->random_addr)) {
		err = hci_set_adv_set_random_addr_sync(hdev, 0x00,
						       &random_addr);
		if (err)
			return err;
	}

	return hci_enable_ext_advertising_sync(hdev, 0x00);
}
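
/* Worked failure case (illustrative, hypothetical sequence): if instance 0
 * had previously been configured with advertising data, issuing LE Set
 * Extended Advertising Parameters with evt_properties set to
 * LE_LEGACY_ADV_DIRECT_IND against it would fail with status 0x12 (Invalid
 * HCI Command Parameters). hci_remove_ext_adv_instance_sync() above clears
 * the set first to avoid exactly that.
 */
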
static int hci_le_directed_advertising_sync(struct hci_dev *hdev,
					    struct hci_conn *conn)
{
	struct hci_cp_le_set_adv_param cp;
	u8 status;
	u8 own_addr_type;
	u8 enable;

	if (ext_adv_capable(hdev))
		return hci_le_ext_directed_advertising_sync(hdev, conn);

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	/* Set require_privacy to false so that the remote device has a
	 * chance of identifying us.
	 */
	status = hci_update_random_address_sync(hdev, false, conn_use_rpa(conn),
						&own_addr_type);
	if (status)
		return status;

	memset(&cp, 0, sizeof(cp));

	/* Some controllers might reject the command if the intervals are not
	 * within range for undirected advertising.
	 * BCM20702A0 is known to be affected by this.
	 */
	cp.min_interval = cpu_to_le16(0x0020);
	cp.max_interval = cpu_to_le16(0x0020);

	cp.type = LE_ADV_DIRECT_IND;
	cp.own_address_type = own_addr_type;
	cp.direct_addr_type = conn->dst_type;
	bacpy(&cp.direct_addr, &conn->dst);
	cp.channel_map = hdev->le_adv_channel_map;

	status = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_PARAM,
				       sizeof(cp), &cp, HCI_CMD_TIMEOUT);
	if (status)
		return status;

	enable = 0x01;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE,
				     sizeof(enable), &enable, HCI_CMD_TIMEOUT);
}

static void set_ext_conn_params(struct hci_conn *conn,
				struct hci_cp_le_ext_conn_param *p)
{
	struct hci_dev *hdev = conn->hdev;

	memset(p, 0, sizeof(*p));

	p->scan_interval = cpu_to_le16(hdev->le_scan_int_connect);
	p->scan_window = cpu_to_le16(hdev->le_scan_window_connect);
	p->conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
	p->conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
	p->conn_latency = cpu_to_le16(conn->le_conn_latency);
	p->supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
	p->min_ce_len = cpu_to_le16(0x0000);
	p->max_ce_len = cpu_to_le16(0x0000);
}

static int hci_le_ext_create_conn_sync(struct hci_dev *hdev,
				       struct hci_conn *conn, u8 own_addr_type)
{
	struct hci_cp_le_ext_create_conn *cp;
	struct hci_cp_le_ext_conn_param *p;
	u8 data[sizeof(*cp) + sizeof(*p) * 3];
	u32 plen;

	cp = (void *)data;
	p = (void *)cp->data;

	memset(cp, 0, sizeof(*cp));

	bacpy(&cp->peer_addr, &conn->dst);
	cp->peer_addr_type = conn->dst_type;
	cp->own_addr_type = own_addr_type;

	plen = sizeof(*cp);

	if (scan_1m(hdev)) {
		cp->phys |= LE_SCAN_PHY_1M;
		set_ext_conn_params(conn, p);

		p++;
		plen += sizeof(*p);
	}

	if (scan_2m(hdev)) {
		cp->phys |= LE_SCAN_PHY_2M;
		set_ext_conn_params(conn, p);

		p++;
		plen += sizeof(*p);
	}

	if (scan_coded(hdev)) {
		cp->phys |= LE_SCAN_PHY_CODED;
		set_ext_conn_params(conn, p);

		plen += sizeof(*p);
	}

	return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_EXT_CREATE_CONN,
					plen, data,
					HCI_EV_LE_ENHANCED_CONN_COMPLETE,
					conn->conn_timeout, NULL);
}
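
/* Worked example (illustrative): on a controller that supports the 1M and
 * Coded PHYs but not 2M, hci_le_ext_create_conn_sync() above emits two
 * copies of the connection parameters, and the parameter length works out
 * to:
 *
 *	plen = sizeof(struct hci_cp_le_ext_create_conn) +
 *	       2 * sizeof(struct hci_cp_le_ext_conn_param);
 *
 * with cp->phys set to LE_SCAN_PHY_1M | LE_SCAN_PHY_CODED.
 */
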
int hci_le_create_conn_sync(struct hci_dev *hdev, struct hci_conn *conn)
{
	struct hci_cp_le_create_conn cp;
	struct hci_conn_params *params;
	u8 own_addr_type;
	int err;

	/* If requested to connect as peripheral use directed advertising */
	if (conn->role == HCI_ROLE_SLAVE) {
		/* If we're actively scanning and simultaneous roles are not
		 * enabled, simply reject the attempt.
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
		    hdev->le_scan_type == LE_SCAN_ACTIVE &&
		    !hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES)) {
			hci_conn_del(conn);
			return -EBUSY;
		}

		/* Pause advertising while doing directed advertising */
		hci_pause_advertising_sync(hdev);

		err = hci_le_directed_advertising_sync(hdev, conn);
		goto done;
	}

	/* Disable advertising if simultaneous roles are not in use */
	if (!hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
		hci_pause_advertising_sync(hdev);

	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		conn->le_conn_min_interval = params->conn_min_interval;
		conn->le_conn_max_interval = params->conn_max_interval;
		conn->le_conn_latency = params->conn_latency;
		conn->le_supv_timeout = params->supervision_timeout;
	} else {
		conn->le_conn_min_interval = hdev->le_conn_min_interval;
		conn->le_conn_max_interval = hdev->le_conn_max_interval;
		conn->le_conn_latency = hdev->le_conn_latency;
		conn->le_supv_timeout = hdev->le_supv_timeout;
	}

	/* If the controller is scanning, stop it, since some controllers are
	 * not able to scan and connect at the same time. Also set the
	 * HCI_LE_SCAN_INTERRUPTED flag so that the command complete handler
	 * for scan disabling knows to set the correct discovery state.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
		hci_scan_disable_sync(hdev);
		hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED);
	}

	/* Update the random address, but set require_privacy to false so
	 * that we never connect with a non-resolvable address.
	 */
	err = hci_update_random_address_sync(hdev, false, conn_use_rpa(conn),
					     &own_addr_type);
	if (err)
		goto done;

	if (use_ext_conn(hdev)) {
		err = hci_le_ext_create_conn_sync(hdev, conn, own_addr_type);
		goto done;
	}

	memset(&cp, 0, sizeof(cp));

	cp.scan_interval = cpu_to_le16(hdev->le_scan_int_connect);
	cp.scan_window = cpu_to_le16(hdev->le_scan_window_connect);

	bacpy(&cp.peer_addr, &conn->dst);
	cp.peer_addr_type = conn->dst_type;
	cp.own_address_type = own_addr_type;
	cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
	cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
	cp.conn_latency = cpu_to_le16(conn->le_conn_latency);
	cp.supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
	cp.min_ce_len = cpu_to_le16(0x0000);
	cp.max_ce_len = cpu_to_le16(0x0000);

	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 2261:
	 *
	 * If this event is unmasked and the HCI_LE_Connection_Complete event
	 * is unmasked, only the HCI_LE_Enhanced_Connection_Complete event is
	 * sent when a new connection has been created.
	 */
	err = __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CREATE_CONN,
				       sizeof(cp), &cp,
				       use_enhanced_conn_complete(hdev) ?
				       HCI_EV_LE_ENHANCED_CONN_COMPLETE :
				       HCI_EV_LE_CONN_COMPLETE,
				       conn->conn_timeout, NULL);

done:
	if (err == -ETIMEDOUT)
		hci_le_connect_cancel_sync(hdev, conn, 0x00);

	/* Re-enable advertising after the connection attempt is finished */
	hci_resume_advertising_sync(hdev);
	return err;
}

int hci_le_create_cis_sync(struct hci_dev *hdev)
{
	struct {
		struct hci_cp_le_create_cis cp;
		struct hci_cis cis[0x1f];
	} cmd;
	struct hci_conn *conn;
	struct hci_conn *last = NULL;
	u8 cig = BT_ISO_QOS_CIG_UNSET;

	/* The spec allows only one pending LE Create CIS command at a time.
	 * If the command is pending now, don't do anything. We check for
	 * pending connections after each CIS Established event.
	 *
	 * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
	 * page 2566:
	 *
	 * If the Host issues this command before all the
	 * HCI_LE_CIS_Established events from the previous use of the
	 * command have been generated, the Controller shall return the
	 * error code Command Disallowed (0x0C).
	 *
	 * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
	 * page 2567:
	 *
	 * When the Controller receives the HCI_LE_Create_CIS command, the
	 * Controller sends the HCI_Command_Status event to the Host. An
	 * HCI_LE_CIS_Established event will be generated for each CIS when it
	 * is established or if it is disconnected or considered lost before
	 * being established; until all the events are generated, the command
	 * remains pending.
	 */

	memset(&cmd, 0, sizeof(cmd));

	hci_dev_lock(hdev);

	rcu_read_lock();

	/* Wait until the previous Create CIS has completed */
	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_CREATE_CIS, &conn->flags))
			goto done;
	}

	/* Find a CIG with all CIS ready */
	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
		struct hci_conn *link;

		if (hci_conn_check_create_cis(conn))
			continue;

		cig = conn->iso_qos.ucast.cig;

		list_for_each_entry_rcu(link, &hdev->conn_hash.list, list) {
			if (hci_conn_check_create_cis(link) > 0 &&
			    link->iso_qos.ucast.cig == cig &&
			    link->state != BT_CONNECTED) {
				cig = BT_ISO_QOS_CIG_UNSET;
				break;
			}
		}

		if (cig != BT_ISO_QOS_CIG_UNSET)
			break;
	}

	if (cig == BT_ISO_QOS_CIG_UNSET)
		goto done;

	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
		struct hci_cis *cis = &cmd.cis[cmd.cp.num_cis];

		if (hci_conn_check_create_cis(conn) ||
		    conn->iso_qos.ucast.cig != cig)
			continue;

		set_bit(HCI_CONN_CREATE_CIS, &conn->flags);
		cis->acl_handle = cpu_to_le16(conn->parent->handle);
		cis->cis_handle = cpu_to_le16(conn->handle);
		cmd.cp.num_cis++;

		/* Remember the last queued CIS: the list cursor must not be
		 * used once the iteration has completed.
		 */
		last = conn;

		if (cmd.cp.num_cis >= ARRAY_SIZE(cmd.cis))
			break;
	}

done:
	rcu_read_unlock();

	hci_dev_unlock(hdev);

	if (!cmd.cp.num_cis)
		return 0;

	/* Wait for HCI_LE_CIS_Established */
	return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CREATE_CIS,
					sizeof(cmd.cp) + sizeof(cmd.cis[0]) *
					cmd.cp.num_cis, &cmd,
					HCI_EVT_LE_CIS_ESTABLISHED,
					last->conn_timeout, NULL);
}

int hci_le_remove_cig_sync(struct hci_dev *hdev, u8 handle)
{
	struct hci_cp_le_remove_cig cp;

	memset(&cp, 0, sizeof(cp));
	cp.cig_id = handle;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_REMOVE_CIG, sizeof(cp),
				     &cp, HCI_CMD_TIMEOUT);
}

int hci_le_big_terminate_sync(struct hci_dev *hdev, u8 handle)
{
	struct hci_cp_le_big_term_sync cp;

	memset(&cp, 0, sizeof(cp));
	cp.handle = handle;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_BIG_TERM_SYNC,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

int hci_le_pa_terminate_sync(struct hci_dev *hdev, u16 handle)
{
	struct hci_cp_le_pa_term_sync cp;

	memset(&cp, 0, sizeof(cp));
	cp.handle = cpu_to_le16(handle);

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_TERM_SYNC,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
			   bool use_rpa, struct adv_info *adv_instance,
			   u8 *own_addr_type, bdaddr_t *rand_addr)
{
	int err;

	bacpy(rand_addr, BDADDR_ANY);

	/* If privacy is enabled use a resolvable private address. If the
	 * current RPA has expired then generate a new one.
	 */
	if (use_rpa) {
		/* If the controller supports LL Privacy, use own address
		 * type 0x03 (ADDR_LE_DEV_RANDOM_RESOLVED).
		 */
		if (use_ll_privacy(hdev))
			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
		else
			*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (adv_instance) {
			if (adv_rpa_valid(adv_instance))
				return 0;
		} else {
			if (rpa_valid(hdev))
				return 0;
		}

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			bt_dev_err(hdev, "failed to generate new RPA");
			return err;
		}

		bacpy(rand_addr, &hdev->rpa);

		return 0;
	}

	/* In case privacy is required without a resolvable private address,
	 * use a non-resolvable private address. This is useful for
	 * non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from six random bytes with the two most significant
			 * bits cleared.
			 */
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;

			/* The non-resolvable private address shall not be
			 * equal to the public address.
			 */
			if (bacmp(&hdev->bdaddr, &nrpa))
				break;
		}

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		bacpy(rand_addr, &nrpa);

		return 0;
	}

	/* No privacy, so use a public address */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}

static int _update_adv_data_sync(struct hci_dev *hdev, void *data)
{
	u8 instance = PTR_UINT(data);

	return hci_update_adv_data_sync(hdev, instance);
}

int hci_update_adv_data(struct hci_dev *hdev, u8 instance)
{
	return hci_cmd_sync_queue(hdev, _update_adv_data_sync,
				  UINT_PTR(instance), NULL);
}
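
/* A standalone sketch (hypothetical helper, not part of the original flow):
 * generating a non-resolvable private address exactly as the loop in
 * hci_get_random_address() above does, kept separate here for illustration.
 */
static void __maybe_unused generate_nrpa(struct hci_dev *hdev, bdaddr_t *nrpa)
{
	do {
		/* Six random bytes with the two most significant bits
		 * cleared mark the address as non-resolvable private.
		 */
		get_random_bytes(nrpa, 6);
		nrpa->b[5] &= 0x3f;

		/* Retry if it collides with the public address */
	} while (!bacmp(&hdev->bdaddr, nrpa));
}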