/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#ifndef __HCI_CORE_H
#define __HCI_CORE_H

#include <linux/idr.h>
#include <linux/leds.h>
#include <linux/rculist.h>

#include <net/bluetooth/hci.h>
#include <net/bluetooth/hci_sync.h>
#include <net/bluetooth/hci_sock.h>

/* HCI priority */
#define HCI_PRIO_MAX	7

/* HCI maximum id value */
#define HCI_MAX_ID	10000

/* HCI Core structures */
struct inquiry_data {
	bdaddr_t	bdaddr;
	__u8		pscan_rep_mode;
	__u8		pscan_period_mode;
	__u8		pscan_mode;
	__u8		dev_class[3];
	__le16		clock_offset;
	__s8		rssi;
	__u8		ssp_mode;
};

struct inquiry_entry {
	struct list_head	all;		/* inq_cache.all */
	struct list_head	list;		/* unknown or resolve */
	enum {
		NAME_NOT_KNOWN,
		NAME_NEEDED,
		NAME_PENDING,
		NAME_KNOWN,
	} name_state;
	__u32			timestamp;
	struct inquiry_data	data;
};

struct discovery_state {
	int			type;
	enum {
		DISCOVERY_STOPPED,
		DISCOVERY_STARTING,
		DISCOVERY_FINDING,
		DISCOVERY_RESOLVING,
		DISCOVERY_STOPPING,
	} state;
	struct list_head	all;		/* All devices found during inquiry */
	struct list_head	unknown;	/* Name state not known */
	struct list_head	resolve;	/* Name needs to be resolved */
	__u32			timestamp;
	bdaddr_t		last_adv_addr;
	u8			last_adv_addr_type;
	s8			last_adv_rssi;
	u32			last_adv_flags;
	u8			last_adv_data[HCI_MAX_AD_LENGTH];
	u8			last_adv_data_len;
	bool			report_invalid_rssi;
	bool			result_filtering;
	bool			limited;
	s8			rssi;
	u16			uuid_count;
	u8			(*uuids)[16];
	unsigned long		scan_start;
	unsigned long		scan_duration;
	unsigned long		name_resolve_timeout;
};

#define SUSPEND_NOTIFIER_TIMEOUT	msecs_to_jiffies(2000) /* 2 seconds */

enum suspend_tasks {
	SUSPEND_PAUSE_DISCOVERY,
	SUSPEND_UNPAUSE_DISCOVERY,

	SUSPEND_PAUSE_ADVERTISING,
	SUSPEND_UNPAUSE_ADVERTISING,

	SUSPEND_SCAN_DISABLE,
	SUSPEND_SCAN_ENABLE,
	SUSPEND_DISCONNECTING,

	SUSPEND_POWERING_DOWN,

	SUSPEND_PREPARE_NOTIFIER,

	SUSPEND_SET_ADV_FILTER,
	__SUSPEND_NUM_TASKS
};

enum suspended_state {
	BT_RUNNING = 0,
	BT_SUSPEND_DISCONNECT,
	BT_SUSPEND_CONFIGURE_WAKE,
};

struct hci_conn_hash {
	struct list_head list;
	unsigned int	acl_num;
	unsigned int	amp_num;
	unsigned int	sco_num;
	unsigned int	le_num;
	unsigned int	le_num_peripheral;
};
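/* Note: the per-type counters above are maintained by hci_conn_hash_add() and
 * hci_conn_hash_del() further down in this file, and are normally read through
 * the hci_conn_num() and hci_conn_count() helpers rather than directly.
 */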
struct bdaddr_list {
	struct list_head list;
	bdaddr_t bdaddr;
	u8 bdaddr_type;
};

struct codec_list {
	struct list_head list;
	u8	id;
	__u16	cid;
	__u16	vid;
	u8	transport;
	u8	num_caps;
	u32	len;
	struct hci_codec_caps caps[];
};

struct bdaddr_list_with_irk {
	struct list_head list;
	bdaddr_t bdaddr;
	u8 bdaddr_type;
	u8 peer_irk[16];
	u8 local_irk[16];
};

/* Bitmask of connection flags */
enum hci_conn_flags {
	HCI_CONN_FLAG_REMOTE_WAKEUP = 1,
	HCI_CONN_FLAG_DEVICE_PRIVACY = 2,
};
typedef u8 hci_conn_flags_t;

struct bdaddr_list_with_flags {
	struct list_head list;
	bdaddr_t bdaddr;
	u8 bdaddr_type;
	hci_conn_flags_t flags;
};

struct bt_uuid {
	struct list_head list;
	u8 uuid[16];
	u8 size;
	u8 svc_hint;
};

struct blocked_key {
	struct list_head list;
	struct rcu_head rcu;
	u8 type;
	u8 val[16];
};

struct smp_csrk {
	bdaddr_t bdaddr;
	u8 bdaddr_type;
	u8 type;
	u8 val[16];
};

struct smp_ltk {
	struct list_head list;
	struct rcu_head rcu;
	bdaddr_t bdaddr;
	u8 bdaddr_type;
	u8 authenticated;
	u8 type;
	u8 enc_size;
	__le16 ediv;
	__le64 rand;
	u8 val[16];
};

struct smp_irk {
	struct list_head list;
	struct rcu_head rcu;
	bdaddr_t rpa;
	bdaddr_t bdaddr;
	u8 addr_type;
	u8 val[16];
};

struct link_key {
	struct list_head list;
	struct rcu_head rcu;
	bdaddr_t bdaddr;
	u8 type;
	u8 val[HCI_LINK_KEY_SIZE];
	u8 pin_len;
};

struct oob_data {
	struct list_head list;
	bdaddr_t bdaddr;
	u8 bdaddr_type;
	u8 present;
	u8 hash192[16];
	u8 rand192[16];
	u8 hash256[16];
	u8 rand256[16];
};

struct adv_info {
	struct list_head list;
	bool	enabled;
	bool	pending;
	__u8	instance;
	__u32	flags;
	__u16	timeout;
	__u16	remaining_time;
	__u16	duration;
	__u16	adv_data_len;
	__u8	adv_data[HCI_MAX_EXT_AD_LENGTH];
	__u16	scan_rsp_len;
	__u8	scan_rsp_data[HCI_MAX_EXT_AD_LENGTH];
	__s8	tx_power;
	__u32	min_interval;
	__u32	max_interval;
	bdaddr_t	random_addr;
	bool	rpa_expired;
	struct delayed_work	rpa_expired_cb;
};

#define HCI_MAX_ADV_INSTANCES		5
#define HCI_DEFAULT_ADV_DURATION	2

#define HCI_ADV_TX_POWER_NO_PREFERENCE	0x7F

struct monitored_device {
	struct list_head list;

	bdaddr_t bdaddr;
	__u8	addr_type;
	__u16	handle;
	bool	notified;
};

struct adv_pattern {
	struct list_head list;
	__u8 ad_type;
	__u8 offset;
	__u8 length;
	__u8 value[HCI_MAX_AD_LENGTH];
};

struct adv_rssi_thresholds {
	__s8 low_threshold;
	__s8 high_threshold;
	__u16 low_threshold_timeout;
	__u16 high_threshold_timeout;
	__u8 sampling_period;
};

struct adv_monitor {
	struct list_head patterns;
	struct adv_rssi_thresholds rssi;
	__u16		handle;

	enum {
		ADV_MONITOR_STATE_NOT_REGISTERED,
		ADV_MONITOR_STATE_REGISTERED,
		ADV_MONITOR_STATE_OFFLOADED
	} state;
};
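/* Illustrative sketch (not part of the API): each adv_pattern on the patterns
 * list above describes a match of "length" bytes against one AD structure of
 * type "ad_type", starting at "offset" within that AD structure's data.
 * Matching, say, the Heart Rate service UUID (0x180D, little endian, an
 * arbitrary example) in a Service Data AD structure might roughly look like:
 *
 *	struct adv_pattern *p = kzalloc(sizeof(*p), GFP_KERNEL);
 *
 *	p->ad_type = 0x16;		// Service Data - 16-bit UUID
 *	p->offset = 0;			// offset into the AD element's data
 *	p->length = 2;
 *	p->value[0] = 0x0d;
 *	p->value[1] = 0x18;
 *	list_add(&p->list, &monitor->patterns);
 *
 * (error handling omitted; "monitor" is a hypothetical struct adv_monitor)
 */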
#define HCI_MIN_ADV_MONITOR_HANDLE		1
#define HCI_MAX_ADV_MONITOR_NUM_HANDLES		32
#define HCI_MAX_ADV_MONITOR_NUM_PATTERNS	16
#define HCI_ADV_MONITOR_EXT_NONE		1
#define HCI_ADV_MONITOR_EXT_MSFT		2

#define HCI_MAX_SHORT_NAME_LENGTH	10

#define HCI_CONN_HANDLE_UNSET		0xffff
#define HCI_CONN_HANDLE_MAX		0x0eff

/* Min encryption key size to match with SMP */
#define HCI_MIN_ENC_KEY_SIZE		7

/* Default LE RPA expiry time, 15 minutes */
#define HCI_DEFAULT_RPA_TIMEOUT		(15 * 60)

/* Default min/max age of connection information (1s/3s) */
#define DEFAULT_CONN_INFO_MIN_AGE	1000
#define DEFAULT_CONN_INFO_MAX_AGE	3000
/* Default authenticated payload timeout 30s */
#define DEFAULT_AUTH_PAYLOAD_TIMEOUT	0x0bb8

struct amp_assoc {
	__u16	len;
	__u16	offset;
	__u16	rem_len;
	__u16	len_so_far;
	__u8	data[HCI_MAX_AMP_ASSOC_SIZE];
};

#define HCI_MAX_PAGES	3

struct hci_dev {
	struct list_head list;
	struct mutex	lock;

	char		name[8];
	unsigned long	flags;
	__u16		id;
	__u8		bus;
	__u8		dev_type;
	bdaddr_t	bdaddr;
	bdaddr_t	setup_addr;
	bdaddr_t	public_addr;
	bdaddr_t	random_addr;
	bdaddr_t	static_addr;
	__u8		adv_addr_type;
	__u8		dev_name[HCI_MAX_NAME_LENGTH];
	__u8		short_name[HCI_MAX_SHORT_NAME_LENGTH];
	__u8		eir[HCI_MAX_EIR_LENGTH];
	__u16		appearance;
	__u8		dev_class[3];
	__u8		major_class;
	__u8		minor_class;
	__u8		max_page;
	__u8		features[HCI_MAX_PAGES][8];
	__u8		le_features[8];
	__u8		le_accept_list_size;
	__u8		le_resolv_list_size;
	__u8		le_num_of_adv_sets;
	__u8		le_states[8];
	__u8		commands[64];
	__u8		hci_ver;
	__u16		hci_rev;
	__u8		lmp_ver;
	__u16		manufacturer;
	__u16		lmp_subver;
	__u16		voice_setting;
	__u8		num_iac;
	__u16		stored_max_keys;
	__u16		stored_num_keys;
	__u8		io_capability;
	__s8		inq_tx_power;
	__u8		err_data_reporting;
	__u16		page_scan_interval;
	__u16		page_scan_window;
	__u8		page_scan_type;
	__u8		le_adv_channel_map;
	__u16		le_adv_min_interval;
	__u16		le_adv_max_interval;
	__u8		le_scan_type;
	__u16		le_scan_interval;
	__u16		le_scan_window;
	__u16		le_scan_int_suspend;
	__u16		le_scan_window_suspend;
	__u16		le_scan_int_discovery;
	__u16		le_scan_window_discovery;
	__u16		le_scan_int_adv_monitor;
	__u16		le_scan_window_adv_monitor;
	__u16		le_scan_int_connect;
	__u16		le_scan_window_connect;
	__u16		le_conn_min_interval;
	__u16		le_conn_max_interval;
	__u16		le_conn_latency;
	__u16		le_supv_timeout;
	__u16		le_def_tx_len;
	__u16		le_def_tx_time;
	__u16		le_max_tx_len;
	__u16		le_max_tx_time;
	__u16		le_max_rx_len;
	__u16		le_max_rx_time;
	__u8		le_max_key_size;
	__u8		le_min_key_size;
	__u16		discov_interleaved_timeout;
	__u16		conn_info_min_age;
	__u16		conn_info_max_age;
	__u16		auth_payload_timeout;
	__u8		min_enc_key_size;
	__u8		max_enc_key_size;
	__u8		pairing_opts;
	__u8		ssp_debug_mode;
	__u8		hw_error_code;
	__u32		clock;
	__u16		advmon_allowlist_duration;
	__u16		advmon_no_filter_duration;
	__u8		enable_advmon_interleave_scan;

	__u16		devid_source;
	__u16		devid_vendor;
	__u16		devid_product;
	__u16		devid_version;

	__u8		def_page_scan_type;
	__u16		def_page_scan_int;
	__u16		def_page_scan_window;
	__u8		def_inq_scan_type;
	__u16		def_inq_scan_int;
	__u16		def_inq_scan_window;
	__u16		def_br_lsto;
	__u16		def_page_timeout;
	__u16		def_multi_adv_rotation_duration;
	__u16		def_le_autoconnect_timeout;
	__s8		min_le_tx_power;
	__s8		max_le_tx_power;

	__u16		pkt_type;
	__u16		esco_type;
	__u16		link_policy;
	__u16		link_mode;

	__u32		idle_timeout;
	__u16		sniff_min_interval;
	__u16		sniff_max_interval;
	__u8		amp_status;
	__u32		amp_total_bw;
	__u32		amp_max_bw;
	__u32		amp_min_latency;
	__u32		amp_max_pdu;
	__u8		amp_type;
	__u16		amp_pal_cap;
	__u16		amp_assoc_size;
	__u32		amp_max_flush_to;
	__u32		amp_be_flush_to;

	struct amp_assoc	loc_assoc;

	__u8		flow_ctl_mode;

	unsigned int	auto_accept_delay;

	unsigned long	quirks;

	atomic_t	cmd_cnt;
	unsigned int	acl_cnt;
	unsigned int	sco_cnt;
	unsigned int	le_cnt;

	unsigned int	acl_mtu;
	unsigned int	sco_mtu;
	unsigned int	le_mtu;
	unsigned int	acl_pkts;
	unsigned int	sco_pkts;
	unsigned int	le_pkts;

	__u16		block_len;
	__u16		block_mtu;
	__u16		num_blocks;
	__u16		block_cnt;

	unsigned long	acl_last_tx;
	unsigned long	sco_last_tx;
	unsigned long	le_last_tx;

	__u8		le_tx_def_phys;
	__u8		le_rx_def_phys;

	struct workqueue_struct	*workqueue;
	struct workqueue_struct	*req_workqueue;

	struct work_struct	power_on;
	struct delayed_work	power_off;
	struct work_struct	error_reset;
	struct work_struct	cmd_sync_work;
	struct list_head	cmd_sync_work_list;
	struct mutex		cmd_sync_work_lock;
	struct work_struct	cmd_sync_cancel_work;

	__u16			discov_timeout;
	struct delayed_work	discov_off;

	struct delayed_work	service_cache;

	struct delayed_work	cmd_timer;
	struct delayed_work	ncmd_timer;

	struct work_struct	rx_work;
	struct work_struct	cmd_work;
	struct work_struct	tx_work;

	struct work_struct	discov_update;
	struct work_struct	scan_update;
	struct delayed_work	le_scan_disable;
	struct delayed_work	le_scan_restart;

	struct sk_buff_head	rx_q;
	struct sk_buff_head	raw_q;
	struct sk_buff_head	cmd_q;

	struct sk_buff		*sent_cmd;

	struct mutex		req_lock;
	wait_queue_head_t	req_wait_q;
	__u32			req_status;
	__u32			req_result;
	struct sk_buff		*req_skb;

	void			*smp_data;
	void			*smp_bredr_data;

	struct discovery_state	discovery;

	int			discovery_old_state;
	bool			discovery_paused;
	int			advertising_old_state;
	bool			advertising_paused;

	struct notifier_block	suspend_notifier;
	enum suspended_state	suspend_state_next;
	enum suspended_state	suspend_state;
	bool			scanning_paused;
	bool			suspended;
	u8			wake_reason;
	bdaddr_t		wake_addr;
	u8			wake_addr_type;

	struct hci_conn_hash	conn_hash;

	struct list_head	mgmt_pending;
	struct list_head	reject_list;
	struct list_head	accept_list;
	struct list_head	uuids;
	struct list_head	link_keys;
	struct list_head	long_term_keys;
	struct list_head	identity_resolving_keys;
	struct list_head	remote_oob_data;
	struct list_head	le_accept_list;
	struct list_head	le_resolv_list;
	struct list_head	le_conn_params;
	struct list_head	pend_le_conns;
	struct list_head	pend_le_reports;
	struct list_head	blocked_keys;
	struct list_head	local_codecs;

	struct hci_dev_stats	stat;

	atomic_t		promisc;

	const char		*hw_info;
	const char		*fw_info;
	struct dentry		*debugfs;

	struct device		dev;

	struct rfkill		*rfkill;

	DECLARE_BITMAP(dev_flags, __HCI_NUM_FLAGS);
	hci_conn_flags_t	conn_flags;

	__s8			adv_tx_power;
	__u8			adv_data[HCI_MAX_EXT_AD_LENGTH];
	__u8			adv_data_len;
	__u8			scan_rsp_data[HCI_MAX_EXT_AD_LENGTH];
	__u8			scan_rsp_data_len;

	struct list_head	adv_instances;
	unsigned int		adv_instance_cnt;
	__u8			cur_adv_instance;
	__u16			adv_instance_timeout;
	struct delayed_work	adv_instance_expire;

	struct idr		adv_monitors_idr;
	unsigned int		adv_monitors_cnt;

	__u8			irk[16];
	__u32			rpa_timeout;
	struct delayed_work	rpa_expired;
	bdaddr_t		rpa;

	enum {
		INTERLEAVE_SCAN_NONE,
		INTERLEAVE_SCAN_NO_FILTER,
		INTERLEAVE_SCAN_ALLOWLIST
	} interleave_scan_state;

	struct delayed_work	interleave_scan;

	struct list_head	monitored_devices;
	bool			advmon_pend_notify;

#if IS_ENABLED(CONFIG_BT_LEDS)
	struct led_trigger	*power_led;
#endif

#if IS_ENABLED(CONFIG_BT_MSFTEXT)
	__u16			msft_opcode;
	void			*msft_data;
	bool			msft_curve_validity;
#endif

#if IS_ENABLED(CONFIG_BT_AOSPEXT)
	bool			aosp_capable;
	bool			aosp_quality_report;
#endif

	int (*open)(struct hci_dev *hdev);
	int (*close)(struct hci_dev *hdev);
	int (*flush)(struct hci_dev *hdev);
	int (*setup)(struct hci_dev *hdev);
	int (*shutdown)(struct hci_dev *hdev);
	int (*send)(struct hci_dev *hdev, struct sk_buff *skb);
	void (*notify)(struct hci_dev *hdev, unsigned int evt);
	void (*hw_error)(struct hci_dev *hdev, u8 code);
	int (*post_init)(struct hci_dev *hdev);
	int (*set_diag)(struct hci_dev *hdev, bool enable);
	int (*set_bdaddr)(struct hci_dev *hdev, const bdaddr_t *bdaddr);
	void (*cmd_timeout)(struct hci_dev *hdev);
	bool (*wakeup)(struct hci_dev *hdev);
	int (*set_quality_report)(struct hci_dev *hdev, bool enable);
	int (*get_data_path_id)(struct hci_dev *hdev, __u8 *data_path);
	int (*get_codec_config_data)(struct hci_dev *hdev, __u8 type,
				     struct bt_codec *codec, __u8 *vnd_len,
				     __u8 **vnd_data);
};
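/* Illustrative sketch (not part of this header): a transport driver normally
 * allocates an hci_dev, fills in at least the open/close/send callbacks above
 * and then registers the device. Roughly, with error handling omitted and
 * my_open/my_close/my_send/my_priv being hypothetical driver symbols:
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *
 *	hdev->bus = HCI_USB;			// example transport type
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *	hci_set_drvdata(hdev, my_priv);
 *	SET_HCIDEV_DEV(hdev, parent_device);
 *	hci_register_dev(hdev);
 *
 * hci_alloc_dev(), hci_set_drvdata(), SET_HCIDEV_DEV() and hci_register_dev()
 * are all declared later in this file.
 */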
#define HCI_PHY_HANDLE(handle)	(handle & 0xff)

enum conn_reasons {
	CONN_REASON_PAIR_DEVICE,
	CONN_REASON_L2CAP_CHAN,
	CONN_REASON_SCO_CONNECT,
};

struct hci_conn {
	struct list_head list;

	atomic_t	refcnt;

	bdaddr_t	dst;
	__u8		dst_type;
	bdaddr_t	src;
	__u8		src_type;
	bdaddr_t	init_addr;
	__u8		init_addr_type;
	bdaddr_t	resp_addr;
	__u8		resp_addr_type;
	__u8		adv_instance;
	__u16		handle;
	__u16		state;
	__u8		mode;
	__u8		type;
	__u8		role;
	bool		out;
	__u8		attempt;
	__u8		dev_class[3];
	__u8		features[HCI_MAX_PAGES][8];
	__u16		pkt_type;
	__u16		link_policy;
	__u8		key_type;
	__u8		auth_type;
	__u8		sec_level;
	__u8		pending_sec_level;
	__u8		pin_length;
	__u8		enc_key_size;
	__u8		io_capability;
	__u32		passkey_notify;
	__u8		passkey_entered;
	__u16		disc_timeout;
	__u16		conn_timeout;
	__u16		setting;
	__u16		auth_payload_timeout;
	__u16		le_conn_min_interval;
	__u16		le_conn_max_interval;
	__u16		le_conn_interval;
	__u16		le_conn_latency;
	__u16		le_supv_timeout;
	__u8		le_adv_data[HCI_MAX_AD_LENGTH];
	__u8		le_adv_data_len;
	__u8		le_tx_phy;
	__u8		le_rx_phy;
	__s8		rssi;
	__s8		tx_power;
	__s8		max_tx_power;
	unsigned long	flags;

	enum conn_reasons conn_reason;

	__u32		clock;
	__u16		clock_accuracy;

	unsigned long	conn_info_timestamp;

	__u8		remote_cap;
	__u8		remote_auth;
	__u8		remote_id;

	unsigned int	sent;

	struct sk_buff_head data_q;
	struct list_head chan_list;

	struct delayed_work disc_work;
	struct delayed_work auto_accept_work;
	struct delayed_work idle_work;
	struct delayed_work le_conn_timeout;
	struct work_struct  le_scan_cleanup;

	struct device	dev;
	struct dentry	*debugfs;

	struct hci_dev	*hdev;
	void		*l2cap_data;
	void		*sco_data;
	struct amp_mgr	*amp_mgr;

	struct hci_conn	*link;
	struct bt_codec	codec;

	void (*connect_cfm_cb)	(struct hci_conn *conn, u8 status);
	void (*security_cfm_cb)	(struct hci_conn *conn, u8 status);
	void (*disconn_cfm_cb)	(struct hci_conn *conn, u8 reason);
};

struct hci_chan {
	struct list_head list;
	__u16 handle;
	struct hci_conn *conn;
	struct sk_buff_head data_q;
	unsigned int	sent;
	__u8		state;
	bool		amp;
};

struct hci_conn_params {
	struct list_head list;
	struct list_head action;

	bdaddr_t addr;
	u8 addr_type;

	u16 conn_min_interval;
	u16 conn_max_interval;
	u16 conn_latency;
	u16 supervision_timeout;

	enum {
		HCI_AUTO_CONN_DISABLED,
		HCI_AUTO_CONN_REPORT,
		HCI_AUTO_CONN_DIRECT,
		HCI_AUTO_CONN_ALWAYS,
		HCI_AUTO_CONN_LINK_LOSS,
		HCI_AUTO_CONN_EXPLICIT,
	} auto_connect;

	struct hci_conn *conn;
	bool explicit_connect;
	hci_conn_flags_t flags;
	u8  privacy_mode;
};

extern struct list_head hci_dev_list;
extern struct list_head hci_cb_list;
extern rwlock_t hci_dev_list_lock;
extern struct mutex hci_cb_list_lock;

#define hci_dev_set_flag(hdev, nr)	set_bit((nr), (hdev)->dev_flags)
#define hci_dev_clear_flag(hdev, nr)	clear_bit((nr), (hdev)->dev_flags)
#define hci_dev_change_flag(hdev, nr)	change_bit((nr), (hdev)->dev_flags)
#define hci_dev_test_flag(hdev, nr)	test_bit((nr), (hdev)->dev_flags)
#define hci_dev_test_and_set_flag(hdev, nr)	test_and_set_bit((nr), (hdev)->dev_flags)
#define hci_dev_test_and_clear_flag(hdev, nr)	test_and_clear_bit((nr), (hdev)->dev_flags)
#define hci_dev_test_and_change_flag(hdev, nr)	test_and_change_bit((nr), (hdev)->dev_flags)

#define hci_dev_clear_volatile_flags(hdev)			\
	do {							\
		hci_dev_clear_flag(hdev, HCI_LE_SCAN);		\
		hci_dev_clear_flag(hdev, HCI_LE_ADV);		\
		hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);\
		hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);	\
		hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);	\
	} while (0)

#define hci_dev_le_state_simultaneous(hdev) \
	(test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) && \
	 (hdev->le_states[4] & 0x08) &&	/* Central */ \
	 (hdev->le_states[4] & 0x40) &&	/* Peripheral */ \
	 (hdev->le_states[3] & 0x10))	/* Simultaneous */
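/* Note: the hci_dev_*_flag() helpers above operate on the dev_flags bitmap in
 * struct hci_dev and are the usual way to test or update runtime state, e.g.:
 *
 *	if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
 *		...			// LE scanning currently enabled
 *	hci_dev_set_flag(hdev, HCI_ADVERTISING);
 *
 * (illustrative only; the HCI_LE_SCAN and HCI_ADVERTISING flag values are
 * defined in hci.h)
 */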
/* ----- HCI interface to upper protocols ----- */
int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr);
int l2cap_disconn_ind(struct hci_conn *hcon);
void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags);

#if IS_ENABLED(CONFIG_BT_BREDR)
int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags);
void sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb);
#else
static inline int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  __u8 *flags)
{
	return 0;
}

static inline void sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb)
{
}
#endif

/* ----- Inquiry cache ----- */
#define INQUIRY_CACHE_AGE_MAX	(HZ*30)	/* 30 seconds */
#define INQUIRY_ENTRY_AGE_MAX	(HZ*60)	/* 60 seconds */

static inline void discovery_init(struct hci_dev *hdev)
{
	hdev->discovery.state = DISCOVERY_STOPPED;
	INIT_LIST_HEAD(&hdev->discovery.all);
	INIT_LIST_HEAD(&hdev->discovery.unknown);
	INIT_LIST_HEAD(&hdev->discovery.resolve);
	hdev->discovery.report_invalid_rssi = true;
	hdev->discovery.rssi = HCI_RSSI_INVALID;
}

static inline void hci_discovery_filter_clear(struct hci_dev *hdev)
{
	hdev->discovery.result_filtering = false;
	hdev->discovery.report_invalid_rssi = true;
	hdev->discovery.rssi = HCI_RSSI_INVALID;
	hdev->discovery.uuid_count = 0;
	kfree(hdev->discovery.uuids);
	hdev->discovery.uuids = NULL;
	hdev->discovery.scan_start = 0;
	hdev->discovery.scan_duration = 0;
}

bool hci_discovery_active(struct hci_dev *hdev);

void hci_discovery_set_state(struct hci_dev *hdev, int state);

static inline int inquiry_cache_empty(struct hci_dev *hdev)
{
	return list_empty(&hdev->discovery.all);
}

static inline long inquiry_cache_age(struct hci_dev *hdev)
{
	struct discovery_state *c = &hdev->discovery;
	return jiffies - c->timestamp;
}

static inline long inquiry_entry_age(struct inquiry_entry *e)
{
	return jiffies - e->timestamp;
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr);
struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr);
struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state);
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie);
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known);
void hci_inquiry_cache_flush(struct hci_dev *hdev);

/* ----- HCI Connections ----- */
enum {
	HCI_CONN_AUTH_PEND,
	HCI_CONN_REAUTH_PEND,
	HCI_CONN_ENCRYPT_PEND,
	HCI_CONN_RSWITCH_PEND,
	HCI_CONN_MODE_CHANGE_PEND,
	HCI_CONN_SCO_SETUP_PEND,
	HCI_CONN_MGMT_CONNECTED,
	HCI_CONN_SSP_ENABLED,
	HCI_CONN_SC_ENABLED,
	HCI_CONN_AES_CCM,
	HCI_CONN_POWER_SAVE,
	HCI_CONN_FLUSH_KEY,
	HCI_CONN_ENCRYPT,
	HCI_CONN_AUTH,
	HCI_CONN_SECURE,
	HCI_CONN_FIPS,
	HCI_CONN_STK_ENCRYPT,
	HCI_CONN_AUTH_INITIATOR,
	HCI_CONN_DROP,
	HCI_CONN_PARAM_REMOVAL_PEND,
	HCI_CONN_NEW_LINK_KEY,
	HCI_CONN_SCANNING,
	HCI_CONN_AUTH_FAILURE,
};

static inline bool hci_conn_ssp_enabled(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	return hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	       test_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
}

static inline bool hci_conn_sc_enabled(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	return hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	       test_bit(HCI_CONN_SC_ENABLED, &conn->flags);
}

static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	list_add_rcu(&c->list, &h->list);
	switch (c->type) {
	case ACL_LINK:
		h->acl_num++;
		break;
	case AMP_LINK:
		h->amp_num++;
		break;
	case LE_LINK:
		h->le_num++;
		if (c->role == HCI_ROLE_SLAVE)
			h->le_num_peripheral++;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		h->sco_num++;
		break;
	}
}

static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c)
{
	struct hci_conn_hash *h = &hdev->conn_hash;

	list_del_rcu(&c->list);
	synchronize_rcu();

	switch (c->type) {
	case ACL_LINK:
		h->acl_num--;
		break;
	case AMP_LINK:
		h->amp_num--;
		break;
	case LE_LINK:
		h->le_num--;
		if (c->role == HCI_ROLE_SLAVE)
			h->le_num_peripheral--;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		h->sco_num--;
		break;
	}
}
static inline unsigned int hci_conn_num(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	switch (type) {
	case ACL_LINK:
		return h->acl_num;
	case AMP_LINK:
		return h->amp_num;
	case LE_LINK:
		return h->le_num;
	case SCO_LINK:
	case ESCO_LINK:
		return h->sco_num;
	default:
		return 0;
	}
}

static inline unsigned int hci_conn_count(struct hci_dev *hdev)
{
	struct hci_conn_hash *c = &hdev->conn_hash;

	return c->acl_num + c->amp_num + c->sco_num + c->le_num;
}

static inline __u8 hci_conn_lookup_type(struct hci_dev *hdev, __u16 handle)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;
	__u8 type = INVALID_LINK;

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->handle == handle) {
			type = c->type;
			break;
		}
	}

	rcu_read_unlock();

	return type;
}

static inline struct hci_conn *hci_conn_hash_lookup_handle(struct hci_dev *hdev,
							    __u16 handle)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->handle == handle) {
			rcu_read_unlock();
			return c;
		}
	}
	rcu_read_unlock();

	return NULL;
}

static inline struct hci_conn *hci_conn_hash_lookup_ba(struct hci_dev *hdev,
							__u8 type, bdaddr_t *ba)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && !bacmp(&c->dst, ba)) {
			rcu_read_unlock();
			return c;
		}
	}

	rcu_read_unlock();

	return NULL;
}

static inline struct hci_conn *hci_conn_hash_lookup_le(struct hci_dev *hdev,
							bdaddr_t *ba,
							__u8 ba_type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != LE_LINK)
			continue;

		if (ba_type == c->dst_type && !bacmp(&c->dst, ba)) {
			rcu_read_unlock();
			return c;
		}
	}

	rcu_read_unlock();

	return NULL;
}

static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
							   __u8 type, __u16 state)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->state == state) {
			rcu_read_unlock();
			return c;
		}
	}

	rcu_read_unlock();

	return NULL;
}

static inline struct hci_conn *hci_lookup_le_connect(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == LE_LINK && c->state == BT_CONNECT &&
		    !test_bit(HCI_CONN_SCANNING, &c->flags)) {
			rcu_read_unlock();
			return c;
		}
	}

	rcu_read_unlock();

	return NULL;
}
int hci_disconnect(struct hci_conn *conn, __u8 reason);
bool hci_setup_sync(struct hci_conn *conn, __u16 handle);
void hci_sco_setup(struct hci_conn *conn, __u8 status);

struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
			      u8 role);
int hci_conn_del(struct hci_conn *conn);
void hci_conn_hash_flush(struct hci_dev *hdev);
void hci_conn_check_pending(struct hci_dev *hdev);

struct hci_chan *hci_chan_create(struct hci_conn *conn);
void hci_chan_del(struct hci_chan *chan);
void hci_chan_list_flush(struct hci_conn *conn);
struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle);

struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
				     u8 dst_type, u8 sec_level,
				     u16 conn_timeout,
				     enum conn_reasons conn_reason);
struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
				u8 dst_type, bool dst_resolved, u8 sec_level,
				u16 conn_timeout, u8 role);
struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
				 u8 sec_level, u8 auth_type,
				 enum conn_reasons conn_reason);
struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
				 __u16 setting, struct bt_codec *codec);
int hci_conn_check_link_mode(struct hci_conn *conn);
int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level);
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
		      bool initiator);
int hci_conn_switch_role(struct hci_conn *conn, __u8 role);

void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active);

void hci_conn_failed(struct hci_conn *conn, u8 status);

/*
 * hci_conn_get() and hci_conn_put() are used to control the life-time of an
 * "hci_conn" object. They do not guarantee that the hci_conn object is running,
 * working or anything else. They just guarantee that the object is available
 * and can be dereferenced. So you can use its locks, local variables and any
 * other constant data.
 * Before accessing runtime data, you _must_ lock the object and then check that
 * it is still running. As soon as you release the locks, the connection might
 * get dropped, though.
 *
 * On the other hand, hci_conn_hold() and hci_conn_drop() are used to control
 * how long the underlying connection is held. So every channel that runs on the
 * hci_conn object calls this to prevent the connection from disappearing. As
 * long as you hold a device, you must also guarantee that you have a valid
 * reference to the device via hci_conn_get() (or the initial reference from
 * hci_conn_add()).
 * The hold()/drop() ref-count is known to drop below 0 sometimes, which doesn't
 * break because nobody cares for that. But this means, we cannot use
 * _get()/_drop() in it, but require the caller to have a valid ref (FIXME).
 */
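/* Illustrative sketch of the rules described above (not a real call site),
 * assuming the connection is looked up while holding hci_dev_lock(hdev):
 *
 *	struct hci_conn *conn = hci_conn_hash_lookup_handle(hdev, handle);
 *
 *	if (conn) {
 *		hci_conn_get(conn);	// keep the object dereferenceable
 *		hci_conn_hold(conn);	// keep the underlying link up
 *		...			// use the connection
 *		hci_conn_drop(conn);	// may schedule delayed disconnect
 *		hci_conn_put(conn);	// release the object reference
 *	}
 */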
static inline struct hci_conn *hci_conn_get(struct hci_conn *conn)
{
	get_device(&conn->dev);
	return conn;
}

static inline void hci_conn_put(struct hci_conn *conn)
{
	put_device(&conn->dev);
}

static inline void hci_conn_hold(struct hci_conn *conn)
{
	BT_DBG("hcon %p orig refcnt %d", conn, atomic_read(&conn->refcnt));

	atomic_inc(&conn->refcnt);
	cancel_delayed_work(&conn->disc_work);
}

static inline void hci_conn_drop(struct hci_conn *conn)
{
	BT_DBG("hcon %p orig refcnt %d", conn, atomic_read(&conn->refcnt));

	if (atomic_dec_and_test(&conn->refcnt)) {
		unsigned long timeo;

		switch (conn->type) {
		case ACL_LINK:
		case LE_LINK:
			cancel_delayed_work(&conn->idle_work);
			if (conn->state == BT_CONNECTED) {
				timeo = conn->disc_timeout;
				if (!conn->out)
					timeo *= 2;
			} else {
				timeo = 0;
			}
			break;

		case AMP_LINK:
			timeo = conn->disc_timeout;
			break;

		default:
			timeo = 0;
			break;
		}

		cancel_delayed_work(&conn->disc_work);
		queue_delayed_work(conn->hdev->workqueue,
				   &conn->disc_work, timeo);
	}
}

/* ----- HCI Devices ----- */
static inline void hci_dev_put(struct hci_dev *d)
{
	BT_DBG("%s orig refcnt %d", d->name,
	       kref_read(&d->dev.kobj.kref));

	put_device(&d->dev);
}

static inline struct hci_dev *hci_dev_hold(struct hci_dev *d)
{
	BT_DBG("%s orig refcnt %d", d->name,
	       kref_read(&d->dev.kobj.kref));

	get_device(&d->dev);
	return d;
}

#define hci_dev_lock(d)		mutex_lock(&d->lock)
#define hci_dev_unlock(d)	mutex_unlock(&d->lock)

#define to_hci_dev(d) container_of(d, struct hci_dev, dev)
#define to_hci_conn(c) container_of(c, struct hci_conn, dev)

static inline void *hci_get_drvdata(struct hci_dev *hdev)
{
	return dev_get_drvdata(&hdev->dev);
}

static inline void hci_set_drvdata(struct hci_dev *hdev, void *data)
{
	dev_set_drvdata(&hdev->dev, data);
}

static inline void *hci_get_priv(struct hci_dev *hdev)
{
	return (char *)hdev + sizeof(*hdev);
}

struct hci_dev *hci_dev_get(int index);
struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, u8 src_type);

struct hci_dev *hci_alloc_dev_priv(int sizeof_priv);

static inline struct hci_dev *hci_alloc_dev(void)
{
	return hci_alloc_dev_priv(0);
}

void hci_free_dev(struct hci_dev *hdev);
int hci_register_dev(struct hci_dev *hdev);
void hci_unregister_dev(struct hci_dev *hdev);
void hci_release_dev(struct hci_dev *hdev);
int hci_suspend_dev(struct hci_dev *hdev);
int hci_resume_dev(struct hci_dev *hdev);
int hci_reset_dev(struct hci_dev *hdev);
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb);
int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb);
__printf(2, 3) void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...);
__printf(2, 3) void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...);

static inline void hci_set_msft_opcode(struct hci_dev *hdev, __u16 opcode)
{
#if IS_ENABLED(CONFIG_BT_MSFTEXT)
	hdev->msft_opcode = opcode;
#endif
}

static inline void hci_set_aosp_capable(struct hci_dev *hdev)
{
#if IS_ENABLED(CONFIG_BT_AOSPEXT)
	hdev->aosp_capable = true;
#endif
}
int hci_dev_open(__u16 dev);
int hci_dev_close(__u16 dev);
int hci_dev_do_close(struct hci_dev *hdev);
int hci_dev_reset(__u16 dev);
int hci_dev_reset_stat(__u16 dev);
int hci_dev_cmd(unsigned int cmd, void __user *arg);
int hci_get_dev_list(void __user *arg);
int hci_get_dev_info(void __user *arg);
int hci_get_conn_list(void __user *arg);
int hci_get_conn_info(struct hci_dev *hdev, void __user *arg);
int hci_get_auth_info(struct hci_dev *hdev, void __user *arg);
int hci_inquiry(void __user *arg);

struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *list,
					   bdaddr_t *bdaddr, u8 type);
struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
				    struct list_head *list, bdaddr_t *bdaddr,
				    u8 type);
struct bdaddr_list_with_flags *
hci_bdaddr_list_lookup_with_flags(struct list_head *list, bdaddr_t *bdaddr,
				  u8 type);
int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type);
int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
				 u8 type, u8 *peer_irk, u8 *local_irk);
int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
				   u8 type, u32 flags);
int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type);
int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
				 u8 type);
int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
				   u8 type);
void hci_bdaddr_list_clear(struct list_head *list);

struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
					       bdaddr_t *addr, u8 addr_type);
struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
					    bdaddr_t *addr, u8 addr_type);
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type);
void hci_conn_params_clear_disabled(struct hci_dev *hdev);

struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
						  bdaddr_t *addr,
						  u8 addr_type);

void hci_uuids_clear(struct hci_dev *hdev);

void hci_link_keys_clear(struct hci_dev *hdev);
struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr);
struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent);
struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand);
struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 addr_type, u8 role);
int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type);
void hci_smp_ltks_clear(struct hci_dev *hdev);
int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr);

struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa);
struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type);
struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa);
void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type);
bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16]);
void hci_blocked_keys_clear(struct hci_dev *hdev);
void hci_smp_irks_clear(struct hci_dev *hdev);
bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);

void hci_remote_oob_data_clear(struct hci_dev *hdev);
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, u8 bdaddr_type);
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
			    u8 *hash256, u8 *rand256);
int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 bdaddr_type);

void hci_adv_instances_clear(struct hci_dev *hdev);
struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance);
struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance);
int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
			 u16 adv_data_len, u8 *adv_data,
			 u16 scan_rsp_len, u8 *scan_rsp_data,
			 u16 timeout, u16 duration, s8 tx_power,
			 u32 min_interval, u32 max_interval);
int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
			      u16 adv_data_len, u8 *adv_data,
			      u16 scan_rsp_len, u8 *scan_rsp_data);
int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance);
void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired);
u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance);
bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance);

void hci_adv_monitors_clear(struct hci_dev *hdev);
void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor);
int hci_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status);
int hci_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status);
bool hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor,
			 int *err);
bool hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle, int *err);
bool hci_remove_all_adv_monitor(struct hci_dev *hdev, int *err);
bool hci_is_adv_monitoring(struct hci_dev *hdev);
int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev);

void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);

void hci_init_sysfs(struct hci_dev *hdev);
void hci_conn_init_sysfs(struct hci_conn *conn);
void hci_conn_add_sysfs(struct hci_conn *conn);
void hci_conn_del_sysfs(struct hci_conn *conn);

#define SET_HCIDEV_DEV(hdev, pdev)	((hdev)->dev.parent = (pdev))

/* ----- LMP capabilities ----- */
#define lmp_encrypt_capable(dev)	((dev)->features[0][0] & LMP_ENCRYPT)
#define lmp_rswitch_capable(dev)	((dev)->features[0][0] & LMP_RSWITCH)
#define lmp_hold_capable(dev)		((dev)->features[0][0] & LMP_HOLD)
#define lmp_sniff_capable(dev)		((dev)->features[0][0] & LMP_SNIFF)
#define lmp_park_capable(dev)		((dev)->features[0][1] & LMP_PARK)
#define lmp_inq_rssi_capable(dev)	((dev)->features[0][3] & LMP_RSSI_INQ)
#define lmp_esco_capable(dev)		((dev)->features[0][3] & LMP_ESCO)
#define lmp_bredr_capable(dev)		(!((dev)->features[0][4] & LMP_NO_BREDR))
#define lmp_le_capable(dev)		((dev)->features[0][4] & LMP_LE)
#define lmp_sniffsubr_capable(dev)	((dev)->features[0][5] & LMP_SNIFF_SUBR)
#define lmp_pause_enc_capable(dev)	((dev)->features[0][5] & LMP_PAUSE_ENC)
#define lmp_esco_2m_capable(dev)	((dev)->features[0][5] & LMP_EDR_ESCO_2M)
#define lmp_ext_inq_capable(dev)	((dev)->features[0][6] & LMP_EXT_INQ)
#define lmp_le_br_capable(dev)		(!!((dev)->features[0][6] & LMP_SIMUL_LE_BR))
#define lmp_ssp_capable(dev)		((dev)->features[0][6] & LMP_SIMPLE_PAIR)
#define lmp_no_flush_capable(dev)	((dev)->features[0][6] & LMP_NO_FLUSH)
#define lmp_lsto_capable(dev)		((dev)->features[0][7] & LMP_LSTO)
#define lmp_inq_tx_pwr_capable(dev)	((dev)->features[0][7] & LMP_INQ_TX_PWR)
#define lmp_ext_feat_capable(dev)	((dev)->features[0][7] & LMP_EXTFEATURES)
#define lmp_transp_capable(dev)		((dev)->features[0][2] & LMP_TRANSPARENT)
#define lmp_edr_2m_capable(dev)		((dev)->features[0][3] & LMP_EDR_2M)
#define lmp_edr_3m_capable(dev)		((dev)->features[0][3] & LMP_EDR_3M)
#define lmp_edr_3slot_capable(dev)	((dev)->features[0][4] & LMP_EDR_3SLOT)
#define lmp_edr_5slot_capable(dev)	((dev)->features[0][5] & LMP_EDR_5SLOT)
/* ----- Extended LMP capabilities ----- */
#define lmp_cpb_central_capable(dev)	((dev)->features[2][0] & LMP_CPB_CENTRAL)
#define lmp_cpb_peripheral_capable(dev)	((dev)->features[2][0] & LMP_CPB_PERIPHERAL)
#define lmp_sync_train_capable(dev)	((dev)->features[2][0] & LMP_SYNC_TRAIN)
#define lmp_sync_scan_capable(dev)	((dev)->features[2][0] & LMP_SYNC_SCAN)
#define lmp_sc_capable(dev)		((dev)->features[2][1] & LMP_SC)
#define lmp_ping_capable(dev)		((dev)->features[2][1] & LMP_PING)

/* ----- Host capabilities ----- */
#define lmp_host_ssp_capable(dev)	((dev)->features[1][0] & LMP_HOST_SSP)
#define lmp_host_sc_capable(dev)	((dev)->features[1][0] & LMP_HOST_SC)
#define lmp_host_le_capable(dev)	(!!((dev)->features[1][0] & LMP_HOST_LE))
#define lmp_host_le_br_capable(dev)	(!!((dev)->features[1][0] & LMP_HOST_LE_BREDR))
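/* Note: features[0] holds LMP feature page 0 (controller features),
 * features[1] page 1 (the host-supported features) and features[2] page 2,
 * so a capability that needs both controller and host support is usually
 * checked by combining the corresponding macros, for example
 * lmp_le_capable(hdev) && lmp_host_le_capable(hdev).
 */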
#define hdev_is_powered(dev)	(test_bit(HCI_UP, &(dev)->flags) && \
				 !hci_dev_test_flag(dev, HCI_AUTO_OFF))
#define bredr_sc_enabled(dev)	(lmp_sc_capable(dev) && \
				 hci_dev_test_flag(dev, HCI_SC_ENABLED))
#define rpa_valid(dev)		(bacmp(&dev->rpa, BDADDR_ANY) && \
				 !hci_dev_test_flag(dev, HCI_RPA_EXPIRED))
#define adv_rpa_valid(adv)	(bacmp(&adv->random_addr, BDADDR_ANY) && \
				 !adv->rpa_expired)

#define scan_1m(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_1M) || \
		      ((dev)->le_rx_def_phys & HCI_LE_SET_PHY_1M))

#define scan_2m(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_2M) || \
		      ((dev)->le_rx_def_phys & HCI_LE_SET_PHY_2M))

#define scan_coded(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_CODED) || \
			 ((dev)->le_rx_def_phys & HCI_LE_SET_PHY_CODED))

#define ll_privacy_capable(dev) ((dev)->le_features[0] & HCI_LE_LL_PRIVACY)

/* Use LL Privacy based address resolution if supported */
#define use_ll_privacy(dev) (ll_privacy_capable(dev) && \
			     hci_dev_test_flag(dev, HCI_ENABLE_LL_PRIVACY))

#define privacy_mode_capable(dev) (use_ll_privacy(dev) && \
				   (hdev->commands[39] & 0x04))

/* Use enhanced synchronous connection if command is supported and its quirk
 * has not been set.
 */
#define enhanced_sync_conn_capable(dev) \
	(((dev)->commands[29] & 0x08) && \
	 !test_bit(HCI_QUIRK_BROKEN_ENHANCED_SETUP_SYNC_CONN, &(dev)->quirks))

/* Use ext scanning if set ext scan param and ext scan enable is supported */
#define use_ext_scan(dev) (((dev)->commands[37] & 0x20) && \
			   ((dev)->commands[37] & 0x40))
/* Use ext create connection if command is supported */
#define use_ext_conn(dev) ((dev)->commands[37] & 0x80)

/* Extended advertising support */
#define ext_adv_capable(dev) (((dev)->le_features[1] & HCI_LE_EXT_ADV))

/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 1789:
 *
 * C24: Mandatory if the LE Controller supports Connection State and either
 * LE Feature (LL Privacy) or LE Feature (Extended Advertising) is supported
 */
#define use_enhanced_conn_complete(dev) (ll_privacy_capable(dev) || \
					 ext_adv_capable(dev))

/* ----- HCI protocols ----- */
#define HCI_PROTO_DEFER		0x01

static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr,
					__u8 type, __u8 *flags)
{
	switch (type) {
	case ACL_LINK:
		return l2cap_connect_ind(hdev, bdaddr);

	case SCO_LINK:
	case ESCO_LINK:
		return sco_connect_ind(hdev, bdaddr, flags);

	default:
		BT_ERR("unknown link type %d", type);
		return -EINVAL;
	}
}

static inline int hci_proto_disconn_ind(struct hci_conn *conn)
{
	if (conn->type != ACL_LINK && conn->type != LE_LINK)
		return HCI_ERROR_REMOTE_USER_TERM;

	return l2cap_disconn_ind(conn);
}

/* ----- HCI callbacks ----- */
struct hci_cb {
	struct list_head list;

	char *name;

	void (*connect_cfm)	(struct hci_conn *conn, __u8 status);
	void (*disconn_cfm)	(struct hci_conn *conn, __u8 status);
	void (*security_cfm)	(struct hci_conn *conn, __u8 status,
				 __u8 encrypt);
	void (*key_change_cfm)	(struct hci_conn *conn, __u8 status);
	void (*role_switch_cfm)	(struct hci_conn *conn, __u8 status, __u8 role);
};
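/* Illustrative sketch (not part of this header): an upper protocol registers a
 * struct hci_cb to be notified of connection events, roughly as follows, with
 * my_proto/my_connect_cfm/my_disconn_cfm being hypothetical names:
 *
 *	static struct hci_cb my_cb = {
 *		.name		= "my_proto",
 *		.connect_cfm	= my_connect_cfm,
 *		.disconn_cfm	= my_disconn_cfm,
 *	};
 *
 *	hci_register_cb(&my_cb);	// declared later in this file
 *	...
 *	hci_unregister_cb(&my_cb);
 *
 * The callbacks are invoked from the hci_connect_cfm()/hci_disconn_cfm()
 * helpers below while hci_cb_list_lock is held.
 */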
static inline void hci_connect_cfm(struct hci_conn *conn, __u8 status)
{
	struct hci_cb *cb;

	mutex_lock(&hci_cb_list_lock);
	list_for_each_entry(cb, &hci_cb_list, list) {
		if (cb->connect_cfm)
			cb->connect_cfm(conn, status);
	}
	mutex_unlock(&hci_cb_list_lock);

	if (conn->connect_cfm_cb)
		conn->connect_cfm_cb(conn, status);
}

static inline void hci_disconn_cfm(struct hci_conn *conn, __u8 reason)
{
	struct hci_cb *cb;

	mutex_lock(&hci_cb_list_lock);
	list_for_each_entry(cb, &hci_cb_list, list) {
		if (cb->disconn_cfm)
			cb->disconn_cfm(conn, reason);
	}
	mutex_unlock(&hci_cb_list_lock);

	if (conn->disconn_cfm_cb)
		conn->disconn_cfm_cb(conn, reason);
}

static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status)
{
	struct hci_cb *cb;
	__u8 encrypt;

	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
		return;

	encrypt = test_bit(HCI_CONN_ENCRYPT, &conn->flags) ? 0x01 : 0x00;

	mutex_lock(&hci_cb_list_lock);
	list_for_each_entry(cb, &hci_cb_list, list) {
		if (cb->security_cfm)
			cb->security_cfm(conn, status, encrypt);
	}
	mutex_unlock(&hci_cb_list_lock);

	if (conn->security_cfm_cb)
		conn->security_cfm_cb(conn, status);
}

static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status)
{
	struct hci_cb *cb;
	__u8 encrypt;

	if (conn->state == BT_CONFIG) {
		if (!status)
			conn->state = BT_CONNECTED;

		hci_connect_cfm(conn, status);
		hci_conn_drop(conn);
		return;
	}

	if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags))
		encrypt = 0x00;
	else if (test_bit(HCI_CONN_AES_CCM, &conn->flags))
		encrypt = 0x02;
	else
		encrypt = 0x01;

	if (!status) {
		if (conn->sec_level == BT_SECURITY_SDP)
			conn->sec_level = BT_SECURITY_LOW;

		if (conn->pending_sec_level > conn->sec_level)
			conn->sec_level = conn->pending_sec_level;
	}

	mutex_lock(&hci_cb_list_lock);
	list_for_each_entry(cb, &hci_cb_list, list) {
		if (cb->security_cfm)
			cb->security_cfm(conn, status, encrypt);
	}
	mutex_unlock(&hci_cb_list_lock);

	if (conn->security_cfm_cb)
		conn->security_cfm_cb(conn, status);
}

static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status)
{
	struct hci_cb *cb;

	mutex_lock(&hci_cb_list_lock);
	list_for_each_entry(cb, &hci_cb_list, list) {
		if (cb->key_change_cfm)
			cb->key_change_cfm(conn, status);
	}
	mutex_unlock(&hci_cb_list_lock);
}

static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status,
				       __u8 role)
{
	struct hci_cb *cb;

	mutex_lock(&hci_cb_list_lock);
	list_for_each_entry(cb, &hci_cb_list, list) {
		if (cb->role_switch_cfm)
			cb->role_switch_cfm(conn, status, role);
	}
	mutex_unlock(&hci_cb_list_lock);
}

static inline bool hci_bdaddr_is_rpa(bdaddr_t *bdaddr, u8 addr_type)
{
	if (addr_type != ADDR_LE_DEV_RANDOM)
		return false;

	if ((bdaddr->b[5] & 0xc0) == 0x40)
		return true;

	return false;
}

static inline bool hci_is_identity_address(bdaddr_t *addr, u8 addr_type)
{
	if (addr_type == ADDR_LE_DEV_PUBLIC)
		return true;

	/* Check for Random Static address type */
	if ((addr->b[5] & 0xc0) == 0xc0)
		return true;

	return false;
}

static inline struct smp_irk *hci_get_irk(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, u8 addr_type)
{
	if (!hci_bdaddr_is_rpa(bdaddr, addr_type))
		return NULL;

	return hci_find_irk_by_rpa(hdev, bdaddr);
}

static inline int hci_check_conn_params(u16 min, u16 max, u16 latency,
					u16 to_multiplier)
{
	u16 max_latency;

	if (min > max || min < 6 || max > 3200)
		return -EINVAL;

	if (to_multiplier < 10 || to_multiplier > 3200)
		return -EINVAL;

	if (max >= to_multiplier * 8)
		return -EINVAL;

	max_latency = (to_multiplier * 4 / max) - 1;
	if (latency > 499 || latency > max_latency)
		return -EINVAL;

	return 0;
}
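/* Worked example for hci_check_conn_params(): min = 6 and max = 12 (7.5 ms and
 * 15 ms in 1.25 ms units), latency = 0 and to_multiplier = 42 (420 ms in 10 ms
 * units) is accepted: max (12) is below 8 * to_multiplier (336), and
 * max_latency is (42 * 4 / 12) - 1 = 13, so latency 0 is in range. The same
 * parameters with to_multiplier = 1 (10 ms) would be rejected by the "< 10"
 * check above.
 */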
int hci_register_cb(struct hci_cb *hcb);
int hci_unregister_cb(struct hci_cb *hcb);

int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
		   const void *param);

int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param);
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags);
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb);

void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode);

u32 hci_conn_get_phy(struct hci_conn *conn);

/* ----- HCI Sockets ----- */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb);
void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
			 int flag, struct sock *skip_sk);
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb);
void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event,
				 void *data, u16 data_len, ktime_t tstamp,
				 int flag, struct sock *skip_sk);

void hci_sock_dev_event(struct hci_dev *hdev, int event);

#define HCI_MGMT_VAR_LEN	BIT(0)
#define HCI_MGMT_NO_HDEV	BIT(1)
#define HCI_MGMT_UNTRUSTED	BIT(2)
#define HCI_MGMT_UNCONFIGURED	BIT(3)
#define HCI_MGMT_HDEV_OPTIONAL	BIT(4)

struct hci_mgmt_handler {
	int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
		     u16 data_len);
	size_t data_len;
	unsigned long flags;
};

struct hci_mgmt_chan {
	struct list_head list;
	unsigned short channel;
	size_t handler_count;
	const struct hci_mgmt_handler *handlers;
	void (*hdev_init) (struct sock *sk, struct hci_dev *hdev);
};

int hci_mgmt_chan_register(struct hci_mgmt_chan *c);
void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c);

/* Management interface */
#define DISCOV_TYPE_BREDR		(BIT(BDADDR_BREDR))
#define DISCOV_TYPE_LE			(BIT(BDADDR_LE_PUBLIC) | \
					 BIT(BDADDR_LE_RANDOM))
#define DISCOV_TYPE_INTERLEAVED		(BIT(BDADDR_BREDR) | \
					 BIT(BDADDR_LE_PUBLIC) | \
					 BIT(BDADDR_LE_RANDOM))
/* These LE scan and inquiry parameters were chosen according to LE General
 * Discovery Procedure specification.
 */
#define DISCOV_LE_SCAN_WIN		0x12
#define DISCOV_LE_SCAN_INT		0x12
#define DISCOV_LE_TIMEOUT		10240	/* msec */
#define DISCOV_INTERLEAVED_TIMEOUT	5120	/* msec */
#define DISCOV_INTERLEAVED_INQUIRY_LEN	0x04
#define DISCOV_BREDR_INQUIRY_LEN	0x08
#define DISCOV_LE_RESTART_DELAY		msecs_to_jiffies(200)	/* msec */
#define DISCOV_LE_FAST_ADV_INT_MIN	0x00A0	/* 100 msec */
#define DISCOV_LE_FAST_ADV_INT_MAX	0x00F0	/* 150 msec */

#define NAME_RESOLVE_DURATION		msecs_to_jiffies(10240)	/* 10.24 sec */
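/* Note: DISCOV_LE_SCAN_INT and DISCOV_LE_SCAN_WIN are in the controller's
 * 0.625 ms units, so 0x12 corresponds to an 11.25 ms scan interval and window.
 * DISCOV_LE_FAST_ADV_INT_MIN/MAX use the same units: 0x00A0 * 0.625 ms = 100 ms
 * and 0x00F0 * 0.625 ms = 150 ms, matching the comments above.
 */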
void mgmt_fill_version_info(void *ver);
int mgmt_new_settings(struct hci_dev *hdev);
void mgmt_index_added(struct hci_dev *hdev);
void mgmt_index_removed(struct hci_dev *hdev);
void mgmt_set_powered_failed(struct hci_dev *hdev, int err);
void mgmt_power_on(struct hci_dev *hdev, int err);
void __mgmt_power_off(struct hci_dev *hdev);
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent);
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u8 *name, u8 name_len);
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected);
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status);
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status);
void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure);
void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status);
void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status);
int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint);
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status);
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status);
int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type);
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status);
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status);
int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered);
void mgmt_auth_failed(struct hci_conn *conn, u8 status);
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status);
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status);
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status);
void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status);
void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status);
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len);
void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len);
void mgmt_discovering(struct hci_dev *hdev, u8 discovering);
void mgmt_suspending(struct hci_dev *hdev, u8 state);
void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
		   u8 addr_type);
bool mgmt_powering_down(struct hci_dev *hdev);
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent);
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent);
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent);
void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout);
void mgmt_smp_complete(struct hci_conn *conn, bool complete);
bool mgmt_get_connectable(struct hci_dev *hdev);
u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev);
void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev,
			    u8 instance);
void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
			      u8 instance);
void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle);
int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip);
int mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status);
int mgmt_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status);
void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
				  bdaddr_t *bdaddr, u8 addr_type);

u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
		      u16 to_multiplier);
void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
		      __u8 ltk[16], __u8 key_size);

void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 *bdaddr_type);

#define SCO_AIRMODE_MASK	0x0003
#define SCO_AIRMODE_CVSD	0x0000
#define SCO_AIRMODE_TRANSP	0x0003

#define LOCAL_CODEC_ACL_MASK	BIT(0)
#define LOCAL_CODEC_SCO_MASK	BIT(1)

#define TRANSPORT_TYPE_MAX	0x04

#endif /* __HCI_CORE_H */