/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
   Copyright 2023 NXP

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI connection handling. */

#include <linux/export.h>
#include <linux/debugfs.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/iso.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "smp.h"
#include "eir.h"

struct sco_param {
	u16 pkt_type;
	u16 max_latency;
	u8  retrans_effort;
};

struct conn_handle_t {
	struct hci_conn *conn;
	__u16 handle;
};

static const struct sco_param esco_param_cvsd[] = {
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a, 0x01 }, /* S3 */
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007, 0x01 }, /* S2 */
	{ EDR_ESCO_MASK | ESCO_EV3,   0x0007, 0x01 }, /* S1 */
	{ EDR_ESCO_MASK | ESCO_HV3,   0xffff, 0x01 }, /* D1 */
	{ EDR_ESCO_MASK | ESCO_HV1,   0xffff, 0x01 }, /* D0 */
};

static const struct sco_param sco_param_cvsd[] = {
	{ EDR_ESCO_MASK | ESCO_HV3,   0xffff, 0xff }, /* D1 */
	{ EDR_ESCO_MASK | ESCO_HV1,   0xffff, 0xff }, /* D0 */
};

static const struct sco_param esco_param_msbc[] = {
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d, 0x02 }, /* T2 */
	{ EDR_ESCO_MASK | ESCO_EV3,   0x0008, 0x02 }, /* T1 */
};
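/* The tables above encode the eSCO/SCO "alternate settings" (S1-S3 for
 * CVSD, T1/T2 for mSBC, D0/D1 for legacy SCO) from the HFP specification,
 * ordered from most to least preferred. find_next_esco_param() below walks
 * them using conn->attempt as a 1-based index, so each retry after a
 * rejected synchronous connection setup falls back to the next entry.
 */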
/* This function requires the caller holds hdev->lock */
void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status)
{
	struct hci_conn_params *params;
	struct hci_dev *hdev = conn->hdev;
	struct smp_irk *irk;
	bdaddr_t *bdaddr;
	u8 bdaddr_type;

	bdaddr = &conn->dst;
	bdaddr_type = conn->dst_type;

	/* Check if we need to convert to identity address */
	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		bdaddr_type = irk->addr_type;
	}

	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, bdaddr,
					   bdaddr_type);
	if (!params)
		return;

	if (params->conn) {
		hci_conn_drop(params->conn);
		hci_conn_put(params->conn);
		params->conn = NULL;
	}

	if (!params->explicit_connect)
		return;

	/* If the status indicates successful cancellation of
	 * the attempt (i.e. Unknown Connection Id) there's no point in
	 * notifying failure since we'll go back to keep trying to
	 * connect. The only exception is explicit connect requests
	 * where a timeout + cancel does indicate an actual failure.
	 */
	if (status && status != HCI_ERROR_UNKNOWN_CONN_ID)
		mgmt_connect_failed(hdev, &conn->dst, conn->type,
				    conn->dst_type, status);

	/* The connection attempt was doing scan for new RPA, and is
	 * in scan phase. If params are not associated with any other
	 * autoconnect action, remove them completely. If they are, just unmark
	 * them as waiting for connection, by clearing explicit_connect field.
	 */
	params->explicit_connect = false;

	hci_pend_le_list_del_init(params);

	switch (params->auto_connect) {
	case HCI_AUTO_CONN_EXPLICIT:
		hci_conn_params_del(hdev, bdaddr, bdaddr_type);
		/* return instead of break to avoid duplicate scan update */
		return;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		hci_pend_le_list_add(params, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		hci_pend_le_list_add(params, &hdev->pend_le_reports);
		break;
	default:
		break;
	}

	hci_update_passive_scan(hdev);
}

static void hci_conn_cleanup(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags))
		hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type);

	if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
		hci_remove_link_key(hdev, &conn->dst);

	hci_chan_list_flush(conn);

	hci_conn_hash_del(hdev, conn);

	if (HCI_CONN_HANDLE_UNSET(conn->handle))
		ida_free(&hdev->unset_handle_ida, conn->handle);

	if (conn->cleanup)
		conn->cleanup(conn);

	if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
		switch (conn->setting & SCO_AIRMODE_MASK) {
		case SCO_AIRMODE_CVSD:
		case SCO_AIRMODE_TRANSP:
			if (hdev->notify)
				hdev->notify(hdev, HCI_NOTIFY_DISABLE_SCO);
			break;
		}
	} else {
		if (hdev->notify)
			hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
	}

	debugfs_remove_recursive(conn->debugfs);

	hci_conn_del_sysfs(conn);

	hci_dev_put(hdev);
}

int hci_disconnect(struct hci_conn *conn, __u8 reason)
{
	BT_DBG("hcon %p", conn);

	/* When we are central of an established connection and it enters
	 * the disconnect timeout, then go ahead and try to read the
	 * current clock offset. Processing of the result is done
	 * within the event handling and hci_clock_offset_evt function.
	 */
	if (conn->type == ACL_LINK && conn->role == HCI_ROLE_MASTER &&
	    (conn->state == BT_CONNECTED || conn->state == BT_CONFIG)) {
		struct hci_dev *hdev = conn->hdev;
		struct hci_cp_read_clock_offset clkoff_cp;

		clkoff_cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_READ_CLOCK_OFFSET, sizeof(clkoff_cp),
			     &clkoff_cp);
	}

	return hci_abort_conn(conn, reason);
}
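/* Usage sketch (illustrative, not a call site in this file): upper layers
 * typically tear a link down with a spec error code, e.g.:
 *
 *	hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
 *
 * The clock offset read above is purely opportunistic; the disconnect
 * itself is driven by hci_abort_conn().
 */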
static void hci_add_sco(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_add_sco cp;

	BT_DBG("hcon %p", conn);

	conn->state = BT_CONNECT;
	conn->out = true;

	conn->attempt++;

	cp.handle   = cpu_to_le16(handle);
	cp.pkt_type = cpu_to_le16(conn->pkt_type);

	hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
}

static bool find_next_esco_param(struct hci_conn *conn,
				 const struct sco_param *esco_param, int size)
{
	if (!conn->parent)
		return false;

	for (; conn->attempt <= size; conn->attempt++) {
		if (lmp_esco_2m_capable(conn->parent) ||
		    (esco_param[conn->attempt - 1].pkt_type & ESCO_2EV3))
			break;
		BT_DBG("hcon %p skipped attempt %d, eSCO 2M not supported",
		       conn, conn->attempt);
	}

	return conn->attempt <= size;
}
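/* Worked example of the fallback walk above: for an eSCO CVSD setup against
 * a remote without eSCO 2M support, attempts 1 and 2 (S3, S2, both 2-EV3
 * based) are skipped and the first real try uses S1; if the remote then
 * rejects S1, the retry path re-enters with conn->attempt == 4 and falls
 * back to D1, then D0.
 */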
static int configure_datapath_sync(struct hci_dev *hdev, struct bt_codec *codec)
{
	int err;
	__u8 vnd_len, *vnd_data = NULL;
	struct hci_op_configure_data_path *cmd = NULL;

	/* Not having a vendor data path or a codec config callback is not an
	 * error, it simply means there is nothing to configure here.
	 */
	if (!codec->data_path || !hdev->get_codec_config_data)
		return 0;

	err = hdev->get_codec_config_data(hdev, ESCO_LINK, codec, &vnd_len,
					  &vnd_data);
	if (err < 0)
		goto error;

	cmd = kzalloc(sizeof(*cmd) + vnd_len, GFP_KERNEL);
	if (!cmd) {
		err = -ENOMEM;
		goto error;
	}

	err = hdev->get_data_path_id(hdev, &cmd->data_path_id);
	if (err < 0)
		goto error;

	cmd->vnd_len = vnd_len;
	memcpy(cmd->vnd_data, vnd_data, vnd_len);

	cmd->direction = 0x00;
	__hci_cmd_sync_status(hdev, HCI_CONFIGURE_DATA_PATH,
			      sizeof(*cmd) + vnd_len, cmd, HCI_CMD_TIMEOUT);

	cmd->direction = 0x01;
	err = __hci_cmd_sync_status(hdev, HCI_CONFIGURE_DATA_PATH,
				    sizeof(*cmd) + vnd_len, cmd,
				    HCI_CMD_TIMEOUT);
error:

	kfree(cmd);
	kfree(vnd_data);
	return err;
}
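/* Sketch of the driver side (an assumption for illustration, not defined in
 * this file): a vendor driver backing hdev->get_codec_config_data is
 * expected to hand back a kmalloc'ed blob that the caller above kfree()s,
 * roughly along these lines ("foo_" names are hypothetical):
 *
 *	static int foo_get_codec_config_data(struct hci_dev *hdev, __u8 type,
 *					     struct bt_codec *codec,
 *					     __u8 *vnd_len, __u8 **vnd_data)
 *	{
 *		*vnd_data = kmemdup(foo_blob, sizeof(foo_blob), GFP_KERNEL);
 *		if (!*vnd_data)
 *			return -ENOMEM;
 *		*vnd_len = sizeof(foo_blob);
 *		return 0;
 *	}
 */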
static int hci_enhanced_setup_sync(struct hci_dev *hdev, void *data)
{
	struct conn_handle_t *conn_handle = data;
	struct hci_conn *conn = conn_handle->conn;
	__u16 handle = conn_handle->handle;
	struct hci_cp_enhanced_setup_sync_conn cp;
	const struct sco_param *param;

	kfree(conn_handle);

	bt_dev_dbg(hdev, "hcon %p", conn);

	configure_datapath_sync(hdev, &conn->codec);

	conn->state = BT_CONNECT;
	conn->out = true;

	conn->attempt++;

	memset(&cp, 0x00, sizeof(cp));

	cp.handle = cpu_to_le16(handle);

	cp.tx_bandwidth = cpu_to_le32(0x00001f40);
	cp.rx_bandwidth = cpu_to_le32(0x00001f40);

	switch (conn->codec.id) {
	case BT_CODEC_MSBC:
		if (!find_next_esco_param(conn, esco_param_msbc,
					  ARRAY_SIZE(esco_param_msbc)))
			return -EINVAL;

		param = &esco_param_msbc[conn->attempt - 1];
		cp.tx_coding_format.id = 0x05;
		cp.rx_coding_format.id = 0x05;
		cp.tx_codec_frame_size = __cpu_to_le16(60);
		cp.rx_codec_frame_size = __cpu_to_le16(60);
		cp.in_bandwidth = __cpu_to_le32(32000);
		cp.out_bandwidth = __cpu_to_le32(32000);
		cp.in_coding_format.id = 0x04;
		cp.out_coding_format.id = 0x04;
		cp.in_coded_data_size = __cpu_to_le16(16);
		cp.out_coded_data_size = __cpu_to_le16(16);
		cp.in_pcm_data_format = 2;
		cp.out_pcm_data_format = 2;
		cp.in_pcm_sample_payload_msb_pos = 0;
		cp.out_pcm_sample_payload_msb_pos = 0;
		cp.in_data_path = conn->codec.data_path;
		cp.out_data_path = conn->codec.data_path;
		cp.in_transport_unit_size = 1;
		cp.out_transport_unit_size = 1;
		break;

	case BT_CODEC_TRANSPARENT:
		if (!find_next_esco_param(conn, esco_param_msbc,
					  ARRAY_SIZE(esco_param_msbc)))
			return -EINVAL;
		param = &esco_param_msbc[conn->attempt - 1];
		cp.tx_coding_format.id = 0x03;
		cp.rx_coding_format.id = 0x03;
		cp.tx_codec_frame_size = __cpu_to_le16(60);
		cp.rx_codec_frame_size = __cpu_to_le16(60);
		cp.in_bandwidth = __cpu_to_le32(0x1f40);
		cp.out_bandwidth = __cpu_to_le32(0x1f40);
		cp.in_coding_format.id = 0x03;
		cp.out_coding_format.id = 0x03;
		cp.in_coded_data_size = __cpu_to_le16(16);
		cp.out_coded_data_size = __cpu_to_le16(16);
		cp.in_pcm_data_format = 2;
		cp.out_pcm_data_format = 2;
		cp.in_pcm_sample_payload_msb_pos = 0;
		cp.out_pcm_sample_payload_msb_pos = 0;
		cp.in_data_path = conn->codec.data_path;
		cp.out_data_path = conn->codec.data_path;
		cp.in_transport_unit_size = 1;
		cp.out_transport_unit_size = 1;
		break;

	case BT_CODEC_CVSD:
		if (conn->parent && lmp_esco_capable(conn->parent)) {
			if (!find_next_esco_param(conn, esco_param_cvsd,
						  ARRAY_SIZE(esco_param_cvsd)))
				return -EINVAL;
			param = &esco_param_cvsd[conn->attempt - 1];
		} else {
			if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
				return -EINVAL;
			param = &sco_param_cvsd[conn->attempt - 1];
		}
		cp.tx_coding_format.id = 2;
		cp.rx_coding_format.id = 2;
		cp.tx_codec_frame_size = __cpu_to_le16(60);
		cp.rx_codec_frame_size = __cpu_to_le16(60);
		cp.in_bandwidth = __cpu_to_le32(16000);
		cp.out_bandwidth = __cpu_to_le32(16000);
		cp.in_coding_format.id = 4;
		cp.out_coding_format.id = 4;
		cp.in_coded_data_size = __cpu_to_le16(16);
		cp.out_coded_data_size = __cpu_to_le16(16);
		cp.in_pcm_data_format = 2;
		cp.out_pcm_data_format = 2;
		cp.in_pcm_sample_payload_msb_pos = 0;
		cp.out_pcm_sample_payload_msb_pos = 0;
		cp.in_data_path = conn->codec.data_path;
		cp.out_data_path = conn->codec.data_path;
		cp.in_transport_unit_size = 16;
		cp.out_transport_unit_size = 16;
		break;
	default:
		return -EINVAL;
	}

	cp.retrans_effort = param->retrans_effort;
	cp.pkt_type = __cpu_to_le16(param->pkt_type);
	cp.max_latency = __cpu_to_le16(param->max_latency);

	if (hci_send_cmd(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
		return -EIO;

	return 0;
}

static bool hci_setup_sync_conn(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_setup_sync_conn cp;
	const struct sco_param *param;

	bt_dev_dbg(hdev, "hcon %p", conn);

	conn->state = BT_CONNECT;
	conn->out = true;

	conn->attempt++;

	cp.handle = cpu_to_le16(handle);

	cp.tx_bandwidth = cpu_to_le32(0x00001f40);
	cp.rx_bandwidth = cpu_to_le32(0x00001f40);
	cp.voice_setting = cpu_to_le16(conn->setting);

	switch (conn->setting & SCO_AIRMODE_MASK) {
	case SCO_AIRMODE_TRANSP:
		if (!find_next_esco_param(conn, esco_param_msbc,
					  ARRAY_SIZE(esco_param_msbc)))
			return false;
		param = &esco_param_msbc[conn->attempt - 1];
		break;
	case SCO_AIRMODE_CVSD:
		if (conn->parent && lmp_esco_capable(conn->parent)) {
			if (!find_next_esco_param(conn, esco_param_cvsd,
						  ARRAY_SIZE(esco_param_cvsd)))
				return false;
			param = &esco_param_cvsd[conn->attempt - 1];
		} else {
			if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
				return false;
			param = &sco_param_cvsd[conn->attempt - 1];
		}
		break;
	default:
		return false;
	}

	cp.retrans_effort = param->retrans_effort;
	cp.pkt_type = __cpu_to_le16(param->pkt_type);
	cp.max_latency = __cpu_to_le16(param->max_latency);

	if (hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
		return false;

	return true;
}

bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
{
	int result;
	struct conn_handle_t *conn_handle;

	if (enhanced_sync_conn_capable(conn->hdev)) {
		conn_handle = kzalloc(sizeof(*conn_handle), GFP_KERNEL);

		if (!conn_handle)
			return false;

		conn_handle->conn = conn;
		conn_handle->handle = handle;
		result = hci_cmd_sync_queue(conn->hdev, hci_enhanced_setup_sync,
					    conn_handle, NULL);
		if (result < 0)
			kfree(conn_handle);

		return result == 0;
	}

	return hci_setup_sync_conn(conn, handle);
}

u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
		      u16 to_multiplier)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_conn_params *params;
	struct hci_cp_le_conn_update cp;

	hci_dev_lock(hdev);

	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		params->conn_min_interval = min;
		params->conn_max_interval = max;
		params->conn_latency = latency;
		params->supervision_timeout = to_multiplier;
	}

	hci_dev_unlock(hdev);

	memset(&cp, 0, sizeof(cp));
	cp.handle		= cpu_to_le16(conn->handle);
	cp.conn_interval_min	= cpu_to_le16(min);
	cp.conn_interval_max	= cpu_to_le16(max);
	cp.conn_latency		= cpu_to_le16(latency);
	cp.supervision_timeout	= cpu_to_le16(to_multiplier);
	cp.min_ce_len		= cpu_to_le16(0x0000);
	cp.max_ce_len		= cpu_to_le16(0x0000);

	hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);

	if (params)
		return 0x01;

	return 0x00;
}
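/* Parameter units (Core spec): min/max interval come in 1.25 ms steps and
 * to_multiplier in 10 ms steps, so e.g. min = max = 0x0028 and
 * to_multiplier = 0x01f4 request a 50 ms connection interval with a 5 s
 * supervision timeout.
 */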
void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
		      __u8 ltk[16], __u8 key_size)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_le_start_enc cp;

	BT_DBG("hcon %p", conn);

	memset(&cp, 0, sizeof(cp));

	cp.handle = cpu_to_le16(conn->handle);
	cp.rand = rand;
	cp.ediv = ediv;
	memcpy(cp.ltk, ltk, key_size);

	hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
}

/* Device _must_ be locked */
void hci_sco_setup(struct hci_conn *conn, __u8 status)
{
	struct hci_link *link;

	link = list_first_entry_or_null(&conn->link_list, struct hci_link, list);
	if (!link || !link->conn)
		return;

	BT_DBG("hcon %p", conn);

	if (!status) {
		if (lmp_esco_capable(conn->hdev))
			hci_setup_sync(link->conn, conn->handle);
		else
			hci_add_sco(link->conn, conn->handle);
	} else {
		hci_connect_cfm(link->conn, status);
		hci_conn_del(link->conn);
	}
}

static void hci_conn_timeout(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     disc_work.work);
	int refcnt = atomic_read(&conn->refcnt);

	BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));

	WARN_ON(refcnt < 0);

	/* FIXME: It was observed that in pairing failed scenario, refcnt
	 * drops below 0. Probably this is because l2cap_conn_del calls
	 * l2cap_chan_del for each channel, and inside l2cap_chan_del conn is
	 * dropped. After that loop hci_chan_del is called which also drops
	 * conn. For now make sure that ACL is alive if refcnt is higher than
	 * 0, otherwise drop it.
	 */
	if (refcnt > 0)
		return;

	hci_abort_conn(conn, hci_proto_disconn_ind(conn));
}

/* Enter sniff mode */
static void hci_conn_idle(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     idle_work.work);
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("hcon %p mode %d", conn, conn->mode);

	if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
		return;

	if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
		return;

	if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
		struct hci_cp_sniff_subrate cp;
		cp.handle             = cpu_to_le16(conn->handle);
		cp.max_latency        = cpu_to_le16(0);
		cp.min_remote_timeout = cpu_to_le16(0);
		cp.min_local_timeout  = cpu_to_le16(0);
		hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
	}

	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
		struct hci_cp_sniff_mode cp;
		cp.handle       = cpu_to_le16(conn->handle);
		cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
		cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
		cp.attempt      = cpu_to_le16(4);
		cp.timeout      = cpu_to_le16(1);
		hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
	}
}
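/* hdev->sniff_min_interval/sniff_max_interval are expressed in 0.625 ms
 * baseband slots, so the default range of 80-800 slots set at device
 * allocation corresponds to 50-500 ms between sniff anchor points.
 */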
static void hci_conn_auto_accept(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     auto_accept_work.work);

	hci_send_cmd(conn->hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
		     &conn->dst);
}

static void le_disable_advertising(struct hci_dev *hdev)
{
	if (ext_adv_capable(hdev)) {
		struct hci_cp_le_set_ext_adv_enable cp;

		cp.enable = 0x00;
		cp.num_of_sets = 0x00;

		hci_send_cmd(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp),
			     &cp);
	} else {
		u8 enable = 0x00;
		hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
			     &enable);
	}
}

static void le_conn_timeout(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     le_conn_timeout.work);
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("");

	/* We could end up here due to having done directed advertising,
	 * so clean up the state if necessary. This should however only
	 * happen with broken hardware or if low duty cycle was used
	 * (which doesn't have a timeout of its own).
	 */
	if (conn->role == HCI_ROLE_SLAVE) {
		/* Disable LE Advertising */
		le_disable_advertising(hdev);
		hci_dev_lock(hdev);
		hci_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
		hci_dev_unlock(hdev);
		return;
	}

	hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
}

struct iso_cig_params {
	struct hci_cp_le_set_cig_params cp;
	struct hci_cis_params cis[0x1f];
};

struct iso_list_data {
	union {
		u8  cig;
		u8  big;
	};
	union {
		u8  cis;
		u8  bis;
		u16 sync_handle;
	};
	int count;
	bool big_term;
	bool pa_sync_term;
	bool big_sync_term;
};

static void bis_list(struct hci_conn *conn, void *data)
{
	struct iso_list_data *d = data;

	/* Skip if not broadcast/ANY address */
	if (bacmp(&conn->dst, BDADDR_ANY))
		return;

	if (d->big != conn->iso_qos.bcast.big ||
	    d->bis == BT_ISO_QOS_BIS_UNSET ||
	    d->bis != conn->iso_qos.bcast.bis)
		return;

	d->count++;
}

static int terminate_big_sync(struct hci_dev *hdev, void *data)
{
	struct iso_list_data *d = data;

	bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", d->big, d->bis);

	hci_disable_per_advertising_sync(hdev, d->bis);
	hci_remove_ext_adv_instance_sync(hdev, d->bis, NULL);

	/* Only terminate BIG if it has been created */
	if (!d->big_term)
		return 0;

	return hci_le_terminate_big_sync(hdev, d->big,
					 HCI_ERROR_LOCAL_HOST_TERM);
}

static void terminate_big_destroy(struct hci_dev *hdev, void *data, int err)
{
	kfree(data);
}

static int hci_le_terminate_big(struct hci_dev *hdev, struct hci_conn *conn)
{
	struct iso_list_data *d;
	int ret;

	bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", conn->iso_qos.bcast.big,
		   conn->iso_qos.bcast.bis);

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->big = conn->iso_qos.bcast.big;
	d->bis = conn->iso_qos.bcast.bis;
	d->big_term = test_and_clear_bit(HCI_CONN_BIG_CREATED, &conn->flags);

	ret = hci_cmd_sync_queue(hdev, terminate_big_sync, d,
				 terminate_big_destroy);
	if (ret)
		kfree(d);

	return ret;
}

static int big_terminate_sync(struct hci_dev *hdev, void *data)
{
	struct iso_list_data *d = data;

	bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", d->big,
		   d->sync_handle);

	if (d->big_sync_term)
		hci_le_big_terminate_sync(hdev, d->big);

	if (d->pa_sync_term)
		return hci_le_pa_terminate_sync(hdev, d->sync_handle);

	return 0;
}

static int hci_le_big_terminate(struct hci_dev *hdev, u8 big, struct hci_conn *conn)
{
	struct iso_list_data *d;
	int ret;

	bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", big, conn->sync_handle);

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->big = big;
	d->sync_handle = conn->sync_handle;
	d->pa_sync_term = test_and_clear_bit(HCI_CONN_PA_SYNC, &conn->flags);
	d->big_sync_term = test_and_clear_bit(HCI_CONN_BIG_SYNC, &conn->flags);

	ret = hci_cmd_sync_queue(hdev, big_terminate_sync, d,
				 terminate_big_destroy);
	if (ret)
		kfree(d);

	return ret;
}
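/* Both helpers above follow the hci_cmd_sync_queue() ownership pattern: on
 * successful queueing the iso_list_data is freed by the
 * terminate_big_destroy() completion callback, while on queueing failure
 * the caller frees it immediately, i.e.:
 *
 *	ret = hci_cmd_sync_queue(hdev, big_terminate_sync, d,
 *				 terminate_big_destroy);
 *	if (ret)
 *		kfree(d);
 */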
/* Cleanup BIS connection
 *
 * Detects if there are any BIS left connected in a BIG
 * broadcaster: Remove advertising instance and terminate BIG.
 * broadcaster receiver: Terminate BIG sync and terminate PA sync.
 */
static void bis_cleanup(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_conn *bis;

	bt_dev_dbg(hdev, "conn %p", conn);

	if (conn->role == HCI_ROLE_MASTER) {
		if (!test_and_clear_bit(HCI_CONN_PER_ADV, &conn->flags))
			return;

		/* Check if ISO connection is a BIS and terminate advertising
		 * set and BIG if there are no other connections using it.
		 */
		bis = hci_conn_hash_lookup_big(hdev, conn->iso_qos.bcast.big);
		if (bis)
			return;

		hci_le_terminate_big(hdev, conn);
	} else {
		bis = hci_conn_hash_lookup_big_any_dst(hdev,
						       conn->iso_qos.bcast.big);

		if (bis)
			return;

		hci_le_big_terminate(hdev, conn->iso_qos.bcast.big,
				     conn);
	}
}

static int remove_cig_sync(struct hci_dev *hdev, void *data)
{
	u8 handle = PTR_UINT(data);

	return hci_le_remove_cig_sync(hdev, handle);
}

static int hci_le_remove_cig(struct hci_dev *hdev, u8 handle)
{
	bt_dev_dbg(hdev, "handle 0x%2.2x", handle);

	return hci_cmd_sync_queue(hdev, remove_cig_sync, UINT_PTR(handle),
				  NULL);
}

static void find_cis(struct hci_conn *conn, void *data)
{
	struct iso_list_data *d = data;

	/* Ignore broadcast or if the CIG doesn't match */
	if (!bacmp(&conn->dst, BDADDR_ANY) || d->cig != conn->iso_qos.ucast.cig)
		return;

	d->count++;
}
/* Cleanup CIS connection:
 *
 * Detects if there are any CIS left connected in a CIG and removes the CIG
 * if not.
 */
static void cis_cleanup(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct iso_list_data d;

	if (conn->iso_qos.ucast.cig == BT_ISO_QOS_CIG_UNSET)
		return;

	memset(&d, 0, sizeof(d));
	d.cig = conn->iso_qos.ucast.cig;

	/* Check if ISO connection is a CIS and remove CIG if there are
	 * no other connections using it.
	 */
	hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_BOUND, &d);
	hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_CONNECT, &d);
	hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_CONNECTED, &d);
	if (d.count)
		return;

	hci_le_remove_cig(hdev, conn->iso_qos.ucast.cig);
}

static int hci_conn_hash_alloc_unset(struct hci_dev *hdev)
{
	return ida_alloc_range(&hdev->unset_handle_ida, HCI_CONN_HANDLE_MAX + 1,
			       U16_MAX, GFP_ATOMIC);
}
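/* HCI_CONN_HANDLE_MAX is 0x0eff (the largest handle the Core spec allows),
 * so "unset" placeholder handles live in 0x0f00..0xffff where they can
 * never collide with a controller-assigned handle; HCI_CONN_HANDLE_UNSET()
 * tests exactly this range.
 */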
static struct hci_conn *__hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
				       u8 role, u16 handle)
{
	struct hci_conn *conn;

	switch (type) {
	case ACL_LINK:
		if (!hdev->acl_mtu)
			return ERR_PTR(-ECONNREFUSED);
		break;
	case ISO_LINK:
		if (hdev->iso_mtu)
			/* Dedicated ISO Buffer exists */
			break;
		fallthrough;
	case LE_LINK:
		if (hdev->le_mtu && hdev->le_mtu < HCI_MIN_LE_MTU)
			return ERR_PTR(-ECONNREFUSED);
		if (!hdev->le_mtu && hdev->acl_mtu < HCI_MIN_LE_MTU)
			return ERR_PTR(-ECONNREFUSED);
		break;
	case SCO_LINK:
	case ESCO_LINK:
		if (!hdev->sco_pkts)
			/* Controller does not support SCO or eSCO over HCI */
			return ERR_PTR(-ECONNREFUSED);
		break;
	default:
		return ERR_PTR(-ECONNREFUSED);
	}

	bt_dev_dbg(hdev, "dst %pMR handle 0x%4.4x", dst, handle);

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn)
		return ERR_PTR(-ENOMEM);

	bacpy(&conn->dst, dst);
	bacpy(&conn->src, &hdev->bdaddr);
	conn->handle = handle;
	conn->hdev  = hdev;
	conn->type  = type;
	conn->role  = role;
	conn->mode  = HCI_CM_ACTIVE;
	conn->state = BT_OPEN;
	conn->auth_type = HCI_AT_GENERAL_BONDING;
	conn->io_capability = hdev->io_capability;
	conn->remote_auth = 0xff;
	conn->key_type = 0xff;
	conn->rssi = HCI_RSSI_INVALID;
	conn->tx_power = HCI_TX_POWER_INVALID;
	conn->max_tx_power = HCI_TX_POWER_INVALID;
	conn->sync_handle = HCI_SYNC_HANDLE_INVALID;

	set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;

	/* Set Default Authenticated payload timeout to 30s */
	conn->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;

	if (conn->role == HCI_ROLE_MASTER)
		conn->out = true;

	switch (type) {
	case ACL_LINK:
		conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
		conn->mtu = hdev->acl_mtu;
		break;
	case LE_LINK:
		/* conn->src should reflect the local identity address */
		hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
		conn->mtu = hdev->le_mtu ? hdev->le_mtu : hdev->acl_mtu;
		break;
	case ISO_LINK:
		/* conn->src should reflect the local identity address */
		hci_copy_identity_address(hdev, &conn->src, &conn->src_type);

		/* set proper cleanup function */
		if (!bacmp(dst, BDADDR_ANY))
			conn->cleanup = bis_cleanup;
		else if (conn->role == HCI_ROLE_MASTER)
			conn->cleanup = cis_cleanup;

		conn->mtu = hdev->iso_mtu ? hdev->iso_mtu :
			    hdev->le_mtu ? hdev->le_mtu : hdev->acl_mtu;
		break;
	case SCO_LINK:
		if (lmp_esco_capable(hdev))
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					 (hdev->esco_type & EDR_ESCO_MASK);
		else
			conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;

		conn->mtu = hdev->sco_mtu;
		break;
	case ESCO_LINK:
		conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
		conn->mtu = hdev->sco_mtu;
		break;
	}

	skb_queue_head_init(&conn->data_q);

	INIT_LIST_HEAD(&conn->chan_list);
	INIT_LIST_HEAD(&conn->link_list);

	INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
	INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
	INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
	INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);

	atomic_set(&conn->refcnt, 0);

	hci_dev_hold(hdev);

	hci_conn_hash_add(hdev, conn);

	/* The SCO and eSCO connections will only be notified when their
	 * setup has been completed. This is different to ACL links which
	 * can be notified right away.
	 */
	if (conn->type != SCO_LINK && conn->type != ESCO_LINK) {
		if (hdev->notify)
			hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);
	}

	hci_conn_init_sysfs(conn);

	return conn;
}

struct hci_conn *hci_conn_add_unset(struct hci_dev *hdev, int type,
				    bdaddr_t *dst, u8 role)
{
	int handle;

	bt_dev_dbg(hdev, "dst %pMR", dst);

	handle = hci_conn_hash_alloc_unset(hdev);
	if (unlikely(handle < 0))
		return ERR_PTR(-ECONNREFUSED);

	return __hci_conn_add(hdev, type, dst, role, handle);
}

struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
			      u8 role, u16 handle)
{
	if (handle > HCI_CONN_HANDLE_MAX)
		return ERR_PTR(-EINVAL);

	return __hci_conn_add(hdev, type, dst, role, handle);
}
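/* Typical lifecycle sketch: outgoing connections are created with a
 * placeholder handle and upgraded once the controller reports one, e.g.:
 *
 *	conn = hci_conn_add_unset(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
 *	...
 *	hci_conn_set_handle(conn, handle);  (from the Connection Complete event)
 *
 * whereas hci_conn_add() is for the cases where the handle is already known
 * at creation time.
 */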
static void hci_conn_cleanup_child(struct hci_conn *conn, u8 reason)
{
	if (!reason)
		reason = HCI_ERROR_REMOTE_USER_TERM;

	/* Due to race, SCO/ISO conn might be not established yet at this point,
	 * and nothing else will clean it up. In other cases it is done via HCI
	 * events.
	 */
	switch (conn->type) {
	case SCO_LINK:
	case ESCO_LINK:
		if (HCI_CONN_HANDLE_UNSET(conn->handle))
			hci_conn_failed(conn, reason);
		break;
	case ISO_LINK:
		if (conn->state != BT_CONNECTED &&
		    !test_bit(HCI_CONN_CREATE_CIS, &conn->flags))
			hci_conn_failed(conn, reason);
		break;
	}
}

static void hci_conn_unlink(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	bt_dev_dbg(hdev, "hcon %p", conn);

	if (!conn->parent) {
		struct hci_link *link, *t;

		list_for_each_entry_safe(link, t, &conn->link_list, list) {
			struct hci_conn *child = link->conn;

			hci_conn_unlink(child);

			/* If hdev is down it means
			 * hci_dev_close_sync/hci_conn_hash_flush is in
			 * progress and links don't need to be cleaned up as
			 * all connections will be cleaned up anyway.
			 */
			if (!test_bit(HCI_UP, &hdev->flags))
				continue;

			hci_conn_cleanup_child(child, conn->abort_reason);
		}

		return;
	}

	if (!conn->link)
		return;

	list_del_rcu(&conn->link->list);
	synchronize_rcu();

	hci_conn_drop(conn->parent);
	hci_conn_put(conn->parent);
	conn->parent = NULL;

	kfree(conn->link);
	conn->link = NULL;
}

void hci_conn_del(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);

	hci_conn_unlink(conn);

	cancel_delayed_work_sync(&conn->disc_work);
	cancel_delayed_work_sync(&conn->auto_accept_work);
	cancel_delayed_work_sync(&conn->idle_work);

	if (conn->type == ACL_LINK) {
		/* Unacked frames */
		hdev->acl_cnt += conn->sent;
	} else if (conn->type == LE_LINK) {
		cancel_delayed_work(&conn->le_conn_timeout);

		if (hdev->le_pkts)
			hdev->le_cnt += conn->sent;
		else
			hdev->acl_cnt += conn->sent;
	} else {
		/* Unacked ISO frames */
		if (conn->type == ISO_LINK) {
			if (hdev->iso_pkts)
				hdev->iso_cnt += conn->sent;
			else if (hdev->le_pkts)
				hdev->le_cnt += conn->sent;
			else
				hdev->acl_cnt += conn->sent;
		}
	}

	skb_queue_purge(&conn->data_q);

	/* Remove the connection from the list and cleanup its remaining
	 * state. This is a separate function since for some cases like
	 * BT_CONNECT_SCAN we *only* want the cleanup part without the
	 * rest of hci_conn_del.
	 */
	hci_conn_cleanup(conn);

	/* Dequeue callbacks using connection pointer as data */
	hci_cmd_sync_dequeue(hdev, NULL, conn, NULL);
}

struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, uint8_t src_type)
{
	int use_src = bacmp(src, BDADDR_ANY);
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%pMR -> %pMR", src, dst);

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(d, &hci_dev_list, list) {
		if (!test_bit(HCI_UP, &d->flags) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Simple routing:
		 *   No source address - find interface with bdaddr != dst
		 *   Source address    - find interface with bdaddr == src
		 */

		if (use_src) {
			bdaddr_t id_addr;
			u8 id_addr_type;

			if (src_type == BDADDR_BREDR) {
				if (!lmp_bredr_capable(d))
					continue;
				bacpy(&id_addr, &d->bdaddr);
				id_addr_type = BDADDR_BREDR;
			} else {
				if (!lmp_le_capable(d))
					continue;

				hci_copy_identity_address(d, &id_addr,
							  &id_addr_type);

				/* Convert from HCI to three-value type */
				if (id_addr_type == ADDR_LE_DEV_PUBLIC)
					id_addr_type = BDADDR_LE_PUBLIC;
				else
					id_addr_type = BDADDR_LE_RANDOM;
			}

			if (!bacmp(&id_addr, src) && id_addr_type == src_type) {
				hdev = d;
				break;
			}
		} else {
			if (bacmp(&d->bdaddr, dst)) {
				hdev = d;
				break;
			}
		}
	}

	if (hdev)
		hdev = hci_dev_hold(hdev);

	read_unlock(&hci_dev_list_lock);
	return hdev;
}
EXPORT_SYMBOL(hci_get_route);
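/* Usage sketch: the returned hdev, if any, is held and must be released by
 * the caller, mirroring how the socket layers use it:
 *
 *	hdev = hci_get_route(&dst, &src, BDADDR_BREDR);
 *	if (!hdev)
 *		return -EHOSTUNREACH;
 *	...
 *	hci_dev_put(hdev);
 */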
/* This function requires the caller holds hdev->lock */
static void hci_le_conn_failed(struct hci_conn *conn, u8 status)
{
	struct hci_dev *hdev = conn->hdev;

	hci_connect_le_scan_cleanup(conn, status);

	/* Enable advertising in case this was a failed connection
	 * attempt as a peripheral.
	 */
	hci_enable_advertising(hdev);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_failed(struct hci_conn *conn, u8 status)
{
	struct hci_dev *hdev = conn->hdev;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	switch (conn->type) {
	case LE_LINK:
		hci_le_conn_failed(conn, status);
		break;
	case ACL_LINK:
		mgmt_connect_failed(hdev, &conn->dst, conn->type,
				    conn->dst_type, status);
		break;
	}

	/* In case of BIG/PA sync failed, clear conn flags so that
	 * the conns will be correctly cleaned up by ISO layer
	 */
	test_and_clear_bit(HCI_CONN_BIG_SYNC_FAILED, &conn->flags);
	test_and_clear_bit(HCI_CONN_PA_SYNC_FAILED, &conn->flags);

	conn->state = BT_CLOSED;
	hci_connect_cfm(conn, status);
	hci_conn_del(conn);
}

/* This function requires the caller holds hdev->lock */
u8 hci_conn_set_handle(struct hci_conn *conn, u16 handle)
{
	struct hci_dev *hdev = conn->hdev;

	bt_dev_dbg(hdev, "hcon %p handle 0x%4.4x", conn, handle);

	if (conn->handle == handle)
		return 0;

	if (handle > HCI_CONN_HANDLE_MAX) {
		bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x",
			   handle, HCI_CONN_HANDLE_MAX);
		return HCI_ERROR_INVALID_PARAMETERS;
	}

	/* If abort_reason has been set it means the connection is being
	 * aborted and the handle shall not be changed.
	 */
	if (conn->abort_reason)
		return conn->abort_reason;

	if (HCI_CONN_HANDLE_UNSET(conn->handle))
		ida_free(&hdev->unset_handle_ida, conn->handle);

	conn->handle = handle;

	return 0;
}
struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
				u8 dst_type, bool dst_resolved, u8 sec_level,
				u16 conn_timeout, u8 role)
{
	struct hci_conn *conn;
	struct smp_irk *irk;
	int err;

	/* Let's make sure that le is enabled. */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (lmp_le_capable(hdev))
			return ERR_PTR(-ECONNREFUSED);

		return ERR_PTR(-EOPNOTSUPP);
	}

	/* Since the controller supports only one LE connection attempt at a
	 * time, we return -EBUSY if there is any connection attempt running.
	 */
	if (hci_lookup_le_connect(hdev))
		return ERR_PTR(-EBUSY);

	/* If there's already a connection object but it's not in
	 * scanning state it means it must already be established, in
	 * which case we can't do anything else except report a failure
	 * to connect.
	 */
	conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
	if (conn && !test_bit(HCI_CONN_SCANNING, &conn->flags))
		return ERR_PTR(-EBUSY);

	/* Check if the destination address has been resolved by the controller
	 * since if it did then the identity address shall be used.
	 */
	if (!dst_resolved) {
		/* When given an identity address with existing identity
		 * resolving key, the connection needs to be established
		 * to a resolvable random address.
		 *
		 * Storing the resolvable random address is required here
		 * to handle connection failures. The address will later
		 * be resolved back into the original identity address
		 * from the connect request.
		 */
		irk = hci_find_irk_by_addr(hdev, dst, dst_type);
		if (irk && bacmp(&irk->rpa, BDADDR_ANY)) {
			dst = &irk->rpa;
			dst_type = ADDR_LE_DEV_RANDOM;
		}
	}

	if (conn) {
		bacpy(&conn->dst, dst);
	} else {
		conn = hci_conn_add_unset(hdev, LE_LINK, dst, role);
		if (IS_ERR(conn))
			return conn;
		hci_conn_hold(conn);
		conn->pending_sec_level = sec_level;
	}

	conn->dst_type = dst_type;
	conn->sec_level = BT_SECURITY_LOW;
	conn->conn_timeout = conn_timeout;

	err = hci_connect_le_sync(hdev, conn);
	if (err) {
		hci_conn_del(conn);
		return ERR_PTR(err);
	}

	return conn;
}

static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
{
	struct hci_conn *conn;

	conn = hci_conn_hash_lookup_le(hdev, addr, type);
	if (!conn)
		return false;

	if (conn->state != BT_CONNECTED)
		return false;

	return true;
}

/* This function requires the caller holds hdev->lock */
static int hci_explicit_conn_params_set(struct hci_dev *hdev,
					bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	if (is_connected(hdev, addr, addr_type))
		return -EISCONN;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (!params) {
		params = hci_conn_params_add(hdev, addr, addr_type);
		if (!params)
			return -ENOMEM;

		/* If we created new params, mark them to be deleted in
		 * hci_connect_le_scan_cleanup. It's different case than
		 * existing disabled params, those will stay after cleanup.
		 */
		params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
	}

	/* We're trying to connect, so make sure params are at pend_le_conns */
	if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
	    params->auto_connect == HCI_AUTO_CONN_REPORT ||
	    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
		hci_pend_le_list_del_init(params);
		hci_pend_le_list_add(params, &hdev->pend_le_conns);
	}

	params->explicit_connect = true;

	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
	       params->auto_connect);

	return 0;
}

static int qos_set_big(struct hci_dev *hdev, struct bt_iso_qos *qos)
{
	struct hci_conn *conn;
	u8 big;

	/* Allocate a BIG if not set */
	if (qos->bcast.big == BT_ISO_QOS_BIG_UNSET) {
		for (big = 0x00; big < 0xef; big++) {
			conn = hci_conn_hash_lookup_big(hdev, big);
			if (!conn)
				break;
		}

		if (big == 0xef)
			return -EADDRNOTAVAIL;

		/* Update BIG */
		qos->bcast.big = big;
	}

	return 0;
}

static int qos_set_bis(struct hci_dev *hdev, struct bt_iso_qos *qos)
{
	struct hci_conn *conn;
	u8 bis;

	/* Allocate BIS if not set */
	if (qos->bcast.bis == BT_ISO_QOS_BIS_UNSET) {
		/* Find an unused adv set to advertise BIS, skip instance 0x00
		 * since it is reserved as general purpose set.
		 */
		for (bis = 0x01; bis < hdev->le_num_of_adv_sets;
		     bis++) {
			conn = hci_conn_hash_lookup_bis(hdev, BDADDR_ANY, bis);
			if (!conn)
				break;
		}

		if (bis == hdev->le_num_of_adv_sets)
			return -EADDRNOTAVAIL;

		/* Update BIS */
		qos->bcast.bis = bis;
	}

	return 0;
}
/* This function requires the caller holds hdev->lock */
static struct hci_conn *hci_add_bis(struct hci_dev *hdev, bdaddr_t *dst,
				    struct bt_iso_qos *qos, __u8 base_len,
				    __u8 *base)
{
	struct hci_conn *conn;
	int err;

	/* Let's make sure that le is enabled. */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (lmp_le_capable(hdev))
			return ERR_PTR(-ECONNREFUSED);
		return ERR_PTR(-EOPNOTSUPP);
	}

	err = qos_set_big(hdev, qos);
	if (err)
		return ERR_PTR(err);

	err = qos_set_bis(hdev, qos);
	if (err)
		return ERR_PTR(err);

	/* Check if the LE Create BIG command has already been sent */
	conn = hci_conn_hash_lookup_per_adv_bis(hdev, dst, qos->bcast.big,
						qos->bcast.bis);
	if (conn)
		return ERR_PTR(-EADDRINUSE);

	/* Check BIS settings against other bound BISes, since all
	 * BISes in a BIG must have the same value for all parameters
	 */
	conn = hci_conn_hash_lookup_big(hdev, qos->bcast.big);

	if (conn && (memcmp(qos, &conn->iso_qos, sizeof(*qos)) ||
		     base_len != conn->le_per_adv_data_len ||
		     memcmp(conn->le_per_adv_data, base, base_len)))
		return ERR_PTR(-EADDRINUSE);

	conn = hci_conn_add_unset(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
	if (IS_ERR(conn))
		return conn;

	conn->state = BT_CONNECT;

	hci_conn_hold(conn);
	return conn;
}
/* This function requires the caller holds hdev->lock */
struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
				     u8 dst_type, u8 sec_level,
				     u16 conn_timeout,
				     enum conn_reasons conn_reason)
{
	struct hci_conn *conn;

	/* Let's make sure that le is enabled. */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (lmp_le_capable(hdev))
			return ERR_PTR(-ECONNREFUSED);

		return ERR_PTR(-EOPNOTSUPP);
	}

	/* Some devices send ATT messages as soon as the physical link is
	 * established. To be able to handle these ATT messages, the user-
	 * space first establishes the connection and then starts the pairing
	 * process.
	 *
	 * So if a hci_conn object already exists for the following connection
	 * attempt, we simply update pending_sec_level and auth_type fields
	 * and return the object found.
	 */
	conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
	if (conn) {
		if (conn->pending_sec_level < sec_level)
			conn->pending_sec_level = sec_level;
		goto done;
	}

	BT_DBG("requesting refresh of dst_addr");

	conn = hci_conn_add_unset(hdev, LE_LINK, dst, HCI_ROLE_MASTER);
	if (IS_ERR(conn))
		return conn;

	if (hci_explicit_conn_params_set(hdev, dst, dst_type) < 0) {
		hci_conn_del(conn);
		return ERR_PTR(-EBUSY);
	}

	conn->state = BT_CONNECT;
	set_bit(HCI_CONN_SCANNING, &conn->flags);
	conn->dst_type = dst_type;
	conn->sec_level = BT_SECURITY_LOW;
	conn->pending_sec_level = sec_level;
	conn->conn_timeout = conn_timeout;
	conn->conn_reason = conn_reason;

	hci_update_passive_scan(hdev);

done:
	hci_conn_hold(conn);
	return conn;
}

struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
				 u8 sec_level, u8 auth_type,
				 enum conn_reasons conn_reason)
{
	struct hci_conn *acl;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (lmp_bredr_capable(hdev))
			return ERR_PTR(-ECONNREFUSED);

		return ERR_PTR(-EOPNOTSUPP);
	}

	/* Reject outgoing connection to device with same BD ADDR against
	 * CVE-2020-26555
	 */
	if (!bacmp(&hdev->bdaddr, dst)) {
		bt_dev_dbg(hdev, "Reject connection with same BD_ADDR %pMR",
			   dst);
		return ERR_PTR(-ECONNREFUSED);
	}

	acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
	if (!acl) {
		acl = hci_conn_add_unset(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
		if (IS_ERR(acl))
			return acl;
	}

	hci_conn_hold(acl);

	acl->conn_reason = conn_reason;
	if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
		int err;

		acl->sec_level = BT_SECURITY_LOW;
		acl->pending_sec_level = sec_level;
		acl->auth_type = auth_type;

		err = hci_connect_acl_sync(hdev, acl);
		if (err) {
			hci_conn_del(acl);
			return ERR_PTR(err);
		}
	}

	return acl;
}

static struct hci_link *hci_conn_link(struct hci_conn *parent,
				      struct hci_conn *conn)
{
	struct hci_dev *hdev = parent->hdev;
	struct hci_link *link;

	bt_dev_dbg(hdev, "parent %p hcon %p", parent, conn);

	if (conn->link)
		return conn->link;

	if (conn->parent)
		return NULL;

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return NULL;

	link->conn = hci_conn_hold(conn);
	conn->link = link;
	conn->parent = hci_conn_get(parent);

	/* Use list_add_tail_rcu to append to the list */
	list_add_tail_rcu(&link->list, &parent->link_list);

	return link;
}

struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
				 __u16 setting, struct bt_codec *codec)
{
	struct hci_conn *acl;
	struct hci_conn *sco;
	struct hci_link *link;

	acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING,
			      CONN_REASON_SCO_CONNECT);
	if (IS_ERR(acl))
		return acl;

	sco = hci_conn_hash_lookup_ba(hdev, type, dst);
	if (!sco) {
		sco = hci_conn_add_unset(hdev, type, dst, HCI_ROLE_MASTER);
		if (IS_ERR(sco)) {
			hci_conn_drop(acl);
			return sco;
		}
	}

	link = hci_conn_link(acl, sco);
	if (!link) {
		hci_conn_drop(acl);
		hci_conn_drop(sco);
		return ERR_PTR(-ENOLINK);
	}

	sco->setting = setting;
	sco->codec = *codec;

	if (acl->state == BT_CONNECTED &&
	    (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
		set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
		hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);

		if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
			/* defer SCO setup until mode change completed */
			set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
			return sco;
		}

		hci_sco_setup(acl, 0x00);
	}

	return sco;
}
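/* Usage sketch (mirroring what the SCO socket layer does): set up the codec
 * and voice setting first, then let this helper create or reuse the
 * underlying ACL:
 *
 *	struct bt_codec codec = { .id = BT_CODEC_CVSD };
 *
 *	sco = hci_connect_sco(hdev, SCO_LINK, &dst, sco_pi(sk)->setting,
 *			      &codec);
 *	if (IS_ERR(sco))
 *		return PTR_ERR(sco);
 */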
static int hci_le_create_big(struct hci_conn *conn, struct bt_iso_qos *qos)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_le_create_big cp;
	struct iso_list_data data;

	memset(&cp, 0, sizeof(cp));

	data.big = qos->bcast.big;
	data.bis = qos->bcast.bis;
	data.count = 0;

	/* Create a BIS for each bound connection */
	hci_conn_hash_list_state(hdev, bis_list, ISO_LINK,
				 BT_BOUND, &data);

	cp.handle = qos->bcast.big;
	cp.adv_handle = qos->bcast.bis;
	cp.num_bis  = data.count;
	hci_cpu_to_le24(qos->bcast.out.interval, cp.bis.sdu_interval);
	cp.bis.sdu = cpu_to_le16(qos->bcast.out.sdu);
	cp.bis.latency = cpu_to_le16(qos->bcast.out.latency);
	cp.bis.rtn  = qos->bcast.out.rtn;
	cp.bis.phy  = qos->bcast.out.phy;
	cp.bis.packing = qos->bcast.packing;
	cp.bis.framing = qos->bcast.framing;
	cp.bis.encryption = qos->bcast.encryption;
	memcpy(cp.bis.bcode, qos->bcast.bcode, sizeof(cp.bis.bcode));

	return hci_send_cmd(hdev, HCI_OP_LE_CREATE_BIG, sizeof(cp), &cp);
}

static int set_cig_params_sync(struct hci_dev *hdev, void *data)
{
	u8 cig_id = PTR_UINT(data);
	struct hci_conn *conn;
	struct bt_iso_qos *qos;
	struct iso_cig_params pdu;
	u8 cis_id;

	conn = hci_conn_hash_lookup_cig(hdev, cig_id);
	if (!conn)
		return 0;

	memset(&pdu, 0, sizeof(pdu));

	qos = &conn->iso_qos;
	pdu.cp.cig_id = cig_id;
	hci_cpu_to_le24(qos->ucast.out.interval, pdu.cp.c_interval);
	hci_cpu_to_le24(qos->ucast.in.interval, pdu.cp.p_interval);
	pdu.cp.sca = qos->ucast.sca;
	pdu.cp.packing = qos->ucast.packing;
	pdu.cp.framing = qos->ucast.framing;
	pdu.cp.c_latency = cpu_to_le16(qos->ucast.out.latency);
	pdu.cp.p_latency = cpu_to_le16(qos->ucast.in.latency);

	/* Reprogram all CIS(s) with the same CIG, valid ranges are:
	 * num_cis: 0x00 to 0x1F
	 * cis_id: 0x00 to 0xEF
	 */
	for (cis_id = 0x00; cis_id < 0xf0 &&
	     pdu.cp.num_cis < ARRAY_SIZE(pdu.cis); cis_id++) {
		struct hci_cis_params *cis;

		conn = hci_conn_hash_lookup_cis(hdev, NULL, 0, cig_id, cis_id);
		if (!conn)
			continue;

		qos = &conn->iso_qos;

		cis = &pdu.cis[pdu.cp.num_cis++];
		cis->cis_id = cis_id;
		cis->c_sdu  = cpu_to_le16(conn->iso_qos.ucast.out.sdu);
		cis->p_sdu  = cpu_to_le16(conn->iso_qos.ucast.in.sdu);
		cis->c_phy  = qos->ucast.out.phy ? qos->ucast.out.phy :
			      qos->ucast.in.phy;
		cis->p_phy  = qos->ucast.in.phy ? qos->ucast.in.phy :
			      qos->ucast.out.phy;
		cis->c_rtn  = qos->ucast.out.rtn;
		cis->p_rtn  = qos->ucast.in.rtn;
	}

	if (!pdu.cp.num_cis)
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_CIG_PARAMS,
				     sizeof(pdu.cp) +
				     pdu.cp.num_cis * sizeof(pdu.cis[0]), &pdu,
				     HCI_CMD_TIMEOUT);
}
static bool hci_le_set_cig_params(struct hci_conn *conn, struct bt_iso_qos *qos)
{
	struct hci_dev *hdev = conn->hdev;
	struct iso_list_data data;

	memset(&data, 0, sizeof(data));

	/* Allocate first still reconfigurable CIG if not set */
	if (qos->ucast.cig == BT_ISO_QOS_CIG_UNSET) {
		for (data.cig = 0x00; data.cig < 0xf0; data.cig++) {
			data.count = 0;

			hci_conn_hash_list_state(hdev, find_cis, ISO_LINK,
						 BT_CONNECT, &data);
			if (data.count)
				continue;

			hci_conn_hash_list_state(hdev, find_cis, ISO_LINK,
						 BT_CONNECTED, &data);
			if (!data.count)
				break;
		}

		if (data.cig == 0xf0)
			return false;

		/* Update CIG */
		qos->ucast.cig = data.cig;
	}

	if (qos->ucast.cis != BT_ISO_QOS_CIS_UNSET) {
		if (hci_conn_hash_lookup_cis(hdev, NULL, 0, qos->ucast.cig,
					     qos->ucast.cis))
			return false;
		goto done;
	}

	/* Allocate first available CIS if not set */
	for (data.cig = qos->ucast.cig, data.cis = 0x00; data.cis < 0xf0;
	     data.cis++) {
		if (!hci_conn_hash_lookup_cis(hdev, NULL, 0, data.cig,
					      data.cis)) {
			/* Update CIS */
			qos->ucast.cis = data.cis;
			break;
		}
	}

	if (qos->ucast.cis == BT_ISO_QOS_CIS_UNSET)
		return false;

done:
	if (hci_cmd_sync_queue(hdev, set_cig_params_sync,
			       UINT_PTR(qos->ucast.cig), NULL) < 0)
		return false;

	return true;
}

struct hci_conn *hci_bind_cis(struct hci_dev *hdev, bdaddr_t *dst,
			      __u8 dst_type, struct bt_iso_qos *qos)
{
	struct hci_conn *cis;

	cis = hci_conn_hash_lookup_cis(hdev, dst, dst_type, qos->ucast.cig,
				       qos->ucast.cis);
	if (!cis) {
		cis = hci_conn_add_unset(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
		if (IS_ERR(cis))
			return cis;
		cis->cleanup = cis_cleanup;
		cis->dst_type = dst_type;
		cis->iso_qos.ucast.cig = BT_ISO_QOS_CIG_UNSET;
		cis->iso_qos.ucast.cis = BT_ISO_QOS_CIS_UNSET;
	}

	if (cis->state == BT_CONNECTED)
		return cis;

	/* Check if CIS has been set and the settings match */
	if (cis->state == BT_BOUND &&
	    !memcmp(&cis->iso_qos, qos, sizeof(*qos)))
		return cis;

	/* Update LINK PHYs according to QoS preference */
	cis->le_tx_phy = qos->ucast.out.phy;
	cis->le_rx_phy = qos->ucast.in.phy;

	/* If output interval is not set use the input interval as it cannot be
	 * 0x000000.
	 */
	if (!qos->ucast.out.interval)
		qos->ucast.out.interval = qos->ucast.in.interval;

	/* If input interval is not set use the output interval as it cannot be
	 * 0x000000.
	 */
	if (!qos->ucast.in.interval)
		qos->ucast.in.interval = qos->ucast.out.interval;

	/* If output latency is not set use the input latency as it cannot be
	 * 0x0000.
	 */
	if (!qos->ucast.out.latency)
		qos->ucast.out.latency = qos->ucast.in.latency;

	/* If input latency is not set use the output latency as it cannot be
	 * 0x0000.
	 */
	if (!qos->ucast.in.latency)
		qos->ucast.in.latency = qos->ucast.out.latency;

	if (!hci_le_set_cig_params(cis, qos)) {
		hci_conn_drop(cis);
		return ERR_PTR(-EINVAL);
	}

	hci_conn_hold(cis);

	cis->iso_qos = *qos;
	cis->state = BT_BOUND;

	return cis;
}
bool hci_iso_setup_path(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_le_setup_iso_path cmd;

	memset(&cmd, 0, sizeof(cmd));

	if (conn->iso_qos.ucast.out.sdu) {
		cmd.handle = cpu_to_le16(conn->handle);
		cmd.direction = 0x00; /* Input (Host to Controller) */
		cmd.path = 0x00; /* HCI path if enabled */
		cmd.codec = 0x03; /* Transparent Data */

		if (hci_send_cmd(hdev, HCI_OP_LE_SETUP_ISO_PATH, sizeof(cmd),
				 &cmd) < 0)
			return false;
	}

	if (conn->iso_qos.ucast.in.sdu) {
		cmd.handle = cpu_to_le16(conn->handle);
		cmd.direction = 0x01; /* Output (Controller to Host) */
		cmd.path = 0x00; /* HCI path if enabled */
		cmd.codec = 0x03; /* Transparent Data */

		if (hci_send_cmd(hdev, HCI_OP_LE_SETUP_ISO_PATH, sizeof(cmd),
				 &cmd) < 0)
			return false;
	}

	return true;
}

int hci_conn_check_create_cis(struct hci_conn *conn)
{
	if (conn->type != ISO_LINK || !bacmp(&conn->dst, BDADDR_ANY))
		return -EINVAL;

	if (!conn->parent || conn->parent->state != BT_CONNECTED ||
	    conn->state != BT_CONNECT || HCI_CONN_HANDLE_UNSET(conn->handle))
		return 1;

	return 0;
}

static int hci_create_cis_sync(struct hci_dev *hdev, void *data)
{
	return hci_le_create_cis_sync(hdev);
}

int hci_le_create_cis_pending(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	bool pending = false;

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_CREATE_CIS, &conn->flags)) {
			rcu_read_unlock();
			return -EBUSY;
		}

		if (!hci_conn_check_create_cis(conn))
			pending = true;
	}

	rcu_read_unlock();

	if (!pending)
		return 0;

	/* Queue Create CIS */
	return hci_cmd_sync_queue(hdev, hci_create_cis_sync, NULL, NULL);
}

static void hci_iso_qos_setup(struct hci_dev *hdev, struct hci_conn *conn,
			      struct bt_iso_io_qos *qos, __u8 phy)
{
	/* Only set MTU if PHY is enabled */
	if (!qos->sdu && qos->phy)
		qos->sdu = conn->mtu;

	/* Use the same PHY as ACL if set to any */
	if (qos->phy == BT_ISO_PHY_ANY)
		qos->phy = phy;

	/* Use LE ACL connection interval if not set */
	if (!qos->interval)
		/* ACL interval unit in 1.25 ms to us */
		qos->interval = conn->le_conn_interval * 1250;

	/* Use LE ACL connection latency if not set */
	if (!qos->latency)
		qos->latency = conn->le_conn_latency;
}
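/* Worked example for the interval fallback above: le_conn_interval is
 * stored in 1.25 ms units, so an ACL at interval 0x0030 (60 ms) yields a
 * default SDU interval of 48 * 1250 = 60000 us when the QoS left it unset.
 */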
static int create_big_sync(struct hci_dev *hdev, void *data)
{
	struct hci_conn *conn = data;
	struct bt_iso_qos *qos = &conn->iso_qos;
	u16 interval, sync_interval = 0;
	u32 flags = 0;
	int err;

	if (qos->bcast.out.phy == 0x02)
		flags |= MGMT_ADV_FLAG_SEC_2M;

	/* Align intervals */
	interval = (qos->bcast.out.interval / 1250) * qos->bcast.sync_factor;

	if (qos->bcast.bis)
		sync_interval = interval * 4;

	err = hci_start_per_adv_sync(hdev, qos->bcast.bis,
				     conn->le_per_adv_data_len,
				     conn->le_per_adv_data, flags, interval,
				     interval, sync_interval);
	if (err)
		return err;

	return hci_le_create_big(conn, &conn->iso_qos);
}

static void create_pa_complete(struct hci_dev *hdev, void *data, int err)
{
	struct hci_cp_le_pa_create_sync *cp = data;

	bt_dev_dbg(hdev, "");

	if (err)
		bt_dev_err(hdev, "Unable to create PA: %d", err);

	kfree(cp);
}

static int create_pa_sync(struct hci_dev *hdev, void *data)
{
	struct hci_cp_le_pa_create_sync *cp = data;
	int err;

	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_CREATE_SYNC,
				    sizeof(*cp), cp, HCI_CMD_TIMEOUT);
	if (err) {
		hci_dev_clear_flag(hdev, HCI_PA_SYNC);
		return err;
	}

	return hci_update_passive_scan_sync(hdev);
}

int hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst, __u8 dst_type,
		       __u8 sid, struct bt_iso_qos *qos)
{
	struct hci_cp_le_pa_create_sync *cp;

	if (hci_dev_test_and_set_flag(hdev, HCI_PA_SYNC))
		return -EBUSY;

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp) {
		hci_dev_clear_flag(hdev, HCI_PA_SYNC);
		return -ENOMEM;
	}

	cp->options = qos->bcast.options;
	cp->sid = sid;
	cp->addr_type = dst_type;
	bacpy(&cp->addr, dst);
	cp->skip = cpu_to_le16(qos->bcast.skip);
	cp->sync_timeout = cpu_to_le16(qos->bcast.sync_timeout);
	cp->sync_cte_type = qos->bcast.sync_cte_type;

	/* Queue start pa_create_sync and scan */
	return hci_cmd_sync_queue(hdev, create_pa_sync, cp, create_pa_complete);
}

int hci_le_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon,
			   struct bt_iso_qos *qos,
			   __u16 sync_handle, __u8 num_bis, __u8 bis[])
{
	struct _packed {
		struct hci_cp_le_big_create_sync cp;
		__u8 bis[0x11];
	} pdu;
	int err;

	if (num_bis > sizeof(pdu.bis))
		return -EINVAL;

	err = qos_set_big(hdev, qos);
	if (err)
		return err;

	if (hcon)
		hcon->iso_qos.bcast.big = qos->bcast.big;

	memset(&pdu, 0, sizeof(pdu));
	pdu.cp.handle = qos->bcast.big;
	pdu.cp.sync_handle = cpu_to_le16(sync_handle);
	pdu.cp.encryption = qos->bcast.encryption;
	memcpy(pdu.cp.bcode, qos->bcast.bcode, sizeof(pdu.cp.bcode));
	pdu.cp.mse = qos->bcast.mse;
	pdu.cp.timeout = cpu_to_le16(qos->bcast.timeout);
	pdu.cp.num_bis = num_bis;
	memcpy(pdu.bis, bis, num_bis);

	return hci_send_cmd(hdev, HCI_OP_LE_BIG_CREATE_SYNC,
			    sizeof(pdu.cp) + num_bis, &pdu);
}

static void create_big_complete(struct hci_dev *hdev, void *data, int err)
{
	struct hci_conn *conn = data;

	bt_dev_dbg(hdev, "conn %p", conn);

	if (err) {
		bt_dev_err(hdev, "Unable to create BIG: %d", err);
		hci_connect_cfm(conn, err);
		hci_conn_del(conn);
	}
}
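/* Illustrative sketch (not compiled) of the variable-length PDU built by
 * hci_le_big_create_sync() above: the BIS index array is sized for the
 * maximum (0x11 entries) but only num_bis bytes go on the wire. The BIS
 * indices below are hypothetical:
 *
 *	__u8 bis[] = { 0x01, 0x02 };
 *
 *	err = hci_le_big_create_sync(hdev, hcon, &qos,
 *				     sync_handle, sizeof(bis), bis);
 *	// resulting command length = sizeof(pdu.cp) + 2
 */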
struct hci_conn *hci_bind_bis(struct hci_dev *hdev, bdaddr_t *dst,
			      struct bt_iso_qos *qos,
			      __u8 base_len, __u8 *base)
{
	struct hci_conn *conn;
	__u8 eir[HCI_MAX_PER_AD_LENGTH];

	if (base_len && base)
		base_len = eir_append_service_data(eir, 0, 0x1851,
						   base, base_len);

	/* We need hci_conn object using the BDADDR_ANY as dst */
	conn = hci_add_bis(hdev, dst, qos, base_len, eir);
	if (IS_ERR(conn))
		return conn;

	/* Update LINK PHYs according to QoS preference */
	conn->le_tx_phy = qos->bcast.out.phy;

	/* Add Basic Announcement into Periodic Adv Data if BASE is set */
	if (base_len && base) {
		memcpy(conn->le_per_adv_data, eir, sizeof(eir));
		conn->le_per_adv_data_len = base_len;
	}

	hci_iso_qos_setup(hdev, conn, &qos->bcast.out,
			  conn->le_tx_phy ? conn->le_tx_phy :
			  hdev->le_tx_def_phys);

	conn->iso_qos = *qos;
	conn->state = BT_BOUND;

	return conn;
}

static void bis_mark_per_adv(struct hci_conn *conn, void *data)
{
	struct iso_list_data *d = data;

	/* Skip if not broadcast/ANY address */
	if (bacmp(&conn->dst, BDADDR_ANY))
		return;

	if (d->big != conn->iso_qos.bcast.big ||
	    d->bis == BT_ISO_QOS_BIS_UNSET ||
	    d->bis != conn->iso_qos.bcast.bis)
		return;

	set_bit(HCI_CONN_PER_ADV, &conn->flags);
}

struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst,
				 __u8 dst_type, struct bt_iso_qos *qos,
				 __u8 base_len, __u8 *base)
{
	struct hci_conn *conn;
	int err;
	struct iso_list_data data;

	conn = hci_bind_bis(hdev, dst, qos, base_len, base);
	if (IS_ERR(conn))
		return conn;

	data.big = qos->bcast.big;
	data.bis = qos->bcast.bis;

	/* Set HCI_CONN_PER_ADV for all bound connections, to mark that
	 * the start periodic advertising and create BIG commands have
	 * been queued
	 */
	hci_conn_hash_list_state(hdev, bis_mark_per_adv, ISO_LINK,
				 BT_BOUND, &data);

	/* Queue start periodic advertising and create BIG */
	err = hci_cmd_sync_queue(hdev, create_big_sync, conn,
				 create_big_complete);
	if (err < 0) {
		hci_conn_drop(conn);
		return ERR_PTR(err);
	}

	return conn;
}

struct hci_conn *hci_connect_cis(struct hci_dev *hdev, bdaddr_t *dst,
				 __u8 dst_type, struct bt_iso_qos *qos)
{
	struct hci_conn *le;
	struct hci_conn *cis;
	struct hci_link *link;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		le = hci_connect_le(hdev, dst, dst_type, false,
				    BT_SECURITY_LOW,
				    HCI_LE_CONN_TIMEOUT,
				    HCI_ROLE_SLAVE);
	else
		le = hci_connect_le_scan(hdev, dst, dst_type,
					 BT_SECURITY_LOW,
					 HCI_LE_CONN_TIMEOUT,
					 CONN_REASON_ISO_CONNECT);
	if (IS_ERR(le))
		return le;

	hci_iso_qos_setup(hdev, le, &qos->ucast.out,
			  le->le_tx_phy ? le->le_tx_phy : hdev->le_tx_def_phys);
	hci_iso_qos_setup(hdev, le, &qos->ucast.in,
			  le->le_rx_phy ? le->le_rx_phy : hdev->le_rx_def_phys);

	cis = hci_bind_cis(hdev, dst, dst_type, qos);
	if (IS_ERR(cis)) {
		hci_conn_drop(le);
		return cis;
	}

	link = hci_conn_link(le, cis);
	if (!link) {
		hci_conn_drop(le);
		hci_conn_drop(cis);
		return ERR_PTR(-ENOLINK);
	}

	/* Link takes the refcount */
	hci_conn_drop(cis);

	cis->state = BT_CONNECT;

	hci_le_create_cis_pending(hdev);

	return cis;
}
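/* Illustrative sketch (not compiled) of the unicast connect sequence
 * implemented by hci_connect_cis() above:
 *
 *	le   = hci_connect_le{,_scan}(...);	// 1. establish/reuse the ACL
 *	cis  = hci_bind_cis(...);		// 2. bind CIS + queue CIG params
 *	link = hci_conn_link(le, cis);		// 3. parent ACL <-> child CIS
 *	hci_le_create_cis_pending(hdev);	// 4. queue Create CIS
 *
 * The CIS itself is only established once the parent ACL reaches
 * BT_CONNECTED; see hci_conn_check_create_cis() above.
 */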
/* Check link security requirement */
int hci_conn_check_link_mode(struct hci_conn *conn)
{
	BT_DBG("hcon %p", conn);

	/* In Secure Connections Only mode, it is required that Secure
	 * Connections is used and the link is encrypted with AES-CCM
	 * using a P-256 authenticated combination key.
	 */
	if (hci_dev_test_flag(conn->hdev, HCI_SC_ONLY)) {
		if (!hci_conn_sc_enabled(conn) ||
		    !test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
		    conn->key_type != HCI_LK_AUTH_COMBINATION_P256)
			return 0;
	}

	/* AES encryption is required for Level 4:
	 *
	 * BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 3, Part C
	 * page 1319:
	 *
	 * 128-bit equivalent strength for link and encryption keys
	 * required using FIPS approved algorithms (E0 not allowed,
	 * SAFER+ not allowed, and P-192 not allowed; encryption key
	 * not shortened)
	 */
	if (conn->sec_level == BT_SECURITY_FIPS &&
	    !test_bit(HCI_CONN_AES_CCM, &conn->flags)) {
		bt_dev_err(conn->hdev,
			   "Invalid security: Missing AES-CCM usage");
		return 0;
	}

	if (hci_conn_ssp_enabled(conn) &&
	    !test_bit(HCI_CONN_ENCRYPT, &conn->flags))
		return 0;

	return 1;
}

/* Authenticate remote device */
static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
	BT_DBG("hcon %p", conn);

	if (conn->pending_sec_level > sec_level)
		sec_level = conn->pending_sec_level;

	if (sec_level > conn->sec_level)
		conn->pending_sec_level = sec_level;
	else if (test_bit(HCI_CONN_AUTH, &conn->flags))
		return 1;

	/* Make sure we preserve an existing MITM requirement */
	auth_type |= (conn->auth_type & 0x01);

	conn->auth_type = auth_type;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(cp), &cp);

		/* Set the ENCRYPT_PEND to trigger encryption after
		 * authentication.
		 */
		if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags))
			set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
	}

	return 0;
}

/* Encrypt the link */
static void hci_conn_encrypt(struct hci_conn *conn)
{
	BT_DBG("hcon %p", conn);

	if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		struct hci_cp_set_conn_encrypt cp;
		cp.handle = cpu_to_le16(conn->handle);
		cp.encrypt = 0x01;
		hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
			     &cp);
	}
}
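/* Informal sketch (not compiled) of the flag handshake used by the two
 * helpers above when authentication has to precede encryption:
 *
 *	hci_conn_auth():	set AUTH_PEND, send HCI_OP_AUTH_REQUESTED,
 *				and set ENCRYPT_PEND if not yet encrypted
 *	auth complete event:	clear AUTH_PEND; with ENCRYPT_PEND still
 *				set, encryption is requested next (the
 *				event side lives in hci_event.c)
 */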
/* Enable security */
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
		      bool initiator)
{
	BT_DBG("hcon %p", conn);

	if (conn->type == LE_LINK)
		return smp_conn_security(conn, sec_level);

	/* For sdp we don't need the link key. */
	if (sec_level == BT_SECURITY_SDP)
		return 1;

	/* For non 2.1 devices and low security level we don't need the link
	 * key.
	 */
	if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
		return 1;

	/* For other security levels we need the link key. */
	if (!test_bit(HCI_CONN_AUTH, &conn->flags))
		goto auth;

	switch (conn->key_type) {
	case HCI_LK_AUTH_COMBINATION_P256:
		/* An authenticated FIPS approved combination key has
		 * sufficient security for security level 4 or lower.
		 */
		if (sec_level <= BT_SECURITY_FIPS)
			goto encrypt;
		break;
	case HCI_LK_AUTH_COMBINATION_P192:
		/* An authenticated combination key has sufficient security for
		 * security level 3 or lower.
		 */
		if (sec_level <= BT_SECURITY_HIGH)
			goto encrypt;
		break;
	case HCI_LK_UNAUTH_COMBINATION_P192:
	case HCI_LK_UNAUTH_COMBINATION_P256:
		/* An unauthenticated combination key has sufficient security
		 * for security level 2 or lower.
		 */
		if (sec_level <= BT_SECURITY_MEDIUM)
			goto encrypt;
		break;
	case HCI_LK_COMBINATION:
		/* A combination key always has sufficient security for
		 * security level 2 or lower. A high security level requires
		 * that the combination key was generated using the maximum
		 * PIN code length (16). For pre 2.1 units.
		 */
		if (sec_level <= BT_SECURITY_MEDIUM || conn->pin_length == 16)
			goto encrypt;
		break;
	default:
		break;
	}

auth:
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
		return 0;

	if (initiator)
		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

	if (!hci_conn_auth(conn, sec_level, auth_type))
		return 0;

encrypt:
	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags)) {
		/* Ensure that the encryption key size has been read,
		 * otherwise stall the upper layer responses.
		 */
		if (!conn->enc_key_size)
			return 0;

		/* Nothing else needed, all requirements are met */
		return 1;
	}

	hci_conn_encrypt(conn);
	return 0;
}
EXPORT_SYMBOL(hci_conn_security);

/* Check secure link requirement */
int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
{
	BT_DBG("hcon %p", conn);

	/* Accept if non-secure or higher security level is required */
	if (sec_level != BT_SECURITY_HIGH && sec_level != BT_SECURITY_FIPS)
		return 1;

	/* Accept if secure or higher security level is already present */
	if (conn->sec_level == BT_SECURITY_HIGH ||
	    conn->sec_level == BT_SECURITY_FIPS)
		return 1;

	/* Reject the link as not secure */
	return 0;
}
EXPORT_SYMBOL(hci_conn_check_secure);

/* Switch role */
int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
{
	BT_DBG("hcon %p", conn);

	if (role == conn->role)
		return 1;

	if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
		struct hci_cp_switch_role cp;
		bacpy(&cp.bdaddr, &conn->dst);
		cp.role = role;
		hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
	}

	return 0;
}
EXPORT_SYMBOL(hci_conn_switch_role);

/* Enter active mode */
void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("hcon %p mode %d", conn, conn->mode);

	if (conn->mode != HCI_CM_SNIFF)
		goto timer;

	if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
		goto timer;

	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
		struct hci_cp_exit_sniff_mode cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
	}

timer:
	if (hdev->idle_timeout > 0)
		queue_delayed_work(hdev->workqueue, &conn->idle_work,
				   msecs_to_jiffies(hdev->idle_timeout));
}
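/* Illustrative sketch (not compiled) of how an upper layer such as L2CAP
 * typically consumes hci_conn_security() above; the helper names are
 * hypothetical:
 *
 *	if (hci_conn_security(hcon, chan->sec_level, auth_type, initiator))
 *		continue_setup(chan);		// requirements already met
 *	else
 *		wait_for_security_cfm(chan);	// auth/encrypt now pending
 */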
/* Drop all connections on the device */
void hci_conn_hash_flush(struct hci_dev *hdev)
{
	struct list_head *head = &hdev->conn_hash.list;
	struct hci_conn *conn;

	BT_DBG("hdev %s", hdev->name);

	/* We should not traverse the list here, because hci_conn_del
	 * can remove extra links, which may cause the list traversal
	 * to hit items that have already been released.
	 */
	while ((conn = list_first_entry_or_null(head,
						struct hci_conn,
						list)) != NULL) {
		conn->state = BT_CLOSED;
		hci_disconn_cfm(conn, HCI_ERROR_LOCAL_HOST_TERM);
		hci_conn_del(conn);
	}
}

static u32 get_link_mode(struct hci_conn *conn)
{
	u32 link_mode = 0;

	if (conn->role == HCI_ROLE_MASTER)
		link_mode |= HCI_LM_MASTER;

	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
		link_mode |= HCI_LM_ENCRYPT;

	if (test_bit(HCI_CONN_AUTH, &conn->flags))
		link_mode |= HCI_LM_AUTH;

	if (test_bit(HCI_CONN_SECURE, &conn->flags))
		link_mode |= HCI_LM_SECURE;

	if (test_bit(HCI_CONN_FIPS, &conn->flags))
		link_mode |= HCI_LM_FIPS;

	return link_mode;
}

int hci_get_conn_list(void __user *arg)
{
	struct hci_conn *c;
	struct hci_conn_list_req req, *cl;
	struct hci_conn_info *ci;
	struct hci_dev *hdev;
	int n = 0, size, err;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
		return -EINVAL;

	size = sizeof(req) + req.conn_num * sizeof(*ci);

	cl = kmalloc(size, GFP_KERNEL);
	if (!cl)
		return -ENOMEM;

	hdev = hci_dev_get(req.dev_id);
	if (!hdev) {
		kfree(cl);
		return -ENODEV;
	}

	ci = cl->conn_info;

	hci_dev_lock(hdev);
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		bacpy(&(ci + n)->bdaddr, &c->dst);
		(ci + n)->handle = c->handle;
		(ci + n)->type = c->type;
		(ci + n)->out = c->out;
		(ci + n)->state = c->state;
		(ci + n)->link_mode = get_link_mode(c);
		if (++n >= req.conn_num)
			break;
	}
	hci_dev_unlock(hdev);

	cl->dev_id = hdev->id;
	cl->conn_num = n;
	size = sizeof(req) + n * sizeof(*ci);

	hci_dev_put(hdev);

	err = copy_to_user(arg, cl, size);
	kfree(cl);

	return err ? -EFAULT : 0;
}

int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
{
	struct hci_conn_info_req req;
	struct hci_conn_info ci;
	struct hci_conn *conn;
	char __user *ptr = arg + sizeof(req);

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
	if (conn) {
		bacpy(&ci.bdaddr, &conn->dst);
		ci.handle = conn->handle;
		ci.type = conn->type;
		ci.out = conn->out;
		ci.state = conn->state;
		ci.link_mode = get_link_mode(conn);
	}
	hci_dev_unlock(hdev);

	if (!conn)
		return -ENOENT;

	return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
}
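/* Illustrative userspace sketch (not part of the kernel) for the
 * HCIGETCONNLIST ioctl served by hci_get_conn_list() above; error
 * handling elided and the capacity of 10 entries is arbitrary:
 *
 *	struct hci_conn_list_req *cl;
 *
 *	cl = malloc(sizeof(*cl) + 10 * sizeof(struct hci_conn_info));
 *	cl->dev_id = 0;		// hci0
 *	cl->conn_num = 10;	// capacity; kernel rewrites it with the count
 *	ioctl(hci_sock, HCIGETCONNLIST, cl);
 */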
int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
{
	struct hci_auth_info_req req;
	struct hci_conn *conn;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
	if (conn)
		req.type = conn->auth_type;
	hci_dev_unlock(hdev);

	if (!conn)
		return -ENOENT;

	return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
}

struct hci_chan *hci_chan_create(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_chan *chan;

	BT_DBG("%s hcon %p", hdev->name, conn);

	if (test_bit(HCI_CONN_DROP, &conn->flags)) {
		BT_DBG("Refusing to create new hci_chan");
		return NULL;
	}

	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return NULL;

	chan->conn = hci_conn_get(conn);
	skb_queue_head_init(&chan->data_q);
	chan->state = BT_CONNECTED;

	list_add_rcu(&chan->list, &conn->chan_list);

	return chan;
}

void hci_chan_del(struct hci_chan *chan)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);

	list_del_rcu(&chan->list);

	synchronize_rcu();

	/* Prevent new hci_chans from being created for this hci_conn */
	set_bit(HCI_CONN_DROP, &conn->flags);

	hci_conn_put(conn);

	skb_queue_purge(&chan->data_q);
	kfree(chan);
}

void hci_chan_list_flush(struct hci_conn *conn)
{
	struct hci_chan *chan, *n;

	BT_DBG("hcon %p", conn);

	list_for_each_entry_safe(chan, n, &conn->chan_list, list)
		hci_chan_del(chan);
}

static struct hci_chan *__hci_chan_lookup_handle(struct hci_conn *hcon,
						 __u16 handle)
{
	struct hci_chan *hchan;

	list_for_each_entry(hchan, &hcon->chan_list, list) {
		if (hchan->handle == handle)
			return hchan;
	}

	return NULL;
}

struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *hcon;
	struct hci_chan *hchan = NULL;

	rcu_read_lock();

	list_for_each_entry_rcu(hcon, &h->list, list) {
		hchan = __hci_chan_lookup_handle(hcon, handle);
		if (hchan)
			break;
	}

	rcu_read_unlock();

	return hchan;
}
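/* Note on lifetime (informal, not compiled): hci_chan_create() publishes
 * a channel with list_add_rcu(), and hci_chan_del() unpublishes it with
 * list_del_rcu() followed by synchronize_rcu() before freeing, so the
 * lockless walk in hci_chan_lookup_handle() never sees freed memory:
 *
 *	chan = hci_chan_create(conn);	// publish on conn->chan_list
 *	...
 *	hci_chan_del(chan);		// unpublish, wait for readers, kfree
 */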
u32 hci_conn_get_phy(struct hci_conn *conn)
{
	u32 phys = 0;

	/* BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 2, Part B page 471:
	 * Table 6.2: Packets defined for synchronous, asynchronous, and
	 * CPB logical transport types.
	 */
	switch (conn->type) {
	case SCO_LINK:
		/* SCO logical transport (1 Mb/s):
		 * HV1, HV2, HV3 and DV.
		 */
		phys |= BT_PHY_BR_1M_1SLOT;

		break;

	case ACL_LINK:
		/* ACL logical transport (1 Mb/s) ptt=0:
		 * DH1, DM3, DH3, DM5 and DH5.
		 */
		phys |= BT_PHY_BR_1M_1SLOT;

		if (conn->pkt_type & (HCI_DM3 | HCI_DH3))
			phys |= BT_PHY_BR_1M_3SLOT;

		if (conn->pkt_type & (HCI_DM5 | HCI_DH5))
			phys |= BT_PHY_BR_1M_5SLOT;

		/* ACL logical transport (2 Mb/s) ptt=1:
		 * 2-DH1, 2-DH3 and 2-DH5.
		 */
		if (!(conn->pkt_type & HCI_2DH1))
			phys |= BT_PHY_EDR_2M_1SLOT;

		if (!(conn->pkt_type & HCI_2DH3))
			phys |= BT_PHY_EDR_2M_3SLOT;

		if (!(conn->pkt_type & HCI_2DH5))
			phys |= BT_PHY_EDR_2M_5SLOT;

		/* ACL logical transport (3 Mb/s) ptt=1:
		 * 3-DH1, 3-DH3 and 3-DH5.
		 */
		if (!(conn->pkt_type & HCI_3DH1))
			phys |= BT_PHY_EDR_3M_1SLOT;

		if (!(conn->pkt_type & HCI_3DH3))
			phys |= BT_PHY_EDR_3M_3SLOT;

		if (!(conn->pkt_type & HCI_3DH5))
			phys |= BT_PHY_EDR_3M_5SLOT;

		break;

	case ESCO_LINK:
		/* eSCO logical transport (1 Mb/s): EV3, EV4 and EV5 */
		phys |= BT_PHY_BR_1M_1SLOT;

		if (!(conn->pkt_type & (ESCO_EV4 | ESCO_EV5)))
			phys |= BT_PHY_BR_1M_3SLOT;

		/* eSCO logical transport (2 Mb/s): 2-EV3, 2-EV5 */
		if (!(conn->pkt_type & ESCO_2EV3))
			phys |= BT_PHY_EDR_2M_1SLOT;

		if (!(conn->pkt_type & ESCO_2EV5))
			phys |= BT_PHY_EDR_2M_3SLOT;

		/* eSCO logical transport (3 Mb/s): 3-EV3, 3-EV5 */
		if (!(conn->pkt_type & ESCO_3EV3))
			phys |= BT_PHY_EDR_3M_1SLOT;

		if (!(conn->pkt_type & ESCO_3EV5))
			phys |= BT_PHY_EDR_3M_3SLOT;

		break;

	case LE_LINK:
		if (conn->le_tx_phy & HCI_LE_SET_PHY_1M)
			phys |= BT_PHY_LE_1M_TX;

		if (conn->le_rx_phy & HCI_LE_SET_PHY_1M)
			phys |= BT_PHY_LE_1M_RX;

		if (conn->le_tx_phy & HCI_LE_SET_PHY_2M)
			phys |= BT_PHY_LE_2M_TX;

		if (conn->le_rx_phy & HCI_LE_SET_PHY_2M)
			phys |= BT_PHY_LE_2M_RX;

		if (conn->le_tx_phy & HCI_LE_SET_PHY_CODED)
			phys |= BT_PHY_LE_CODED_TX;

		if (conn->le_rx_phy & HCI_LE_SET_PHY_CODED)
			phys |= BT_PHY_LE_CODED_RX;

		break;
	}

	return phys;
}

static int abort_conn_sync(struct hci_dev *hdev, void *data)
{
	struct hci_conn *conn = data;

	if (!hci_conn_valid(hdev, conn))
		return -ECANCELED;

	return hci_abort_conn_sync(hdev, conn, conn->abort_reason);
}

int hci_abort_conn(struct hci_conn *conn, u8 reason)
{
	struct hci_dev *hdev = conn->hdev;

	/* If abort_reason has already been set it means the connection is
	 * already being aborted so don't attempt to overwrite it.
	 */
	if (conn->abort_reason)
		return 0;

	bt_dev_dbg(hdev, "handle 0x%2.2x reason 0x%2.2x", conn->handle, reason);

	conn->abort_reason = reason;

	/* If the connection is pending check the command opcode since that
	 * might be blocking on hci_cmd_sync_work while waiting for its
	 * respective event, in which case hci_cmd_sync_cancel is needed to
	 * cancel it.
	 *
	 * hci_connect_le serializes the connection attempts so only one
	 * connection can be in BT_CONNECT at a time.
	 */
	if (conn->state == BT_CONNECT && hdev->req_status == HCI_REQ_PEND) {
		switch (hci_skb_event(hdev->sent_cmd)) {
		case HCI_EV_CONN_COMPLETE:
		case HCI_EV_LE_CONN_COMPLETE:
		case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
		case HCI_EVT_LE_CIS_ESTABLISHED:
			hci_cmd_sync_cancel(hdev, ECANCELED);
			break;
		}
	/* Cancel connect attempt if still queued/pending */
	} else if (!hci_cancel_connect_sync(hdev, conn)) {
		return 0;
	}

	/* Run immediately if on cmd_sync_work since this may be called
	 * as a result of MGMT_OP_DISCONNECT/MGMT_OP_UNPAIR, which already
	 * queues its callback on cmd_sync_work.
	 */
	return hci_cmd_sync_run_once(hdev, abort_conn_sync, conn, NULL);
}
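/* Illustrative sketch (not compiled): hci_abort_conn() above is the
 * single entry point for tearing down a connection whether it is queued,
 * pending or established; a caller only chooses the HCI reason code:
 *
 *	hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);	// user close
 *	hci_abort_conn(conn, HCI_ERROR_AUTH_FAILURE);		// pairing fail
 */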