/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum.  All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI connection handling. */

#include <linux/module.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <net/sock.h>

#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

/* Initiate an LE connection: mark the conn as an outgoing BT_CONNECT and
 * send HCI_OP_LE_CREATE_CONN with fixed scan/connection parameters.
 * NOTE(review): parameter values are raw little-endian HCI units
 * (e.g. 0x0004 scan interval/window) — confirm against the Bluetooth
 * Core Specification before changing them. */
static void hci_le_connect(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_le_create_conn cp;

	conn->state = BT_CONNECT;
	conn->out = 1;

	memset(&cp, 0, sizeof(cp));
	cp.scan_interval = cpu_to_le16(0x0004);
	cp.scan_window = cpu_to_le16(0x0004);
	bacpy(&cp.peer_addr, &conn->dst);
	cp.conn_interval_min = cpu_to_le16(0x0008);
	cp.conn_interval_max = cpu_to_le16(0x0100);
	cp.supervision_timeout = cpu_to_le16(0x0064);
	cp.min_ce_len = cpu_to_le16(0x0001);
	cp.max_ce_len = cpu_to_le16(0x0001);

	hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
}

/* Abort a pending LE connection attempt (no parameters needed — the
 * controller only allows one outstanding LE Create Connection). */
static void hci_le_connect_cancel(struct hci_conn *conn)
{
	hci_send_cmd(conn->hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
}

/* Initiate an outgoing ACL (BR/EDR) connection to conn->dst.
 * Seeds page-scan parameters from the inquiry cache when a fresh entry
 * exists, and requests a role switch only if the adapter supports it and
 * is not configured to stay master. */
void hci_acl_connect(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct inquiry_entry *ie;
	struct hci_cp_create_conn cp;

	BT_DBG("%p", conn);

	conn->state = BT_CONNECT;
	conn->out = 1;

	conn->link_mode = HCI_LM_MASTER;

	/* counts retries; consumed by the connect-complete handlers */
	conn->attempt++;

	conn->link_policy = hdev->link_policy;

	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.bdaddr, &conn->dst);
	cp.pscan_rep_mode = 0x02;

	ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
	if (ie) {
		/* Use cached page-scan data only while it is fresh */
		if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
			cp.pscan_rep_mode = ie->data.pscan_rep_mode;
			cp.pscan_mode = ie->data.pscan_mode;
			/* bit 15 set = clock offset is valid */
			cp.clock_offset = ie->data.clock_offset |
					  cpu_to_le16(0x8000);
		}

		memcpy(conn->dev_class, ie->data.dev_class, 3);
		conn->ssp_mode = ie->data.ssp_mode;
	}

	cp.pkt_type = cpu_to_le16(conn->pkt_type);
	if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
		cp.role_switch = 0x01;
	else
		cp.role_switch = 0x00;

	hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
}

/* Cancel an outgoing ACL connection attempt.  Controllers older than
 * HCI version 1.1 (hci_ver < 2) do not implement Create Connection
 * Cancel, so bail out silently for them. */
static void hci_acl_connect_cancel(struct hci_conn *conn)
{
	struct hci_cp_create_conn_cancel cp;

	BT_DBG("%p", conn);

	if (conn->hdev->hci_ver < 2)
		return;

	bacpy(&cp.bdaddr, &conn->dst);
	hci_send_cmd(conn->hdev, HCI_OP_CREATE_CONN_CANCEL, sizeof(cp), &cp);
}

/* Tear down an established link: move the conn to BT_DISCONN and send
 * HCI_OP_DISCONNECT with the given HCI reason code. */
void hci_acl_disconn(struct hci_conn *conn, __u8 reason)
{
	struct hci_cp_disconnect cp;

	BT_DBG("%p", conn);

	conn->state = BT_DISCONN;

	cp.handle = cpu_to_le16(conn->handle);
	cp.reason = reason;
	hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
}

/* Add a legacy SCO link on top of the ACL identified by @handle
 * (used when the controller is not eSCO capable). */
void hci_add_sco(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_add_sco cp;

	BT_DBG("%p", conn);

	conn->state = BT_CONNECT;
	conn->out = 1;

	conn->attempt++;

	cp.handle = cpu_to_le16(handle);
	cp.pkt_type = cpu_to_le16(conn->pkt_type);

	hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
}

/* Set up a synchronous (eSCO) connection on top of the ACL identified
 * by @handle.  Bandwidth/latency/retransmission values are fixed;
 * voice setting comes from the adapter configuration.
 * NOTE(review): 0x1f40 = 8000 bytes/s and max_latency/retrans_effort of
 * 0xff(ff) mean "don't care" — confirm against the HCI specification. */
void hci_setup_sync(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_setup_sync_conn cp;

	BT_DBG("%p", conn);

	conn->state = BT_CONNECT;
	conn->out = 1;

	conn->attempt++;

	cp.handle   = cpu_to_le16(handle);
	cp.pkt_type = cpu_to_le16(conn->pkt_type);

	cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
	cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
	cp.max_latency    = cpu_to_le16(0xffff);
	cp.voice_setting  = cpu_to_le16(hdev->voice_setting);
	cp.retrans_effort = 0xff;

	hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp);
}

/* Complete (or fail) a deferred SCO setup hanging off an ACL conn.
 * On success pick eSCO vs legacy SCO based on controller capability;
 * on failure notify the protocol layer and delete the SCO conn.
 * Device _must_ be locked */
void hci_sco_setup(struct hci_conn *conn, __u8 status)
{
	struct hci_conn *sco = conn->link;

	BT_DBG("%p", conn);

	if (!sco)
		return;

	if (!status) {
		if (lmp_esco_capable(conn->hdev))
			hci_setup_sync(sco, conn->handle);
		else
			hci_add_sco(sco, conn->handle);
	} else {
		hci_proto_connect_cfm(sco, status);
		hci_conn_del(sco);
	}
}

/* Disconnect timer callback (armed via conn->disc_timer in hci_conn_add).
 * Fires when a connection has been idle past its disc_timeout: cancels a
 * still-pending connect attempt, or disconnects an established link with
 * the reason supplied by the protocol layer.  Does nothing while the
 * connection is still referenced (refcnt > 0). */
static void hci_conn_timeout(unsigned long arg)
{
	struct hci_conn *conn = (void *) arg;
	struct hci_dev *hdev = conn->hdev;
	__u8 reason;

	BT_DBG("conn %p state %d", conn, conn->state);

	if (atomic_read(&conn->refcnt))
		return;

	hci_dev_lock(hdev);

	switch (conn->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
		/* only outgoing attempts can be cancelled by us */
		if (conn->out) {
			if (conn->type == ACL_LINK)
				hci_acl_connect_cancel(conn);
			else if (conn->type == LE_LINK)
				hci_le_connect_cancel(conn);
		}
		break;
	case BT_CONFIG:
	case BT_CONNECTED:
		reason = hci_proto_disconn_ind(conn);
		hci_acl_disconn(conn, reason);
		break;
	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_dev_unlock(hdev);
}

/* Idle timer callback (armed via conn->idle_timer): request sniff mode
 * after the adapter's idle_timeout of inactivity. */
static void hci_conn_idle(unsigned long arg)
{
	struct hci_conn *conn = (void *) arg;

	BT_DBG("conn %p mode %d", conn, conn->mode);

	hci_conn_enter_sniff_mode(conn);
}

/* Allocate and register a new hci_conn for @dst on @hdev.
 * Initialises state/timers, picks the packet-type mask for the link
 * type, adds the conn to the device hash (with the TX tasklet disabled
 * to serialise against transmission), and takes a reference on hdev.
 * Returns NULL on allocation failure.  Uses GFP_ATOMIC — callers may be
 * in non-sleeping context. */
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
{
	struct hci_conn *conn;

	BT_DBG("%s dst %s", hdev->name, batostr(dst));

	conn = kzalloc(sizeof(struct hci_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;

	bacpy(&conn->dst, dst);
	conn->hdev = hdev;
	conn->type = type;
	conn->mode = HCI_CM_ACTIVE;
	conn->state = BT_OPEN;
	conn->auth_type = HCI_AT_GENERAL_BONDING;
	conn->io_capability = hdev->io_capability;

	conn->power_save = 1;
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;

	switch (type) {
	case ACL_LINK:
		conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
		break;
	case SCO_LINK:
		if (lmp_esco_capable(hdev))
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
		else
			conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
		break;
	case ESCO_LINK:
		conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
		break;
	}

	skb_queue_head_init(&conn->data_q);

	setup_timer(&conn->disc_timer, hci_conn_timeout, (unsigned long)conn);
	setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn);

	atomic_set(&conn->refcnt, 0);

	hci_dev_hold(hdev);

	/* keep the TX tasklet from walking the hash while we mutate it */
	tasklet_disable(&hdev->tx_task);

	hci_conn_hash_add(hdev, conn);
	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_CONN_DEL == 0 ? 0 : HCI_NOTIFY_CONN_ADD);

	atomic_set(&conn->devref, 0);

	hci_conn_init_sysfs(conn);

	tasklet_enable(&hdev->tx_task);

	return conn;
}

/* Unregister and free a connection: stop its timers, detach any linked
 * SCO/ACL peer, return unacked ACL credits to the device, remove it from
 * the hash (TX tasklet disabled), drop the sysfs/device references and
 * release the hdev reference taken in hci_conn_add.  Always returns 0. */
int hci_conn_del(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s conn %p handle %d", hdev->name, conn, conn->handle);

	del_timer(&conn->idle_timer);

	del_timer(&conn->disc_timer);

	if (conn->type == ACL_LINK) {
		struct hci_conn *sco = conn->link;
		if (sco)
			sco->link = NULL;

		/* Unacked frames */
		hdev->acl_cnt += conn->sent;
	} else {
		struct hci_conn *acl = conn->link;
		if (acl) {
			acl->link = NULL;
			hci_conn_put(acl);
		}
	}

	tasklet_disable(&hdev->tx_task);

	hci_conn_hash_del(hdev, conn);
	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);

	tasklet_enable(&hdev->tx_task);

	skb_queue_purge(&conn->data_q);

	hci_conn_put_device(conn);

	hci_dev_put(hdev);

	return 0;
}

/* Pick the local adapter to use for reaching @dst.  If @src is a real
 * address, find the adapter with that bdaddr; otherwise take the first
 * usable adapter whose own address differs from @dst.  Skips adapters
 * that are down or in raw mode.  Returns a held hci_dev (caller must
 * hci_dev_put) or NULL. */
struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
{
	int use_src = bacmp(src, BDADDR_ANY);
	struct hci_dev *hdev = NULL;
	struct list_head *p;

	BT_DBG("%s -> %s", batostr(src), batostr(dst));

	read_lock_bh(&hci_dev_list_lock);

	list_for_each(p, &hci_dev_list) {
		struct hci_dev *d = list_entry(p, struct hci_dev, list);

		if (!test_bit(HCI_UP, &d->flags) || test_bit(HCI_RAW, &d->flags))
			continue;

		/* Simple routing:
		 *   No source address - find interface with bdaddr != dst
		 *   Source address    - find interface with bdaddr == src
		 */

		if (use_src) {
			if (!bacmp(&d->bdaddr, src)) {
				hdev = d; break;
			}
		} else {
			if (bacmp(&d->bdaddr, dst)) {
				hdev = d; break;
			}
		}
	}

	if (hdev)
		hdev = hci_dev_hold(hdev);

	read_unlock_bh(&hci_dev_list_lock);
	return hdev;
}
EXPORT_SYMBOL(hci_get_route);

/* Create SCO, ACL or LE connection.
 * For LE: reuse or create the conn and kick off hci_le_connect.
 * For BR/EDR: always set up (or reuse) the underlying ACL first; for
 * SCO/eSCO additionally create the synchronous conn linked to the ACL,
 * deferring the SCO setup if a mode change is still pending.  Returns
 * the requested conn with a reference held, or NULL on failure.
 * Device _must_ be locked */
struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type)
{
	struct hci_conn *acl;
	struct hci_conn *sco;
	struct hci_conn *le;

	BT_DBG("%s dst %s", hdev->name, batostr(dst));

	if (type == LE_LINK) {
		le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
		if (!le)
			le = hci_conn_add(hdev, LE_LINK, dst);
		if (!le)
			return NULL;
		if (le->state == BT_OPEN)
			hci_le_connect(le);

		hci_conn_hold(le);

		return le;
	}

	acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
	if (!acl) {
		acl = hci_conn_add(hdev, ACL_LINK, dst);
		if (!acl)
			return NULL;
	}

	hci_conn_hold(acl);

	if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
		acl->sec_level = BT_SECURITY_LOW;
		acl->pending_sec_level = sec_level;
		acl->auth_type = auth_type;
		hci_acl_connect(acl);
	}

	if (type == ACL_LINK)
		return acl;

	sco = hci_conn_hash_lookup_ba(hdev, type, dst);
	if (!sco) {
		sco = hci_conn_add(hdev, type, dst);
		if (!sco) {
			/* drop the ACL reference we just took */
			hci_conn_put(acl);
			return NULL;
		}
	}

	acl->link = sco;
	sco->link = acl;

	hci_conn_hold(sco);

	if (acl->state == BT_CONNECTED &&
			(sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
		acl->power_save = 1;
		hci_conn_enter_active_mode(acl);

		if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->pend)) {
			/* defer SCO setup until mode change completed */
			set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->pend);
			return sco;
		}

		hci_sco_setup(acl, 0x00);
	}

	return sco;
}
EXPORT_SYMBOL(hci_connect);

/* Check link security requirement.
 * Returns 0 only when both sides use SSP but the link is not encrypted;
 * otherwise the link mode is acceptable and 1 is returned. */
int hci_conn_check_link_mode(struct hci_conn *conn)
{
	BT_DBG("conn %p", conn);

	if (conn->ssp_mode > 0 && conn->hdev->ssp_mode > 0 &&
					!(conn->link_mode & HCI_LM_ENCRYPT))
		return 0;

	return 1;
}
EXPORT_SYMBOL(hci_conn_check_link_mode);

/* Authenticate remote device.
 * Raises pending_sec_level as needed; returns 1 if the link is already
 * authenticated at a sufficient level, otherwise queues an HCI
 * Authentication Request (once — guarded by HCI_CONN_AUTH_PEND) and
 * returns 0. */
static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
	BT_DBG("conn %p", conn);

	if (conn->pending_sec_level > sec_level)
		sec_level = conn->pending_sec_level;

	if (sec_level > conn->sec_level)
		conn->pending_sec_level = sec_level;
	else if (conn->link_mode & HCI_LM_AUTH)
		return 1;

	/* Make sure we preserve an existing MITM requirement */
	auth_type |= (conn->auth_type & 0x01);

	conn->auth_type = auth_type;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
		struct hci_cp_auth_requested cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
							sizeof(cp), &cp);
	}

	return 0;
}

/* Enable security.
 * Returns 1 when the requested level is already satisfied (SDP, or LOW
 * without SSP, or an already-encrypted link that authenticates OK);
 * otherwise requests authentication and, once that succeeds
 * immediately, turns on encryption.  Returns 0 while the request is in
 * flight. */
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
	BT_DBG("conn %p", conn);

	if (sec_level == BT_SECURITY_SDP)
		return 1;

	if (sec_level == BT_SECURITY_LOW &&
				(!conn->ssp_mode || !conn->hdev->ssp_mode))
		return 1;

	if (conn->link_mode & HCI_LM_ENCRYPT)
		return hci_conn_auth(conn, sec_level, auth_type);

	/* only one encryption request may be outstanding */
	if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
		return 0;

	if (hci_conn_auth(conn, sec_level, auth_type)) {
		struct hci_cp_set_conn_encrypt cp;
		cp.handle  = cpu_to_le16(conn->handle);
		cp.encrypt = 1;
		hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT,
							sizeof(cp), &cp);
	}

	return 0;
}
EXPORT_SYMBOL(hci_conn_security);

/* Change link key.
 * Queues an HCI Change Connection Link Key request unless an
 * authentication request is already pending.  Always returns 0. */
int hci_conn_change_link_key(struct hci_conn *conn)
{
	BT_DBG("conn %p", conn);

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
		struct hci_cp_change_conn_link_key cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY,
							sizeof(cp), &cp);
	}

	return 0;
}
EXPORT_SYMBOL(hci_conn_change_link_key);

/* Switch role.
 * Returns 1 if we are already master and master (role 0) was requested;
 * otherwise queues an HCI Switch Role request (once — guarded by
 * HCI_CONN_RSWITCH_PEND) and returns 0. */
int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
{
	BT_DBG("conn %p", conn);

	if (!role && conn->link_mode & HCI_LM_MASTER)
		return 1;

	if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->pend)) {
		struct hci_cp_switch_role cp;
		bacpy(&cp.bdaddr, &conn->dst);
		cp.role = role;
		hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
	}

	return 0;
}
EXPORT_SYMBOL(hci_conn_switch_role);

/* Enter active mode.
 * Exits sniff mode (if in it and power save is enabled) and re-arms the
 * idle timer that will later put the link back to sniff.  No-op in raw
 * mode. */
void hci_conn_enter_active_mode(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("conn %p mode %d", conn, conn->mode);

	if (test_bit(HCI_RAW, &hdev->flags))
		return;

	if (conn->mode != HCI_CM_SNIFF || !conn->power_save)
		goto timer;

	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
		struct hci_cp_exit_sniff_mode cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
	}

timer:
	if (hdev->idle_timeout > 0)
		mod_timer(&conn->idle_timer,
				jiffies + msecs_to_jiffies(hdev->idle_timeout));
}

/* Enter sniff mode.
 * Requires both sides to be sniff capable, an active link, and a link
 * policy that permits sniff.  Configures sniff subrating first when
 * supported, then requests sniff mode with the adapter's configured
 * interval range.  No-op in raw mode. */
void hci_conn_enter_sniff_mode(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("conn %p mode %d", conn, conn->mode);

	if (test_bit(HCI_RAW, &hdev->flags))
		return;

	if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
		return;

	if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
		return;

	if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
		struct hci_cp_sniff_subrate cp;
		cp.handle             = cpu_to_le16(conn->handle);
		cp.max_latency        = cpu_to_le16(0);
		cp.min_remote_timeout = cpu_to_le16(0);
		cp.min_local_timeout  = cpu_to_le16(0);
		hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
	}

	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
		struct hci_cp_sniff_mode cp;
		cp.handle       = cpu_to_le16(conn->handle);
		cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
		cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
		cp.attempt      = cpu_to_le16(4);
		cp.timeout      = cpu_to_le16(1);
		hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
	}
}

/* Drop all connection on the device.
 * Walks the conn hash manually (saving the next pointer before each
 * delete frees the current entry), marks each conn closed, notifies the
 * protocol layer with reason 0x16 (connection terminated by local host)
 * and deletes it. */
void hci_conn_hash_flush(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;

	BT_DBG("hdev %s", hdev->name);

	p = h->list.next;
	while (p != &h->list) {
		struct hci_conn *c;

		c = list_entry(p, struct hci_conn, list);
		p = p->next;

		c->state = BT_CLOSED;

		hci_proto_disconn_cfm(c, 0x16);
		hci_conn_del(c);
	}
}

/* Check pending connect attempts:
 * resume the first ACL connection still parked in BT_CONNECT2. */
void hci_conn_check_pending(struct hci_dev *hdev)
{
	struct hci_conn *conn;

	BT_DBG("hdev %s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
	if (conn)
		hci_acl_connect(conn);

	hci_dev_unlock(hdev);
}

/* Take a device-lifetime reference (pairs with hci_conn_put_device). */
void hci_conn_hold_device(struct hci_conn *conn)
{
	atomic_inc(&conn->devref);
}
EXPORT_SYMBOL(hci_conn_hold_device);

/* Drop a device-lifetime reference; the last put tears down the
 * connection's sysfs entry. */
void hci_conn_put_device(struct hci_conn *conn)
{
	if (atomic_dec_and_test(&conn->devref))
		hci_conn_del_sysfs(conn);
}
EXPORT_SYMBOL(hci_conn_put_device);

/* HCIGETCONNLIST ioctl backend: copy up to req.conn_num connection info
 * records for the requested device to userspace.  conn_num is bounded
 * so the kmalloc stays within two pages.  Returns 0 or a negative
 * errno. */
int hci_get_conn_list(void __user *arg)
{
	struct hci_conn_list_req req, *cl;
	struct hci_conn_info *ci;
	struct hci_dev *hdev;
	struct list_head *p;
	int n = 0, size, err;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
		return -EINVAL;

	size = sizeof(req) + req.conn_num * sizeof(*ci);

	cl = kmalloc(size, GFP_KERNEL);
	if (!cl)
		return -ENOMEM;

	hdev = hci_dev_get(req.dev_id);
	if (!hdev) {
		kfree(cl);
		return -ENODEV;
	}

	ci = cl->conn_info;

	hci_dev_lock_bh(hdev);
	list_for_each(p, &hdev->conn_hash.list) {
		register struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		bacpy(&(ci + n)->bdaddr, &c->dst);
		(ci + n)->handle = c->handle;
		(ci + n)->type  = c->type;
		(ci + n)->out   = c->out;
		(ci + n)->state = c->state;
		(ci + n)->link_mode = c->link_mode;
		if (++n >= req.conn_num)
			break;
	}
	hci_dev_unlock_bh(hdev);

	cl->dev_id = hdev->id;
	cl->conn_num = n;
	/* only copy back the records actually filled in */
	size = sizeof(req) + n * sizeof(*ci);

	hci_dev_put(hdev);

	/* copy_to_user returns bytes NOT copied; fold to -EFAULT */
	err = copy_to_user(arg, cl, size);
	kfree(cl);

	return err ? -EFAULT : 0;
}

/* HCIGETCONNINFO ioctl backend: look up a single connection by type and
 * bdaddr and copy its info record to userspace (just past the request
 * header).  Returns -ENOENT when no such connection exists. */
int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
{
	struct hci_conn_info_req req;
	struct hci_conn_info ci;
	struct hci_conn *conn;
	char __user *ptr = arg + sizeof(req);

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	hci_dev_lock_bh(hdev);
	conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
	if (conn) {
		bacpy(&ci.bdaddr, &conn->dst);
		ci.handle = conn->handle;
		ci.type  = conn->type;
		ci.out   = conn->out;
		ci.state = conn->state;
		ci.link_mode = conn->link_mode;
	}
	hci_dev_unlock_bh(hdev);

	if (!conn)
		return -ENOENT;

	return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
}

/* HCIGETAUTHINFO ioctl backend: report the auth_type of the ACL
 * connection to the given bdaddr.  Returns -ENOENT when there is no
 * ACL link to that address. */
int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
{
	struct hci_auth_info_req req;
	struct hci_conn *conn;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	hci_dev_lock_bh(hdev);
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
	if (conn)
		req.type = conn->auth_type;
	hci_dev_unlock_bh(hdev);

	if (!conn)
		return -ENOENT;

	return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
}