/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI sockets. */

#include <linux/module.h>

#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/compat.h>
#include <linux/socket.h>
#include <linux/ioctl.h>
#include <net/sock.h>

#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_mon.h>

static atomic_t monitor_promisc = ATOMIC_INIT(0);

/* ----- HCI socket interface ----- */

static inline int hci_test_bit(int nr, void *addr)
{
	return *((__u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}

/* Security filter */
static struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};

static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};

/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct hlist_node *node;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, node, &hci_sk_list.head) {
		struct hci_filter *flt;
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel != HCI_CHANNEL_RAW)
			continue;

		/* Apply filter */
		flt = &hci_pi(sk)->filter;

		if (!test_bit((bt_cb(skb)->pkt_type == HCI_VENDOR_PKT) ?
			      0 : (bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS),
			      &flt->type_mask))
			continue;

		if (bt_cb(skb)->pkt_type == HCI_EVENT_PKT) {
			int evt = (*(__u8 *) skb->data & HCI_FLT_EVENT_BITS);

			if (!hci_test_bit(evt, &flt->event_mask))
				continue;

			if (flt->opcode &&
			    ((evt == HCI_EV_CMD_COMPLETE &&
			      flt->opcode !=
			      get_unaligned((__le16 *)(skb->data + 3))) ||
			     (evt == HCI_EV_CMD_STATUS &&
			      flt->opcode !=
			      get_unaligned((__le16 *)(skb->data + 4)))))
				continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy(skb, 1, GFP_ATOMIC);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}
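
/*
 * For context: a user-space reader on the RAW channel normally installs a
 * filter before expecting any frames from hci_send_to_sock(). A minimal
 * sketch, assuming the usual BlueZ user-space <bluetooth/hci.h> helpers
 * (not part of this file):
 *
 *	struct hci_filter flt;
 *
 *	hci_filter_clear(&flt);
 *	hci_filter_set_ptype(HCI_EVENT_PKT, &flt);
 *	hci_filter_all_events(&flt);
 *	setsockopt(fd, SOL_HCI, HCI_FILTER, &flt, sizeof(flt));
 *
 * Frames whose packet type or event code is not set in the bound socket's
 * filter are silently skipped above.
 */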

/* Send frame to control socket */
void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk)
{
	struct sock *sk;
	struct hlist_node *node;

	BT_DBG("len %d", skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, node, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
			continue;

		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);
}

/* Send frame to monitor socket */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct hlist_node *node;
	struct sk_buff *skb_copy = NULL;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	switch (bt_cb(skb)->pkt_type) {
	case HCI_COMMAND_PKT:
		opcode = __constant_cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = __constant_cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = __constant_cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = __constant_cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = __constant_cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = __constant_cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	default:
		return;
	}

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, node, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
			continue;

		if (!skb_copy) {
			struct hci_mon_hdr *hdr;

			/* Create a private copy with headroom */
			skb_copy = __pskb_copy(skb, HCI_MON_HDR_SIZE,
					       GFP_ATOMIC);
			if (!skb_copy)
				continue;

			/* Put header before the data */
			hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
			hdr->opcode = opcode;
			hdr->index = cpu_to_le16(hdev->id);
			hdr->len = cpu_to_le16(skb->len);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}
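
/*
 * Every frame queued to a monitor socket is prefixed with a struct
 * hci_mon_hdr (see <net/bluetooth/hci_mon.h>): a little-endian opcode
 * (HCI_MON_*), the controller index (hdev->id) and the payload length.
 * A user-space tracer therefore reads one datagram per frame and can
 * demultiplex on (opcode, index) without parsing the payload itself.
 */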

static void send_monitor_event(struct sk_buff *skb)
{
	struct sock *sk;
	struct hlist_node *node;

	BT_DBG("len %d", skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, node, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
			continue;

		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);
}

static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		memcpy(ni->name, hdev->name, 8);

		opcode = __constant_cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = __constant_cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);
	}

	read_unlock(&hci_dev_list_lock);
}

/* Generate internal stack event */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	skb->dev = (void *) hdev;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}

void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	struct hci_ev_si_device ev;

	BT_DBG("hdev %s event %d", hdev->name, event);

	/* Send event to monitor */
	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, event);
		if (skb) {
			send_monitor_event(skb);
			kfree_skb(skb);
		}
	}

	/* Send event to sockets */
	ev.event = event;
	ev.dev_id = hdev->id;
	hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;
		struct hlist_node *node;

		/* Detach sockets from device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, node, &hci_sk_list.head) {
			bh_lock_sock_nested(sk);
			if (hci_pi(sk)->hdev == hdev) {
				hci_pi(sk)->hdev = NULL;
				sk->sk_err = EPIPE;
				sk->sk_state = BT_OPEN;
				sk->sk_state_change(sk);

				hci_dev_put(hdev);
			}
			bh_unlock_sock(sk);
		}
		read_unlock(&hci_sk_list.lock);
	}
}
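
/*
 * Ordering note: send_monitor_replay() runs when a monitor socket binds
 * (see hci_sock_bind() below) and synthesizes one HCI_MON_NEW_INDEX frame
 * per already-registered controller, so a late tracer still learns about
 * every device. Later register/unregister transitions arrive live via
 * hci_sock_dev_event().
 */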

static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	hdev = hci_pi(sk)->hdev;

	if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
		atomic_dec(&monitor_promisc);

	bt_sock_unlink(&hci_sk_list, sk);

	if (hdev) {
		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	sock_put(sk);
	return 0;
}

static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_blacklist_add(hdev, &bdaddr, 0);

	hci_dev_unlock(hdev);

	return err;
}

static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_blacklist_del(hdev, &bdaddr, 0);

	hci_dev_unlock(hdev);

	return err;
}

/* Ioctls that require bound socket */
static inline int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				       unsigned long arg)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return -EBADFD;

	switch (cmd) {
	case HCISETRAW:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;

		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			return -EPERM;

		if (arg)
			set_bit(HCI_RAW, &hdev->flags);
		else
			clear_bit(HCI_RAW, &hdev->flags);

		return 0;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *) arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *) arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_sock_blacklist_add(hdev, (void __user *) arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_sock_blacklist_del(hdev, (void __user *) arg);

	default:
		if (hdev->ioctl)
			return hdev->ioctl(hdev, cmd, arg);
		return -EINVAL;
	}
}

static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	struct sock *sk = sock->sk;
	void __user *argp = (void __user *) arg;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);

	default:
		lock_sock(sk);
		err = hci_sock_bound_ioctl(sk, cmd, arg);
		release_sock(sk);
		return err;
	}
}
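
/*
 * For illustration, bringing a controller up from user space goes through
 * the device ioctl path above. A minimal sketch (hypothetical descriptor,
 * error handling omitted):
 *
 *	int fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	if (ioctl(fd, HCIDEVUP, 0) < 0)		(0 selects hci0)
 *		perror("HCIDEVUP");
 *
 * HCIDEVUP, HCIDEVDOWN, HCIDEVRESET and HCIDEVRESTAT take the device
 * index directly in the argument and require CAP_NET_ADMIN.
 */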

static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_CONTROL:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		send_monitor_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	default:
		err = -EINVAL;
		goto done;
	}

	hci_pi(sk)->channel = haddr.hci_channel;
	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}
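
/*
 * Binding selects the channel. A user-space sketch for attaching to the
 * monitor channel, assuming struct sockaddr_hci from the user-space
 * <bluetooth/hci.h>:
 *
 *	struct sockaddr_hci addr;
 *
 *	memset(&addr, 0, sizeof(addr));
 *	addr.hci_family  = AF_BLUETOOTH;
 *	addr.hci_dev     = HCI_DEV_NONE;
 *	addr.hci_channel = HCI_CHANNEL_MONITOR;
 *	bind(fd, (struct sockaddr *) &addr, sizeof(addr));
 *
 * As enforced in hci_sock_bind() above, the control and monitor channels
 * must be bound with HCI_DEV_NONE; only HCI_CHANNEL_RAW may name a
 * specific controller.
 */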

static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
			    int *addr_len, int peer)
{
	struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!hdev)
		return -EBADFD;

	lock_sock(sk);

	*addr_len = sizeof(*haddr);
	haddr->hci_family = AF_BLUETOOTH;
	haddr->hci_dev = hdev->id;
	/* Initialize hci_channel too, so that no uninitialized kernel
	 * stack bytes are copied back to user space.
	 */
	haddr->hci_channel = 0;

	release_sock(sk);
	return 0;
}

static inline void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
				 struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}

static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
			    struct msghdr *msg, size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	msg->msg_namelen = 0;

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_CONTROL:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	return err ? : copied;
}

static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
			    struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
		return -EINVAL;

	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		break;
	case HCI_CHANNEL_CONTROL:
		err = mgmt_control(sk, msg, len);
		goto done;
	case HCI_CHANNEL_MONITOR:
		err = -EOPNOTSUPP;
		goto done;
	default:
		err = -EINVAL;
		goto done;
	}

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
		err = -EFAULT;
		goto drop;
	}

	bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
	skb_pull(skb, 1);
	skb->dev = (void *) hdev;

	if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (test_bit(HCI_RAW, &hdev->flags) || (ogf == 0x3f)) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}
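
/*
 * On the RAW channel the first byte of each frame written by user space
 * is the H:4 packet type; the remainder is the HCI packet. A sketch of
 * issuing HCI_Reset (opcode 0x0c03, no parameters; buffer layout only,
 * error handling omitted):
 *
 *	unsigned char buf[4];
 *
 *	buf[0] = HCI_COMMAND_PKT;	packet type, stripped by sendmsg
 *	buf[1] = 0x03;			opcode 0x0c03, little endian
 *	buf[2] = 0x0c;
 *	buf[3] = 0x00;			parameter length
 *	write(fd, buf, sizeof(buf));
 *
 * Commands outside the security filter, and all non-command packets,
 * additionally require CAP_NET_RAW, as checked above.
 */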

static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EINVAL;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (get_user(opt, (int __user *) optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (get_user(opt, (int __user *) optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_user(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	struct hci_ufilter uf;
	struct sock *sk = sock->sk;
	int len, opt, err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EINVAL;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_TIME_STAMP:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_to_user(optval, &uf, len))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};

static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};
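
/*
 * hci_sock_create() backs socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 * any other socket type is rejected with -ESOCKTNOSUPPORT. HCI sockets
 * are connectionless, which is why connect(), listen() and accept() in
 * the proto_ops table above are wired to the generic sock_no_* stubs and
 * poll() can reuse datagram_poll().
 */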

static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
			   int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = protocol;

	sock->state = SS_UNCONNECTED;
	sk->sk_state = BT_OPEN;

	bt_sock_link(&hci_sk_list, sk);
	return 0;
}

static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};

int __init hci_sock_init(void)
{
	int err;

	err = proto_register(&hci_sk_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
	if (err < 0)
		goto error;

	BT_INFO("HCI socket layer initialized");

	return 0;

error:
	BT_ERR("HCI socket registration failed");
	proto_unregister(&hci_sk_proto);
	return err;
}

void hci_sock_cleanup(void)
{
	if (bt_sock_unregister(BTPROTO_HCI) < 0)
		BT_ERR("HCI socket unregistration failed");

	proto_unregister(&hci_sk_proto);
}