/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI sockets. */
#include <linux/compat.h>
#include <linux/export.h>
#include <linux/utsname.h>
#include <linux/sched.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_mon.h>
#include <net/bluetooth/mgmt.h>

#include "mgmt_util.h"

static LIST_HEAD(mgmt_chan_list);
static DEFINE_MUTEX(mgmt_chan_list_lock);

static DEFINE_IDA(sock_cookie_ida);

static atomic_t monitor_promisc = ATOMIC_INIT(0);

/* ----- HCI socket interface ----- */

/* Socket info */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

struct hci_pinfo {
	struct bt_sock bt;
	struct hci_dev *hdev;
	struct hci_filter filter;
	__u8 cmsg_mask;
	unsigned short channel;
	unsigned long flags;
	__u32 cookie;
	char comm[TASK_COMM_LEN];
	__u16 mtu;
};

static struct hci_dev *hci_hdev_from_sock(struct sock *sk)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return ERR_PTR(-EBADFD);
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return ERR_PTR(-EPIPE);
	return hdev;
}

void hci_sock_set_flag(struct sock *sk, int nr)
{
	set_bit(nr, &hci_pi(sk)->flags);
}

void hci_sock_clear_flag(struct sock *sk, int nr)
{
	clear_bit(nr, &hci_pi(sk)->flags);
}

int hci_sock_test_flag(struct sock *sk, int nr)
{
	return test_bit(nr, &hci_pi(sk)->flags);
}

unsigned short hci_sock_get_channel(struct sock *sk)
{
	return hci_pi(sk)->channel;
}

u32 hci_sock_get_cookie(struct sock *sk)
{
	return hci_pi(sk)->cookie;
}

static bool hci_sock_gen_cookie(struct sock *sk)
{
	int id = hci_pi(sk)->cookie;

	if (!id) {
		id = ida_simple_get(&sock_cookie_ida, 1, 0, GFP_KERNEL);
		if (id < 0)
			id = 0xffffffff;

		hci_pi(sk)->cookie = id;
		get_task_comm(hci_pi(sk)->comm, current);
		return true;
	}

	return false;
}

static void hci_sock_free_cookie(struct sock *sk)
{
	int id = hci_pi(sk)->cookie;

	if (id) {
		hci_pi(sk)->cookie = 0xffffffff;
		ida_simple_remove(&sock_cookie_ida, id);
	}
}

static inline int hci_test_bit(int nr, const void *addr)
{
	return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}

/* Security filter */
#define HCI_SFLT_MAX_OGF	5
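/* Worked example (illustrative, not part of the original source): for
 * event number 0x0e (Command Complete), hci_test_bit(0x0e, mask) reads
 * 32-bit word 0x0e >> 5 == 0 and tests bit 0x0e & 31 == 14, i.e. bit 14
 * of event_mask[0]. An event number such as 0x2f would land in word 1,
 * bit 15. The same indexing scheme is used for the ocf_mask words of
 * the security filter below.
 */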
struct hci_sec_filter {
	__u32 type_mask;
	__u32 event_mask[2];
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};

static const struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};

static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};

static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
	struct hci_filter *flt;
	int flt_type, flt_event;

	/* Apply filter */
	flt = &hci_pi(sk)->filter;

	flt_type = hci_skb_pkt_type(skb) & HCI_FLT_TYPE_BITS;

	if (!test_bit(flt_type, &flt->type_mask))
		return true;

	/* Extra filter for event packets only */
	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
		return false;

	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

	if (!hci_test_bit(flt_event, &flt->event_mask))
		return true;

	/* Check filter only when opcode is set */
	if (!flt->opcode)
		return false;

	if (flt_event == HCI_EV_CMD_COMPLETE &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
		return true;

	if (flt_event == HCI_EV_CMD_STATUS &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
		return true;

	return false;
}

/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
			    hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT)
				continue;
			if (is_filtered_packet(sk, skb))
				continue;
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			if (!bt_cb(skb)->incoming)
				continue;
			if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT)
				continue;
		} else {
			/* Don't send frame to other channel types */
			continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}
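/* A minimal userspace sketch (illustrative only, not part of this file)
 * of the receive path above: bind a raw HCI socket and install a filter
 * so that only HCI event packets are delivered. The filter layout
 * mirrors struct hci_ufilter handled in hci_sock_setsockopt_old() below;
 * without CAP_NET_RAW the kernel masks the request against
 * hci_sec_filter.
 *
 *	int fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *	struct sockaddr_hci addr = {
 *		.hci_family  = AF_BLUETOOTH,
 *		.hci_dev     = 0,		// hci0
 *		.hci_channel = HCI_CHANNEL_RAW,
 *	};
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 *
 *	struct {
 *		uint32_t type_mask;
 *		uint32_t event_mask[2];
 *		uint16_t opcode;
 *	} flt = { 0 };
 *	flt.type_mask = 1U << HCI_EVENT_PKT;	// pass event packets only
 *	flt.event_mask[0] = ~0U;		// events 0-31
 *	flt.event_mask[1] = ~0U;		// events 32-63
 *	setsockopt(fd, SOL_HCI, HCI_FILTER, &flt, sizeof(flt));
 */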
/* Send frame to sockets with specific channel */
static void __hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
				  int flag, struct sock *skip_sk)
{
	struct sock *sk;

	BT_DBG("channel %u len %d", channel, skb->len);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Ignore socket without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != channel)
			continue;

		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}
}

void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
			 int flag, struct sock *skip_sk)
{
	read_lock(&hci_sk_list.lock);
	__hci_send_to_channel(channel, skb, flag, skip_sk);
	read_unlock(&hci_sk_list.lock);
}

/* Send frame to monitor socket */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sk_buff *skb_copy = NULL;
	struct hci_mon_hdr *hdr;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	switch (hci_skb_pkt_type(skb)) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	case HCI_ISODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ISO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ISO_TX_PKT);
		break;
	case HCI_DIAG_PKT:
		opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
		break;
	default:
		return;
	}

	/* Create a private copy with headroom */
	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
	if (!skb_copy)
		return;

	/* Put header before the data */
	hdr = skb_push(skb_copy, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
			    HCI_SOCK_TRUSTED, NULL);
	kfree_skb(skb_copy);
}

void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event,
				 void *data, u16 data_len, ktime_t tstamp,
				 int flag, struct sock *skip_sk)
{
	struct sock *sk;
	__le16 index;

	if (hdev)
		index = cpu_to_le16(hdev->id);
	else
		index = cpu_to_le16(MGMT_INDEX_NONE);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct hci_mon_hdr *hdr;
		struct sk_buff *skb;

		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
			continue;

		/* Ignore socket without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		skb = bt_skb_alloc(6 + data_len, GFP_ATOMIC);
		if (!skb)
			continue;

		put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
		put_unaligned_le16(event, skb_put(skb, 2));

		if (data)
			skb_put_data(skb, data, data_len);

		skb->tstamp = tstamp;
		hdr = skb_push(skb, HCI_MON_HDR_SIZE);
		hdr->opcode = cpu_to_le16(HCI_MON_CTRL_EVENT);
		hdr->index = index;
		hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

		__hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
				      HCI_SOCK_TRUSTED, NULL);
		kfree_skb(skb);
	}

	read_unlock(&hci_sk_list.lock);
}

static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct hci_mon_index_info *ii;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		memcpy(ni->name, hdev->name, 8);

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	case HCI_DEV_SETUP:
		if (hdev->manufacturer == 0xffff)
			return NULL;
		fallthrough;

	case HCI_DEV_UP:
		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ii = skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
		bacpy(&ii->bdaddr, &hdev->bdaddr);
		ii->manufacturer = cpu_to_le16(hdev->manufacturer);

		opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
		break;

	case HCI_DEV_OPEN:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
		break;

	case HCI_DEV_CLOSE:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
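/* For reference (derived from the code above, not new ABI): every frame
 * queued to HCI_CHANNEL_MONITOR starts with struct hci_mon_hdr, three
 * little-endian 16-bit fields, followed by the payload:
 *
 *	offset 0: opcode  (HCI_MON_* message type)
 *	offset 2: index   (controller id, or HCI_DEV_NONE)
 *	offset 4: len     (payload length after this header)
 *
 * This is the stream that btmon(1) parses.
 */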
static struct sk_buff *create_monitor_ctrl_open(struct sock *sk)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	u16 format;
	u8 ver[3];
	u32 flags;

	/* No message needed when cookie is not present */
	if (!hci_pi(sk)->cookie)
		return NULL;

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		format = 0x0000;
		ver[0] = BT_SUBSYS_VERSION;
		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
		break;
	case HCI_CHANNEL_USER:
		format = 0x0001;
		ver[0] = BT_SUBSYS_VERSION;
		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
		break;
	case HCI_CHANNEL_CONTROL:
		format = 0x0002;
		mgmt_fill_version_info(ver);
		break;
	default:
		/* No message for unsupported format */
		return NULL;
	}

	skb = bt_skb_alloc(14 + TASK_COMM_LEN, GFP_ATOMIC);
	if (!skb)
		return NULL;

	flags = hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) ? 0x1 : 0x0;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
	put_unaligned_le16(format, skb_put(skb, 2));
	skb_put_data(skb, ver, sizeof(ver));
	put_unaligned_le32(flags, skb_put(skb, 4));
	skb_put_u8(skb, TASK_COMM_LEN);
	skb_put_data(skb, hci_pi(sk)->comm, TASK_COMM_LEN);

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_OPEN);
	if (hci_pi(sk)->hdev)
		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
	else
		hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

static struct sk_buff *create_monitor_ctrl_close(struct sock *sk)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;

	/* No message needed when cookie is not present */
	if (!hci_pi(sk)->cookie)
		return NULL;

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
		break;
	default:
		/* No message for unsupported format */
		return NULL;
	}

	skb = bt_skb_alloc(4, GFP_ATOMIC);
	if (!skb)
		return NULL;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_CLOSE);
	if (hci_pi(sk)->hdev)
		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
	else
		hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

static struct sk_buff *create_monitor_ctrl_command(struct sock *sk, u16 index,
						   u16 opcode, u16 len,
						   const void *buf)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(6 + len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
	put_unaligned_le16(opcode, skb_put(skb, 2));

	if (buf)
		skb_put_data(skb, buf, len);

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_COMMAND);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
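/* Payload layout of the HCI_MON_CTRL_OPEN message built in
 * create_monitor_ctrl_open() above, for reference (14 + TASK_COMM_LEN
 * bytes after the monitor header):
 *
 *	cookie (4, LE) | format (2, LE) | version (3) | flags (4, LE) |
 *	comm length (1) | comm (TASK_COMM_LEN)
 */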
static void __printf(2, 3)
send_monitor_note(struct sock *sk, const char *fmt, ...)
{
	size_t len;
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	va_list args;

	va_start(args, fmt);
	len = vsnprintf(NULL, 0, fmt, args);
	va_end(args);

	skb = bt_skb_alloc(len + 1, GFP_ATOMIC);
	if (!skb)
		return;

	va_start(args, fmt);
	vsprintf(skb_put(skb, len), fmt, args);
	*(u8 *)skb_put(skb, 1) = 0;
	va_end(args);

	__net_timestamp(skb);

	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_SYSTEM_NOTE);
	hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	if (sock_queue_rcv_skb(sk, skb))
		kfree_skb(skb);
}

static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (!test_bit(HCI_RUNNING, &hdev->flags))
			continue;

		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (test_bit(HCI_UP, &hdev->flags))
			skb = create_monitor_event(hdev, HCI_DEV_UP);
		else if (hci_dev_test_flag(hdev, HCI_SETUP))
			skb = create_monitor_event(hdev, HCI_DEV_SETUP);
		else
			skb = NULL;

		if (skb) {
			if (sock_queue_rcv_skb(sk, skb))
				kfree_skb(skb);
		}
	}

	read_unlock(&hci_dev_list_lock);
}

static void send_monitor_control_replay(struct sock *mon_sk)
{
	struct sock *sk;

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *skb;

		skb = create_monitor_ctrl_open(sk);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(mon_sk, skb))
			kfree_skb(skb);
	}

	read_unlock(&hci_sk_list.lock);
}

/* Generate internal stack event */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}
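/* Illustration (derived from hci_si_event() and hci_send_to_sock(),
 * assuming the usual constant values HCI_EV_STACK_INTERNAL == 0xfd and
 * HCI_EV_SI_DEVICE == 0x01): a raw socket reading the device event
 * generated for "hci0 up" would see these bytes:
 *
 *	04		HCI_EVENT_PKT (type byte pushed by hci_send_to_sock)
 *	fd		HCI_EV_STACK_INTERNAL
 *	06		plen: sizeof(*ev) + dlen
 *	01 00		ev->type = HCI_EV_SI_DEVICE, LE
 *	03 00		ev.event = HCI_DEV_UP, LE
 *	00 00		ev.dev_id = 0, LE
 */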
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	BT_DBG("hdev %s event %d", hdev->name, event);

	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_event(hdev, event);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	if (event <= HCI_DEV_DOWN) {
		struct hci_ev_si_device ev;

		/* Send event to sockets */
		ev.event = event;
		ev.dev_id = hdev->id;
		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
	}

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Wake up sockets using this dead device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			if (hci_pi(sk)->hdev == hdev) {
				sk->sk_err = EPIPE;
				sk->sk_state_change(sk);
			}
		}
		read_unlock(&hci_sk_list.lock);
	}
}

static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	list_for_each_entry(c, &mgmt_chan_list, list) {
		if (c->channel == channel)
			return c;
	}

	return NULL;
}

static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	mutex_lock(&mgmt_chan_list_lock);
	c = __hci_mgmt_chan_find(channel);
	mutex_unlock(&mgmt_chan_list_lock);

	return c;
}

int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
{
	if (c->channel < HCI_CHANNEL_CONTROL)
		return -EINVAL;

	mutex_lock(&mgmt_chan_list_lock);
	if (__hci_mgmt_chan_find(c->channel)) {
		mutex_unlock(&mgmt_chan_list_lock);
		return -EALREADY;
	}

	list_add_tail(&c->list, &mgmt_chan_list);

	mutex_unlock(&mgmt_chan_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_mgmt_chan_register);

void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
{
	mutex_lock(&mgmt_chan_list_lock);
	list_del(&c->list);
	mutex_unlock(&mgmt_chan_list_lock);
}
EXPORT_SYMBOL(hci_mgmt_chan_unregister);
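/* Hypothetical usage sketch for the registration API above (mgmt.c
 * registers HCI_CHANNEL_CONTROL this way; the field names follow what
 * hci_mgmt_cmd() below consumes, my_handlers/my_chan are made up):
 *
 *	static const struct hci_mgmt_handler my_handlers[] = {
 *		{ my_cmd_func, MY_CMD_SIZE, HCI_MGMT_NO_HDEV },
 *	};
 *
 *	static struct hci_mgmt_chan my_chan = {
 *		.channel       = HCI_CHANNEL_CONTROL,
 *		.handler_count = ARRAY_SIZE(my_handlers),
 *		.handlers      = my_handlers,
 *	};
 *
 *	err = hci_mgmt_chan_register(&my_chan);
 */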
static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	struct sk_buff *skb;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_MONITOR:
		atomic_dec(&monitor_promisc);
		break;
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
		/* Send event to monitor */
		skb = create_monitor_ctrl_close(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}

		hci_sock_free_cookie(sk);
		break;
	}

	bt_sock_unlink(&hci_sk_list, sk);

	hdev = hci_pi(sk)->hdev;
	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER &&
		    !hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
			/* When releasing a user channel exclusive access,
			 * call hci_dev_do_close directly instead of calling
			 * hci_dev_close to ensure the exclusive access will
			 * be released and the controller brought back down.
			 *
			 * The checking of HCI_AUTO_OFF is not needed in this
			 * case since it will have been cleared already when
			 * opening the user channel.
			 *
			 * Make sure to also check that we haven't already
			 * unregistered, since in that case all the cleanup
			 * will already have been completed and hdev will be
			 * released by the hci_dev_put() below.
			 */
			hci_dev_do_close(hdev);
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			hci_register_suspend_notifier(hdev);
			mgmt_index_added(hdev);
		}

		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);
	release_sock(sk);
	sock_put(sk);
	return 0;
}

static int hci_sock_reject_list_add(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_add(&hdev->reject_list, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}

static int hci_sock_reject_list_del(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_del(&hdev->reject_list, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}

/* Ioctls that require bound socket */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_hdev_from_sock(sk);

	if (IS_ERR(hdev))
		return PTR_ERR(hdev);

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return -EOPNOTSUPP;

	if (hdev->dev_type != HCI_PRIMARY)
		return -EOPNOTSUPP;

	switch (cmd) {
	case HCISETRAW:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return -EOPNOTSUPP;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *)arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *)arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_reject_list_add(hdev, (void __user *)arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_reject_list_del(hdev, (void __user *)arg);
	}

	return -ENOIOCTLCMD;
}
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	/* When calling an ioctl on an unbound raw socket, ensure that the
	 * monitor gets informed. The resulting event is only sent once,
	 * which is guaranteed by checking whether the cookie already
	 * exists: the socket cookie is only ever generated once for the
	 * lifetime of a given socket.
	 */
	if (hci_sock_gen_cookie(sk)) {
		struct sk_buff *skb;

		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}

#ifdef CONFIG_COMPAT
static int hci_sock_compat_ioctl(struct socket *sock, unsigned int cmd,
				 unsigned long arg)
{
	switch (cmd) {
	case HCIDEVUP:
	case HCIDEVDOWN:
	case HCIDEVRESET:
	case HCIDEVRESTAT:
		return hci_sock_ioctl(sock, cmd, arg);
	}

	return hci_sock_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
}
#endif
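/* Minimal userspace sketch (illustrative, not part of this file) of the
 * unbound-socket ioctl path above, querying controller information the
 * way hciconfig does:
 *
 *	int fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *	struct hci_dev_info di = { .dev_id = 0 };	// hci0
 *	if (ioctl(fd, HCIGETDEVINFO, &di) == 0)
 *		printf("%s\n", di.name);
 */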
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	struct sk_buff *skb;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	/* Allow detaching from a dead device and attaching to a live one,
	 * if the caller wants to re-bind (instead of close) this socket in
	 * response to the hci_sock_dev_event(HCI_DEV_UNREG) notification.
	 */
	hdev = hci_pi(sk)->hdev;
	if (hdev && hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		hci_pi(sk)->hdev = NULL;
		sk->sk_state = BT_OPEN;
		hci_dev_put(hdev);
	}
	hdev = NULL;

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		if (!hci_sock_gen_cookie(sk)) {
			/* In the case when a cookie has already been assigned,
			 * an ioctl has already been issued against this socket
			 * while unbound and with that triggered an open
			 * notification. Send a close notification first to
			 * allow the state transition to bound.
			 */
			skb = create_monitor_ctrl_close(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}
		}

		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->hdev = hdev;

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
		break;

	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev == HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		if (test_bit(HCI_INIT, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
		     test_bit(HCI_UP, &hdev->flags))) {
			err = -EBUSY;
			hci_dev_put(hdev);
			goto done;
		}

		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
			err = -EUSERS;
			hci_dev_put(hdev);
			goto done;
		}

		mgmt_index_removed(hdev);
		hci_unregister_suspend_notifier(hdev);

		err = hci_dev_open(hdev->id);
		if (err) {
			if (err == -EALREADY) {
				/* In case the transport is already up and
				 * running, clear the error here.
				 *
				 * This can happen when opening a user
				 * channel and HCI_AUTO_OFF grace period
				 * is still active.
				 */
				err = 0;
			} else {
				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
				hci_register_suspend_notifier(hdev);
				mgmt_index_added(hdev);
				hci_dev_put(hdev);
				goto done;
			}
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		if (!hci_sock_gen_cookie(sk)) {
			/* In the case when a cookie has already been assigned,
			 * this socket will transition from a raw socket into
			 * a user channel socket. For a clean transition, send
			 * the close notification first.
			 */
			skb = create_monitor_ctrl_close(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}
		}

		/* The user channel is restricted to CAP_NET_ADMIN
		 * capabilities and is therefore implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->hdev = hdev;

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}

		atomic_inc(&hdev->promisc);
		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		/* The monitor interface is restricted to CAP_NET_RAW
		 * capabilities and is therefore implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		send_monitor_note(sk, "Linux version %s (%s)",
				  init_utsname()->release,
				  init_utsname()->machine);
		send_monitor_note(sk, "Bluetooth subsystem version %u.%u",
				  BT_SUBSYS_VERSION, BT_SUBSYS_REVISION);
		send_monitor_replay(sk);
		send_monitor_control_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	case HCI_CHANNEL_LOGGING:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hci_pi(sk)->channel = haddr.hci_channel;
		break;

	default:
		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
			err = -EINVAL;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		/* Users with CAP_NET_ADMIN capabilities are allowed
		 * access to all management commands and events. For
		 * untrusted users the interface is restricted and
		 * also only untrusted events are sent.
		 */
		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->channel = haddr.hci_channel;

		/* At the moment the index and unconfigured index events
		 * are enabled unconditionally. Setting them on each
		 * socket when binding keeps this functionality. They
		 * however might be cleared later and then sending of these
		 * events will be disabled, but that is then intentional.
		 *
		 * This also enables generic events that are safe to be
		 * received by untrusted users. Examples of such events
		 * are changes to settings, class of device, name etc.
		 */
		if (hci_pi(sk)->channel == HCI_CHANNEL_CONTROL) {
			if (!hci_sock_gen_cookie(sk)) {
				/* In the case when a cookie has already been
				 * assigned, this socket will transition from
				 * a raw socket into a control socket. To
				 * allow for a clean transition, send the
				 * close notification first.
				 */
				skb = create_monitor_ctrl_close(sk);
				if (skb) {
					hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
							    HCI_SOCK_TRUSTED, NULL);
					kfree_skb(skb);
				}
			}

			/* Send event to monitor */
			skb = create_monitor_ctrl_open(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}

			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_OPTION_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_SETTING_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
		}
		break;
	}

	/* Default MTU to HCI_MAX_FRAME_SIZE if not set */
	if (!hci_pi(sk)->mtu)
		hci_pi(sk)->mtu = HCI_MAX_FRAME_SIZE;

	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}

static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
			    int peer)
{
	struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	int err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (peer)
		return -EOPNOTSUPP;

	lock_sock(sk);

	hdev = hci_hdev_from_sock(sk);
	if (IS_ERR(hdev)) {
		err = PTR_ERR(hdev);
		goto done;
	}

	haddr->hci_family = AF_BLUETOOTH;
	haddr->hci_dev = hdev->id;
	haddr->hci_channel = hci_pi(sk)->channel;
	err = sizeof(*haddr);

done:
	release_sock(sk);
	return err;
}

static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u8 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct old_timeval32 ctv;
#endif
		struct __kernel_old_timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}
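/* Userspace sketch (illustrative only) of consuming the ancillary data
 * produced above: enable HCI_DATA_DIR, then pick the direction flag out
 * of the control buffer on every recvmsg().
 *
 *	int opt = 1, incoming = 0;
 *	setsockopt(fd, SOL_HCI, HCI_DATA_DIR, &opt, sizeof(opt));
 *
 *	char buf[HCI_MAX_FRAME_SIZE], cbuf[64];
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct msghdr msg = {
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
 *	};
 *	recvmsg(fd, &msg, 0);
 *
 *	for (struct cmsghdr *c = CMSG_FIRSTHDR(&msg); c;
 *	     c = CMSG_NXTHDR(&msg, c))
 *		if (c->cmsg_level == SOL_HCI && c->cmsg_type == HCI_CMSG_DIR)
 *			incoming = *(int *)CMSG_DATA(c);
 */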
static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
			    size_t len, int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;
	unsigned int skblen;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (hci_pi(sk)->channel == HCI_CHANNEL_LOGGING)
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, &err);
	if (!skb)
		return err;

	skblen = skb->len;
	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	default:
		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
			sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	if (flags & MSG_TRUNC)
		copied = skblen;

	return err ? : copied;
}

static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
			struct sk_buff *skb)
{
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct hci_mgmt_handler *handler;
	bool var_len, no_hdev;
	int err;

	BT_DBG("got %d bytes", skb->len);

	if (skb->len < sizeof(*hdr))
		return -EINVAL;

	hdr = (void *)skb->data;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	if (len != skb->len - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (chan->channel == HCI_CHANNEL_CONTROL) {
		struct sk_buff *cmd;

		/* Send event to monitor */
		cmd = create_monitor_ctrl_command(sk, index, opcode, len,
						  skb->data + sizeof(*hdr));
		if (cmd) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, cmd,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(cmd);
		}
	}

	if (opcode >= chan->handler_count ||
	    chan->handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	handler = &chan->handlers[opcode];

	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_PERMISSION_DENIED);
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	if (!(handler->flags & HCI_MGMT_HDEV_OPTIONAL)) {
		no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
		if (no_hdev != !hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
	if ((var_len && len < handler->data_len) ||
	    (!var_len && len != handler->data_len)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev && chan->hdev_init)
		chan->hdev_init(sk, hdev);

	cp = skb->data + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = skb->len;

done:
	if (hdev)
		hci_dev_put(hdev);

	return err;
}
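/* Wire format consumed by hci_mgmt_cmd() above, for reference: every
 * management command starts with struct mgmt_hdr, three little-endian
 * 16-bit fields, followed by len bytes of command parameters:
 *
 *	opcode (2, LE) | index (2, LE) | len (2, LE) | param[len]
 *
 * index is a controller id or MGMT_INDEX_NONE.
 */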
static int hci_logging_frame(struct sock *sk, struct sk_buff *skb,
			     unsigned int flags)
{
	struct hci_mon_hdr *hdr;
	struct hci_dev *hdev;
	u16 index;
	int err;

	/* The logging frame consists at minimum of the standard header,
	 * the priority byte, the ident length byte and at least one string
	 * terminator NUL byte. Anything shorter is an invalid packet.
	 */
	if (skb->len < sizeof(*hdr) + 3)
		return -EINVAL;

	hdr = (void *)skb->data;

	if (__le16_to_cpu(hdr->len) != skb->len - sizeof(*hdr))
		return -EINVAL;

	if (__le16_to_cpu(hdr->opcode) == 0x0000) {
		__u8 priority = skb->data[sizeof(*hdr)];
		__u8 ident_len = skb->data[sizeof(*hdr) + 1];

		/* Only the priorities 0-7 are valid and with that any other
		 * value results in an invalid packet.
		 *
		 * The priority byte is followed by an ident length byte and
		 * the NUL terminated ident string. Check that the ident
		 * length is not overflowing the packet and also that the
		 * ident string itself is NUL terminated. In case the ident
		 * length is zero, the length value actually doubles as NUL
		 * terminator identifier.
		 *
		 * The message follows the ident string (if present) and
		 * must be NUL terminated. Otherwise it is not a valid packet.
		 */
		if (priority > 7 || skb->data[skb->len - 1] != 0x00 ||
		    ident_len > skb->len - sizeof(*hdr) - 3 ||
		    skb->data[sizeof(*hdr) + ident_len + 1] != 0x00)
			return -EINVAL;
	} else {
		return -EINVAL;
	}

	index = __le16_to_cpu(hdr->index);

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev)
			return -ENODEV;
	} else {
		hdev = NULL;
	}

	hdr->opcode = cpu_to_le16(HCI_MON_USER_LOGGING);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL);
	err = skb->len;

	if (hdev)
		hci_dev_put(hdev);

	return err;
}
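/* Example of a well-formed logging frame (illustrative): a process
 * named "foo" logging the message "hi" at priority 6 over
 * HCI_CHANNEL_LOGGING sends, after struct hci_mon_hdr with
 * opcode = 0x0000 and len = 9:
 *
 *	06			priority (0-7)
 *	04			ident_len ("foo" plus its NUL)
 *	66 6f 6f 00		"foo\0"
 *	68 69 00		"hi\0"
 *
 * The checks above then see the byte at sizeof(*hdr) + ident_len + 1
 * and the last byte of the frame both equal to 0x00.
 */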
static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_mgmt_chan *chan;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;
	const unsigned int flags = msg->msg_flags;

	BT_DBG("sock %p sk %p", sock, sk);

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (flags & ~(MSG_DONTWAIT | MSG_NOSIGNAL | MSG_ERRQUEUE | MSG_CMSG_COMPAT))
		return -EINVAL;

	if (len < 4 || len > hci_pi(sk)->mtu)
		return -EINVAL;

	skb = bt_skb_sendmsg(sk, msg, len, len, 0, 0);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		break;
	case HCI_CHANNEL_MONITOR:
		err = -EOPNOTSUPP;
		goto drop;
	case HCI_CHANNEL_LOGGING:
		err = hci_logging_frame(sk, skb, flags);
		goto drop;
	default:
		mutex_lock(&mgmt_chan_list_lock);
		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
		if (chan)
			err = hci_mgmt_cmd(chan, sk, skb);
		else
			err = -EINVAL;

		mutex_unlock(&mgmt_chan_list_lock);
		goto drop;
	}

	hdev = hci_hdev_from_sock(sk);
	if (IS_ERR(hdev)) {
		err = PTR_ERR(hdev);
		goto drop;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto drop;
	}

	hci_skb_pkt_type(skb) = skb->data[0];
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However check that the packet type is valid.
		 */
		if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		/* Since the opcode has already been extracted here, store
		 * a copy of the value for later use by the drivers.
		 */
		hci_skb_opcode(skb) = opcode;

		if (ogf == 0x3f) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}
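/* Userspace sketch (illustrative only) of the raw-channel send path
 * above: one packet per send(), the first byte being the packet type.
 * HCI_Read_Local_Version_Information (OGF 0x04, OCF 0x0001, opcode
 * 0x1001) is used here because OGF_INFO_PARAM bit 1 is set in
 * hci_sec_filter, so this command passes the security filter even
 * without CAP_NET_RAW.
 *
 *	uint8_t pkt[] = {
 *		0x01,		// HCI_COMMAND_PKT
 *		0x01, 0x10,	// opcode 0x1001, little endian
 *		0x00,		// parameter length
 *	};
 *	send(fd, pkt, sizeof(pkt), 0);
 */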
static int hci_sock_setsockopt_old(struct socket *sock, int level, int optname,
				   sockptr_t optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_sockptr(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
			       sockptr_t optval, unsigned int len)
{
	struct sock *sk = sock->sk;
	int err = 0;
	u16 opt;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (level == SOL_HCI)
		return hci_sock_setsockopt_old(sock, level, optname, optval,
					       len);

	if (level != SOL_BLUETOOTH)
		return -ENOPROTOOPT;

	lock_sock(sk);

	switch (optname) {
	case BT_SNDMTU:
	case BT_RCVMTU:
		switch (hci_pi(sk)->channel) {
		/* Don't allow changing MTU for channels that are meant for HCI
		 * traffic only.
		 */
		case HCI_CHANNEL_RAW:
		case HCI_CHANNEL_USER:
			err = -ENOPROTOOPT;
			goto done;
		}

		if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
			err = -EFAULT;
			break;
		}

		hci_pi(sk)->mtu = opt;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

static int hci_sock_getsockopt_old(struct socket *sock, int level, int optname,
				   char __user *optval, int __user *optlen)
{
	struct hci_ufilter uf;
	struct sock *sk = sock->sk;
	int len, opt, err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_TIME_STAMP:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			memset(&uf, 0, sizeof(uf));
			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_to_user(optval, &uf, len))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (level == SOL_HCI)
		return hci_sock_getsockopt_old(sock, level, optname, optval,
					       optlen);

	if (level != SOL_BLUETOOTH)
		return -ENOPROTOOPT;

	lock_sock(sk);

	switch (optname) {
	case BT_SNDMTU:
	case BT_RCVMTU:
		if (put_user(hci_pi(sk)->mtu, (u16 __user *)optval))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}

static void hci_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
}
static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= hci_sock_compat_ioctl,
#endif
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};

static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};

static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
			   int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = protocol;

	sock->state = SS_UNCONNECTED;
	sk->sk_state = BT_OPEN;
	sk->sk_destruct = hci_sock_destruct;

	bt_sock_link(&hci_sk_list, sk);
	return 0;
}

static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};

int __init hci_sock_init(void)
{
	int err;

	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));

	err = proto_register(&hci_sk_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
	if (err < 0) {
		BT_ERR("HCI socket registration failed");
		goto error;
	}

	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
	if (err < 0) {
		BT_ERR("Failed to create HCI proc file");
		bt_sock_unregister(BTPROTO_HCI);
		goto error;
	}

	BT_INFO("HCI socket layer initialized");

	return 0;

error:
	proto_unregister(&hci_sk_proto);
	return err;
}

void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}