/*
 * raw.c - Raw sockets for protocol family CAN
 *
 * Copyright (c) 2002-2007 Volkswagen Group Electronic Research
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Volkswagen nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * Alternatively, provided that this notice is retained in full, this
 * software may be distributed under the terms of the GNU General
 * Public License ("GPL") version 2, in which case the provisions of the
 * GPL apply INSTEAD OF those given above.
 *
 * The provided data structures and external interfaces from this code
 * are not restricted to be used by modules with a GPL compatible license.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/uio.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <linux/can.h>
#include <linux/can/core.h>
#include <linux/can/skb.h>
#include <linux/can/raw.h>
#include <net/sock.h>
#include <net/net_namespace.h>

#define CAN_RAW_VERSION CAN_VERSION

MODULE_DESCRIPTION("PF_CAN raw protocol");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Urs Thuermann <urs.thuermann@volkswagen.de>");
MODULE_ALIAS("can-proto-1");

#define MASK_ALL 0

/*
 * A raw socket has a list of can_filters attached to it, each receiving
 * the CAN frames matching that filter. If the filter list is empty,
 * no CAN frames will be received by the socket. The default after
 * opening the socket, is to have one filter which receives all frames.
 * The filter list is allocated dynamically with the exception of the
 * list containing only one item. This common case is optimized by
 * storing the single filter in dfilter, to avoid using dynamic memory.
 */
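
/*
 * A minimal userspace sketch (illustrative only, not part of this file;
 * assumes <linux/can.h>, <linux/can/raw.h>, <net/if.h> and <sys/socket.h>):
 * filters are installed with setsockopt(SOL_CAN_RAW, CAN_RAW_FILTER).
 * The interface name "can0" and the CAN IDs are example values.
 *
 *	int s = socket(PF_CAN, SOCK_RAW, CAN_RAW);
 *	struct can_filter rfilter[2] = {
 *		{ .can_id = 0x123, .can_mask = CAN_SFF_MASK },
 *		{ .can_id = 0x200, .can_mask = 0x700 },
 *	};
 *	struct sockaddr_can addr = { .can_family = AF_CAN };
 *
 *	setsockopt(s, SOL_CAN_RAW, CAN_RAW_FILTER, &rfilter, sizeof(rfilter));
 *	addr.can_ifindex = if_nametoindex("can0");
 *	bind(s, (struct sockaddr *)&addr, sizeof(addr));
 */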

struct uniqframe {
	int skbcnt;
	const struct sk_buff *skb;
	unsigned int join_rx_count;
};

struct raw_sock {
	struct sock sk;
	int bound;
	int ifindex;
	struct notifier_block notifier;
	int loopback;
	int recv_own_msgs;
	int fd_frames;
	int join_filters;
	int count;                 /* number of active filters */
	struct can_filter dfilter; /* default/single filter */
	struct can_filter *filter; /* pointer to filter(s) */
	can_err_mask_t err_mask;
	struct uniqframe __percpu *uniq;
};

/*
 * Return pointer to store the extra msg flags for raw_recvmsg().
 * We use the space of one unsigned int beyond the 'struct sockaddr_can'
 * in skb->cb.
 */
static inline unsigned int *raw_flags(struct sk_buff *skb)
{
	sock_skb_cb_check_size(sizeof(struct sockaddr_can) +
			       sizeof(unsigned int));

	/* return pointer after struct sockaddr_can */
	return (unsigned int *)(&((struct sockaddr_can *)skb->cb)[1]);
}

static inline struct raw_sock *raw_sk(const struct sock *sk)
{
	return (struct raw_sock *)sk;
}
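
/*
 * raw_rcv() is the receive callback registered per filter with
 * can_rx_register(). The per-CPU uniqframe remembers the last skb seen
 * so that a frame matching several of the socket's filters is queued
 * only once; with CAN_RAW_JOIN_FILTERS enabled it is queued only after
 * all active filters have matched it.
 */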
static void raw_rcv(struct sk_buff *oskb, void *data)
{
	struct sock *sk = (struct sock *)data;
	struct raw_sock *ro = raw_sk(sk);
	struct sockaddr_can *addr;
	struct sk_buff *skb;
	unsigned int *pflags;

	/* check the received tx sock reference */
	if (!ro->recv_own_msgs && oskb->sk == sk)
		return;

	/* do not pass non-CAN2.0 frames to a legacy socket */
	if (!ro->fd_frames && oskb->len != CAN_MTU)
		return;

	/* eliminate multiple filter matches for the same skb */
	if (this_cpu_ptr(ro->uniq)->skb == oskb &&
	    this_cpu_ptr(ro->uniq)->skbcnt == can_skb_prv(oskb)->skbcnt) {
		if (ro->join_filters) {
			this_cpu_inc(ro->uniq->join_rx_count);
			/* drop frame until all enabled filters matched */
			if (this_cpu_ptr(ro->uniq)->join_rx_count < ro->count)
				return;
		} else {
			return;
		}
	} else {
		this_cpu_ptr(ro->uniq)->skb = oskb;
		this_cpu_ptr(ro->uniq)->skbcnt = can_skb_prv(oskb)->skbcnt;
		this_cpu_ptr(ro->uniq)->join_rx_count = 1;
		/* drop first frame to check all enabled filters? */
		if (ro->join_filters && ro->count > 1)
			return;
	}

	/* clone the given skb to be able to enqueue it into the rcv queue */
	skb = skb_clone(oskb, GFP_ATOMIC);
	if (!skb)
		return;

	/*
	 * Put the datagram to the queue so that raw_recvmsg() can
	 * get it from there. We need to pass the interface index to
	 * raw_recvmsg(). We pass a whole struct sockaddr_can in skb->cb
	 * containing the interface index.
	 */

	sock_skb_cb_check_size(sizeof(struct sockaddr_can));
	addr = (struct sockaddr_can *)skb->cb;
	memset(addr, 0, sizeof(*addr));
	addr->can_family = AF_CAN;
	addr->can_ifindex = skb->dev->ifindex;

	/* add CAN specific message flags for raw_recvmsg() */
	pflags = raw_flags(skb);
	*pflags = 0;
	if (oskb->sk)
		*pflags |= MSG_DONTROUTE;
	if (oskb->sk == sk)
		*pflags |= MSG_CONFIRM;

	if (sock_queue_rcv_skb(sk, skb) < 0)
		kfree_skb(skb);
}

static int raw_enable_filters(struct net_device *dev, struct sock *sk,
			      struct can_filter *filter, int count)
{
	int err = 0;
	int i;

	for (i = 0; i < count; i++) {
		err = can_rx_register(dev, filter[i].can_id,
				      filter[i].can_mask,
				      raw_rcv, sk, "raw", sk);
		if (err) {
			/* clean up successfully registered filters */
			while (--i >= 0)
				can_rx_unregister(dev, filter[i].can_id,
						  filter[i].can_mask,
						  raw_rcv, sk);
			break;
		}
	}

	return err;
}

static int raw_enable_errfilter(struct net_device *dev, struct sock *sk,
				can_err_mask_t err_mask)
{
	int err = 0;

	if (err_mask)
		err = can_rx_register(dev, 0, err_mask | CAN_ERR_FLAG,
				      raw_rcv, sk, "raw", sk);

	return err;
}

static void raw_disable_filters(struct net_device *dev, struct sock *sk,
				struct can_filter *filter, int count)
{
	int i;

	for (i = 0; i < count; i++)
		can_rx_unregister(dev, filter[i].can_id, filter[i].can_mask,
				  raw_rcv, sk);
}

static inline void raw_disable_errfilter(struct net_device *dev,
					 struct sock *sk,
					 can_err_mask_t err_mask)
{
	if (err_mask)
		can_rx_unregister(dev, 0, err_mask | CAN_ERR_FLAG,
				  raw_rcv, sk);
}

static inline void raw_disable_allfilters(struct net_device *dev,
					  struct sock *sk)
{
	struct raw_sock *ro = raw_sk(sk);

	raw_disable_filters(dev, sk, ro->filter, ro->count);
	raw_disable_errfilter(dev, sk, ro->err_mask);
}

static int raw_enable_allfilters(struct net_device *dev, struct sock *sk)
{
	struct raw_sock *ro = raw_sk(sk);
	int err;

	err = raw_enable_filters(dev, sk, ro->filter, ro->count);
	if (!err) {
		err = raw_enable_errfilter(dev, sk, ro->err_mask);
		if (err)
			raw_disable_filters(dev, sk, ro->filter, ro->count);
	}

	return err;
}
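
/*
 * raw_notifier() reacts to state changes of the netdevice this socket is
 * bound to: on NETDEV_UNREGISTER the filters are removed and the binding
 * is dropped, on NETDEV_DOWN the socket is only flagged with ENETDOWN.
 */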
static int raw_notifier(struct notifier_block *nb,
			unsigned long msg, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct raw_sock *ro = container_of(nb, struct raw_sock, notifier);
	struct sock *sk = &ro->sk;

	if (!net_eq(dev_net(dev), &init_net))
		return NOTIFY_DONE;

	if (dev->type != ARPHRD_CAN)
		return NOTIFY_DONE;

	if (ro->ifindex != dev->ifindex)
		return NOTIFY_DONE;

	switch (msg) {

	case NETDEV_UNREGISTER:
		lock_sock(sk);
		/* remove current filters & unregister */
		if (ro->bound)
			raw_disable_allfilters(dev, sk);

		if (ro->count > 1)
			kfree(ro->filter);

		ro->ifindex = 0;
		ro->bound = 0;
		ro->count = 0;
		release_sock(sk);

		sk->sk_err = ENODEV;
		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_error_report(sk);
		break;

	case NETDEV_DOWN:
		sk->sk_err = ENETDOWN;
		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_error_report(sk);
		break;
	}

	return NOTIFY_DONE;
}

static int raw_init(struct sock *sk)
{
	struct raw_sock *ro = raw_sk(sk);

	ro->bound = 0;
	ro->ifindex = 0;

	/* set default filter to single entry dfilter */
	ro->dfilter.can_id = 0;
	ro->dfilter.can_mask = MASK_ALL;
	ro->filter = &ro->dfilter;
	ro->count = 1;

	/* set default loopback behaviour */
	ro->loopback = 1;
	ro->recv_own_msgs = 0;
	ro->fd_frames = 0;
	ro->join_filters = 0;

	/* alloc_percpu provides zero'ed memory */
	ro->uniq = alloc_percpu(struct uniqframe);
	if (unlikely(!ro->uniq))
		return -ENOMEM;

	/* set notifier */
	ro->notifier.notifier_call = raw_notifier;

	register_netdevice_notifier(&ro->notifier);

	return 0;
}

static int raw_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct raw_sock *ro;

	if (!sk)
		return 0;

	ro = raw_sk(sk);

	unregister_netdevice_notifier(&ro->notifier);

	lock_sock(sk);

	/* remove current filters & unregister */
	if (ro->bound) {
		if (ro->ifindex) {
			struct net_device *dev;

			dev = dev_get_by_index(&init_net, ro->ifindex);
			if (dev) {
				raw_disable_allfilters(dev, sk);
				dev_put(dev);
			}
		} else
			raw_disable_allfilters(NULL, sk);
	}

	if (ro->count > 1)
		kfree(ro->filter);

	ro->ifindex = 0;
	ro->bound = 0;
	ro->count = 0;
	free_percpu(ro->uniq);

	sock_orphan(sk);
	sock->sk = NULL;

	release_sock(sk);
	sock_put(sk);

	return 0;
}

static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len)
{
	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
	struct sock *sk = sock->sk;
	struct raw_sock *ro = raw_sk(sk);
	int ifindex;
	int err = 0;
	int notify_enetdown = 0;

	if (len < sizeof(*addr))
		return -EINVAL;

	lock_sock(sk);

	if (ro->bound && addr->can_ifindex == ro->ifindex)
		goto out;

	if (addr->can_ifindex) {
		struct net_device *dev;

		dev = dev_get_by_index(&init_net, addr->can_ifindex);
		if (!dev) {
			err = -ENODEV;
			goto out;
		}
		if (dev->type != ARPHRD_CAN) {
			dev_put(dev);
			err = -ENODEV;
			goto out;
		}
		if (!(dev->flags & IFF_UP))
			notify_enetdown = 1;

		ifindex = dev->ifindex;

		/* filters set by default/setsockopt */
		err = raw_enable_allfilters(dev, sk);
		dev_put(dev);
	} else {
		ifindex = 0;

		/* filters set by default/setsockopt */
		err = raw_enable_allfilters(NULL, sk);
	}

	if (!err) {
		if (ro->bound) {
			/* unregister old filters */
			if (ro->ifindex) {
				struct net_device *dev;

				dev = dev_get_by_index(&init_net, ro->ifindex);
				if (dev) {
					raw_disable_allfilters(dev, sk);
					dev_put(dev);
				}
			} else
				raw_disable_allfilters(NULL, sk);
		}
		ro->ifindex = ifindex;
		ro->bound = 1;
	}

 out:
	release_sock(sk);

	if (notify_enetdown) {
		sk->sk_err = ENETDOWN;
		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_error_report(sk);
	}

	return err;
}

static int raw_getname(struct socket *sock, struct sockaddr *uaddr,
		       int *len, int peer)
{
	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
	struct sock *sk = sock->sk;
	struct raw_sock *ro = raw_sk(sk);

	if (peer)
		return -EOPNOTSUPP;

	memset(addr, 0, sizeof(*addr));
	addr->can_family = AF_CAN;
	addr->can_ifindex = ro->ifindex;

	*len = sizeof(*addr);

	return 0;
}
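
/*
 * Userspace sketch of the socket options handled below (illustrative
 * only; the chosen error classes are example values). For instance, to
 * receive CAN FD frames and bus error notifications on socket s:
 *
 *	int enable = 1;
 *	can_err_mask_t err_mask = CAN_ERR_TX_TIMEOUT | CAN_ERR_BUSOFF;
 *
 *	setsockopt(s, SOL_CAN_RAW, CAN_RAW_FD_FRAMES,
 *		   &enable, sizeof(enable));
 *	setsockopt(s, SOL_CAN_RAW, CAN_RAW_ERR_FILTER,
 *		   &err_mask, sizeof(err_mask));
 */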
static int raw_setsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct raw_sock *ro = raw_sk(sk);
	struct can_filter *filter = NULL; /* dyn. alloc'ed filters */
	struct can_filter sfilter;        /* single filter */
	struct net_device *dev = NULL;
	can_err_mask_t err_mask = 0;
	int count = 0;
	int err = 0;

	if (level != SOL_CAN_RAW)
		return -EINVAL;

	switch (optname) {

	case CAN_RAW_FILTER:
		if (optlen % sizeof(struct can_filter) != 0)
			return -EINVAL;

		if (optlen > CAN_RAW_FILTER_MAX * sizeof(struct can_filter))
			return -EINVAL;

		count = optlen / sizeof(struct can_filter);

		if (count > 1) {
			/* filter does not fit into dfilter => alloc space */
			filter = memdup_user(optval, optlen);
			if (IS_ERR(filter))
				return PTR_ERR(filter);
		} else if (count == 1) {
			if (copy_from_user(&sfilter, optval, sizeof(sfilter)))
				return -EFAULT;
		}

		lock_sock(sk);

		if (ro->bound && ro->ifindex)
			dev = dev_get_by_index(&init_net, ro->ifindex);

		if (ro->bound) {
			/* (try to) register the new filters */
			if (count == 1)
				err = raw_enable_filters(dev, sk, &sfilter, 1);
			else
				err = raw_enable_filters(dev, sk, filter,
							 count);
			if (err) {
				if (count > 1)
					kfree(filter);
				goto out_fil;
			}

			/* remove old filter registrations */
			raw_disable_filters(dev, sk, ro->filter, ro->count);
		}

		/* remove old filter space */
		if (ro->count > 1)
			kfree(ro->filter);

		/* link new filters to the socket */
		if (count == 1) {
			/* copy filter data for single filter */
			ro->dfilter = sfilter;
			filter = &ro->dfilter;
		}
		ro->filter = filter;
		ro->count = count;

 out_fil:
		if (dev)
			dev_put(dev);

		release_sock(sk);

		break;

	case CAN_RAW_ERR_FILTER:
		if (optlen != sizeof(err_mask))
			return -EINVAL;

		if (copy_from_user(&err_mask, optval, optlen))
			return -EFAULT;

		err_mask &= CAN_ERR_MASK;

		lock_sock(sk);

		if (ro->bound && ro->ifindex)
			dev = dev_get_by_index(&init_net, ro->ifindex);

		/* remove current error mask */
		if (ro->bound) {
			/* (try to) register the new err_mask */
			err = raw_enable_errfilter(dev, sk, err_mask);

			if (err)
				goto out_err;

			/* remove old err_mask registration */
			raw_disable_errfilter(dev, sk, ro->err_mask);
		}

		/* link new err_mask to the socket */
		ro->err_mask = err_mask;

 out_err:
		if (dev)
			dev_put(dev);

		release_sock(sk);

		break;

	case CAN_RAW_LOOPBACK:
		if (optlen != sizeof(ro->loopback))
			return -EINVAL;

		if (copy_from_user(&ro->loopback, optval, optlen))
			return -EFAULT;

		break;

	case CAN_RAW_RECV_OWN_MSGS:
		if (optlen != sizeof(ro->recv_own_msgs))
			return -EINVAL;

		if (copy_from_user(&ro->recv_own_msgs, optval, optlen))
			return -EFAULT;

		break;

	case CAN_RAW_FD_FRAMES:
		if (optlen != sizeof(ro->fd_frames))
			return -EINVAL;

		if (copy_from_user(&ro->fd_frames, optval, optlen))
			return -EFAULT;

		break;

	case CAN_RAW_JOIN_FILTERS:
		if (optlen != sizeof(ro->join_filters))
			return -EINVAL;

		if (copy_from_user(&ro->join_filters, optval, optlen))
			return -EFAULT;

		break;

	default:
		return -ENOPROTOOPT;
	}
	return err;
}

static int raw_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct raw_sock *ro = raw_sk(sk);
	int len;
	void *val;
	int err = 0;

	if (level != SOL_CAN_RAW)
		return -EINVAL;
	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {

	case CAN_RAW_FILTER:
		lock_sock(sk);
		if (ro->count > 0) {
			int fsize = ro->count * sizeof(struct can_filter);

			if (len > fsize)
				len = fsize;
			if (copy_to_user(optval, ro->filter, len))
				err = -EFAULT;
		} else
			len = 0;
		release_sock(sk);

		if (!err)
			err = put_user(len, optlen);
		return err;

	case CAN_RAW_ERR_FILTER:
		if (len > sizeof(can_err_mask_t))
			len = sizeof(can_err_mask_t);
		val = &ro->err_mask;
		break;

	case CAN_RAW_LOOPBACK:
		if (len > sizeof(int))
			len = sizeof(int);
		val = &ro->loopback;
		break;

	case CAN_RAW_RECV_OWN_MSGS:
		if (len > sizeof(int))
			len = sizeof(int);
		val = &ro->recv_own_msgs;
		break;

	case CAN_RAW_FD_FRAMES:
		if (len > sizeof(int))
			len = sizeof(int);
		val = &ro->fd_frames;
		break;

	case CAN_RAW_JOIN_FILTERS:
		if (len > sizeof(int))
			len = sizeof(int);
		val = &ro->join_filters;
		break;

	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, val, len))
		return -EFAULT;
	return 0;
}
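
/*
 * Userspace sketch for the I/O paths below (illustrative only; the CAN ID
 * and payload are example values): once bound, frames are exchanged as
 * complete struct can_frame (or struct canfd_frame) datagrams.
 *
 *	struct can_frame frame = {
 *		.can_id  = 0x123,
 *		.can_dlc = 2,
 *		.data    = { 0xde, 0xad },
 *	};
 *
 *	write(s, &frame, sizeof(frame));	(handled by raw_sendmsg)
 *	read(s, &frame, sizeof(frame));		(handled by raw_recvmsg)
 */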
static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
	struct sock *sk = sock->sk;
	struct raw_sock *ro = raw_sk(sk);
	struct sk_buff *skb;
	struct net_device *dev;
	int ifindex;
	int err;

	if (msg->msg_name) {
		DECLARE_SOCKADDR(struct sockaddr_can *, addr, msg->msg_name);

		if (msg->msg_namelen < sizeof(*addr))
			return -EINVAL;

		if (addr->can_family != AF_CAN)
			return -EINVAL;

		ifindex = addr->can_ifindex;
	} else
		ifindex = ro->ifindex;

	if (ro->fd_frames) {
		if (unlikely(size != CANFD_MTU && size != CAN_MTU))
			return -EINVAL;
	} else {
		if (unlikely(size != CAN_MTU))
			return -EINVAL;
	}

	dev = dev_get_by_index(&init_net, ifindex);
	if (!dev)
		return -ENXIO;

	skb = sock_alloc_send_skb(sk, size + sizeof(struct can_skb_priv),
				  msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto put_dev;

	can_skb_reserve(skb);
	can_skb_prv(skb)->ifindex = dev->ifindex;
	can_skb_prv(skb)->skbcnt = 0;

	err = memcpy_from_msg(skb_put(skb, size), msg, size);
	if (err < 0)
		goto free_skb;

	sock_tx_timestamp(sk, sk->sk_tsflags, &skb_shinfo(skb)->tx_flags);

	skb->dev = dev;
	skb->sk = sk;
	skb->priority = sk->sk_priority;

	err = can_send(skb, ro->loopback);

	dev_put(dev);

	if (err)
		goto send_failed;

	return size;

free_skb:
	kfree_skb(skb);
put_dev:
	dev_put(dev);
send_failed:
	return err;
}

static int raw_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
		       int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int err = 0;
	int noblock;

	noblock = flags & MSG_DONTWAIT;
	flags &= ~MSG_DONTWAIT;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	if (size < skb->len)
		msg->msg_flags |= MSG_TRUNC;
	else
		size = skb->len;

	err = memcpy_to_msg(msg, skb->data, size);
	if (err < 0) {
		skb_free_datagram(sk, skb);
		return err;
	}

	sock_recv_ts_and_drops(msg, sk, skb);

	if (msg->msg_name) {
		__sockaddr_check_size(sizeof(struct sockaddr_can));
		msg->msg_namelen = sizeof(struct sockaddr_can);
		memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
	}

	/* assign the flags that have been recorded in raw_rcv() */
	msg->msg_flags |= *(raw_flags(skb));

	skb_free_datagram(sk, skb);

	return size;
}

static const struct proto_ops raw_ops = {
	.family        = PF_CAN,
	.release       = raw_release,
	.bind          = raw_bind,
	.connect       = sock_no_connect,
	.socketpair    = sock_no_socketpair,
	.accept        = sock_no_accept,
	.getname       = raw_getname,
	.poll          = datagram_poll,
	.ioctl         = can_ioctl,	/* use can_ioctl() from af_can.c */
	.listen        = sock_no_listen,
	.shutdown      = sock_no_shutdown,
	.setsockopt    = raw_setsockopt,
	.getsockopt    = raw_getsockopt,
	.sendmsg       = raw_sendmsg,
	.recvmsg       = raw_recvmsg,
	.mmap          = sock_no_mmap,
	.sendpage      = sock_no_sendpage,
};

static struct proto raw_proto __read_mostly = {
	.name       = "CAN_RAW",
	.owner      = THIS_MODULE,
	.obj_size   = sizeof(struct raw_sock),
	.init       = raw_init,
};

static const struct can_proto raw_can_proto = {
	.type       = SOCK_RAW,
	.protocol   = CAN_RAW,
	.ops        = &raw_ops,
	.prot       = &raw_proto,
};

static __init int raw_module_init(void)
{
	int err;

	pr_info("can: raw protocol (rev " CAN_RAW_VERSION ")\n");

	err = can_proto_register(&raw_can_proto);
	if (err < 0)
		printk(KERN_ERR "can: registration of raw protocol failed\n");

	return err;
}

static __exit void raw_module_exit(void)
{
	can_proto_unregister(&raw_can_proto);
}

module_init(raw_module_init);
module_exit(raw_module_exit);