// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/* raw.c - Raw sockets for protocol family CAN
 *
 * Copyright (c) 2002-2007 Volkswagen Group Electronic Research
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Volkswagen nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * Alternatively, provided that this notice is retained in full, this
 * software may be distributed under the terms of the GNU General
 * Public License ("GPL") version 2, in which case the provisions of the
 * GPL apply INSTEAD OF those given above.
 *
 * The provided data structures and external interfaces from this code
 * are not restricted to be used by modules with a GPL compatible license.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/uio.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <linux/can.h>
#include <linux/can/core.h>
#include <linux/can/dev.h> /* for can_is_canxl_dev_mtu() */
#include <linux/can/skb.h>
#include <linux/can/raw.h>
#include <net/sock.h>
#include <net/net_namespace.h>

MODULE_DESCRIPTION("PF_CAN raw protocol");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Urs Thuermann <urs.thuermann@volkswagen.de>");
MODULE_ALIAS("can-proto-1");

#define RAW_MIN_NAMELEN CAN_REQUIRED_SIZE(struct sockaddr_can, can_ifindex)

#define MASK_ALL 0

/* A raw socket has a list of can_filters attached to it, each receiving
 * the CAN frames matching that filter. If the filter list is empty,
 * no CAN frames will be received by the socket. The default after
 * opening the socket is to have one filter which receives all frames.
 * The filter list is allocated dynamically with the exception of the
 * list containing only one item. This common case is optimized by
 * storing the single filter in dfilter, to avoid using dynamic memory.
 */
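
/* Illustrative userspace sketch (not part of this module), assuming an
 * already created CAN_RAW socket 's': replace the default catch-all
 * filter with two filters via setsockopt(). The IDs and masks are
 * example values only.
 *
 *	struct can_filter rfilter[2] = {
 *		{ .can_id = 0x123, .can_mask = CAN_SFF_MASK },
 *		{ .can_id = 0x200, .can_mask = 0x700 },
 *	};
 *
 *	setsockopt(s, SOL_CAN_RAW, CAN_RAW_FILTER,
 *		   &rfilter, sizeof(rfilter));
 *
 * Passing optlen 0 installs an empty filter list, so the socket then
 * receives no CAN frames at all.
 */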

struct uniqframe {
	int skbcnt;
	const struct sk_buff *skb;
	unsigned int join_rx_count;
};

struct raw_sock {
	struct sock sk;
	int bound;
	int ifindex;
	struct net_device *dev;
	struct list_head notifier;
	int loopback;
	int recv_own_msgs;
	int fd_frames;
	int xl_frames;
	int join_filters;
	int count;                 /* number of active filters */
	struct can_filter dfilter; /* default/single filter */
	struct can_filter *filter; /* pointer to filter(s) */
	can_err_mask_t err_mask;
	struct uniqframe __percpu *uniq;
};

static LIST_HEAD(raw_notifier_list);
static DEFINE_SPINLOCK(raw_notifier_lock);
static struct raw_sock *raw_busy_notifier;

/* Return pointer to store the extra msg flags for raw_recvmsg().
 * We use the space of one unsigned int beyond the 'struct sockaddr_can'
 * in skb->cb.
 */
static inline unsigned int *raw_flags(struct sk_buff *skb)
{
	sock_skb_cb_check_size(sizeof(struct sockaddr_can) +
			       sizeof(unsigned int));

	/* return pointer after struct sockaddr_can */
	return (unsigned int *)(&((struct sockaddr_can *)skb->cb)[1]);
}

static inline struct raw_sock *raw_sk(const struct sock *sk)
{
	return (struct raw_sock *)sk;
}
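
/* Illustrative userspace sketch (not part of this module): the flags
 * stored via raw_flags() surface in msg_flags after recvmsg(). A frame
 * sent by any local socket carries MSG_DONTROUTE; a frame sent by this
 * very socket additionally carries MSG_CONFIRM (see raw_rcv() below).
 *
 *	struct msghdr msg = {};      // plus an iovec for the frame buffer
 *
 *	recvmsg(s, &msg, 0);
 *	if (msg.msg_flags & MSG_CONFIRM)
 *		;                    // own transmission looped back
 */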

static void raw_rcv(struct sk_buff *oskb, void *data)
{
	struct sock *sk = (struct sock *)data;
	struct raw_sock *ro = raw_sk(sk);
	struct sockaddr_can *addr;
	struct sk_buff *skb;
	unsigned int *pflags;

	/* check the received tx sock reference */
	if (!ro->recv_own_msgs && oskb->sk == sk)
		return;

	/* make sure to not pass oversized frames to the socket */
	if ((!ro->fd_frames && can_is_canfd_skb(oskb)) ||
	    (!ro->xl_frames && can_is_canxl_skb(oskb)))
		return;

	/* eliminate multiple filter matches for the same skb */
	if (this_cpu_ptr(ro->uniq)->skb == oskb &&
	    this_cpu_ptr(ro->uniq)->skbcnt == can_skb_prv(oskb)->skbcnt) {
		if (!ro->join_filters)
			return;

		this_cpu_inc(ro->uniq->join_rx_count);
		/* drop frame until all enabled filters matched */
		if (this_cpu_ptr(ro->uniq)->join_rx_count < ro->count)
			return;
	} else {
		this_cpu_ptr(ro->uniq)->skb = oskb;
		this_cpu_ptr(ro->uniq)->skbcnt = can_skb_prv(oskb)->skbcnt;
		this_cpu_ptr(ro->uniq)->join_rx_count = 1;
		/* drop first frame to check all enabled filters? */
		if (ro->join_filters && ro->count > 1)
			return;
	}

	/* clone the given skb to be able to enqueue it into the rcv queue */
	skb = skb_clone(oskb, GFP_ATOMIC);
	if (!skb)
		return;

	/* Put the datagram to the queue so that raw_recvmsg() can get
	 * it from there. We need to pass the interface index to
	 * raw_recvmsg(). We pass a whole struct sockaddr_can in
	 * skb->cb containing the interface index.
	 */

	sock_skb_cb_check_size(sizeof(struct sockaddr_can));
	addr = (struct sockaddr_can *)skb->cb;
	memset(addr, 0, sizeof(*addr));
	addr->can_family = AF_CAN;
	addr->can_ifindex = skb->dev->ifindex;

	/* add CAN specific message flags for raw_recvmsg() */
	pflags = raw_flags(skb);
	*pflags = 0;
	if (oskb->sk)
		*pflags |= MSG_DONTROUTE;
	if (oskb->sk == sk)
		*pflags |= MSG_CONFIRM;

	if (sock_queue_rcv_skb(sk, skb) < 0)
		kfree_skb(skb);
}

static int raw_enable_filters(struct net *net, struct net_device *dev,
			      struct sock *sk, struct can_filter *filter,
			      int count)
{
	int err = 0;
	int i;

	for (i = 0; i < count; i++) {
		err = can_rx_register(net, dev, filter[i].can_id,
				      filter[i].can_mask,
				      raw_rcv, sk, "raw", sk);
		if (err) {
			/* clean up successfully registered filters */
			while (--i >= 0)
				can_rx_unregister(net, dev, filter[i].can_id,
						  filter[i].can_mask,
						  raw_rcv, sk);
			break;
		}
	}

	return err;
}

static int raw_enable_errfilter(struct net *net, struct net_device *dev,
				struct sock *sk, can_err_mask_t err_mask)
{
	int err = 0;

	if (err_mask)
		err = can_rx_register(net, dev, 0, err_mask | CAN_ERR_FLAG,
				      raw_rcv, sk, "raw", sk);

	return err;
}

static void raw_disable_filters(struct net *net, struct net_device *dev,
				struct sock *sk, struct can_filter *filter,
				int count)
{
	int i;

	for (i = 0; i < count; i++)
		can_rx_unregister(net, dev, filter[i].can_id,
				  filter[i].can_mask, raw_rcv, sk);
}

static inline void raw_disable_errfilter(struct net *net,
					 struct net_device *dev,
					 struct sock *sk,
					 can_err_mask_t err_mask)
{
	if (err_mask)
		can_rx_unregister(net, dev, 0, err_mask | CAN_ERR_FLAG,
				  raw_rcv, sk);
}

static inline void raw_disable_allfilters(struct net *net,
					  struct net_device *dev,
					  struct sock *sk)
{
	struct raw_sock *ro = raw_sk(sk);

	raw_disable_filters(net, dev, sk, ro->filter, ro->count);
	raw_disable_errfilter(net, dev, sk, ro->err_mask);
}

static int raw_enable_allfilters(struct net *net, struct net_device *dev,
				 struct sock *sk)
{
	struct raw_sock *ro = raw_sk(sk);
	int err;

	err = raw_enable_filters(net, dev, sk, ro->filter, ro->count);
	if (!err) {
		err = raw_enable_errfilter(net, dev, sk, ro->err_mask);
		if (err)
			raw_disable_filters(net, dev, sk, ro->filter,
					    ro->count);
	}

	return err;
}

static void raw_notify(struct raw_sock *ro, unsigned long msg,
		       struct net_device *dev)
{
	struct sock *sk = &ro->sk;

	if (!net_eq(dev_net(dev), sock_net(sk)))
		return;

	if (ro->dev != dev)
		return;

	switch (msg) {
	case NETDEV_UNREGISTER:
		lock_sock(sk);
		/* remove current filters & unregister */
		if (ro->bound)
			raw_disable_allfilters(dev_net(dev), dev, sk);

		if (ro->count > 1)
			kfree(ro->filter);

		ro->ifindex = 0;
		ro->bound = 0;
		ro->dev = NULL;
		ro->count = 0;
		release_sock(sk);

		sk->sk_err = ENODEV;
		if (!sock_flag(sk, SOCK_DEAD))
			sk_error_report(sk);
		break;

	case NETDEV_DOWN:
		sk->sk_err = ENETDOWN;
		if (!sock_flag(sk, SOCK_DEAD))
			sk_error_report(sk);
		break;
	}
}
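
/* Illustrative userspace sketch (not part of this module): subscribe to
 * error frames with an error mask; the error classes come from
 * <linux/can/error.h> and the chosen ones are examples only.
 *
 *	can_err_mask_t err_mask = CAN_ERR_BUSOFF | CAN_ERR_ACK;
 *
 *	setsockopt(s, SOL_CAN_RAW, CAN_RAW_ERR_FILTER,
 *		   &err_mask, sizeof(err_mask));
 *
 * Matching error frames are delivered as struct can_frame datagrams
 * with CAN_ERR_FLAG set in can_id.
 */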

static int raw_notifier(struct notifier_block *nb, unsigned long msg,
			void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (dev->type != ARPHRD_CAN)
		return NOTIFY_DONE;
	if (msg != NETDEV_UNREGISTER && msg != NETDEV_DOWN)
		return NOTIFY_DONE;
	if (unlikely(raw_busy_notifier)) /* Check for reentrant bug. */
		return NOTIFY_DONE;

	spin_lock(&raw_notifier_lock);
	list_for_each_entry(raw_busy_notifier, &raw_notifier_list, notifier) {
		spin_unlock(&raw_notifier_lock);
		raw_notify(raw_busy_notifier, msg, dev);
		spin_lock(&raw_notifier_lock);
	}
	raw_busy_notifier = NULL;
	spin_unlock(&raw_notifier_lock);
	return NOTIFY_DONE;
}

static int raw_init(struct sock *sk)
{
	struct raw_sock *ro = raw_sk(sk);

	ro->bound = 0;
	ro->ifindex = 0;
	ro->dev = NULL;

	/* set default filter to single entry dfilter */
	ro->dfilter.can_id = 0;
	ro->dfilter.can_mask = MASK_ALL;
	ro->filter = &ro->dfilter;
	ro->count = 1;

	/* set default loopback behaviour */
	ro->loopback = 1;
	ro->recv_own_msgs = 0;
	ro->fd_frames = 0;
	ro->xl_frames = 0;
	ro->join_filters = 0;

	/* alloc_percpu provides zero'ed memory */
	ro->uniq = alloc_percpu(struct uniqframe);
	if (unlikely(!ro->uniq))
		return -ENOMEM;

	/* set notifier */
	spin_lock(&raw_notifier_lock);
	list_add_tail(&ro->notifier, &raw_notifier_list);
	spin_unlock(&raw_notifier_lock);

	return 0;
}

static int raw_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct raw_sock *ro;

	if (!sk)
		return 0;

	ro = raw_sk(sk);

	spin_lock(&raw_notifier_lock);
	while (raw_busy_notifier == ro) {
		spin_unlock(&raw_notifier_lock);
		schedule_timeout_uninterruptible(1);
		spin_lock(&raw_notifier_lock);
	}
	list_del(&ro->notifier);
	spin_unlock(&raw_notifier_lock);

	lock_sock(sk);

	rtnl_lock();
	/* remove current filters & unregister */
	if (ro->bound) {
		if (ro->dev)
			raw_disable_allfilters(dev_net(ro->dev), ro->dev, sk);
		else
			raw_disable_allfilters(sock_net(sk), NULL, sk);
	}

	if (ro->count > 1)
		kfree(ro->filter);

	ro->ifindex = 0;
	ro->bound = 0;
	ro->dev = NULL;
	ro->count = 0;
	free_percpu(ro->uniq);
	rtnl_unlock();

	sock_orphan(sk);
	sock->sk = NULL;

	release_sock(sk);
	sock_put(sk);

	return 0;
}
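
/* Illustrative userspace sketch (not part of this module): bind the
 * socket to a single interface ("can0" is an example name). Using a
 * can_ifindex of 0 binds the socket to all CAN interfaces instead.
 *
 *	struct sockaddr_can addr = {
 *		.can_family  = AF_CAN,
 *		.can_ifindex = if_nametoindex("can0"),
 *	};
 *
 *	bind(s, (struct sockaddr *)&addr, sizeof(addr));
 */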

static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len)
{
	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
	struct sock *sk = sock->sk;
	struct raw_sock *ro = raw_sk(sk);
	struct net_device *dev = NULL;
	int ifindex;
	int err = 0;
	int notify_enetdown = 0;

	if (len < RAW_MIN_NAMELEN)
		return -EINVAL;
	if (addr->can_family != AF_CAN)
		return -EINVAL;

	rtnl_lock();
	lock_sock(sk);

	if (ro->bound && addr->can_ifindex == ro->ifindex)
		goto out;

	if (addr->can_ifindex) {
		dev = dev_get_by_index(sock_net(sk), addr->can_ifindex);
		if (!dev) {
			err = -ENODEV;
			goto out;
		}
		if (dev->type != ARPHRD_CAN) {
			dev_put(dev);
			err = -ENODEV;
			goto out;
		}
		if (!(dev->flags & IFF_UP))
			notify_enetdown = 1;

		ifindex = dev->ifindex;

		/* filters set by default/setsockopt */
		err = raw_enable_allfilters(sock_net(sk), dev, sk);
		dev_put(dev);
	} else {
		ifindex = 0;

		/* filters set by default/setsockopt */
		err = raw_enable_allfilters(sock_net(sk), NULL, sk);
	}

	if (!err) {
		if (ro->bound) {
			/* unregister old filters */
			if (ro->dev)
				raw_disable_allfilters(dev_net(ro->dev),
						       ro->dev, sk);
			else
				raw_disable_allfilters(sock_net(sk), NULL, sk);
		}
		ro->ifindex = ifindex;
		ro->bound = 1;
		ro->dev = dev;
	}

out:
	release_sock(sk);
	rtnl_unlock();

	if (notify_enetdown) {
		sk->sk_err = ENETDOWN;
		if (!sock_flag(sk, SOCK_DEAD))
			sk_error_report(sk);
	}

	return err;
}

static int raw_getname(struct socket *sock, struct sockaddr *uaddr,
		       int peer)
{
	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
	struct sock *sk = sock->sk;
	struct raw_sock *ro = raw_sk(sk);

	if (peer)
		return -EOPNOTSUPP;

	memset(addr, 0, RAW_MIN_NAMELEN);
	addr->can_family = AF_CAN;
	addr->can_ifindex = ro->ifindex;

	return RAW_MIN_NAMELEN;
}

static int raw_setsockopt(struct socket *sock, int level, int optname,
			  sockptr_t optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct raw_sock *ro = raw_sk(sk);
	struct can_filter *filter = NULL; /* dyn. alloc'ed filters */
	struct can_filter sfilter; /* single filter */
	struct net_device *dev = NULL;
	can_err_mask_t err_mask = 0;
	int fd_frames;
	int count = 0;
	int err = 0;

	if (level != SOL_CAN_RAW)
		return -EINVAL;

	switch (optname) {
	case CAN_RAW_FILTER:
		if (optlen % sizeof(struct can_filter) != 0)
			return -EINVAL;

		if (optlen > CAN_RAW_FILTER_MAX * sizeof(struct can_filter))
			return -EINVAL;

		count = optlen / sizeof(struct can_filter);

		if (count > 1) {
			/* filter does not fit into dfilter => alloc space */
			filter = memdup_sockptr(optval, optlen);
			if (IS_ERR(filter))
				return PTR_ERR(filter);
		} else if (count == 1) {
			if (copy_from_sockptr(&sfilter, optval, sizeof(sfilter)))
				return -EFAULT;
		}

		rtnl_lock();
		lock_sock(sk);

		dev = ro->dev;
		if (ro->bound && dev) {
			if (dev->reg_state != NETREG_REGISTERED) {
				if (count > 1)
					kfree(filter);
				err = -ENODEV;
				goto out_fil;
			}
		}

		if (ro->bound) {
			/* (try to) register the new filters */
			if (count == 1)
				err = raw_enable_filters(sock_net(sk), dev, sk,
							 &sfilter, 1);
			else
				err = raw_enable_filters(sock_net(sk), dev, sk,
							 filter, count);
			if (err) {
				if (count > 1)
					kfree(filter);
				goto out_fil;
			}

			/* remove old filter registrations */
			raw_disable_filters(sock_net(sk), dev, sk, ro->filter,
					    ro->count);
		}

		/* remove old filter space */
		if (ro->count > 1)
			kfree(ro->filter);

		/* link new filters to the socket */
		if (count == 1) {
			/* copy filter data for single filter */
			ro->dfilter = sfilter;
			filter = &ro->dfilter;
		}
		ro->filter = filter;
		ro->count = count;

out_fil:
		release_sock(sk);
		rtnl_unlock();

		break;

	case CAN_RAW_ERR_FILTER:
		if (optlen != sizeof(err_mask))
			return -EINVAL;

		if (copy_from_sockptr(&err_mask, optval, optlen))
			return -EFAULT;

		err_mask &= CAN_ERR_MASK;

		rtnl_lock();
		lock_sock(sk);

		dev = ro->dev;
		if (ro->bound && dev) {
			if (dev->reg_state != NETREG_REGISTERED) {
				err = -ENODEV;
				goto out_err;
			}
		}

		/* remove current error mask */
		if (ro->bound) {
			/* (try to) register the new err_mask */
			err = raw_enable_errfilter(sock_net(sk), dev, sk,
						   err_mask);

			if (err)
				goto out_err;

			/* remove old err_mask registration */
			raw_disable_errfilter(sock_net(sk), dev, sk,
					      ro->err_mask);
		}

		/* link new err_mask to the socket */
		ro->err_mask = err_mask;

out_err:
		release_sock(sk);
		rtnl_unlock();

		break;

	case CAN_RAW_LOOPBACK:
		if (optlen != sizeof(ro->loopback))
			return -EINVAL;

		if (copy_from_sockptr(&ro->loopback, optval, optlen))
			return -EFAULT;

		break;

	case CAN_RAW_RECV_OWN_MSGS:
		if (optlen != sizeof(ro->recv_own_msgs))
			return -EINVAL;

		if (copy_from_sockptr(&ro->recv_own_msgs, optval, optlen))
			return -EFAULT;

		break;

	case CAN_RAW_FD_FRAMES:
		if (optlen != sizeof(fd_frames))
			return -EINVAL;

		if (copy_from_sockptr(&fd_frames, optval, optlen))
			return -EFAULT;

		/* Enabling CAN XL includes CAN FD */
		if (ro->xl_frames && !fd_frames)
			return -EINVAL;

		ro->fd_frames = fd_frames;
		break;

	case CAN_RAW_XL_FRAMES:
		if (optlen != sizeof(ro->xl_frames))
			return -EINVAL;

		if (copy_from_sockptr(&ro->xl_frames, optval, optlen))
			return -EFAULT;

		/* Enabling CAN XL includes CAN FD */
		if (ro->xl_frames)
			ro->fd_frames = ro->xl_frames;
		break;

	case CAN_RAW_JOIN_FILTERS:
		if (optlen != sizeof(ro->join_filters))
			return -EINVAL;

		if (copy_from_sockptr(&ro->join_filters, optval, optlen))
			return -EFAULT;

		break;

	default:
		return -ENOPROTOOPT;
	}
	return err;
}
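
/* Illustrative userspace sketch (not part of this module): enable CAN FD
 * frames on the socket. As enabling CAN XL implies CAN FD (see the
 * CAN_RAW_XL_FRAMES case above), clearing FD while XL is enabled fails
 * with EINVAL.
 *
 *	int enable = 1;
 *
 *	setsockopt(s, SOL_CAN_RAW, CAN_RAW_FD_FRAMES,
 *		   &enable, sizeof(enable));
 *
 * With FD enabled, a read() may return CAN_MTU or CANFD_MTU bytes, so
 * the receive buffer should be sized for struct canfd_frame.
 */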

static int raw_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct raw_sock *ro = raw_sk(sk);
	int len;
	void *val;
	int err = 0;

	if (level != SOL_CAN_RAW)
		return -EINVAL;
	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case CAN_RAW_FILTER:
		lock_sock(sk);
		if (ro->count > 0) {
			int fsize = ro->count * sizeof(struct can_filter);

			/* user space buffer too small for filter list? */
			if (len < fsize) {
				/* return -ERANGE and needed space in optlen */
				err = -ERANGE;
				if (put_user(fsize, optlen))
					err = -EFAULT;
			} else {
				if (len > fsize)
					len = fsize;
				if (copy_to_user(optval, ro->filter, len))
					err = -EFAULT;
			}
		} else {
			len = 0;
		}
		release_sock(sk);

		if (!err)
			err = put_user(len, optlen);
		return err;

	case CAN_RAW_ERR_FILTER:
		if (len > sizeof(can_err_mask_t))
			len = sizeof(can_err_mask_t);
		val = &ro->err_mask;
		break;

	case CAN_RAW_LOOPBACK:
		if (len > sizeof(int))
			len = sizeof(int);
		val = &ro->loopback;
		break;

	case CAN_RAW_RECV_OWN_MSGS:
		if (len > sizeof(int))
			len = sizeof(int);
		val = &ro->recv_own_msgs;
		break;

	case CAN_RAW_FD_FRAMES:
		if (len > sizeof(int))
			len = sizeof(int);
		val = &ro->fd_frames;
		break;

	case CAN_RAW_XL_FRAMES:
		if (len > sizeof(int))
			len = sizeof(int);
		val = &ro->xl_frames;
		break;

	case CAN_RAW_JOIN_FILTERS:
		if (len > sizeof(int))
			len = sizeof(int);
		val = &ro->join_filters;
		break;

	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, val, len))
		return -EFAULT;
	return 0;
}
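
/* Illustrative userspace sketch (not part of this module): query the
 * filter list using the -ERANGE protocol implemented above. A first
 * call with optlen 0 reports the required size, a second call fetches
 * the filters.
 *
 *	socklen_t size = 0;
 *
 *	if (getsockopt(s, SOL_CAN_RAW, CAN_RAW_FILTER, NULL, &size) < 0 &&
 *	    errno == ERANGE) {
 *		struct can_filter *rfilter = malloc(size);
 *
 *		getsockopt(s, SOL_CAN_RAW, CAN_RAW_FILTER, rfilter, &size);
 *	}
 */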

static bool raw_bad_txframe(struct raw_sock *ro, struct sk_buff *skb, int mtu)
{
	/* Classical CAN -> no checks for flags and device capabilities */
	if (can_is_can_skb(skb))
		return false;

	/* CAN FD -> needs to be enabled and a CAN FD or CAN XL device */
	if (ro->fd_frames && can_is_canfd_skb(skb) &&
	    (mtu == CANFD_MTU || can_is_canxl_dev_mtu(mtu)))
		return false;

	/* CAN XL -> needs to be enabled and a CAN XL device */
	if (ro->xl_frames && can_is_canxl_skb(skb) &&
	    can_is_canxl_dev_mtu(mtu))
		return false;

	return true;
}

static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
	struct sock *sk = sock->sk;
	struct raw_sock *ro = raw_sk(sk);
	struct sockcm_cookie sockc;
	struct sk_buff *skb;
	struct net_device *dev;
	int ifindex;
	int err = -EINVAL;

	/* check for valid CAN frame sizes */
	if (size < CANXL_HDR_SIZE + CANXL_MIN_DLEN || size > CANXL_MTU)
		return -EINVAL;

	if (msg->msg_name) {
		DECLARE_SOCKADDR(struct sockaddr_can *, addr, msg->msg_name);

		if (msg->msg_namelen < RAW_MIN_NAMELEN)
			return -EINVAL;

		if (addr->can_family != AF_CAN)
			return -EINVAL;

		ifindex = addr->can_ifindex;
	} else {
		ifindex = ro->ifindex;
	}

	dev = dev_get_by_index(sock_net(sk), ifindex);
	if (!dev)
		return -ENXIO;

	skb = sock_alloc_send_skb(sk, size + sizeof(struct can_skb_priv),
				  msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto put_dev;

	can_skb_reserve(skb);
	can_skb_prv(skb)->ifindex = dev->ifindex;
	can_skb_prv(skb)->skbcnt = 0;

	/* fill the skb before testing for valid CAN frames */
	err = memcpy_from_msg(skb_put(skb, size), msg, size);
	if (err < 0)
		goto free_skb;

	err = -EINVAL;
	if (raw_bad_txframe(ro, skb, dev->mtu))
		goto free_skb;

	sockcm_init(&sockc, sk);
	if (msg->msg_controllen) {
		err = sock_cmsg_send(sk, msg, &sockc);
		if (unlikely(err))
			goto free_skb;
	}

	skb->dev = dev;
	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;
	skb->tstamp = sockc.transmit_time;

	skb_setup_tx_timestamp(skb, sockc.tsflags);

	err = can_send(skb, ro->loopback);

	dev_put(dev);

	if (err)
		goto send_failed;

	return size;

free_skb:
	kfree_skb(skb);
put_dev:
	dev_put(dev);
send_failed:
	return err;
}
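
/* Illustrative userspace sketch (not part of this module): transmit a
 * classical CAN frame on a bound socket via write(). ID and payload are
 * made up.
 *
 *	struct can_frame frame = {
 *		.can_id = 0x123,
 *		.len    = 2,
 *		.data   = { 0xde, 0xad },
 *	};
 *
 *	write(s, &frame, sizeof(frame));
 *
 * The written size selects the frame type: CAN_MTU for classical CAN,
 * CANFD_MTU for CAN FD and CANXL_HDR_SIZE plus data length for CAN XL.
 */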

static int raw_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
		       int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int err = 0;

	if (flags & MSG_ERRQUEUE)
		return sock_recv_errqueue(sk, msg, size,
					  SOL_CAN_RAW, SCM_CAN_RAW_ERRQUEUE);

	skb = skb_recv_datagram(sk, flags, &err);
	if (!skb)
		return err;

	if (size < skb->len)
		msg->msg_flags |= MSG_TRUNC;
	else
		size = skb->len;

	err = memcpy_to_msg(msg, skb->data, size);
	if (err < 0) {
		skb_free_datagram(sk, skb);
		return err;
	}

	sock_recv_cmsgs(msg, sk, skb);

	if (msg->msg_name) {
		__sockaddr_check_size(RAW_MIN_NAMELEN);
		msg->msg_namelen = RAW_MIN_NAMELEN;
		memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
	}

	/* assign the flags that have been recorded in raw_rcv() */
	msg->msg_flags |= *(raw_flags(skb));

	skb_free_datagram(sk, skb);

	return size;
}

static int raw_sock_no_ioctlcmd(struct socket *sock, unsigned int cmd,
				unsigned long arg)
{
	/* no ioctls for socket layer -> hand it down to NIC layer */
	return -ENOIOCTLCMD;
}

static const struct proto_ops raw_ops = {
	.family		= PF_CAN,
	.release	= raw_release,
	.bind		= raw_bind,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= raw_getname,
	.poll		= datagram_poll,
	.ioctl		= raw_sock_no_ioctlcmd,
	.gettstamp	= sock_gettstamp,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= raw_setsockopt,
	.getsockopt	= raw_getsockopt,
	.sendmsg	= raw_sendmsg,
	.recvmsg	= raw_recvmsg,
	.mmap		= sock_no_mmap,
};

static struct proto raw_proto __read_mostly = {
	.name		= "CAN_RAW",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct raw_sock),
	.init		= raw_init,
};

static const struct can_proto raw_can_proto = {
	.type		= SOCK_RAW,
	.protocol	= CAN_RAW,
	.ops		= &raw_ops,
	.prot		= &raw_proto,
};

static struct notifier_block canraw_notifier = {
	.notifier_call = raw_notifier
};

static __init int raw_module_init(void)
{
	int err;

	pr_info("can: raw protocol\n");

	err = register_netdevice_notifier(&canraw_notifier);
	if (err)
		return err;

	err = can_proto_register(&raw_can_proto);
	if (err < 0) {
		pr_err("can: registration of raw protocol failed\n");
		goto register_proto_failed;
	}

	return 0;

register_proto_failed:
	unregister_netdevice_notifier(&canraw_notifier);
	return err;
}

static __exit void raw_module_exit(void)
{
	can_proto_unregister(&raw_can_proto);
	unregister_netdevice_notifier(&canraw_notifier);
}

module_init(raw_module_init);
module_exit(raw_module_exit);