// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/* raw.c - Raw sockets for protocol family CAN
 *
 * Copyright (c) 2002-2007 Volkswagen Group Electronic Research
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Volkswagen nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * Alternatively, provided that this notice is retained in full, this
 * software may be distributed under the terms of the GNU General
 * Public License ("GPL") version 2, in which case the provisions of the
 * GPL apply INSTEAD OF those given above.
 *
 * The provided data structures and external interfaces from this code
 * are not restricted to be used by modules with a GPL compatible license.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/uio.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <linux/can.h>
#include <linux/can/core.h>
#include <linux/can/dev.h> /* for can_is_canxl_dev_mtu() */
#include <linux/can/skb.h>
#include <linux/can/raw.h>
#include <net/sock.h>
#include <net/net_namespace.h>

MODULE_DESCRIPTION("PF_CAN raw protocol");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Urs Thuermann <urs.thuermann@volkswagen.de>");
MODULE_ALIAS("can-proto-1");

#define RAW_MIN_NAMELEN CAN_REQUIRED_SIZE(struct sockaddr_can, can_ifindex)

#define MASK_ALL 0

/* A raw socket has a list of can_filters attached to it, each receiving
 * the CAN frames matching that filter. If the filter list is empty,
 * no CAN frames will be received by the socket. The default after
 * opening the socket is to have one filter which receives all frames.
 * The filter list is allocated dynamically with the exception of the
 * list containing only one item. This common case is optimized by
 * storing the single filter in dfilter, to avoid using dynamic memory.
 */
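/* Example (userspace, illustrative sketch only - not compiled into this
 * module): attaching two filters so the socket receives CAN ID 0x123
 * and the range 0x200-0x2FF. The option names are standard CAN_RAW API;
 * the ID/mask values are made up for illustration:
 *
 *	struct can_filter rfilter[2];
 *
 *	rfilter[0].can_id   = 0x123;
 *	rfilter[0].can_mask = CAN_SFF_MASK;
 *	rfilter[1].can_id   = 0x200;
 *	rfilter[1].can_mask = 0x700;
 *
 *	setsockopt(s, SOL_CAN_RAW, CAN_RAW_FILTER, &rfilter, sizeof(rfilter));
 *
 * With count > 1 the filters end up in dynamically allocated memory,
 * while a single filter is stored in dfilter (see above).
 */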
struct uniqframe {
	int skbcnt;
	const struct sk_buff *skb;
	unsigned int join_rx_count;
};

struct raw_sock {
	struct sock sk;
	int bound;
	int ifindex;
	struct net_device *dev;
	struct list_head notifier;
	int loopback;
	int recv_own_msgs;
	int fd_frames;
	int xl_frames;
	int join_filters;
	int count;                 /* number of active filters */
	struct can_filter dfilter; /* default/single filter */
	struct can_filter *filter; /* pointer to filter(s) */
	can_err_mask_t err_mask;
	struct uniqframe __percpu *uniq;
};

static LIST_HEAD(raw_notifier_list);
static DEFINE_SPINLOCK(raw_notifier_lock);
static struct raw_sock *raw_busy_notifier;

/* Return pointer to store the extra msg flags for raw_recvmsg().
 * We use the space of one unsigned int beyond the 'struct sockaddr_can'
 * in skb->cb.
 */
static inline unsigned int *raw_flags(struct sk_buff *skb)
{
	sock_skb_cb_check_size(sizeof(struct sockaddr_can) +
			       sizeof(unsigned int));

	/* return pointer after struct sockaddr_can */
	return (unsigned int *)(&((struct sockaddr_can *)skb->cb)[1]);
}

static inline struct raw_sock *raw_sk(const struct sock *sk)
{
	return (struct raw_sock *)sk;
}
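/* Example (userspace, illustrative sketch only): the loopback and
 * recv_own_msgs flags checked in raw_rcv() below are plain integers
 * toggled via setsockopt(). The values shown are assumptions for this
 * sketch:
 *
 *	int loopback = 1;	// see other sockets' TX frames (default)
 *	int recv_own = 1;	// also see this socket's own TX frames
 *
 *	setsockopt(s, SOL_CAN_RAW, CAN_RAW_LOOPBACK,
 *		   &loopback, sizeof(loopback));
 *	setsockopt(s, SOL_CAN_RAW, CAN_RAW_RECV_OWN_MSGS,
 *		   &recv_own, sizeof(recv_own));
 *
 * A frame that originated from this very socket is then delivered with
 * MSG_CONFIRM set in msg_flags; locally generated frames in general
 * carry MSG_DONTROUTE (see the flag handling in raw_rcv()).
 */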
static void raw_rcv(struct sk_buff *oskb, void *data)
{
	struct sock *sk = (struct sock *)data;
	struct raw_sock *ro = raw_sk(sk);
	struct sockaddr_can *addr;
	struct sk_buff *skb;
	unsigned int *pflags;

	/* check the received tx sock reference */
	if (!ro->recv_own_msgs && oskb->sk == sk)
		return;

	/* make sure to not pass oversized frames to the socket */
	if ((!ro->fd_frames && can_is_canfd_skb(oskb)) ||
	    (!ro->xl_frames && can_is_canxl_skb(oskb)))
		return;

	/* eliminate multiple filter matches for the same skb */
	if (this_cpu_ptr(ro->uniq)->skb == oskb &&
	    this_cpu_ptr(ro->uniq)->skbcnt == can_skb_prv(oskb)->skbcnt) {
		if (!ro->join_filters)
			return;

		this_cpu_inc(ro->uniq->join_rx_count);
		/* drop frame until all enabled filters matched */
		if (this_cpu_ptr(ro->uniq)->join_rx_count < ro->count)
			return;
	} else {
		this_cpu_ptr(ro->uniq)->skb = oskb;
		this_cpu_ptr(ro->uniq)->skbcnt = can_skb_prv(oskb)->skbcnt;
		this_cpu_ptr(ro->uniq)->join_rx_count = 1;
		/* drop first frame to check all enabled filters? */
		if (ro->join_filters && ro->count > 1)
			return;
	}

	/* clone the given skb to be able to enqueue it into the rcv queue */
	skb = skb_clone(oskb, GFP_ATOMIC);
	if (!skb)
		return;

	/* Put the datagram into the queue so that raw_recvmsg() can get
	 * it from there. We need to pass the interface index to
	 * raw_recvmsg(). We pass a whole struct sockaddr_can in
	 * skb->cb containing the interface index.
	 */

	sock_skb_cb_check_size(sizeof(struct sockaddr_can));
	addr = (struct sockaddr_can *)skb->cb;
	memset(addr, 0, sizeof(*addr));
	addr->can_family = AF_CAN;
	addr->can_ifindex = skb->dev->ifindex;

	/* add CAN specific message flags for raw_recvmsg() */
	pflags = raw_flags(skb);
	*pflags = 0;
	if (oskb->sk)
		*pflags |= MSG_DONTROUTE;
	if (oskb->sk == sk)
		*pflags |= MSG_CONFIRM;

	if (sock_queue_rcv_skb(sk, skb) < 0)
		kfree_skb(skb);
}

static int raw_enable_filters(struct net *net, struct net_device *dev,
			      struct sock *sk, struct can_filter *filter,
			      int count)
{
	int err = 0;
	int i;

	for (i = 0; i < count; i++) {
		err = can_rx_register(net, dev, filter[i].can_id,
				      filter[i].can_mask,
				      raw_rcv, sk, "raw", sk);
		if (err) {
			/* clean up successfully registered filters */
			while (--i >= 0)
				can_rx_unregister(net, dev, filter[i].can_id,
						  filter[i].can_mask,
						  raw_rcv, sk);
			break;
		}
	}

	return err;
}

static int raw_enable_errfilter(struct net *net, struct net_device *dev,
				struct sock *sk, can_err_mask_t err_mask)
{
	int err = 0;

	if (err_mask)
		err = can_rx_register(net, dev, 0, err_mask | CAN_ERR_FLAG,
				      raw_rcv, sk, "raw", sk);

	return err;
}

static void raw_disable_filters(struct net *net, struct net_device *dev,
				struct sock *sk, struct can_filter *filter,
				int count)
{
	int i;

	for (i = 0; i < count; i++)
		can_rx_unregister(net, dev, filter[i].can_id,
				  filter[i].can_mask, raw_rcv, sk);
}

static inline void raw_disable_errfilter(struct net *net,
					 struct net_device *dev,
					 struct sock *sk,
					 can_err_mask_t err_mask)
{
	if (err_mask)
		can_rx_unregister(net, dev, 0, err_mask | CAN_ERR_FLAG,
				  raw_rcv, sk);
}

static inline void raw_disable_allfilters(struct net *net,
					  struct net_device *dev,
					  struct sock *sk)
{
	struct raw_sock *ro = raw_sk(sk);

	raw_disable_filters(net, dev, sk, ro->filter, ro->count);
	raw_disable_errfilter(net, dev, sk, ro->err_mask);
}

static int raw_enable_allfilters(struct net *net, struct net_device *dev,
				 struct sock *sk)
{
	struct raw_sock *ro = raw_sk(sk);
	int err;

	err = raw_enable_filters(net, dev, sk, ro->filter, ro->count);
	if (!err) {
		err = raw_enable_errfilter(net, dev, sk, ro->err_mask);
		if (err)
			raw_disable_filters(net, dev, sk, ro->filter,
					    ro->count);
	}

	return err;
}
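/* Example (userspace, illustrative sketch only): subscribing to error
 * frames via the error mask that raw_enable_errfilter() registers above.
 * The mask value chosen here is an arbitrary example:
 *
 *	can_err_mask_t err_mask = CAN_ERR_TX_TIMEOUT | CAN_ERR_BUSOFF;
 *
 *	setsockopt(s, SOL_CAN_RAW, CAN_RAW_ERR_FILTER,
 *		   &err_mask, sizeof(err_mask));
 *
 * Matching error conditions are then delivered as regular CAN frames
 * with CAN_ERR_FLAG set in can_id (see linux/can/error.h).
 */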
static void raw_notify(struct raw_sock *ro, unsigned long msg,
		       struct net_device *dev)
{
	struct sock *sk = &ro->sk;

	if (!net_eq(dev_net(dev), sock_net(sk)))
		return;

	if (ro->dev != dev)
		return;

	switch (msg) {
	case NETDEV_UNREGISTER:
		lock_sock(sk);
		/* remove current filters & unregister */
		if (ro->bound)
			raw_disable_allfilters(dev_net(dev), dev, sk);

		if (ro->count > 1)
			kfree(ro->filter);

		ro->ifindex = 0;
		ro->bound = 0;
		ro->dev = NULL;
		ro->count = 0;
		release_sock(sk);

		sk->sk_err = ENODEV;
		if (!sock_flag(sk, SOCK_DEAD))
			sk_error_report(sk);
		break;

	case NETDEV_DOWN:
		sk->sk_err = ENETDOWN;
		if (!sock_flag(sk, SOCK_DEAD))
			sk_error_report(sk);
		break;
	}
}

static int raw_notifier(struct notifier_block *nb, unsigned long msg,
			void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (dev->type != ARPHRD_CAN)
		return NOTIFY_DONE;
	if (msg != NETDEV_UNREGISTER && msg != NETDEV_DOWN)
		return NOTIFY_DONE;
	if (unlikely(raw_busy_notifier)) /* Check for reentrant bug. */
		return NOTIFY_DONE;

	spin_lock(&raw_notifier_lock);
	list_for_each_entry(raw_busy_notifier, &raw_notifier_list, notifier) {
		spin_unlock(&raw_notifier_lock);
		raw_notify(raw_busy_notifier, msg, dev);
		spin_lock(&raw_notifier_lock);
	}
	raw_busy_notifier = NULL;
	spin_unlock(&raw_notifier_lock);
	return NOTIFY_DONE;
}

static int raw_init(struct sock *sk)
{
	struct raw_sock *ro = raw_sk(sk);

	ro->bound = 0;
	ro->ifindex = 0;
	ro->dev = NULL;

	/* set default filter to single entry dfilter */
	ro->dfilter.can_id = 0;
	ro->dfilter.can_mask = MASK_ALL;
	ro->filter = &ro->dfilter;
	ro->count = 1;

	/* set default loopback behaviour */
	ro->loopback = 1;
	ro->recv_own_msgs = 0;
	ro->fd_frames = 0;
	ro->xl_frames = 0;
	ro->join_filters = 0;

	/* alloc_percpu provides zero'ed memory */
	ro->uniq = alloc_percpu(struct uniqframe);
	if (unlikely(!ro->uniq))
		return -ENOMEM;

	/* set notifier */
	spin_lock(&raw_notifier_lock);
	list_add_tail(&ro->notifier, &raw_notifier_list);
	spin_unlock(&raw_notifier_lock);

	return 0;
}

static int raw_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct raw_sock *ro;

	if (!sk)
		return 0;

	ro = raw_sk(sk);

	spin_lock(&raw_notifier_lock);
	while (raw_busy_notifier == ro) {
		spin_unlock(&raw_notifier_lock);
		schedule_timeout_uninterruptible(1);
		spin_lock(&raw_notifier_lock);
	}
	list_del(&ro->notifier);
	spin_unlock(&raw_notifier_lock);

	rtnl_lock();
	lock_sock(sk);

	/* remove current filters & unregister */
	if (ro->bound) {
		if (ro->dev)
			raw_disable_allfilters(dev_net(ro->dev), ro->dev, sk);
		else
			raw_disable_allfilters(sock_net(sk), NULL, sk);
	}

	if (ro->count > 1)
		kfree(ro->filter);

	ro->ifindex = 0;
	ro->bound = 0;
	ro->dev = NULL;
	ro->count = 0;
	free_percpu(ro->uniq);

	sock_orphan(sk);
	sock->sk = NULL;

	release_sock(sk);
	rtnl_unlock();

	sock_put(sk);

	return 0;
}
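/* Example (userspace, illustrative sketch only): the usual open/bind
 * sequence served by raw_bind() below. "can0" is an assumed interface
 * name; binding with can_ifindex = 0 would attach the socket to all
 * CAN interfaces instead:
 *
 *	int s = socket(PF_CAN, SOCK_RAW, CAN_RAW);
 *	struct sockaddr_can addr = {
 *		.can_family  = AF_CAN,
 *		.can_ifindex = if_nametoindex("can0"),
 *	};
 *
 *	bind(s, (struct sockaddr *)&addr, sizeof(addr));
 */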
static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len)
{
	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
	struct sock *sk = sock->sk;
	struct raw_sock *ro = raw_sk(sk);
	struct net_device *dev = NULL;
	int ifindex;
	int err = 0;
	int notify_enetdown = 0;

	if (len < RAW_MIN_NAMELEN)
		return -EINVAL;
	if (addr->can_family != AF_CAN)
		return -EINVAL;

	rtnl_lock();
	lock_sock(sk);

	if (ro->bound && addr->can_ifindex == ro->ifindex)
		goto out;

	if (addr->can_ifindex) {
		dev = dev_get_by_index(sock_net(sk), addr->can_ifindex);
		if (!dev) {
			err = -ENODEV;
			goto out;
		}
		if (dev->type != ARPHRD_CAN) {
			dev_put(dev);
			err = -ENODEV;
			goto out;
		}
		if (!(dev->flags & IFF_UP))
			notify_enetdown = 1;

		ifindex = dev->ifindex;

		/* filters set by default/setsockopt */
		err = raw_enable_allfilters(sock_net(sk), dev, sk);
		dev_put(dev);
	} else {
		ifindex = 0;

		/* filters set by default/setsockopt */
		err = raw_enable_allfilters(sock_net(sk), NULL, sk);
	}

	if (!err) {
		if (ro->bound) {
			/* unregister old filters */
			if (ro->dev)
				raw_disable_allfilters(dev_net(ro->dev),
						       ro->dev, sk);
			else
				raw_disable_allfilters(sock_net(sk), NULL, sk);
		}
		ro->ifindex = ifindex;
		ro->bound = 1;
		ro->dev = dev;
	}

 out:
	release_sock(sk);
	rtnl_unlock();

	if (notify_enetdown) {
		sk->sk_err = ENETDOWN;
		if (!sock_flag(sk, SOCK_DEAD))
			sk_error_report(sk);
	}

	return err;
}

static int raw_getname(struct socket *sock, struct sockaddr *uaddr,
		       int peer)
{
	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
	struct sock *sk = sock->sk;
	struct raw_sock *ro = raw_sk(sk);

	if (peer)
		return -EOPNOTSUPP;

	memset(addr, 0, RAW_MIN_NAMELEN);
	addr->can_family = AF_CAN;
	addr->can_ifindex = ro->ifindex;

	return RAW_MIN_NAMELEN;
}
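/* Example (userspace, illustrative sketch only) for the CAN_RAW_FILTER
 * handling in raw_setsockopt() below. A single filter ends up in the
 * socket's dfilter; an optlen of zero installs an empty list, which
 * disables reception entirely. The CAN ID is an arbitrary example:
 *
 *	struct can_filter rfilter = {
 *		.can_id   = 0x123,
 *		.can_mask = CAN_SFF_MASK,
 *	};
 *
 *	setsockopt(s, SOL_CAN_RAW, CAN_RAW_FILTER, &rfilter, sizeof(rfilter));
 *
 *	setsockopt(s, SOL_CAN_RAW, CAN_RAW_FILTER, NULL, 0); // receive nothing
 */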
static int raw_setsockopt(struct socket *sock, int level, int optname,
			  sockptr_t optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct raw_sock *ro = raw_sk(sk);
	struct can_filter *filter = NULL; /* dyn. alloc'ed filters */
	struct can_filter sfilter;	  /* single filter */
	struct net_device *dev = NULL;
	can_err_mask_t err_mask = 0;
	int fd_frames;
	int count = 0;
	int err = 0;

	if (level != SOL_CAN_RAW)
		return -EINVAL;

	switch (optname) {
	case CAN_RAW_FILTER:
		if (optlen % sizeof(struct can_filter) != 0)
			return -EINVAL;

		if (optlen > CAN_RAW_FILTER_MAX * sizeof(struct can_filter))
			return -EINVAL;

		count = optlen / sizeof(struct can_filter);

		if (count > 1) {
			/* filter does not fit into dfilter => alloc space */
			filter = memdup_sockptr(optval, optlen);
			if (IS_ERR(filter))
				return PTR_ERR(filter);
		} else if (count == 1) {
			if (copy_from_sockptr(&sfilter, optval, sizeof(sfilter)))
				return -EFAULT;
		}

		rtnl_lock();
		lock_sock(sk);

		dev = ro->dev;
		if (ro->bound && dev) {
			if (dev->reg_state != NETREG_REGISTERED) {
				if (count > 1)
					kfree(filter);
				err = -ENODEV;
				goto out_fil;
			}
		}

		if (ro->bound) {
			/* (try to) register the new filters */
			if (count == 1)
				err = raw_enable_filters(sock_net(sk), dev, sk,
							 &sfilter, 1);
			else
				err = raw_enable_filters(sock_net(sk), dev, sk,
							 filter, count);
			if (err) {
				if (count > 1)
					kfree(filter);
				goto out_fil;
			}

			/* remove old filter registrations */
			raw_disable_filters(sock_net(sk), dev, sk, ro->filter,
					    ro->count);
		}

		/* remove old filter space */
		if (ro->count > 1)
			kfree(ro->filter);

		/* link new filters to the socket */
		if (count == 1) {
			/* copy filter data for single filter */
			ro->dfilter = sfilter;
			filter = &ro->dfilter;
		}
		ro->filter = filter;
		ro->count = count;

 out_fil:
		release_sock(sk);
		rtnl_unlock();

		break;

	case CAN_RAW_ERR_FILTER:
		if (optlen != sizeof(err_mask))
			return -EINVAL;

		if (copy_from_sockptr(&err_mask, optval, optlen))
			return -EFAULT;

		err_mask &= CAN_ERR_MASK;

		rtnl_lock();
		lock_sock(sk);

		dev = ro->dev;
		if (ro->bound && dev) {
			if (dev->reg_state != NETREG_REGISTERED) {
				err = -ENODEV;
				goto out_err;
			}
		}

		/* remove current error mask */
		if (ro->bound) {
			/* (try to) register the new err_mask */
			err = raw_enable_errfilter(sock_net(sk), dev, sk,
						   err_mask);
			if (err)
				goto out_err;

			/* remove old err_mask registration */
			raw_disable_errfilter(sock_net(sk), dev, sk,
					      ro->err_mask);
		}

		/* link new err_mask to the socket */
		ro->err_mask = err_mask;

 out_err:
		release_sock(sk);
		rtnl_unlock();

		break;

	case CAN_RAW_LOOPBACK:
		if (optlen != sizeof(ro->loopback))
			return -EINVAL;

		if (copy_from_sockptr(&ro->loopback, optval, optlen))
			return -EFAULT;

		break;

	case CAN_RAW_RECV_OWN_MSGS:
		if (optlen != sizeof(ro->recv_own_msgs))
			return -EINVAL;

		if (copy_from_sockptr(&ro->recv_own_msgs, optval, optlen))
			return -EFAULT;

		break;

	case CAN_RAW_FD_FRAMES:
		if (optlen != sizeof(fd_frames))
			return -EINVAL;

		if (copy_from_sockptr(&fd_frames, optval, optlen))
			return -EFAULT;

		/* Enabling CAN XL includes CAN FD */
		if (ro->xl_frames && !fd_frames)
			return -EINVAL;

		ro->fd_frames = fd_frames;
		break;

	case CAN_RAW_XL_FRAMES:
		if (optlen != sizeof(ro->xl_frames))
			return -EINVAL;

		if (copy_from_sockptr(&ro->xl_frames, optval, optlen))
			return -EFAULT;

		/* Enabling CAN XL includes CAN FD */
		if (ro->xl_frames)
			ro->fd_frames = ro->xl_frames;
		break;

	case CAN_RAW_JOIN_FILTERS:
		if (optlen != sizeof(ro->join_filters))
			return -EINVAL;

		if (copy_from_sockptr(&ro->join_filters, optval, optlen))
			return -EFAULT;

		break;

	default:
		return -ENOPROTOOPT;
	}
	return err;
}
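/* Example (userspace, illustrative sketch only): enabling CAN FD so
 * that struct canfd_frame can be sent and received on this socket.
 * CAN_RAW_XL_FRAMES works the same way and implicitly enables CAN FD
 * (see the setsockopt cases above):
 *
 *	int enable = 1;
 *
 *	setsockopt(s, SOL_CAN_RAW, CAN_RAW_FD_FRAMES,
 *		   &enable, sizeof(enable));
 */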
static int raw_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct raw_sock *ro = raw_sk(sk);
	int len;
	void *val;
	int err = 0;

	if (level != SOL_CAN_RAW)
		return -EINVAL;
	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case CAN_RAW_FILTER:
		lock_sock(sk);
		if (ro->count > 0) {
			int fsize = ro->count * sizeof(struct can_filter);

			/* user space buffer too small for filter list? */
			if (len < fsize) {
				/* return -ERANGE and needed space in optlen */
				err = -ERANGE;
				if (put_user(fsize, optlen))
					err = -EFAULT;
			} else {
				if (len > fsize)
					len = fsize;
				if (copy_to_user(optval, ro->filter, len))
					err = -EFAULT;
			}
		} else {
			len = 0;
		}
		release_sock(sk);

		if (!err)
			err = put_user(len, optlen);
		return err;

	case CAN_RAW_ERR_FILTER:
		if (len > sizeof(can_err_mask_t))
			len = sizeof(can_err_mask_t);
		val = &ro->err_mask;
		break;

	case CAN_RAW_LOOPBACK:
		if (len > sizeof(int))
			len = sizeof(int);
		val = &ro->loopback;
		break;

	case CAN_RAW_RECV_OWN_MSGS:
		if (len > sizeof(int))
			len = sizeof(int);
		val = &ro->recv_own_msgs;
		break;

	case CAN_RAW_FD_FRAMES:
		if (len > sizeof(int))
			len = sizeof(int);
		val = &ro->fd_frames;
		break;

	case CAN_RAW_XL_FRAMES:
		if (len > sizeof(int))
			len = sizeof(int);
		val = &ro->xl_frames;
		break;

	case CAN_RAW_JOIN_FILTERS:
		if (len > sizeof(int))
			len = sizeof(int);
		val = &ro->join_filters;
		break;

	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, val, len))
		return -EFAULT;
	return 0;
}

static bool raw_bad_txframe(struct raw_sock *ro, struct sk_buff *skb, int mtu)
{
	/* Classical CAN -> no checks for flags and device capabilities */
	if (can_is_can_skb(skb))
		return false;

	/* CAN FD -> needs to be enabled and a CAN FD or CAN XL device */
	if (ro->fd_frames && can_is_canfd_skb(skb) &&
	    (mtu == CANFD_MTU || can_is_canxl_dev_mtu(mtu)))
		return false;

	/* CAN XL -> needs to be enabled and a CAN XL device */
	if (ro->xl_frames && can_is_canxl_skb(skb) &&
	    can_is_canxl_dev_mtu(mtu))
		return false;

	return true;
}
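/* Example (userspace, illustrative sketch only): transmitting a classic
 * CAN frame through raw_sendmsg() below via a plain write(). ID and
 * payload are arbitrary example values:
 *
 *	struct can_frame frame = {
 *		.can_id = 0x123,
 *		.len    = 2,
 *		.data   = { 0x11, 0x22 },
 *	};
 *
 *	write(s, &frame, sizeof(frame));
 *
 * A socket bound to ifindex 0 must supply the destination interface
 * via sendto() with a filled struct sockaddr_can instead.
 */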
static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
	struct sock *sk = sock->sk;
	struct raw_sock *ro = raw_sk(sk);
	struct sockcm_cookie sockc;
	struct sk_buff *skb;
	struct net_device *dev;
	int ifindex;
	int err = -EINVAL;

	/* check for valid CAN frame sizes */
	if (size < CANXL_HDR_SIZE + CANXL_MIN_DLEN || size > CANXL_MTU)
		return -EINVAL;

	if (msg->msg_name) {
		DECLARE_SOCKADDR(struct sockaddr_can *, addr, msg->msg_name);

		if (msg->msg_namelen < RAW_MIN_NAMELEN)
			return -EINVAL;

		if (addr->can_family != AF_CAN)
			return -EINVAL;

		ifindex = addr->can_ifindex;
	} else {
		ifindex = ro->ifindex;
	}

	dev = dev_get_by_index(sock_net(sk), ifindex);
	if (!dev)
		return -ENXIO;

	skb = sock_alloc_send_skb(sk, size + sizeof(struct can_skb_priv),
				  msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto put_dev;

	can_skb_reserve(skb);
	can_skb_prv(skb)->ifindex = dev->ifindex;
	can_skb_prv(skb)->skbcnt = 0;

	/* fill the skb before testing for valid CAN frames */
	err = memcpy_from_msg(skb_put(skb, size), msg, size);
	if (err < 0)
		goto free_skb;

	err = -EINVAL;
	if (raw_bad_txframe(ro, skb, dev->mtu))
		goto free_skb;

	sockcm_init(&sockc, sk);
	if (msg->msg_controllen) {
		err = sock_cmsg_send(sk, msg, &sockc);
		if (unlikely(err))
			goto free_skb;
	}

	skb->dev = dev;
	skb->priority = sk->sk_priority;
	skb->mark = READ_ONCE(sk->sk_mark);
	skb->tstamp = sockc.transmit_time;

	skb_setup_tx_timestamp(skb, sockc.tsflags);

	err = can_send(skb, ro->loopback);

	dev_put(dev);

	if (err)
		goto send_failed;

	return size;

 free_skb:
	kfree_skb(skb);
 put_dev:
	dev_put(dev);
 send_failed:
	return err;
}

static int raw_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
		       int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int err = 0;

	if (flags & MSG_ERRQUEUE)
		return sock_recv_errqueue(sk, msg, size,
					  SOL_CAN_RAW, SCM_CAN_RAW_ERRQUEUE);

	skb = skb_recv_datagram(sk, flags, &err);
	if (!skb)
		return err;

	if (size < skb->len)
		msg->msg_flags |= MSG_TRUNC;
	else
		size = skb->len;

	err = memcpy_to_msg(msg, skb->data, size);
	if (err < 0) {
		skb_free_datagram(sk, skb);
		return err;
	}

	sock_recv_cmsgs(msg, sk, skb);

	if (msg->msg_name) {
		__sockaddr_check_size(RAW_MIN_NAMELEN);
		msg->msg_namelen = RAW_MIN_NAMELEN;
		memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
	}

	/* assign the flags that have been recorded in raw_rcv() */
	msg->msg_flags |= *(raw_flags(skb));

	skb_free_datagram(sk, skb);

	return size;
}

static int raw_sock_no_ioctlcmd(struct socket *sock, unsigned int cmd,
				unsigned long arg)
{
	/* no ioctls for socket layer -> hand it down to NIC layer */
	return -ENOIOCTLCMD;
}

static const struct proto_ops raw_ops = {
	.family = PF_CAN,
	.release = raw_release,
	.bind = raw_bind,
	.connect = sock_no_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = raw_getname,
	.poll = datagram_poll,
	.ioctl = raw_sock_no_ioctlcmd,
	.gettstamp = sock_gettstamp,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.setsockopt = raw_setsockopt,
	.getsockopt = raw_getsockopt,
	.sendmsg = raw_sendmsg,
	.recvmsg = raw_recvmsg,
	.mmap = sock_no_mmap,
};

static struct proto raw_proto __read_mostly = {
	.name = "CAN_RAW",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct raw_sock),
	.init = raw_init,
};

static const struct can_proto raw_can_proto = {
	.type = SOCK_RAW,
	.protocol = CAN_RAW,
	.ops = &raw_ops,
	.prot = &raw_proto,
};

static struct notifier_block canraw_notifier = {
	.notifier_call = raw_notifier
};

static __init int raw_module_init(void)
{
	int err;

	pr_info("can: raw protocol\n");

	err = register_netdevice_notifier(&canraw_notifier);
	if (err)
		return err;

	err = can_proto_register(&raw_can_proto);
	if (err < 0) {
		pr_err("can: registration of raw protocol failed\n");
		goto register_proto_failed;
	}

	return 0;

 register_proto_failed:
	unregister_netdevice_notifier(&canraw_notifier);
	return err;
}

static __exit void raw_module_exit(void)
{
	can_proto_unregister(&raw_can_proto);
	unregister_netdevice_notifier(&canraw_notifier);
}

module_init(raw_module_init);
module_exit(raw_module_exit);
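/* Example (userspace, illustrative sketch only): the matching receive
 * side served by raw_recvmsg(). A read() of sizeof(struct canfd_frame)
 * also covers classic frames; the returned byte count tells them apart:
 *
 *	struct canfd_frame frame;
 *	ssize_t n = read(s, &frame, sizeof(frame));
 *
 *	if (n == CAN_MTU)
 *		; // classic CAN frame
 *	else if (n == CANFD_MTU)
 *		; // CAN FD frame (CAN_RAW_FD_FRAMES enabled)
 */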