/*
 * af_can.c - Protocol family CAN core module
 *            (used by different CAN protocol modules)
 *
 * Copyright (c) 2002-2017 Volkswagen Group Electronic Research
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Volkswagen nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * Alternatively, provided that this notice is retained in full, this
 * software may be distributed under the terms of the GNU General
 * Public License ("GPL") version 2, in which case the provisions of the
 * GPL apply INSTEAD OF those given above.
 *
 * The provided data structures and external interfaces from this code
 * are not restricted to be used by modules with a GPL compatible license.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 */

#include <linux/module.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/uaccess.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/if_ether.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <linux/can.h>
#include <linux/can/core.h>
#include <linux/can/skb.h>
#include <linux/ratelimit.h>
#include <net/net_namespace.h>
#include <net/sock.h>

#include "af_can.h"

MODULE_DESCRIPTION("Controller Area Network PF_CAN core");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Urs Thuermann <urs.thuermann@volkswagen.de>, "
              "Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");

MODULE_ALIAS_NETPROTO(PF_CAN);

static int stats_timer __read_mostly = 1;
module_param(stats_timer, int, S_IRUGO);
MODULE_PARM_DESC(stats_timer, "enable timer for statistics (default:on)");

static struct kmem_cache *rcv_cache __read_mostly;

/* table of registered CAN protocols */
static const struct can_proto __rcu *proto_tab[CAN_NPROTO] __read_mostly;
static DEFINE_MUTEX(proto_tab_lock);

static atomic_t skbcounter = ATOMIC_INIT(0);

/*
 * af_can socket functions
 */

int can_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
        struct sock *sk = sock->sk;

        switch (cmd) {

        case SIOCGSTAMP:
                return sock_get_timestamp(sk, (struct timeval __user *)arg);

        default:
                return -ENOIOCTLCMD;
        }
}
EXPORT_SYMBOL(can_ioctl);

static void can_sock_destruct(struct sock *sk)
{
        skb_queue_purge(&sk->sk_receive_queue);
}

static const struct can_proto *can_get_proto(int protocol)
{
        const struct can_proto *cp;

        rcu_read_lock();
        cp = rcu_dereference(proto_tab[protocol]);
        if (cp && !try_module_get(cp->prot->owner))
                cp = NULL;
        rcu_read_unlock();

        return cp;
}

static inline void can_put_proto(const struct can_proto *cp)
{
        module_put(cp->prot->owner);
}

static int can_create(struct net *net, struct socket *sock, int protocol,
                      int kern)
{
        struct sock *sk;
        const struct can_proto *cp;
        int err = 0;

        sock->state = SS_UNCONNECTED;

        if (protocol < 0 || protocol >= CAN_NPROTO)
                return -EINVAL;

        cp = can_get_proto(protocol);

#ifdef CONFIG_MODULES
        if (!cp) {
                /* try to load protocol module if kernel is modular */

                err = request_module("can-proto-%d", protocol);

                /*
                 * In case of error we only print a message but don't
                 * return the error code immediately. Below we will
                 * return -EPROTONOSUPPORT
                 */
                if (err)
                        printk_ratelimited(KERN_ERR "can: request_module "
                                           "(can-proto-%d) failed.\n", protocol);

                cp = can_get_proto(protocol);
        }
#endif

        /* check for available protocol and correct usage */

        if (!cp)
                return -EPROTONOSUPPORT;

        if (cp->type != sock->type) {
                err = -EPROTOTYPE;
                goto errout;
        }

        sock->ops = cp->ops;

        sk = sk_alloc(net, PF_CAN, GFP_KERNEL, cp->prot, kern);
        if (!sk) {
                err = -ENOMEM;
                goto errout;
        }

        sock_init_data(sock, sk);
        sk->sk_destruct = can_sock_destruct;

        if (sk->sk_prot->init)
                err = sk->sk_prot->init(sk);

        if (err) {
                /* release sk on errors */
                sock_orphan(sk);
                sock_put(sk);
        }

errout:
        can_put_proto(cp);
        return err;
}

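/*
 * Illustrative note (not part of the original file): from userspace, the
 * third argument of socket(2) selects the entry in proto_tab[] that
 * can_create() dispatches to, e.g. for the raw protocol:
 *
 *      int s = socket(PF_CAN, SOCK_RAW, CAN_RAW);
 *
 * With CONFIG_MODULES enabled, an unregistered protocol number first
 * triggers request_module("can-proto-<n>") and a second lookup before
 * -EPROTONOSUPPORT is returned.
 */
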
/*
 * af_can tx path
 */

/**
 * can_send - transmit a CAN frame (optionally with local loopback)
 * @skb: pointer to socket buffer with CAN frame in data section
 * @loop: loopback for listeners on local CAN sockets (recommended default!)
 *
 * Due to the loopback this routine must not be called from hardirq context.
 *
 * Return:
 *  0 on success
 *  -ENETDOWN when the selected interface is down
 *  -ENOBUFS on full driver queue (see net_xmit_errno())
 *  -ENOMEM when local loopback failed at calling skb_clone()
 *  -EPERM when trying to send on a non-CAN interface
 *  -EMSGSIZE when the CAN frame size is bigger than the CAN interface MTU
 *  -EINVAL when the skb->data does not contain a valid CAN frame
 */
int can_send(struct sk_buff *skb, int loop)
{
        struct sk_buff *newskb = NULL;
        struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
        struct s_stats *can_stats = dev_net(skb->dev)->can.can_stats;
        int err = -EINVAL;

        if (skb->len == CAN_MTU) {
                skb->protocol = htons(ETH_P_CAN);
                if (unlikely(cfd->len > CAN_MAX_DLEN))
                        goto inval_skb;
        } else if (skb->len == CANFD_MTU) {
                skb->protocol = htons(ETH_P_CANFD);
                if (unlikely(cfd->len > CANFD_MAX_DLEN))
                        goto inval_skb;
        } else
                goto inval_skb;

        /*
         * Make sure the CAN frame can pass the selected CAN netdevice.
         * As structs can_frame and canfd_frame are similar, we can provide
         * CAN FD frames to legacy CAN drivers as long as the length is <= 8
         */
        if (unlikely(skb->len > skb->dev->mtu && cfd->len > CAN_MAX_DLEN)) {
                err = -EMSGSIZE;
                goto inval_skb;
        }

        if (unlikely(skb->dev->type != ARPHRD_CAN)) {
                err = -EPERM;
                goto inval_skb;
        }

        if (unlikely(!(skb->dev->flags & IFF_UP))) {
                err = -ENETDOWN;
                goto inval_skb;
        }

        skb->ip_summed = CHECKSUM_UNNECESSARY;

        skb_reset_mac_header(skb);
        skb_reset_network_header(skb);
        skb_reset_transport_header(skb);

        if (loop) {
                /* local loopback of sent CAN frames */

                /* indication for the CAN driver: do loopback */
                skb->pkt_type = PACKET_LOOPBACK;

                /*
                 * The reference to the originating sock may be required
                 * by the receiving socket to check whether the frame is
                 * its own. Example: can_raw sockopt CAN_RAW_RECV_OWN_MSGS
                 * Therefore we have to ensure that skb->sk remains the
                 * reference to the originating sock by restoring skb->sk
                 * after each skb_clone() or skb_orphan() usage.
                 */

                if (!(skb->dev->flags & IFF_ECHO)) {
                        /*
                         * If the interface is not capable of doing the
                         * loopback itself, we do it here.
                         */
                        newskb = skb_clone(skb, GFP_ATOMIC);
                        if (!newskb) {
                                kfree_skb(skb);
                                return -ENOMEM;
                        }

                        can_skb_set_owner(newskb, skb->sk);
                        newskb->ip_summed = CHECKSUM_UNNECESSARY;
                        newskb->pkt_type = PACKET_BROADCAST;
                }
        } else {
                /* indication for the CAN driver: no loopback required */
                skb->pkt_type = PACKET_HOST;
        }

        /* send to netdevice */
        err = dev_queue_xmit(skb);
        if (err > 0)
                err = net_xmit_errno(err);

        if (err) {
                kfree_skb(newskb);
                return err;
        }

        if (newskb)
                netif_rx_ni(newskb);

        /* update statistics */
        can_stats->tx_frames++;
        can_stats->tx_frames_delta++;

        return 0;

inval_skb:
        kfree_skb(skb);
        return err;
}
EXPORT_SYMBOL(can_send);

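/*
 * Usage sketch for can_send() (illustrative, not part of the original file;
 * error handling omitted, 'sk', 'dev' and 'frame' are assumed to exist):
 * a protocol module typically builds the skb roughly along these lines,
 * keeping the can_skb_priv header in front of the frame data:
 *
 *      skb = sock_alloc_send_skb(sk, CAN_MTU + sizeof(struct can_skb_priv),
 *                                0, &err);
 *      can_skb_reserve(skb);
 *      can_skb_prv(skb)->ifindex = dev->ifindex;
 *      can_skb_prv(skb)->skbcnt = 0;
 *      skb_put_data(skb, &frame, CAN_MTU);
 *      skb->dev = dev;
 *      skb->sk = sk;
 *      err = can_send(skb, 1);
 *
 * The second argument enables the local loopback described above.
 */
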
/*
 * af_can rx path
 */

static struct dev_rcv_lists *find_dev_rcv_lists(struct net *net,
                                                struct net_device *dev)
{
        if (!dev)
                return net->can.can_rx_alldev_list;
        else
                return (struct dev_rcv_lists *)dev->ml_priv;
}

/**
 * effhash - hash function for 29 bit CAN identifier reduction
 * @can_id: 29 bit CAN identifier
 *
 * Description:
 *  To reduce the linear traversal in one linked list of _single_ EFF CAN
 *  frame subscriptions the 29 bit identifier is mapped to 10 bits.
 *  (see CAN_EFF_RCV_HASH_BITS definition)
 *
 * Return:
 *  Hash value from 0x000 - 0x3FF (enforced by the CAN_EFF_RCV_HASH_BITS mask)
 */
static unsigned int effhash(canid_t can_id)
{
        unsigned int hash;

        hash = can_id;
        hash ^= can_id >> CAN_EFF_RCV_HASH_BITS;
        hash ^= can_id >> (2 * CAN_EFF_RCV_HASH_BITS);

        return hash & ((1 << CAN_EFF_RCV_HASH_BITS) - 1);
}

/**
 * find_rcv_list - determine optimal filterlist inside device filter struct
 * @can_id: pointer to CAN identifier of a given can_filter
 * @mask: pointer to CAN mask of a given can_filter
 * @d: pointer to the device filter struct
 *
 * Description:
 *  Returns the optimal filterlist to reduce the filter handling in the
 *  receive path. This function is called by service functions that need
 *  to register or unregister a can_filter in the filter lists.
 *
 *  A filter matches in general, when
 *
 *          <received_can_id> & mask == can_id & mask
 *
 *  so every bit set in the mask (even CAN_EFF_FLAG, CAN_RTR_FLAG) describes
 *  relevant bits for the filter.
 *
 *  The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can
 *  filter for error messages (CAN_ERR_FLAG bit set in mask). For error msg
 *  frames there is a special filterlist and a special rx path filter handling.
 *
 * Return:
 *  Pointer to optimal filterlist for the given can_id/mask pair.
 *  Consistency checked mask.
 *  Reduced can_id to have a preprocessed filter compare value.
 */
static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
                                        struct dev_rcv_lists *d)
{
        canid_t inv = *can_id & CAN_INV_FILTER; /* save flag before masking */

        /* filter for error message frames in extra filterlist */
        if (*mask & CAN_ERR_FLAG) {
                /* clear CAN_ERR_FLAG in filter entry */
                *mask &= CAN_ERR_MASK;
                return &d->rx[RX_ERR];
        }

        /* with cleared CAN_ERR_FLAG we have a simple mask/value filterpair */

#define CAN_EFF_RTR_FLAGS (CAN_EFF_FLAG | CAN_RTR_FLAG)

        /* ensure valid values in can_mask for 'SFF only' frame filtering */
        if ((*mask & CAN_EFF_FLAG) && !(*can_id & CAN_EFF_FLAG))
                *mask &= (CAN_SFF_MASK | CAN_EFF_RTR_FLAGS);

        /* reduce condition testing at receive time */
        *can_id &= *mask;

        /* inverse can_id/can_mask filter */
        if (inv)
                return &d->rx[RX_INV];

        /* mask == 0 => no condition testing at receive time */
        if (!(*mask))
                return &d->rx[RX_ALL];

        /* extra filterlists for the subscription of a single non-RTR can_id */
        if (((*mask & CAN_EFF_RTR_FLAGS) == CAN_EFF_RTR_FLAGS) &&
            !(*can_id & CAN_RTR_FLAG)) {

                if (*can_id & CAN_EFF_FLAG) {
                        if (*mask == (CAN_EFF_MASK | CAN_EFF_RTR_FLAGS))
                                return &d->rx_eff[effhash(*can_id)];
                } else {
                        if (*mask == (CAN_SFF_MASK | CAN_EFF_RTR_FLAGS))
                                return &d->rx_sff[*can_id];
                }
        }

        /* default: filter via can_id/can_mask */
        return &d->rx[RX_FIL];
}

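/*
 * Classification examples for find_rcv_list() (illustrative, not part of
 * the original file):
 *
 *      can_id = 0x123, mask = CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG
 *              -> rx_sff[0x123]        (single SFF id, non-RTR)
 *      can_id = 0x123 | CAN_INV_FILTER, mask = CAN_SFF_MASK
 *              -> rx[RX_INV]           (inverted filter)
 *      can_id = 0, mask = 0
 *              -> rx[RX_ALL]           (no condition testing at receive time)
 *      any filter with CAN_ERR_FLAG set in the mask
 *              -> rx[RX_ERR]           (error message frames)
 */
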
/**
 * can_rx_register - subscribe CAN frames from a specific interface
 * @net: the applicable net namespace
 * @dev: pointer to netdevice (NULL => subscribe from 'all' CAN devices list)
 * @can_id: CAN identifier (see description)
 * @mask: CAN mask (see description)
 * @func: callback function on filter match
 * @data: returned parameter for callback function
 * @ident: string for calling module identification
 * @sk: socket pointer (might be NULL)
 *
 * Description:
 *  Invokes the callback function with the received sk_buff and the given
 *  parameter 'data' on a matching receive filter. A filter matches, when
 *
 *          <received_can_id> & mask == can_id & mask
 *
 *  The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can
 *  filter for error message frames (CAN_ERR_FLAG bit set in mask).
 *
 *  The provided pointer to the sk_buff is guaranteed to be valid as long as
 *  the callback function is running. The callback function must *not* free
 *  the given sk_buff while processing its task. When the given sk_buff is
 *  needed after the end of the callback function it must be cloned inside
 *  the callback function with skb_clone().
 *
 * Return:
 *  0 on success
 *  -ENOMEM on missing cache mem to create subscription entry
 *  -ENODEV unknown device
 */
int can_rx_register(struct net *net, struct net_device *dev, canid_t can_id,
                    canid_t mask, void (*func)(struct sk_buff *, void *),
                    void *data, char *ident, struct sock *sk)
{
        struct receiver *r;
        struct hlist_head *rl;
        struct dev_rcv_lists *d;
        struct s_pstats *can_pstats = net->can.can_pstats;
        int err = 0;

        /* insert new receiver  (dev,canid,mask) -> (func,data) */

        if (dev && dev->type != ARPHRD_CAN)
                return -ENODEV;

        if (dev && !net_eq(net, dev_net(dev)))
                return -ENODEV;

        r = kmem_cache_alloc(rcv_cache, GFP_KERNEL);
        if (!r)
                return -ENOMEM;

        spin_lock(&net->can.can_rcvlists_lock);

        d = find_dev_rcv_lists(net, dev);
        if (d) {
                rl = find_rcv_list(&can_id, &mask, d);

                r->can_id  = can_id;
                r->mask    = mask;
                r->matches = 0;
                r->func    = func;
                r->data    = data;
                r->ident   = ident;
                r->sk      = sk;

                hlist_add_head_rcu(&r->list, rl);
                d->entries++;

                can_pstats->rcv_entries++;
                if (can_pstats->rcv_entries_max < can_pstats->rcv_entries)
                        can_pstats->rcv_entries_max = can_pstats->rcv_entries;
        } else {
                kmem_cache_free(rcv_cache, r);
                err = -ENODEV;
        }

        spin_unlock(&net->can.can_rcvlists_lock);

        return err;
}
EXPORT_SYMBOL(can_rx_register);

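/*
 * Registration sketch (illustrative, not part of the original file;
 * my_rcv(), my_enqueue() and 'priv' are hypothetical): subscribers provide
 * a callback that runs under RCU protection and must clone the skb if it
 * is needed beyond the callback, as required by the description above:
 *
 *      static void my_rcv(struct sk_buff *skb, void *data)
 *      {
 *              struct my_priv *priv = data;
 *              struct sk_buff *copy = skb_clone(skb, GFP_ATOMIC);
 *
 *              if (copy)
 *                      my_enqueue(priv, copy);
 *      }
 *
 *      err = can_rx_register(net, dev, can_id, mask, my_rcv, priv,
 *                            "my-proto", sk);
 */
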
/*
 * can_rx_delete_receiver - rcu callback for single receiver entry removal
 */
static void can_rx_delete_receiver(struct rcu_head *rp)
{
        struct receiver *r = container_of(rp, struct receiver, rcu);
        struct sock *sk = r->sk;

        kmem_cache_free(rcv_cache, r);
        if (sk)
                sock_put(sk);
}

/**
 * can_rx_unregister - unsubscribe CAN frames from a specific interface
 * @net: the applicable net namespace
 * @dev: pointer to netdevice (NULL => unsubscribe from 'all' CAN devices list)
 * @can_id: CAN identifier
 * @mask: CAN mask
 * @func: callback function on filter match
 * @data: returned parameter for callback function
 *
 * Description:
 *  Removes subscription entry depending on given (subscription) values.
 */
void can_rx_unregister(struct net *net, struct net_device *dev, canid_t can_id,
                       canid_t mask, void (*func)(struct sk_buff *, void *),
                       void *data)
{
        struct receiver *r = NULL;
        struct hlist_head *rl;
        struct s_pstats *can_pstats = net->can.can_pstats;
        struct dev_rcv_lists *d;

        if (dev && dev->type != ARPHRD_CAN)
                return;

        if (dev && !net_eq(net, dev_net(dev)))
                return;

        spin_lock(&net->can.can_rcvlists_lock);

        d = find_dev_rcv_lists(net, dev);
        if (!d) {
                pr_err("BUG: receive list not found for "
                       "dev %s, id %03X, mask %03X\n",
                       DNAME(dev), can_id, mask);
                goto out;
        }

        rl = find_rcv_list(&can_id, &mask, d);

        /*
         * Search the receiver list for the item to delete. This should
         * exist, since no receiver may be unregistered that hasn't
         * been registered before.
         */

        hlist_for_each_entry_rcu(r, rl, list) {
                if (r->can_id == can_id && r->mask == mask &&
                    r->func == func && r->data == data)
                        break;
        }

        /*
         * Check for bugs in CAN protocol implementations using af_can.c:
         * 'r' will be NULL if no matching list item was found for removal.
         */

        if (!r) {
                WARN(1, "BUG: receive list entry not found for dev %s, "
                     "id %03X, mask %03X\n", DNAME(dev), can_id, mask);
                goto out;
        }

        hlist_del_rcu(&r->list);
        d->entries--;

        if (can_pstats->rcv_entries > 0)
                can_pstats->rcv_entries--;

        /* remove device structure requested by NETDEV_UNREGISTER */
        if (d->remove_on_zero_entries && !d->entries) {
                kfree(d);
                dev->ml_priv = NULL;
        }

out:
        spin_unlock(&net->can.can_rcvlists_lock);

        /* schedule the receiver item for deletion */
        if (r) {
                if (r->sk)
                        sock_hold(r->sk);
                call_rcu(&r->rcu, can_rx_delete_receiver);
        }
}
EXPORT_SYMBOL(can_rx_unregister);

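/*
 * Matching teardown sketch (illustrative, not part of the original file):
 * the receiver entry is looked up by the exact (dev, can_id, mask, func,
 * data) tuple used at registration time, so the unregister call has to
 * mirror the earlier can_rx_register() call:
 *
 *      can_rx_unregister(net, dev, can_id, mask, my_rcv, priv);
 */
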
static inline void deliver(struct sk_buff *skb, struct receiver *r)
{
        r->func(skb, r->data);
        r->matches++;
}

static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb)
{
        struct receiver *r;
        int matches = 0;
        struct can_frame *cf = (struct can_frame *)skb->data;
        canid_t can_id = cf->can_id;

        if (d->entries == 0)
                return 0;

        if (can_id & CAN_ERR_FLAG) {
                /* check for error message frame entries only */
                hlist_for_each_entry_rcu(r, &d->rx[RX_ERR], list) {
                        if (can_id & r->mask) {
                                deliver(skb, r);
                                matches++;
                        }
                }
                return matches;
        }

        /* check for unfiltered entries */
        hlist_for_each_entry_rcu(r, &d->rx[RX_ALL], list) {
                deliver(skb, r);
                matches++;
        }

        /* check for can_id/mask entries */
        hlist_for_each_entry_rcu(r, &d->rx[RX_FIL], list) {
                if ((can_id & r->mask) == r->can_id) {
                        deliver(skb, r);
                        matches++;
                }
        }

        /* check for inverted can_id/mask entries */
        hlist_for_each_entry_rcu(r, &d->rx[RX_INV], list) {
                if ((can_id & r->mask) != r->can_id) {
                        deliver(skb, r);
                        matches++;
                }
        }

        /* check filterlists for single non-RTR can_ids */
        if (can_id & CAN_RTR_FLAG)
                return matches;

        if (can_id & CAN_EFF_FLAG) {
                hlist_for_each_entry_rcu(r, &d->rx_eff[effhash(can_id)], list) {
                        if (r->can_id == can_id) {
                                deliver(skb, r);
                                matches++;
                        }
                }
        } else {
                can_id &= CAN_SFF_MASK;
                hlist_for_each_entry_rcu(r, &d->rx_sff[can_id], list) {
                        deliver(skb, r);
                        matches++;
                }
        }

        return matches;
}

static void can_receive(struct sk_buff *skb, struct net_device *dev)
{
        struct dev_rcv_lists *d;
        struct net *net = dev_net(dev);
        struct s_stats *can_stats = net->can.can_stats;
        int matches;

        /* update statistics */
        can_stats->rx_frames++;
        can_stats->rx_frames_delta++;

        /* create non-zero unique skb identifier together with *skb */
        while (!(can_skb_prv(skb)->skbcnt))
                can_skb_prv(skb)->skbcnt = atomic_inc_return(&skbcounter);

        rcu_read_lock();

        /* deliver the packet to sockets listening on all devices */
        matches = can_rcv_filter(net->can.can_rx_alldev_list, skb);

        /* find receive list for this device */
        d = find_dev_rcv_lists(net, dev);
        if (d)
                matches += can_rcv_filter(d, skb);

        rcu_read_unlock();

        /* consume the skbuff allocated by the netdevice driver */
        consume_skb(skb);

        if (matches > 0) {
                can_stats->matches++;
                can_stats->matches_delta++;
        }
}

static int can_rcv(struct sk_buff *skb, struct net_device *dev,
                   struct packet_type *pt, struct net_device *orig_dev)
{
        struct canfd_frame *cfd = (struct canfd_frame *)skb->data;

        if (WARN_ONCE(dev->type != ARPHRD_CAN ||
                      skb->len != CAN_MTU ||
                      cfd->len > CAN_MAX_DLEN,
                      "PF_CAN: dropped non conform CAN skbuf: "
                      "dev type %d, len %d, datalen %d\n",
                      dev->type, skb->len, cfd->len))
                goto drop;

        can_receive(skb, dev);
        return NET_RX_SUCCESS;

drop:
        kfree_skb(skb);
        return NET_RX_DROP;
}

static int canfd_rcv(struct sk_buff *skb, struct net_device *dev,
                     struct packet_type *pt, struct net_device *orig_dev)
{
        struct canfd_frame *cfd = (struct canfd_frame *)skb->data;

        if (WARN_ONCE(dev->type != ARPHRD_CAN ||
                      skb->len != CANFD_MTU ||
                      cfd->len > CANFD_MAX_DLEN,
                      "PF_CAN: dropped non conform CAN FD skbuf: "
                      "dev type %d, len %d, datalen %d\n",
                      dev->type, skb->len, cfd->len))
                goto drop;

        can_receive(skb, dev);
        return NET_RX_SUCCESS;

drop:
        kfree_skb(skb);
        return NET_RX_DROP;
}

/*
 * af_can protocol functions
 */

/**
 * can_proto_register - register CAN transport protocol
 * @cp: pointer to CAN protocol structure
 *
 * Return:
 *  0 on success
 *  -EINVAL invalid (out of range) protocol number
 *  -EBUSY  protocol already in use
 *  -ENOBUFS if proto_register() fails
 */
int can_proto_register(const struct can_proto *cp)
{
        int proto = cp->protocol;
        int err = 0;

        if (proto < 0 || proto >= CAN_NPROTO) {
                pr_err("can: protocol number %d out of range\n", proto);
                return -EINVAL;
        }

        err = proto_register(cp->prot, 0);
        if (err < 0)
                return err;

        mutex_lock(&proto_tab_lock);

        if (rcu_access_pointer(proto_tab[proto])) {
                pr_err("can: protocol %d already registered\n", proto);
                err = -EBUSY;
        } else
                RCU_INIT_POINTER(proto_tab[proto], cp);

        mutex_unlock(&proto_tab_lock);

        if (err < 0)
                proto_unregister(cp->prot);

        return err;
}
EXPORT_SYMBOL(can_proto_register);

/**
 * can_proto_unregister - unregister CAN transport protocol
 * @cp: pointer to CAN protocol structure
 */
void can_proto_unregister(const struct can_proto *cp)
{
        int proto = cp->protocol;

        mutex_lock(&proto_tab_lock);
        BUG_ON(rcu_access_pointer(proto_tab[proto]) != cp);
        RCU_INIT_POINTER(proto_tab[proto], NULL);
        mutex_unlock(&proto_tab_lock);

        synchronize_rcu();

        proto_unregister(cp->prot);
}
EXPORT_SYMBOL(can_proto_unregister);

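/*
 * Registration sketch for a CAN transport protocol (illustrative, not part
 * of the original file; 'my_ops' and 'my_proto' are hypothetical): a
 * protocol module fills a struct can_proto and registers it from its
 * module init, mirroring what e.g. the can-raw module does:
 *
 *      static const struct can_proto my_can_proto = {
 *              .type     = SOCK_RAW,
 *              .protocol = CAN_RAW,
 *              .ops      = &my_ops,
 *              .prot     = &my_proto,
 *      };
 *
 *      err = can_proto_register(&my_can_proto);
 *
 * and calls can_proto_unregister(&my_can_proto) from its module exit.
 */
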
/*
 * af_can notifier to create/remove CAN netdevice specific structs
 */
static int can_notifier(struct notifier_block *nb, unsigned long msg,
                        void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct dev_rcv_lists *d;

        if (dev->type != ARPHRD_CAN)
                return NOTIFY_DONE;

        switch (msg) {

        case NETDEV_REGISTER:

                /* create new dev_rcv_lists for this device */
                d = kzalloc(sizeof(*d), GFP_KERNEL);
                if (!d)
                        return NOTIFY_DONE;
                BUG_ON(dev->ml_priv);
                dev->ml_priv = d;

                break;

        case NETDEV_UNREGISTER:
                spin_lock(&dev_net(dev)->can.can_rcvlists_lock);

                d = dev->ml_priv;
                if (d) {
                        if (d->entries)
                                d->remove_on_zero_entries = 1;
                        else {
                                kfree(d);
                                dev->ml_priv = NULL;
                        }
                } else
                        pr_err("can: notifier: receive list not found for dev "
                               "%s\n", dev->name);

                spin_unlock(&dev_net(dev)->can.can_rcvlists_lock);

                break;
        }

        return NOTIFY_DONE;
}

static int can_pernet_init(struct net *net)
{
        spin_lock_init(&net->can.can_rcvlists_lock);
        net->can.can_rx_alldev_list =
                kzalloc(sizeof(struct dev_rcv_lists), GFP_KERNEL);
        if (!net->can.can_rx_alldev_list)
                goto out;
        net->can.can_stats = kzalloc(sizeof(struct s_stats), GFP_KERNEL);
        if (!net->can.can_stats)
                goto out_free_alldev_list;
        net->can.can_pstats = kzalloc(sizeof(struct s_pstats), GFP_KERNEL);
        if (!net->can.can_pstats)
                goto out_free_can_stats;

        if (IS_ENABLED(CONFIG_PROC_FS)) {
                /* the statistics are updated every second (timer triggered) */
                if (stats_timer) {
                        timer_setup(&net->can.can_stattimer, can_stat_update,
                                    0);
                        mod_timer(&net->can.can_stattimer,
                                  round_jiffies(jiffies + HZ));
                }
                net->can.can_stats->jiffies_init = jiffies;
                can_init_proc(net);
        }

        return 0;

out_free_can_stats:
        kfree(net->can.can_stats);
out_free_alldev_list:
        kfree(net->can.can_rx_alldev_list);
out:
        return -ENOMEM;
}

static void can_pernet_exit(struct net *net)
{
        struct net_device *dev;

        if (IS_ENABLED(CONFIG_PROC_FS)) {
                can_remove_proc(net);
                if (stats_timer)
                        del_timer_sync(&net->can.can_stattimer);
        }

        /* remove created dev_rcv_lists from still registered CAN devices */
        rcu_read_lock();
        for_each_netdev_rcu(net, dev) {
                if (dev->type == ARPHRD_CAN && dev->ml_priv) {
                        struct dev_rcv_lists *d = dev->ml_priv;

                        BUG_ON(d->entries);
                        kfree(d);
                        dev->ml_priv = NULL;
                }
        }
        rcu_read_unlock();

        kfree(net->can.can_rx_alldev_list);
        kfree(net->can.can_stats);
        kfree(net->can.can_pstats);
}

/*
 * af_can module init/exit functions
 */

static struct packet_type can_packet __read_mostly = {
        .type = cpu_to_be16(ETH_P_CAN),
        .func = can_rcv,
};

static struct packet_type canfd_packet __read_mostly = {
        .type = cpu_to_be16(ETH_P_CANFD),
        .func = canfd_rcv,
};

static const struct net_proto_family can_family_ops = {
        .family = PF_CAN,
        .create = can_create,
        .owner  = THIS_MODULE,
};

/* notifier block for netdevice event */
static struct notifier_block can_netdev_notifier __read_mostly = {
        .notifier_call = can_notifier,
};

static struct pernet_operations can_pernet_ops __read_mostly = {
        .init = can_pernet_init,
        .exit = can_pernet_exit,
};

static __init int can_init(void)
{
        /* check for correct padding to be able to use the structs similarly */
        BUILD_BUG_ON(offsetof(struct can_frame, can_dlc) !=
                     offsetof(struct canfd_frame, len) ||
                     offsetof(struct can_frame, data) !=
                     offsetof(struct canfd_frame, data));

        pr_info("can: controller area network core (" CAN_VERSION_STRING ")\n");

        rcv_cache = kmem_cache_create("can_receiver", sizeof(struct receiver),
                                      0, 0, NULL);
        if (!rcv_cache)
                return -ENOMEM;

        register_pernet_subsys(&can_pernet_ops);

        /* protocol register */
        sock_register(&can_family_ops);
        register_netdevice_notifier(&can_netdev_notifier);
        dev_add_pack(&can_packet);
        dev_add_pack(&canfd_packet);

        return 0;
}

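/*
 * Layout note for the BUILD_BUG_ON() in can_init() above (illustrative,
 * not part of the original file): struct can_frame and struct canfd_frame
 * keep the length byte and the data[] array at the same offsets, which is
 * what lets can_send() pass CAN FD frames with len <= 8 to legacy CAN
 * drivers:
 *
 *      struct can_frame   { canid_t can_id; __u8 can_dlc; ... __u8 data[8];  };
 *      struct canfd_frame { canid_t can_id; __u8 len; __u8 flags; ... __u8 data[64]; };
 */
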
static __exit void can_exit(void)
{
        /* protocol unregister */
        dev_remove_pack(&canfd_packet);
        dev_remove_pack(&can_packet);
        unregister_netdevice_notifier(&can_netdev_notifier);
        sock_unregister(PF_CAN);

        unregister_pernet_subsys(&can_pernet_ops);

        rcu_barrier(); /* Wait for completion of call_rcu()'s */

        kmem_cache_destroy(rcv_cache);
}

module_init(can_init);
module_exit(can_exit);