/*
 * af_can.c - Protocol family CAN core module
 *            (used by different CAN protocol modules)
 *
 * Copyright (c) 2002-2017 Volkswagen Group Electronic Research
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Volkswagen nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * Alternatively, provided that this notice is retained in full, this
 * software may be distributed under the terms of the GNU General
 * Public License ("GPL") version 2, in which case the provisions of the
 * GPL apply INSTEAD OF those given above.
 *
 * The provided data structures and external interfaces from this code
 * are not restricted to be used by modules with a GPL compatible license.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 */

#include <linux/module.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/uaccess.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/if_ether.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <linux/can.h>
#include <linux/can/core.h>
#include <linux/can/skb.h>
#include <linux/ratelimit.h>
#include <net/net_namespace.h>
#include <net/sock.h>

#include "af_can.h"

MODULE_DESCRIPTION("Controller Area Network PF_CAN core");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Urs Thuermann <urs.thuermann@volkswagen.de>, "
	      "Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");

MODULE_ALIAS_NETPROTO(PF_CAN);

static int stats_timer __read_mostly = 1;
module_param(stats_timer, int, 0444);
MODULE_PARM_DESC(stats_timer, "enable timer for statistics (default:on)");

static struct kmem_cache *rcv_cache __read_mostly;

/* table of registered CAN protocols */
static const struct can_proto __rcu *proto_tab[CAN_NPROTO] __read_mostly;
static DEFINE_MUTEX(proto_tab_lock);

static atomic_t skbcounter = ATOMIC_INIT(0);

/*
 * af_can socket functions
 */

int can_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	default:
		return -ENOIOCTLCMD;
	}
}
EXPORT_SYMBOL(can_ioctl);

static void can_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_error_queue);
}

static const struct can_proto *can_get_proto(int protocol)
{
	const struct can_proto *cp;

	rcu_read_lock();
	cp = rcu_dereference(proto_tab[protocol]);
	if (cp && !try_module_get(cp->prot->owner))
		cp = NULL;
	rcu_read_unlock();

	return cp;
}

static inline void can_put_proto(const struct can_proto *cp)
{
	module_put(cp->prot->owner);
}

static int can_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	struct sock *sk;
	const struct can_proto *cp;
	int err = 0;

	sock->state = SS_UNCONNECTED;

	if (protocol < 0 || protocol >= CAN_NPROTO)
		return -EINVAL;

	cp = can_get_proto(protocol);

#ifdef CONFIG_MODULES
	if (!cp) {
		/* try to load protocol module if kernel is modular */

		err = request_module("can-proto-%d", protocol);

		/*
		 * In case of error we only print a message but don't
		 * return the error code immediately.  Below we will
		 * return -EPROTONOSUPPORT
		 */
		if (err)
			printk_ratelimited(KERN_ERR "can: request_module "
					   "(can-proto-%d) failed.\n", protocol);

		cp = can_get_proto(protocol);
	}
#endif

	/* check for available protocol and correct usage */

	if (!cp)
		return -EPROTONOSUPPORT;

	if (cp->type != sock->type) {
		err = -EPROTOTYPE;
		goto errout;
	}

	sock->ops = cp->ops;

	sk = sk_alloc(net, PF_CAN, GFP_KERNEL, cp->prot, kern);
	if (!sk) {
		err = -ENOMEM;
		goto errout;
	}

	sock_init_data(sock, sk);
	sk->sk_destruct = can_sock_destruct;

	if (sk->sk_prot->init)
		err = sk->sk_prot->init(sk);

	if (err) {
		/* release sk on errors */
		sock_orphan(sk);
		sock_put(sk);
	}

errout:
	can_put_proto(cp);
	return err;
}
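
/*
 * Illustrative sketch (not part of this module): can_create() above is
 * reached through the ordinary socket() system call.  The values shown
 * here assume the can-raw protocol module (CAN_RAW) and an interface
 * named "can0"; both are only examples:
 *
 *	int s = socket(PF_CAN, SOCK_RAW, CAN_RAW);
 *	struct sockaddr_can addr = { 0 };
 *
 *	addr.can_family = AF_CAN;
 *	addr.can_ifindex = if_nametoindex("can0");
 *	bind(s, (struct sockaddr *)&addr, sizeof(addr));
 *
 * If the protocol module is not loaded, the request_module("can-proto-%d")
 * call above tries to load it on demand before giving up with
 * -EPROTONOSUPPORT.
 */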

/*
 * af_can tx path
 */

/**
 * can_send - transmit a CAN frame (optionally with local loopback)
 * @skb: pointer to socket buffer with CAN frame in data section
 * @loop: loopback for listeners on local CAN sockets (recommended default!)
 *
 * Due to the loopback this routine must not be called from hardirq context.
 *
 * Return:
 *  0 on success
 *  -ENETDOWN when the selected interface is down
 *  -ENOBUFS on full driver queue (see net_xmit_errno())
 *  -ENOMEM when local loopback failed at calling skb_clone()
 *  -EPERM when trying to send on a non-CAN interface
 *  -EMSGSIZE CAN frame size is bigger than CAN interface MTU
 *  -EINVAL when the skb->data does not contain a valid CAN frame
 */
int can_send(struct sk_buff *skb, int loop)
{
	struct sk_buff *newskb = NULL;
	struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
	struct s_stats *can_stats = dev_net(skb->dev)->can.can_stats;
	int err = -EINVAL;

	if (skb->len == CAN_MTU) {
		skb->protocol = htons(ETH_P_CAN);
		if (unlikely(cfd->len > CAN_MAX_DLEN))
			goto inval_skb;
	} else if (skb->len == CANFD_MTU) {
		skb->protocol = htons(ETH_P_CANFD);
		if (unlikely(cfd->len > CANFD_MAX_DLEN))
			goto inval_skb;
	} else
		goto inval_skb;

	/*
	 * Make sure the CAN frame can pass the selected CAN netdevice.
	 * As structs can_frame and canfd_frame are similar, we can provide
	 * CAN FD frames to legacy CAN drivers as long as the length is <= 8
	 */
	if (unlikely(skb->len > skb->dev->mtu && cfd->len > CAN_MAX_DLEN)) {
		err = -EMSGSIZE;
		goto inval_skb;
	}

	if (unlikely(skb->dev->type != ARPHRD_CAN)) {
		err = -EPERM;
		goto inval_skb;
	}

	if (unlikely(!(skb->dev->flags & IFF_UP))) {
		err = -ENETDOWN;
		goto inval_skb;
	}

	skb->ip_summed = CHECKSUM_UNNECESSARY;

	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);

	if (loop) {
		/* local loopback of sent CAN frames */

		/* indication for the CAN driver: do loopback */
		skb->pkt_type = PACKET_LOOPBACK;

		/*
		 * The reference to the originating sock may be required
		 * by the receiving socket to check whether the frame is
		 * its own. Example: can_raw sockopt CAN_RAW_RECV_OWN_MSGS
		 * Therefore we have to ensure that skb->sk remains the
		 * reference to the originating sock by restoring skb->sk
		 * after each skb_clone() or skb_orphan() usage.
		 */

		if (!(skb->dev->flags & IFF_ECHO)) {
			/*
			 * If the interface is not capable to do loopback
			 * itself, we do it here.
			 */
			newskb = skb_clone(skb, GFP_ATOMIC);
			if (!newskb) {
				kfree_skb(skb);
				return -ENOMEM;
			}

			can_skb_set_owner(newskb, skb->sk);
			newskb->ip_summed = CHECKSUM_UNNECESSARY;
			newskb->pkt_type = PACKET_BROADCAST;
		}
	} else {
		/* indication for the CAN driver: no loopback required */
		skb->pkt_type = PACKET_HOST;
	}

	/* send to netdevice */
	err = dev_queue_xmit(skb);
	if (err > 0)
		err = net_xmit_errno(err);

	if (err) {
		kfree_skb(newskb);
		return err;
	}

	if (newskb)
		netif_rx_ni(newskb);

	/* update statistics */
	can_stats->tx_frames++;
	can_stats->tx_frames_delta++;

	return 0;

inval_skb:
	kfree_skb(skb);
	return err;
}
EXPORT_SYMBOL(can_send);
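
/*
 * Illustrative in-kernel usage sketch for can_send() (not part of this
 * module).  Error handling is reduced to a minimum; dev is assumed to be
 * a CAN netdevice the caller already holds a reference on:
 *
 *	struct sk_buff *skb;
 *	struct can_frame *cf;
 *	int err;
 *
 *	skb = alloc_skb(sizeof(*cf) + sizeof(struct can_skb_priv), GFP_KERNEL);
 *	if (!skb)
 *		return -ENOMEM;
 *
 *	can_skb_reserve(skb);
 *	can_skb_prv(skb)->ifindex = dev->ifindex;
 *	can_skb_prv(skb)->skbcnt = 0;
 *
 *	cf = (struct can_frame *)skb_put(skb, sizeof(*cf));
 *	cf->can_id = 0x123;
 *	cf->can_dlc = 2;
 *	cf->data[0] = 0xde;
 *	cf->data[1] = 0xad;
 *
 *	skb->dev = dev;
 *	err = can_send(skb, 1);		(loop = 1: deliver to local listeners too)
 *
 * can_send() consumes the skb in both the success and the error case, so
 * the caller must not free it afterwards.
 */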

/*
 * af_can rx path
 */

static struct can_dev_rcv_lists *find_dev_rcv_lists(struct net *net,
						    struct net_device *dev)
{
	if (!dev)
		return net->can.can_rx_alldev_list;
	else
		return (struct can_dev_rcv_lists *)dev->ml_priv;
}

/**
 * effhash - hash function for 29 bit CAN identifier reduction
 * @can_id: 29 bit CAN identifier
 *
 * Description:
 *  To reduce the linear traversal in one linked list of _single_ EFF CAN
 *  frame subscriptions the 29 bit identifier is mapped to 10 bits.
 *  (see CAN_EFF_RCV_HASH_BITS definition)
 *
 * Return:
 *  Hash value from 0x000 - 0x3FF (enforced by CAN_EFF_RCV_HASH_BITS mask)
 */
static unsigned int effhash(canid_t can_id)
{
	unsigned int hash;

	hash = can_id;
	hash ^= can_id >> CAN_EFF_RCV_HASH_BITS;
	hash ^= can_id >> (2 * CAN_EFF_RCV_HASH_BITS);

	return hash & ((1 << CAN_EFF_RCV_HASH_BITS) - 1);
}

/**
 * find_rcv_list - determine optimal filterlist inside device filter struct
 * @can_id: pointer to CAN identifier of a given can_filter
 * @mask: pointer to CAN mask of a given can_filter
 * @d: pointer to the device filter struct
 *
 * Description:
 *  Returns the optimal filterlist to reduce the filter handling in the
 *  receive path. This function is called by service functions that need
 *  to register or unregister a can_filter in the filter lists.
 *
 *  A filter matches in general, when
 *
 *          <received_can_id> & mask == can_id & mask
 *
 *  so every bit set in the mask (even CAN_EFF_FLAG, CAN_RTR_FLAG) describes
 *  relevant bits for the filter.
 *
 *  The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can
 *  filter for error messages (CAN_ERR_FLAG bit set in mask). For error msg
 *  frames there is a special filterlist and a special rx path filter handling.
 *
 * Return:
 *  Pointer to optimal filterlist for the given can_id/mask pair.
 *  Consistency checked mask.
 *  Reduced can_id to have a preprocessed filter compare value.
 */
static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
					struct can_dev_rcv_lists *d)
{
	canid_t inv = *can_id & CAN_INV_FILTER; /* save flag before masking */

	/* filter for error message frames in extra filterlist */
	if (*mask & CAN_ERR_FLAG) {
		/* clear CAN_ERR_FLAG in filter entry */
		*mask &= CAN_ERR_MASK;
		return &d->rx[RX_ERR];
	}

	/* with cleared CAN_ERR_FLAG we have a simple mask/value filterpair */

#define CAN_EFF_RTR_FLAGS (CAN_EFF_FLAG | CAN_RTR_FLAG)

	/* ensure valid values in can_mask for 'SFF only' frame filtering */
	if ((*mask & CAN_EFF_FLAG) && !(*can_id & CAN_EFF_FLAG))
		*mask &= (CAN_SFF_MASK | CAN_EFF_RTR_FLAGS);

	/* reduce condition testing at receive time */
	*can_id &= *mask;

	/* inverse can_id/can_mask filter */
	if (inv)
		return &d->rx[RX_INV];

	/* mask == 0 => no condition testing at receive time */
	if (!(*mask))
		return &d->rx[RX_ALL];

	/* extra filterlists for the subscription of a single non-RTR can_id */
	if (((*mask & CAN_EFF_RTR_FLAGS) == CAN_EFF_RTR_FLAGS) &&
	    !(*can_id & CAN_RTR_FLAG)) {

		if (*can_id & CAN_EFF_FLAG) {
			if (*mask == (CAN_EFF_MASK | CAN_EFF_RTR_FLAGS))
				return &d->rx_eff[effhash(*can_id)];
		} else {
			if (*mask == (CAN_SFF_MASK | CAN_EFF_RTR_FLAGS))
				return &d->rx_sff[*can_id];
		}
	}

	/* default: filter via can_id/can_mask */
	return &d->rx[RX_FIL];
}

/**
 * can_rx_register - subscribe CAN frames from a specific interface
 * @net: the applicable net namespace
 * @dev: pointer to netdevice (NULL => subscribe from 'all' CAN devices list)
 * @can_id: CAN identifier (see description)
 * @mask: CAN mask (see description)
 * @func: callback function on filter match
 * @data: parameter passed to the callback function
 * @ident: string for calling module identification
 * @sk: socket pointer (might be NULL)
 *
 * Description:
 *  Invokes the callback function with the received sk_buff and the given
 *  parameter 'data' on a matching receive filter. A filter matches, when
 *
 *          <received_can_id> & mask == can_id & mask
 *
 *  The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can
 *  filter for error message frames (CAN_ERR_FLAG bit set in mask).
 *
 *  The provided pointer to the sk_buff is guaranteed to be valid as long as
 *  the callback function is running. The callback function must *not* free
 *  the given sk_buff while processing its task. When the given sk_buff is
 *  needed after the end of the callback function it must be cloned inside
 *  the callback function with skb_clone().
 *
 * Return:
 *  0 on success
 *  -ENOMEM on missing cache mem to create subscription entry
 *  -ENODEV unknown device
 */
int can_rx_register(struct net *net, struct net_device *dev, canid_t can_id,
		    canid_t mask, void (*func)(struct sk_buff *, void *),
		    void *data, char *ident, struct sock *sk)
{
	struct receiver *r;
	struct hlist_head *rl;
	struct can_dev_rcv_lists *d;
	struct s_pstats *can_pstats = net->can.can_pstats;
	int err = 0;

	/* insert new receiver  (dev,canid,mask) -> (func,data) */

	if (dev && dev->type != ARPHRD_CAN)
		return -ENODEV;

	if (dev && !net_eq(net, dev_net(dev)))
		return -ENODEV;

	r = kmem_cache_alloc(rcv_cache, GFP_KERNEL);
	if (!r)
		return -ENOMEM;

	spin_lock(&net->can.can_rcvlists_lock);

	d = find_dev_rcv_lists(net, dev);
	if (d) {
		rl = find_rcv_list(&can_id, &mask, d);

		r->can_id = can_id;
		r->mask = mask;
		r->matches = 0;
		r->func = func;
		r->data = data;
		r->ident = ident;
		r->sk = sk;

		hlist_add_head_rcu(&r->list, rl);
		d->entries++;

		can_pstats->rcv_entries++;
		if (can_pstats->rcv_entries_max < can_pstats->rcv_entries)
			can_pstats->rcv_entries_max = can_pstats->rcv_entries;
	} else {
		kmem_cache_free(rcv_cache, r);
		err = -ENODEV;
	}

	spin_unlock(&net->can.can_rcvlists_lock);

	return err;
}
EXPORT_SYMBOL(can_rx_register);
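
/*
 * Illustrative examples of how the filter rule above and find_rcv_list()
 * interact for some typical can_filter settings; the concrete values are
 * only examples:
 *
 *  can_id = 0x123, mask = (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG)
 *	subscribes exactly the single SFF data frame 0x123 and is stored
 *	in d->rx_sff[0x123].
 *
 *  can_id = (0x12345678 | CAN_EFF_FLAG),
 *  mask   = (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG)
 *	subscribes a single EFF data frame and is stored in
 *	d->rx_eff[effhash(can_id)].
 *
 *  can_id = 0x200, mask = 0x700
 *	matches every received frame with (rcv_id & 0x700) == 0x200,
 *	e.g. the SFF range 0x200..0x2FF, and is stored in d->rx[RX_FIL].
 *
 *  can_id = 0, mask = 0
 *	matches everything and is stored in d->rx[RX_ALL].
 */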

/*
 * can_rx_delete_receiver - rcu callback for single receiver entry removal
 */
static void can_rx_delete_receiver(struct rcu_head *rp)
{
	struct receiver *r = container_of(rp, struct receiver, rcu);
	struct sock *sk = r->sk;

	kmem_cache_free(rcv_cache, r);
	if (sk)
		sock_put(sk);
}

/**
 * can_rx_unregister - unsubscribe CAN frames from a specific interface
 * @net: the applicable net namespace
 * @dev: pointer to netdevice (NULL => unsubscribe from 'all' CAN devices list)
 * @can_id: CAN identifier
 * @mask: CAN mask
 * @func: callback function on filter match
 * @data: parameter passed to the callback function
 *
 * Description:
 *  Removes subscription entry depending on given (subscription) values.
 */
void can_rx_unregister(struct net *net, struct net_device *dev, canid_t can_id,
		       canid_t mask, void (*func)(struct sk_buff *, void *),
		       void *data)
{
	struct receiver *r = NULL;
	struct hlist_head *rl;
	struct s_pstats *can_pstats = net->can.can_pstats;
	struct can_dev_rcv_lists *d;

	if (dev && dev->type != ARPHRD_CAN)
		return;

	if (dev && !net_eq(net, dev_net(dev)))
		return;

	spin_lock(&net->can.can_rcvlists_lock);

	d = find_dev_rcv_lists(net, dev);
	if (!d) {
		pr_err("BUG: receive list not found for "
		       "dev %s, id %03X, mask %03X\n",
		       DNAME(dev), can_id, mask);
		goto out;
	}

	rl = find_rcv_list(&can_id, &mask, d);

	/*
	 * Search the receiver list for the item to delete.  This should
	 * exist, since no receiver may be unregistered that hasn't
	 * been registered before.
	 */

	hlist_for_each_entry_rcu(r, rl, list) {
		if (r->can_id == can_id && r->mask == mask &&
		    r->func == func && r->data == data)
			break;
	}

	/*
	 * Check for bugs in CAN protocol implementations using af_can.c:
	 * 'r' will be NULL if no matching list item was found for removal.
	 */

	if (!r) {
		WARN(1, "BUG: receive list entry not found for dev %s, "
		     "id %03X, mask %03X\n", DNAME(dev), can_id, mask);
		goto out;
	}

	hlist_del_rcu(&r->list);
	d->entries--;

	if (can_pstats->rcv_entries > 0)
		can_pstats->rcv_entries--;

	/* remove device structure requested by NETDEV_UNREGISTER */
	if (d->remove_on_zero_entries && !d->entries) {
		kfree(d);
		dev->ml_priv = NULL;
	}

out:
	spin_unlock(&net->can.can_rcvlists_lock);

	/* schedule the receiver item for deletion */
	if (r) {
		if (r->sk)
			sock_hold(r->sk);
		call_rcu(&r->rcu, can_rx_delete_receiver);
	}
}
EXPORT_SYMBOL(can_rx_unregister);
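
/*
 * Illustrative usage sketch for the can_rx_register()/can_rx_unregister()
 * pair (not part of this module).  The callback my_rx_callback(), the
 * cookie priv, the socket sk and the ident string are placeholders:
 *
 *	static void my_rx_callback(struct sk_buff *skb, void *data)
 *	{
 *		struct my_priv *priv = data;
 *
 *		skb is only valid for the duration of this callback;
 *		clone it with skb_clone() if it is needed later on.
 *	}
 *
 *	err = can_rx_register(net, dev, 0x123,
 *			      CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG,
 *			      my_rx_callback, priv, "my_proto", sk);
 *	...
 *	can_rx_unregister(net, dev, 0x123,
 *			  CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG,
 *			  my_rx_callback, priv);
 *
 * The (can_id, mask, func, data) tuple passed to can_rx_unregister() must
 * match the values used at registration time, otherwise the entry is not
 * found and the WARN() above triggers.
 */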

static inline void deliver(struct sk_buff *skb, struct receiver *r)
{
	r->func(skb, r->data);
	r->matches++;
}

static int can_rcv_filter(struct can_dev_rcv_lists *d, struct sk_buff *skb)
{
	struct receiver *r;
	int matches = 0;
	struct can_frame *cf = (struct can_frame *)skb->data;
	canid_t can_id = cf->can_id;

	if (d->entries == 0)
		return 0;

	if (can_id & CAN_ERR_FLAG) {
		/* check for error message frame entries only */
		hlist_for_each_entry_rcu(r, &d->rx[RX_ERR], list) {
			if (can_id & r->mask) {
				deliver(skb, r);
				matches++;
			}
		}
		return matches;
	}

	/* check for unfiltered entries */
	hlist_for_each_entry_rcu(r, &d->rx[RX_ALL], list) {
		deliver(skb, r);
		matches++;
	}

	/* check for can_id/mask entries */
	hlist_for_each_entry_rcu(r, &d->rx[RX_FIL], list) {
		if ((can_id & r->mask) == r->can_id) {
			deliver(skb, r);
			matches++;
		}
	}

	/* check for inverted can_id/mask entries */
	hlist_for_each_entry_rcu(r, &d->rx[RX_INV], list) {
		if ((can_id & r->mask) != r->can_id) {
			deliver(skb, r);
			matches++;
		}
	}

	/* check filterlists for single non-RTR can_ids */
	if (can_id & CAN_RTR_FLAG)
		return matches;

	if (can_id & CAN_EFF_FLAG) {
		hlist_for_each_entry_rcu(r, &d->rx_eff[effhash(can_id)], list) {
			if (r->can_id == can_id) {
				deliver(skb, r);
				matches++;
			}
		}
	} else {
		can_id &= CAN_SFF_MASK;
		hlist_for_each_entry_rcu(r, &d->rx_sff[can_id], list) {
			deliver(skb, r);
			matches++;
		}
	}

	return matches;
}

static void can_receive(struct sk_buff *skb, struct net_device *dev)
{
	struct can_dev_rcv_lists *d;
	struct net *net = dev_net(dev);
	struct s_stats *can_stats = net->can.can_stats;
	int matches;

	/* update statistics */
	can_stats->rx_frames++;
	can_stats->rx_frames_delta++;

	/* create non-zero unique skb identifier together with *skb */
	while (!(can_skb_prv(skb)->skbcnt))
		can_skb_prv(skb)->skbcnt = atomic_inc_return(&skbcounter);

	rcu_read_lock();

	/* deliver the packet to sockets listening on all devices */
	matches = can_rcv_filter(net->can.can_rx_alldev_list, skb);

	/* find receive list for this device */
	d = find_dev_rcv_lists(net, dev);
	if (d)
		matches += can_rcv_filter(d, skb);

	rcu_read_unlock();

	/* consume the skbuff allocated by the netdevice driver */
	consume_skb(skb);

	if (matches > 0) {
		can_stats->matches++;
		can_stats->matches_delta++;
	}
}

static int can_rcv(struct sk_buff *skb, struct net_device *dev,
		   struct packet_type *pt, struct net_device *orig_dev)
{
	struct canfd_frame *cfd = (struct canfd_frame *)skb->data;

	if (unlikely(dev->type != ARPHRD_CAN || skb->len != CAN_MTU ||
		     cfd->len > CAN_MAX_DLEN)) {
		pr_warn_once("PF_CAN: dropped non-conforming CAN skbuff: dev type %d, len %d, datalen %d\n",
			     dev->type, skb->len, cfd->len);
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	can_receive(skb, dev);
	return NET_RX_SUCCESS;
}

static int canfd_rcv(struct sk_buff *skb, struct net_device *dev,
		     struct packet_type *pt, struct net_device *orig_dev)
{
	struct canfd_frame *cfd = (struct canfd_frame *)skb->data;

	if (unlikely(dev->type != ARPHRD_CAN || skb->len != CANFD_MTU ||
		     cfd->len > CANFD_MAX_DLEN)) {
		pr_warn_once("PF_CAN: dropped non-conforming CAN FD skbuff: dev type %d, len %d, datalen %d\n",
			     dev->type, skb->len, cfd->len);
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	can_receive(skb, dev);
	return NET_RX_SUCCESS;
}

/*
 * af_can protocol functions
 */

/**
 * can_proto_register - register CAN transport protocol
 * @cp: pointer to CAN protocol structure
 *
 * Return:
 *  0 on success
 *  -EINVAL invalid (out of range) protocol number
 *  -EBUSY  protocol already in use
 *  -ENOBUFS if proto_register() fails
 */
int can_proto_register(const struct can_proto *cp)
{
	int proto = cp->protocol;
	int err = 0;

	if (proto < 0 || proto >= CAN_NPROTO) {
		pr_err("can: protocol number %d out of range\n", proto);
		return -EINVAL;
	}

	err = proto_register(cp->prot, 0);
	if (err < 0)
		return err;

	mutex_lock(&proto_tab_lock);

	if (rcu_access_pointer(proto_tab[proto])) {
		pr_err("can: protocol %d already registered\n", proto);
		err = -EBUSY;
	} else
		RCU_INIT_POINTER(proto_tab[proto], cp);

	mutex_unlock(&proto_tab_lock);

	if (err < 0)
		proto_unregister(cp->prot);

	return err;
}
EXPORT_SYMBOL(can_proto_register);

/**
 * can_proto_unregister - unregister CAN transport protocol
 * @cp: pointer to CAN protocol structure
 */
void can_proto_unregister(const struct can_proto *cp)
{
	int proto = cp->protocol;

	mutex_lock(&proto_tab_lock);
	BUG_ON(rcu_access_pointer(proto_tab[proto]) != cp);
	RCU_INIT_POINTER(proto_tab[proto], NULL);
	mutex_unlock(&proto_tab_lock);

	synchronize_rcu();

	proto_unregister(cp->prot);
}
EXPORT_SYMBOL(can_proto_unregister);
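
/*
 * Illustrative sketch of how a CAN transport protocol module plugs into
 * the proto_tab[] table above (not part of this module).  The names
 * my_can_proto, my_proto_ops, my_proto and MY_PROTO_NUMBER are
 * placeholders; real users are e.g. can-raw (CAN_RAW) and can-bcm
 * (CAN_BCM):
 *
 *	static const struct can_proto my_can_proto = {
 *		.type     = SOCK_DGRAM,
 *		.protocol = MY_PROTO_NUMBER,	(must be < CAN_NPROTO)
 *		.ops      = &my_proto_ops,	(struct proto_ops)
 *		.prot     = &my_proto,		(struct proto)
 *	};
 *
 *	static __init int my_module_init(void)
 *	{
 *		return can_proto_register(&my_can_proto);
 *	}
 *
 *	static __exit void my_module_exit(void)
 *	{
 *		can_proto_unregister(&my_can_proto);
 *	}
 *
 * Together with a matching "can-proto-<nr>" module alias this allows
 * can_create() to load the protocol module on demand.
 */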

/*
 * af_can notifier to create/remove CAN netdevice specific structs
 */
static int can_notifier(struct notifier_block *nb, unsigned long msg,
			void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct can_dev_rcv_lists *d;

	if (dev->type != ARPHRD_CAN)
		return NOTIFY_DONE;

	switch (msg) {

	case NETDEV_REGISTER:

		/* create new dev_rcv_lists for this device */
		d = kzalloc(sizeof(*d), GFP_KERNEL);
		if (!d)
			return NOTIFY_DONE;
		BUG_ON(dev->ml_priv);
		dev->ml_priv = d;

		break;

	case NETDEV_UNREGISTER:
		spin_lock(&dev_net(dev)->can.can_rcvlists_lock);

		d = dev->ml_priv;
		if (d) {
			if (d->entries)
				d->remove_on_zero_entries = 1;
			else {
				kfree(d);
				dev->ml_priv = NULL;
			}
		} else
			pr_err("can: notifier: receive list not found for dev "
			       "%s\n", dev->name);

		spin_unlock(&dev_net(dev)->can.can_rcvlists_lock);

		break;
	}

	return NOTIFY_DONE;
}

static int can_pernet_init(struct net *net)
{
	spin_lock_init(&net->can.can_rcvlists_lock);
	net->can.can_rx_alldev_list =
		kzalloc(sizeof(struct can_dev_rcv_lists), GFP_KERNEL);
	if (!net->can.can_rx_alldev_list)
		goto out;
	net->can.can_stats = kzalloc(sizeof(struct s_stats), GFP_KERNEL);
	if (!net->can.can_stats)
		goto out_free_alldev_list;
	net->can.can_pstats = kzalloc(sizeof(struct s_pstats), GFP_KERNEL);
	if (!net->can.can_pstats)
		goto out_free_can_stats;

	if (IS_ENABLED(CONFIG_PROC_FS)) {
		/* the statistics are updated every second (timer triggered) */
		if (stats_timer) {
			timer_setup(&net->can.can_stattimer, can_stat_update,
				    0);
			mod_timer(&net->can.can_stattimer,
				  round_jiffies(jiffies + HZ));
		}
		net->can.can_stats->jiffies_init = jiffies;
		can_init_proc(net);
	}

	return 0;

out_free_can_stats:
	kfree(net->can.can_stats);
out_free_alldev_list:
	kfree(net->can.can_rx_alldev_list);
out:
	return -ENOMEM;
}

static void can_pernet_exit(struct net *net)
{
	struct net_device *dev;

	if (IS_ENABLED(CONFIG_PROC_FS)) {
		can_remove_proc(net);
		if (stats_timer)
			del_timer_sync(&net->can.can_stattimer);
	}

	/* remove created dev_rcv_lists from still registered CAN devices */
	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
		if (dev->type == ARPHRD_CAN && dev->ml_priv) {
			struct can_dev_rcv_lists *d = dev->ml_priv;

			BUG_ON(d->entries);
			kfree(d);
			dev->ml_priv = NULL;
		}
	}
	rcu_read_unlock();

	kfree(net->can.can_rx_alldev_list);
	kfree(net->can.can_stats);
	kfree(net->can.can_pstats);
}

/*
 * af_can module init/exit functions
 */

static struct packet_type can_packet __read_mostly = {
	.type = cpu_to_be16(ETH_P_CAN),
	.func = can_rcv,
};

static struct packet_type canfd_packet __read_mostly = {
	.type = cpu_to_be16(ETH_P_CANFD),
	.func = canfd_rcv,
};

static const struct net_proto_family can_family_ops = {
	.family = PF_CAN,
	.create = can_create,
	.owner = THIS_MODULE,
};

/* notifier block for netdevice event */
static struct notifier_block can_netdev_notifier __read_mostly = {
	.notifier_call = can_notifier,
};

static struct pernet_operations can_pernet_ops __read_mostly = {
	.init = can_pernet_init,
	.exit = can_pernet_exit,
};

static __init int can_init(void)
{
	int err;

	/* check for correct padding to be able to use the structs similarly */
	BUILD_BUG_ON(offsetof(struct can_frame, can_dlc) !=
		     offsetof(struct canfd_frame, len) ||
		     offsetof(struct can_frame, data) !=
		     offsetof(struct canfd_frame, data));

	pr_info("can: controller area network core (" CAN_VERSION_STRING ")\n");

	rcv_cache = kmem_cache_create("can_receiver", sizeof(struct receiver),
				      0, 0, NULL);
	if (!rcv_cache)
		return -ENOMEM;

	err = register_pernet_subsys(&can_pernet_ops);
	if (err)
		goto out_pernet;

	/* protocol register */
	err = sock_register(&can_family_ops);
	if (err)
		goto out_sock;
	err = register_netdevice_notifier(&can_netdev_notifier);
	if (err)
		goto out_notifier;

	dev_add_pack(&can_packet);
	dev_add_pack(&canfd_packet);

	return 0;

out_notifier:
	sock_unregister(PF_CAN);
out_sock:
	unregister_pernet_subsys(&can_pernet_ops);
out_pernet:
	kmem_cache_destroy(rcv_cache);

	return err;
}

static __exit void can_exit(void)
{
	/* protocol unregister */
	dev_remove_pack(&canfd_packet);
	dev_remove_pack(&can_packet);
	unregister_netdevice_notifier(&can_netdev_notifier);
	sock_unregister(PF_CAN);

	unregister_pernet_subsys(&can_pernet_ops);

	rcu_barrier(); /* Wait for completion of call_rcu()'s */

	kmem_cache_destroy(rcv_cache);
}

module_init(can_init);
module_exit(can_exit);