// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/* af_can.c - Protocol family CAN core module
 * (used by different CAN protocol modules)
 *
 * Copyright (c) 2002-2017 Volkswagen Group Electronic Research
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Volkswagen nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * Alternatively, provided that this notice is retained in full, this
 * software may be distributed under the terms of the GNU General
 * Public License ("GPL") version 2, in which case the provisions of the
 * GPL apply INSTEAD OF those given above.
 *
 * The provided data structures and external interfaces from this code
 * are not restricted to be used by modules with a GPL compatible license.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 */

#include <linux/module.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/uaccess.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/if_ether.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <linux/can.h>
#include <linux/can/core.h>
#include <linux/can/skb.h>
#include <linux/ratelimit.h>
#include <net/net_namespace.h>
#include <net/sock.h>

#include "af_can.h"

MODULE_DESCRIPTION("Controller Area Network PF_CAN core");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Urs Thuermann <urs.thuermann@volkswagen.de>, "
              "Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");

MODULE_ALIAS_NETPROTO(PF_CAN);

static int stats_timer __read_mostly = 1;
module_param(stats_timer, int, 0444);
MODULE_PARM_DESC(stats_timer, "enable timer for statistics (default:on)");

static struct kmem_cache *rcv_cache __read_mostly;

/* table of registered CAN protocols */
static const struct can_proto __rcu *proto_tab[CAN_NPROTO] __read_mostly;
static DEFINE_MUTEX(proto_tab_lock);

static atomic_t skbcounter = ATOMIC_INIT(0);

/* af_can socket functions */

static void can_sock_destruct(struct sock *sk)
{
        skb_queue_purge(&sk->sk_receive_queue);
        skb_queue_purge(&sk->sk_error_queue);
}

static const struct can_proto *can_get_proto(int protocol)
{
        const struct can_proto *cp;

        rcu_read_lock();
        cp = rcu_dereference(proto_tab[protocol]);
        if (cp && !try_module_get(cp->prot->owner))
                cp = NULL;
        rcu_read_unlock();

        return cp;
}

static inline void can_put_proto(const struct can_proto *cp)
{
        module_put(cp->prot->owner);
}

static int can_create(struct net *net, struct socket *sock, int protocol,
                      int kern)
{
        struct sock *sk;
        const struct can_proto *cp;
        int err = 0;

        sock->state = SS_UNCONNECTED;

        if (protocol < 0 || protocol >= CAN_NPROTO)
                return -EINVAL;

        cp = can_get_proto(protocol);

#ifdef CONFIG_MODULES
        if (!cp) {
                /* try to load protocol module if kernel is modular */

                err = request_module("can-proto-%d", protocol);

                /* In case of error we only print a message but don't
                 * return the error code immediately. Below we will
                 * return -EPROTONOSUPPORT
                 */
                if (err)
                        pr_err_ratelimited("can: request_module (can-proto-%d) failed.\n",
                                           protocol);

                cp = can_get_proto(protocol);
        }
#endif

        /* check for available protocol and correct usage */

        if (!cp)
                return -EPROTONOSUPPORT;

        if (cp->type != sock->type) {
                err = -EPROTOTYPE;
                goto errout;
        }

        sock->ops = cp->ops;

        sk = sk_alloc(net, PF_CAN, GFP_KERNEL, cp->prot, kern);
        if (!sk) {
                err = -ENOMEM;
                goto errout;
        }

        sock_init_data(sock, sk);
        sk->sk_destruct = can_sock_destruct;

        if (sk->sk_prot->init)
                err = sk->sk_prot->init(sk);

        if (err) {
                /* release sk on errors */
                sock_orphan(sk);
                sock_put(sk);
        }

errout:
        can_put_proto(cp);
        return err;
}
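
/* Illustrative sketch (not part of this module): how can_create() is
 * reached from user space. The protocol number passed to socket() (e.g.
 * CAN_RAW) selects the entry in proto_tab[] and, if no protocol module is
 * loaded yet, triggers the request_module("can-proto-%d") call above.
 * Error handling and the subsequent bind() are omitted.
 *
 *	#include <sys/socket.h>
 *	#include <linux/can.h>
 *
 *	int s = socket(PF_CAN, SOCK_RAW, CAN_RAW);
 *	// bind() the socket to an interface, then read()/write() CAN frames
 */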

/* af_can tx path */

/**
 * can_send - transmit a CAN frame (optionally with local loopback)
 * @skb: pointer to socket buffer with CAN frame in data section
 * @loop: loopback for listeners on local CAN sockets (recommended default!)
 *
 * Due to the loopback this routine must not be called from hardirq context.
 *
 * Return:
 *  0 on success
 *  -ENETDOWN when the selected interface is down
 *  -ENOBUFS on full driver queue (see net_xmit_errno())
 *  -ENOMEM when local loopback failed at calling skb_clone()
 *  -EPERM when trying to send on a non-CAN interface
 *  -EMSGSIZE CAN frame size is bigger than CAN interface MTU
 *  -EINVAL when the skb->data does not contain a valid CAN frame
 */
int can_send(struct sk_buff *skb, int loop)
{
        struct sk_buff *newskb = NULL;
        struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
        struct s_stats *can_stats = dev_net(skb->dev)->can.can_stats;
        int err = -EINVAL;

        if (skb->len == CAN_MTU) {
                skb->protocol = htons(ETH_P_CAN);
                if (unlikely(cfd->len > CAN_MAX_DLEN))
                        goto inval_skb;
        } else if (skb->len == CANFD_MTU) {
                skb->protocol = htons(ETH_P_CANFD);
                if (unlikely(cfd->len > CANFD_MAX_DLEN))
                        goto inval_skb;
        } else {
                goto inval_skb;
        }

        /* Make sure the CAN frame can pass the selected CAN netdevice.
         * As structs can_frame and canfd_frame are similar, we can provide
         * CAN FD frames to legacy CAN drivers as long as the length is <= 8
         */
        if (unlikely(skb->len > skb->dev->mtu && cfd->len > CAN_MAX_DLEN)) {
                err = -EMSGSIZE;
                goto inval_skb;
        }

        if (unlikely(skb->dev->type != ARPHRD_CAN)) {
                err = -EPERM;
                goto inval_skb;
        }

        if (unlikely(!(skb->dev->flags & IFF_UP))) {
                err = -ENETDOWN;
                goto inval_skb;
        }

        skb->ip_summed = CHECKSUM_UNNECESSARY;

        skb_reset_mac_header(skb);
        skb_reset_network_header(skb);
        skb_reset_transport_header(skb);

        if (loop) {
                /* local loopback of sent CAN frames */

                /* indication for the CAN driver: do loopback */
                skb->pkt_type = PACKET_LOOPBACK;

                /* The reference to the originating sock may be required
                 * by the receiving socket to check whether the frame is
                 * its own. Example: can_raw sockopt CAN_RAW_RECV_OWN_MSGS
                 * Therefore we have to ensure that skb->sk remains the
                 * reference to the originating sock by restoring skb->sk
                 * after each skb_clone() or skb_orphan() usage.
                 */

                if (!(skb->dev->flags & IFF_ECHO)) {
                        /* If the interface is not capable of doing loopback
                         * itself, we do it here.
                         */
                        newskb = skb_clone(skb, GFP_ATOMIC);
                        if (!newskb) {
                                kfree_skb(skb);
                                return -ENOMEM;
                        }

                        can_skb_set_owner(newskb, skb->sk);
                        newskb->ip_summed = CHECKSUM_UNNECESSARY;
                        newskb->pkt_type = PACKET_BROADCAST;
                }
        } else {
                /* indication for the CAN driver: no loopback required */
                skb->pkt_type = PACKET_HOST;
        }

        /* send to netdevice */
        err = dev_queue_xmit(skb);
        if (err > 0)
                err = net_xmit_errno(err);

        if (err) {
                kfree_skb(newskb);
                return err;
        }

        if (newskb)
                netif_rx_ni(newskb);

        /* update statistics */
        can_stats->tx_frames++;
        can_stats->tx_frames_delta++;

        return 0;

inval_skb:
        kfree_skb(skb);
        return err;
}
EXPORT_SYMBOL(can_send);
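
/* Illustrative sketch (not compiled): how a CAN protocol module typically
 * builds an skb and hands it to can_send(). Function name, interface index
 * and frame contents are made up for illustration; real callers usually
 * allocate via sock_alloc_send_skb() and attach the sending socket with
 * can_skb_set_owner(). Note that can_send() consumes the skb in all cases,
 * so only the device reference needs cleanup here.
 */
#if 0
static int example_send_classic_frame(struct net *net, int ifindex)
{
        struct net_device *dev;
        struct sk_buff *skb;
        struct can_frame *cf;
        int err;

        dev = dev_get_by_index(net, ifindex);
        if (!dev)
                return -ENODEV;

        skb = alloc_skb(sizeof(struct can_skb_priv) + sizeof(*cf),
                        GFP_KERNEL);
        if (!skb) {
                dev_put(dev);
                return -ENOMEM;
        }

        /* reserve room for the out-of-band CAN metadata (ifindex, skbcnt) */
        can_skb_reserve(skb);
        can_skb_prv(skb)->ifindex = dev->ifindex;
        can_skb_prv(skb)->skbcnt = 0;

        /* skb->len becomes CAN_MTU, which can_send() checks */
        cf = skb_put_zero(skb, sizeof(*cf));
        cf->can_id = 0x123;
        cf->can_dlc = 2;
        cf->data[0] = 0xde;
        cf->data[1] = 0xad;

        skb->dev = dev;

        /* loopback enabled: local listeners see this frame as well */
        err = can_send(skb, 1);

        dev_put(dev);
        return err;
}
#endif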

/* af_can rx path */

static struct can_dev_rcv_lists *find_dev_rcv_lists(struct net *net,
                                                    struct net_device *dev)
{
        if (!dev)
                return net->can.can_rx_alldev_list;
        else
                return (struct can_dev_rcv_lists *)dev->ml_priv;
}

/**
 * effhash - hash function for 29 bit CAN identifier reduction
 * @can_id: 29 bit CAN identifier
 *
 * Description:
 * To reduce the linear traversal in one linked list of _single_ EFF CAN
 * frame subscriptions the 29 bit identifier is mapped to 10 bits.
 * (see CAN_EFF_RCV_HASH_BITS definition)
 *
 * Return:
 * Hash value from 0x000 - 0x3FF (enforced by CAN_EFF_RCV_HASH_BITS mask)
 */
static unsigned int effhash(canid_t can_id)
{
        unsigned int hash;

        hash = can_id;
        hash ^= can_id >> CAN_EFF_RCV_HASH_BITS;
        hash ^= can_id >> (2 * CAN_EFF_RCV_HASH_BITS);

        return hash & ((1 << CAN_EFF_RCV_HASH_BITS) - 1);
}

/**
 * find_rcv_list - determine optimal filterlist inside device filter struct
 * @can_id: pointer to CAN identifier of a given can_filter
 * @mask: pointer to CAN mask of a given can_filter
 * @d: pointer to the device filter struct
 *
 * Description:
 * Returns the optimal filterlist to reduce the filter handling in the
 * receive path. This function is called by service functions that need
 * to register or unregister a can_filter in the filter lists.
 *
 * A filter matches in general, when
 *
 *          <received_can_id> & mask == can_id & mask
 *
 * so every bit set in the mask (even CAN_EFF_FLAG, CAN_RTR_FLAG) describes
 * relevant bits for the filter.
 *
 * The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can
 * filter for error messages (CAN_ERR_FLAG bit set in mask). For error msg
 * frames there is a special filterlist and a special rx path filter handling.
 *
 * Return:
 * Pointer to optimal filterlist for the given can_id/mask pair.
 * Consistency checked mask.
 * Reduced can_id to have a preprocessed filter compare value.
 */
static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
                                        struct can_dev_rcv_lists *d)
{
        canid_t inv = *can_id & CAN_INV_FILTER; /* save flag before masking */

        /* filter for error message frames in extra filterlist */
        if (*mask & CAN_ERR_FLAG) {
                /* clear CAN_ERR_FLAG in filter entry */
                *mask &= CAN_ERR_MASK;
                return &d->rx[RX_ERR];
        }

        /* with cleared CAN_ERR_FLAG we have a simple mask/value filterpair */

#define CAN_EFF_RTR_FLAGS (CAN_EFF_FLAG | CAN_RTR_FLAG)

        /* ensure valid values in can_mask for 'SFF only' frame filtering */
        if ((*mask & CAN_EFF_FLAG) && !(*can_id & CAN_EFF_FLAG))
                *mask &= (CAN_SFF_MASK | CAN_EFF_RTR_FLAGS);

        /* reduce condition testing at receive time */
        *can_id &= *mask;

        /* inverse can_id/can_mask filter */
        if (inv)
                return &d->rx[RX_INV];

        /* mask == 0 => no condition testing at receive time */
        if (!(*mask))
                return &d->rx[RX_ALL];

        /* extra filterlists for the subscription of a single non-RTR can_id */
        if (((*mask & CAN_EFF_RTR_FLAGS) == CAN_EFF_RTR_FLAGS) &&
            !(*can_id & CAN_RTR_FLAG)) {
                if (*can_id & CAN_EFF_FLAG) {
                        if (*mask == (CAN_EFF_MASK | CAN_EFF_RTR_FLAGS))
                                return &d->rx_eff[effhash(*can_id)];
                } else {
                        if (*mask == (CAN_SFF_MASK | CAN_EFF_RTR_FLAGS))
                                return &d->rx_sff[*can_id];
                }
        }

        /* default: filter via can_id/can_mask */
        return &d->rx[RX_FIL];
}
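
/* Worked example (values made up for illustration): a subscription with
 * can_id = 0x123 and mask = (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG)
 * passes the "single non-RTR can_id" test above and lands directly in
 * d->rx_sff[0x123]. The same id with mask = CAN_SFF_MASK only (EFF/RTR bits
 * not part of the comparison) falls through to the generic d->rx[RX_FIL]
 * list, and a filter with mask = 0 subscribes to everything via
 * d->rx[RX_ALL].
 */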

/**
 * can_rx_register - subscribe CAN frames from a specific interface
 * @net: the applicable net namespace
 * @dev: pointer to netdevice (NULL => subscribe from 'all' CAN devices list)
 * @can_id: CAN identifier (see description)
 * @mask: CAN mask (see description)
 * @func: callback function on filter match
 * @data: returned parameter for callback function
 * @ident: string for calling module identification
 * @sk: socket pointer (might be NULL)
 *
 * Description:
 * Invokes the callback function with the received sk_buff and the given
 * parameter 'data' on a matching receive filter. A filter matches, when
 *
 *          <received_can_id> & mask == can_id & mask
 *
 * The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can
 * filter for error message frames (CAN_ERR_FLAG bit set in mask).
 *
 * The provided pointer to the sk_buff is guaranteed to be valid as long as
 * the callback function is running. The callback function must *not* free
 * the given sk_buff while processing its task. When the given sk_buff is
 * needed after the end of the callback function it must be cloned inside
 * the callback function with skb_clone().
 *
 * Return:
 *  0 on success
 *  -ENOMEM on missing cache mem to create subscription entry
 *  -ENODEV unknown device
 */
int can_rx_register(struct net *net, struct net_device *dev, canid_t can_id,
                    canid_t mask, void (*func)(struct sk_buff *, void *),
                    void *data, char *ident, struct sock *sk)
{
        struct receiver *r;
        struct hlist_head *rl;
        struct can_dev_rcv_lists *d;
        struct s_pstats *can_pstats = net->can.can_pstats;
        int err = 0;

        /* insert new receiver  (dev,canid,mask) -> (func,data) */

        if (dev && dev->type != ARPHRD_CAN)
                return -ENODEV;

        if (dev && !net_eq(net, dev_net(dev)))
                return -ENODEV;

        r = kmem_cache_alloc(rcv_cache, GFP_KERNEL);
        if (!r)
                return -ENOMEM;

        spin_lock(&net->can.can_rcvlists_lock);

        d = find_dev_rcv_lists(net, dev);
        if (d) {
                rl = find_rcv_list(&can_id, &mask, d);

                r->can_id = can_id;
                r->mask = mask;
                r->matches = 0;
                r->func = func;
                r->data = data;
                r->ident = ident;
                r->sk = sk;

                hlist_add_head_rcu(&r->list, rl);
                d->entries++;

                can_pstats->rcv_entries++;
                if (can_pstats->rcv_entries_max < can_pstats->rcv_entries)
                        can_pstats->rcv_entries_max = can_pstats->rcv_entries;
        } else {
                kmem_cache_free(rcv_cache, r);
                err = -ENODEV;
        }

        spin_unlock(&net->can.can_rcvlists_lock);

        return err;
}
EXPORT_SYMBOL(can_rx_register);
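
/* Illustrative sketch (not compiled): how a protocol module typically
 * subscribes to CAN frames. The callback runs in the softirq rx path under
 * RCU protection, so it must not sleep and must clone the skb if the data
 * is needed afterwards. The names and the private struct are made up for
 * illustration; every successful can_rx_register() must later be paired
 * with a can_rx_unregister() using identical parameters.
 */
#if 0
struct example_priv {
        struct sock *sk;
        int ifindex;
};

static void example_rcv(struct sk_buff *skb, void *data)
{
        struct example_priv *priv = data;
        struct sk_buff *clone;

        /* keep a private copy and queue it to the owning socket */
        clone = skb_clone(skb, GFP_ATOMIC);
        if (clone && sock_queue_rcv_skb(priv->sk, clone) < 0)
                kfree_skb(clone);
}

static int example_subscribe(struct net *net, struct net_device *dev,
                             struct example_priv *priv)
{
        /* deliver every non-RTR frame with SFF id 0x123 seen on @dev */
        return can_rx_register(net, dev, 0x123,
                               CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG,
                               example_rcv, priv, "example", priv->sk);
}
#endif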

/* can_rx_delete_receiver - rcu callback for single receiver entry removal */
static void can_rx_delete_receiver(struct rcu_head *rp)
{
        struct receiver *r = container_of(rp, struct receiver, rcu);
        struct sock *sk = r->sk;

        kmem_cache_free(rcv_cache, r);
        if (sk)
                sock_put(sk);
}

/**
 * can_rx_unregister - unsubscribe CAN frames from a specific interface
 * @net: the applicable net namespace
 * @dev: pointer to netdevice (NULL => unsubscribe from 'all' CAN devices list)
 * @can_id: CAN identifier
 * @mask: CAN mask
 * @func: callback function on filter match
 * @data: returned parameter for callback function
 *
 * Description:
 * Removes subscription entry depending on given (subscription) values.
 */
void can_rx_unregister(struct net *net, struct net_device *dev, canid_t can_id,
                       canid_t mask, void (*func)(struct sk_buff *, void *),
                       void *data)
{
        struct receiver *r = NULL;
        struct hlist_head *rl;
        struct s_pstats *can_pstats = net->can.can_pstats;
        struct can_dev_rcv_lists *d;

        if (dev && dev->type != ARPHRD_CAN)
                return;

        if (dev && !net_eq(net, dev_net(dev)))
                return;

        spin_lock(&net->can.can_rcvlists_lock);

        d = find_dev_rcv_lists(net, dev);
        if (!d) {
                pr_err("BUG: receive list not found for dev %s, id %03X, mask %03X\n",
                       DNAME(dev), can_id, mask);
                goto out;
        }

        rl = find_rcv_list(&can_id, &mask, d);

        /* Search the receiver list for the item to delete. This should
         * exist, since no receiver may be unregistered that hasn't
         * been registered before.
         */

        hlist_for_each_entry_rcu(r, rl, list) {
                if (r->can_id == can_id && r->mask == mask &&
                    r->func == func && r->data == data)
                        break;
        }

        /* Check for bugs in CAN protocol implementations using af_can.c:
         * 'r' will be NULL if no matching list item was found for removal.
         */

        if (!r) {
                WARN(1, "BUG: receive list entry not found for dev %s, id %03X, mask %03X\n",
                     DNAME(dev), can_id, mask);
                goto out;
        }

        hlist_del_rcu(&r->list);
        d->entries--;

        if (can_pstats->rcv_entries > 0)
                can_pstats->rcv_entries--;

        /* remove device structure requested by NETDEV_UNREGISTER */
        if (d->remove_on_zero_entries && !d->entries) {
                kfree(d);
                dev->ml_priv = NULL;
        }

out:
        spin_unlock(&net->can.can_rcvlists_lock);

        /* schedule the receiver item for deletion */
        if (r) {
                if (r->sk)
                        sock_hold(r->sk);
                call_rcu(&r->rcu, can_rx_delete_receiver);
        }
}
EXPORT_SYMBOL(can_rx_unregister);
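
/* Illustrative sketch (not compiled): the counterpart to the subscription
 * example above. A protocol module tears down its filter in the socket
 * release (or netdevice notifier) path with exactly the same tuple that was
 * passed to can_rx_register(); the receiver entry itself is freed later via
 * RCU. Names mirror the hypothetical example_* helpers above.
 */
#if 0
static void example_unsubscribe(struct net *net, struct net_device *dev,
                                struct example_priv *priv)
{
        can_rx_unregister(net, dev, 0x123,
                          CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG,
                          example_rcv, priv);
}
#endif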

static inline void deliver(struct sk_buff *skb, struct receiver *r)
{
        r->func(skb, r->data);
        r->matches++;
}

static int can_rcv_filter(struct can_dev_rcv_lists *d, struct sk_buff *skb)
{
        struct receiver *r;
        int matches = 0;
        struct can_frame *cf = (struct can_frame *)skb->data;
        canid_t can_id = cf->can_id;

        if (d->entries == 0)
                return 0;

        if (can_id & CAN_ERR_FLAG) {
                /* check for error message frame entries only */
                hlist_for_each_entry_rcu(r, &d->rx[RX_ERR], list) {
                        if (can_id & r->mask) {
                                deliver(skb, r);
                                matches++;
                        }
                }
                return matches;
        }

        /* check for unfiltered entries */
        hlist_for_each_entry_rcu(r, &d->rx[RX_ALL], list) {
                deliver(skb, r);
                matches++;
        }

        /* check for can_id/mask entries */
        hlist_for_each_entry_rcu(r, &d->rx[RX_FIL], list) {
                if ((can_id & r->mask) == r->can_id) {
                        deliver(skb, r);
                        matches++;
                }
        }

        /* check for inverted can_id/mask entries */
        hlist_for_each_entry_rcu(r, &d->rx[RX_INV], list) {
                if ((can_id & r->mask) != r->can_id) {
                        deliver(skb, r);
                        matches++;
                }
        }

        /* check filterlists for single non-RTR can_ids */
        if (can_id & CAN_RTR_FLAG)
                return matches;

        if (can_id & CAN_EFF_FLAG) {
                hlist_for_each_entry_rcu(r, &d->rx_eff[effhash(can_id)], list) {
                        if (r->can_id == can_id) {
                                deliver(skb, r);
                                matches++;
                        }
                }
        } else {
                can_id &= CAN_SFF_MASK;
                hlist_for_each_entry_rcu(r, &d->rx_sff[can_id], list) {
                        deliver(skb, r);
                        matches++;
                }
        }

        return matches;
}

static void can_receive(struct sk_buff *skb, struct net_device *dev)
{
        struct can_dev_rcv_lists *d;
        struct net *net = dev_net(dev);
        struct s_stats *can_stats = net->can.can_stats;
        int matches;

        /* update statistics */
        can_stats->rx_frames++;
        can_stats->rx_frames_delta++;

        /* create non-zero unique skb identifier together with *skb */
        while (!(can_skb_prv(skb)->skbcnt))
                can_skb_prv(skb)->skbcnt = atomic_inc_return(&skbcounter);

        rcu_read_lock();

        /* deliver the packet to sockets listening on all devices */
        matches = can_rcv_filter(net->can.can_rx_alldev_list, skb);

        /* find receive list for this device */
        d = find_dev_rcv_lists(net, dev);
        if (d)
                matches += can_rcv_filter(d, skb);

        rcu_read_unlock();

        /* consume the skbuff allocated by the netdevice driver */
        consume_skb(skb);

        if (matches > 0) {
                can_stats->matches++;
                can_stats->matches_delta++;
        }
}

static int can_rcv(struct sk_buff *skb, struct net_device *dev,
                   struct packet_type *pt, struct net_device *orig_dev)
{
        struct canfd_frame *cfd = (struct canfd_frame *)skb->data;

        if (unlikely(dev->type != ARPHRD_CAN || skb->len != CAN_MTU ||
                     cfd->len > CAN_MAX_DLEN)) {
                pr_warn_once("PF_CAN: dropped non conform CAN skbuf: dev type %d, len %d, datalen %d\n",
                             dev->type, skb->len, cfd->len);
                kfree_skb(skb);
                return NET_RX_DROP;
        }

        can_receive(skb, dev);
        return NET_RX_SUCCESS;
}

static int canfd_rcv(struct sk_buff *skb, struct net_device *dev,
                     struct packet_type *pt, struct net_device *orig_dev)
{
        struct canfd_frame *cfd = (struct canfd_frame *)skb->data;

        if (unlikely(dev->type != ARPHRD_CAN || skb->len != CANFD_MTU ||
                     cfd->len > CANFD_MAX_DLEN)) {
                pr_warn_once("PF_CAN: dropped non conform CAN FD skbuf: dev type %d, len %d, datalen %d\n",
                             dev->type, skb->len, cfd->len);
                kfree_skb(skb);
                return NET_RX_DROP;
        }

        can_receive(skb, dev);
        return NET_RX_SUCCESS;
}

/* af_can protocol functions */

/**
 * can_proto_register - register CAN transport protocol
 * @cp: pointer to CAN protocol structure
 *
 * Return:
 *  0 on success
 *  -EINVAL invalid (out of range) protocol number
 *  -EBUSY protocol already in use
 *  -ENOBUFS if proto_register() fails
 */
int can_proto_register(const struct can_proto *cp)
{
        int proto = cp->protocol;
        int err = 0;

        if (proto < 0 || proto >= CAN_NPROTO) {
                pr_err("can: protocol number %d out of range\n", proto);
                return -EINVAL;
        }

        err = proto_register(cp->prot, 0);
        if (err < 0)
                return err;

        mutex_lock(&proto_tab_lock);

        if (rcu_access_pointer(proto_tab[proto])) {
                pr_err("can: protocol %d already registered\n", proto);
                err = -EBUSY;
        } else {
                RCU_INIT_POINTER(proto_tab[proto], cp);
        }

        mutex_unlock(&proto_tab_lock);

        if (err < 0)
                proto_unregister(cp->prot);

        return err;
}
EXPORT_SYMBOL(can_proto_register);

/**
 * can_proto_unregister - unregister CAN transport protocol
 * @cp: pointer to CAN protocol structure
 */
void can_proto_unregister(const struct can_proto *cp)
{
        int proto = cp->protocol;

        mutex_lock(&proto_tab_lock);
        BUG_ON(rcu_access_pointer(proto_tab[proto]) != cp);
        RCU_INIT_POINTER(proto_tab[proto], NULL);
        mutex_unlock(&proto_tab_lock);

        synchronize_rcu();

        proto_unregister(cp->prot);
}
EXPORT_SYMBOL(can_proto_unregister);
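
/* Illustrative sketch (not compiled): the skeleton of a CAN protocol module
 * that plugs into proto_tab[] via can_proto_register(). The protocol number,
 * the ops/prot contents and all example_* names are made up; the
 * MODULE_ALIAS() is what lets can_create() above autoload such a module
 * through request_module("can-proto-%d").
 */
#if 0
#define CAN_EXAMPLE 6	/* hypothetical, must be < CAN_NPROTO and unused */

static const struct proto_ops example_ops = {
        .family = PF_CAN,
        .owner = THIS_MODULE,
        /* .release, .bind, .sendmsg, .recvmsg, ... */
};

static struct proto example_proto __read_mostly = {
        .name = "EXAMPLE",
        .owner = THIS_MODULE,
        .obj_size = sizeof(struct sock),
};

static const struct can_proto example_can_proto = {
        .type = SOCK_DGRAM,
        .protocol = CAN_EXAMPLE,
        .ops = &example_ops,
        .prot = &example_proto,
};

static __init int example_module_init(void)
{
        return can_proto_register(&example_can_proto);
}

static __exit void example_module_exit(void)
{
        can_proto_unregister(&example_can_proto);
}

module_init(example_module_init);
module_exit(example_module_exit);
MODULE_ALIAS("can-proto-6");
#endif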

/* af_can notifier to create/remove CAN netdevice specific structs */
static int can_notifier(struct notifier_block *nb, unsigned long msg,
                        void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct can_dev_rcv_lists *d;

        if (dev->type != ARPHRD_CAN)
                return NOTIFY_DONE;

        switch (msg) {
        case NETDEV_REGISTER:

                /* create new dev_rcv_lists for this device */
                d = kzalloc(sizeof(*d), GFP_KERNEL);
                if (!d)
                        return NOTIFY_DONE;
                BUG_ON(dev->ml_priv);
                dev->ml_priv = d;

                break;

        case NETDEV_UNREGISTER:
                spin_lock(&dev_net(dev)->can.can_rcvlists_lock);

                d = dev->ml_priv;
                if (d) {
                        if (d->entries) {
                                d->remove_on_zero_entries = 1;
                        } else {
                                kfree(d);
                                dev->ml_priv = NULL;
                        }
                } else {
                        pr_err("can: notifier: receive list not found for dev %s\n",
                               dev->name);
                }

                spin_unlock(&dev_net(dev)->can.can_rcvlists_lock);

                break;
        }

        return NOTIFY_DONE;
}

static int can_pernet_init(struct net *net)
{
        spin_lock_init(&net->can.can_rcvlists_lock);
        net->can.can_rx_alldev_list =
                kzalloc(sizeof(*net->can.can_rx_alldev_list), GFP_KERNEL);
        if (!net->can.can_rx_alldev_list)
                goto out;
        net->can.can_stats = kzalloc(sizeof(*net->can.can_stats), GFP_KERNEL);
        if (!net->can.can_stats)
                goto out_free_alldev_list;
        net->can.can_pstats = kzalloc(sizeof(*net->can.can_pstats), GFP_KERNEL);
        if (!net->can.can_pstats)
                goto out_free_can_stats;

        if (IS_ENABLED(CONFIG_PROC_FS)) {
                /* the statistics are updated every second (timer triggered) */
                if (stats_timer) {
                        timer_setup(&net->can.can_stattimer, can_stat_update,
                                    0);
                        mod_timer(&net->can.can_stattimer,
                                  round_jiffies(jiffies + HZ));
                }
                net->can.can_stats->jiffies_init = jiffies;
                can_init_proc(net);
        }

        return 0;

out_free_can_stats:
        kfree(net->can.can_stats);
out_free_alldev_list:
        kfree(net->can.can_rx_alldev_list);
out:
        return -ENOMEM;
}

static void can_pernet_exit(struct net *net)
{
        struct net_device *dev;

        if (IS_ENABLED(CONFIG_PROC_FS)) {
                can_remove_proc(net);
                if (stats_timer)
                        del_timer_sync(&net->can.can_stattimer);
        }

        /* remove created dev_rcv_lists from still registered CAN devices */
        rcu_read_lock();
        for_each_netdev_rcu(net, dev) {
                if (dev->type == ARPHRD_CAN && dev->ml_priv) {
                        struct can_dev_rcv_lists *d = dev->ml_priv;

                        BUG_ON(d->entries);
                        kfree(d);
                        dev->ml_priv = NULL;
                }
        }
        rcu_read_unlock();

        kfree(net->can.can_rx_alldev_list);
        kfree(net->can.can_stats);
        kfree(net->can.can_pstats);
}

/* af_can module init/exit functions */

static struct packet_type can_packet __read_mostly = {
        .type = cpu_to_be16(ETH_P_CAN),
        .func = can_rcv,
};

static struct packet_type canfd_packet __read_mostly = {
        .type = cpu_to_be16(ETH_P_CANFD),
        .func = canfd_rcv,
};

static const struct net_proto_family can_family_ops = {
        .family = PF_CAN,
        .create = can_create,
        .owner = THIS_MODULE,
};

/* notifier block for netdevice event */
static struct notifier_block can_netdev_notifier __read_mostly = {
        .notifier_call = can_notifier,
};

static struct pernet_operations can_pernet_ops __read_mostly = {
        .init = can_pernet_init,
        .exit = can_pernet_exit,
};

static __init int can_init(void)
{
        int err;

        /* check for correct padding to be able to use the structs similarly */
        BUILD_BUG_ON(offsetof(struct can_frame, can_dlc) !=
                     offsetof(struct canfd_frame, len) ||
                     offsetof(struct can_frame, data) !=
                     offsetof(struct canfd_frame, data));

        pr_info("can: controller area network core (" CAN_VERSION_STRING ")\n");

        rcv_cache = kmem_cache_create("can_receiver", sizeof(struct receiver),
                                      0, 0, NULL);
        if (!rcv_cache)
                return -ENOMEM;

        err = register_pernet_subsys(&can_pernet_ops);
        if (err)
                goto out_pernet;

        /* protocol register */
        err = sock_register(&can_family_ops);
        if (err)
                goto out_sock;
        err = register_netdevice_notifier(&can_netdev_notifier);
        if (err)
                goto out_notifier;

        dev_add_pack(&can_packet);
        dev_add_pack(&canfd_packet);

        return 0;

out_notifier:
        sock_unregister(PF_CAN);
out_sock:
        unregister_pernet_subsys(&can_pernet_ops);
out_pernet:
        kmem_cache_destroy(rcv_cache);

        return err;
}

static __exit void can_exit(void)
{
        /* protocol unregister */
        dev_remove_pack(&canfd_packet);
        dev_remove_pack(&can_packet);
        unregister_netdevice_notifier(&can_netdev_notifier);
        sock_unregister(PF_CAN);

        unregister_pernet_subsys(&can_pernet_ops);

        rcu_barrier(); /* Wait for completion of call_rcu()'s */

        kmem_cache_destroy(rcv_cache);
}

module_init(can_init);
module_exit(can_exit);