// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
 * bcm.c - Broadcast Manager to filter/send (cyclic) CAN content
 *
 * Copyright (c) 2002-2017 Volkswagen Group Electronic Research
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Volkswagen nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * Alternatively, provided that this notice is retained in full, this
 * software may be distributed under the terms of the GNU General
 * Public License ("GPL") version 2, in which case the provisions of the
 * GPL apply INSTEAD OF those given above.
 *
 * The provided data structures and external interfaces from this code
 * are not restricted to be used by modules with a GPL compatible license.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/hrtimer.h>
#include <linux/list.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/uio.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <linux/can.h>
#include <linux/can/core.h>
#include <linux/can/skb.h>
#include <linux/can/bcm.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <net/net_namespace.h>

/*
 * To send multiple CAN frame content within TX_SETUP or to filter
 * CAN messages with multiplex index within RX_SETUP, the number of
 * different filters is limited to 256 due to the one byte index value.
 */
#define MAX_NFRAMES 256

/* limit timers to 400 days for sending/timeouts */
#define BCM_TIMER_SEC_MAX	(400 * 24 * 60 * 60)

/* use of last_frames[index].flags */
#define RX_RECV 0x40 /* received data for this element */
#define RX_THR  0x80 /* element has not been sent due to throttle feature */
#define BCM_CAN_FLAGS_MASK 0x3F /* to clean private flags after usage */

/* get best masking value for can_rx_register() for a given single can_id */
#define REGMASK(id) ((id & CAN_EFF_FLAG) ? \
		     (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \
		     (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG))
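
/*
 * Illustrative userspace usage (not part of the kernel build): a minimal
 * sketch of how a process typically drives this protocol.  It opens a
 * CAN_BCM socket, connects it to an interface and issues a TX_SETUP with
 * one cyclic classic CAN frame.  The struct and function names, the
 * interface name "can0", the CAN ID and the timing values are arbitrary
 * examples; error handling is reduced to a minimum.
 *
 *	#include <unistd.h>
 *	#include <net/if.h>
 *	#include <sys/socket.h>
 *	#include <linux/can.h>
 *	#include <linux/can/bcm.h>
 *
 *	struct bcm_tx_msg {
 *		struct bcm_msg_head head;
 *		struct can_frame frame[1];
 *	};
 *
 *	int bcm_tx_example(const char *ifname)
 *	{
 *		struct sockaddr_can addr = { .can_family = AF_CAN };
 *		struct bcm_tx_msg msg = { 0 };
 *		int s;
 *
 *		s = socket(PF_CAN, SOCK_DGRAM, CAN_BCM);
 *		if (s < 0)
 *			return -1;
 *
 *		addr.can_ifindex = if_nametoindex(ifname);
 *		if (connect(s, (struct sockaddr *)&addr, sizeof(addr)) < 0)
 *			return -1;
 *
 *		msg.head.opcode = TX_SETUP;
 *		msg.head.flags = SETTIMER | STARTTIMER;
 *		msg.head.can_id = 0x123;
 *		msg.head.nframes = 1;
 *		msg.head.ival2.tv_usec = 100000;	// send every 100 ms
 *		msg.frame[0].can_id = 0x123;
 *		msg.frame[0].len = 2;
 *		msg.frame[0].data[0] = 0x11;
 *		msg.frame[0].data[1] = 0x22;
 *
 *		if (write(s, &msg, sizeof(msg)) < 0)
 *			return -1;
 *
 *		return s;	// closing the socket stops the cyclic TX
 *	}
 */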

MODULE_DESCRIPTION("PF_CAN broadcast manager protocol");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
MODULE_ALIAS("can-proto-2");

#define BCM_MIN_NAMELEN CAN_REQUIRED_SIZE(struct sockaddr_can, can_ifindex)

/*
 * easy access to the first 64 bits of can(fd)_frame payload. cp->data is
 * 64 bit aligned so the offset has to be a multiple of 8 which is ensured
 * by the only callers in bcm_rx_cmp_to_index() and bcm_rx_handler().
 */
static inline u64 get_u64(const struct canfd_frame *cp, int offset)
{
	return *(u64 *)(cp->data + offset);
}

struct bcm_op {
	struct list_head list;
	struct rcu_head rcu;
	int ifindex;
	canid_t can_id;
	u32 flags;
	unsigned long frames_abs, frames_filtered;
	struct bcm_timeval ival1, ival2;
	struct hrtimer timer, thrtimer;
	ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg;
	int rx_ifindex;
	int cfsiz;
	u32 count;
	u32 nframes;
	u32 currframe;
	/* void pointers to arrays of struct can[fd]_frame */
	void *frames;
	void *last_frames;
	struct canfd_frame sframe;
	struct canfd_frame last_sframe;
	struct sock *sk;
	struct net_device *rx_reg_dev;
};

struct bcm_sock {
	struct sock sk;
	int bound;
	int ifindex;
	struct list_head notifier;
	struct list_head rx_ops;
	struct list_head tx_ops;
	unsigned long dropped_usr_msgs;
	struct proc_dir_entry *bcm_proc_read;
	char procname [32]; /* inode number in decimal with \0 */
};

static LIST_HEAD(bcm_notifier_list);
static DEFINE_SPINLOCK(bcm_notifier_lock);
static struct bcm_sock *bcm_busy_notifier;

static inline struct bcm_sock *bcm_sk(const struct sock *sk)
{
	return (struct bcm_sock *)sk;
}

static inline ktime_t bcm_timeval_to_ktime(struct bcm_timeval tv)
{
	return ktime_set(tv.tv_sec, tv.tv_usec * NSEC_PER_USEC);
}

/* check limitations for timeval provided by user */
static bool bcm_is_invalid_tv(struct bcm_msg_head *msg_head)
{
	if ((msg_head->ival1.tv_sec < 0) ||
	    (msg_head->ival1.tv_sec > BCM_TIMER_SEC_MAX) ||
	    (msg_head->ival1.tv_usec < 0) ||
	    (msg_head->ival1.tv_usec >= USEC_PER_SEC) ||
	    (msg_head->ival2.tv_sec < 0) ||
	    (msg_head->ival2.tv_sec > BCM_TIMER_SEC_MAX) ||
	    (msg_head->ival2.tv_usec < 0) ||
	    (msg_head->ival2.tv_usec >= USEC_PER_SEC))
		return true;

	return false;
}
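
/*
 * Illustrative RX_SETUP counterpart (userspace, not part of the kernel
 * build): monitor CAN ID 0x042 on an already connected CAN_BCM socket 's',
 * report content changes in all eight data bytes and signal RX_TIMEOUT when
 * the frame stays away for five seconds.  Resulting RX_CHANGED/RX_TIMEOUT
 * messages are read() from the same socket as a bcm_msg_head followed by
 * the changed frame.  CAN ID, mask and timeout are arbitrary examples.
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <linux/can.h>
 *	#include <linux/can/bcm.h>
 *
 *	struct bcm_rx_msg {
 *		struct bcm_msg_head head;
 *		struct can_frame frame[1];
 *	};
 *
 *	int bcm_rx_example(int s)
 *	{
 *		struct bcm_rx_msg msg = { 0 };
 *
 *		msg.head.opcode = RX_SETUP;
 *		msg.head.flags = SETTIMER | STARTTIMER;
 *		msg.head.can_id = 0x042;
 *		msg.head.nframes = 1;
 *		msg.head.ival1.tv_sec = 5;	// RX_TIMEOUT after 5 s
 *		msg.frame[0].len = 8;
 *		memset(msg.frame[0].data, 0xFF, 8); // all bits are relevant
 *
 *		return write(s, &msg, sizeof(msg)) < 0 ? -1 : 0;
 *	}
 */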

#define CFSIZ(flags) ((flags & CAN_FD_FRAME) ? CANFD_MTU : CAN_MTU)
#define OPSIZ sizeof(struct bcm_op)
#define MHSIZ sizeof(struct bcm_msg_head)

/*
 * procfs functions
 */
#if IS_ENABLED(CONFIG_PROC_FS)
static char *bcm_proc_getifname(struct net *net, char *result, int ifindex)
{
	struct net_device *dev;

	if (!ifindex)
		return "any";

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		strcpy(result, dev->name);
	else
		strcpy(result, "???");
	rcu_read_unlock();

	return result;
}

static int bcm_proc_show(struct seq_file *m, void *v)
{
	char ifname[IFNAMSIZ];
	struct net *net = m->private;
	struct sock *sk = (struct sock *)pde_data(m->file->f_inode);
	struct bcm_sock *bo = bcm_sk(sk);
	struct bcm_op *op;

	seq_printf(m, ">>> socket %pK", sk->sk_socket);
	seq_printf(m, " / sk %pK", sk);
	seq_printf(m, " / bo %pK", bo);
	seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
	seq_printf(m, " / bound %s", bcm_proc_getifname(net, ifname, bo->ifindex));
	seq_printf(m, " <<<\n");

	list_for_each_entry(op, &bo->rx_ops, list) {

		unsigned long reduction;

		/* print only active entries & prevent division by zero */
		if (!op->frames_abs)
			continue;

		seq_printf(m, "rx_op: %03X %-5s ", op->can_id,
			   bcm_proc_getifname(net, ifname, op->ifindex));

		if (op->flags & CAN_FD_FRAME)
			seq_printf(m, "(%u)", op->nframes);
		else
			seq_printf(m, "[%u]", op->nframes);

		seq_printf(m, "%c ", (op->flags & RX_CHECK_DLC) ? 'd' : ' ');

		if (op->kt_ival1)
			seq_printf(m, "timeo=%lld ",
				   (long long)ktime_to_us(op->kt_ival1));

		if (op->kt_ival2)
			seq_printf(m, "thr=%lld ",
				   (long long)ktime_to_us(op->kt_ival2));

		seq_printf(m, "# recv %ld (%ld) => reduction: ",
			   op->frames_filtered, op->frames_abs);

		reduction = 100 - (op->frames_filtered * 100) / op->frames_abs;

		seq_printf(m, "%s%ld%%\n",
			   (reduction == 100) ? "near " : "", reduction);
	}

	list_for_each_entry(op, &bo->tx_ops, list) {

		seq_printf(m, "tx_op: %03X %s ", op->can_id,
			   bcm_proc_getifname(net, ifname, op->ifindex));

		if (op->flags & CAN_FD_FRAME)
			seq_printf(m, "(%u) ", op->nframes);
		else
			seq_printf(m, "[%u] ", op->nframes);

		if (op->kt_ival1)
			seq_printf(m, "t1=%lld ",
				   (long long)ktime_to_us(op->kt_ival1));

		if (op->kt_ival2)
			seq_printf(m, "t2=%lld ",
				   (long long)ktime_to_us(op->kt_ival2));

		seq_printf(m, "# sent %ld\n", op->frames_abs);
	}
	seq_putc(m, '\n');
	return 0;
}
#endif /* CONFIG_PROC_FS */

/*
 * bcm_can_tx - send the (next) CAN frame to the appropriate CAN interface
 *              of the given bcm tx op
 */
static void bcm_can_tx(struct bcm_op *op)
{
	struct sk_buff *skb;
	struct net_device *dev;
	struct canfd_frame *cf = op->frames + op->cfsiz * op->currframe;
	int err;

	/* no target device? => exit */
	if (!op->ifindex)
		return;

	dev = dev_get_by_index(sock_net(op->sk), op->ifindex);
	if (!dev) {
		/* RFC: should this bcm_op remove itself here? */
		return;
	}

	skb = alloc_skb(op->cfsiz + sizeof(struct can_skb_priv), gfp_any());
	if (!skb)
		goto out;

	can_skb_reserve(skb);
	can_skb_prv(skb)->ifindex = dev->ifindex;
	can_skb_prv(skb)->skbcnt = 0;

	skb_put_data(skb, cf, op->cfsiz);

	/* send with loopback */
	skb->dev = dev;
	can_skb_set_owner(skb, op->sk);
	err = can_send(skb, 1);
	if (!err)
		op->frames_abs++;

	op->currframe++;

	/* reached last frame? */
	if (op->currframe >= op->nframes)
		op->currframe = 0;
out:
	dev_put(dev);
}

/*
 * bcm_send_to_user - send a BCM message to the userspace
 *                    (consisting of bcm_msg_head + x CAN frames)
 */
static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
			     struct canfd_frame *frames, int has_timestamp)
{
	struct sk_buff *skb;
	struct canfd_frame *firstframe;
	struct sockaddr_can *addr;
	struct sock *sk = op->sk;
	unsigned int datalen = head->nframes * op->cfsiz;
	int err;

	skb = alloc_skb(sizeof(*head) + datalen, gfp_any());
	if (!skb)
		return;

	skb_put_data(skb, head, sizeof(*head));

	if (head->nframes) {
		/* CAN frames starting here */
		firstframe = (struct canfd_frame *)skb_tail_pointer(skb);

		skb_put_data(skb, frames, datalen);

		/*
		 * the BCM uses the flags-element of the canfd_frame
		 * structure for internal purposes. This is only
		 * relevant for updates that are generated by the
		 * BCM, where nframes is 1
		 */
		if (head->nframes == 1)
			firstframe->flags &= BCM_CAN_FLAGS_MASK;
	}

	if (has_timestamp) {
		/* restore rx timestamp */
		skb->tstamp = op->rx_stamp;
	}

	/*
	 * Put the datagram to the queue so that bcm_recvmsg() can
	 * get it from there. We need to pass the interface index to
	 * bcm_recvmsg(). We pass a whole struct sockaddr_can in skb->cb
	 * containing the interface index.
	 */

	sock_skb_cb_check_size(sizeof(struct sockaddr_can));
	addr = (struct sockaddr_can *)skb->cb;
	memset(addr, 0, sizeof(*addr));
	addr->can_family = AF_CAN;
	addr->can_ifindex = op->rx_ifindex;

	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0) {
		struct bcm_sock *bo = bcm_sk(sk);

		kfree_skb(skb);
		/* don't care about overflows in this statistic */
		bo->dropped_usr_msgs++;
	}
}

static bool bcm_tx_set_expiry(struct bcm_op *op, struct hrtimer *hrt)
{
	ktime_t ival;

	if (op->kt_ival1 && op->count)
		ival = op->kt_ival1;
	else if (op->kt_ival2)
		ival = op->kt_ival2;
	else
		return false;

	hrtimer_set_expires(hrt, ktime_add(ktime_get(), ival));
	return true;
}

static void bcm_tx_start_timer(struct bcm_op *op)
{
	if (bcm_tx_set_expiry(op, &op->timer))
		hrtimer_start_expires(&op->timer, HRTIMER_MODE_ABS_SOFT);
}

/* bcm_tx_timeout_handler - performs cyclic CAN frame transmissions */
static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
{
	struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
	struct bcm_msg_head msg_head;

	if (op->kt_ival1 && (op->count > 0)) {
		op->count--;
		if (!op->count && (op->flags & TX_COUNTEVT)) {

			/* create notification to user */
			memset(&msg_head, 0, sizeof(msg_head));
			msg_head.opcode = TX_EXPIRED;
			msg_head.flags = op->flags;
			msg_head.count = op->count;
			msg_head.ival1 = op->ival1;
			msg_head.ival2 = op->ival2;
			msg_head.can_id = op->can_id;
			msg_head.nframes = 0;

			bcm_send_to_user(op, &msg_head, NULL, 0);
		}
		bcm_can_tx(op);

	} else if (op->kt_ival2) {
		bcm_can_tx(op);
	}

	return bcm_tx_set_expiry(op, &op->timer) ?
		HRTIMER_RESTART : HRTIMER_NORESTART;
}

/*
 * bcm_rx_changed - create a RX_CHANGED notification due to changed content
 */
static void bcm_rx_changed(struct bcm_op *op, struct canfd_frame *data)
{
	struct bcm_msg_head head;

	/* update statistics */
	op->frames_filtered++;

	/* prevent statistics overflow */
	if (op->frames_filtered > ULONG_MAX/100)
		op->frames_filtered = op->frames_abs = 0;

	/* this element is not throttled anymore */
	data->flags &= (BCM_CAN_FLAGS_MASK|RX_RECV);

	memset(&head, 0, sizeof(head));
	head.opcode = RX_CHANGED;
	head.flags = op->flags;
	head.count = op->count;
	head.ival1 = op->ival1;
	head.ival2 = op->ival2;
	head.can_id = op->can_id;
	head.nframes = 1;

	bcm_send_to_user(op, &head, data, 1);
}

/*
 * bcm_rx_update_and_send - process a detected relevant receive content change
 *                          1. update the last received data
 *                          2. send a notification to the user (if possible)
 */
static void bcm_rx_update_and_send(struct bcm_op *op,
				   struct canfd_frame *lastdata,
				   const struct canfd_frame *rxdata)
{
	memcpy(lastdata, rxdata, op->cfsiz);

	/* mark as used and throttled by default */
	lastdata->flags |= (RX_RECV|RX_THR);
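
	/*
	 * Throttling overview: with kt_ival2 set (e.g. 100 ms), each filter
	 * element delivers at most one RX_CHANGED per ival2 period.  Updates
	 * arriving within the period are kept in last_frames[] with RX_THR
	 * set and are flushed later by bcm_rx_thr_handler().
	 */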

	/* throttling mode inactive ? */
	if (!op->kt_ival2) {
		/* send RX_CHANGED to the user immediately */
		bcm_rx_changed(op, lastdata);
		return;
	}

	/* with active throttling timer we are just done here */
	if (hrtimer_active(&op->thrtimer))
		return;

	/* first reception with enabled throttling mode */
	if (!op->kt_lastmsg)
		goto rx_changed_settime;

	/* got a second frame inside a potential throttle period? */
	if (ktime_us_delta(ktime_get(), op->kt_lastmsg) <
	    ktime_to_us(op->kt_ival2)) {
		/* do not send the saved data - only start throttle timer */
		hrtimer_start(&op->thrtimer,
			      ktime_add(op->kt_lastmsg, op->kt_ival2),
			      HRTIMER_MODE_ABS_SOFT);
		return;
	}

	/* the gap was big enough, so throttling was not needed here */
rx_changed_settime:
	bcm_rx_changed(op, lastdata);
	op->kt_lastmsg = ktime_get();
}

/*
 * bcm_rx_cmp_to_index - (bit)compares the currently received data to formerly
 *                       received data stored in op->last_frames[]
 */
static void bcm_rx_cmp_to_index(struct bcm_op *op, unsigned int index,
				const struct canfd_frame *rxdata)
{
	struct canfd_frame *cf = op->frames + op->cfsiz * index;
	struct canfd_frame *lcf = op->last_frames + op->cfsiz * index;
	int i;

	/*
	 * no one uses the MSBs of flags for comparison,
	 * so we use it here to detect the first time of reception
	 */

	if (!(lcf->flags & RX_RECV)) {
		/* received data for the first time => send update to user */
		bcm_rx_update_and_send(op, lcf, rxdata);
		return;
	}

	/* do a real check in CAN frame data section */
	for (i = 0; i < rxdata->len; i += 8) {
		if ((get_u64(cf, i) & get_u64(rxdata, i)) !=
		    (get_u64(cf, i) & get_u64(lcf, i))) {
			bcm_rx_update_and_send(op, lcf, rxdata);
			return;
		}
	}

	if (op->flags & RX_CHECK_DLC) {
		/* do a real check in CAN frame length */
		if (rxdata->len != lcf->len) {
			bcm_rx_update_and_send(op, lcf, rxdata);
			return;
		}
	}
}

/*
 * bcm_rx_starttimer - enable timeout monitoring for CAN frame reception
 */
static void bcm_rx_starttimer(struct bcm_op *op)
{
	if (op->flags & RX_NO_AUTOTIMER)
		return;

	if (op->kt_ival1)
		hrtimer_start(&op->timer, op->kt_ival1, HRTIMER_MODE_REL_SOFT);
}

/* bcm_rx_timeout_handler - when the (cyclic) CAN frame reception timed out */
static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
{
	struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
	struct bcm_msg_head msg_head;

	/* if the user wants to be informed when cyclic CAN messages come back */
	if ((op->flags & RX_ANNOUNCE_RESUME) && op->last_frames) {
		/* clear received CAN frames to indicate 'nothing received' */
		memset(op->last_frames, 0, op->nframes * op->cfsiz);
	}

	/* create notification to user */
	memset(&msg_head, 0, sizeof(msg_head));
	msg_head.opcode = RX_TIMEOUT;
	msg_head.flags = op->flags;
	msg_head.count = op->count;
	msg_head.ival1 = op->ival1;
	msg_head.ival2 = op->ival2;
	msg_head.can_id = op->can_id;
	msg_head.nframes = 0;

	bcm_send_to_user(op, &msg_head, NULL, 0);

	return HRTIMER_NORESTART;
}

/*
 * bcm_rx_do_flush - helper for bcm_rx_thr_flush
 */
static inline int bcm_rx_do_flush(struct bcm_op *op, unsigned int index)
{
	struct canfd_frame *lcf = op->last_frames + op->cfsiz * index;

	if ((op->last_frames) && (lcf->flags & RX_THR)) {
		bcm_rx_changed(op, lcf);
		return 1;
	}
	return 0;
}

/*
 * bcm_rx_thr_flush - Check for throttled data and send it to the userspace
 */
static int bcm_rx_thr_flush(struct bcm_op *op)
{
	int updated = 0;

	if (op->nframes > 1) {
		unsigned int i;

		/* for MUX filter we start at index 1 */
		for (i = 1; i < op->nframes; i++)
			updated += bcm_rx_do_flush(op, i);

	} else {
		/* for RX_FILTER_ID and simple filter */
		updated += bcm_rx_do_flush(op, 0);
	}

	return updated;
}

/*
 * bcm_rx_thr_handler - the time for blocked content updates is over now:
 *                      Check for throttled data and send it to the userspace
 */
static enum hrtimer_restart bcm_rx_thr_handler(struct hrtimer *hrtimer)
{
	struct bcm_op *op = container_of(hrtimer, struct bcm_op, thrtimer);

	if (bcm_rx_thr_flush(op)) {
		hrtimer_forward_now(hrtimer, op->kt_ival2);
		return HRTIMER_RESTART;
	} else {
		/* rearm throttle handling */
		op->kt_lastmsg = 0;
		return HRTIMER_NORESTART;
	}
}

/*
 * bcm_rx_handler - handle a CAN frame reception
 */
static void bcm_rx_handler(struct sk_buff *skb, void *data)
{
	struct bcm_op *op = (struct bcm_op *)data;
	const struct canfd_frame *rxframe = (struct canfd_frame *)skb->data;
	unsigned int i;

	if (op->can_id != rxframe->can_id)
		return;

	/* make sure to handle the correct frame type (CAN / CAN FD) */
	if (op->flags & CAN_FD_FRAME) {
		if (!can_is_canfd_skb(skb))
			return;
	} else {
		if (!can_is_can_skb(skb))
			return;
	}

	/* disable timeout */
	hrtimer_cancel(&op->timer);

	/* save rx timestamp */
	op->rx_stamp = skb->tstamp;
	/* save originator for recvfrom() */
	op->rx_ifindex = skb->dev->ifindex;
	/* update statistics */
	op->frames_abs++;

	if (op->flags & RX_RTR_FRAME) {
		/* send reply for RTR-request (placed in op->frames[0]) */
		bcm_can_tx(op);
		return;
	}

	if (op->flags & RX_FILTER_ID) {
		/* the easiest case */
		bcm_rx_update_and_send(op, op->last_frames, rxframe);
		goto rx_starttimer;
	}

	if (op->nframes == 1) {
		/* simple compare with index 0 */
		bcm_rx_cmp_to_index(op, 0, rxframe);
		goto rx_starttimer;
	}

	if (op->nframes > 1) {
		/*
		 * multiplex compare
		 *
		 * find the first multiplex mask that fits.
		 * Remark: The MUX-mask is stored in index 0 - but only the
		 * first 64 bits of the frame data[] are relevant (CAN FD)
		 */
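		/*
		 * Example: with a MUX mask of 0xFF in data[0], the first
		 * data byte of the received frame selects the matching
		 * filter element in frames[1..nframes-1]; only that
		 * element's last_frames[] entry is compared and updated.
		 */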

		for (i = 1; i < op->nframes; i++) {
			if ((get_u64(op->frames, 0) & get_u64(rxframe, 0)) ==
			    (get_u64(op->frames, 0) &
			     get_u64(op->frames + op->cfsiz * i, 0))) {
				bcm_rx_cmp_to_index(op, i, rxframe);
				break;
			}
		}
	}

rx_starttimer:
	bcm_rx_starttimer(op);
}

/*
 * helpers for bcm_op handling: find & delete bcm [rx|tx] op elements
 */
static struct bcm_op *bcm_find_op(struct list_head *ops,
				  struct bcm_msg_head *mh, int ifindex)
{
	struct bcm_op *op;

	list_for_each_entry(op, ops, list) {
		if ((op->can_id == mh->can_id) && (op->ifindex == ifindex) &&
		    (op->flags & CAN_FD_FRAME) == (mh->flags & CAN_FD_FRAME))
			return op;
	}

	return NULL;
}

static void bcm_free_op_rcu(struct rcu_head *rcu_head)
{
	struct bcm_op *op = container_of(rcu_head, struct bcm_op, rcu);

	if ((op->frames) && (op->frames != &op->sframe))
		kfree(op->frames);

	if ((op->last_frames) && (op->last_frames != &op->last_sframe))
		kfree(op->last_frames);

	kfree(op);
}

static void bcm_remove_op(struct bcm_op *op)
{
	hrtimer_cancel(&op->timer);
	hrtimer_cancel(&op->thrtimer);

	call_rcu(&op->rcu, bcm_free_op_rcu);
}

static void bcm_rx_unreg(struct net_device *dev, struct bcm_op *op)
{
	if (op->rx_reg_dev == dev) {
		can_rx_unregister(dev_net(dev), dev, op->can_id,
				  REGMASK(op->can_id), bcm_rx_handler, op);

		/* mark as removed subscription */
		op->rx_reg_dev = NULL;
	} else
		printk(KERN_ERR "can-bcm: bcm_rx_unreg: registered device "
		       "mismatch %p %p\n", op->rx_reg_dev, dev);
}

/*
 * bcm_delete_rx_op - find and remove a rx op (returns number of removed ops)
 */
static int bcm_delete_rx_op(struct list_head *ops, struct bcm_msg_head *mh,
			    int ifindex)
{
	struct bcm_op *op, *n;

	list_for_each_entry_safe(op, n, ops, list) {
		if ((op->can_id == mh->can_id) && (op->ifindex == ifindex) &&
		    (op->flags & CAN_FD_FRAME) == (mh->flags & CAN_FD_FRAME)) {

			/* disable automatic timer on frame reception */
			op->flags |= RX_NO_AUTOTIMER;

			/*
			 * Don't care if we're bound or not (due to netdev
			 * problems) can_rx_unregister() is always a safe
			 * thing to do here.
			 */
			if (op->ifindex) {
				/*
				 * Only remove subscriptions that had not
				 * been removed due to NETDEV_UNREGISTER
				 * in bcm_notifier()
				 */
				if (op->rx_reg_dev) {
					struct net_device *dev;

					dev = dev_get_by_index(sock_net(op->sk),
							       op->ifindex);
					if (dev) {
						bcm_rx_unreg(dev, op);
						dev_put(dev);
					}
				}
			} else
				can_rx_unregister(sock_net(op->sk), NULL,
						  op->can_id,
						  REGMASK(op->can_id),
						  bcm_rx_handler, op);

			list_del(&op->list);
			bcm_remove_op(op);
			return 1; /* done */
		}
	}

	return 0; /* not found */
}

/*
 * bcm_delete_tx_op - find and remove a tx op (returns number of removed ops)
 */
static int bcm_delete_tx_op(struct list_head *ops, struct bcm_msg_head *mh,
			    int ifindex)
{
	struct bcm_op *op, *n;

	list_for_each_entry_safe(op, n, ops, list) {
		if ((op->can_id == mh->can_id) && (op->ifindex == ifindex) &&
		    (op->flags & CAN_FD_FRAME) == (mh->flags & CAN_FD_FRAME)) {
			list_del(&op->list);
			bcm_remove_op(op);
			return 1; /* done */
		}
	}

	return 0; /* not found */
}

/*
 * bcm_read_op - read out a bcm_op and send it to the user (for bcm_sendmsg)
 */
static int bcm_read_op(struct list_head *ops, struct bcm_msg_head *msg_head,
		       int ifindex)
{
	struct bcm_op *op = bcm_find_op(ops, msg_head, ifindex);

	if (!op)
		return -EINVAL;

	/* put current values into msg_head */
	msg_head->flags = op->flags;
	msg_head->count = op->count;
	msg_head->ival1 = op->ival1;
	msg_head->ival2 = op->ival2;
	msg_head->nframes = op->nframes;

	bcm_send_to_user(op, msg_head, op->frames, 0);

	return MHSIZ;
}

/*
 * bcm_tx_setup - create or update a bcm tx op (for bcm_sendmsg)
 */
static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
			int ifindex, struct sock *sk)
{
	struct bcm_sock *bo = bcm_sk(sk);
	struct bcm_op *op;
	struct canfd_frame *cf;
	unsigned int i;
	int err;

	/* we need a real device to send frames */
	if (!ifindex)
		return -ENODEV;

	/* check nframes boundaries - we need at least one CAN frame */
	if (msg_head->nframes < 1 || msg_head->nframes > MAX_NFRAMES)
		return -EINVAL;

	/* check timeval limitations */
	if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head))
		return -EINVAL;

	/* check the given can_id */
	op = bcm_find_op(&bo->tx_ops, msg_head, ifindex);
	if (op) {
		/* update existing BCM operation */

		/*
		 * Do we need more space for the CAN frames than currently
		 * allocated? -> This is a _really_ unusual use-case and
		 * therefore (complexity / locking) it is not supported.
		 */
		if (msg_head->nframes > op->nframes)
			return -E2BIG;

		/* update CAN frames content */
		for (i = 0; i < msg_head->nframes; i++) {

			cf = op->frames + op->cfsiz * i;
			err = memcpy_from_msg((u8 *)cf, msg, op->cfsiz);

			if (op->flags & CAN_FD_FRAME) {
				if (cf->len > 64)
					err = -EINVAL;
			} else {
				if (cf->len > 8)
					err = -EINVAL;
			}

			if (err < 0)
				return err;

			if (msg_head->flags & TX_CP_CAN_ID) {
				/* copy can_id into frame */
				cf->can_id = msg_head->can_id;
			}
		}
		op->flags = msg_head->flags;

	} else {
		/* insert new BCM operation for the given can_id */

		op = kzalloc(OPSIZ, GFP_KERNEL);
		if (!op)
			return -ENOMEM;

		op->can_id = msg_head->can_id;
		op->cfsiz = CFSIZ(msg_head->flags);
		op->flags = msg_head->flags;

		/* create array for CAN frames and copy the data */
		if (msg_head->nframes > 1) {
			op->frames = kmalloc_array(msg_head->nframes,
						   op->cfsiz,
						   GFP_KERNEL);
			if (!op->frames) {
				kfree(op);
				return -ENOMEM;
			}
		} else
			op->frames = &op->sframe;

		for (i = 0; i < msg_head->nframes; i++) {

			cf = op->frames + op->cfsiz * i;
			err = memcpy_from_msg((u8 *)cf, msg, op->cfsiz);
			if (err < 0)
				goto free_op;

			if (op->flags & CAN_FD_FRAME) {
				if (cf->len > 64)
					err = -EINVAL;
			} else {
				if (cf->len > 8)
					err = -EINVAL;
			}

			if (err < 0)
				goto free_op;

			if (msg_head->flags & TX_CP_CAN_ID) {
				/* copy can_id into frame */
				cf->can_id = msg_head->can_id;
			}
		}

		/* tx_ops never compare with previous received messages */
		op->last_frames = NULL;

		/* bcm_can_tx / bcm_tx_timeout_handler needs this */
		op->sk = sk;
		op->ifindex = ifindex;

		/* initialize uninitialized (kzalloc) structure */
		hrtimer_init(&op->timer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL_SOFT);
		op->timer.function = bcm_tx_timeout_handler;

		/* currently unused in tx_ops */
		hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL_SOFT);

		/* add this bcm_op to the list of the tx_ops */
		list_add(&op->list, &bo->tx_ops);

	} /* if ((op = bcm_find_op(&bo->tx_ops, msg_head->can_id, ifindex))) */

	if (op->nframes != msg_head->nframes) {
		op->nframes = msg_head->nframes;
		/* start multiple frame transmission with index 0 */
		op->currframe = 0;
	}

	/* check flags */

	if (op->flags & TX_RESET_MULTI_IDX) {
		/* start multiple frame transmission with index 0 */
		op->currframe = 0;
	}

	if (op->flags & SETTIMER) {
		/* set timer values */
		op->count = msg_head->count;
		op->ival1 = msg_head->ival1;
		op->ival2 = msg_head->ival2;
		op->kt_ival1 = bcm_timeval_to_ktime(msg_head->ival1);
		op->kt_ival2 = bcm_timeval_to_ktime(msg_head->ival2);
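
		/*
		 * TX timing recap (see bcm_tx_set_expiry()): 'count' frames
		 * are sent with interval ival1, then the transmission
		 * continues with interval ival2.  A zero ival2 stops after
		 * the ival1 phase; count == 0 skips the ival1 phase.
		 */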

		/* disable an active timer due to zero values? */
		if (!op->kt_ival1 && !op->kt_ival2)
			hrtimer_cancel(&op->timer);
	}

	if (op->flags & STARTTIMER) {
		hrtimer_cancel(&op->timer);
		/* spec: send CAN frame when starting timer */
		op->flags |= TX_ANNOUNCE;
	}

	if (op->flags & TX_ANNOUNCE) {
		bcm_can_tx(op);
		if (op->count)
			op->count--;
	}

	if (op->flags & STARTTIMER)
		bcm_tx_start_timer(op);

	return msg_head->nframes * op->cfsiz + MHSIZ;

free_op:
	if (op->frames != &op->sframe)
		kfree(op->frames);
	kfree(op);
	return err;
}

/*
 * bcm_rx_setup - create or update a bcm rx op (for bcm_sendmsg)
 */
static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
			int ifindex, struct sock *sk)
{
	struct bcm_sock *bo = bcm_sk(sk);
	struct bcm_op *op;
	int do_rx_register;
	int err = 0;

	if ((msg_head->flags & RX_FILTER_ID) || (!(msg_head->nframes))) {
		/* be robust against wrong usage ... */
		msg_head->flags |= RX_FILTER_ID;
		/* ignore trailing garbage */
		msg_head->nframes = 0;
	}

	/* the first element contains the mux-mask => MAX_NFRAMES + 1 */
	if (msg_head->nframes > MAX_NFRAMES + 1)
		return -EINVAL;

	if ((msg_head->flags & RX_RTR_FRAME) &&
	    ((msg_head->nframes != 1) ||
	     (!(msg_head->can_id & CAN_RTR_FLAG))))
		return -EINVAL;

	/* check timeval limitations */
	if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head))
		return -EINVAL;

	/* check the given can_id */
	op = bcm_find_op(&bo->rx_ops, msg_head, ifindex);
	if (op) {
		/* update existing BCM operation */

		/*
		 * Do we need more space for the CAN frames than currently
		 * allocated? -> This is a _really_ unusual use-case and
		 * therefore (complexity / locking) it is not supported.
		 */
		if (msg_head->nframes > op->nframes)
			return -E2BIG;

		if (msg_head->nframes) {
			/* update CAN frames content */
			err = memcpy_from_msg(op->frames, msg,
					      msg_head->nframes * op->cfsiz);
			if (err < 0)
				return err;

			/* clear last_frames to indicate 'nothing received' */
			memset(op->last_frames, 0, msg_head->nframes * op->cfsiz);
		}

		op->nframes = msg_head->nframes;
		op->flags = msg_head->flags;

		/* Only an update -> do not call can_rx_register() */
		do_rx_register = 0;

	} else {
		/* insert new BCM operation for the given can_id */
		op = kzalloc(OPSIZ, GFP_KERNEL);
		if (!op)
			return -ENOMEM;

		op->can_id = msg_head->can_id;
		op->nframes = msg_head->nframes;
		op->cfsiz = CFSIZ(msg_head->flags);
		op->flags = msg_head->flags;

		if (msg_head->nframes > 1) {
			/* create array for CAN frames and copy the data */
			op->frames = kmalloc_array(msg_head->nframes,
						   op->cfsiz,
						   GFP_KERNEL);
			if (!op->frames) {
				kfree(op);
				return -ENOMEM;
			}

			/* create and init array for received CAN frames */
			op->last_frames = kcalloc(msg_head->nframes,
						  op->cfsiz,
						  GFP_KERNEL);
			if (!op->last_frames) {
				kfree(op->frames);
				kfree(op);
				return -ENOMEM;
			}

		} else {
			op->frames = &op->sframe;
			op->last_frames = &op->last_sframe;
		}

		if (msg_head->nframes) {
			err = memcpy_from_msg(op->frames, msg,
					      msg_head->nframes * op->cfsiz);
			if (err < 0) {
				if (op->frames != &op->sframe)
					kfree(op->frames);
				if (op->last_frames != &op->last_sframe)
					kfree(op->last_frames);
				kfree(op);
				return err;
			}
		}

		/* bcm_can_tx / bcm_tx_timeout_handler needs this */
		op->sk = sk;
		op->ifindex = ifindex;

		/* ifindex for timeout events w/o previous frame reception */
		op->rx_ifindex = ifindex;

		/* initialize uninitialized (kzalloc) structure */
		hrtimer_init(&op->timer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL_SOFT);
		op->timer.function = bcm_rx_timeout_handler;

		hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL_SOFT);
		op->thrtimer.function = bcm_rx_thr_handler;

		/* add this bcm_op to the list of the rx_ops */
		list_add(&op->list, &bo->rx_ops);

		/* call can_rx_register() */
		do_rx_register = 1;

	} /* if ((op = bcm_find_op(&bo->rx_ops, msg_head->can_id, ifindex))) */

	/* check flags */

	if (op->flags & RX_RTR_FRAME) {
		struct canfd_frame *frame0 = op->frames;

		/* no timers in RTR-mode */
		hrtimer_cancel(&op->thrtimer);
		hrtimer_cancel(&op->timer);

		/*
		 * funny feature in RX(!)_SETUP only for RTR-mode:
		 * copy can_id into frame BUT without RTR-flag to
		 * prevent a full-load-loopback-test ... ;-]
		 */
		if ((op->flags & TX_CP_CAN_ID) ||
		    (frame0->can_id == op->can_id))
			frame0->can_id = op->can_id & ~CAN_RTR_FLAG;

	} else {
		if (op->flags & SETTIMER) {

			/* set timer value */
			op->ival1 = msg_head->ival1;
			op->ival2 = msg_head->ival2;
			op->kt_ival1 = bcm_timeval_to_ktime(msg_head->ival1);
			op->kt_ival2 = bcm_timeval_to_ktime(msg_head->ival2);
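
			/*
			 * RX timing recap: ival1 is the timeout used by
			 * bcm_rx_timeout_handler() to signal RX_TIMEOUT when
			 * the monitored frame stops arriving; ival2 is the
			 * minimum gap between two RX_CHANGED updates
			 * (throttling via thrtimer).
			 */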

			/* disable an active timer due to zero value? */
			if (!op->kt_ival1)
				hrtimer_cancel(&op->timer);

			/*
			 * In any case cancel the throttle timer, flush
			 * potentially blocked msgs and reset throttle handling
			 */
			op->kt_lastmsg = 0;
			hrtimer_cancel(&op->thrtimer);
			bcm_rx_thr_flush(op);
		}

		if ((op->flags & STARTTIMER) && op->kt_ival1)
			hrtimer_start(&op->timer, op->kt_ival1,
				      HRTIMER_MODE_REL_SOFT);
	}

	/* now we can register for can_ids, if we added a new bcm_op */
	if (do_rx_register) {
		if (ifindex) {
			struct net_device *dev;

			dev = dev_get_by_index(sock_net(sk), ifindex);
			if (dev) {
				err = can_rx_register(sock_net(sk), dev,
						      op->can_id,
						      REGMASK(op->can_id),
						      bcm_rx_handler, op,
						      "bcm", sk);

				op->rx_reg_dev = dev;
				dev_put(dev);
			}

		} else
			err = can_rx_register(sock_net(sk), NULL, op->can_id,
					      REGMASK(op->can_id),
					      bcm_rx_handler, op, "bcm", sk);
		if (err) {
			/* this bcm rx op is broken -> remove it */
			list_del(&op->list);
			bcm_remove_op(op);
			return err;
		}
	}

	return msg_head->nframes * op->cfsiz + MHSIZ;
}

/*
 * bcm_tx_send - send a single CAN frame to the CAN interface (for bcm_sendmsg)
 */
static int bcm_tx_send(struct msghdr *msg, int ifindex, struct sock *sk,
		       int cfsiz)
{
	struct sk_buff *skb;
	struct net_device *dev;
	int err;

	/* we need a real device to send frames */
	if (!ifindex)
		return -ENODEV;

	skb = alloc_skb(cfsiz + sizeof(struct can_skb_priv), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	can_skb_reserve(skb);

	err = memcpy_from_msg(skb_put(skb, cfsiz), msg, cfsiz);
	if (err < 0) {
		kfree_skb(skb);
		return err;
	}

	dev = dev_get_by_index(sock_net(sk), ifindex);
	if (!dev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	can_skb_prv(skb)->ifindex = dev->ifindex;
	can_skb_prv(skb)->skbcnt = 0;
	skb->dev = dev;
	can_skb_set_owner(skb, sk);
	err = can_send(skb, 1); /* send with loopback */
	dev_put(dev);

	if (err)
		return err;

	return cfsiz + MHSIZ;
}

/*
 * bcm_sendmsg - process BCM commands (opcodes) from the userspace
 */
static int bcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
	struct sock *sk = sock->sk;
	struct bcm_sock *bo = bcm_sk(sk);
	int ifindex = bo->ifindex; /* default ifindex for this bcm_op */
	struct bcm_msg_head msg_head;
	int cfsiz;
	int ret; /* read bytes or error codes as return value */

	if (!bo->bound)
		return -ENOTCONN;

	/* check for valid message length from userspace */
	if (size < MHSIZ)
		return -EINVAL;

	/* read message head information */
	ret = memcpy_from_msg((u8 *)&msg_head, msg, MHSIZ);
	if (ret < 0)
		return ret;

	cfsiz = CFSIZ(msg_head.flags);
	if ((size - MHSIZ) % cfsiz)
		return -EINVAL;

	/* check for alternative ifindex for this bcm_op */

	if (!ifindex && msg->msg_name) {
		/* no bound device as default => check msg_name */
		DECLARE_SOCKADDR(struct sockaddr_can *, addr, msg->msg_name);

		if (msg->msg_namelen < BCM_MIN_NAMELEN)
			return -EINVAL;

		if (addr->can_family != AF_CAN)
			return -EINVAL;

		/* ifindex from sendto() */
		ifindex = addr->can_ifindex;

		if (ifindex) {
			struct net_device *dev;

			dev = dev_get_by_index(sock_net(sk), ifindex);
			if (!dev)
				return -ENODEV;

			if (dev->type != ARPHRD_CAN) {
				dev_put(dev);
				return -ENODEV;
			}

			dev_put(dev);
		}
	}

	lock_sock(sk);

	switch (msg_head.opcode) {

	case TX_SETUP:
		ret = bcm_tx_setup(&msg_head, msg, ifindex, sk);
		break;

	case RX_SETUP:
		ret = bcm_rx_setup(&msg_head, msg, ifindex, sk);
		break;

	case TX_DELETE:
		if (bcm_delete_tx_op(&bo->tx_ops, &msg_head, ifindex))
			ret = MHSIZ;
		else
			ret = -EINVAL;
		break;

	case RX_DELETE:
		if (bcm_delete_rx_op(&bo->rx_ops, &msg_head, ifindex))
			ret = MHSIZ;
		else
			ret = -EINVAL;
		break;

	case TX_READ:
		/* reuse msg_head for the reply to TX_READ */
		msg_head.opcode = TX_STATUS;
		ret = bcm_read_op(&bo->tx_ops, &msg_head, ifindex);
		break;

	case RX_READ:
		/* reuse msg_head for the reply to RX_READ */
		msg_head.opcode = RX_STATUS;
		ret = bcm_read_op(&bo->rx_ops, &msg_head, ifindex);
		break;

	case TX_SEND:
		/* we need exactly one CAN frame behind the msg head */
		if ((msg_head.nframes != 1) || (size != cfsiz + MHSIZ))
			ret = -EINVAL;
		else
			ret = bcm_tx_send(msg, ifindex, sk, cfsiz);
		break;

	default:
		ret = -EINVAL;
		break;
	}

	release_sock(sk);

	return ret;
}

/*
 * notification handler for netdevice status changes
 */
static void bcm_notify(struct bcm_sock *bo, unsigned long msg,
		       struct net_device *dev)
{
	struct sock *sk = &bo->sk;
	struct bcm_op *op;
	int notify_enodev = 0;

	if (!net_eq(dev_net(dev), sock_net(sk)))
		return;

	switch (msg) {

	case NETDEV_UNREGISTER:
		lock_sock(sk);

		/* remove device specific receive entries */
		list_for_each_entry(op, &bo->rx_ops, list)
			if (op->rx_reg_dev == dev)
				bcm_rx_unreg(dev, op);

		/* remove device reference, if this is our bound device */
		if (bo->bound && bo->ifindex == dev->ifindex) {
#if IS_ENABLED(CONFIG_PROC_FS)
			if (sock_net(sk)->can.bcmproc_dir && bo->bcm_proc_read)
				remove_proc_entry(bo->procname, sock_net(sk)->can.bcmproc_dir);
#endif
			bo->bound = 0;
			bo->ifindex = 0;
			notify_enodev = 1;
		}

		release_sock(sk);

		if (notify_enodev) {
			sk->sk_err = ENODEV;
			if (!sock_flag(sk, SOCK_DEAD))
				sk_error_report(sk);
		}
		break;

	case NETDEV_DOWN:
		if (bo->bound && bo->ifindex == dev->ifindex) {
			sk->sk_err = ENETDOWN;
			if (!sock_flag(sk, SOCK_DEAD))
				sk_error_report(sk);
		}
	}
}

static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
			void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (dev->type != ARPHRD_CAN)
		return NOTIFY_DONE;
	if (msg != NETDEV_UNREGISTER && msg != NETDEV_DOWN)
		return NOTIFY_DONE;
	if (unlikely(bcm_busy_notifier)) /* Check for reentrant bug. */
		return NOTIFY_DONE;
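
	/*
	 * Walk the socket list with bcm_busy_notifier as a published cursor:
	 * the spinlock is dropped around bcm_notify() so that lock_sock()
	 * may sleep, and bcm_release() waits until this cursor has moved
	 * past its socket before unlinking it from the list.
	 */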

	spin_lock(&bcm_notifier_lock);
	list_for_each_entry(bcm_busy_notifier, &bcm_notifier_list, notifier) {
		spin_unlock(&bcm_notifier_lock);
		bcm_notify(bcm_busy_notifier, msg, dev);
		spin_lock(&bcm_notifier_lock);
	}
	bcm_busy_notifier = NULL;
	spin_unlock(&bcm_notifier_lock);
	return NOTIFY_DONE;
}

/*
 * initial settings for all BCM sockets to be set at socket creation time
 */
static int bcm_init(struct sock *sk)
{
	struct bcm_sock *bo = bcm_sk(sk);

	bo->bound = 0;
	bo->ifindex = 0;
	bo->dropped_usr_msgs = 0;
	bo->bcm_proc_read = NULL;

	INIT_LIST_HEAD(&bo->tx_ops);
	INIT_LIST_HEAD(&bo->rx_ops);

	/* set notifier */
	spin_lock(&bcm_notifier_lock);
	list_add_tail(&bo->notifier, &bcm_notifier_list);
	spin_unlock(&bcm_notifier_lock);

	return 0;
}

/*
 * standard socket functions
 */
static int bcm_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct net *net;
	struct bcm_sock *bo;
	struct bcm_op *op, *next;

	if (!sk)
		return 0;

	net = sock_net(sk);
	bo = bcm_sk(sk);

	/* remove bcm_ops, timer, rx_unregister(), etc. */

	spin_lock(&bcm_notifier_lock);
	while (bcm_busy_notifier == bo) {
		spin_unlock(&bcm_notifier_lock);
		schedule_timeout_uninterruptible(1);
		spin_lock(&bcm_notifier_lock);
	}
	list_del(&bo->notifier);
	spin_unlock(&bcm_notifier_lock);

	lock_sock(sk);

#if IS_ENABLED(CONFIG_PROC_FS)
	/* remove procfs entry */
	if (net->can.bcmproc_dir && bo->bcm_proc_read)
		remove_proc_entry(bo->procname, net->can.bcmproc_dir);
#endif /* CONFIG_PROC_FS */

	list_for_each_entry_safe(op, next, &bo->tx_ops, list)
		bcm_remove_op(op);

	list_for_each_entry_safe(op, next, &bo->rx_ops, list) {
		/*
		 * Don't care if we're bound or not (due to netdev problems)
		 * can_rx_unregister() is always a safe thing to do here.
		 */
		if (op->ifindex) {
			/*
			 * Only remove subscriptions that had not
			 * been removed due to NETDEV_UNREGISTER
			 * in bcm_notifier()
			 */
			if (op->rx_reg_dev) {
				struct net_device *dev;

				dev = dev_get_by_index(net, op->ifindex);
				if (dev) {
					bcm_rx_unreg(dev, op);
					dev_put(dev);
				}
			}
		} else
			can_rx_unregister(net, NULL, op->can_id,
					  REGMASK(op->can_id),
					  bcm_rx_handler, op);

	}

	synchronize_rcu();

	list_for_each_entry_safe(op, next, &bo->rx_ops, list)
		bcm_remove_op(op);

	/* remove device reference */
	if (bo->bound) {
		bo->bound = 0;
		bo->ifindex = 0;
	}

	sock_orphan(sk);
	sock->sk = NULL;

	release_sock(sk);
	sock_put(sk);

	return 0;
}

static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
		       int flags)
{
	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
	struct sock *sk = sock->sk;
	struct bcm_sock *bo = bcm_sk(sk);
	struct net *net = sock_net(sk);
	int ret = 0;

	if (len < BCM_MIN_NAMELEN)
		return -EINVAL;

	lock_sock(sk);

	if (bo->bound) {
		ret = -EISCONN;
		goto fail;
	}

	/* bind a device to this socket */
	if (addr->can_ifindex) {
		struct net_device *dev;

		dev = dev_get_by_index(net, addr->can_ifindex);
		if (!dev) {
			ret = -ENODEV;
			goto fail;
		}
		if (dev->type != ARPHRD_CAN) {
			dev_put(dev);
			ret = -ENODEV;
			goto fail;
		}

		bo->ifindex = dev->ifindex;
		dev_put(dev);

	} else {
		/* no interface reference for ifindex = 0 ('any' CAN device) */
		bo->ifindex = 0;
	}

#if IS_ENABLED(CONFIG_PROC_FS)
	if (net->can.bcmproc_dir) {
		/* unique socket address as filename */
		sprintf(bo->procname, "%lu", sock_i_ino(sk));
		bo->bcm_proc_read = proc_create_net_single(bo->procname, 0644,
							   net->can.bcmproc_dir,
							   bcm_proc_show, sk);
		if (!bo->bcm_proc_read) {
			ret = -ENOMEM;
			goto fail;
		}
	}
#endif /* CONFIG_PROC_FS */

	bo->bound = 1;

fail:
	release_sock(sk);

	return ret;
}

static int bcm_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
		       int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int error = 0;
	int err;

	skb = skb_recv_datagram(sk, flags, &error);
	if (!skb)
		return error;

	if (skb->len < size)
		size = skb->len;

	err = memcpy_to_msg(msg, skb->data, size);
	if (err < 0) {
		skb_free_datagram(sk, skb);
		return err;
	}

	sock_recv_cmsgs(msg, sk, skb);

	if (msg->msg_name) {
		__sockaddr_check_size(BCM_MIN_NAMELEN);
		msg->msg_namelen = BCM_MIN_NAMELEN;
		memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
	}

	skb_free_datagram(sk, skb);

	return size;
}

static int bcm_sock_no_ioctlcmd(struct socket *sock, unsigned int cmd,
				unsigned long arg)
{
	/* no ioctls for socket layer -> hand it down to NIC layer */
	return -ENOIOCTLCMD;
}

static const struct proto_ops bcm_ops = {
	.family = PF_CAN,
	.release = bcm_release,
	.bind = sock_no_bind,
	.connect = bcm_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = sock_no_getname,
	.poll = datagram_poll,
	.ioctl = bcm_sock_no_ioctlcmd,
	.gettstamp = sock_gettstamp,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.sendmsg = bcm_sendmsg,
	.recvmsg = bcm_recvmsg,
	.mmap = sock_no_mmap,
};

static struct proto bcm_proto __read_mostly = {
	.name = "CAN_BCM",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct bcm_sock),
	.init = bcm_init,
};

static const struct can_proto bcm_can_proto = {
	.type = SOCK_DGRAM,
	.protocol = CAN_BCM,
	.ops = &bcm_ops,
	.prot = &bcm_proto,
};

static int canbcm_pernet_init(struct net *net)
{
#if IS_ENABLED(CONFIG_PROC_FS)
	/* create /proc/net/can-bcm directory */
	net->can.bcmproc_dir = proc_net_mkdir(net, "can-bcm", net->proc_net);
#endif /* CONFIG_PROC_FS */

	return 0;
}

static void canbcm_pernet_exit(struct net *net)
{
#if IS_ENABLED(CONFIG_PROC_FS)
	/* remove /proc/net/can-bcm directory */
	if (net->can.bcmproc_dir)
		remove_proc_entry("can-bcm", net->proc_net);
#endif /* CONFIG_PROC_FS */
}

static struct pernet_operations canbcm_pernet_ops __read_mostly = {
	.init = canbcm_pernet_init,
	.exit = canbcm_pernet_exit,
};

static struct notifier_block canbcm_notifier = {
	.notifier_call = bcm_notifier
};

static int __init bcm_module_init(void)
{
	int err;

	pr_info("can: broadcast manager protocol\n");

	err = register_pernet_subsys(&canbcm_pernet_ops);
	if (err)
		return err;

	err = register_netdevice_notifier(&canbcm_notifier);
	if (err)
		goto register_notifier_failed;

	err = can_proto_register(&bcm_can_proto);
	if (err < 0) {
		printk(KERN_ERR "can: registration of bcm protocol failed\n");
		goto register_proto_failed;
	}

	return 0;

register_proto_failed:
	unregister_netdevice_notifier(&canbcm_notifier);
register_notifier_failed:
	unregister_pernet_subsys(&canbcm_pernet_ops);
	return err;
}

static void __exit bcm_module_exit(void)
{
	can_proto_unregister(&bcm_can_proto);
	unregister_netdevice_notifier(&canbcm_notifier);
	unregister_pernet_subsys(&canbcm_pernet_ops);
}

module_init(bcm_module_init);
module_exit(bcm_module_exit);