/*
 * bcm.c - Broadcast Manager to filter/send (cyclic) CAN content
 *
 * Copyright (c) 2002-2007 Volkswagen Group Electronic Research
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Volkswagen nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * Alternatively, provided that this notice is retained in full, this
 * software may be distributed under the terms of the GNU General
 * Public License ("GPL") version 2, in which case the provisions of the
 * GPL apply INSTEAD OF those given above.
 *
 * The provided data structures and external interfaces from this code
 * are not restricted to be used by modules with a GPL compatible license.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/hrtimer.h>
#include <linux/list.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/uio.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <linux/can.h>
#include <linux/can/core.h>
#include <linux/can/skb.h>
#include <linux/can/bcm.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <net/net_namespace.h>

/*
 * To send multiple CAN frame content within TX_SETUP or to filter
 * CAN messages with multiplex index within RX_SETUP, the number of
 * different filters is limited to 256 due to the one byte index value.
 */
#define MAX_NFRAMES 256

/* use of last_frames[index].can_dlc */
#define RX_RECV    0x40 /* received data for this element */
#define RX_THR     0x80 /* element not yet sent due to throttle feature */
#define BCM_CAN_DLC_MASK 0x0F /* clean private flags in can_dlc by masking */

/* get best masking value for can_rx_register() for a given single can_id */
#define REGMASK(id) ((id & CAN_EFF_FLAG) ? \
		     (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \
		     (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG))

#define CAN_BCM_VERSION CAN_VERSION
static __initconst const char banner[] = KERN_INFO
	"can: broadcast manager protocol (rev " CAN_BCM_VERSION " t)\n";

MODULE_DESCRIPTION("PF_CAN broadcast manager protocol");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
MODULE_ALIAS("can-proto-2");

/* easy access to can_frame payload */
static inline u64 GET_U64(const struct can_frame *cp)
{
	return *(u64 *)cp->data;
}

struct bcm_op {
	struct list_head list;
	int ifindex;
	canid_t can_id;
	u32 flags;
	unsigned long frames_abs, frames_filtered;
	struct timeval ival1, ival2;
	struct hrtimer timer, thrtimer;
	struct tasklet_struct tsklet, thrtsklet;
	ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg;
	int rx_ifindex;
	u32 count;
	u32 nframes;
	u32 currframe;
	struct can_frame *frames;
	struct can_frame *last_frames;
	struct can_frame sframe;
	struct can_frame last_sframe;
	struct sock *sk;
	struct net_device *rx_reg_dev;
};

static struct proc_dir_entry *proc_dir;

struct bcm_sock {
	struct sock sk;
	int bound;
	int ifindex;
	struct notifier_block notifier;
	struct list_head rx_ops;
	struct list_head tx_ops;
	unsigned long dropped_usr_msgs;
	struct proc_dir_entry *bcm_proc_read;
	char procname[32]; /* inode number in decimal with \0 */
};

static inline struct bcm_sock *bcm_sk(const struct sock *sk)
{
	return (struct bcm_sock *)sk;
}

#define CFSIZ sizeof(struct can_frame)
#define OPSIZ sizeof(struct bcm_op)
#define MHSIZ sizeof(struct bcm_msg_head)

/*
 * procfs functions
 */
static char *bcm_proc_getifname(char *result, int ifindex)
{
	struct net_device *dev;

	if (!ifindex)
		return "any";

	rcu_read_lock();
	dev = dev_get_by_index_rcu(&init_net, ifindex);
	if (dev)
		strcpy(result, dev->name);
	else
		strcpy(result, "???");
	rcu_read_unlock();

	return result;
}

static int bcm_proc_show(struct seq_file *m, void *v)
{
	char ifname[IFNAMSIZ];
	struct sock *sk = (struct sock *)m->private;
	struct bcm_sock *bo = bcm_sk(sk);
	struct bcm_op *op;

	seq_printf(m, ">>> socket %pK", sk->sk_socket);
	seq_printf(m, " / sk %pK", sk);
	seq_printf(m, " / bo %pK", bo);
	seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
	seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
	seq_printf(m, " <<<\n");

	list_for_each_entry(op, &bo->rx_ops, list) {

		unsigned long reduction;

		/* print only active entries & prevent division by zero */
		if (!op->frames_abs)
			continue;

		seq_printf(m, "rx_op: %03X %-5s ",
			   op->can_id, bcm_proc_getifname(ifname, op->ifindex));
		seq_printf(m, "[%u]%c ", op->nframes,
			   (op->flags & RX_CHECK_DLC) ? 'd' : ' ');
		if (op->kt_ival1.tv64)
			seq_printf(m, "timeo=%lld ",
				   (long long)ktime_to_us(op->kt_ival1));

		if (op->kt_ival2.tv64)
			seq_printf(m, "thr=%lld ",
				   (long long)ktime_to_us(op->kt_ival2));

		seq_printf(m, "# recv %ld (%ld) => reduction: ",
			   op->frames_filtered, op->frames_abs);

		reduction = 100 - (op->frames_filtered * 100) / op->frames_abs;

		seq_printf(m, "%s%ld%%\n",
			   (reduction == 100) ? "near " : "", reduction);
	}

	list_for_each_entry(op, &bo->tx_ops, list) {

		seq_printf(m, "tx_op: %03X %s [%u] ",
			   op->can_id,
			   bcm_proc_getifname(ifname, op->ifindex),
			   op->nframes);

		if (op->kt_ival1.tv64)
			seq_printf(m, "t1=%lld ",
				   (long long)ktime_to_us(op->kt_ival1));

		if (op->kt_ival2.tv64)
			seq_printf(m, "t2=%lld ",
				   (long long)ktime_to_us(op->kt_ival2));

		seq_printf(m, "# sent %ld\n", op->frames_abs);
	}
	seq_putc(m, '\n');
	return 0;
}

static int bcm_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, bcm_proc_show, PDE_DATA(inode));
}

static const struct file_operations bcm_proc_fops = {
	.owner   = THIS_MODULE,
	.open    = bcm_proc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

/*
 * bcm_can_tx - send the (next) CAN frame to the appropriate CAN interface
 *              of the given bcm tx op
 */
static void bcm_can_tx(struct bcm_op *op)
{
	struct sk_buff *skb;
	struct net_device *dev;
	struct can_frame *cf = &op->frames[op->currframe];

	/* no target device? => exit */
	if (!op->ifindex)
		return;

	dev = dev_get_by_index(&init_net, op->ifindex);
	if (!dev) {
		/* RFC: should this bcm_op remove itself here? */
		return;
	}

	skb = alloc_skb(CFSIZ + sizeof(struct can_skb_priv), gfp_any());
	if (!skb)
		goto out;

	can_skb_reserve(skb);
	can_skb_prv(skb)->ifindex = dev->ifindex;

	memcpy(skb_put(skb, CFSIZ), cf, CFSIZ);

	/* send with loopback */
	skb->dev = dev;
	can_skb_set_owner(skb, op->sk);
	can_send(skb, 1);

	/* update statistics */
	op->currframe++;
	op->frames_abs++;

	/* reached last frame? */
	if (op->currframe >= op->nframes)
		op->currframe = 0;
out:
	dev_put(dev);
}

/*
 * bcm_send_to_user - send a BCM message to the userspace
 *                    (consisting of bcm_msg_head + x CAN frames)
 */
static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
			     struct can_frame *frames, int has_timestamp)
{
	struct sk_buff *skb;
	struct can_frame *firstframe;
	struct sockaddr_can *addr;
	struct sock *sk = op->sk;
	unsigned int datalen = head->nframes * CFSIZ;
	int err;

	skb = alloc_skb(sizeof(*head) + datalen, gfp_any());
	if (!skb)
		return;

	memcpy(skb_put(skb, sizeof(*head)), head, sizeof(*head));

	if (head->nframes) {
		/* can_frames starting here */
		firstframe = (struct can_frame *)skb_tail_pointer(skb);

		memcpy(skb_put(skb, datalen), frames, datalen);

		/*
		 * the BCM uses the can_dlc-element of the can_frame
		 * structure for internal purposes. This is only
		 * relevant for updates that are generated by the
		 * BCM, where nframes is 1
		 */
		if (head->nframes == 1)
			firstframe->can_dlc &= BCM_CAN_DLC_MASK;
	}

	if (has_timestamp) {
		/* restore rx timestamp */
		skb->tstamp = op->rx_stamp;
	}

	/*
	 * Put the datagram into the queue so that bcm_recvmsg() can
	 * get it from there. We need to pass the interface index to
	 * bcm_recvmsg(). We pass a whole struct sockaddr_can in skb->cb
	 * containing the interface index.
	 */

	BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct sockaddr_can));
	addr = (struct sockaddr_can *)skb->cb;
	memset(addr, 0, sizeof(*addr));
	addr->can_family  = AF_CAN;
	addr->can_ifindex = op->rx_ifindex;

	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0) {
		struct bcm_sock *bo = bcm_sk(sk);

		kfree_skb(skb);
		/* don't care about overflows in this statistic */
		bo->dropped_usr_msgs++;
	}
}

static void bcm_tx_start_timer(struct bcm_op *op)
{
	if (op->kt_ival1.tv64 && op->count)
		hrtimer_start(&op->timer,
			      ktime_add(ktime_get(), op->kt_ival1),
			      HRTIMER_MODE_ABS);
	else if (op->kt_ival2.tv64)
		hrtimer_start(&op->timer,
			      ktime_add(ktime_get(), op->kt_ival2),
			      HRTIMER_MODE_ABS);
}

static void bcm_tx_timeout_tsklet(unsigned long data)
{
	struct bcm_op *op = (struct bcm_op *)data;
	struct bcm_msg_head msg_head;

	if (op->kt_ival1.tv64 && (op->count > 0)) {

		op->count--;
		if (!op->count && (op->flags & TX_COUNTEVT)) {

			/* create notification to user */
			msg_head.opcode  = TX_EXPIRED;
			msg_head.flags   = op->flags;
			msg_head.count   = op->count;
			msg_head.ival1   = op->ival1;
			msg_head.ival2   = op->ival2;
			msg_head.can_id  = op->can_id;
			msg_head.nframes = 0;

			bcm_send_to_user(op, &msg_head, NULL, 0);
		}
		bcm_can_tx(op);

	} else if (op->kt_ival2.tv64)
		bcm_can_tx(op);

	bcm_tx_start_timer(op);
}

/*
 * bcm_tx_timeout_handler - performs cyclic CAN frame transmissions
 */
static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
{
	struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);

	tasklet_schedule(&op->tsklet);

	return HRTIMER_NORESTART;
}

/*
 * bcm_rx_changed - create a RX_CHANGED notification due to changed content
 */
static void bcm_rx_changed(struct bcm_op *op, struct can_frame *data)
{
	struct bcm_msg_head head;

	/* update statistics */
	op->frames_filtered++;

	/* prevent statistics overflow */
	if (op->frames_filtered > ULONG_MAX/100)
		op->frames_filtered = op->frames_abs = 0;

	/* this element is not throttled anymore */
	data->can_dlc &= (BCM_CAN_DLC_MASK|RX_RECV);

	head.opcode  = RX_CHANGED;
	head.flags   = op->flags;
	head.count   = op->count;
	head.ival1   = op->ival1;
	head.ival2   = op->ival2;
	head.can_id  = op->can_id;
	head.nframes = 1;

	bcm_send_to_user(op, &head, data, 1);
}

/*
 * bcm_rx_update_and_send - process a detected relevant receive content change
 *                          1. update the last received data
 *                          2. send a notification to the user (if possible)
 */
static void bcm_rx_update_and_send(struct bcm_op *op,
				   struct can_frame *lastdata,
				   const struct can_frame *rxdata)
{
	memcpy(lastdata, rxdata, CFSIZ);

	/* mark as used and throttled by default */
	lastdata->can_dlc |= (RX_RECV|RX_THR);

	/* throttling mode inactive? */
	if (!op->kt_ival2.tv64) {
		/* send RX_CHANGED to the user immediately */
		bcm_rx_changed(op, lastdata);
		return;
	}

	/* with active throttling timer we are just done here */
	if (hrtimer_active(&op->thrtimer))
		return;

	/* first reception with throttling mode enabled */
	if (!op->kt_lastmsg.tv64)
		goto rx_changed_settime;

	/* got a second frame inside a potential throttle period? */
	if (ktime_us_delta(ktime_get(), op->kt_lastmsg) <
	    ktime_to_us(op->kt_ival2)) {
		/* do not send the saved data - only start throttle timer */
		hrtimer_start(&op->thrtimer,
			      ktime_add(op->kt_lastmsg, op->kt_ival2),
			      HRTIMER_MODE_ABS);
		return;
	}

	/* the gap was big enough, so throttling was not needed here */
rx_changed_settime:
	bcm_rx_changed(op, lastdata);
	op->kt_lastmsg = ktime_get();
}

/*
 * bcm_rx_cmp_to_index - (bit)compares the currently received data to formerly
 *                       received data stored in op->last_frames[]
 */
static void bcm_rx_cmp_to_index(struct bcm_op *op, unsigned int index,
				const struct can_frame *rxdata)
{
	/*
	 * no one uses the MSBs of can_dlc for comparison,
	 * so we use it here to detect the first time of reception
	 */

	if (!(op->last_frames[index].can_dlc & RX_RECV)) {
		/* received data for the first time => send update to user */
		bcm_rx_update_and_send(op, &op->last_frames[index], rxdata);
		return;
	}

	/* do a real check in can_frame data section */

	if ((GET_U64(&op->frames[index]) & GET_U64(rxdata)) !=
	    (GET_U64(&op->frames[index]) & GET_U64(&op->last_frames[index]))) {
		bcm_rx_update_and_send(op, &op->last_frames[index], rxdata);
		return;
	}

	if (op->flags & RX_CHECK_DLC) {
		/* do a real check in can_frame dlc */
		if (rxdata->can_dlc != (op->last_frames[index].can_dlc &
					BCM_CAN_DLC_MASK)) {
			bcm_rx_update_and_send(op, &op->last_frames[index],
					       rxdata);
			return;
		}
	}
}

/*
 * bcm_rx_starttimer - enable timeout monitoring for CAN frame reception
 */
static void bcm_rx_starttimer(struct bcm_op *op)
{
	if (op->flags & RX_NO_AUTOTIMER)
		return;

	if (op->kt_ival1.tv64)
		hrtimer_start(&op->timer, op->kt_ival1, HRTIMER_MODE_REL);
}

static void bcm_rx_timeout_tsklet(unsigned long data)
{
	struct bcm_op *op = (struct bcm_op *)data;
	struct bcm_msg_head msg_head;

	/* create notification to user */
	msg_head.opcode  = RX_TIMEOUT;
	msg_head.flags   = op->flags;
	msg_head.count   = op->count;
	msg_head.ival1   = op->ival1;
	msg_head.ival2   = op->ival2;
	msg_head.can_id  = op->can_id;
	msg_head.nframes = 0;

	bcm_send_to_user(op, &msg_head, NULL, 0);
}

/*
 * bcm_rx_timeout_handler - when the (cyclic) CAN frame reception timed out
 */
static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
{
	struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);

	/* schedule before NET_RX_SOFTIRQ */
	tasklet_hi_schedule(&op->tsklet);

	/* no restart of the timer is done here! */

	/* if user wants to be informed, when cyclic CAN-Messages come back */
	if ((op->flags & RX_ANNOUNCE_RESUME) && op->last_frames) {
		/* clear received can_frames to indicate 'nothing received' */
		memset(op->last_frames, 0, op->nframes * CFSIZ);
	}

	return HRTIMER_NORESTART;
}

/*
 * bcm_rx_do_flush - helper for bcm_rx_thr_flush
 */
static inline int bcm_rx_do_flush(struct bcm_op *op, int update,
				  unsigned int index)
{
	if ((op->last_frames) && (op->last_frames[index].can_dlc & RX_THR)) {
		if (update)
			bcm_rx_changed(op, &op->last_frames[index]);
		return 1;
	}
	return 0;
}

/*
 * bcm_rx_thr_flush - Check for throttled data and send it to the userspace
 *
 * update == 0 : just check if throttled data is available  (any irq context)
 * update == 1 : check and send throttled data to userspace (soft_irq context)
 */
static int bcm_rx_thr_flush(struct bcm_op *op, int update)
{
	int updated = 0;

	if (op->nframes > 1) {
		unsigned int i;

		/* for MUX filter we start at index 1 */
		for (i = 1; i < op->nframes; i++)
			updated += bcm_rx_do_flush(op, update, i);

	} else {
		/* for RX_FILTER_ID and simple filter */
		updated += bcm_rx_do_flush(op, update, 0);
	}

	return updated;
}

static void bcm_rx_thr_tsklet(unsigned long data)
{
	struct bcm_op *op = (struct bcm_op *)data;

	/* push the changed data to the userspace */
	bcm_rx_thr_flush(op, 1);
}

/*
 * bcm_rx_thr_handler - the time for blocked content updates is over now:
 *                      Check for throttled data and send it to the userspace
 */
static enum hrtimer_restart bcm_rx_thr_handler(struct hrtimer *hrtimer)
{
	struct bcm_op *op = container_of(hrtimer, struct bcm_op, thrtimer);

	tasklet_schedule(&op->thrtsklet);

	if (bcm_rx_thr_flush(op, 0)) {
		hrtimer_forward(hrtimer, ktime_get(), op->kt_ival2);
		return HRTIMER_RESTART;
	} else {
		/* rearm throttle handling */
		op->kt_lastmsg = ktime_set(0, 0);
		return HRTIMER_NORESTART;
	}
}

/*
 * bcm_rx_handler - handle a CAN frame reception
 */
static void bcm_rx_handler(struct sk_buff *skb, void *data)
{
	struct bcm_op *op = (struct bcm_op *)data;
	const struct can_frame *rxframe = (struct can_frame *)skb->data;
	unsigned int i;

	/* disable timeout */
	hrtimer_cancel(&op->timer);

	if (op->can_id != rxframe->can_id)
		return;

	/* save rx timestamp */
	op->rx_stamp = skb->tstamp;
	/* save originator for recvfrom() */
	op->rx_ifindex = skb->dev->ifindex;
	/* update statistics */
	op->frames_abs++;

	if (op->flags & RX_RTR_FRAME) {
		/* send reply for RTR-request (placed in op->frames[0]) */
		bcm_can_tx(op);
		return;
	}

	if (op->flags & RX_FILTER_ID) {
		/* the easiest case */
		bcm_rx_update_and_send(op, &op->last_frames[0], rxframe);
		goto rx_starttimer;
	}

	if (op->nframes == 1) {
		/* simple compare with index 0 */
		bcm_rx_cmp_to_index(op, 0, rxframe);
		goto rx_starttimer;
	}

	if (op->nframes > 1) {
		/*
		 * multiplex compare
		 *
		 * find the first multiplex mask that fits.
		 * Remark: The MUX-mask is stored in index 0
		 */

		for (i = 1; i < op->nframes; i++) {
			if ((GET_U64(&op->frames[0]) & GET_U64(rxframe)) ==
			    (GET_U64(&op->frames[0]) &
			     GET_U64(&op->frames[i]))) {
				bcm_rx_cmp_to_index(op, i, rxframe);
				break;
			}
		}
	}

rx_starttimer:
	bcm_rx_starttimer(op);
}

/*
 * helpers for bcm_op handling: find & delete bcm [rx|tx] op elements
 */
static struct bcm_op *bcm_find_op(struct list_head *ops, canid_t can_id,
				  int ifindex)
{
	struct bcm_op *op;

	list_for_each_entry(op, ops, list) {
		if ((op->can_id == can_id) && (op->ifindex == ifindex))
			return op;
	}

	return NULL;
}

static void bcm_remove_op(struct bcm_op *op)
{
	hrtimer_cancel(&op->timer);
	hrtimer_cancel(&op->thrtimer);

	if (op->tsklet.func)
		tasklet_kill(&op->tsklet);

	if (op->thrtsklet.func)
		tasklet_kill(&op->thrtsklet);

	if ((op->frames) && (op->frames != &op->sframe))
		kfree(op->frames);

	if ((op->last_frames) && (op->last_frames != &op->last_sframe))
		kfree(op->last_frames);

	kfree(op);
}

static void bcm_rx_unreg(struct net_device *dev, struct bcm_op *op)
{
	if (op->rx_reg_dev == dev) {
		can_rx_unregister(dev, op->can_id, REGMASK(op->can_id),
				  bcm_rx_handler, op);

		/* mark as removed subscription */
		op->rx_reg_dev = NULL;
	} else
		printk(KERN_ERR "can-bcm: bcm_rx_unreg: registered device "
		       "mismatch %p %p\n", op->rx_reg_dev, dev);
}

/*
 * bcm_delete_rx_op - find and remove a rx op (returns number of removed ops)
 */
static int bcm_delete_rx_op(struct list_head *ops, canid_t can_id, int ifindex)
{
	struct bcm_op *op, *n;

	list_for_each_entry_safe(op, n, ops, list) {
		if ((op->can_id == can_id) && (op->ifindex == ifindex)) {

			/*
			 * Don't care if we're bound or not (due to netdev
			 * problems), can_rx_unregister() is always a safe
			 * thing to do here.
			 */
			if (op->ifindex) {
				/*
				 * Only remove subscriptions that had not
				 * been removed due to NETDEV_UNREGISTER
				 * in bcm_notifier()
				 */
				if (op->rx_reg_dev) {
					struct net_device *dev;

					dev = dev_get_by_index(&init_net,
							       op->ifindex);
					if (dev) {
						bcm_rx_unreg(dev, op);
						dev_put(dev);
					}
				}
			} else
				can_rx_unregister(NULL, op->can_id,
						  REGMASK(op->can_id),
						  bcm_rx_handler, op);

			list_del(&op->list);
			bcm_remove_op(op);
			return 1; /* done */
		}
	}

	return 0; /* not found */
}

/*
 * bcm_delete_tx_op - find and remove a tx op (returns number of removed ops)
 */
static int bcm_delete_tx_op(struct list_head *ops, canid_t can_id, int ifindex)
{
	struct bcm_op *op, *n;

	list_for_each_entry_safe(op, n, ops, list) {
		if ((op->can_id == can_id) && (op->ifindex == ifindex)) {
			list_del(&op->list);
			bcm_remove_op(op);
			return 1; /* done */
		}
	}

	return 0; /* not found */
}

/*
 * bcm_read_op - read out a bcm_op and send it to the user (for bcm_sendmsg)
 */
static int bcm_read_op(struct list_head *ops, struct bcm_msg_head *msg_head,
		       int ifindex)
{
	struct bcm_op *op = bcm_find_op(ops, msg_head->can_id, ifindex);

	if (!op)
		return -EINVAL;

	/* put current values into msg_head */
	msg_head->flags   = op->flags;
	msg_head->count   = op->count;
	msg_head->ival1   = op->ival1;
	msg_head->ival2   = op->ival2;
	msg_head->nframes = op->nframes;

	bcm_send_to_user(op, msg_head, op->frames, 0);

	return MHSIZ;
}

/*
 * bcm_tx_setup - create or update a bcm tx op (for bcm_sendmsg)
 */
static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
			int ifindex, struct sock *sk)
{
	struct bcm_sock *bo = bcm_sk(sk);
	struct bcm_op *op;
	unsigned int i;
	int err;

	/* we need a real device to send frames */
	if (!ifindex)
		return -ENODEV;

	/* check nframes boundaries - we need at least one can_frame */
	if (msg_head->nframes < 1 || msg_head->nframes > MAX_NFRAMES)
		return -EINVAL;

	/* check the given can_id */
	op = bcm_find_op(&bo->tx_ops, msg_head->can_id, ifindex);

	if (op) {
		/* update existing BCM operation */

		/*
		 * Do we need more space for the can_frames than currently
		 * allocated? -> This is a _really_ unusual use-case and
		 * therefore (complexity / locking) it is not supported.
		 */
		if (msg_head->nframes > op->nframes)
			return -E2BIG;

		/* update can_frames content */
		for (i = 0; i < msg_head->nframes; i++) {
			err = memcpy_fromiovec((u8 *)&op->frames[i],
					       msg->msg_iov, CFSIZ);

			if (op->frames[i].can_dlc > 8)
				err = -EINVAL;

			if (err < 0)
				return err;

			if (msg_head->flags & TX_CP_CAN_ID) {
				/* copy can_id into frame */
				op->frames[i].can_id = msg_head->can_id;
			}
		}

	} else {
		/* insert new BCM operation for the given can_id */

		op = kzalloc(OPSIZ, GFP_KERNEL);
		if (!op)
			return -ENOMEM;

		op->can_id = msg_head->can_id;

		/* create array for can_frames and copy the data */
		if (msg_head->nframes > 1) {
			op->frames = kmalloc(msg_head->nframes * CFSIZ,
					     GFP_KERNEL);
			if (!op->frames) {
				kfree(op);
				return -ENOMEM;
			}
		} else
			op->frames = &op->sframe;

		for (i = 0; i < msg_head->nframes; i++) {
			err = memcpy_fromiovec((u8 *)&op->frames[i],
					       msg->msg_iov, CFSIZ);

			if (op->frames[i].can_dlc > 8)
				err = -EINVAL;

			if (err < 0) {
				if (op->frames != &op->sframe)
					kfree(op->frames);
				kfree(op);
				return err;
			}

			if (msg_head->flags & TX_CP_CAN_ID) {
				/* copy can_id into frame */
				op->frames[i].can_id = msg_head->can_id;
			}
		}

		/* tx_ops never compare with previous received messages */
		op->last_frames = NULL;

		/* bcm_can_tx / bcm_tx_timeout_handler needs this */
		op->sk = sk;
		op->ifindex = ifindex;

		/* initialize uninitialized (kzalloc) structure */
		hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		op->timer.function = bcm_tx_timeout_handler;

		/* initialize tasklet for tx countevent notification */
		tasklet_init(&op->tsklet, bcm_tx_timeout_tsklet,
			     (unsigned long) op);

		/* currently unused in tx_ops */
		hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);

		/* add this bcm_op to the list of the tx_ops */
		list_add(&op->list, &bo->tx_ops);

	} /* if ((op = bcm_find_op(&bo->tx_ops, msg_head->can_id, ifindex))) */

	if (op->nframes != msg_head->nframes) {
		op->nframes = msg_head->nframes;
		/* start multiple frame transmission with index 0 */
		op->currframe = 0;
	}

	/* check flags */

	op->flags = msg_head->flags;

	if (op->flags & TX_RESET_MULTI_IDX) {
		/* start multiple frame transmission with index 0 */
		op->currframe = 0;
	}

	if (op->flags & SETTIMER) {
		/* set timer values */
		op->count = msg_head->count;
		op->ival1 = msg_head->ival1;
		op->ival2 = msg_head->ival2;
		op->kt_ival1 = timeval_to_ktime(msg_head->ival1);
		op->kt_ival2 = timeval_to_ktime(msg_head->ival2);

		/* disable an active timer due to zero values? */
		if (!op->kt_ival1.tv64 && !op->kt_ival2.tv64)
			hrtimer_cancel(&op->timer);
	}

	if (op->flags & STARTTIMER) {
		hrtimer_cancel(&op->timer);
		/* spec: send can_frame when starting timer */
		op->flags |= TX_ANNOUNCE;
	}

	if (op->flags & TX_ANNOUNCE) {
		bcm_can_tx(op);
		if (op->count)
			op->count--;
	}

	if (op->flags & STARTTIMER)
		bcm_tx_start_timer(op);

	return msg_head->nframes * CFSIZ + MHSIZ;
}

/*
 * bcm_rx_setup - create or update a bcm rx op (for bcm_sendmsg)
 */
static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
			int ifindex, struct sock *sk)
{
	struct bcm_sock *bo = bcm_sk(sk);
	struct bcm_op *op;
	int do_rx_register;
	int err = 0;

	if ((msg_head->flags & RX_FILTER_ID) || (!(msg_head->nframes))) {
		/* be robust against wrong usage ... */
		msg_head->flags |= RX_FILTER_ID;
		/* ignore trailing garbage */
		msg_head->nframes = 0;
	}

	/* the first element contains the mux-mask => MAX_NFRAMES + 1 */
	if (msg_head->nframes > MAX_NFRAMES + 1)
		return -EINVAL;

	if ((msg_head->flags & RX_RTR_FRAME) &&
	    ((msg_head->nframes != 1) ||
	     (!(msg_head->can_id & CAN_RTR_FLAG))))
		return -EINVAL;

	/* check the given can_id */
	op = bcm_find_op(&bo->rx_ops, msg_head->can_id, ifindex);
	if (op) {
		/* update existing BCM operation */

		/*
		 * Do we need more space for the can_frames than currently
		 * allocated? -> This is a _really_ unusual use-case and
		 * therefore (complexity / locking) it is not supported.
		 */
		if (msg_head->nframes > op->nframes)
			return -E2BIG;

		if (msg_head->nframes) {
			/* update can_frames content */
			err = memcpy_fromiovec((u8 *)op->frames,
					       msg->msg_iov,
					       msg_head->nframes * CFSIZ);
			if (err < 0)
				return err;

			/* clear last_frames to indicate 'nothing received' */
			memset(op->last_frames, 0, msg_head->nframes * CFSIZ);
		}

		op->nframes = msg_head->nframes;

		/* Only an update -> do not call can_rx_register() */
		do_rx_register = 0;

	} else {
		/* insert new BCM operation for the given can_id */
		op = kzalloc(OPSIZ, GFP_KERNEL);
		if (!op)
			return -ENOMEM;

		op->can_id = msg_head->can_id;
		op->nframes = msg_head->nframes;

		if (msg_head->nframes > 1) {
			/* create array for can_frames and copy the data */
			op->frames = kmalloc(msg_head->nframes * CFSIZ,
					     GFP_KERNEL);
			if (!op->frames) {
				kfree(op);
				return -ENOMEM;
			}

			/* create and init array for received can_frames */
			op->last_frames = kzalloc(msg_head->nframes * CFSIZ,
						  GFP_KERNEL);
			if (!op->last_frames) {
				kfree(op->frames);
				kfree(op);
				return -ENOMEM;
			}

		} else {
			op->frames = &op->sframe;
			op->last_frames = &op->last_sframe;
		}

		if (msg_head->nframes) {
			err = memcpy_fromiovec((u8 *)op->frames, msg->msg_iov,
					       msg_head->nframes * CFSIZ);
			if (err < 0) {
				if (op->frames != &op->sframe)
					kfree(op->frames);
				if (op->last_frames != &op->last_sframe)
					kfree(op->last_frames);
				kfree(op);
				return err;
			}
		}

		/* bcm_can_tx / bcm_tx_timeout_handler needs this */
		op->sk = sk;
		op->ifindex = ifindex;

		/* ifindex for timeout events w/o previous frame reception */
		op->rx_ifindex = ifindex;

		/* initialize uninitialized (kzalloc) structure */
		hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		op->timer.function = bcm_rx_timeout_handler;

		/* initialize tasklet for rx timeout notification */
		tasklet_init(&op->tsklet, bcm_rx_timeout_tsklet,
			     (unsigned long) op);

		hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		op->thrtimer.function = bcm_rx_thr_handler;

		/* initialize tasklet for rx throttle handling */
		tasklet_init(&op->thrtsklet, bcm_rx_thr_tsklet,
			     (unsigned long) op);

		/* add this bcm_op to the list of the rx_ops */
		list_add(&op->list, &bo->rx_ops);

		/* call can_rx_register() */
		do_rx_register = 1;

	} /* if ((op = bcm_find_op(&bo->rx_ops, msg_head->can_id, ifindex))) */

	/* check flags */
	op->flags = msg_head->flags;

	if (op->flags & RX_RTR_FRAME) {

		/* no timers in RTR-mode */
		hrtimer_cancel(&op->thrtimer);
		hrtimer_cancel(&op->timer);

		/*
		 * funny feature in RX(!)_SETUP only for RTR-mode:
		 * copy can_id into frame BUT without RTR-flag to
		 * prevent a full-load-loopback-test ... ;-]
		 */
		if ((op->flags & TX_CP_CAN_ID) ||
		    (op->frames[0].can_id == op->can_id))
			op->frames[0].can_id = op->can_id & ~CAN_RTR_FLAG;

	} else {
		if (op->flags & SETTIMER) {

			/* set timer value */
			op->ival1 = msg_head->ival1;
			op->ival2 = msg_head->ival2;
			op->kt_ival1 = timeval_to_ktime(msg_head->ival1);
			op->kt_ival2 = timeval_to_ktime(msg_head->ival2);

			/* disable an active timer due to zero value? */
			if (!op->kt_ival1.tv64)
				hrtimer_cancel(&op->timer);

			/*
			 * In any case cancel the throttle timer, flush
			 * potentially blocked msgs and reset throttle handling
			 */
			op->kt_lastmsg = ktime_set(0, 0);
			hrtimer_cancel(&op->thrtimer);
			bcm_rx_thr_flush(op, 1);
		}

		if ((op->flags & STARTTIMER) && op->kt_ival1.tv64)
			hrtimer_start(&op->timer, op->kt_ival1,
				      HRTIMER_MODE_REL);
	}

	/* now we can register for can_ids, if we added a new bcm_op */
	if (do_rx_register) {
		if (ifindex) {
			struct net_device *dev;

			dev = dev_get_by_index(&init_net, ifindex);
			if (dev) {
				err = can_rx_register(dev, op->can_id,
						      REGMASK(op->can_id),
						      bcm_rx_handler, op,
						      "bcm");

				op->rx_reg_dev = dev;
				dev_put(dev);
			}

		} else
			err = can_rx_register(NULL, op->can_id,
					      REGMASK(op->can_id),
					      bcm_rx_handler, op, "bcm");
		if (err) {
			/* this bcm rx op is broken -> remove it */
			list_del(&op->list);
			bcm_remove_op(op);
			return err;
		}
	}

	return msg_head->nframes * CFSIZ + MHSIZ;
}

/*
 * bcm_tx_send - send a single CAN frame to the CAN interface (for bcm_sendmsg)
 */
static int bcm_tx_send(struct msghdr *msg, int ifindex, struct sock *sk)
{
	struct sk_buff *skb;
	struct net_device *dev;
	int err;

	/* we need a real device to send frames */
	if (!ifindex)
		return -ENODEV;

	skb = alloc_skb(CFSIZ + sizeof(struct can_skb_priv), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	can_skb_reserve(skb);

	err = memcpy_fromiovec(skb_put(skb, CFSIZ), msg->msg_iov, CFSIZ);
	if (err < 0) {
		kfree_skb(skb);
		return err;
	}

	dev = dev_get_by_index(&init_net, ifindex);
	if (!dev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	can_skb_prv(skb)->ifindex = dev->ifindex;
	skb->dev = dev;
	can_skb_set_owner(skb, sk);
	err = can_send(skb, 1); /* send with loopback */
	dev_put(dev);

	if (err)
		return err;

	return CFSIZ + MHSIZ;
}

/*
 * bcm_sendmsg - process BCM commands (opcodes) from the userspace
 */
static int bcm_sendmsg(struct kiocb *iocb, struct socket *sock,
		       struct msghdr *msg, size_t size)
{
	struct sock *sk = sock->sk;
	struct bcm_sock *bo = bcm_sk(sk);
	int ifindex = bo->ifindex; /* default ifindex for this bcm_op */
	struct bcm_msg_head msg_head;
	int ret; /* read bytes or error codes as return value */

	if (!bo->bound)
		return -ENOTCONN;

	/* check for valid message length from userspace */
	if (size < MHSIZ || (size - MHSIZ) % CFSIZ)
		return -EINVAL;

	/* check for alternative ifindex for this bcm_op */

	if (!ifindex && msg->msg_name) {
		/* no bound device as default => check msg_name */
		DECLARE_SOCKADDR(struct sockaddr_can *, addr, msg->msg_name);

		if (msg->msg_namelen < sizeof(*addr))
			return -EINVAL;

		if (addr->can_family != AF_CAN)
			return -EINVAL;

		/* ifindex from sendto() */
		ifindex = addr->can_ifindex;

		if (ifindex) {
			struct net_device *dev;

			dev = dev_get_by_index(&init_net, ifindex);
			if (!dev)
				return -ENODEV;

			if (dev->type != ARPHRD_CAN) {
				dev_put(dev);
				return -ENODEV;
			}

			dev_put(dev);
		}
	}

	/* read message head information */

	ret = memcpy_fromiovec((u8 *)&msg_head, msg->msg_iov, MHSIZ);
	if (ret < 0)
		return ret;

	lock_sock(sk);

	switch (msg_head.opcode) {

	case TX_SETUP:
		ret = bcm_tx_setup(&msg_head, msg, ifindex, sk);
		break;

	case RX_SETUP:
		ret = bcm_rx_setup(&msg_head, msg, ifindex, sk);
		break;

	case TX_DELETE:
		if (bcm_delete_tx_op(&bo->tx_ops, msg_head.can_id, ifindex))
			ret = MHSIZ;
		else
			ret = -EINVAL;
		break;

	case RX_DELETE:
		if (bcm_delete_rx_op(&bo->rx_ops, msg_head.can_id, ifindex))
			ret = MHSIZ;
		else
			ret = -EINVAL;
		break;

	case TX_READ:
		/* reuse msg_head for the reply to TX_READ */
		msg_head.opcode = TX_STATUS;
		ret = bcm_read_op(&bo->tx_ops, &msg_head, ifindex);
		break;

	case RX_READ:
		/* reuse msg_head for the reply to RX_READ */
		msg_head.opcode = RX_STATUS;
		ret = bcm_read_op(&bo->rx_ops, &msg_head, ifindex);
		break;

	case TX_SEND:
		/* we need exactly one can_frame behind the msg head */
		if ((msg_head.nframes != 1) || (size != CFSIZ + MHSIZ))
			ret = -EINVAL;
		else
			ret = bcm_tx_send(msg, ifindex, sk);
		break;

	default:
		ret = -EINVAL;
		break;
	}

	release_sock(sk);

	return ret;
}

/*
 * notification handler for netdevice status changes
 */
static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
			void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct bcm_sock *bo = container_of(nb, struct bcm_sock, notifier);
	struct sock *sk = &bo->sk;
	struct bcm_op *op;
	int notify_enodev = 0;

	if (!net_eq(dev_net(dev), &init_net))
		return NOTIFY_DONE;

	if (dev->type != ARPHRD_CAN)
		return NOTIFY_DONE;

	switch (msg) {

	case NETDEV_UNREGISTER:
		lock_sock(sk);

		/* remove device specific receive entries */
		list_for_each_entry(op, &bo->rx_ops, list)
			if (op->rx_reg_dev == dev)
				bcm_rx_unreg(dev, op);

		/* remove device reference, if this is our bound device */
		if (bo->bound && bo->ifindex == dev->ifindex) {
			bo->bound = 0;
			bo->ifindex = 0;
			notify_enodev = 1;
		}

		release_sock(sk);

		if (notify_enodev) {
			sk->sk_err = ENODEV;
			if (!sock_flag(sk, SOCK_DEAD))
				sk->sk_error_report(sk);
		}
		break;

	case NETDEV_DOWN:
		if (bo->bound && bo->ifindex == dev->ifindex) {
			sk->sk_err = ENETDOWN;
			if (!sock_flag(sk, SOCK_DEAD))
				sk->sk_error_report(sk);
		}
	}

	return NOTIFY_DONE;
}

/*
 * initial settings for all BCM sockets to be set at socket creation time
 */
static int bcm_init(struct sock *sk)
{
	struct bcm_sock *bo = bcm_sk(sk);

	bo->bound            = 0;
	bo->ifindex          = 0;
	bo->dropped_usr_msgs = 0;
	bo->bcm_proc_read    = NULL;

	INIT_LIST_HEAD(&bo->tx_ops);
	INIT_LIST_HEAD(&bo->rx_ops);

	/* set notifier */
	bo->notifier.notifier_call = bcm_notifier;

	register_netdevice_notifier(&bo->notifier);

	return 0;
}

/*
 * standard socket functions
 */
static int bcm_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct bcm_sock *bo;
	struct bcm_op *op, *next;

	if (sk == NULL)
		return 0;

	bo = bcm_sk(sk);

	/* remove bcm_ops, timer, rx_unregister(), etc. */

	unregister_netdevice_notifier(&bo->notifier);

	lock_sock(sk);

	list_for_each_entry_safe(op, next, &bo->tx_ops, list)
		bcm_remove_op(op);

	list_for_each_entry_safe(op, next, &bo->rx_ops, list) {
		/*
		 * Don't care if we're bound or not (due to netdev problems),
		 * can_rx_unregister() is always a safe thing to do here.
		 */
		if (op->ifindex) {
			/*
			 * Only remove subscriptions that had not
			 * been removed due to NETDEV_UNREGISTER
			 * in bcm_notifier()
			 */
			if (op->rx_reg_dev) {
				struct net_device *dev;

				dev = dev_get_by_index(&init_net, op->ifindex);
				if (dev) {
					bcm_rx_unreg(dev, op);
					dev_put(dev);
				}
			}
		} else
			can_rx_unregister(NULL, op->can_id,
					  REGMASK(op->can_id),
					  bcm_rx_handler, op);

		bcm_remove_op(op);
	}

	/* remove procfs entry */
	if (proc_dir && bo->bcm_proc_read)
		remove_proc_entry(bo->procname, proc_dir);

	/* remove device reference */
	if (bo->bound) {
		bo->bound = 0;
		bo->ifindex = 0;
	}

	sock_orphan(sk);
	sock->sk = NULL;

	release_sock(sk);
	sock_put(sk);

	return 0;
}

static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
		       int flags)
{
	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
	struct sock *sk = sock->sk;
	struct bcm_sock *bo = bcm_sk(sk);

	if (len < sizeof(*addr))
		return -EINVAL;

	if (bo->bound)
		return -EISCONN;

	/* bind a device to this socket */
	if (addr->can_ifindex) {
		struct net_device *dev;

		dev = dev_get_by_index(&init_net, addr->can_ifindex);
		if (!dev)
			return -ENODEV;

		if (dev->type != ARPHRD_CAN) {
			dev_put(dev);
			return -ENODEV;
		}

		bo->ifindex = dev->ifindex;
		dev_put(dev);

	} else {
		/* no interface reference for ifindex = 0 ('any' CAN device) */
		bo->ifindex = 0;
	}

	bo->bound = 1;

	if (proc_dir) {
		/* unique socket address as filename */
		sprintf(bo->procname, "%lu", sock_i_ino(sk));
		bo->bcm_proc_read = proc_create_data(bo->procname, 0644,
						     proc_dir,
						     &bcm_proc_fops, sk);
	}

	return 0;
}

static int bcm_recvmsg(struct kiocb *iocb, struct socket *sock,
		       struct msghdr *msg, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int error = 0;
	int noblock;
	int err;

	noblock = flags & MSG_DONTWAIT;
	flags &= ~MSG_DONTWAIT;
	skb = skb_recv_datagram(sk, flags, noblock, &error);
	if (!skb)
		return error;

	if (skb->len < size)
		size = skb->len;

	err = memcpy_toiovec(msg->msg_iov, skb->data, size);
	if (err < 0) {
		skb_free_datagram(sk, skb);
		return err;
	}

	sock_recv_ts_and_drops(msg, sk, skb);

	if (msg->msg_name) {
		__sockaddr_check_size(sizeof(struct sockaddr_can));
		msg->msg_namelen = sizeof(struct sockaddr_can);
		memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
	}

	skb_free_datagram(sk, skb);

	return size;
}

static const struct proto_ops bcm_ops = {
	.family     = PF_CAN,
	.release    = bcm_release,
	.bind       = sock_no_bind,
	.connect    = bcm_connect,
	.socketpair = sock_no_socketpair,
	.accept     = sock_no_accept,
	.getname    = sock_no_getname,
	.poll       = datagram_poll,
	.ioctl      = can_ioctl, /* use can_ioctl() from af_can.c */
	.listen     = sock_no_listen,
	.shutdown   = sock_no_shutdown,
	.setsockopt = sock_no_setsockopt,
	.getsockopt = sock_no_getsockopt,
	.sendmsg    = bcm_sendmsg,
	.recvmsg    = bcm_recvmsg,
	.mmap       = sock_no_mmap,
	.sendpage   = sock_no_sendpage,
};

static struct proto bcm_proto __read_mostly = {
	.name     = "CAN_BCM",
	.owner    = THIS_MODULE,
	.obj_size = sizeof(struct bcm_sock),
	.init     = bcm_init,
};

static const struct can_proto bcm_can_proto = {
	.type     = SOCK_DGRAM,
	.protocol = CAN_BCM,
	.ops      = &bcm_ops,
	.prot     = &bcm_proto,
};

static int __init bcm_module_init(void)
{
	int err;

	printk(banner);

	err = can_proto_register(&bcm_can_proto);
	if (err < 0) {
		printk(KERN_ERR "can: registration of bcm protocol failed\n");
		return err;
	}

	/* create /proc/net/can-bcm directory */
	proc_dir = proc_mkdir("can-bcm", init_net.proc_net);
	return 0;
}

static void __exit bcm_module_exit(void)
{
	can_proto_unregister(&bcm_can_proto);

	if (proc_dir)
		remove_proc_entry("can-bcm", init_net.proc_net);
}

module_init(bcm_module_init);
module_exit(bcm_module_exit);
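/*
 * Usage note: a minimal userspace sketch of how this CAN_BCM socket is
 * typically driven - connect() to a CAN interface, then write() a struct
 * bcm_msg_head followed by its can_frame(s), which is exactly the layout
 * bcm_sendmsg()/bcm_tx_setup() above expect. The interface name "can0",
 * the CAN ID 0x123, the payload bytes and the 100 ms cycle time are
 * illustrative assumptions only; error handling is omitted.
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <net/if.h>
 *	#include <sys/socket.h>
 *	#include <linux/can.h>
 *	#include <linux/can/bcm.h>
 *
 *	struct {
 *		struct bcm_msg_head msg_head;
 *		struct can_frame frame;
 *	} msg;
 *
 *	int s = socket(PF_CAN, SOCK_DGRAM, CAN_BCM);
 *	struct sockaddr_can addr;
 *
 *	memset(&addr, 0, sizeof(addr));
 *	addr.can_family  = AF_CAN;
 *	addr.can_ifindex = if_nametoindex("can0");	// assumed interface
 *	connect(s, (struct sockaddr *)&addr, sizeof(addr));
 *
 *	// TX_SETUP: transmit one frame cyclically every 100 ms
 *	memset(&msg, 0, sizeof(msg));
 *	msg.msg_head.opcode  = TX_SETUP;
 *	msg.msg_head.flags   = SETTIMER | STARTTIMER | TX_CP_CAN_ID;
 *	msg.msg_head.count   = 0;			// no ival1 burst, cycle with ival2 only
 *	msg.msg_head.ival2.tv_usec = 100000;		// 100 ms cycle time (assumed)
 *	msg.msg_head.can_id  = 0x123;			// assumed CAN ID
 *	msg.msg_head.nframes = 1;
 *	msg.frame.can_dlc = 2;
 *	msg.frame.data[0] = 0xde;
 *	msg.frame.data[1] = 0xad;
 *
 *	write(s, &msg, sizeof(msg));			// size = MHSIZ + 1 * CFSIZ
 */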