/*
 * bcm.c - Broadcast Manager to filter/send (cyclic) CAN content
 *
 * Copyright (c) 2002-2007 Volkswagen Group Electronic Research
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Volkswagen nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * Alternatively, provided that this notice is retained in full, this
 * software may be distributed under the terms of the GNU General
 * Public License ("GPL") version 2, in which case the provisions of the
 * GPL apply INSTEAD OF those given above.
 *
 * The provided data structures and external interfaces from this code
 * are not restricted to be used by modules with a GPL compatible license.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * Send feedback to <socketcan-users@lists.berlios.de>
 *
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/hrtimer.h>
#include <linux/list.h>
#include <linux/proc_fs.h>
#include <linux/uio.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <linux/can.h>
#include <linux/can/core.h>
#include <linux/can/bcm.h>
#include <net/sock.h>
#include <net/net_namespace.h>

/* use of last_frames[index].can_dlc */
#define RX_RECV 0x40 /* received data for this element */
#define RX_THR  0x80 /* element has not been sent due to the throttle feature */
#define BCM_CAN_DLC_MASK 0x0F /* clean private flags in can_dlc by masking */

/* get best masking value for can_rx_register() for a given single can_id */
#define REGMASK(id) ((id & CAN_EFF_FLAG) ? \
		(CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \
		(CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG))
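
/*
 * note (illustration, not from the original source): the EFF/RTR flag bits
 * are part of the mask, so a bcm_op only matches frames whose frame format
 * and RTR bit are identical to those in its can_id
 */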

#define CAN_BCM_VERSION CAN_VERSION
static __initdata const char banner[] = KERN_INFO
	"can: broadcast manager protocol (rev " CAN_BCM_VERSION " t)\n";

MODULE_DESCRIPTION("PF_CAN broadcast manager protocol");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");

/* easy access to can_frame payload */
static inline u64 GET_U64(const struct can_frame *cp)
{
	return *(u64 *)cp->data;
}

struct bcm_op {
	struct list_head list;
	int ifindex;
	canid_t can_id;
	int flags;
	unsigned long frames_abs, frames_filtered;
	struct timeval ival1, ival2;
	struct hrtimer timer, thrtimer;
	struct tasklet_struct tsklet, thrtsklet;
	ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg;
	int rx_ifindex;
	int count;
	int nframes;
	int currframe;
	struct can_frame *frames;
	struct can_frame *last_frames;
	struct can_frame sframe;
	struct can_frame last_sframe;
	struct sock *sk;
	struct net_device *rx_reg_dev;
};

static struct proc_dir_entry *proc_dir;

struct bcm_sock {
	struct sock sk;
	int bound;
	int ifindex;
	struct notifier_block notifier;
	struct list_head rx_ops;
	struct list_head tx_ops;
	unsigned long dropped_usr_msgs;
	struct proc_dir_entry *bcm_proc_read;
	char procname [9]; /* pointer printed in ASCII with \0 */
};

static inline struct bcm_sock *bcm_sk(const struct sock *sk)
{
	return (struct bcm_sock *)sk;
}

#define CFSIZ sizeof(struct can_frame)
#define OPSIZ sizeof(struct bcm_op)
#define MHSIZ sizeof(struct bcm_msg_head)

/*
 * procfs functions
 */
static char *bcm_proc_getifname(int ifindex)
{
	struct net_device *dev;

	if (!ifindex)
		return "any";

	/* no usage counting */
	dev = __dev_get_by_index(&init_net, ifindex);
	if (dev)
		return dev->name;

	return "???";
}

static int bcm_read_proc(char *page, char **start, off_t off,
			 int count, int *eof, void *data)
{
	int len = 0;
	struct sock *sk = (struct sock *)data;
	struct bcm_sock *bo = bcm_sk(sk);
	struct bcm_op *op;

	len += snprintf(page + len, PAGE_SIZE - len, ">>> socket %p",
			sk->sk_socket);
	len += snprintf(page + len, PAGE_SIZE - len, " / sk %p", sk);
	len += snprintf(page + len, PAGE_SIZE - len, " / bo %p", bo);
	len += snprintf(page + len, PAGE_SIZE - len, " / dropped %lu",
			bo->dropped_usr_msgs);
	len += snprintf(page + len, PAGE_SIZE - len, " / bound %s",
			bcm_proc_getifname(bo->ifindex));
	len += snprintf(page + len, PAGE_SIZE - len, " <<<\n");

	list_for_each_entry(op, &bo->rx_ops, list) {

		unsigned long reduction;

		/* print only active entries & prevent division by zero */
		if (!op->frames_abs)
			continue;

		len += snprintf(page + len, PAGE_SIZE - len,
				"rx_op: %03X %-5s ",
				op->can_id, bcm_proc_getifname(op->ifindex));
		len += snprintf(page + len, PAGE_SIZE - len, "[%d]%c ",
				op->nframes,
				(op->flags & RX_CHECK_DLC)?'d':' ');
		if (op->kt_ival1.tv64)
			len += snprintf(page + len, PAGE_SIZE - len,
					"timeo=%lld ",
					(long long)
					ktime_to_us(op->kt_ival1));

		if (op->kt_ival2.tv64)
			len += snprintf(page + len, PAGE_SIZE - len,
					"thr=%lld ",
					(long long)
					ktime_to_us(op->kt_ival2));

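		/*
		 * reduction: percentage of received frames that the content
		 * filter suppressed, i.e. that were not forwarded to the
		 * user as RX_CHANGED messages
		 */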
193 "# recv %ld (%ld) => reduction: ", 194 op->frames_filtered, op->frames_abs); 195 196 reduction = 100 - (op->frames_filtered * 100) / op->frames_abs; 197 198 len += snprintf(page + len, PAGE_SIZE - len, "%s%ld%%\n", 199 (reduction == 100)?"near ":"", reduction); 200 201 if (len > PAGE_SIZE - 200) { 202 /* mark output cut off */ 203 len += snprintf(page + len, PAGE_SIZE - len, "(..)\n"); 204 break; 205 } 206 } 207 208 list_for_each_entry(op, &bo->tx_ops, list) { 209 210 len += snprintf(page + len, PAGE_SIZE - len, 211 "tx_op: %03X %s [%d] ", 212 op->can_id, bcm_proc_getifname(op->ifindex), 213 op->nframes); 214 215 if (op->kt_ival1.tv64) 216 len += snprintf(page + len, PAGE_SIZE - len, "t1=%lld ", 217 (long long) ktime_to_us(op->kt_ival1)); 218 219 if (op->kt_ival2.tv64) 220 len += snprintf(page + len, PAGE_SIZE - len, "t2=%lld ", 221 (long long) ktime_to_us(op->kt_ival2)); 222 223 len += snprintf(page + len, PAGE_SIZE - len, "# sent %ld\n", 224 op->frames_abs); 225 226 if (len > PAGE_SIZE - 100) { 227 /* mark output cut off */ 228 len += snprintf(page + len, PAGE_SIZE - len, "(..)\n"); 229 break; 230 } 231 } 232 233 len += snprintf(page + len, PAGE_SIZE - len, "\n"); 234 235 *eof = 1; 236 return len; 237 } 238 239 /* 240 * bcm_can_tx - send the (next) CAN frame to the appropriate CAN interface 241 * of the given bcm tx op 242 */ 243 static void bcm_can_tx(struct bcm_op *op) 244 { 245 struct sk_buff *skb; 246 struct net_device *dev; 247 struct can_frame *cf = &op->frames[op->currframe]; 248 249 /* no target device? => exit */ 250 if (!op->ifindex) 251 return; 252 253 dev = dev_get_by_index(&init_net, op->ifindex); 254 if (!dev) { 255 /* RFC: should this bcm_op remove itself here? */ 256 return; 257 } 258 259 skb = alloc_skb(CFSIZ, gfp_any()); 260 if (!skb) 261 goto out; 262 263 memcpy(skb_put(skb, CFSIZ), cf, CFSIZ); 264 265 /* send with loopback */ 266 skb->dev = dev; 267 skb->sk = op->sk; 268 can_send(skb, 1); 269 270 /* update statistics */ 271 op->currframe++; 272 op->frames_abs++; 273 274 /* reached last frame? */ 275 if (op->currframe >= op->nframes) 276 op->currframe = 0; 277 out: 278 dev_put(dev); 279 } 280 281 /* 282 * bcm_send_to_user - send a BCM message to the userspace 283 * (consisting of bcm_msg_head + x CAN frames) 284 */ 285 static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head, 286 struct can_frame *frames, int has_timestamp) 287 { 288 struct sk_buff *skb; 289 struct can_frame *firstframe; 290 struct sockaddr_can *addr; 291 struct sock *sk = op->sk; 292 int datalen = head->nframes * CFSIZ; 293 int err; 294 295 skb = alloc_skb(sizeof(*head) + datalen, gfp_any()); 296 if (!skb) 297 return; 298 299 memcpy(skb_put(skb, sizeof(*head)), head, sizeof(*head)); 300 301 if (head->nframes) { 302 /* can_frames starting here */ 303 firstframe = (struct can_frame *)skb_tail_pointer(skb); 304 305 memcpy(skb_put(skb, datalen), frames, datalen); 306 307 /* 308 * the BCM uses the can_dlc-element of the can_frame 309 * structure for internal purposes. This is only 310 * relevant for updates that are generated by the 311 * BCM, where nframes is 1 312 */ 313 if (head->nframes == 1) 314 firstframe->can_dlc &= BCM_CAN_DLC_MASK; 315 } 316 317 if (has_timestamp) { 318 /* restore rx timestamp */ 319 skb->tstamp = op->rx_stamp; 320 } 321 322 /* 323 * Put the datagram to the queue so that bcm_recvmsg() can 324 * get it from there. We need to pass the interface index to 325 * bcm_recvmsg(). We pass a whole struct sockaddr_can in skb->cb 326 * containing the interface index. 
327 */ 328 329 BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct sockaddr_can)); 330 addr = (struct sockaddr_can *)skb->cb; 331 memset(addr, 0, sizeof(*addr)); 332 addr->can_family = AF_CAN; 333 addr->can_ifindex = op->rx_ifindex; 334 335 err = sock_queue_rcv_skb(sk, skb); 336 if (err < 0) { 337 struct bcm_sock *bo = bcm_sk(sk); 338 339 kfree_skb(skb); 340 /* don't care about overflows in this statistic */ 341 bo->dropped_usr_msgs++; 342 } 343 } 344 345 static void bcm_tx_timeout_tsklet(unsigned long data) 346 { 347 struct bcm_op *op = (struct bcm_op *)data; 348 struct bcm_msg_head msg_head; 349 350 if (op->kt_ival1.tv64 && (op->count > 0)) { 351 352 op->count--; 353 if (!op->count && (op->flags & TX_COUNTEVT)) { 354 355 /* create notification to user */ 356 msg_head.opcode = TX_EXPIRED; 357 msg_head.flags = op->flags; 358 msg_head.count = op->count; 359 msg_head.ival1 = op->ival1; 360 msg_head.ival2 = op->ival2; 361 msg_head.can_id = op->can_id; 362 msg_head.nframes = 0; 363 364 bcm_send_to_user(op, &msg_head, NULL, 0); 365 } 366 } 367 368 if (op->kt_ival1.tv64 && (op->count > 0)) { 369 370 /* send (next) frame */ 371 bcm_can_tx(op); 372 hrtimer_start(&op->timer, 373 ktime_add(ktime_get(), op->kt_ival1), 374 HRTIMER_MODE_ABS); 375 376 } else { 377 if (op->kt_ival2.tv64) { 378 379 /* send (next) frame */ 380 bcm_can_tx(op); 381 hrtimer_start(&op->timer, 382 ktime_add(ktime_get(), op->kt_ival2), 383 HRTIMER_MODE_ABS); 384 } 385 } 386 } 387 388 /* 389 * bcm_tx_timeout_handler - performes cyclic CAN frame transmissions 390 */ 391 static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer) 392 { 393 struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer); 394 395 tasklet_schedule(&op->tsklet); 396 397 return HRTIMER_NORESTART; 398 } 399 400 /* 401 * bcm_rx_changed - create a RX_CHANGED notification due to changed content 402 */ 403 static void bcm_rx_changed(struct bcm_op *op, struct can_frame *data) 404 { 405 struct bcm_msg_head head; 406 407 /* update statistics */ 408 op->frames_filtered++; 409 410 /* prevent statistics overflow */ 411 if (op->frames_filtered > ULONG_MAX/100) 412 op->frames_filtered = op->frames_abs = 0; 413 414 /* this element is not throttled anymore */ 415 data->can_dlc &= (BCM_CAN_DLC_MASK|RX_RECV); 416 417 head.opcode = RX_CHANGED; 418 head.flags = op->flags; 419 head.count = op->count; 420 head.ival1 = op->ival1; 421 head.ival2 = op->ival2; 422 head.can_id = op->can_id; 423 head.nframes = 1; 424 425 bcm_send_to_user(op, &head, data, 1); 426 } 427 428 /* 429 * bcm_rx_update_and_send - process a detected relevant receive content change 430 * 1. update the last received data 431 * 2. send a notification to the user (if possible) 432 */ 433 static void bcm_rx_update_and_send(struct bcm_op *op, 434 struct can_frame *lastdata, 435 const struct can_frame *rxdata) 436 { 437 memcpy(lastdata, rxdata, CFSIZ); 438 439 /* mark as used and throttled by default */ 440 lastdata->can_dlc |= (RX_RECV|RX_THR); 441 442 /* throtteling mode inactive ? */ 443 if (!op->kt_ival2.tv64) { 444 /* send RX_CHANGED to the user immediately */ 445 bcm_rx_changed(op, lastdata); 446 return; 447 } 448 449 /* with active throttling timer we are just done here */ 450 if (hrtimer_active(&op->thrtimer)) 451 return; 452 453 /* first receiption with enabled throttling mode */ 454 if (!op->kt_lastmsg.tv64) 455 goto rx_changed_settime; 456 457 /* got a second frame inside a potential throttle period? 

	/* got a second frame inside a potential throttle period? */
	if (ktime_us_delta(ktime_get(), op->kt_lastmsg) <
	    ktime_to_us(op->kt_ival2)) {
		/* do not send the saved data - only start throttle timer */
		hrtimer_start(&op->thrtimer,
			      ktime_add(op->kt_lastmsg, op->kt_ival2),
			      HRTIMER_MODE_ABS);
		return;
	}

	/* the gap was big enough, so throttling was not needed here */
rx_changed_settime:
	bcm_rx_changed(op, lastdata);
	op->kt_lastmsg = ktime_get();
}

/*
 * bcm_rx_cmp_to_index - (bit)compares the currently received data to formerly
 *                       received data stored in op->last_frames[]
 */
static void bcm_rx_cmp_to_index(struct bcm_op *op, int index,
				const struct can_frame *rxdata)
{
	/*
	 * no one uses the MSBs of can_dlc for comparison,
	 * so we use it here to detect the first time of reception
	 */

	if (!(op->last_frames[index].can_dlc & RX_RECV)) {
		/* received data for the first time => send update to user */
		bcm_rx_update_and_send(op, &op->last_frames[index], rxdata);
		return;
	}

	/* do a real check in the can_frame data section */

	if ((GET_U64(&op->frames[index]) & GET_U64(rxdata)) !=
	    (GET_U64(&op->frames[index]) & GET_U64(&op->last_frames[index]))) {
		bcm_rx_update_and_send(op, &op->last_frames[index], rxdata);
		return;
	}

	if (op->flags & RX_CHECK_DLC) {
		/* do a real check in the can_frame dlc */
		if (rxdata->can_dlc != (op->last_frames[index].can_dlc &
					BCM_CAN_DLC_MASK)) {
			bcm_rx_update_and_send(op, &op->last_frames[index],
					       rxdata);
			return;
		}
	}
}

/*
 * bcm_rx_starttimer - enable timeout monitoring for CAN frame reception
 */
static void bcm_rx_starttimer(struct bcm_op *op)
{
	if (op->flags & RX_NO_AUTOTIMER)
		return;

	if (op->kt_ival1.tv64)
		hrtimer_start(&op->timer, op->kt_ival1, HRTIMER_MODE_REL);
}

static void bcm_rx_timeout_tsklet(unsigned long data)
{
	struct bcm_op *op = (struct bcm_op *)data;
	struct bcm_msg_head msg_head;

	/* create notification to user */
	msg_head.opcode = RX_TIMEOUT;
	msg_head.flags = op->flags;
	msg_head.count = op->count;
	msg_head.ival1 = op->ival1;
	msg_head.ival2 = op->ival2;
	msg_head.can_id = op->can_id;
	msg_head.nframes = 0;

	bcm_send_to_user(op, &msg_head, NULL, 0);
}

/*
 * bcm_rx_timeout_handler - when the (cyclic) CAN frame reception timed out
 */
static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
{
	struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);

	/* schedule before NET_RX_SOFTIRQ */
	tasklet_hi_schedule(&op->tsklet);

	/* no restart of the timer is done here! */
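	/* it is re-armed by bcm_rx_starttimer() on the next matching reception */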

	/* if the user wants to be informed when cyclic CAN messages come back */
	if ((op->flags & RX_ANNOUNCE_RESUME) && op->last_frames) {
		/* clear received can_frames to indicate 'nothing received' */
		memset(op->last_frames, 0, op->nframes * CFSIZ);
	}

	return HRTIMER_NORESTART;
}

/*
 * bcm_rx_do_flush - helper for bcm_rx_thr_flush
 */
static inline int bcm_rx_do_flush(struct bcm_op *op, int update, int index)
{
	if ((op->last_frames) && (op->last_frames[index].can_dlc & RX_THR)) {
		if (update)
			bcm_rx_changed(op, &op->last_frames[index]);
		return 1;
	}
	return 0;
}

/*
 * bcm_rx_thr_flush - Check for throttled data and send it to the userspace
 *
 * update == 0 : just check if throttled data is available  (any irq context)
 * update == 1 : check and send throttled data to userspace (soft_irq context)
 */
static int bcm_rx_thr_flush(struct bcm_op *op, int update)
{
	int updated = 0;

	if (op->nframes > 1) {
		int i;

		/* for MUX filter we start at index 1 */
		for (i = 1; i < op->nframes; i++)
			updated += bcm_rx_do_flush(op, update, i);

	} else {
		/* for RX_FILTER_ID and simple filter */
		updated += bcm_rx_do_flush(op, update, 0);
	}

	return updated;
}

static void bcm_rx_thr_tsklet(unsigned long data)
{
	struct bcm_op *op = (struct bcm_op *)data;

	/* push the changed data to the userspace */
	bcm_rx_thr_flush(op, 1);
}

/*
 * bcm_rx_thr_handler - the time for blocked content updates is over now:
 *                      Check for throttled data and send it to the userspace
 */
static enum hrtimer_restart bcm_rx_thr_handler(struct hrtimer *hrtimer)
{
	struct bcm_op *op = container_of(hrtimer, struct bcm_op, thrtimer);

	tasklet_schedule(&op->thrtsklet);

	if (bcm_rx_thr_flush(op, 0)) {
		hrtimer_forward(hrtimer, ktime_get(), op->kt_ival2);
		return HRTIMER_RESTART;
	} else {
		/* rearm throttle handling */
		op->kt_lastmsg = ktime_set(0, 0);
		return HRTIMER_NORESTART;
	}
}

/*
 * bcm_rx_handler - handle a CAN frame reception
 */
static void bcm_rx_handler(struct sk_buff *skb, void *data)
{
	struct bcm_op *op = (struct bcm_op *)data;
	const struct can_frame *rxframe = (struct can_frame *)skb->data;
	int i;

	/* disable timeout */
	hrtimer_cancel(&op->timer);

	if (op->can_id != rxframe->can_id)
		return;

	/* save rx timestamp */
	op->rx_stamp = skb->tstamp;
	/* save originator for recvfrom() */
	op->rx_ifindex = skb->dev->ifindex;
	/* update statistics */
	op->frames_abs++;

	if (op->flags & RX_RTR_FRAME) {
		/* send reply for RTR-request (placed in op->frames[0]) */
		bcm_can_tx(op);
		return;
	}

	if (op->flags & RX_FILTER_ID) {
		/* the easiest case */
		bcm_rx_update_and_send(op, &op->last_frames[0], rxframe);
		goto rx_starttimer;
	}

	if (op->nframes == 1) {
		/* simple compare with index 0 */
		bcm_rx_cmp_to_index(op, 0, rxframe);
		goto rx_starttimer;
	}

	if (op->nframes > 1) {
		/*
		 * multiplex compare
		 *
		 * find the first multiplex mask that fits.
		 * Remark: The MUX-mask is stored in index 0
		 */
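
		/*
		 * illustrative example: with a MUX mask of 0xff in
		 * frames[0].data[0], each entry frames[1..nframes-1]
		 * describes one expected value of that multiplexor byte
		 */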

		for (i = 1; i < op->nframes; i++) {
			if ((GET_U64(&op->frames[0]) & GET_U64(rxframe)) ==
			    (GET_U64(&op->frames[0]) &
			     GET_U64(&op->frames[i]))) {
				bcm_rx_cmp_to_index(op, i, rxframe);
				break;
			}
		}
	}

rx_starttimer:
	bcm_rx_starttimer(op);
}

/*
 * helpers for bcm_op handling: find & delete bcm [rx|tx] op elements
 */
static struct bcm_op *bcm_find_op(struct list_head *ops, canid_t can_id,
				  int ifindex)
{
	struct bcm_op *op;

	list_for_each_entry(op, ops, list) {
		if ((op->can_id == can_id) && (op->ifindex == ifindex))
			return op;
	}

	return NULL;
}

static void bcm_remove_op(struct bcm_op *op)
{
	hrtimer_cancel(&op->timer);
	hrtimer_cancel(&op->thrtimer);

	if (op->tsklet.func)
		tasklet_kill(&op->tsklet);

	if (op->thrtsklet.func)
		tasklet_kill(&op->thrtsklet);

	if ((op->frames) && (op->frames != &op->sframe))
		kfree(op->frames);

	if ((op->last_frames) && (op->last_frames != &op->last_sframe))
		kfree(op->last_frames);

	kfree(op);

	return;
}

static void bcm_rx_unreg(struct net_device *dev, struct bcm_op *op)
{
	if (op->rx_reg_dev == dev) {
		can_rx_unregister(dev, op->can_id, REGMASK(op->can_id),
				  bcm_rx_handler, op);

		/* mark subscription as removed */
		op->rx_reg_dev = NULL;
	} else
		printk(KERN_ERR "can-bcm: bcm_rx_unreg: registered device "
		       "mismatch %p %p\n", op->rx_reg_dev, dev);
}

/*
 * bcm_delete_rx_op - find and remove a rx op (returns number of removed ops)
 */
static int bcm_delete_rx_op(struct list_head *ops, canid_t can_id, int ifindex)
{
	struct bcm_op *op, *n;

	list_for_each_entry_safe(op, n, ops, list) {
		if ((op->can_id == can_id) && (op->ifindex == ifindex)) {

			/*
			 * Don't care if we're bound or not (due to netdev
			 * problems) - can_rx_unregister() is always a safe
			 * thing to do here.
			 */
753 */ 754 if (op->ifindex) { 755 /* 756 * Only remove subscriptions that had not 757 * been removed due to NETDEV_UNREGISTER 758 * in bcm_notifier() 759 */ 760 if (op->rx_reg_dev) { 761 struct net_device *dev; 762 763 dev = dev_get_by_index(&init_net, 764 op->ifindex); 765 if (dev) { 766 bcm_rx_unreg(dev, op); 767 dev_put(dev); 768 } 769 } 770 } else 771 can_rx_unregister(NULL, op->can_id, 772 REGMASK(op->can_id), 773 bcm_rx_handler, op); 774 775 list_del(&op->list); 776 bcm_remove_op(op); 777 return 1; /* done */ 778 } 779 } 780 781 return 0; /* not found */ 782 } 783 784 /* 785 * bcm_delete_tx_op - find and remove a tx op (returns number of removed ops) 786 */ 787 static int bcm_delete_tx_op(struct list_head *ops, canid_t can_id, int ifindex) 788 { 789 struct bcm_op *op, *n; 790 791 list_for_each_entry_safe(op, n, ops, list) { 792 if ((op->can_id == can_id) && (op->ifindex == ifindex)) { 793 list_del(&op->list); 794 bcm_remove_op(op); 795 return 1; /* done */ 796 } 797 } 798 799 return 0; /* not found */ 800 } 801 802 /* 803 * bcm_read_op - read out a bcm_op and send it to the user (for bcm_sendmsg) 804 */ 805 static int bcm_read_op(struct list_head *ops, struct bcm_msg_head *msg_head, 806 int ifindex) 807 { 808 struct bcm_op *op = bcm_find_op(ops, msg_head->can_id, ifindex); 809 810 if (!op) 811 return -EINVAL; 812 813 /* put current values into msg_head */ 814 msg_head->flags = op->flags; 815 msg_head->count = op->count; 816 msg_head->ival1 = op->ival1; 817 msg_head->ival2 = op->ival2; 818 msg_head->nframes = op->nframes; 819 820 bcm_send_to_user(op, msg_head, op->frames, 0); 821 822 return MHSIZ; 823 } 824 825 /* 826 * bcm_tx_setup - create or update a bcm tx op (for bcm_sendmsg) 827 */ 828 static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, 829 int ifindex, struct sock *sk) 830 { 831 struct bcm_sock *bo = bcm_sk(sk); 832 struct bcm_op *op; 833 int i, err; 834 835 /* we need a real device to send frames */ 836 if (!ifindex) 837 return -ENODEV; 838 839 /* we need at least one can_frame */ 840 if (msg_head->nframes < 1) 841 return -EINVAL; 842 843 /* check the given can_id */ 844 op = bcm_find_op(&bo->tx_ops, msg_head->can_id, ifindex); 845 846 if (op) { 847 /* update existing BCM operation */ 848 849 /* 850 * Do we need more space for the can_frames than currently 851 * allocated? -> This is a _really_ unusual use-case and 852 * therefore (complexity / locking) it is not supported. 
853 */ 854 if (msg_head->nframes > op->nframes) 855 return -E2BIG; 856 857 /* update can_frames content */ 858 for (i = 0; i < msg_head->nframes; i++) { 859 err = memcpy_fromiovec((u8 *)&op->frames[i], 860 msg->msg_iov, CFSIZ); 861 862 if (op->frames[i].can_dlc > 8) 863 err = -EINVAL; 864 865 if (err < 0) 866 return err; 867 868 if (msg_head->flags & TX_CP_CAN_ID) { 869 /* copy can_id into frame */ 870 op->frames[i].can_id = msg_head->can_id; 871 } 872 } 873 874 } else { 875 /* insert new BCM operation for the given can_id */ 876 877 op = kzalloc(OPSIZ, GFP_KERNEL); 878 if (!op) 879 return -ENOMEM; 880 881 op->can_id = msg_head->can_id; 882 883 /* create array for can_frames and copy the data */ 884 if (msg_head->nframes > 1) { 885 op->frames = kmalloc(msg_head->nframes * CFSIZ, 886 GFP_KERNEL); 887 if (!op->frames) { 888 kfree(op); 889 return -ENOMEM; 890 } 891 } else 892 op->frames = &op->sframe; 893 894 for (i = 0; i < msg_head->nframes; i++) { 895 err = memcpy_fromiovec((u8 *)&op->frames[i], 896 msg->msg_iov, CFSIZ); 897 898 if (op->frames[i].can_dlc > 8) 899 err = -EINVAL; 900 901 if (err < 0) { 902 if (op->frames != &op->sframe) 903 kfree(op->frames); 904 kfree(op); 905 return err; 906 } 907 908 if (msg_head->flags & TX_CP_CAN_ID) { 909 /* copy can_id into frame */ 910 op->frames[i].can_id = msg_head->can_id; 911 } 912 } 913 914 /* tx_ops never compare with previous received messages */ 915 op->last_frames = NULL; 916 917 /* bcm_can_tx / bcm_tx_timeout_handler needs this */ 918 op->sk = sk; 919 op->ifindex = ifindex; 920 921 /* initialize uninitialized (kzalloc) structure */ 922 hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 923 op->timer.function = bcm_tx_timeout_handler; 924 925 /* initialize tasklet for tx countevent notification */ 926 tasklet_init(&op->tsklet, bcm_tx_timeout_tsklet, 927 (unsigned long) op); 928 929 /* currently unused in tx_ops */ 930 hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 931 932 /* add this bcm_op to the list of the tx_ops */ 933 list_add(&op->list, &bo->tx_ops); 934 935 } /* if ((op = bcm_find_op(&bo->tx_ops, msg_head->can_id, ifindex))) */ 936 937 if (op->nframes != msg_head->nframes) { 938 op->nframes = msg_head->nframes; 939 /* start multiple frame transmission with index 0 */ 940 op->currframe = 0; 941 } 942 943 /* check flags */ 944 945 op->flags = msg_head->flags; 946 947 if (op->flags & TX_RESET_MULTI_IDX) { 948 /* start multiple frame transmission with index 0 */ 949 op->currframe = 0; 950 } 951 952 if (op->flags & SETTIMER) { 953 /* set timer values */ 954 op->count = msg_head->count; 955 op->ival1 = msg_head->ival1; 956 op->ival2 = msg_head->ival2; 957 op->kt_ival1 = timeval_to_ktime(msg_head->ival1); 958 op->kt_ival2 = timeval_to_ktime(msg_head->ival2); 959 960 /* disable an active timer due to zero values? 

	if (op->flags & SETTIMER) {
		/* set timer values */
		op->count = msg_head->count;
		op->ival1 = msg_head->ival1;
		op->ival2 = msg_head->ival2;
		op->kt_ival1 = timeval_to_ktime(msg_head->ival1);
		op->kt_ival2 = timeval_to_ktime(msg_head->ival2);

		/* disable an active timer due to zero values? */
		if (!op->kt_ival1.tv64 && !op->kt_ival2.tv64)
			hrtimer_cancel(&op->timer);
	}

	if ((op->flags & STARTTIMER) &&
	    ((op->kt_ival1.tv64 && op->count) || op->kt_ival2.tv64)) {

		/* spec: send can_frame when starting timer */
		op->flags |= TX_ANNOUNCE;

		if (op->kt_ival1.tv64 && (op->count > 0)) {
			/* op->count-- is done in bcm_tx_timeout_handler */
			hrtimer_start(&op->timer, op->kt_ival1,
				      HRTIMER_MODE_REL);
		} else
			hrtimer_start(&op->timer, op->kt_ival2,
				      HRTIMER_MODE_REL);
	}

	if (op->flags & TX_ANNOUNCE)
		bcm_can_tx(op);

	return msg_head->nframes * CFSIZ + MHSIZ;
}

/*
 * bcm_rx_setup - create or update a bcm rx op (for bcm_sendmsg)
 */
static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
			int ifindex, struct sock *sk)
{
	struct bcm_sock *bo = bcm_sk(sk);
	struct bcm_op *op;
	int do_rx_register;
	int err = 0;

	if ((msg_head->flags & RX_FILTER_ID) || (!(msg_head->nframes))) {
		/* be robust against wrong usage ... */
		msg_head->flags |= RX_FILTER_ID;
		/* ignore trailing garbage */
		msg_head->nframes = 0;
	}

	if ((msg_head->flags & RX_RTR_FRAME) &&
	    ((msg_head->nframes != 1) ||
	     (!(msg_head->can_id & CAN_RTR_FLAG))))
		return -EINVAL;

	/* check the given can_id */
	op = bcm_find_op(&bo->rx_ops, msg_head->can_id, ifindex);
	if (op) {
		/* update existing BCM operation */

		/*
		 * Do we need more space for the can_frames than currently
		 * allocated? -> This is a _really_ unusual use-case and
		 * therefore (complexity / locking) it is not supported.
		 */
		if (msg_head->nframes > op->nframes)
			return -E2BIG;

		if (msg_head->nframes) {
			/* update can_frames content */
			err = memcpy_fromiovec((u8 *)op->frames,
					       msg->msg_iov,
					       msg_head->nframes * CFSIZ);
			if (err < 0)
				return err;

			/* clear last_frames to indicate 'nothing received' */
			memset(op->last_frames, 0, msg_head->nframes * CFSIZ);
		}

		op->nframes = msg_head->nframes;

		/* Only an update -> do not call can_rx_register() */
		do_rx_register = 0;

	} else {
		/* insert new BCM operation for the given can_id */
		op = kzalloc(OPSIZ, GFP_KERNEL);
		if (!op)
			return -ENOMEM;

		op->can_id = msg_head->can_id;
		op->nframes = msg_head->nframes;

		if (msg_head->nframes > 1) {
			/* create array for can_frames and copy the data */
			op->frames = kmalloc(msg_head->nframes * CFSIZ,
					     GFP_KERNEL);
			if (!op->frames) {
				kfree(op);
				return -ENOMEM;
			}

			/* create and init array for received can_frames */
			op->last_frames = kzalloc(msg_head->nframes * CFSIZ,
						  GFP_KERNEL);
			if (!op->last_frames) {
				kfree(op->frames);
				kfree(op);
				return -ENOMEM;
			}

		} else {
			op->frames = &op->sframe;
			op->last_frames = &op->last_sframe;
		}

		if (msg_head->nframes) {
			err = memcpy_fromiovec((u8 *)op->frames, msg->msg_iov,
					       msg_head->nframes * CFSIZ);
			if (err < 0) {
				if (op->frames != &op->sframe)
					kfree(op->frames);
				if (op->last_frames != &op->last_sframe)
					kfree(op->last_frames);
				kfree(op);
				return err;
			}
		}

		/* bcm_can_tx / bcm_tx_timeout_handler needs this */
		op->sk = sk;
		op->ifindex = ifindex;
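
		/*
		 * two timer/tasklet pairs per rx op: op->timer/tsklet handle
		 * the reception timeout, op->thrtimer/thrtsklet handle the
		 * throttling of RX_CHANGED updates
		 */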

		/* initialize uninitialized (kzalloc) structure */
		hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		op->timer.function = bcm_rx_timeout_handler;

		/* initialize tasklet for rx timeout notification */
		tasklet_init(&op->tsklet, bcm_rx_timeout_tsklet,
			     (unsigned long) op);

		hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		op->thrtimer.function = bcm_rx_thr_handler;

		/* initialize tasklet for rx throttle handling */
		tasklet_init(&op->thrtsklet, bcm_rx_thr_tsklet,
			     (unsigned long) op);

		/* add this bcm_op to the list of the rx_ops */
		list_add(&op->list, &bo->rx_ops);

		/* call can_rx_register() */
		do_rx_register = 1;

	} /* if ((op = bcm_find_op(&bo->rx_ops, msg_head->can_id, ifindex))) */

	/* check flags */
	op->flags = msg_head->flags;

	if (op->flags & RX_RTR_FRAME) {

		/* no timers in RTR-mode */
		hrtimer_cancel(&op->thrtimer);
		hrtimer_cancel(&op->timer);

		/*
		 * funny feature in RX(!)_SETUP only for RTR-mode:
		 * copy can_id into frame BUT without RTR-flag to
		 * prevent a full-load-loopback-test ... ;-]
		 */
		if ((op->flags & TX_CP_CAN_ID) ||
		    (op->frames[0].can_id == op->can_id))
			op->frames[0].can_id = op->can_id & ~CAN_RTR_FLAG;

	} else {
		if (op->flags & SETTIMER) {

			/* set timer value */
			op->ival1 = msg_head->ival1;
			op->ival2 = msg_head->ival2;
			op->kt_ival1 = timeval_to_ktime(msg_head->ival1);
			op->kt_ival2 = timeval_to_ktime(msg_head->ival2);

			/* disable an active timer due to zero value? */
			if (!op->kt_ival1.tv64)
				hrtimer_cancel(&op->timer);

			/*
			 * In any case cancel the throttle timer, flush
			 * potentially blocked msgs and reset throttle handling
			 */
			op->kt_lastmsg = ktime_set(0, 0);
			hrtimer_cancel(&op->thrtimer);
			bcm_rx_thr_flush(op, 1);
		}

		if ((op->flags & STARTTIMER) && op->kt_ival1.tv64)
			hrtimer_start(&op->timer, op->kt_ival1,
				      HRTIMER_MODE_REL);
	}

	/* now we can register for can_ids, if we added a new bcm_op */
	if (do_rx_register) {
		if (ifindex) {
			struct net_device *dev;

			dev = dev_get_by_index(&init_net, ifindex);
			if (dev) {
				err = can_rx_register(dev, op->can_id,
						      REGMASK(op->can_id),
						      bcm_rx_handler, op,
						      "bcm");

				op->rx_reg_dev = dev;
				dev_put(dev);
			}

		} else
			err = can_rx_register(NULL, op->can_id,
					      REGMASK(op->can_id),
					      bcm_rx_handler, op, "bcm");
		if (err) {
			/* this bcm rx op is broken -> remove it */
			list_del(&op->list);
			bcm_remove_op(op);
			return err;
		}
	}

	return msg_head->nframes * CFSIZ + MHSIZ;
}

/*
 * bcm_tx_send - send a single CAN frame to the CAN interface (for bcm_sendmsg)
 */
static int bcm_tx_send(struct msghdr *msg, int ifindex, struct sock *sk)
{
	struct sk_buff *skb;
	struct net_device *dev;
	int err;

	/* we need a real device to send frames */
	if (!ifindex)
		return -ENODEV;

	skb = alloc_skb(CFSIZ, GFP_KERNEL);

	if (!skb)
		return -ENOMEM;

	err = memcpy_fromiovec(skb_put(skb, CFSIZ), msg->msg_iov, CFSIZ);
	if (err < 0) {
		kfree_skb(skb);
		return err;
	}

	dev = dev_get_by_index(&init_net, ifindex);
	if (!dev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	skb->dev = dev;
	skb->sk = sk;
	err = can_send(skb, 1); /* send with loopback */
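	/* can_send() has consumed the skb - only the device reference is dropped */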
	dev_put(dev);

	if (err)
		return err;

	return CFSIZ + MHSIZ;
}

/*
 * bcm_sendmsg - process BCM commands (opcodes) from the userspace
 */
static int bcm_sendmsg(struct kiocb *iocb, struct socket *sock,
		       struct msghdr *msg, size_t size)
{
	struct sock *sk = sock->sk;
	struct bcm_sock *bo = bcm_sk(sk);
	int ifindex = bo->ifindex; /* default ifindex for this bcm_op */
	struct bcm_msg_head msg_head;
	int ret; /* read bytes or error codes as return value */

	if (!bo->bound)
		return -ENOTCONN;

	/* check for valid message length from userspace */
	if (size < MHSIZ || (size - MHSIZ) % CFSIZ)
		return -EINVAL;

	/* check for alternative ifindex for this bcm_op */

	if (!ifindex && msg->msg_name) {
		/* no bound device as default => check msg_name */
		struct sockaddr_can *addr =
			(struct sockaddr_can *)msg->msg_name;

		if (addr->can_family != AF_CAN)
			return -EINVAL;

		/* ifindex from sendto() */
		ifindex = addr->can_ifindex;

		if (ifindex) {
			struct net_device *dev;

			dev = dev_get_by_index(&init_net, ifindex);
			if (!dev)
				return -ENODEV;

			if (dev->type != ARPHRD_CAN) {
				dev_put(dev);
				return -ENODEV;
			}

			dev_put(dev);
		}
	}

	/* read message head information */

	ret = memcpy_fromiovec((u8 *)&msg_head, msg->msg_iov, MHSIZ);
	if (ret < 0)
		return ret;

	lock_sock(sk);

	switch (msg_head.opcode) {

	case TX_SETUP:
		ret = bcm_tx_setup(&msg_head, msg, ifindex, sk);
		break;

	case RX_SETUP:
		ret = bcm_rx_setup(&msg_head, msg, ifindex, sk);
		break;

	case TX_DELETE:
		if (bcm_delete_tx_op(&bo->tx_ops, msg_head.can_id, ifindex))
			ret = MHSIZ;
		else
			ret = -EINVAL;
		break;

	case RX_DELETE:
		if (bcm_delete_rx_op(&bo->rx_ops, msg_head.can_id, ifindex))
			ret = MHSIZ;
		else
			ret = -EINVAL;
		break;

	case TX_READ:
		/* reuse msg_head for the reply to TX_READ */
		msg_head.opcode = TX_STATUS;
		ret = bcm_read_op(&bo->tx_ops, &msg_head, ifindex);
		break;

	case RX_READ:
		/* reuse msg_head for the reply to RX_READ */
		msg_head.opcode = RX_STATUS;
		ret = bcm_read_op(&bo->rx_ops, &msg_head, ifindex);
		break;

	case TX_SEND:
		/* we need exactly one can_frame behind the msg head */
		if ((msg_head.nframes != 1) || (size != CFSIZ + MHSIZ))
			ret = -EINVAL;
		else
			ret = bcm_tx_send(msg, ifindex, sk);
		break;

	default:
		ret = -EINVAL;
		break;
	}

	release_sock(sk);

	return ret;
}

/*
 * notification handler for netdevice status changes
 */
static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
			void *data)
{
	struct net_device *dev = (struct net_device *)data;
	struct bcm_sock *bo = container_of(nb, struct bcm_sock, notifier);
	struct sock *sk = &bo->sk;
	struct bcm_op *op;
	int notify_enodev = 0;

	if (!net_eq(dev_net(dev), &init_net))
		return NOTIFY_DONE;

	if (dev->type != ARPHRD_CAN)
		return NOTIFY_DONE;

	switch (msg) {

	case NETDEV_UNREGISTER:
		lock_sock(sk);

		/* remove device specific receive entries */
		list_for_each_entry(op, &bo->rx_ops, list)
			if (op->rx_reg_dev == dev)
				bcm_rx_unreg(dev, op);

		/* remove device reference, if this is our bound device */
		if (bo->bound && bo->ifindex == dev->ifindex) {
			bo->bound = 0;
			bo->ifindex = 0;
			notify_enodev = 1;
		}

		release_sock(sk);

		if (notify_enodev) {
			sk->sk_err = ENODEV;
			if (!sock_flag(sk, SOCK_DEAD))
				sk->sk_error_report(sk);
		}
		break;

	case NETDEV_DOWN:
		if (bo->bound && bo->ifindex == dev->ifindex) {
			sk->sk_err = ENETDOWN;
			if (!sock_flag(sk, SOCK_DEAD))
				sk->sk_error_report(sk);
		}
	}

	return NOTIFY_DONE;
}

/*
 * initial settings for all BCM sockets to be set at socket creation time
 */
static int bcm_init(struct sock *sk)
{
	struct bcm_sock *bo = bcm_sk(sk);

	bo->bound = 0;
	bo->ifindex = 0;
	bo->dropped_usr_msgs = 0;
	bo->bcm_proc_read = NULL;

	INIT_LIST_HEAD(&bo->tx_ops);
	INIT_LIST_HEAD(&bo->rx_ops);

	/* set notifier */
	bo->notifier.notifier_call = bcm_notifier;

	register_netdevice_notifier(&bo->notifier);

	return 0;
}

/*
 * standard socket functions
 */
static int bcm_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct bcm_sock *bo = bcm_sk(sk);
	struct bcm_op *op, *next;

	/* remove bcm_ops, timer, rx_unregister(), etc. */

	unregister_netdevice_notifier(&bo->notifier);

	lock_sock(sk);

	list_for_each_entry_safe(op, next, &bo->tx_ops, list)
		bcm_remove_op(op);

	list_for_each_entry_safe(op, next, &bo->rx_ops, list) {
		/*
		 * Don't care if we're bound or not (due to netdev problems),
		 * can_rx_unregister() is always a safe thing to do here.
		 */
		if (op->ifindex) {
			/*
			 * Only remove subscriptions that had not
			 * been removed due to NETDEV_UNREGISTER
			 * in bcm_notifier()
			 */
			if (op->rx_reg_dev) {
				struct net_device *dev;

				dev = dev_get_by_index(&init_net, op->ifindex);
				if (dev) {
					bcm_rx_unreg(dev, op);
					dev_put(dev);
				}
			}
		} else
			can_rx_unregister(NULL, op->can_id,
					  REGMASK(op->can_id),
					  bcm_rx_handler, op);

		bcm_remove_op(op);
	}

	/* remove procfs entry */
	if (proc_dir && bo->bcm_proc_read)
		remove_proc_entry(bo->procname, proc_dir);

	/* remove device reference */
	if (bo->bound) {
		bo->bound = 0;
		bo->ifindex = 0;
	}

	release_sock(sk);
	sock_put(sk);

	return 0;
}

static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
		       int flags)
{
	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
	struct sock *sk = sock->sk;
	struct bcm_sock *bo = bcm_sk(sk);

	if (bo->bound)
		return -EISCONN;

	/* bind a device to this socket */
	if (addr->can_ifindex) {
		struct net_device *dev;

		dev = dev_get_by_index(&init_net, addr->can_ifindex);
		if (!dev)
			return -ENODEV;

		if (dev->type != ARPHRD_CAN) {
			dev_put(dev);
			return -ENODEV;
		}

		bo->ifindex = dev->ifindex;
		dev_put(dev);

	} else {
		/* no interface reference for ifindex = 0 ('any' CAN device) */
		bo->ifindex = 0;
	}

	bo->bound = 1;

	if (proc_dir) {
		/* unique socket address as filename */
		sprintf(bo->procname, "%p", sock);
		bo->bcm_proc_read = create_proc_read_entry(bo->procname, 0644,
							   proc_dir,
							   bcm_read_proc, sk);
	}

	return 0;
}

static int bcm_recvmsg(struct kiocb *iocb, struct socket *sock,
		       struct msghdr *msg, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int error = 0;
	int noblock;
	int err;

	noblock = flags & MSG_DONTWAIT;
	flags &= ~MSG_DONTWAIT;
	skb = skb_recv_datagram(sk, flags, noblock, &error);
	if (!skb)
		return error;

	if (skb->len < size)
		size = skb->len;

	err = memcpy_toiovec(msg->msg_iov, skb->data, size);
	if (err < 0) {
		skb_free_datagram(sk, skb);
		return err;
	}

	sock_recv_timestamp(msg, sk, skb);

	if (msg->msg_name) {
		msg->msg_namelen = sizeof(struct sockaddr_can);
		memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
	}

	skb_free_datagram(sk, skb);

	return size;
}

static struct proto_ops bcm_ops __read_mostly = {
	.family        = PF_CAN,
	.release       = bcm_release,
	.bind          = sock_no_bind,
	.connect       = bcm_connect,
	.socketpair    = sock_no_socketpair,
	.accept        = sock_no_accept,
	.getname       = sock_no_getname,
	.poll          = datagram_poll,
	.ioctl         = NULL,		/* use can_ioctl() from af_can.c */
	.listen        = sock_no_listen,
	.shutdown      = sock_no_shutdown,
	.setsockopt    = sock_no_setsockopt,
	.getsockopt    = sock_no_getsockopt,
	.sendmsg       = bcm_sendmsg,
	.recvmsg       = bcm_recvmsg,
	.mmap          = sock_no_mmap,
	.sendpage      = sock_no_sendpage,
};

static struct proto bcm_proto __read_mostly = {
	.name       = "CAN_BCM",
	.owner      = THIS_MODULE,
	.obj_size   = sizeof(struct bcm_sock),
	.init       = bcm_init,
};

static struct can_proto bcm_can_proto __read_mostly = {
	.type       = SOCK_DGRAM,
	.protocol   = CAN_BCM,
	.capability = -1,
	.ops        = &bcm_ops,
	.prot       = &bcm_proto,
};

static int __init bcm_module_init(void)
{
	int err;

	printk(banner);

	err = can_proto_register(&bcm_can_proto);
	if (err < 0) {
		printk(KERN_ERR "can: registration of bcm protocol failed\n");
		return err;
	}

	/* create /proc/net/can-bcm directory */
	proc_dir = proc_mkdir("can-bcm", init_net.proc_net);
	return 0;
}

static void __exit bcm_module_exit(void)
{
	can_proto_unregister(&bcm_can_proto);

	if (proc_dir)
		proc_net_remove(&init_net, "can-bcm");
}

module_init(bcm_module_init);
module_exit(bcm_module_exit);