// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
 * bcm.c - Broadcast Manager to filter/send (cyclic) CAN content
 *
 * Copyright (c) 2002-2017 Volkswagen Group Electronic Research
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Volkswagen nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * Alternatively, provided that this notice is retained in full, this
 * software may be distributed under the terms of the GNU General
 * Public License ("GPL") version 2, in which case the provisions of the
 * GPL apply INSTEAD OF those given above.
 *
 * The provided data structures and external interfaces from this code
 * are not restricted to be used by modules with a GPL compatible license.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/hrtimer.h>
#include <linux/list.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/uio.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <linux/can.h>
#include <linux/can/core.h>
#include <linux/can/skb.h>
#include <linux/can/bcm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/sock.h>
#include <net/net_namespace.h>

/*
 * To send multiple CAN frame content within TX_SETUP or to filter
 * CAN messages with multiplex index within RX_SETUP, the number of
 * different filters is limited to 256 due to the one byte index value.
 */
#define MAX_NFRAMES 256

/* limit timers to 400 days for sending/timeouts */
#define BCM_TIMER_SEC_MAX (400 * 24 * 60 * 60)

/* use of last_frames[index].flags */
#define RX_RECV 0x40 /* received data for this element */
#define RX_THR 0x80 /* element has not been sent due to throttle feature */
#define BCM_CAN_FLAGS_MASK 0x3F /* to clean private flags after usage */

/* get best masking value for can_rx_register() for a given single can_id */
#define REGMASK(id) ((id & CAN_EFF_FLAG) ? \
		     (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \
		     (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG))
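
/*
 * Illustrative note (a sketch of the intended effect, not used by the code
 * below): for a single standard-frame id such as 0x123, REGMASK() selects
 * CAN_SFF_MASK plus the EFF and RTR flag bits, so the subscription set up
 * via can_rx_register() is meant to deliver only frames whose 11-bit id and
 * frame-type flags exactly match op->can_id to bcm_rx_handler().
 */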

MODULE_DESCRIPTION("PF_CAN broadcast manager protocol");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
MODULE_ALIAS("can-proto-2");

#define BCM_MIN_NAMELEN CAN_REQUIRED_SIZE(struct sockaddr_can, can_ifindex)

/*
 * easy access to the first 64 bit of can(fd)_frame payload. cp->data is
 * 64 bit aligned so the offset has to be multiples of 8 which is ensured
 * by the only callers, bcm_rx_cmp_to_index() and bcm_rx_handler().
 */
static inline u64 get_u64(const struct canfd_frame *cp, int offset)
{
	return *(u64 *)(cp->data + offset);
}

struct bcm_op {
	struct list_head list;
	struct rcu_head rcu;
	int ifindex;
	canid_t can_id;
	u32 flags;
	unsigned long frames_abs, frames_filtered;
	struct bcm_timeval ival1, ival2;
	struct hrtimer timer, thrtimer;
	ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg;
	int rx_ifindex;
	int cfsiz;
	u32 count;
	u32 nframes;
	u32 currframe;
	/* void pointers to arrays of struct can[fd]_frame */
	void *frames;
	void *last_frames;
	struct canfd_frame sframe;
	struct canfd_frame last_sframe;
	struct sock *sk;
	struct net_device *rx_reg_dev;
	spinlock_t bcm_tx_lock; /* protect currframe/count in runtime updates */
};

struct bcm_sock {
	struct sock sk;
	int bound;
	int ifindex;
	struct list_head notifier;
	struct list_head rx_ops;
	struct list_head tx_ops;
	unsigned long dropped_usr_msgs;
	struct proc_dir_entry *bcm_proc_read;
	char procname[32]; /* inode number in decimal with \0 */
};

static LIST_HEAD(bcm_notifier_list);
static DEFINE_SPINLOCK(bcm_notifier_lock);
static struct bcm_sock *bcm_busy_notifier;

static inline struct bcm_sock *bcm_sk(const struct sock *sk)
{
	return (struct bcm_sock *)sk;
}

static inline ktime_t bcm_timeval_to_ktime(struct bcm_timeval tv)
{
	return ktime_set(tv.tv_sec, tv.tv_usec * NSEC_PER_USEC);
}

/* check limitations for timeval provided by user */
static bool bcm_is_invalid_tv(struct bcm_msg_head *msg_head)
{
	if ((msg_head->ival1.tv_sec < 0) ||
	    (msg_head->ival1.tv_sec > BCM_TIMER_SEC_MAX) ||
	    (msg_head->ival1.tv_usec < 0) ||
	    (msg_head->ival1.tv_usec >= USEC_PER_SEC) ||
	    (msg_head->ival2.tv_sec < 0) ||
	    (msg_head->ival2.tv_sec > BCM_TIMER_SEC_MAX) ||
	    (msg_head->ival2.tv_usec < 0) ||
	    (msg_head->ival2.tv_usec >= USEC_PER_SEC))
		return true;

	return false;
}

#define CFSIZ(flags) ((flags & CAN_FD_FRAME) ? CANFD_MTU : CAN_MTU)
#define OPSIZ sizeof(struct bcm_op)
#define MHSIZ sizeof(struct bcm_msg_head)
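
/*
 * Illustration (a minimal userspace sketch, not compiled as part of this
 * file): every BCM datagram exchanged with userspace is a struct
 * bcm_msg_head directly followed by msg_head.nframes CAN frames of
 * CFSIZ(msg_head.flags) bytes each. A cyclic classic CAN TX_SETUP that is
 * sent immediately (STARTTIMER) and then repeated every 100 ms (ival2)
 * could look like this on a connected CAN_BCM socket 's':
 *
 *	struct {
 *		struct bcm_msg_head msg_head;
 *		struct can_frame frame;
 *	} tx_msg = {
 *		.msg_head = {
 *			.opcode  = TX_SETUP,
 *			.flags   = SETTIMER | STARTTIMER,
 *			.ival2   = { .tv_usec = 100000 },
 *			.can_id  = 0x123,
 *			.nframes = 1,
 *		},
 *		.frame = { .can_id = 0x123, .len = 2,
 *			   .data = { 0x11, 0x22 } },
 *	};
 *
 *	write(s, &tx_msg, sizeof(tx_msg));
 */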

/*
 * procfs functions
 */
#if IS_ENABLED(CONFIG_PROC_FS)
static char *bcm_proc_getifname(struct net *net, char *result, int ifindex)
{
	struct net_device *dev;

	if (!ifindex)
		return "any";

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		strcpy(result, dev->name);
	else
		strcpy(result, "???");
	rcu_read_unlock();

	return result;
}

static int bcm_proc_show(struct seq_file *m, void *v)
{
	char ifname[IFNAMSIZ];
	struct net *net = m->private;
	struct sock *sk = (struct sock *)pde_data(m->file->f_inode);
	struct bcm_sock *bo = bcm_sk(sk);
	struct bcm_op *op;

	seq_printf(m, ">>> socket %pK", sk->sk_socket);
	seq_printf(m, " / sk %pK", sk);
	seq_printf(m, " / bo %pK", bo);
	seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
	seq_printf(m, " / bound %s", bcm_proc_getifname(net, ifname, bo->ifindex));
	seq_printf(m, " <<<\n");

	rcu_read_lock();

	list_for_each_entry_rcu(op, &bo->rx_ops, list) {

		unsigned long reduction;

		/* print only active entries & prevent division by zero */
		if (!op->frames_abs)
			continue;

		seq_printf(m, "rx_op: %03X %-5s ", op->can_id,
			   bcm_proc_getifname(net, ifname, op->ifindex));

		if (op->flags & CAN_FD_FRAME)
			seq_printf(m, "(%u)", op->nframes);
		else
			seq_printf(m, "[%u]", op->nframes);

		seq_printf(m, "%c ", (op->flags & RX_CHECK_DLC) ? 'd' : ' ');

		if (op->kt_ival1)
			seq_printf(m, "timeo=%lld ",
				   (long long)ktime_to_us(op->kt_ival1));

		if (op->kt_ival2)
			seq_printf(m, "thr=%lld ",
				   (long long)ktime_to_us(op->kt_ival2));

		seq_printf(m, "# recv %ld (%ld) => reduction: ",
			   op->frames_filtered, op->frames_abs);

		reduction = 100 - (op->frames_filtered * 100) / op->frames_abs;

		seq_printf(m, "%s%ld%%\n",
			   (reduction == 100) ? "near " : "", reduction);
	}

	list_for_each_entry(op, &bo->tx_ops, list) {

		seq_printf(m, "tx_op: %03X %s ", op->can_id,
			   bcm_proc_getifname(net, ifname, op->ifindex));

		if (op->flags & CAN_FD_FRAME)
			seq_printf(m, "(%u) ", op->nframes);
		else
			seq_printf(m, "[%u] ", op->nframes);

		if (op->kt_ival1)
			seq_printf(m, "t1=%lld ",
				   (long long)ktime_to_us(op->kt_ival1));

		if (op->kt_ival2)
			seq_printf(m, "t2=%lld ",
				   (long long)ktime_to_us(op->kt_ival2));

		seq_printf(m, "# sent %ld\n", op->frames_abs);
	}
	seq_putc(m, '\n');

	rcu_read_unlock();

	return 0;
}
#endif /* CONFIG_PROC_FS */

/*
 * bcm_can_tx - send the (next) CAN frame to the appropriate CAN interface
 *              of the given bcm tx op
 */
static void bcm_can_tx(struct bcm_op *op)
{
	struct sk_buff *skb;
	struct net_device *dev;
	struct canfd_frame *cf;
	int err;

	/* no target device? => exit */
	if (!op->ifindex)
		return;

	/* read currframe under lock protection */
	spin_lock_bh(&op->bcm_tx_lock);
	cf = op->frames + op->cfsiz * op->currframe;
	spin_unlock_bh(&op->bcm_tx_lock);

	dev = dev_get_by_index(sock_net(op->sk), op->ifindex);
	if (!dev) {
		/* RFC: should this bcm_op remove itself here? */
		return;
	}

	skb = alloc_skb(op->cfsiz + sizeof(struct can_skb_priv), gfp_any());
	if (!skb)
		goto out;

	can_skb_reserve(skb);
	can_skb_prv(skb)->ifindex = dev->ifindex;
	can_skb_prv(skb)->skbcnt = 0;

	skb_put_data(skb, cf, op->cfsiz);

	/* send with loopback */
	skb->dev = dev;
	can_skb_set_owner(skb, op->sk);
	err = can_send(skb, 1);

	/* update currframe and count under lock protection */
	spin_lock_bh(&op->bcm_tx_lock);

	if (!err)
		op->frames_abs++;

	op->currframe++;

	/* reached last frame? */
	if (op->currframe >= op->nframes)
		op->currframe = 0;

	if (op->count > 0)
		op->count--;

	spin_unlock_bh(&op->bcm_tx_lock);
out:
	dev_put(dev);
}

/*
 * bcm_send_to_user - send a BCM message to the userspace
 *                    (consisting of bcm_msg_head + x CAN frames)
 */
static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
			     struct canfd_frame *frames, int has_timestamp)
{
	struct sk_buff *skb;
	struct canfd_frame *firstframe;
	struct sockaddr_can *addr;
	struct sock *sk = op->sk;
	unsigned int datalen = head->nframes * op->cfsiz;
	int err;

	skb = alloc_skb(sizeof(*head) + datalen, gfp_any());
	if (!skb)
		return;

	skb_put_data(skb, head, sizeof(*head));

	if (head->nframes) {
		/* CAN frames starting here */
		firstframe = (struct canfd_frame *)skb_tail_pointer(skb);

		skb_put_data(skb, frames, datalen);

		/*
		 * the BCM uses the flags-element of the canfd_frame
		 * structure for internal purposes. This is only
		 * relevant for updates that are generated by the
		 * BCM, where nframes is 1
		 */
		if (head->nframes == 1)
			firstframe->flags &= BCM_CAN_FLAGS_MASK;
	}

	if (has_timestamp) {
		/* restore rx timestamp */
		skb->tstamp = op->rx_stamp;
	}

	/*
	 * Put the datagram to the queue so that bcm_recvmsg() can
	 * get it from there. We need to pass the interface index to
	 * bcm_recvmsg(). We pass a whole struct sockaddr_can in skb->cb
	 * containing the interface index.
	 */

	sock_skb_cb_check_size(sizeof(struct sockaddr_can));
	addr = (struct sockaddr_can *)skb->cb;
	memset(addr, 0, sizeof(*addr));
	addr->can_family = AF_CAN;
	addr->can_ifindex = op->rx_ifindex;

	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0) {
		struct bcm_sock *bo = bcm_sk(sk);

		kfree_skb(skb);
		/* don't care about overflows in this statistic */
		bo->dropped_usr_msgs++;
	}
}

static bool bcm_tx_set_expiry(struct bcm_op *op, struct hrtimer *hrt)
{
	ktime_t ival;

	if (op->kt_ival1 && op->count)
		ival = op->kt_ival1;
	else if (op->kt_ival2)
		ival = op->kt_ival2;
	else
		return false;

	hrtimer_set_expires(hrt, ktime_add(ktime_get(), ival));
	return true;
}

static void bcm_tx_start_timer(struct bcm_op *op)
{
	if (bcm_tx_set_expiry(op, &op->timer))
		hrtimer_start_expires(&op->timer, HRTIMER_MODE_ABS_SOFT);
}

/* bcm_tx_timeout_handler - performs cyclic CAN frame transmissions */
static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
{
	struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
	struct bcm_msg_head msg_head;

	if (op->kt_ival1 && (op->count > 0)) {
		bcm_can_tx(op);
		if (!op->count && (op->flags & TX_COUNTEVT)) {

			/* create notification to user */
			memset(&msg_head, 0, sizeof(msg_head));
			msg_head.opcode = TX_EXPIRED;
			msg_head.flags = op->flags;
			msg_head.count = op->count;
			msg_head.ival1 = op->ival1;
			msg_head.ival2 = op->ival2;
			msg_head.can_id = op->can_id;
			msg_head.nframes = 0;

			bcm_send_to_user(op, &msg_head, NULL, 0);
		}

	} else if (op->kt_ival2) {
		bcm_can_tx(op);
	}

	return bcm_tx_set_expiry(op, &op->timer) ?
		HRTIMER_RESTART : HRTIMER_NORESTART;
}
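
/*
 * Worked example for the two timer phases above: with SETTIMER | STARTTIMER,
 * a non-zero count and ival1 = 10 ms / ival2 = 100 ms, the frame is repeated
 * at the ival1 rate while op->count counts down and at the ival2 rate
 * afterwards (until the op is deleted or updated); TX_COUNTEVT additionally
 * requests a TX_EXPIRED notification when the count phase ends.
 */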

/*
 * bcm_rx_changed - create a RX_CHANGED notification due to changed content
 */
static void bcm_rx_changed(struct bcm_op *op, struct canfd_frame *data)
{
	struct bcm_msg_head head;

	/* update statistics */
	op->frames_filtered++;

	/* prevent statistics overflow */
	if (op->frames_filtered > ULONG_MAX/100)
		op->frames_filtered = op->frames_abs = 0;

	/* this element is not throttled anymore */
	data->flags &= (BCM_CAN_FLAGS_MASK|RX_RECV);

	memset(&head, 0, sizeof(head));
	head.opcode = RX_CHANGED;
	head.flags = op->flags;
	head.count = op->count;
	head.ival1 = op->ival1;
	head.ival2 = op->ival2;
	head.can_id = op->can_id;
	head.nframes = 1;

	bcm_send_to_user(op, &head, data, 1);
}

/*
 * bcm_rx_update_and_send - process a detected relevant receive content change
 *                          1. update the last received data
 *                          2. send a notification to the user (if possible)
 */
static void bcm_rx_update_and_send(struct bcm_op *op,
				   struct canfd_frame *lastdata,
				   const struct canfd_frame *rxdata)
{
	memcpy(lastdata, rxdata, op->cfsiz);

	/* mark as used and throttled by default */
	lastdata->flags |= (RX_RECV|RX_THR);

	/* throttling mode inactive ? */
	if (!op->kt_ival2) {
		/* send RX_CHANGED to the user immediately */
		bcm_rx_changed(op, lastdata);
		return;
	}

	/* with active throttling timer we are just done here */
	if (hrtimer_active(&op->thrtimer))
		return;

	/* first reception with enabled throttling mode */
	if (!op->kt_lastmsg)
		goto rx_changed_settime;

	/* got a second frame inside a potential throttle period? */
	if (ktime_us_delta(ktime_get(), op->kt_lastmsg) <
	    ktime_to_us(op->kt_ival2)) {
		/* do not send the saved data - only start throttle timer */
		hrtimer_start(&op->thrtimer,
			      ktime_add(op->kt_lastmsg, op->kt_ival2),
			      HRTIMER_MODE_ABS_SOFT);
		return;
	}

	/* the gap was big enough that throttling was not needed here */
rx_changed_settime:
	bcm_rx_changed(op, lastdata);
	op->kt_lastmsg = ktime_get();
}
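
/*
 * Worked example for the throttling above: with kt_ival2 = 100 ms the first
 * content change is reported immediately and kt_lastmsg is set. A further
 * change arriving within those 100 ms is only stored in last_frames[] and
 * marked RX_THR; the throttle timer then fires at kt_lastmsg + kt_ival2 and
 * bcm_rx_thr_handler() delivers the pending update, so userspace roughly
 * sees at most one RX_CHANGED per filter element within each throttle
 * interval.
 */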

/*
 * bcm_rx_cmp_to_index - (bit)compares the currently received data to formerly
 *                       received data stored in op->last_frames[]
 */
static void bcm_rx_cmp_to_index(struct bcm_op *op, unsigned int index,
				const struct canfd_frame *rxdata)
{
	struct canfd_frame *cf = op->frames + op->cfsiz * index;
	struct canfd_frame *lcf = op->last_frames + op->cfsiz * index;
	int i;

	/*
	 * no one uses the MSBs of flags for comparison,
	 * so we use them here to detect the first time of reception
	 */

	if (!(lcf->flags & RX_RECV)) {
		/* received data for the first time => send update to user */
		bcm_rx_update_and_send(op, lcf, rxdata);
		return;
	}

	/* do a real check in CAN frame data section */
	for (i = 0; i < rxdata->len; i += 8) {
		if ((get_u64(cf, i) & get_u64(rxdata, i)) !=
		    (get_u64(cf, i) & get_u64(lcf, i))) {
			bcm_rx_update_and_send(op, lcf, rxdata);
			return;
		}
	}

	if (op->flags & RX_CHECK_DLC) {
		/* do a real check in CAN frame length */
		if (rxdata->len != lcf->len) {
			bcm_rx_update_and_send(op, lcf, rxdata);
			return;
		}
	}
}
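
/*
 * Worked example for the masking above: the user-provided frame at
 * op->frames[index] acts as a bit mask for change detection. If its data[0]
 * is 0xF0 (all other bytes zero), only changes in the upper nibble of the
 * received data[0] are relevant: 0x4A -> 0x4F is ignored, while
 * 0x4A -> 0x5A triggers bcm_rx_update_and_send() and thus an RX_CHANGED.
 */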

/*
 * bcm_rx_starttimer - enable timeout monitoring for CAN frame reception
 */
static void bcm_rx_starttimer(struct bcm_op *op)
{
	if (op->flags & RX_NO_AUTOTIMER)
		return;

	if (op->kt_ival1)
		hrtimer_start(&op->timer, op->kt_ival1, HRTIMER_MODE_REL_SOFT);
}

/* bcm_rx_timeout_handler - when the (cyclic) CAN frame reception timed out */
static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
{
	struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
	struct bcm_msg_head msg_head;

	/* if the user wants to be informed when cyclic CAN messages come back */
	if ((op->flags & RX_ANNOUNCE_RESUME) && op->last_frames) {
		/* clear received CAN frames to indicate 'nothing received' */
		memset(op->last_frames, 0, op->nframes * op->cfsiz);
	}

	/* create notification to user */
	memset(&msg_head, 0, sizeof(msg_head));
	msg_head.opcode = RX_TIMEOUT;
	msg_head.flags = op->flags;
	msg_head.count = op->count;
	msg_head.ival1 = op->ival1;
	msg_head.ival2 = op->ival2;
	msg_head.can_id = op->can_id;
	msg_head.nframes = 0;

	bcm_send_to_user(op, &msg_head, NULL, 0);

	return HRTIMER_NORESTART;
}

/*
 * bcm_rx_do_flush - helper for bcm_rx_thr_flush
 */
static inline int bcm_rx_do_flush(struct bcm_op *op, unsigned int index)
{
	struct canfd_frame *lcf = op->last_frames + op->cfsiz * index;

	if ((op->last_frames) && (lcf->flags & RX_THR)) {
		bcm_rx_changed(op, lcf);
		return 1;
	}
	return 0;
}

/*
 * bcm_rx_thr_flush - Check for throttled data and send it to the userspace
 */
static int bcm_rx_thr_flush(struct bcm_op *op)
{
	int updated = 0;

	if (op->nframes > 1) {
		unsigned int i;

		/* for MUX filter we start at index 1 */
		for (i = 1; i < op->nframes; i++)
			updated += bcm_rx_do_flush(op, i);

	} else {
		/* for RX_FILTER_ID and simple filter */
		updated += bcm_rx_do_flush(op, 0);
	}

	return updated;
}

/*
 * bcm_rx_thr_handler - the time for blocked content updates is over now:
 *                      Check for throttled data and send it to the userspace
 */
static enum hrtimer_restart bcm_rx_thr_handler(struct hrtimer *hrtimer)
{
	struct bcm_op *op = container_of(hrtimer, struct bcm_op, thrtimer);

	if (bcm_rx_thr_flush(op)) {
		hrtimer_forward_now(hrtimer, op->kt_ival2);
		return HRTIMER_RESTART;
	} else {
		/* rearm throttle handling */
		op->kt_lastmsg = 0;
		return HRTIMER_NORESTART;
	}
}

/*
 * bcm_rx_handler - handle a CAN frame reception
 */
static void bcm_rx_handler(struct sk_buff *skb, void *data)
{
	struct bcm_op *op = (struct bcm_op *)data;
	const struct canfd_frame *rxframe = (struct canfd_frame *)skb->data;
	unsigned int i;

	if (op->can_id != rxframe->can_id)
		return;

	/* make sure to handle the correct frame type (CAN / CAN FD) */
	if (op->flags & CAN_FD_FRAME) {
		if (!can_is_canfd_skb(skb))
			return;
	} else {
		if (!can_is_can_skb(skb))
			return;
	}

	/* disable timeout */
	hrtimer_cancel(&op->timer);

	/* save rx timestamp */
	op->rx_stamp = skb->tstamp;
	/* save originator for recvfrom() */
	op->rx_ifindex = skb->dev->ifindex;
	/* update statistics */
	op->frames_abs++;

	if (op->flags & RX_RTR_FRAME) {
		/* send reply for RTR-request (placed in op->frames[0]) */
		bcm_can_tx(op);
		return;
	}

	if (op->flags & RX_FILTER_ID) {
		/* the easiest case */
		bcm_rx_update_and_send(op, op->last_frames, rxframe);
		goto rx_starttimer;
	}

	if (op->nframes == 1) {
		/* simple compare with index 0 */
		bcm_rx_cmp_to_index(op, 0, rxframe);
		goto rx_starttimer;
	}

	if (op->nframes > 1) {
		/*
		 * multiplex compare
		 *
		 * find the first multiplex mask that fits.
		 * Remark: The MUX-mask is stored in index 0 - but only the
		 * first 64 bits of the frame data[] are relevant (CAN FD)
		 */

		for (i = 1; i < op->nframes; i++) {
			if ((get_u64(op->frames, 0) & get_u64(rxframe, 0)) ==
			    (get_u64(op->frames, 0) &
			     get_u64(op->frames + op->cfsiz * i, 0))) {
				bcm_rx_cmp_to_index(op, i, rxframe);
				break;
			}
		}
	}

rx_starttimer:
	bcm_rx_starttimer(op);
}

/*
 * helpers for bcm_op handling: find & delete bcm [rx|tx] op elements
 */
static struct bcm_op *bcm_find_op(struct list_head *ops,
				  struct bcm_msg_head *mh, int ifindex)
{
	struct bcm_op *op;

	list_for_each_entry(op, ops, list) {
		if ((op->can_id == mh->can_id) && (op->ifindex == ifindex) &&
		    (op->flags & CAN_FD_FRAME) == (mh->flags & CAN_FD_FRAME))
			return op;
	}

	return NULL;
}

static void bcm_free_op_rcu(struct rcu_head *rcu_head)
{
	struct bcm_op *op = container_of(rcu_head, struct bcm_op, rcu);

	if ((op->frames) && (op->frames != &op->sframe))
		kfree(op->frames);

	if ((op->last_frames) && (op->last_frames != &op->last_sframe))
		kfree(op->last_frames);

	kfree(op);
}

static void bcm_remove_op(struct bcm_op *op)
{
	hrtimer_cancel(&op->timer);
	hrtimer_cancel(&op->thrtimer);

	call_rcu(&op->rcu, bcm_free_op_rcu);
}

static void bcm_rx_unreg(struct net_device *dev, struct bcm_op *op)
{
	if (op->rx_reg_dev == dev) {
		can_rx_unregister(dev_net(dev), dev, op->can_id,
				  REGMASK(op->can_id), bcm_rx_handler, op);

		/* mark as removed subscription */
		op->rx_reg_dev = NULL;
	} else
		printk(KERN_ERR "can-bcm: bcm_rx_unreg: registered device "
		       "mismatch %p %p\n", op->rx_reg_dev, dev);
}

/*
 * bcm_delete_rx_op - find and remove a rx op (returns number of removed ops)
 */
static int bcm_delete_rx_op(struct list_head *ops, struct bcm_msg_head *mh,
			    int ifindex)
{
	struct bcm_op *op, *n;

	list_for_each_entry_safe(op, n, ops, list) {
		if ((op->can_id == mh->can_id) && (op->ifindex == ifindex) &&
		    (op->flags & CAN_FD_FRAME) == (mh->flags & CAN_FD_FRAME)) {

			/* disable automatic timer on frame reception */
			op->flags |= RX_NO_AUTOTIMER;

			/*
			 * Don't care if we're bound or not (due to netdev
			 * problems) can_rx_unregister() is always a safe
			 * thing to do here.
			 */
			if (op->ifindex) {
				/*
				 * Only remove subscriptions that had not
				 * been removed due to NETDEV_UNREGISTER
				 * in bcm_notifier()
				 */
				if (op->rx_reg_dev) {
					struct net_device *dev;

					dev = dev_get_by_index(sock_net(op->sk),
							       op->ifindex);
					if (dev) {
						bcm_rx_unreg(dev, op);
						dev_put(dev);
					}
				}
			} else
				can_rx_unregister(sock_net(op->sk), NULL,
						  op->can_id,
						  REGMASK(op->can_id),
						  bcm_rx_handler, op);

			list_del_rcu(&op->list);
			bcm_remove_op(op);
			return 1; /* done */
		}
	}

	return 0; /* not found */
}

/*
 * bcm_delete_tx_op - find and remove a tx op (returns number of removed ops)
 */
static int bcm_delete_tx_op(struct list_head *ops, struct bcm_msg_head *mh,
			    int ifindex)
{
	struct bcm_op *op, *n;

	list_for_each_entry_safe(op, n, ops, list) {
		if ((op->can_id == mh->can_id) && (op->ifindex == ifindex) &&
		    (op->flags & CAN_FD_FRAME) == (mh->flags & CAN_FD_FRAME)) {
			list_del_rcu(&op->list);
			bcm_remove_op(op);
			return 1; /* done */
		}
	}

	return 0; /* not found */
}

/*
 * bcm_read_op - read out a bcm_op and send it to the user (for bcm_sendmsg)
 */
static int bcm_read_op(struct list_head *ops, struct bcm_msg_head *msg_head,
		       int ifindex)
{
	struct bcm_op *op = bcm_find_op(ops, msg_head, ifindex);

	if (!op)
		return -EINVAL;

	/* put current values into msg_head */
	msg_head->flags = op->flags;
	msg_head->count = op->count;
	msg_head->ival1 = op->ival1;
	msg_head->ival2 = op->ival2;
	msg_head->nframes = op->nframes;

	bcm_send_to_user(op, msg_head, op->frames, 0);

	return MHSIZ;
}

/*
 * bcm_tx_setup - create or update a bcm tx op (for bcm_sendmsg)
 */
static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
			int ifindex, struct sock *sk)
{
	struct bcm_sock *bo = bcm_sk(sk);
	struct bcm_op *op;
	struct canfd_frame *cf;
	unsigned int i;
	int err;

	/* we need a real device to send frames */
	if (!ifindex)
		return -ENODEV;

	/* check nframes boundaries - we need at least one CAN frame */
	if (msg_head->nframes < 1 || msg_head->nframes > MAX_NFRAMES)
		return -EINVAL;

	/* check timeval limitations */
	if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head))
		return -EINVAL;

	/* check the given can_id */
	op = bcm_find_op(&bo->tx_ops, msg_head, ifindex);
	if (op) {
		/* update existing BCM operation */

		/*
		 * Do we need more space for the CAN frames than currently
		 * allocated? -> This is a _really_ unusual use-case and
		 * therefore (complexity / locking) it is not supported.
		 */
		if (msg_head->nframes > op->nframes)
			return -E2BIG;

		/* update CAN frames content */
		for (i = 0; i < msg_head->nframes; i++) {

			cf = op->frames + op->cfsiz * i;
			err = memcpy_from_msg((u8 *)cf, msg, op->cfsiz);

			if (op->flags & CAN_FD_FRAME) {
				if (cf->len > 64)
					err = -EINVAL;
			} else {
				if (cf->len > 8)
					err = -EINVAL;
			}

			if (err < 0)
				return err;

			if (msg_head->flags & TX_CP_CAN_ID) {
				/* copy can_id into frame */
				cf->can_id = msg_head->can_id;
			}
		}
		op->flags = msg_head->flags;

		/* only lock for unlikely count/nframes/currframe changes */
		if (op->nframes != msg_head->nframes ||
		    op->flags & TX_RESET_MULTI_IDX ||
		    op->flags & SETTIMER) {

			spin_lock_bh(&op->bcm_tx_lock);

			if (op->nframes != msg_head->nframes ||
			    op->flags & TX_RESET_MULTI_IDX) {
				/* potentially update changed nframes */
				op->nframes = msg_head->nframes;
				/* restart multiple frame transmission */
				op->currframe = 0;
			}

			if (op->flags & SETTIMER)
				op->count = msg_head->count;

			spin_unlock_bh(&op->bcm_tx_lock);
		}

	} else {
		/* insert new BCM operation for the given can_id */

		op = kzalloc(OPSIZ, GFP_KERNEL);
		if (!op)
			return -ENOMEM;

		spin_lock_init(&op->bcm_tx_lock);
		op->can_id = msg_head->can_id;
		op->cfsiz = CFSIZ(msg_head->flags);
		op->flags = msg_head->flags;
		op->nframes = msg_head->nframes;

		if (op->flags & SETTIMER)
			op->count = msg_head->count;

		/* create array for CAN frames and copy the data */
		if (msg_head->nframes > 1) {
			op->frames = kmalloc_array(msg_head->nframes,
						   op->cfsiz,
						   GFP_KERNEL);
			if (!op->frames) {
				kfree(op);
				return -ENOMEM;
			}
		} else
			op->frames = &op->sframe;

		for (i = 0; i < msg_head->nframes; i++) {

			cf = op->frames + op->cfsiz * i;
			err = memcpy_from_msg((u8 *)cf, msg, op->cfsiz);
			if (err < 0)
				goto free_op;

			if (op->flags & CAN_FD_FRAME) {
				if (cf->len > 64)
					err = -EINVAL;
			} else {
				if (cf->len > 8)
					err = -EINVAL;
			}

			if (err < 0)
				goto free_op;

			if (msg_head->flags & TX_CP_CAN_ID) {
				/* copy can_id into frame */
				cf->can_id = msg_head->can_id;
			}
		}

		/* tx_ops never compare with previous received messages */
		op->last_frames = NULL;

		/* bcm_can_tx / bcm_tx_timeout_handler needs this */
		op->sk = sk;
		op->ifindex = ifindex;

		/* initialize uninitialized (kzalloc) structure */
		hrtimer_init(&op->timer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL_SOFT);
		op->timer.function = bcm_tx_timeout_handler;

		/* currently unused in tx_ops */
		hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL_SOFT);

		/* add this bcm_op to the list of the tx_ops */
		list_add(&op->list, &bo->tx_ops);

	} /* if ((op = bcm_find_op(&bo->tx_ops, msg_head->can_id, ifindex))) */

	if (op->flags & SETTIMER) {
		/* set timer values */
		op->ival1 = msg_head->ival1;
		op->ival2 = msg_head->ival2;
		op->kt_ival1 = bcm_timeval_to_ktime(msg_head->ival1);
		op->kt_ival2 = bcm_timeval_to_ktime(msg_head->ival2);

		/* disable an active timer due to zero values? */
		if (!op->kt_ival1 && !op->kt_ival2)
			hrtimer_cancel(&op->timer);
	}

	if (op->flags & STARTTIMER) {
		hrtimer_cancel(&op->timer);
		/* spec: send CAN frame when starting timer */
		op->flags |= TX_ANNOUNCE;
	}

	if (op->flags & TX_ANNOUNCE)
		bcm_can_tx(op);

	if (op->flags & STARTTIMER)
		bcm_tx_start_timer(op);

	return msg_head->nframes * op->cfsiz + MHSIZ;

free_op:
	if (op->frames != &op->sframe)
		kfree(op->frames);
	kfree(op);
	return err;
}

/*
 * bcm_rx_setup - create or update a bcm rx op (for bcm_sendmsg)
 */
static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
			int ifindex, struct sock *sk)
{
	struct bcm_sock *bo = bcm_sk(sk);
	struct bcm_op *op;
	int do_rx_register;
	int err = 0;

	if ((msg_head->flags & RX_FILTER_ID) || (!(msg_head->nframes))) {
		/* be robust against wrong usage ... */
		msg_head->flags |= RX_FILTER_ID;
		/* ignore trailing garbage */
		msg_head->nframes = 0;
	}

	/* the first element contains the mux-mask => MAX_NFRAMES + 1 */
	if (msg_head->nframes > MAX_NFRAMES + 1)
		return -EINVAL;

	if ((msg_head->flags & RX_RTR_FRAME) &&
	    ((msg_head->nframes != 1) ||
	     (!(msg_head->can_id & CAN_RTR_FLAG))))
		return -EINVAL;

	/* check timeval limitations */
	if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head))
		return -EINVAL;

	/* check the given can_id */
	op = bcm_find_op(&bo->rx_ops, msg_head, ifindex);
	if (op) {
		/* update existing BCM operation */

		/*
		 * Do we need more space for the CAN frames than currently
		 * allocated? -> This is a _really_ unusual use-case and
		 * therefore (complexity / locking) it is not supported.
		 */
		if (msg_head->nframes > op->nframes)
			return -E2BIG;

		if (msg_head->nframes) {
			/* update CAN frames content */
			err = memcpy_from_msg(op->frames, msg,
					      msg_head->nframes * op->cfsiz);
			if (err < 0)
				return err;

			/* clear last_frames to indicate 'nothing received' */
			memset(op->last_frames, 0, msg_head->nframes * op->cfsiz);
		}

		op->nframes = msg_head->nframes;
		op->flags = msg_head->flags;

		/* Only an update -> do not call can_rx_register() */
		do_rx_register = 0;

	} else {
		/* insert new BCM operation for the given can_id */
		op = kzalloc(OPSIZ, GFP_KERNEL);
		if (!op)
			return -ENOMEM;

		op->can_id = msg_head->can_id;
		op->nframes = msg_head->nframes;
		op->cfsiz = CFSIZ(msg_head->flags);
		op->flags = msg_head->flags;

		if (msg_head->nframes > 1) {
			/* create array for CAN frames and copy the data */
			op->frames = kmalloc_array(msg_head->nframes,
						   op->cfsiz,
						   GFP_KERNEL);
			if (!op->frames) {
				kfree(op);
				return -ENOMEM;
			}

			/* create and init array for received CAN frames */
			op->last_frames = kcalloc(msg_head->nframes,
						  op->cfsiz,
						  GFP_KERNEL);
			if (!op->last_frames) {
				kfree(op->frames);
				kfree(op);
				return -ENOMEM;
			}

		} else {
			op->frames = &op->sframe;
			op->last_frames = &op->last_sframe;
		}

		if (msg_head->nframes) {
			err = memcpy_from_msg(op->frames, msg,
					      msg_head->nframes * op->cfsiz);
			if (err < 0) {
				if (op->frames != &op->sframe)
					kfree(op->frames);
				if (op->last_frames != &op->last_sframe)
					kfree(op->last_frames);
				kfree(op);
				return err;
			}
		}

		/* bcm_can_tx / bcm_tx_timeout_handler needs this */
		op->sk = sk;
		op->ifindex = ifindex;

		/* ifindex for timeout events w/o previous frame reception */
		op->rx_ifindex = ifindex;

		/* initialize uninitialized (kzalloc) structure */
		hrtimer_init(&op->timer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL_SOFT);
		op->timer.function = bcm_rx_timeout_handler;

		hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL_SOFT);
		op->thrtimer.function = bcm_rx_thr_handler;

		/* add this bcm_op to the list of the rx_ops */
		list_add(&op->list, &bo->rx_ops);

		/* call can_rx_register() */
		do_rx_register = 1;

	} /* if ((op = bcm_find_op(&bo->rx_ops, msg_head->can_id, ifindex))) */

	/* check flags */

	if (op->flags & RX_RTR_FRAME) {
		struct canfd_frame *frame0 = op->frames;

		/* no timers in RTR-mode */
		hrtimer_cancel(&op->thrtimer);
		hrtimer_cancel(&op->timer);

		/*
		 * funny feature in RX(!)_SETUP only for RTR-mode:
		 * copy can_id into frame BUT without RTR-flag to
		 * prevent a full-load-loopback-test ... ;-]
		 */
		if ((op->flags & TX_CP_CAN_ID) ||
		    (frame0->can_id == op->can_id))
			frame0->can_id = op->can_id & ~CAN_RTR_FLAG;

	} else {
		if (op->flags & SETTIMER) {

			/* set timer value */
			op->ival1 = msg_head->ival1;
			op->ival2 = msg_head->ival2;
			op->kt_ival1 = bcm_timeval_to_ktime(msg_head->ival1);
			op->kt_ival2 = bcm_timeval_to_ktime(msg_head->ival2);

			/* disable an active timer due to zero value? */
			if (!op->kt_ival1)
				hrtimer_cancel(&op->timer);

			/*
			 * In any case cancel the throttle timer, flush
			 * potentially blocked msgs and reset throttle handling
			 */
			op->kt_lastmsg = 0;
			hrtimer_cancel(&op->thrtimer);
			bcm_rx_thr_flush(op);
		}

		if ((op->flags & STARTTIMER) && op->kt_ival1)
			hrtimer_start(&op->timer, op->kt_ival1,
				      HRTIMER_MODE_REL_SOFT);
	}

	/* now we can register for can_ids, if we added a new bcm_op */
	if (do_rx_register) {
		if (ifindex) {
			struct net_device *dev;

			dev = dev_get_by_index(sock_net(sk), ifindex);
			if (dev) {
				err = can_rx_register(sock_net(sk), dev,
						      op->can_id,
						      REGMASK(op->can_id),
						      bcm_rx_handler, op,
						      "bcm", sk);

				op->rx_reg_dev = dev;
				dev_put(dev);
			}

		} else
			err = can_rx_register(sock_net(sk), NULL, op->can_id,
					      REGMASK(op->can_id),
					      bcm_rx_handler, op, "bcm", sk);
		if (err) {
			/* this bcm rx op is broken -> remove it */
			list_del_rcu(&op->list);
			bcm_remove_op(op);
			return err;
		}
	}

	return msg_head->nframes * op->cfsiz + MHSIZ;
}
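
/*
 * Illustration (a minimal userspace sketch, not compiled as part of this
 * file): an RX_SETUP that passes every received frame with CAN id 0x123 to
 * userspace (RX_FILTER_ID) and additionally reports RX_TIMEOUT after 1 s of
 * silence could be written to a connected CAN_BCM socket 's' as:
 *
 *	struct bcm_msg_head rx_msg = {
 *		.opcode  = RX_SETUP,
 *		.flags   = SETTIMER | STARTTIMER | RX_FILTER_ID,
 *		.ival1   = { .tv_sec = 1 },
 *		.can_id  = 0x123,
 *		.nframes = 0,
 *	};
 *
 *	write(s, &rx_msg, sizeof(rx_msg));
 */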

/*
 * bcm_tx_send - send a single CAN frame to the CAN interface (for bcm_sendmsg)
 */
static int bcm_tx_send(struct msghdr *msg, int ifindex, struct sock *sk,
		       int cfsiz)
{
	struct sk_buff *skb;
	struct net_device *dev;
	int err;

	/* we need a real device to send frames */
	if (!ifindex)
		return -ENODEV;

	skb = alloc_skb(cfsiz + sizeof(struct can_skb_priv), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	can_skb_reserve(skb);

	err = memcpy_from_msg(skb_put(skb, cfsiz), msg, cfsiz);
	if (err < 0) {
		kfree_skb(skb);
		return err;
	}

	dev = dev_get_by_index(sock_net(sk), ifindex);
	if (!dev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	can_skb_prv(skb)->ifindex = dev->ifindex;
	can_skb_prv(skb)->skbcnt = 0;
	skb->dev = dev;
	can_skb_set_owner(skb, sk);
	err = can_send(skb, 1); /* send with loopback */
	dev_put(dev);

	if (err)
		return err;

	return cfsiz + MHSIZ;
}

/*
 * bcm_sendmsg - process BCM commands (opcodes) from the userspace
 */
static int bcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
	struct sock *sk = sock->sk;
	struct bcm_sock *bo = bcm_sk(sk);
	int ifindex = bo->ifindex; /* default ifindex for this bcm_op */
	struct bcm_msg_head msg_head;
	int cfsiz;
	int ret; /* read bytes or error codes as return value */

	if (!bo->bound)
		return -ENOTCONN;

	/* check for valid message length from userspace */
	if (size < MHSIZ)
		return -EINVAL;

	/* read message head information */
	ret = memcpy_from_msg((u8 *)&msg_head, msg, MHSIZ);
	if (ret < 0)
		return ret;

	cfsiz = CFSIZ(msg_head.flags);
	if ((size - MHSIZ) % cfsiz)
		return -EINVAL;

	/* check for alternative ifindex for this bcm_op */

	if (!ifindex && msg->msg_name) {
		/* no bound device as default => check msg_name */
		DECLARE_SOCKADDR(struct sockaddr_can *, addr, msg->msg_name);

		if (msg->msg_namelen < BCM_MIN_NAMELEN)
			return -EINVAL;

		if (addr->can_family != AF_CAN)
			return -EINVAL;

		/* ifindex from sendto() */
		ifindex = addr->can_ifindex;

		if (ifindex) {
			struct net_device *dev;

			dev = dev_get_by_index(sock_net(sk), ifindex);
			if (!dev)
				return -ENODEV;

			if (dev->type != ARPHRD_CAN) {
				dev_put(dev);
				return -ENODEV;
			}

			dev_put(dev);
		}
	}

	lock_sock(sk);

	switch (msg_head.opcode) {

	case TX_SETUP:
		ret = bcm_tx_setup(&msg_head, msg, ifindex, sk);
		break;

	case RX_SETUP:
		ret = bcm_rx_setup(&msg_head, msg, ifindex, sk);
		break;

	case TX_DELETE:
		if (bcm_delete_tx_op(&bo->tx_ops, &msg_head, ifindex))
			ret = MHSIZ;
		else
			ret = -EINVAL;
		break;

	case RX_DELETE:
		if (bcm_delete_rx_op(&bo->rx_ops, &msg_head, ifindex))
			ret = MHSIZ;
		else
			ret = -EINVAL;
		break;

	case TX_READ:
		/* reuse msg_head for the reply to TX_READ */
		msg_head.opcode = TX_STATUS;
		ret = bcm_read_op(&bo->tx_ops, &msg_head, ifindex);
		break;

	case RX_READ:
		/* reuse msg_head for the reply to RX_READ */
		msg_head.opcode = RX_STATUS;
		ret = bcm_read_op(&bo->rx_ops, &msg_head, ifindex);
		break;

	case TX_SEND:
		/* we need exactly one CAN frame behind the msg head */
		if ((msg_head.nframes != 1) || (size != cfsiz + MHSIZ))
			ret = -EINVAL;
		else
			ret = bcm_tx_send(msg, ifindex, sk, cfsiz);
		break;

	default:
		ret = -EINVAL;
		break;
	}

	release_sock(sk);

	return ret;
}

/*
 * notification handler for netdevice status changes
 */
static void bcm_notify(struct bcm_sock *bo, unsigned long msg,
		       struct net_device *dev)
{
	struct sock *sk = &bo->sk;
	struct bcm_op *op;
	int notify_enodev = 0;

	if (!net_eq(dev_net(dev), sock_net(sk)))
		return;

	switch (msg) {

	case NETDEV_UNREGISTER:
		lock_sock(sk);

		/* remove device specific receive entries */
		list_for_each_entry(op, &bo->rx_ops, list)
			if (op->rx_reg_dev == dev)
				bcm_rx_unreg(dev, op);

		/* remove device reference, if this is our bound device */
		if (bo->bound && bo->ifindex == dev->ifindex) {
#if IS_ENABLED(CONFIG_PROC_FS)
			if (sock_net(sk)->can.bcmproc_dir && bo->bcm_proc_read) {
				remove_proc_entry(bo->procname, sock_net(sk)->can.bcmproc_dir);
				bo->bcm_proc_read = NULL;
			}
#endif
			bo->bound = 0;
			bo->ifindex = 0;
			notify_enodev = 1;
		}

		release_sock(sk);

		if (notify_enodev) {
			sk->sk_err = ENODEV;
			if (!sock_flag(sk, SOCK_DEAD))
				sk_error_report(sk);
		}
		break;

	case NETDEV_DOWN:
		if (bo->bound && bo->ifindex == dev->ifindex) {
			sk->sk_err = ENETDOWN;
			if (!sock_flag(sk, SOCK_DEAD))
				sk_error_report(sk);
		}
	}
}

static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
			void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (dev->type != ARPHRD_CAN)
		return NOTIFY_DONE;
	if (msg != NETDEV_UNREGISTER && msg != NETDEV_DOWN)
		return NOTIFY_DONE;
	if (unlikely(bcm_busy_notifier)) /* Check for reentrant bug. */
		return NOTIFY_DONE;

	spin_lock(&bcm_notifier_lock);
	list_for_each_entry(bcm_busy_notifier, &bcm_notifier_list, notifier) {
		spin_unlock(&bcm_notifier_lock);
		bcm_notify(bcm_busy_notifier, msg, dev);
		spin_lock(&bcm_notifier_lock);
	}
	bcm_busy_notifier = NULL;
	spin_unlock(&bcm_notifier_lock);
	return NOTIFY_DONE;
}

/*
 * initial settings for all BCM sockets to be set at socket creation time
 */
static int bcm_init(struct sock *sk)
{
	struct bcm_sock *bo = bcm_sk(sk);

	bo->bound = 0;
	bo->ifindex = 0;
	bo->dropped_usr_msgs = 0;
	bo->bcm_proc_read = NULL;

	INIT_LIST_HEAD(&bo->tx_ops);
	INIT_LIST_HEAD(&bo->rx_ops);

	/* set notifier */
	spin_lock(&bcm_notifier_lock);
	list_add_tail(&bo->notifier, &bcm_notifier_list);
	spin_unlock(&bcm_notifier_lock);

	return 0;
}

/*
 * standard socket functions
 */
static int bcm_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct net *net;
	struct bcm_sock *bo;
	struct bcm_op *op, *next;

	if (!sk)
		return 0;

	net = sock_net(sk);
	bo = bcm_sk(sk);

	/* remove bcm_ops, timer, rx_unregister(), etc. */

	spin_lock(&bcm_notifier_lock);
	while (bcm_busy_notifier == bo) {
		spin_unlock(&bcm_notifier_lock);
		schedule_timeout_uninterruptible(1);
		spin_lock(&bcm_notifier_lock);
	}
	list_del(&bo->notifier);
	spin_unlock(&bcm_notifier_lock);

	lock_sock(sk);

#if IS_ENABLED(CONFIG_PROC_FS)
	/* remove procfs entry */
	if (net->can.bcmproc_dir && bo->bcm_proc_read)
		remove_proc_entry(bo->procname, net->can.bcmproc_dir);
#endif /* CONFIG_PROC_FS */

	list_for_each_entry_safe(op, next, &bo->tx_ops, list)
		bcm_remove_op(op);

	list_for_each_entry_safe(op, next, &bo->rx_ops, list) {
		/*
		 * Don't care if we're bound or not (due to netdev problems)
		 * can_rx_unregister() is always a safe thing to do here.
		 */
		if (op->ifindex) {
			/*
			 * Only remove subscriptions that had not
			 * been removed due to NETDEV_UNREGISTER
			 * in bcm_notifier()
			 */
			if (op->rx_reg_dev) {
				struct net_device *dev;

				dev = dev_get_by_index(net, op->ifindex);
				if (dev) {
					bcm_rx_unreg(dev, op);
					dev_put(dev);
				}
			}
		} else
			can_rx_unregister(net, NULL, op->can_id,
					  REGMASK(op->can_id),
					  bcm_rx_handler, op);

	}

	synchronize_rcu();

	list_for_each_entry_safe(op, next, &bo->rx_ops, list)
		bcm_remove_op(op);

	/* remove device reference */
	if (bo->bound) {
		bo->bound = 0;
		bo->ifindex = 0;
	}

	sock_orphan(sk);
	sock->sk = NULL;

	release_sock(sk);
	sock_put(sk);

	return 0;
}

static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
		       int flags)
{
	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
	struct sock *sk = sock->sk;
	struct bcm_sock *bo = bcm_sk(sk);
	struct net *net = sock_net(sk);
	int ret = 0;

	if (len < BCM_MIN_NAMELEN)
		return -EINVAL;

	lock_sock(sk);

	if (bo->bound) {
		ret = -EISCONN;
		goto fail;
	}

	/* bind a device to this socket */
	if (addr->can_ifindex) {
		struct net_device *dev;

		dev = dev_get_by_index(net, addr->can_ifindex);
		if (!dev) {
			ret = -ENODEV;
			goto fail;
		}
		if (dev->type != ARPHRD_CAN) {
			dev_put(dev);
			ret = -ENODEV;
			goto fail;
		}

		bo->ifindex = dev->ifindex;
		dev_put(dev);

	} else {
		/* no interface reference for ifindex = 0 ('any' CAN device) */
		bo->ifindex = 0;
	}

#if IS_ENABLED(CONFIG_PROC_FS)
	if (net->can.bcmproc_dir) {
		/* unique socket address as filename */
		sprintf(bo->procname, "%lu", sock_i_ino(sk));
		bo->bcm_proc_read = proc_create_net_single(bo->procname, 0644,
							   net->can.bcmproc_dir,
							   bcm_proc_show, sk);
		if (!bo->bcm_proc_read) {
			ret = -ENOMEM;
			goto fail;
		}
	}
#endif /* CONFIG_PROC_FS */

	bo->bound = 1;

fail:
	release_sock(sk);

	return ret;
}

static int bcm_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
		       int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int error = 0;
	int err;

	skb = skb_recv_datagram(sk, flags, &error);
	if (!skb)
		return error;

	if (skb->len < size)
		size = skb->len;

	err = memcpy_to_msg(msg, skb->data, size);
	if (err < 0) {
		skb_free_datagram(sk, skb);
		return err;
	}

	sock_recv_cmsgs(msg, sk, skb);

	if (msg->msg_name) {
		__sockaddr_check_size(BCM_MIN_NAMELEN);
		msg->msg_namelen = BCM_MIN_NAMELEN;
		memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
	}

	skb_free_datagram(sk, skb);

	return size;
}

static int bcm_sock_no_ioctlcmd(struct socket *sock, unsigned int cmd,
				unsigned long arg)
{
	/* no ioctls for socket layer -> hand it down to NIC layer */
	return -ENOIOCTLCMD;
}

static const struct proto_ops bcm_ops = {
	.family = PF_CAN,
	.release = bcm_release,
	.bind = sock_no_bind,
	.connect = bcm_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = sock_no_getname,
	.poll = datagram_poll,
	.ioctl = bcm_sock_no_ioctlcmd,
	.gettstamp = sock_gettstamp,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.sendmsg = bcm_sendmsg,
	.recvmsg = bcm_recvmsg,
	.mmap = sock_no_mmap,
};

static struct proto bcm_proto __read_mostly = {
	.name = "CAN_BCM",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct bcm_sock),
	.init = bcm_init,
};

static const struct can_proto bcm_can_proto = {
	.type = SOCK_DGRAM,
	.protocol = CAN_BCM,
	.ops = &bcm_ops,
	.prot = &bcm_proto,
};

static int canbcm_pernet_init(struct net *net)
{
#if IS_ENABLED(CONFIG_PROC_FS)
	/* create /proc/net/can-bcm directory */
	net->can.bcmproc_dir = proc_net_mkdir(net, "can-bcm", net->proc_net);
#endif /* CONFIG_PROC_FS */

	return 0;
}

static void canbcm_pernet_exit(struct net *net)
{
#if IS_ENABLED(CONFIG_PROC_FS)
	/* remove /proc/net/can-bcm directory */
	if (net->can.bcmproc_dir)
		remove_proc_entry("can-bcm", net->proc_net);
#endif /* CONFIG_PROC_FS */
}

static struct pernet_operations canbcm_pernet_ops __read_mostly = {
	.init = canbcm_pernet_init,
	.exit = canbcm_pernet_exit,
};

static struct notifier_block canbcm_notifier = {
	.notifier_call = bcm_notifier
};

static int __init bcm_module_init(void)
{
	int err;

	pr_info("can: broadcast manager protocol\n");

	err = register_pernet_subsys(&canbcm_pernet_ops);
	if (err)
		return err;

	err = register_netdevice_notifier(&canbcm_notifier);
	if (err)
		goto register_notifier_failed;

	err = can_proto_register(&bcm_can_proto);
	if (err < 0) {
		printk(KERN_ERR "can: registration of bcm protocol failed\n");
		goto register_proto_failed;
	}

	return 0;

register_proto_failed:
	unregister_netdevice_notifier(&canbcm_notifier);
register_notifier_failed:
	unregister_pernet_subsys(&canbcm_pernet_ops);
	return err;
}

static void __exit bcm_module_exit(void)
{
	can_proto_unregister(&bcm_can_proto);
	unregister_netdevice_notifier(&canbcm_notifier);
	unregister_pernet_subsys(&canbcm_pernet_ops);
}

module_init(bcm_module_init);
module_exit(bcm_module_exit);