xref: /openbmc/linux/net/can/bcm.c (revision 75f25bd3)
1 /*
2  * bcm.c - Broadcast Manager to filter/send (cyclic) CAN content
3  *
4  * Copyright (c) 2002-2007 Volkswagen Group Electronic Research
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. Neither the name of Volkswagen nor the names of its contributors
16  *    may be used to endorse or promote products derived from this software
17  *    without specific prior written permission.
18  *
19  * Alternatively, provided that this notice is retained in full, this
20  * software may be distributed under the terms of the GNU General
21  * Public License ("GPL") version 2, in which case the provisions of the
22  * GPL apply INSTEAD OF those given above.
23  *
24  * The provided data structures and external interfaces from this code
25  * are not restricted to be used by modules with a GPL compatible license.
26  *
27  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
29  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
30  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
31  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
32  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
33  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
34  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
35  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
36  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
37  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
38  * DAMAGE.
39  *
40  * Send feedback to <socketcan-users@lists.berlios.de>
41  *
42  */
43 
44 #include <linux/module.h>
45 #include <linux/init.h>
46 #include <linux/interrupt.h>
47 #include <linux/hrtimer.h>
48 #include <linux/list.h>
49 #include <linux/proc_fs.h>
50 #include <linux/seq_file.h>
51 #include <linux/uio.h>
52 #include <linux/net.h>
53 #include <linux/netdevice.h>
54 #include <linux/socket.h>
55 #include <linux/if_arp.h>
56 #include <linux/skbuff.h>
57 #include <linux/can.h>
58 #include <linux/can/core.h>
59 #include <linux/can/bcm.h>
60 #include <linux/slab.h>
61 #include <net/sock.h>
62 #include <net/net_namespace.h>
63 
64 /*
65  * To send multiple CAN frame contents within TX_SETUP or to filter
66  * CAN messages with a multiplex index within RX_SETUP, the number of
67  * different filters is limited to 256 due to the one-byte index value.
68  */
69 #define MAX_NFRAMES 256
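/*
 * Illustrative userspace sketch (not part of this module; the wrapper
 * struct, the socket descriptor 's' and all values are assumed): a
 * TX_SETUP request is one struct bcm_msg_head followed by
 * msg_head.nframes struct can_frame, written to a socket(PF_CAN,
 * SOCK_DGRAM, CAN_BCM) that has been connect()ed to a CAN interface.
 * E.g. to send two frames for can_id 0x123 cyclically every 100ms:
 *
 *	struct {
 *		struct bcm_msg_head head;
 *		struct can_frame frames[2];
 *	} tx = {
 *		.head = {
 *			.opcode  = TX_SETUP,
 *			.flags   = SETTIMER | STARTTIMER,
 *			.count   = 0,
 *			.ival2   = { 0, 100000 },
 *			.can_id  = 0x123,
 *			.nframes = 2,
 *		},
 *	};
 *
 *	(fill in tx.frames[0..1] payload, then:)
 *	write(s, &tx, sizeof(tx));
 */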
70 
71 /* use of last_frames[index].can_dlc */
72 #define RX_RECV    0x40 /* received data for this element */
73 #define RX_THR     0x80 /* element has not been sent due to throttling */
74 #define BCM_CAN_DLC_MASK 0x0F /* clean private flags in can_dlc by masking */
75 
76 /* get best masking value for can_rx_register() for a given single can_id */
77 #define REGMASK(id) ((id & CAN_EFF_FLAG) ? \
78 		     (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \
79 		     (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG))
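/*
 * Worked example with the constant values from <linux/can.h>
 * (CAN_SFF_MASK = 0x7FF, CAN_EFF_FLAG = 0x80000000, CAN_RTR_FLAG =
 * 0x40000000): REGMASK(0x123) evaluates to 0xC00007FF, so the filter
 * registered for the standard id 0x123 matches neither extended frames
 * nor RTR frames carrying the same id bits.
 */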
80 
81 #define CAN_BCM_VERSION CAN_VERSION
82 static __initdata const char banner[] = KERN_INFO
83 	"can: broadcast manager protocol (rev " CAN_BCM_VERSION " t)\n";
84 
85 MODULE_DESCRIPTION("PF_CAN broadcast manager protocol");
86 MODULE_LICENSE("Dual BSD/GPL");
87 MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
88 MODULE_ALIAS("can-proto-2");
89 
90 /* easy access to can_frame payload */
91 static inline u64 GET_U64(const struct can_frame *cp)
92 {
93 	return *(u64 *)cp->data;
94 }
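/*
 * GET_U64() lets the receive path mask and compare all eight data bytes
 * in one operation, e.g. (GET_U64(&op->frames[0]) & GET_U64(rxframe)) in
 * bcm_rx_handler() keeps only the payload bits covered by the MUX mask.
 */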
95 
96 struct bcm_op {
97 	struct list_head list;
98 	int ifindex;
99 	canid_t can_id;
100 	u32 flags;
101 	unsigned long frames_abs, frames_filtered;
102 	struct timeval ival1, ival2;
103 	struct hrtimer timer, thrtimer;
104 	struct tasklet_struct tsklet, thrtsklet;
105 	ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg;
106 	int rx_ifindex;
107 	u32 count;
108 	u32 nframes;
109 	u32 currframe;
110 	struct can_frame *frames;
111 	struct can_frame *last_frames;
112 	struct can_frame sframe;
113 	struct can_frame last_sframe;
114 	struct sock *sk;
115 	struct net_device *rx_reg_dev;
116 };
117 
118 static struct proc_dir_entry *proc_dir;
119 
120 struct bcm_sock {
121 	struct sock sk;
122 	int bound;
123 	int ifindex;
124 	struct notifier_block notifier;
125 	struct list_head rx_ops;
126 	struct list_head tx_ops;
127 	unsigned long dropped_usr_msgs;
128 	struct proc_dir_entry *bcm_proc_read;
129 	char procname [32]; /* inode number in decimal with \0 */
130 };
131 
132 static inline struct bcm_sock *bcm_sk(const struct sock *sk)
133 {
134 	return (struct bcm_sock *)sk;
135 }
136 
137 #define CFSIZ sizeof(struct can_frame)
138 #define OPSIZ sizeof(struct bcm_op)
139 #define MHSIZ sizeof(struct bcm_msg_head)
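/*
 * Every message exchanged with userspace is one struct bcm_msg_head
 * (MHSIZ bytes) directly followed by nframes struct can_frame (CFSIZ
 * bytes each).  E.g. a TX_SETUP carrying two frames is MHSIZ + 2 * CFSIZ
 * bytes long; bcm_sendmsg() rejects any size not matching this pattern.
 */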
140 
141 /*
142  * procfs functions
143  */
144 static char *bcm_proc_getifname(char *result, int ifindex)
145 {
146 	struct net_device *dev;
147 
148 	if (!ifindex)
149 		return "any";
150 
151 	rcu_read_lock();
152 	dev = dev_get_by_index_rcu(&init_net, ifindex);
153 	if (dev)
154 		strcpy(result, dev->name);
155 	else
156 		strcpy(result, "???");
157 	rcu_read_unlock();
158 
159 	return result;
160 }
161 
162 static int bcm_proc_show(struct seq_file *m, void *v)
163 {
164 	char ifname[IFNAMSIZ];
165 	struct sock *sk = (struct sock *)m->private;
166 	struct bcm_sock *bo = bcm_sk(sk);
167 	struct bcm_op *op;
168 
169 	seq_printf(m, ">>> socket %pK", sk->sk_socket);
170 	seq_printf(m, " / sk %pK", sk);
171 	seq_printf(m, " / bo %pK", bo);
172 	seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
173 	seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
174 	seq_printf(m, " <<<\n");
175 
176 	list_for_each_entry(op, &bo->rx_ops, list) {
177 
178 		unsigned long reduction;
179 
180 		/* print only active entries & prevent division by zero */
181 		if (!op->frames_abs)
182 			continue;
183 
184 		seq_printf(m, "rx_op: %03X %-5s ",
185 				op->can_id, bcm_proc_getifname(ifname, op->ifindex));
186 		seq_printf(m, "[%u]%c ", op->nframes,
187 				(op->flags & RX_CHECK_DLC)?'d':' ');
188 		if (op->kt_ival1.tv64)
189 			seq_printf(m, "timeo=%lld ",
190 					(long long)
191 					ktime_to_us(op->kt_ival1));
192 
193 		if (op->kt_ival2.tv64)
194 			seq_printf(m, "thr=%lld ",
195 					(long long)
196 					ktime_to_us(op->kt_ival2));
197 
198 		seq_printf(m, "# recv %ld (%ld) => reduction: ",
199 				op->frames_filtered, op->frames_abs);
200 
201 		reduction = 100 - (op->frames_filtered * 100) / op->frames_abs;
202 
203 		seq_printf(m, "%s%ld%%\n",
204 				(reduction == 100)?"near ":"", reduction);
205 	}
206 
207 	list_for_each_entry(op, &bo->tx_ops, list) {
208 
209 		seq_printf(m, "tx_op: %03X %s [%u] ",
210 				op->can_id,
211 				bcm_proc_getifname(ifname, op->ifindex),
212 				op->nframes);
213 
214 		if (op->kt_ival1.tv64)
215 			seq_printf(m, "t1=%lld ",
216 					(long long) ktime_to_us(op->kt_ival1));
217 
218 		if (op->kt_ival2.tv64)
219 			seq_printf(m, "t2=%lld ",
220 					(long long) ktime_to_us(op->kt_ival2));
221 
222 		seq_printf(m, "# sent %ld\n", op->frames_abs);
223 	}
224 	seq_putc(m, '\n');
225 	return 0;
226 }
227 
228 static int bcm_proc_open(struct inode *inode, struct file *file)
229 {
230 	return single_open(file, bcm_proc_show, PDE(inode)->data);
231 }
232 
233 static const struct file_operations bcm_proc_fops = {
234 	.owner		= THIS_MODULE,
235 	.open		= bcm_proc_open,
236 	.read		= seq_read,
237 	.llseek		= seq_lseek,
238 	.release	= single_release,
239 };
240 
241 /*
242  * bcm_can_tx - send the (next) CAN frame to the appropriate CAN interface
243  *              of the given bcm tx op
244  */
245 static void bcm_can_tx(struct bcm_op *op)
246 {
247 	struct sk_buff *skb;
248 	struct net_device *dev;
249 	struct can_frame *cf = &op->frames[op->currframe];
250 
251 	/* no target device? => exit */
252 	if (!op->ifindex)
253 		return;
254 
255 	dev = dev_get_by_index(&init_net, op->ifindex);
256 	if (!dev) {
257 		/* RFC: should this bcm_op remove itself here? */
258 		return;
259 	}
260 
261 	skb = alloc_skb(CFSIZ, gfp_any());
262 	if (!skb)
263 		goto out;
264 
265 	memcpy(skb_put(skb, CFSIZ), cf, CFSIZ);
266 
267 	/* send with loopback */
268 	skb->dev = dev;
269 	skb->sk = op->sk;
270 	can_send(skb, 1);
271 
272 	/* update statistics */
273 	op->currframe++;
274 	op->frames_abs++;
275 
276 	/* reached last frame? */
277 	if (op->currframe >= op->nframes)
278 		op->currframe = 0;
279  out:
280 	dev_put(dev);
281 }
282 
283 /*
284  * bcm_send_to_user - send a BCM message to the userspace
285  *                    (consisting of bcm_msg_head + x CAN frames)
286  */
287 static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
288 			     struct can_frame *frames, int has_timestamp)
289 {
290 	struct sk_buff *skb;
291 	struct can_frame *firstframe;
292 	struct sockaddr_can *addr;
293 	struct sock *sk = op->sk;
294 	unsigned int datalen = head->nframes * CFSIZ;
295 	int err;
296 
297 	skb = alloc_skb(sizeof(*head) + datalen, gfp_any());
298 	if (!skb)
299 		return;
300 
301 	memcpy(skb_put(skb, sizeof(*head)), head, sizeof(*head));
302 
303 	if (head->nframes) {
304 		/* can_frames starting here */
305 		firstframe = (struct can_frame *)skb_tail_pointer(skb);
306 
307 		memcpy(skb_put(skb, datalen), frames, datalen);
308 
309 		/*
310 		 * the BCM uses the can_dlc element of the can_frame
311 		 * structure for internal purposes. This is only
312 		 * relevant for updates that are generated by the
313 		 * BCM, where nframes is 1
314 		 */
315 		if (head->nframes == 1)
316 			firstframe->can_dlc &= BCM_CAN_DLC_MASK;
317 	}
318 
319 	if (has_timestamp) {
320 		/* restore rx timestamp */
321 		skb->tstamp = op->rx_stamp;
322 	}
323 
324 	/*
325 	 *  Put the datagram into the queue so that bcm_recvmsg() can
326 	 *  get it from there.  We need to pass the interface index to
327 	 *  bcm_recvmsg().  We pass a whole struct sockaddr_can in skb->cb
328 	 *  containing the interface index.
329 	 */
330 
331 	BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct sockaddr_can));
332 	addr = (struct sockaddr_can *)skb->cb;
333 	memset(addr, 0, sizeof(*addr));
334 	addr->can_family  = AF_CAN;
335 	addr->can_ifindex = op->rx_ifindex;
336 
337 	err = sock_queue_rcv_skb(sk, skb);
338 	if (err < 0) {
339 		struct bcm_sock *bo = bcm_sk(sk);
340 
341 		kfree_skb(skb);
342 		/* don't care about overflows in this statistic */
343 		bo->dropped_usr_msgs++;
344 	}
345 }
346 
347 static void bcm_tx_timeout_tsklet(unsigned long data)
348 {
349 	struct bcm_op *op = (struct bcm_op *)data;
350 	struct bcm_msg_head msg_head;
351 
352 	if (op->kt_ival1.tv64 && (op->count > 0)) {
353 
354 		op->count--;
355 		if (!op->count && (op->flags & TX_COUNTEVT)) {
356 
357 			/* create notification to user */
358 			msg_head.opcode  = TX_EXPIRED;
359 			msg_head.flags   = op->flags;
360 			msg_head.count   = op->count;
361 			msg_head.ival1   = op->ival1;
362 			msg_head.ival2   = op->ival2;
363 			msg_head.can_id  = op->can_id;
364 			msg_head.nframes = 0;
365 
366 			bcm_send_to_user(op, &msg_head, NULL, 0);
367 		}
368 	}
369 
370 	if (op->kt_ival1.tv64 && (op->count > 0)) {
371 
372 		/* send (next) frame */
373 		bcm_can_tx(op);
374 		hrtimer_start(&op->timer,
375 			      ktime_add(ktime_get(), op->kt_ival1),
376 			      HRTIMER_MODE_ABS);
377 
378 	} else {
379 		if (op->kt_ival2.tv64) {
380 
381 			/* send (next) frame */
382 			bcm_can_tx(op);
383 			hrtimer_start(&op->timer,
384 				      ktime_add(ktime_get(), op->kt_ival2),
385 				      HRTIMER_MODE_ABS);
386 		}
387 	}
388 }
389 
390 /*
391  * bcm_tx_timeout_handler - performs cyclic CAN frame transmissions
392  */
393 static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
394 {
395 	struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
396 
397 	tasklet_schedule(&op->tsklet);
398 
399 	return HRTIMER_NORESTART;
400 }
401 
402 /*
403  * bcm_rx_changed - create a RX_CHANGED notification due to changed content
404  */
405 static void bcm_rx_changed(struct bcm_op *op, struct can_frame *data)
406 {
407 	struct bcm_msg_head head;
408 
409 	/* update statistics */
410 	op->frames_filtered++;
411 
412 	/* prevent statistics overflow */
413 	if (op->frames_filtered > ULONG_MAX/100)
414 		op->frames_filtered = op->frames_abs = 0;
415 
416 	/* this element is not throttled anymore */
417 	data->can_dlc &= (BCM_CAN_DLC_MASK|RX_RECV);
418 
419 	head.opcode  = RX_CHANGED;
420 	head.flags   = op->flags;
421 	head.count   = op->count;
422 	head.ival1   = op->ival1;
423 	head.ival2   = op->ival2;
424 	head.can_id  = op->can_id;
425 	head.nframes = 1;
426 
427 	bcm_send_to_user(op, &head, data, 1);
428 }
429 
430 /*
431  * bcm_rx_update_and_send - process a detected relevant receive content change
432  *                          1. update the last received data
433  *                          2. send a notification to the user (if possible)
434  */
435 static void bcm_rx_update_and_send(struct bcm_op *op,
436 				   struct can_frame *lastdata,
437 				   const struct can_frame *rxdata)
438 {
439 	memcpy(lastdata, rxdata, CFSIZ);
440 
441 	/* mark as used and throttled by default */
442 	lastdata->can_dlc |= (RX_RECV|RX_THR);
443 
444 	/* throttling mode inactive? */
445 	if (!op->kt_ival2.tv64) {
446 		/* send RX_CHANGED to the user immediately */
447 		bcm_rx_changed(op, lastdata);
448 		return;
449 	}
450 
451 	/* with an active throttling timer we are just done here */
452 	if (hrtimer_active(&op->thrtimer))
453 		return;
454 
455 	/* first reception with throttling mode enabled */
456 	if (!op->kt_lastmsg.tv64)
457 		goto rx_changed_settime;
458 
459 	/* got a second frame inside a potential throttle period? */
460 	if (ktime_us_delta(ktime_get(), op->kt_lastmsg) <
461 	    ktime_to_us(op->kt_ival2)) {
462 		/* do not send the saved data - only start throttle timer */
463 		hrtimer_start(&op->thrtimer,
464 			      ktime_add(op->kt_lastmsg, op->kt_ival2),
465 			      HRTIMER_MODE_ABS);
466 		return;
467 	}
468 
469 	/* the gap was big enough that throttling was not needed here */
470 rx_changed_settime:
471 	bcm_rx_changed(op, lastdata);
472 	op->kt_lastmsg = ktime_get();
473 }
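/*
 * Illustrative timeline for the throttling in bcm_rx_update_and_send()
 * above (timing values assumed): with kt_ival2 = 100ms a content change
 * at t=0 is sent immediately and kt_lastmsg is set.  Changes at t=20ms
 * and t=60ms only update last_frames[] and arm thrtimer for t=100ms;
 * when it fires, bcm_rx_thr_flush() delivers the newest (t=60ms) content
 * as a single RX_CHANGED message.
 */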
474 
475 /*
476  * bcm_rx_cmp_to_index - (bit)compares the currently received data to formerly
477  *                       received data stored in op->last_frames[]
478  */
479 static void bcm_rx_cmp_to_index(struct bcm_op *op, unsigned int index,
480 				const struct can_frame *rxdata)
481 {
482 	/*
483 	 * no one uses the MSBs of can_dlc for comparison,
484 	 * so we use them here to detect the first time of reception
485 	 */
486 
487 	if (!(op->last_frames[index].can_dlc & RX_RECV)) {
488 		/* received data for the first time => send update to user */
489 		bcm_rx_update_and_send(op, &op->last_frames[index], rxdata);
490 		return;
491 	}
492 
493 	/* do a real check in can_frame data section */
494 
495 	if ((GET_U64(&op->frames[index]) & GET_U64(rxdata)) !=
496 	    (GET_U64(&op->frames[index]) & GET_U64(&op->last_frames[index]))) {
497 		bcm_rx_update_and_send(op, &op->last_frames[index], rxdata);
498 		return;
499 	}
500 
501 	if (op->flags & RX_CHECK_DLC) {
502 		/* do a real check in can_frame dlc */
503 		if (rxdata->can_dlc != (op->last_frames[index].can_dlc &
504 					BCM_CAN_DLC_MASK)) {
505 			bcm_rx_update_and_send(op, &op->last_frames[index],
506 					       rxdata);
507 			return;
508 		}
509 	}
510 }
511 
512 /*
513  * bcm_rx_starttimer - enable timeout monitoring for CAN frame reception
514  */
515 static void bcm_rx_starttimer(struct bcm_op *op)
516 {
517 	if (op->flags & RX_NO_AUTOTIMER)
518 		return;
519 
520 	if (op->kt_ival1.tv64)
521 		hrtimer_start(&op->timer, op->kt_ival1, HRTIMER_MODE_REL);
522 }
523 
524 static void bcm_rx_timeout_tsklet(unsigned long data)
525 {
526 	struct bcm_op *op = (struct bcm_op *)data;
527 	struct bcm_msg_head msg_head;
528 
529 	/* create notification to user */
530 	msg_head.opcode  = RX_TIMEOUT;
531 	msg_head.flags   = op->flags;
532 	msg_head.count   = op->count;
533 	msg_head.ival1   = op->ival1;
534 	msg_head.ival2   = op->ival2;
535 	msg_head.can_id  = op->can_id;
536 	msg_head.nframes = 0;
537 
538 	bcm_send_to_user(op, &msg_head, NULL, 0);
539 }
540 
541 /*
542  * bcm_rx_timeout_handler - when the (cyclic) CAN frame reception timed out
543  */
544 static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
545 {
546 	struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
547 
548 	/* schedule before NET_RX_SOFTIRQ */
549 	tasklet_hi_schedule(&op->tsklet);
550 
551 	/* no restart of the timer is done here! */
552 
553 	/* if the user wants to be informed when cyclic CAN messages come back */
554 	if ((op->flags & RX_ANNOUNCE_RESUME) && op->last_frames) {
555 		/* clear received can_frames to indicate 'nothing received' */
556 		memset(op->last_frames, 0, op->nframes * CFSIZ);
557 	}
558 
559 	return HRTIMER_NORESTART;
560 }
561 
562 /*
563  * bcm_rx_do_flush - helper for bcm_rx_thr_flush
564  */
565 static inline int bcm_rx_do_flush(struct bcm_op *op, int update,
566 				  unsigned int index)
567 {
568 	if ((op->last_frames) && (op->last_frames[index].can_dlc & RX_THR)) {
569 		if (update)
570 			bcm_rx_changed(op, &op->last_frames[index]);
571 		return 1;
572 	}
573 	return 0;
574 }
575 
576 /*
577  * bcm_rx_thr_flush - Check for throttled data and send it to the userspace
578  *
579  * update == 0 : just check if throttled data is available  (any irq context)
580  * update == 1 : check and send throttled data to userspace (soft_irq context)
581  */
582 static int bcm_rx_thr_flush(struct bcm_op *op, int update)
583 {
584 	int updated = 0;
585 
586 	if (op->nframes > 1) {
587 		unsigned int i;
588 
589 		/* for MUX filter we start at index 1 */
590 		for (i = 1; i < op->nframes; i++)
591 			updated += bcm_rx_do_flush(op, update, i);
592 
593 	} else {
594 		/* for RX_FILTER_ID and simple filter */
595 		updated += bcm_rx_do_flush(op, update, 0);
596 	}
597 
598 	return updated;
599 }
600 
601 static void bcm_rx_thr_tsklet(unsigned long data)
602 {
603 	struct bcm_op *op = (struct bcm_op *)data;
604 
605 	/* push the changed data to the userspace */
606 	bcm_rx_thr_flush(op, 1);
607 }
608 
609 /*
610  * bcm_rx_thr_handler - the time for blocked content updates is over now:
611  *                      Check for throttled data and send it to the userspace
612  */
613 static enum hrtimer_restart bcm_rx_thr_handler(struct hrtimer *hrtimer)
614 {
615 	struct bcm_op *op = container_of(hrtimer, struct bcm_op, thrtimer);
616 
617 	tasklet_schedule(&op->thrtsklet);
618 
619 	if (bcm_rx_thr_flush(op, 0)) {
620 		hrtimer_forward(hrtimer, ktime_get(), op->kt_ival2);
621 		return HRTIMER_RESTART;
622 	} else {
623 		/* rearm throttle handling */
624 		op->kt_lastmsg = ktime_set(0, 0);
625 		return HRTIMER_NORESTART;
626 	}
627 }
628 
629 /*
630  * bcm_rx_handler - handle a CAN frame reception
631  */
632 static void bcm_rx_handler(struct sk_buff *skb, void *data)
633 {
634 	struct bcm_op *op = (struct bcm_op *)data;
635 	const struct can_frame *rxframe = (struct can_frame *)skb->data;
636 	unsigned int i;
637 
638 	/* disable timeout */
639 	hrtimer_cancel(&op->timer);
640 
641 	if (op->can_id != rxframe->can_id)
642 		return;
643 
644 	/* save rx timestamp */
645 	op->rx_stamp = skb->tstamp;
646 	/* save originator for recvfrom() */
647 	op->rx_ifindex = skb->dev->ifindex;
648 	/* update statistics */
649 	op->frames_abs++;
650 
651 	if (op->flags & RX_RTR_FRAME) {
652 		/* send reply for RTR-request (placed in op->frames[0]) */
653 		bcm_can_tx(op);
654 		return;
655 	}
656 
657 	if (op->flags & RX_FILTER_ID) {
658 		/* the easiest case */
659 		bcm_rx_update_and_send(op, &op->last_frames[0], rxframe);
660 		goto rx_starttimer;
661 	}
662 
663 	if (op->nframes == 1) {
664 		/* simple compare with index 0 */
665 		bcm_rx_cmp_to_index(op, 0, rxframe);
666 		goto rx_starttimer;
667 	}
668 
669 	if (op->nframes > 1) {
670 		/*
671 		 * multiplex compare
672 		 *
673 		 * find the first multiplex mask that fits.
674 		 * Remark: The MUX-mask is stored in index 0
675 		 */
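		/*
		 * Example (assumed values): with the MUX mask
		 * frames[0].data[0] = 0xff and the filter frames[1].data[0]
		 * = 0x23, an incoming frame whose data[0] is 0x23 selects
		 * index 1 and is then compared against last_frames[1] by
		 * bcm_rx_cmp_to_index().
		 */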
676 
677 		for (i = 1; i < op->nframes; i++) {
678 			if ((GET_U64(&op->frames[0]) & GET_U64(rxframe)) ==
679 			    (GET_U64(&op->frames[0]) &
680 			     GET_U64(&op->frames[i]))) {
681 				bcm_rx_cmp_to_index(op, i, rxframe);
682 				break;
683 			}
684 		}
685 	}
686 
687 rx_starttimer:
688 	bcm_rx_starttimer(op);
689 }
690 
691 /*
692  * helpers for bcm_op handling: find & delete bcm [rx|tx] op elements
693  */
694 static struct bcm_op *bcm_find_op(struct list_head *ops, canid_t can_id,
695 				  int ifindex)
696 {
697 	struct bcm_op *op;
698 
699 	list_for_each_entry(op, ops, list) {
700 		if ((op->can_id == can_id) && (op->ifindex == ifindex))
701 			return op;
702 	}
703 
704 	return NULL;
705 }
706 
707 static void bcm_remove_op(struct bcm_op *op)
708 {
709 	hrtimer_cancel(&op->timer);
710 	hrtimer_cancel(&op->thrtimer);
711 
712 	if (op->tsklet.func)
713 		tasklet_kill(&op->tsklet);
714 
715 	if (op->thrtsklet.func)
716 		tasklet_kill(&op->thrtsklet);
717 
718 	if ((op->frames) && (op->frames != &op->sframe))
719 		kfree(op->frames);
720 
721 	if ((op->last_frames) && (op->last_frames != &op->last_sframe))
722 		kfree(op->last_frames);
723 
724 	kfree(op);
725 }
726 
727 static void bcm_rx_unreg(struct net_device *dev, struct bcm_op *op)
728 {
729 	if (op->rx_reg_dev == dev) {
730 		can_rx_unregister(dev, op->can_id, REGMASK(op->can_id),
731 				  bcm_rx_handler, op);
732 
733 		/* mark subscription as removed */
734 		op->rx_reg_dev = NULL;
735 	} else
736 		printk(KERN_ERR "can-bcm: bcm_rx_unreg: registered device "
737 		       "mismatch %p %p\n", op->rx_reg_dev, dev);
738 }
739 
740 /*
741  * bcm_delete_rx_op - find and remove a rx op (returns number of removed ops)
742  */
743 static int bcm_delete_rx_op(struct list_head *ops, canid_t can_id, int ifindex)
744 {
745 	struct bcm_op *op, *n;
746 
747 	list_for_each_entry_safe(op, n, ops, list) {
748 		if ((op->can_id == can_id) && (op->ifindex == ifindex)) {
749 
750 			/*
751 			 * Don't care if we're bound or not (due to netdev
752 			 * problems); can_rx_unregister() is always a safe
753 			 * thing to do here.
754 			 */
755 			if (op->ifindex) {
756 				/*
757 				 * Only remove subscriptions that had not
758 				 * been removed due to NETDEV_UNREGISTER
759 				 * in bcm_notifier()
760 				 */
761 				if (op->rx_reg_dev) {
762 					struct net_device *dev;
763 
764 					dev = dev_get_by_index(&init_net,
765 							       op->ifindex);
766 					if (dev) {
767 						bcm_rx_unreg(dev, op);
768 						dev_put(dev);
769 					}
770 				}
771 			} else
772 				can_rx_unregister(NULL, op->can_id,
773 						  REGMASK(op->can_id),
774 						  bcm_rx_handler, op);
775 
776 			list_del(&op->list);
777 			bcm_remove_op(op);
778 			return 1; /* done */
779 		}
780 	}
781 
782 	return 0; /* not found */
783 }
784 
785 /*
786  * bcm_delete_tx_op - find and remove a tx op (returns number of removed ops)
787  */
788 static int bcm_delete_tx_op(struct list_head *ops, canid_t can_id, int ifindex)
789 {
790 	struct bcm_op *op, *n;
791 
792 	list_for_each_entry_safe(op, n, ops, list) {
793 		if ((op->can_id == can_id) && (op->ifindex == ifindex)) {
794 			list_del(&op->list);
795 			bcm_remove_op(op);
796 			return 1; /* done */
797 		}
798 	}
799 
800 	return 0; /* not found */
801 }
802 
803 /*
804  * bcm_read_op - read out a bcm_op and send it to the user (for bcm_sendmsg)
805  */
806 static int bcm_read_op(struct list_head *ops, struct bcm_msg_head *msg_head,
807 		       int ifindex)
808 {
809 	struct bcm_op *op = bcm_find_op(ops, msg_head->can_id, ifindex);
810 
811 	if (!op)
812 		return -EINVAL;
813 
814 	/* put current values into msg_head */
815 	msg_head->flags   = op->flags;
816 	msg_head->count   = op->count;
817 	msg_head->ival1   = op->ival1;
818 	msg_head->ival2   = op->ival2;
819 	msg_head->nframes = op->nframes;
820 
821 	bcm_send_to_user(op, msg_head, op->frames, 0);
822 
823 	return MHSIZ;
824 }
825 
826 /*
827  * bcm_tx_setup - create or update a bcm tx op (for bcm_sendmsg)
828  */
829 static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
830 			int ifindex, struct sock *sk)
831 {
832 	struct bcm_sock *bo = bcm_sk(sk);
833 	struct bcm_op *op;
834 	unsigned int i;
835 	int err;
836 
837 	/* we need a real device to send frames */
838 	if (!ifindex)
839 		return -ENODEV;
840 
841 	/* check nframes boundaries - we need at least one can_frame */
842 	if (msg_head->nframes < 1 || msg_head->nframes > MAX_NFRAMES)
843 		return -EINVAL;
844 
845 	/* check the given can_id */
846 	op = bcm_find_op(&bo->tx_ops, msg_head->can_id, ifindex);
847 
848 	if (op) {
849 		/* update existing BCM operation */
850 
851 		/*
852 		 * Do we need more space for the can_frames than currently
853 		 * allocated? -> This is a _really_ unusual use-case and
854 		 * therefore (complexity / locking) it is not supported.
855 		 */
856 		if (msg_head->nframes > op->nframes)
857 			return -E2BIG;
858 
859 		/* update can_frames content */
860 		for (i = 0; i < msg_head->nframes; i++) {
861 			err = memcpy_fromiovec((u8 *)&op->frames[i],
862 					       msg->msg_iov, CFSIZ);
863 
864 			if (op->frames[i].can_dlc > 8)
865 				err = -EINVAL;
866 
867 			if (err < 0)
868 				return err;
869 
870 			if (msg_head->flags & TX_CP_CAN_ID) {
871 				/* copy can_id into frame */
872 				op->frames[i].can_id = msg_head->can_id;
873 			}
874 		}
875 
876 	} else {
877 		/* insert new BCM operation for the given can_id */
878 
879 		op = kzalloc(OPSIZ, GFP_KERNEL);
880 		if (!op)
881 			return -ENOMEM;
882 
883 		op->can_id    = msg_head->can_id;
884 
885 		/* create array for can_frames and copy the data */
886 		if (msg_head->nframes > 1) {
887 			op->frames = kmalloc(msg_head->nframes * CFSIZ,
888 					     GFP_KERNEL);
889 			if (!op->frames) {
890 				kfree(op);
891 				return -ENOMEM;
892 			}
893 		} else
894 			op->frames = &op->sframe;
895 
896 		for (i = 0; i < msg_head->nframes; i++) {
897 			err = memcpy_fromiovec((u8 *)&op->frames[i],
898 					       msg->msg_iov, CFSIZ);
899 
900 			if (op->frames[i].can_dlc > 8)
901 				err = -EINVAL;
902 
903 			if (err < 0) {
904 				if (op->frames != &op->sframe)
905 					kfree(op->frames);
906 				kfree(op);
907 				return err;
908 			}
909 
910 			if (msg_head->flags & TX_CP_CAN_ID) {
911 				/* copy can_id into frame */
912 				op->frames[i].can_id = msg_head->can_id;
913 			}
914 		}
915 
916 		/* tx_ops never compare with previously received messages */
917 		op->last_frames = NULL;
918 
919 		/* bcm_can_tx / bcm_tx_timeout_handler needs this */
920 		op->sk = sk;
921 		op->ifindex = ifindex;
922 
923 		/* initialize uninitialized (kzalloc) structure */
924 		hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
925 		op->timer.function = bcm_tx_timeout_handler;
926 
927 		/* initialize tasklet for tx countevent notification */
928 		tasklet_init(&op->tsklet, bcm_tx_timeout_tsklet,
929 			     (unsigned long) op);
930 
931 		/* currently unused in tx_ops */
932 		hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
933 
934 		/* add this bcm_op to the list of the tx_ops */
935 		list_add(&op->list, &bo->tx_ops);
936 
937 	} /* if ((op = bcm_find_op(&bo->tx_ops, msg_head->can_id, ifindex))) */
938 
939 	if (op->nframes != msg_head->nframes) {
940 		op->nframes   = msg_head->nframes;
941 		/* start multiple frame transmission with index 0 */
942 		op->currframe = 0;
943 	}
944 
945 	/* check flags */
946 
947 	op->flags = msg_head->flags;
948 
949 	if (op->flags & TX_RESET_MULTI_IDX) {
950 		/* start multiple frame transmission with index 0 */
951 		op->currframe = 0;
952 	}
953 
954 	if (op->flags & SETTIMER) {
955 		/* set timer values */
956 		op->count = msg_head->count;
957 		op->ival1 = msg_head->ival1;
958 		op->ival2 = msg_head->ival2;
959 		op->kt_ival1 = timeval_to_ktime(msg_head->ival1);
960 		op->kt_ival2 = timeval_to_ktime(msg_head->ival2);
961 
962 		/* disable an active timer due to zero values? */
963 		if (!op->kt_ival1.tv64 && !op->kt_ival2.tv64)
964 			hrtimer_cancel(&op->timer);
965 	}
966 
967 	if ((op->flags & STARTTIMER) &&
968 	    ((op->kt_ival1.tv64 && op->count) || op->kt_ival2.tv64)) {
969 
970 		/* spec: send can_frame when starting timer */
971 		op->flags |= TX_ANNOUNCE;
972 
973 		if (op->kt_ival1.tv64 && (op->count > 0)) {
974 			/* op->count-- is done in bcm_tx_timeout_handler */
975 			hrtimer_start(&op->timer, op->kt_ival1,
976 				      HRTIMER_MODE_REL);
977 		} else
978 			hrtimer_start(&op->timer, op->kt_ival2,
979 				      HRTIMER_MODE_REL);
980 	}
981 
982 	if (op->flags & TX_ANNOUNCE)
983 		bcm_can_tx(op);
984 
985 	return msg_head->nframes * CFSIZ + MHSIZ;
986 }
987 
988 /*
989  * bcm_rx_setup - create or update a bcm rx op (for bcm_sendmsg)
990  */
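/*
 * Illustrative userspace sketch (all values assumed): an RX_SETUP for
 * can_id 0x123 with one filter frame and a 1 second timeout sends a
 * struct bcm_msg_head { .opcode = RX_SETUP, .flags = SETTIMER | STARTTIMER,
 * .ival1 = { 1, 0 }, .can_id = 0x123, .nframes = 1 } immediately followed
 * by one struct can_frame whose data bytes mark the payload bits of
 * interest; relevant content changes are reported as RX_CHANGED and a
 * silent bus yields RX_TIMEOUT after ival1.
 */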
991 static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
992 			int ifindex, struct sock *sk)
993 {
994 	struct bcm_sock *bo = bcm_sk(sk);
995 	struct bcm_op *op;
996 	int do_rx_register;
997 	int err = 0;
998 
999 	if ((msg_head->flags & RX_FILTER_ID) || (!(msg_head->nframes))) {
1000 		/* be robust against wrong usage ... */
1001 		msg_head->flags |= RX_FILTER_ID;
1002 		/* ignore trailing garbage */
1003 		msg_head->nframes = 0;
1004 	}
1005 
1006 	/* the first element contains the mux-mask => MAX_NFRAMES + 1  */
1007 	if (msg_head->nframes > MAX_NFRAMES + 1)
1008 		return -EINVAL;
1009 
1010 	if ((msg_head->flags & RX_RTR_FRAME) &&
1011 	    ((msg_head->nframes != 1) ||
1012 	     (!(msg_head->can_id & CAN_RTR_FLAG))))
1013 		return -EINVAL;
1014 
1015 	/* check the given can_id */
1016 	op = bcm_find_op(&bo->rx_ops, msg_head->can_id, ifindex);
1017 	if (op) {
1018 		/* update existing BCM operation */
1019 
1020 		/*
1021 		 * Do we need more space for the can_frames than currently
1022 		 * allocated? -> This is a _really_ unusual use-case and
1023 		 * therefore (complexity / locking) it is not supported.
1024 		 */
1025 		if (msg_head->nframes > op->nframes)
1026 			return -E2BIG;
1027 
1028 		if (msg_head->nframes) {
1029 			/* update can_frames content */
1030 			err = memcpy_fromiovec((u8 *)op->frames,
1031 					       msg->msg_iov,
1032 					       msg_head->nframes * CFSIZ);
1033 			if (err < 0)
1034 				return err;
1035 
1036 			/* clear last_frames to indicate 'nothing received' */
1037 			memset(op->last_frames, 0, msg_head->nframes * CFSIZ);
1038 		}
1039 
1040 		op->nframes = msg_head->nframes;
1041 
1042 		/* Only an update -> do not call can_rx_register() */
1043 		do_rx_register = 0;
1044 
1045 	} else {
1046 		/* insert new BCM operation for the given can_id */
1047 		op = kzalloc(OPSIZ, GFP_KERNEL);
1048 		if (!op)
1049 			return -ENOMEM;
1050 
1051 		op->can_id    = msg_head->can_id;
1052 		op->nframes   = msg_head->nframes;
1053 
1054 		if (msg_head->nframes > 1) {
1055 			/* create array for can_frames and copy the data */
1056 			op->frames = kmalloc(msg_head->nframes * CFSIZ,
1057 					     GFP_KERNEL);
1058 			if (!op->frames) {
1059 				kfree(op);
1060 				return -ENOMEM;
1061 			}
1062 
1063 			/* create and init array for received can_frames */
1064 			op->last_frames = kzalloc(msg_head->nframes * CFSIZ,
1065 						  GFP_KERNEL);
1066 			if (!op->last_frames) {
1067 				kfree(op->frames);
1068 				kfree(op);
1069 				return -ENOMEM;
1070 			}
1071 
1072 		} else {
1073 			op->frames = &op->sframe;
1074 			op->last_frames = &op->last_sframe;
1075 		}
1076 
1077 		if (msg_head->nframes) {
1078 			err = memcpy_fromiovec((u8 *)op->frames, msg->msg_iov,
1079 					       msg_head->nframes * CFSIZ);
1080 			if (err < 0) {
1081 				if (op->frames != &op->sframe)
1082 					kfree(op->frames);
1083 				if (op->last_frames != &op->last_sframe)
1084 					kfree(op->last_frames);
1085 				kfree(op);
1086 				return err;
1087 			}
1088 		}
1089 
1090 		/* bcm_can_tx / bcm_tx_timeout_handler needs this */
1091 		op->sk = sk;
1092 		op->ifindex = ifindex;
1093 
1094 		/* initialize uninitialized (kzalloc) structure */
1095 		hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1096 		op->timer.function = bcm_rx_timeout_handler;
1097 
1098 		/* initialize tasklet for rx timeout notification */
1099 		tasklet_init(&op->tsklet, bcm_rx_timeout_tsklet,
1100 			     (unsigned long) op);
1101 
1102 		hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1103 		op->thrtimer.function = bcm_rx_thr_handler;
1104 
1105 		/* initialize tasklet for rx throttle handling */
1106 		tasklet_init(&op->thrtsklet, bcm_rx_thr_tsklet,
1107 			     (unsigned long) op);
1108 
1109 		/* add this bcm_op to the list of the rx_ops */
1110 		list_add(&op->list, &bo->rx_ops);
1111 
1112 		/* call can_rx_register() */
1113 		do_rx_register = 1;
1114 
1115 	} /* if ((op = bcm_find_op(&bo->rx_ops, msg_head->can_id, ifindex))) */
1116 
1117 	/* check flags */
1118 	op->flags = msg_head->flags;
1119 
1120 	if (op->flags & RX_RTR_FRAME) {
1121 
1122 		/* no timers in RTR-mode */
1123 		hrtimer_cancel(&op->thrtimer);
1124 		hrtimer_cancel(&op->timer);
1125 
1126 		/*
1127 		 * funny feature in RX(!)_SETUP only for RTR-mode:
1128 		 * copy can_id into frame BUT without RTR-flag to
1129 		 * prevent a full-load-loopback-test ... ;-]
1130 		 */
1131 		if ((op->flags & TX_CP_CAN_ID) ||
1132 		    (op->frames[0].can_id == op->can_id))
1133 			op->frames[0].can_id = op->can_id & ~CAN_RTR_FLAG;
1134 
1135 	} else {
1136 		if (op->flags & SETTIMER) {
1137 
1138 			/* set timer value */
1139 			op->ival1 = msg_head->ival1;
1140 			op->ival2 = msg_head->ival2;
1141 			op->kt_ival1 = timeval_to_ktime(msg_head->ival1);
1142 			op->kt_ival2 = timeval_to_ktime(msg_head->ival2);
1143 
1144 			/* disable an active timer due to zero value? */
1145 			if (!op->kt_ival1.tv64)
1146 				hrtimer_cancel(&op->timer);
1147 
1148 			/*
1149 			 * In any case cancel the throttle timer, flush
1150 			 * potentially blocked msgs and reset throttle handling
1151 			 */
1152 			op->kt_lastmsg = ktime_set(0, 0);
1153 			hrtimer_cancel(&op->thrtimer);
1154 			bcm_rx_thr_flush(op, 1);
1155 		}
1156 
1157 		if ((op->flags & STARTTIMER) && op->kt_ival1.tv64)
1158 			hrtimer_start(&op->timer, op->kt_ival1,
1159 				      HRTIMER_MODE_REL);
1160 	}
1161 
1162 	/* now we can register for can_ids, if we added a new bcm_op */
1163 	if (do_rx_register) {
1164 		if (ifindex) {
1165 			struct net_device *dev;
1166 
1167 			dev = dev_get_by_index(&init_net, ifindex);
1168 			if (dev) {
1169 				err = can_rx_register(dev, op->can_id,
1170 						      REGMASK(op->can_id),
1171 						      bcm_rx_handler, op,
1172 						      "bcm");
1173 
1174 				op->rx_reg_dev = dev;
1175 				dev_put(dev);
1176 			}
1177 
1178 		} else
1179 			err = can_rx_register(NULL, op->can_id,
1180 					      REGMASK(op->can_id),
1181 					      bcm_rx_handler, op, "bcm");
1182 		if (err) {
1183 			/* this bcm rx op is broken -> remove it */
1184 			list_del(&op->list);
1185 			bcm_remove_op(op);
1186 			return err;
1187 		}
1188 	}
1189 
1190 	return msg_head->nframes * CFSIZ + MHSIZ;
1191 }
1192 
1193 /*
1194  * bcm_tx_send - send a single CAN frame to the CAN interface (for bcm_sendmsg)
1195  */
1196 static int bcm_tx_send(struct msghdr *msg, int ifindex, struct sock *sk)
1197 {
1198 	struct sk_buff *skb;
1199 	struct net_device *dev;
1200 	int err;
1201 
1202 	/* we need a real device to send frames */
1203 	if (!ifindex)
1204 		return -ENODEV;
1205 
1206 	skb = alloc_skb(CFSIZ, GFP_KERNEL);
1207 
1208 	if (!skb)
1209 		return -ENOMEM;
1210 
1211 	err = memcpy_fromiovec(skb_put(skb, CFSIZ), msg->msg_iov, CFSIZ);
1212 	if (err < 0) {
1213 		kfree_skb(skb);
1214 		return err;
1215 	}
1216 
1217 	dev = dev_get_by_index(&init_net, ifindex);
1218 	if (!dev) {
1219 		kfree_skb(skb);
1220 		return -ENODEV;
1221 	}
1222 
1223 	skb->dev = dev;
1224 	skb->sk  = sk;
1225 	err = can_send(skb, 1); /* send with loopback */
1226 	dev_put(dev);
1227 
1228 	if (err)
1229 		return err;
1230 
1231 	return CFSIZ + MHSIZ;
1232 }
1233 
1234 /*
1235  * bcm_sendmsg - process BCM commands (opcodes) from the userspace
1236  */
1237 static int bcm_sendmsg(struct kiocb *iocb, struct socket *sock,
1238 		       struct msghdr *msg, size_t size)
1239 {
1240 	struct sock *sk = sock->sk;
1241 	struct bcm_sock *bo = bcm_sk(sk);
1242 	int ifindex = bo->ifindex; /* default ifindex for this bcm_op */
1243 	struct bcm_msg_head msg_head;
1244 	int ret; /* read bytes or error codes as return value */
1245 
1246 	if (!bo->bound)
1247 		return -ENOTCONN;
1248 
1249 	/* check for valid message length from userspace */
1250 	if (size < MHSIZ || (size - MHSIZ) % CFSIZ)
1251 		return -EINVAL;
1252 
1253 	/* check for alternative ifindex for this bcm_op */
1254 
1255 	if (!ifindex && msg->msg_name) {
1256 		/* no bound device as default => check msg_name */
1257 		struct sockaddr_can *addr =
1258 			(struct sockaddr_can *)msg->msg_name;
1259 
1260 		if (msg->msg_namelen < sizeof(*addr))
1261 			return -EINVAL;
1262 
1263 		if (addr->can_family != AF_CAN)
1264 			return -EINVAL;
1265 
1266 		/* ifindex from sendto() */
1267 		ifindex = addr->can_ifindex;
1268 
1269 		if (ifindex) {
1270 			struct net_device *dev;
1271 
1272 			dev = dev_get_by_index(&init_net, ifindex);
1273 			if (!dev)
1274 				return -ENODEV;
1275 
1276 			if (dev->type != ARPHRD_CAN) {
1277 				dev_put(dev);
1278 				return -ENODEV;
1279 			}
1280 
1281 			dev_put(dev);
1282 		}
1283 	}
1284 
1285 	/* read message head information */
1286 
1287 	ret = memcpy_fromiovec((u8 *)&msg_head, msg->msg_iov, MHSIZ);
1288 	if (ret < 0)
1289 		return ret;
1290 
1291 	lock_sock(sk);
1292 
1293 	switch (msg_head.opcode) {
1294 
1295 	case TX_SETUP:
1296 		ret = bcm_tx_setup(&msg_head, msg, ifindex, sk);
1297 		break;
1298 
1299 	case RX_SETUP:
1300 		ret = bcm_rx_setup(&msg_head, msg, ifindex, sk);
1301 		break;
1302 
1303 	case TX_DELETE:
1304 		if (bcm_delete_tx_op(&bo->tx_ops, msg_head.can_id, ifindex))
1305 			ret = MHSIZ;
1306 		else
1307 			ret = -EINVAL;
1308 		break;
1309 
1310 	case RX_DELETE:
1311 		if (bcm_delete_rx_op(&bo->rx_ops, msg_head.can_id, ifindex))
1312 			ret = MHSIZ;
1313 		else
1314 			ret = -EINVAL;
1315 		break;
1316 
1317 	case TX_READ:
1318 		/* reuse msg_head for the reply to TX_READ */
1319 		msg_head.opcode  = TX_STATUS;
1320 		ret = bcm_read_op(&bo->tx_ops, &msg_head, ifindex);
1321 		break;
1322 
1323 	case RX_READ:
1324 		/* reuse msg_head for the reply to RX_READ */
1325 		msg_head.opcode  = RX_STATUS;
1326 		ret = bcm_read_op(&bo->rx_ops, &msg_head, ifindex);
1327 		break;
1328 
1329 	case TX_SEND:
1330 		/* we need exactly one can_frame behind the msg head */
1331 		if ((msg_head.nframes != 1) || (size != CFSIZ + MHSIZ))
1332 			ret = -EINVAL;
1333 		else
1334 			ret = bcm_tx_send(msg, ifindex, sk);
1335 		break;
1336 
1337 	default:
1338 		ret = -EINVAL;
1339 		break;
1340 	}
1341 
1342 	release_sock(sk);
1343 
1344 	return ret;
1345 }
1346 
1347 /*
1348  * notification handler for netdevice status changes
1349  */
1350 static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
1351 			void *data)
1352 {
1353 	struct net_device *dev = (struct net_device *)data;
1354 	struct bcm_sock *bo = container_of(nb, struct bcm_sock, notifier);
1355 	struct sock *sk = &bo->sk;
1356 	struct bcm_op *op;
1357 	int notify_enodev = 0;
1358 
1359 	if (!net_eq(dev_net(dev), &init_net))
1360 		return NOTIFY_DONE;
1361 
1362 	if (dev->type != ARPHRD_CAN)
1363 		return NOTIFY_DONE;
1364 
1365 	switch (msg) {
1366 
1367 	case NETDEV_UNREGISTER:
1368 		lock_sock(sk);
1369 
1370 		/* remove device specific receive entries */
1371 		list_for_each_entry(op, &bo->rx_ops, list)
1372 			if (op->rx_reg_dev == dev)
1373 				bcm_rx_unreg(dev, op);
1374 
1375 		/* remove device reference, if this is our bound device */
1376 		if (bo->bound && bo->ifindex == dev->ifindex) {
1377 			bo->bound   = 0;
1378 			bo->ifindex = 0;
1379 			notify_enodev = 1;
1380 		}
1381 
1382 		release_sock(sk);
1383 
1384 		if (notify_enodev) {
1385 			sk->sk_err = ENODEV;
1386 			if (!sock_flag(sk, SOCK_DEAD))
1387 				sk->sk_error_report(sk);
1388 		}
1389 		break;
1390 
1391 	case NETDEV_DOWN:
1392 		if (bo->bound && bo->ifindex == dev->ifindex) {
1393 			sk->sk_err = ENETDOWN;
1394 			if (!sock_flag(sk, SOCK_DEAD))
1395 				sk->sk_error_report(sk);
1396 		}
1397 	}
1398 
1399 	return NOTIFY_DONE;
1400 }
1401 
1402 /*
1403  * initial settings for all BCM sockets to be set at socket creation time
1404  */
1405 static int bcm_init(struct sock *sk)
1406 {
1407 	struct bcm_sock *bo = bcm_sk(sk);
1408 
1409 	bo->bound            = 0;
1410 	bo->ifindex          = 0;
1411 	bo->dropped_usr_msgs = 0;
1412 	bo->bcm_proc_read    = NULL;
1413 
1414 	INIT_LIST_HEAD(&bo->tx_ops);
1415 	INIT_LIST_HEAD(&bo->rx_ops);
1416 
1417 	/* set notifier */
1418 	bo->notifier.notifier_call = bcm_notifier;
1419 
1420 	register_netdevice_notifier(&bo->notifier);
1421 
1422 	return 0;
1423 }
1424 
1425 /*
1426  * standard socket functions
1427  */
1428 static int bcm_release(struct socket *sock)
1429 {
1430 	struct sock *sk = sock->sk;
1431 	struct bcm_sock *bo;
1432 	struct bcm_op *op, *next;
1433 
1434 	if (sk == NULL)
1435 		return 0;
1436 
1437 	bo = bcm_sk(sk);
1438 
1439 	/* remove bcm_ops, timer, rx_unregister(), etc. */
1440 
1441 	unregister_netdevice_notifier(&bo->notifier);
1442 
1443 	lock_sock(sk);
1444 
1445 	list_for_each_entry_safe(op, next, &bo->tx_ops, list)
1446 		bcm_remove_op(op);
1447 
1448 	list_for_each_entry_safe(op, next, &bo->rx_ops, list) {
1449 		/*
1450 		 * Don't care if we're bound or not (due to netdev problems);
1451 		 * can_rx_unregister() is always a safe thing to do here.
1452 		 */
1453 		if (op->ifindex) {
1454 			/*
1455 			 * Only remove subscriptions that had not
1456 			 * been removed due to NETDEV_UNREGISTER
1457 			 * in bcm_notifier()
1458 			 */
1459 			if (op->rx_reg_dev) {
1460 				struct net_device *dev;
1461 
1462 				dev = dev_get_by_index(&init_net, op->ifindex);
1463 				if (dev) {
1464 					bcm_rx_unreg(dev, op);
1465 					dev_put(dev);
1466 				}
1467 			}
1468 		} else
1469 			can_rx_unregister(NULL, op->can_id,
1470 					  REGMASK(op->can_id),
1471 					  bcm_rx_handler, op);
1472 
1473 		bcm_remove_op(op);
1474 	}
1475 
1476 	/* remove procfs entry */
1477 	if (proc_dir && bo->bcm_proc_read)
1478 		remove_proc_entry(bo->procname, proc_dir);
1479 
1480 	/* remove device reference */
1481 	if (bo->bound) {
1482 		bo->bound   = 0;
1483 		bo->ifindex = 0;
1484 	}
1485 
1486 	sock_orphan(sk);
1487 	sock->sk = NULL;
1488 
1489 	release_sock(sk);
1490 	sock_put(sk);
1491 
1492 	return 0;
1493 }
1494 
1495 static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
1496 		       int flags)
1497 {
1498 	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
1499 	struct sock *sk = sock->sk;
1500 	struct bcm_sock *bo = bcm_sk(sk);
1501 
1502 	if (len < sizeof(*addr))
1503 		return -EINVAL;
1504 
1505 	if (bo->bound)
1506 		return -EISCONN;
1507 
1508 	/* bind a device to this socket */
1509 	if (addr->can_ifindex) {
1510 		struct net_device *dev;
1511 
1512 		dev = dev_get_by_index(&init_net, addr->can_ifindex);
1513 		if (!dev)
1514 			return -ENODEV;
1515 
1516 		if (dev->type != ARPHRD_CAN) {
1517 			dev_put(dev);
1518 			return -ENODEV;
1519 		}
1520 
1521 		bo->ifindex = dev->ifindex;
1522 		dev_put(dev);
1523 
1524 	} else {
1525 		/* no interface reference for ifindex = 0 ('any' CAN device) */
1526 		bo->ifindex = 0;
1527 	}
1528 
1529 	bo->bound = 1;
1530 
1531 	if (proc_dir) {
1532 		/* unique socket address as filename */
1533 		sprintf(bo->procname, "%lu", sock_i_ino(sk));
1534 		bo->bcm_proc_read = proc_create_data(bo->procname, 0644,
1535 						     proc_dir,
1536 						     &bcm_proc_fops, sk);
1537 	}
1538 
1539 	return 0;
1540 }
1541 
1542 static int bcm_recvmsg(struct kiocb *iocb, struct socket *sock,
1543 		       struct msghdr *msg, size_t size, int flags)
1544 {
1545 	struct sock *sk = sock->sk;
1546 	struct sk_buff *skb;
1547 	int error = 0;
1548 	int noblock;
1549 	int err;
1550 
1551 	noblock =  flags & MSG_DONTWAIT;
1552 	flags   &= ~MSG_DONTWAIT;
1553 	skb = skb_recv_datagram(sk, flags, noblock, &error);
1554 	if (!skb)
1555 		return error;
1556 
1557 	if (skb->len < size)
1558 		size = skb->len;
1559 
1560 	err = memcpy_toiovec(msg->msg_iov, skb->data, size);
1561 	if (err < 0) {
1562 		skb_free_datagram(sk, skb);
1563 		return err;
1564 	}
1565 
1566 	sock_recv_ts_and_drops(msg, sk, skb);
1567 
1568 	if (msg->msg_name) {
1569 		msg->msg_namelen = sizeof(struct sockaddr_can);
1570 		memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
1571 	}
1572 
1573 	skb_free_datagram(sk, skb);
1574 
1575 	return size;
1576 }
1577 
1578 static const struct proto_ops bcm_ops = {
1579 	.family        = PF_CAN,
1580 	.release       = bcm_release,
1581 	.bind          = sock_no_bind,
1582 	.connect       = bcm_connect,
1583 	.socketpair    = sock_no_socketpair,
1584 	.accept        = sock_no_accept,
1585 	.getname       = sock_no_getname,
1586 	.poll          = datagram_poll,
1587 	.ioctl         = can_ioctl,	/* use can_ioctl() from af_can.c */
1588 	.listen        = sock_no_listen,
1589 	.shutdown      = sock_no_shutdown,
1590 	.setsockopt    = sock_no_setsockopt,
1591 	.getsockopt    = sock_no_getsockopt,
1592 	.sendmsg       = bcm_sendmsg,
1593 	.recvmsg       = bcm_recvmsg,
1594 	.mmap          = sock_no_mmap,
1595 	.sendpage      = sock_no_sendpage,
1596 };
1597 
1598 static struct proto bcm_proto __read_mostly = {
1599 	.name       = "CAN_BCM",
1600 	.owner      = THIS_MODULE,
1601 	.obj_size   = sizeof(struct bcm_sock),
1602 	.init       = bcm_init,
1603 };
1604 
1605 static const struct can_proto bcm_can_proto = {
1606 	.type       = SOCK_DGRAM,
1607 	.protocol   = CAN_BCM,
1608 	.ops        = &bcm_ops,
1609 	.prot       = &bcm_proto,
1610 };
1611 
1612 static int __init bcm_module_init(void)
1613 {
1614 	int err;
1615 
1616 	printk(banner);
1617 
1618 	err = can_proto_register(&bcm_can_proto);
1619 	if (err < 0) {
1620 		printk(KERN_ERR "can: registration of bcm protocol failed\n");
1621 		return err;
1622 	}
1623 
1624 	/* create /proc/net/can-bcm directory */
1625 	proc_dir = proc_mkdir("can-bcm", init_net.proc_net);
1626 	return 0;
1627 }
1628 
1629 static void __exit bcm_module_exit(void)
1630 {
1631 	can_proto_unregister(&bcm_can_proto);
1632 
1633 	if (proc_dir)
1634 		proc_net_remove(&init_net, "can-bcm");
1635 }
1636 
1637 module_init(bcm_module_init);
1638 module_exit(bcm_module_exit);
1639