xref: /openbmc/linux/net/can/raw.c (revision ebd09753)
1 /*
2  * raw.c - Raw sockets for protocol family CAN
3  *
4  * Copyright (c) 2002-2007 Volkswagen Group Electronic Research
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. Neither the name of Volkswagen nor the names of its contributors
16  *    may be used to endorse or promote products derived from this software
17  *    without specific prior written permission.
18  *
19  * Alternatively, provided that this notice is retained in full, this
20  * software may be distributed under the terms of the GNU General
21  * Public License ("GPL") version 2, in which case the provisions of the
22  * GPL apply INSTEAD OF those given above.
23  *
24  * The provided data structures and external interfaces from this code
25  * are not restricted to be used by modules with a GPL compatible license.
26  *
27  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
29  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
30  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
31  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
32  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
33  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
34  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
35  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
36  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
37  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
38  * DAMAGE.
39  *
40  */
41 
42 #include <linux/module.h>
43 #include <linux/init.h>
44 #include <linux/uio.h>
45 #include <linux/net.h>
46 #include <linux/slab.h>
47 #include <linux/netdevice.h>
48 #include <linux/socket.h>
49 #include <linux/if_arp.h>
50 #include <linux/skbuff.h>
51 #include <linux/can.h>
52 #include <linux/can/core.h>
53 #include <linux/can/skb.h>
54 #include <linux/can/raw.h>
55 #include <net/sock.h>
56 #include <net/net_namespace.h>
57 
58 #define CAN_RAW_VERSION CAN_VERSION
59 
60 MODULE_DESCRIPTION("PF_CAN raw protocol");
61 MODULE_LICENSE("Dual BSD/GPL");
62 MODULE_AUTHOR("Urs Thuermann <urs.thuermann@volkswagen.de>");
63 MODULE_ALIAS("can-proto-1");
64 
65 #define MASK_ALL 0
66 
67 /*
68  * A raw socket has a list of can_filters attached to it, each receiving
69  * the CAN frames matching that filter.  If the filter list is empty,
70  * no CAN frames will be received by the socket.  The default after
71  * opening the socket is to have one filter which receives all frames.
72  * The filter list is allocated dynamically with the exception of the
73  * list containing only one item.  This common case is optimized by
74  * storing the single filter in dfilter, to avoid using dynamic memory.
75  */
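
For illustration, a minimal userspace sketch of how such a filter list is
installed on the receiving side (the interface name "can0", the helper name
and the two IDs are assumptions, not part of this file):

#include <net/if.h>
#include <string.h>
#include <sys/socket.h>
#include <linux/can.h>
#include <linux/can/raw.h>

static int open_filtered_socket(void)
{
	struct sockaddr_can addr;
	struct can_filter rfilter[2];
	int s = socket(PF_CAN, SOCK_RAW, CAN_RAW);

	if (s < 0)
		return -1;

	/* match the single ID 0x123 and the range 0x200-0x2ff (SFF) */
	rfilter[0].can_id   = 0x123;
	rfilter[0].can_mask = CAN_SFF_MASK;
	rfilter[1].can_id   = 0x200;
	rfilter[1].can_mask = 0x700;

	/* replaces the default "receive everything" filter; an optlen of 0
	 * would clear the list and disable reception entirely */
	setsockopt(s, SOL_CAN_RAW, CAN_RAW_FILTER, rfilter, sizeof(rfilter));

	memset(&addr, 0, sizeof(addr));
	addr.can_family  = AF_CAN;
	addr.can_ifindex = if_nametoindex("can0");
	bind(s, (struct sockaddr *)&addr, sizeof(addr));

	return s;
}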
76 
77 struct uniqframe {
78 	int skbcnt;
79 	const struct sk_buff *skb;
80 	unsigned int join_rx_count;
81 };
82 
83 struct raw_sock {
84 	struct sock sk;
85 	int bound;
86 	int ifindex;
87 	struct notifier_block notifier;
88 	int loopback;
89 	int recv_own_msgs;
90 	int fd_frames;
91 	int join_filters;
92 	int count;                 /* number of active filters */
93 	struct can_filter dfilter; /* default/single filter */
94 	struct can_filter *filter; /* pointer to filter(s) */
95 	can_err_mask_t err_mask;
96 	struct uniqframe __percpu *uniq;
97 };
98 
99 /*
100  * Return pointer to store the extra msg flags for raw_recvmsg().
101  * We use the space of one unsigned int beyond the 'struct sockaddr_can'
102  * in skb->cb.
103  */
104 static inline unsigned int *raw_flags(struct sk_buff *skb)
105 {
106 	sock_skb_cb_check_size(sizeof(struct sockaddr_can) +
107 			       sizeof(unsigned int));
108 
109 	/* return pointer after struct sockaddr_can */
110 	return (unsigned int *)(&((struct sockaddr_can *)skb->cb)[1]);
111 }
112 
113 static inline struct raw_sock *raw_sk(const struct sock *sk)
114 {
115 	return (struct raw_sock *)sk;
116 }
117 
118 static void raw_rcv(struct sk_buff *oskb, void *data)
119 {
120 	struct sock *sk = (struct sock *)data;
121 	struct raw_sock *ro = raw_sk(sk);
122 	struct sockaddr_can *addr;
123 	struct sk_buff *skb;
124 	unsigned int *pflags;
125 
126 	/* check the received tx sock reference */
127 	if (!ro->recv_own_msgs && oskb->sk == sk)
128 		return;
129 
130 	/* do not pass non-CAN2.0 frames to a legacy socket */
131 	if (!ro->fd_frames && oskb->len != CAN_MTU)
132 		return;
133 
134 	/* eliminate multiple filter matches for the same skb */
135 	if (this_cpu_ptr(ro->uniq)->skb == oskb &&
136 	    this_cpu_ptr(ro->uniq)->skbcnt == can_skb_prv(oskb)->skbcnt) {
137 		if (ro->join_filters) {
138 			this_cpu_inc(ro->uniq->join_rx_count);
139 			/* drop frame until all enabled filters matched */
140 			if (this_cpu_ptr(ro->uniq)->join_rx_count < ro->count)
141 				return;
142 		} else {
143 			return;
144 		}
145 	} else {
146 		this_cpu_ptr(ro->uniq)->skb = oskb;
147 		this_cpu_ptr(ro->uniq)->skbcnt = can_skb_prv(oskb)->skbcnt;
148 		this_cpu_ptr(ro->uniq)->join_rx_count = 1;
149 		/* drop first frame to check all enabled filters? */
150 		if (ro->join_filters && ro->count > 1)
151 			return;
152 	}
153 
154 	/* clone the given skb to be able to enqueue it into the rcv queue */
155 	skb = skb_clone(oskb, GFP_ATOMIC);
156 	if (!skb)
157 		return;
158 
159 	/*
160 	 *  Put the datagram into the queue so that raw_recvmsg() can
161 	 *  get it from there.  We need to pass the interface index to
162 	 *  raw_recvmsg().  We pass a whole struct sockaddr_can in skb->cb
163 	 *  containing the interface index.
164 	 */
165 
166 	sock_skb_cb_check_size(sizeof(struct sockaddr_can));
167 	addr = (struct sockaddr_can *)skb->cb;
168 	memset(addr, 0, sizeof(*addr));
169 	addr->can_family  = AF_CAN;
170 	addr->can_ifindex = skb->dev->ifindex;
171 
172 	/* add CAN specific message flags for raw_recvmsg() */
173 	pflags = raw_flags(skb);
174 	*pflags = 0;
175 	if (oskb->sk)
176 		*pflags |= MSG_DONTROUTE;
177 	if (oskb->sk == sk)
178 		*pflags |= MSG_CONFIRM;
179 
180 	if (sock_queue_rcv_skb(sk, skb) < 0)
181 		kfree_skb(skb);
182 }
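
The two flags recorded just above are reported back to userspace in the
msg_flags member that recvmsg() fills in (see raw_recvmsg() below). A sketch
of how a reader could tell the three cases apart; the descriptor s is assumed
to be a bound CAN_RAW socket with CAN_RAW_RECV_OWN_MSGS enabled:

#include <stdio.h>
#include <string.h>
#include <sys/uio.h>
#include <sys/socket.h>
#include <linux/can.h>

static void classify_one_frame(int s)
{
	struct can_frame frame;
	struct iovec iov = { .iov_base = &frame, .iov_len = sizeof(frame) };
	struct msghdr msg;

	memset(&msg, 0, sizeof(msg));
	msg.msg_iov    = &iov;
	msg.msg_iovlen = 1;

	if (recvmsg(s, &msg, 0) < 0)
		return;

	if (msg.msg_flags & MSG_CONFIRM)
		printf("frame was sent by this very socket\n");
	else if (msg.msg_flags & MSG_DONTROUTE)
		printf("frame was sent by another socket on this host\n");
	else
		printf("frame was received from the CAN bus\n");
}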
183 
184 static int raw_enable_filters(struct net *net, struct net_device *dev,
185 			      struct sock *sk, struct can_filter *filter,
186 			      int count)
187 {
188 	int err = 0;
189 	int i;
190 
191 	for (i = 0; i < count; i++) {
192 		err = can_rx_register(net, dev, filter[i].can_id,
193 				      filter[i].can_mask,
194 				      raw_rcv, sk, "raw", sk);
195 		if (err) {
196 			/* clean up successfully registered filters */
197 			while (--i >= 0)
198 				can_rx_unregister(net, dev, filter[i].can_id,
199 						  filter[i].can_mask,
200 						  raw_rcv, sk);
201 			break;
202 		}
203 	}
204 
205 	return err;
206 }
207 
208 static int raw_enable_errfilter(struct net *net, struct net_device *dev,
209 				struct sock *sk, can_err_mask_t err_mask)
210 {
211 	int err = 0;
212 
213 	if (err_mask)
214 		err = can_rx_register(net, dev, 0, err_mask | CAN_ERR_FLAG,
215 				      raw_rcv, sk, "raw", sk);
216 
217 	return err;
218 }
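
raw_enable_errfilter() only registers anything when err_mask is non-zero,
i.e. after userspace has opted in via CAN_RAW_ERR_FILTER. A sketch of such an
opt-in, assuming interest in bus-off and arbitration-lost events (the socket
s and the helper name are assumptions):

#include <sys/socket.h>
#include <linux/can.h>
#include <linux/can/raw.h>
#include <linux/can/error.h>

static int enable_error_frames(int s)
{
	can_err_mask_t err_mask = CAN_ERR_BUSOFF | CAN_ERR_LOSTARB;

	/* matching error frames arrive as struct can_frame with
	 * CAN_ERR_FLAG set in can_id */
	return setsockopt(s, SOL_CAN_RAW, CAN_RAW_ERR_FILTER,
			  &err_mask, sizeof(err_mask));
}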
219 
220 static void raw_disable_filters(struct net *net, struct net_device *dev,
221 				struct sock *sk, struct can_filter *filter,
222 				int count)
223 {
224 	int i;
225 
226 	for (i = 0; i < count; i++)
227 		can_rx_unregister(net, dev, filter[i].can_id,
228 				  filter[i].can_mask, raw_rcv, sk);
229 }
230 
231 static inline void raw_disable_errfilter(struct net *net,
232 					 struct net_device *dev,
233 					 struct sock *sk,
234 					 can_err_mask_t err_mask)
235 
236 {
237 	if (err_mask)
238 		can_rx_unregister(net, dev, 0, err_mask | CAN_ERR_FLAG,
239 				  raw_rcv, sk);
240 }
241 
242 static inline void raw_disable_allfilters(struct net *net,
243 					  struct net_device *dev,
244 					  struct sock *sk)
245 {
246 	struct raw_sock *ro = raw_sk(sk);
247 
248 	raw_disable_filters(net, dev, sk, ro->filter, ro->count);
249 	raw_disable_errfilter(net, dev, sk, ro->err_mask);
250 }
251 
252 static int raw_enable_allfilters(struct net *net, struct net_device *dev,
253 				 struct sock *sk)
254 {
255 	struct raw_sock *ro = raw_sk(sk);
256 	int err;
257 
258 	err = raw_enable_filters(net, dev, sk, ro->filter, ro->count);
259 	if (!err) {
260 		err = raw_enable_errfilter(net, dev, sk, ro->err_mask);
261 		if (err)
262 			raw_disable_filters(net, dev, sk, ro->filter,
263 					    ro->count);
264 	}
265 
266 	return err;
267 }
268 
269 static int raw_notifier(struct notifier_block *nb,
270 			unsigned long msg, void *ptr)
271 {
272 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
273 	struct raw_sock *ro = container_of(nb, struct raw_sock, notifier);
274 	struct sock *sk = &ro->sk;
275 
276 	if (!net_eq(dev_net(dev), sock_net(sk)))
277 		return NOTIFY_DONE;
278 
279 	if (dev->type != ARPHRD_CAN)
280 		return NOTIFY_DONE;
281 
282 	if (ro->ifindex != dev->ifindex)
283 		return NOTIFY_DONE;
284 
285 	switch (msg) {
286 
287 	case NETDEV_UNREGISTER:
288 		lock_sock(sk);
289 		/* remove current filters & unregister */
290 		if (ro->bound)
291 			raw_disable_allfilters(dev_net(dev), dev, sk);
292 
293 		if (ro->count > 1)
294 			kfree(ro->filter);
295 
296 		ro->ifindex = 0;
297 		ro->bound   = 0;
298 		ro->count   = 0;
299 		release_sock(sk);
300 
301 		sk->sk_err = ENODEV;
302 		if (!sock_flag(sk, SOCK_DEAD))
303 			sk->sk_error_report(sk);
304 		break;
305 
306 	case NETDEV_DOWN:
307 		sk->sk_err = ENETDOWN;
308 		if (!sock_flag(sk, SOCK_DEAD))
309 			sk->sk_error_report(sk);
310 		break;
311 	}
312 
313 	return NOTIFY_DONE;
314 }
315 
316 static int raw_init(struct sock *sk)
317 {
318 	struct raw_sock *ro = raw_sk(sk);
319 
320 	ro->bound            = 0;
321 	ro->ifindex          = 0;
322 
323 	/* set default filter to single entry dfilter */
324 	ro->dfilter.can_id   = 0;
325 	ro->dfilter.can_mask = MASK_ALL;
326 	ro->filter           = &ro->dfilter;
327 	ro->count            = 1;
328 
329 	/* set default loopback behaviour */
330 	ro->loopback         = 1;
331 	ro->recv_own_msgs    = 0;
332 	ro->fd_frames        = 0;
333 	ro->join_filters     = 0;
334 
335 	/* alloc_percpu provides zeroed memory */
336 	ro->uniq = alloc_percpu(struct uniqframe);
337 	if (unlikely(!ro->uniq))
338 		return -ENOMEM;
339 
340 	/* set notifier */
341 	ro->notifier.notifier_call = raw_notifier;
342 
343 	register_netdevice_notifier(&ro->notifier);
344 
345 	return 0;
346 }
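
The per-socket defaults set up here (loopback enabled, own messages not
received, classic frames only, a single match-all filter) can each be changed
with the socket options handled in raw_setsockopt() below. A sketch that lets
a socket see its own transmitted frames, which only takes effect while
loopback stays enabled (s is an assumed CAN_RAW descriptor):

#include <sys/socket.h>
#include <linux/can.h>
#include <linux/can/raw.h>

static int receive_own_frames(int s)
{
	int loopback = 1;	/* already the default, shown for clarity */
	int recv_own = 1;	/* default is 0 */

	if (setsockopt(s, SOL_CAN_RAW, CAN_RAW_LOOPBACK,
		       &loopback, sizeof(loopback)) < 0)
		return -1;

	return setsockopt(s, SOL_CAN_RAW, CAN_RAW_RECV_OWN_MSGS,
			  &recv_own, sizeof(recv_own));
}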
347 
348 static int raw_release(struct socket *sock)
349 {
350 	struct sock *sk = sock->sk;
351 	struct raw_sock *ro;
352 
353 	if (!sk)
354 		return 0;
355 
356 	ro = raw_sk(sk);
357 
358 	unregister_netdevice_notifier(&ro->notifier);
359 
360 	lock_sock(sk);
361 
362 	/* remove current filters & unregister */
363 	if (ro->bound) {
364 		if (ro->ifindex) {
365 			struct net_device *dev;
366 
367 			dev = dev_get_by_index(sock_net(sk), ro->ifindex);
368 			if (dev) {
369 				raw_disable_allfilters(dev_net(dev), dev, sk);
370 				dev_put(dev);
371 			}
372 		} else
373 			raw_disable_allfilters(sock_net(sk), NULL, sk);
374 	}
375 
376 	if (ro->count > 1)
377 		kfree(ro->filter);
378 
379 	ro->ifindex = 0;
380 	ro->bound   = 0;
381 	ro->count   = 0;
382 	free_percpu(ro->uniq);
383 
384 	sock_orphan(sk);
385 	sock->sk = NULL;
386 
387 	release_sock(sk);
388 	sock_put(sk);
389 
390 	return 0;
391 }
392 
393 static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len)
394 {
395 	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
396 	struct sock *sk = sock->sk;
397 	struct raw_sock *ro = raw_sk(sk);
398 	int ifindex;
399 	int err = 0;
400 	int notify_enetdown = 0;
401 
402 	if (len < sizeof(*addr))
403 		return -EINVAL;
404 	if (addr->can_family != AF_CAN)
405 		return -EINVAL;
406 
407 	lock_sock(sk);
408 
409 	if (ro->bound && addr->can_ifindex == ro->ifindex)
410 		goto out;
411 
412 	if (addr->can_ifindex) {
413 		struct net_device *dev;
414 
415 		dev = dev_get_by_index(sock_net(sk), addr->can_ifindex);
416 		if (!dev) {
417 			err = -ENODEV;
418 			goto out;
419 		}
420 		if (dev->type != ARPHRD_CAN) {
421 			dev_put(dev);
422 			err = -ENODEV;
423 			goto out;
424 		}
425 		if (!(dev->flags & IFF_UP))
426 			notify_enetdown = 1;
427 
428 		ifindex = dev->ifindex;
429 
430 		/* filters set by default/setsockopt */
431 		err = raw_enable_allfilters(sock_net(sk), dev, sk);
432 		dev_put(dev);
433 	} else {
434 		ifindex = 0;
435 
436 		/* filters set by default/setsockopt */
437 		err = raw_enable_allfilters(sock_net(sk), NULL, sk);
438 	}
439 
440 	if (!err) {
441 		if (ro->bound) {
442 			/* unregister old filters */
443 			if (ro->ifindex) {
444 				struct net_device *dev;
445 
446 				dev = dev_get_by_index(sock_net(sk),
447 						       ro->ifindex);
448 				if (dev) {
449 					raw_disable_allfilters(dev_net(dev),
450 							       dev, sk);
451 					dev_put(dev);
452 				}
453 			} else
454 				raw_disable_allfilters(sock_net(sk), NULL, sk);
455 		}
456 		ro->ifindex = ifindex;
457 		ro->bound = 1;
458 	}
459 
460  out:
461 	release_sock(sk);
462 
463 	if (notify_enetdown) {
464 		sk->sk_err = ENETDOWN;
465 		if (!sock_flag(sk, SOCK_DEAD))
466 			sk->sk_error_report(sk);
467 	}
468 
469 	return err;
470 }
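
raw_bind() also accepts can_ifindex == 0; the filters are then registered
device-independently and the socket receives matching frames from every CAN
interface. A minimal sketch of such an "any interface" bind (helper name
assumed):

#include <string.h>
#include <sys/socket.h>
#include <linux/can.h>
#include <linux/can/raw.h>

static int open_any_interface_socket(void)
{
	struct sockaddr_can addr;
	int s = socket(PF_CAN, SOCK_RAW, CAN_RAW);

	if (s < 0)
		return -1;

	memset(&addr, 0, sizeof(addr));
	addr.can_family  = AF_CAN;
	addr.can_ifindex = 0;	/* 0 = bind to all CAN interfaces */

	if (bind(s, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		return -1;

	/* recvfrom()/recvmsg() report the originating ifindex per frame */
	return s;
}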
471 
472 static int raw_getname(struct socket *sock, struct sockaddr *uaddr,
473 		       int peer)
474 {
475 	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
476 	struct sock *sk = sock->sk;
477 	struct raw_sock *ro = raw_sk(sk);
478 
479 	if (peer)
480 		return -EOPNOTSUPP;
481 
482 	memset(addr, 0, sizeof(*addr));
483 	addr->can_family  = AF_CAN;
484 	addr->can_ifindex = ro->ifindex;
485 
486 	return sizeof(*addr);
487 }
488 
489 static int raw_setsockopt(struct socket *sock, int level, int optname,
490 			  char __user *optval, unsigned int optlen)
491 {
492 	struct sock *sk = sock->sk;
493 	struct raw_sock *ro = raw_sk(sk);
494 	struct can_filter *filter = NULL;  /* dyn. alloc'ed filters */
495 	struct can_filter sfilter;         /* single filter */
496 	struct net_device *dev = NULL;
497 	can_err_mask_t err_mask = 0;
498 	int count = 0;
499 	int err = 0;
500 
501 	if (level != SOL_CAN_RAW)
502 		return -EINVAL;
503 
504 	switch (optname) {
505 
506 	case CAN_RAW_FILTER:
507 		if (optlen % sizeof(struct can_filter) != 0)
508 			return -EINVAL;
509 
510 		if (optlen > CAN_RAW_FILTER_MAX * sizeof(struct can_filter))
511 			return -EINVAL;
512 
513 		count = optlen / sizeof(struct can_filter);
514 
515 		if (count > 1) {
516 			/* filter does not fit into dfilter => alloc space */
517 			filter = memdup_user(optval, optlen);
518 			if (IS_ERR(filter))
519 				return PTR_ERR(filter);
520 		} else if (count == 1) {
521 			if (copy_from_user(&sfilter, optval, sizeof(sfilter)))
522 				return -EFAULT;
523 		}
524 
525 		lock_sock(sk);
526 
527 		if (ro->bound && ro->ifindex)
528 			dev = dev_get_by_index(sock_net(sk), ro->ifindex);
529 
530 		if (ro->bound) {
531 			/* (try to) register the new filters */
532 			if (count == 1)
533 				err = raw_enable_filters(sock_net(sk), dev, sk,
534 							 &sfilter, 1);
535 			else
536 				err = raw_enable_filters(sock_net(sk), dev, sk,
537 							 filter, count);
538 			if (err) {
539 				if (count > 1)
540 					kfree(filter);
541 				goto out_fil;
542 			}
543 
544 			/* remove old filter registrations */
545 			raw_disable_filters(sock_net(sk), dev, sk, ro->filter,
546 					    ro->count);
547 		}
548 
549 		/* remove old filter space */
550 		if (ro->count > 1)
551 			kfree(ro->filter);
552 
553 		/* link new filters to the socket */
554 		if (count == 1) {
555 			/* copy filter data for single filter */
556 			ro->dfilter = sfilter;
557 			filter = &ro->dfilter;
558 		}
559 		ro->filter = filter;
560 		ro->count  = count;
561 
562  out_fil:
563 		if (dev)
564 			dev_put(dev);
565 
566 		release_sock(sk);
567 
568 		break;
569 
570 	case CAN_RAW_ERR_FILTER:
571 		if (optlen != sizeof(err_mask))
572 			return -EINVAL;
573 
574 		if (copy_from_user(&err_mask, optval, optlen))
575 			return -EFAULT;
576 
577 		err_mask &= CAN_ERR_MASK;
578 
579 		lock_sock(sk);
580 
581 		if (ro->bound && ro->ifindex)
582 			dev = dev_get_by_index(sock_net(sk), ro->ifindex);
583 
584 		/* remove current error mask */
585 		if (ro->bound) {
586 			/* (try to) register the new err_mask */
587 			err = raw_enable_errfilter(sock_net(sk), dev, sk,
588 						   err_mask);
589 
590 			if (err)
591 				goto out_err;
592 
593 			/* remove old err_mask registration */
594 			raw_disable_errfilter(sock_net(sk), dev, sk,
595 					      ro->err_mask);
596 		}
597 
598 		/* link new err_mask to the socket */
599 		ro->err_mask = err_mask;
600 
601  out_err:
602 		if (dev)
603 			dev_put(dev);
604 
605 		release_sock(sk);
606 
607 		break;
608 
609 	case CAN_RAW_LOOPBACK:
610 		if (optlen != sizeof(ro->loopback))
611 			return -EINVAL;
612 
613 		if (copy_from_user(&ro->loopback, optval, optlen))
614 			return -EFAULT;
615 
616 		break;
617 
618 	case CAN_RAW_RECV_OWN_MSGS:
619 		if (optlen != sizeof(ro->recv_own_msgs))
620 			return -EINVAL;
621 
622 		if (copy_from_user(&ro->recv_own_msgs, optval, optlen))
623 			return -EFAULT;
624 
625 		break;
626 
627 	case CAN_RAW_FD_FRAMES:
628 		if (optlen != sizeof(ro->fd_frames))
629 			return -EINVAL;
630 
631 		if (copy_from_user(&ro->fd_frames, optval, optlen))
632 			return -EFAULT;
633 
634 		break;
635 
636 	case CAN_RAW_JOIN_FILTERS:
637 		if (optlen != sizeof(ro->join_filters))
638 			return -EINVAL;
639 
640 		if (copy_from_user(&ro->join_filters, optval, optlen))
641 			return -EFAULT;
642 
643 		break;
644 
645 	default:
646 		return -ENOPROTOOPT;
647 	}
648 	return err;
649 }
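
Of the options above, CAN_RAW_FD_FRAMES is the one that widens the accepted
frame sizes: once enabled, both struct can_frame (CAN_MTU) and struct
canfd_frame (CANFD_MTU) may be written to and read from the socket. A sketch
that enables it and sends one CAN FD frame; the bound socket s and an FD
capable interface are assumptions:

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/can.h>
#include <linux/can/raw.h>

static int send_fd_frame(int s)
{
	struct canfd_frame cfd;
	int enable = 1;

	if (setsockopt(s, SOL_CAN_RAW, CAN_RAW_FD_FRAMES,
		       &enable, sizeof(enable)) < 0)
		return -1;

	memset(&cfd, 0, sizeof(cfd));
	cfd.can_id = 0x123;
	cfd.len    = 12;		/* CAN FD carries up to 64 data bytes */
	memset(cfd.data, 0xaa, cfd.len);

	/* on an FD enabled socket the written size must be CANFD_MTU
	 * (or CAN_MTU for a classic frame) */
	return write(s, &cfd, CANFD_MTU) == CANFD_MTU ? 0 : -1;
}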
650 
651 static int raw_getsockopt(struct socket *sock, int level, int optname,
652 			  char __user *optval, int __user *optlen)
653 {
654 	struct sock *sk = sock->sk;
655 	struct raw_sock *ro = raw_sk(sk);
656 	int len;
657 	void *val;
658 	int err = 0;
659 
660 	if (level != SOL_CAN_RAW)
661 		return -EINVAL;
662 	if (get_user(len, optlen))
663 		return -EFAULT;
664 	if (len < 0)
665 		return -EINVAL;
666 
667 	switch (optname) {
668 
669 	case CAN_RAW_FILTER:
670 		lock_sock(sk);
671 		if (ro->count > 0) {
672 			int fsize = ro->count * sizeof(struct can_filter);
673 			if (len > fsize)
674 				len = fsize;
675 			if (copy_to_user(optval, ro->filter, len))
676 				err = -EFAULT;
677 		} else
678 			len = 0;
679 		release_sock(sk);
680 
681 		if (!err)
682 			err = put_user(len, optlen);
683 		return err;
684 
685 	case CAN_RAW_ERR_FILTER:
686 		if (len > sizeof(can_err_mask_t))
687 			len = sizeof(can_err_mask_t);
688 		val = &ro->err_mask;
689 		break;
690 
691 	case CAN_RAW_LOOPBACK:
692 		if (len > sizeof(int))
693 			len = sizeof(int);
694 		val = &ro->loopback;
695 		break;
696 
697 	case CAN_RAW_RECV_OWN_MSGS:
698 		if (len > sizeof(int))
699 			len = sizeof(int);
700 		val = &ro->recv_own_msgs;
701 		break;
702 
703 	case CAN_RAW_FD_FRAMES:
704 		if (len > sizeof(int))
705 			len = sizeof(int);
706 		val = &ro->fd_frames;
707 		break;
708 
709 	case CAN_RAW_JOIN_FILTERS:
710 		if (len > sizeof(int))
711 			len = sizeof(int);
712 		val = &ro->join_filters;
713 		break;
714 
715 	default:
716 		return -ENOPROTOOPT;
717 	}
718 
719 	if (put_user(len, optlen))
720 		return -EFAULT;
721 	if (copy_to_user(optval, val, len))
722 		return -EFAULT;
723 	return 0;
724 }
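
The CAN_RAW_FILTER branch above truncates to the caller's buffer and reports
the length actually used, so the installed filter list can be read back. A
sketch (socket s and helper name assumed):

#include <stdio.h>
#include <sys/socket.h>
#include <linux/can.h>
#include <linux/can/raw.h>

static void dump_filters(int s)
{
	struct can_filter rfilter[CAN_RAW_FILTER_MAX];
	socklen_t len = sizeof(rfilter);
	unsigned int i;

	if (getsockopt(s, SOL_CAN_RAW, CAN_RAW_FILTER, rfilter, &len) < 0)
		return;

	for (i = 0; i < len / sizeof(struct can_filter); i++)
		printf("filter %u: id 0x%x mask 0x%x\n",
		       i, rfilter[i].can_id, rfilter[i].can_mask);
}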
725 
726 static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
727 {
728 	struct sock *sk = sock->sk;
729 	struct raw_sock *ro = raw_sk(sk);
730 	struct sk_buff *skb;
731 	struct net_device *dev;
732 	int ifindex;
733 	int err;
734 
735 	if (msg->msg_name) {
736 		DECLARE_SOCKADDR(struct sockaddr_can *, addr, msg->msg_name);
737 
738 		if (msg->msg_namelen < sizeof(*addr))
739 			return -EINVAL;
740 
741 		if (addr->can_family != AF_CAN)
742 			return -EINVAL;
743 
744 		ifindex = addr->can_ifindex;
745 	} else
746 		ifindex = ro->ifindex;
747 
748 	if (ro->fd_frames) {
749 		if (unlikely(size != CANFD_MTU && size != CAN_MTU))
750 			return -EINVAL;
751 	} else {
752 		if (unlikely(size != CAN_MTU))
753 			return -EINVAL;
754 	}
755 
756 	dev = dev_get_by_index(sock_net(sk), ifindex);
757 	if (!dev)
758 		return -ENXIO;
759 
760 	skb = sock_alloc_send_skb(sk, size + sizeof(struct can_skb_priv),
761 				  msg->msg_flags & MSG_DONTWAIT, &err);
762 	if (!skb)
763 		goto put_dev;
764 
765 	can_skb_reserve(skb);
766 	can_skb_prv(skb)->ifindex = dev->ifindex;
767 	can_skb_prv(skb)->skbcnt = 0;
768 
769 	err = memcpy_from_msg(skb_put(skb, size), msg, size);
770 	if (err < 0)
771 		goto free_skb;
772 
773 	sock_tx_timestamp(sk, sk->sk_tsflags, &skb_shinfo(skb)->tx_flags);
774 
775 	skb->dev = dev;
776 	skb->sk  = sk;
777 	skb->priority = sk->sk_priority;
778 
779 	err = can_send(skb, ro->loopback);
780 
781 	dev_put(dev);
782 
783 	if (err)
784 		goto send_failed;
785 
786 	return size;
787 
788 free_skb:
789 	kfree_skb(skb);
790 put_dev:
791 	dev_put(dev);
792 send_failed:
793 	return err;
794 }
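
Since raw_sendmsg() takes the outgoing interface from msg_name whenever one
is supplied, a socket that is unbound or bound to "all interfaces" can pick
the device per frame with sendto(). A sketch (interface name and helper name
are assumptions):

#include <string.h>
#include <net/if.h>
#include <sys/socket.h>
#include <linux/can.h>

static int send_on(int s, const char *ifname)
{
	struct sockaddr_can addr;
	struct can_frame frame;

	memset(&addr, 0, sizeof(addr));
	addr.can_family  = AF_CAN;
	addr.can_ifindex = if_nametoindex(ifname);

	memset(&frame, 0, sizeof(frame));
	frame.can_id  = 0x1a2b3c4d | CAN_EFF_FLAG;	/* 29 bit ID */
	frame.can_dlc = 2;
	frame.data[0] = 0xde;
	frame.data[1] = 0xad;

	/* a non-FD socket only accepts writes of exactly CAN_MTU bytes */
	return sendto(s, &frame, CAN_MTU, 0,
		      (struct sockaddr *)&addr, sizeof(addr)) == CAN_MTU ? 0 : -1;
}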
795 
796 static int raw_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
797 		       int flags)
798 {
799 	struct sock *sk = sock->sk;
800 	struct sk_buff *skb;
801 	int err = 0;
802 	int noblock;
803 
804 	noblock =  flags & MSG_DONTWAIT;
805 	flags   &= ~MSG_DONTWAIT;
806 
807 	skb = skb_recv_datagram(sk, flags, noblock, &err);
808 	if (!skb)
809 		return err;
810 
811 	if (size < skb->len)
812 		msg->msg_flags |= MSG_TRUNC;
813 	else
814 		size = skb->len;
815 
816 	err = memcpy_to_msg(msg, skb->data, size);
817 	if (err < 0) {
818 		skb_free_datagram(sk, skb);
819 		return err;
820 	}
821 
822 	sock_recv_ts_and_drops(msg, sk, skb);
823 
824 	if (msg->msg_name) {
825 		__sockaddr_check_size(sizeof(struct sockaddr_can));
826 		msg->msg_namelen = sizeof(struct sockaddr_can);
827 		memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
828 	}
829 
830 	/* assign the flags that have been recorded in raw_rcv() */
831 	msg->msg_flags |= *(raw_flags(skb));
832 
833 	skb_free_datagram(sk, skb);
834 
835 	return size;
836 }
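
Because raw_recvmsg() copies the sockaddr_can stored in skb->cb into
msg_name, recvfrom() tells the reader which interface a frame arrived on,
which is mainly useful together with the "bind to all interfaces" case shown
above. A sketch (socket s and helper name assumed):

#include <stdio.h>
#include <net/if.h>
#include <sys/socket.h>
#include <linux/can.h>

static void recv_with_origin(int s)
{
	struct sockaddr_can addr;
	socklen_t alen = sizeof(addr);
	struct can_frame frame;
	char ifname[IF_NAMESIZE] = "?";
	ssize_t n;

	n = recvfrom(s, &frame, sizeof(frame), 0,
		     (struct sockaddr *)&addr, &alen);
	if (n != (ssize_t)CAN_MTU)
		return;

	if_indextoname(addr.can_ifindex, ifname);
	printf("ID 0x%x with %d data bytes from %s\n",
	       frame.can_id, frame.can_dlc, ifname);
}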
837 
838 static const struct proto_ops raw_ops = {
839 	.family        = PF_CAN,
840 	.release       = raw_release,
841 	.bind          = raw_bind,
842 	.connect       = sock_no_connect,
843 	.socketpair    = sock_no_socketpair,
844 	.accept        = sock_no_accept,
845 	.getname       = raw_getname,
846 	.poll          = datagram_poll,
847 	.ioctl         = can_ioctl,	/* use can_ioctl() from af_can.c */
848 	.listen        = sock_no_listen,
849 	.shutdown      = sock_no_shutdown,
850 	.setsockopt    = raw_setsockopt,
851 	.getsockopt    = raw_getsockopt,
852 	.sendmsg       = raw_sendmsg,
853 	.recvmsg       = raw_recvmsg,
854 	.mmap          = sock_no_mmap,
855 	.sendpage      = sock_no_sendpage,
856 };
857 
858 static struct proto raw_proto __read_mostly = {
859 	.name       = "CAN_RAW",
860 	.owner      = THIS_MODULE,
861 	.obj_size   = sizeof(struct raw_sock),
862 	.init       = raw_init,
863 };
864 
865 static const struct can_proto raw_can_proto = {
866 	.type       = SOCK_RAW,
867 	.protocol   = CAN_RAW,
868 	.ops        = &raw_ops,
869 	.prot       = &raw_proto,
870 };
871 
872 static __init int raw_module_init(void)
873 {
874 	int err;
875 
876 	pr_info("can: raw protocol (rev " CAN_RAW_VERSION ")\n");
877 
878 	err = can_proto_register(&raw_can_proto);
879 	if (err < 0)
880 		printk(KERN_ERR "can: registration of raw protocol failed\n");
881 
882 	return err;
883 }
884 
885 static __exit void raw_module_exit(void)
886 {
887 	can_proto_unregister(&raw_can_proto);
888 }
889 
890 module_init(raw_module_init);
891 module_exit(raw_module_exit);
892